// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright(C) 2016 Linaro Limited. All rights reserved.
 * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
 */
#include <linux/coresight.h>
#include <linux/dma-mapping.h>
#include <linux/iommu.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include "coresight-catu.h"
#include "coresight-priv.h"
#include "coresight-tmc.h"
/*
 * The TMC ETR SG has a page size of 4K. The SG table contains pointers
 * to 4KB buffers. However, the OS may use a PAGE_SIZE different from
 * 4K (i.e., 16KB or 64KB). This implies that a single OS page could
 * contain more than one SG buffer and tables.
 *
 * A table entry has the following format:
 *
 * ---Bit31------------Bit4-------Bit1-----Bit0--
 * |     Address[39:12]    | SBZ |  Entry Type  |
 * ----------------------------------------------
 *
 * Address: Bits [39:12] of a physical page address. Bits [11:0] are
 *	    always zero.
 *
 * Entry type:
 *	b00 - Reserved.
 *	b01 - Last entry in the tables, points to 4K page buffer.
 *	b10 - Normal entry, points to 4K page buffer.
 *	b11 - Link. The address points to the base of next table.
 */
typedef u32 sgte_t;

#define ETR_SG_PAGE_SHIFT		12
#define ETR_SG_PAGE_SIZE		(1UL << ETR_SG_PAGE_SHIFT)
#define ETR_SG_PAGES_PER_SYSPAGE	(PAGE_SIZE / ETR_SG_PAGE_SIZE)
#define ETR_SG_PTRS_PER_PAGE		(ETR_SG_PAGE_SIZE / sizeof(sgte_t))
#define ETR_SG_PTRS_PER_SYSPAGE		(PAGE_SIZE / sizeof(sgte_t))

#define ETR_SG_ET_MASK			0x3
#define ETR_SG_ET_LAST			0x1
#define ETR_SG_ET_NORMAL		0x2
#define ETR_SG_ET_LINK			0x3
#define ETR_SG_ADDR_SHIFT		4

#define ETR_SG_ENTRY(addr, type) \
	(sgte_t)((((addr) >> ETR_SG_PAGE_SHIFT) << ETR_SG_ADDR_SHIFT) | \
		 (type & ETR_SG_ET_MASK))

#define ETR_SG_ADDR(entry) \
	(((dma_addr_t)(entry) >> ETR_SG_ADDR_SHIFT) << ETR_SG_PAGE_SHIFT)
#define ETR_SG_ET(entry)		((entry) & ETR_SG_ET_MASK)
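/*
 * Worked example (illustrative, not part of the original source): a 4K
 * data page at physical address 0x80012000 encoded as a NORMAL entry is
 * ETR_SG_ENTRY(0x80012000, ETR_SG_ET_NORMAL)
 *	= ((0x80012000 >> 12) << 4) | 0x2 = 0x800122,
 * and ETR_SG_ADDR(0x800122) recovers 0x80012000.
 */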
/*
 * struct etr_sg_table : ETR SG Table
 * @sg_table:	Generic SG Table holding the data/table pages.
 * @hwaddr:	hardware address used by the TMC, which is the base
 *		address of the table.
 */
struct etr_sg_table {
	struct tmc_sg_table	*sg_table;
	dma_addr_t		hwaddr;
};
/*
 * tmc_etr_sg_table_entries: Total number of table entries required to map
 * @nr_pages system pages.
 *
 * We need to map @nr_pages * ETR_SG_PAGES_PER_SYSPAGE data pages.
 * Each TMC page can map (ETR_SG_PTRS_PER_PAGE - 1) buffer pointers,
 * with the last entry pointing to another page of table entries.
 * If we spill over to a new page for mapping a single entry, we could
 * as well replace the link entry of the previous page with that last entry.
 */
static inline unsigned long __attribute_const__
tmc_etr_sg_table_entries(int nr_pages)
{
	unsigned long nr_sgpages = nr_pages * ETR_SG_PAGES_PER_SYSPAGE;
	unsigned long nr_sglinks = nr_sgpages / (ETR_SG_PTRS_PER_PAGE - 1);
	/*
	 * If we spill over to a new page for 1 entry, we could as well
	 * make it the LAST entry in the previous page, skipping the Link
	 * pointer.
	 */
	if (nr_sglinks && (nr_sgpages % (ETR_SG_PTRS_PER_PAGE - 1) < 2))
		nr_sglinks--;

	return nr_sgpages + nr_sglinks;
}
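/*
 * Worked example (illustrative, assuming a 4K PAGE_SIZE, so
 * ETR_SG_PAGES_PER_SYSPAGE = 1 and ETR_SG_PTRS_PER_PAGE = 1024):
 * for nr_pages = 2048, nr_sgpages = 2048 and nr_sglinks = 2048 / 1023 = 2,
 * so 2050 table entries are needed in total.
 */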
/*
 * tmc_pages_get_offset:  Go through all the pages in the tmc_pages
 * and map the device address @addr to an offset within the virtually
 * contiguous buffer.
 */
static long
tmc_pages_get_offset(struct tmc_pages *tmc_pages, dma_addr_t addr)
{
	int i;
	dma_addr_t page_start;

	for (i = 0; i < tmc_pages->nr_pages; i++) {
		page_start = tmc_pages->daddrs[i];
		if (addr >= page_start && addr < (page_start + PAGE_SIZE))
			return i * PAGE_SIZE + (addr - page_start);
	}
	return -EINVAL;
}
/*
 * tmc_pages_free : Unmap and free the pages used by tmc_pages.
 * If the pages were not allocated in tmc_pages_alloc(), we would
 * simply drop the refcount.
 */
static void tmc_pages_free(struct tmc_pages *tmc_pages,
			   struct device *dev, enum dma_data_direction dir)
{
	int i;

	for (i = 0; i < tmc_pages->nr_pages; i++) {
		if (tmc_pages->daddrs && tmc_pages->daddrs[i])
			dma_unmap_page(dev, tmc_pages->daddrs[i],
				       PAGE_SIZE, dir);
		if (tmc_pages->pages && tmc_pages->pages[i])
			__free_page(tmc_pages->pages[i]);
	}

	kfree(tmc_pages->pages);
	kfree(tmc_pages->daddrs);
	tmc_pages->pages = NULL;
	tmc_pages->daddrs = NULL;
	tmc_pages->nr_pages = 0;
}
/*
 * tmc_pages_alloc : Allocate and map pages for a given @tmc_pages.
 * If @pages is not NULL, the list of page virtual addresses is
 * used as the data pages. The pages are then dma_map'ed for @dev
 * with dma_direction @dir.
 *
 * Returns 0 upon success, else the error number.
 */
static int tmc_pages_alloc(struct tmc_pages *tmc_pages,
			   struct device *dev, int node,
			   enum dma_data_direction dir, void **pages)
{
	int i, nr_pages;
	dma_addr_t paddr;
	struct page *page;

	nr_pages = tmc_pages->nr_pages;
	tmc_pages->daddrs = kcalloc(nr_pages, sizeof(*tmc_pages->daddrs),
				    GFP_KERNEL);
	if (!tmc_pages->daddrs)
		return -ENOMEM;
	tmc_pages->pages = kcalloc(nr_pages, sizeof(*tmc_pages->pages),
				   GFP_KERNEL);
	if (!tmc_pages->pages) {
		kfree(tmc_pages->daddrs);
		tmc_pages->daddrs = NULL;
		return -ENOMEM;
	}

	for (i = 0; i < nr_pages; i++) {
		if (pages && pages[i]) {
			page = virt_to_page(pages[i]);
			/* Hold a refcount on the page */
			get_page(page);
		} else {
			page = alloc_pages_node(node,
						GFP_KERNEL | __GFP_ZERO, 0);
			if (!page)
				goto err;
		}
		paddr = dma_map_page(dev, page, 0, PAGE_SIZE, dir);
		if (dma_mapping_error(dev, paddr))
			goto err;
		tmc_pages->daddrs[i] = paddr;
		tmc_pages->pages[i] = page;
	}
	return 0;
err:
	tmc_pages_free(tmc_pages, dev, dir);
	return -ENOMEM;
}
static inline long
tmc_sg_get_data_page_offset(struct tmc_sg_table *sg_table, dma_addr_t addr)
{
	return tmc_pages_get_offset(&sg_table->data_pages, addr);
}

static inline void tmc_free_table_pages(struct tmc_sg_table *sg_table)
{
	if (sg_table->table_vaddr)
		vunmap(sg_table->table_vaddr);
	tmc_pages_free(&sg_table->table_pages, sg_table->dev, DMA_TO_DEVICE);
}

static void tmc_free_data_pages(struct tmc_sg_table *sg_table)
{
	if (sg_table->data_vaddr)
		vunmap(sg_table->data_vaddr);
	tmc_pages_free(&sg_table->data_pages, sg_table->dev, DMA_FROM_DEVICE);
}

void tmc_free_sg_table(struct tmc_sg_table *sg_table)
{
	tmc_free_table_pages(sg_table);
	tmc_free_data_pages(sg_table);
}
/*
 * Alloc pages for the table. Since this will be used by the device,
 * allocate the pages closer to the device (i.e., dev_to_node(dev)
 * rather than the CPU node).
 */
static int tmc_alloc_table_pages(struct tmc_sg_table *sg_table)
{
	int rc;
	struct tmc_pages *table_pages = &sg_table->table_pages;

	rc = tmc_pages_alloc(table_pages, sg_table->dev,
			     dev_to_node(sg_table->dev),
			     DMA_TO_DEVICE, NULL);
	if (rc)
		return rc;
	sg_table->table_vaddr = vmap(table_pages->pages,
				     table_pages->nr_pages,
				     VM_MAP, PAGE_KERNEL);
	if (!sg_table->table_vaddr)
		rc = -ENOMEM;
	else
		sg_table->table_daddr = table_pages->daddrs[0];
	return rc;
}
static int tmc_alloc_data_pages(struct tmc_sg_table *sg_table, void **pages)
{
	int rc;

	/* Allocate data pages on the node requested by the caller */
	rc = tmc_pages_alloc(&sg_table->data_pages,
			     sg_table->dev, sg_table->node,
			     DMA_FROM_DEVICE, pages);
	if (!rc) {
		sg_table->data_vaddr = vmap(sg_table->data_pages.pages,
					    sg_table->data_pages.nr_pages,
					    VM_MAP, PAGE_KERNEL);
		if (!sg_table->data_vaddr)
			rc = -ENOMEM;
	}
	return rc;
}
/*
 * tmc_alloc_sg_table: Allocate and setup dma pages for the TMC SG table
 * and data buffers. TMC writes to the data buffers and reads from the SG
 * Table.
 *
 * @dev		- Device to which page should be DMA mapped.
 * @node	- Numa node for mem allocations
 * @nr_tpages	- Number of pages for the table entries.
 * @nr_dpages	- Number of pages for Data buffer.
 * @pages	- Optional list of virtual address of pages.
 */
struct tmc_sg_table *tmc_alloc_sg_table(struct device *dev, int node,
					int nr_tpages, int nr_dpages,
					void **pages)
{
	long rc;
	struct tmc_sg_table *sg_table;

	sg_table = kzalloc(sizeof(*sg_table), GFP_KERNEL);
	if (!sg_table)
		return ERR_PTR(-ENOMEM);
	sg_table->data_pages.nr_pages = nr_dpages;
	sg_table->table_pages.nr_pages = nr_tpages;
	sg_table->node = node;
	sg_table->dev = dev;

	rc = tmc_alloc_data_pages(sg_table, pages);
	if (!rc)
		rc = tmc_alloc_table_pages(sg_table);
	if (rc) {
		tmc_free_sg_table(sg_table);
		kfree(sg_table);
		return ERR_PTR(rc);
	}

	return sg_table;
}
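/*
 * Usage sketch (for illustration only): tmc_init_etr_sg_table() below derives
 * nr_tpages from tmc_etr_sg_table_entries() and then calls
 *	tmc_alloc_sg_table(dev, node, nr_tpages, nr_dpages, pages);
 * to obtain both the table and the data buffer in one go.
 */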
/*
 * tmc_sg_table_sync_data_range: Sync the data buffer written
 * by the device from @offset up to @size bytes.
 */
void tmc_sg_table_sync_data_range(struct tmc_sg_table *table,
				  u64 offset, u64 size)
{
	int i, index, start;
	int npages = DIV_ROUND_UP(size, PAGE_SIZE);
	struct device *dev = table->dev;
	struct tmc_pages *data = &table->data_pages;

	start = offset >> PAGE_SHIFT;
	for (i = start; i < (start + npages); i++) {
		index = i % data->nr_pages;
		dma_sync_single_for_cpu(dev, data->daddrs[index],
					PAGE_SIZE, DMA_FROM_DEVICE);
	}
}
/* tmc_sg_table_sync_table: Sync the page table */
void tmc_sg_table_sync_table(struct tmc_sg_table *sg_table)
{
	int i;
	struct device *dev = sg_table->dev;
	struct tmc_pages *table_pages = &sg_table->table_pages;

	for (i = 0; i < table_pages->nr_pages; i++)
		dma_sync_single_for_device(dev, table_pages->daddrs[i],
					   PAGE_SIZE, DMA_TO_DEVICE);
}
/*
 * tmc_sg_table_get_data: Get the buffer pointer for data @offset
 * in the SG buffer. The @bufpp is updated to point to the buffer.
 * Returns :
 *	the length of linear data available at @offset.
 *	or
 *	<= 0 if no data is available.
 */
ssize_t tmc_sg_table_get_data(struct tmc_sg_table *sg_table,
			      u64 offset, size_t len, char **bufpp)
{
	size_t size;
	int pg_idx = offset >> PAGE_SHIFT;
	int pg_offset = offset & (PAGE_SIZE - 1);
	struct tmc_pages *data_pages = &sg_table->data_pages;

	size = tmc_sg_table_buf_size(sg_table);
	if (offset >= size)
		return -EINVAL;

	/* Make sure we don't go beyond the end */
	len = (len < (size - offset)) ? len : size - offset;
	/* Respect the page boundaries */
	len = (len < (PAGE_SIZE - pg_offset)) ? len : (PAGE_SIZE - pg_offset);
	if (len > 0)
		*bufpp = page_address(data_pages->pages[pg_idx]) + pg_offset;
	return len;
}
#ifdef ETR_SG_DEBUG
/* Map a dma address to virtual address */
static unsigned long
tmc_sg_daddr_to_vaddr(struct tmc_sg_table *sg_table,
		      dma_addr_t addr, bool table)
{
	long offset;
	unsigned long base;
	struct tmc_pages *tmc_pages;

	if (table) {
		tmc_pages = &sg_table->table_pages;
		base = (unsigned long)sg_table->table_vaddr;
	} else {
		tmc_pages = &sg_table->data_pages;
		base = (unsigned long)sg_table->data_vaddr;
	}

	offset = tmc_pages_get_offset(tmc_pages, addr);
	if (offset < 0)
		return 0;
	return base + offset;
}
/* Dump the given sg_table */
static void tmc_etr_sg_table_dump(struct etr_sg_table *etr_table)
{
	sgte_t *ptr;
	int i = 0;
	dma_addr_t addr;
	struct tmc_sg_table *sg_table = etr_table->sg_table;

	ptr = (sgte_t *)tmc_sg_daddr_to_vaddr(sg_table,
					      etr_table->hwaddr, true);
	while (ptr) {
		addr = ETR_SG_ADDR(*ptr);
		switch (ETR_SG_ET(*ptr)) {
		case ETR_SG_ET_NORMAL:
			dev_dbg(sg_table->dev,
				"%05d: %p\t:[N] 0x%llx\n", i, ptr, addr);
			ptr++;
			break;
		case ETR_SG_ET_LINK:
			dev_dbg(sg_table->dev,
				"%05d: *** %p\t:{L} 0x%llx ***\n",
				i, ptr, addr);
			ptr = (sgte_t *)tmc_sg_daddr_to_vaddr(sg_table,
							      addr, true);
			break;
		case ETR_SG_ET_LAST:
			dev_dbg(sg_table->dev,
				"%05d: ### %p\t:[L] 0x%llx ###\n",
				i, ptr, addr);
			return;
		default:
			dev_dbg(sg_table->dev,
				"%05d: xxx %p\t:[INVALID] 0x%llx xxx\n",
				i, ptr, addr);
			return;
		}
		i++;
	}
	dev_dbg(sg_table->dev, "******* End of Table *****\n");
}
#else
static inline void tmc_etr_sg_table_dump(struct etr_sg_table *etr_table) {}
#endif
/*
 * Populate the SG Table page table entries from the table/data
 * pages allocated. Each data page has ETR_SG_PAGES_PER_SYSPAGE SG pages,
 * and so does each table page. So we keep track of the indices of the
 * tables in each system page and move the pointers accordingly.
 */
#define INC_IDX_ROUND(idx, size) ((idx) = ((idx) + 1) % (size))
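/*
 * For illustration (not in the original source): assuming 64K system pages,
 * ETR_SG_PAGES_PER_SYSPAGE = 16, so INC_IDX_ROUND(sgtidx, 16) walks the
 * sg-table index 0, 1, ..., 15 and then wraps to 0; the caller below treats
 * the wrap (a result of 0) as the cue to move on to the next system page.
 */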
static void tmc_etr_sg_table_populate(struct etr_sg_table *etr_table)
{
	dma_addr_t paddr;
	int i, type, nr_entries;
	int tpidx = 0; /* index to the current system table_page */
	int sgtidx = 0; /* index to the sg_table within the current syspage */
	int sgtentry = 0; /* the entry within the sg_table */
	int dpidx = 0; /* index to the current system data_page */
	int spidx = 0; /* index to the SG page within the current data page */
	sgte_t *ptr; /* pointer to the table entry to fill */
	struct tmc_sg_table *sg_table = etr_table->sg_table;
	dma_addr_t *table_daddrs = sg_table->table_pages.daddrs;
	dma_addr_t *data_daddrs = sg_table->data_pages.daddrs;

	nr_entries = tmc_etr_sg_table_entries(sg_table->data_pages.nr_pages);
	/*
	 * Use the contiguous virtual address of the table to update entries.
	 */
	ptr = sg_table->table_vaddr;
	/*
	 * Fill all the entries, except the last entry to avoid special
	 * checks within the loop.
	 */
	for (i = 0; i < nr_entries - 1; i++) {
		if (sgtentry == ETR_SG_PTRS_PER_PAGE - 1) {
			/*
			 * Last entry in a sg_table page is a link address to
			 * the next table page. If this sg_table is the last
			 * one in the system page, it links to the first
			 * sg_table in the next system page. Otherwise, it
			 * links to the next sg_table page within the system
			 * page.
			 */
			if (sgtidx == ETR_SG_PAGES_PER_SYSPAGE - 1) {
				paddr = table_daddrs[tpidx + 1];
			} else {
				paddr = table_daddrs[tpidx] +
					(ETR_SG_PAGE_SIZE * (sgtidx + 1));
			}
			type = ETR_SG_ET_LINK;
		} else {
			/*
			 * Update the indices to the data_pages to point to the
			 * next sg_page in the data buffer.
			 */
			type = ETR_SG_ET_NORMAL;
			paddr = data_daddrs[dpidx] + spidx * ETR_SG_PAGE_SIZE;
			if (!INC_IDX_ROUND(spidx, ETR_SG_PAGES_PER_SYSPAGE))
				dpidx++;
		}
		*ptr++ = ETR_SG_ENTRY(paddr, type);
		/*
		 * Move to the next table pointer, moving the table page index
		 * if necessary.
		 */
		if (!INC_IDX_ROUND(sgtentry, ETR_SG_PTRS_PER_PAGE)) {
			if (!INC_IDX_ROUND(sgtidx, ETR_SG_PAGES_PER_SYSPAGE))
				tpidx++;
		}
	}

	/* Set up the last entry, which is always a data pointer */
	paddr = data_daddrs[dpidx] + spidx * ETR_SG_PAGE_SIZE;
	*ptr++ = ETR_SG_ENTRY(paddr, ETR_SG_ET_LAST);
}
/*
 * tmc_init_etr_sg_table: Allocate a TMC ETR SG table, a data buffer of @size
 * and populate the table.
 *
 * @dev		- Device pointer for the TMC
 * @node	- NUMA node where the memory should be allocated
 * @size	- Total size of the data buffer
 * @pages	- Optional list of page virtual address
 */
static struct etr_sg_table *
tmc_init_etr_sg_table(struct device *dev, int node,
		      unsigned long size, void **pages)
{
	int nr_entries, nr_tpages;
	int nr_dpages = size >> PAGE_SHIFT;
	struct tmc_sg_table *sg_table;
	struct etr_sg_table *etr_table;

	etr_table = kzalloc(sizeof(*etr_table), GFP_KERNEL);
	if (!etr_table)
		return ERR_PTR(-ENOMEM);
	nr_entries = tmc_etr_sg_table_entries(nr_dpages);
	nr_tpages = DIV_ROUND_UP(nr_entries, ETR_SG_PTRS_PER_SYSPAGE);

	sg_table = tmc_alloc_sg_table(dev, node, nr_tpages, nr_dpages, pages);
	if (IS_ERR(sg_table)) {
		kfree(etr_table);
		return ERR_CAST(sg_table);
	}

	etr_table->sg_table = sg_table;
	/* TMC should use the table base address for DBA */
	etr_table->hwaddr = sg_table->table_daddr;
	tmc_etr_sg_table_populate(etr_table);
	/* Sync the table pages for the HW */
	tmc_sg_table_sync_table(sg_table);
	tmc_etr_sg_table_dump(etr_table);

	return etr_table;
}
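/*
 * Sizing example (illustrative, assuming a 4K PAGE_SIZE): for size = 1MB,
 * nr_dpages = 256, tmc_etr_sg_table_entries(256) = 256 (no link entries
 * needed), and nr_tpages = DIV_ROUND_UP(256, 1024) = 1, i.e. a single table
 * page describes the whole buffer.
 */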
/*
 * tmc_etr_alloc_flat_buf: Allocate a contiguous DMA buffer.
 */
static int tmc_etr_alloc_flat_buf(struct tmc_drvdata *drvdata,
				  struct etr_buf *etr_buf, int node,
				  void **pages)
{
	struct etr_flat_buf *flat_buf;

	/* We cannot reuse existing pages for flat buf */
	if (pages)
		return -EINVAL;

	flat_buf = kzalloc(sizeof(*flat_buf), GFP_KERNEL);
	if (!flat_buf)
		return -ENOMEM;

	flat_buf->vaddr = dma_alloc_coherent(drvdata->dev, etr_buf->size,
					     &flat_buf->daddr, GFP_KERNEL);
	if (!flat_buf->vaddr) {
		kfree(flat_buf);
		return -ENOMEM;
	}

	flat_buf->size = etr_buf->size;
	flat_buf->dev = drvdata->dev;
	etr_buf->hwaddr = flat_buf->daddr;
	etr_buf->mode = ETR_MODE_FLAT;
	etr_buf->private = flat_buf;
	return 0;
}
static void tmc_etr_free_flat_buf(struct etr_buf *etr_buf)
{
	struct etr_flat_buf *flat_buf = etr_buf->private;

	if (flat_buf && flat_buf->daddr)
		dma_free_coherent(flat_buf->dev, flat_buf->size,
				  flat_buf->vaddr, flat_buf->daddr);
	kfree(flat_buf);
}
static void tmc_etr_sync_flat_buf(struct etr_buf *etr_buf, u64 rrp, u64 rwp)
{
	/*
	 * Adjust the buffer to point to the beginning of the trace data
	 * and update the available trace data.
	 */
	etr_buf->offset = rrp - etr_buf->hwaddr;
	if (etr_buf->full)
		etr_buf->len = etr_buf->size;
	else
		etr_buf->len = rwp - rrp;
}
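/*
 * Example (hypothetical values): with hwaddr = 0x80000000, size = 1MB,
 * rrp = 0x80004000 and rwp = 0x80010000 and no overflow, the trace data
 * starts at offset 0x4000 and etr_buf->len = 0xc000 bytes.
 */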
static ssize_t tmc_etr_get_data_flat_buf(struct etr_buf *etr_buf,
					 u64 offset, size_t len, char **bufpp)
{
	struct etr_flat_buf *flat_buf = etr_buf->private;

	*bufpp = (char *)flat_buf->vaddr + offset;
	/*
	 * tmc_etr_buf_get_data already adjusts the length to handle
	 * the buffer wrapping around.
	 */
	return len;
}

static const struct etr_buf_operations etr_flat_buf_ops = {
	.alloc = tmc_etr_alloc_flat_buf,
	.free = tmc_etr_free_flat_buf,
	.sync = tmc_etr_sync_flat_buf,
	.get_data = tmc_etr_get_data_flat_buf,
};
/*
 * tmc_etr_alloc_sg_buf: Allocate an SG buf @etr_buf. Setup the parameters
 * appropriately.
 */
static int tmc_etr_alloc_sg_buf(struct tmc_drvdata *drvdata,
				struct etr_buf *etr_buf, int node,
				void **pages)
{
	struct etr_sg_table *etr_table;

	etr_table = tmc_init_etr_sg_table(drvdata->dev, node,
					  etr_buf->size, pages);
	if (IS_ERR(etr_table))
		return -ENOMEM;
	etr_buf->hwaddr = etr_table->hwaddr;
	etr_buf->mode = ETR_MODE_ETR_SG;
	etr_buf->private = etr_table;
	return 0;
}

static void tmc_etr_free_sg_buf(struct etr_buf *etr_buf)
{
	struct etr_sg_table *etr_table = etr_buf->private;

	if (etr_table) {
		tmc_free_sg_table(etr_table->sg_table);
		kfree(etr_table);
	}
}

static ssize_t tmc_etr_get_data_sg_buf(struct etr_buf *etr_buf, u64 offset,
				       size_t len, char **bufpp)
{
	struct etr_sg_table *etr_table = etr_buf->private;

	return tmc_sg_table_get_data(etr_table->sg_table, offset, len, bufpp);
}
static void tmc_etr_sync_sg_buf(struct etr_buf *etr_buf, u64 rrp, u64 rwp)
{
	long r_offset, w_offset;
	struct etr_sg_table *etr_table = etr_buf->private;
	struct tmc_sg_table *table = etr_table->sg_table;

	/* Convert hw address to offset in the buffer */
	r_offset = tmc_sg_get_data_page_offset(table, rrp);
	if (r_offset < 0) {
		dev_warn(table->dev,
			 "Unable to map RRP %llx to offset\n", rrp);
		etr_buf->len = 0;
		return;
	}

	w_offset = tmc_sg_get_data_page_offset(table, rwp);
	if (w_offset < 0) {
		dev_warn(table->dev,
			 "Unable to map RWP %llx to offset\n", rwp);
		etr_buf->len = 0;
		return;
	}

	etr_buf->offset = r_offset;
	if (etr_buf->full)
		etr_buf->len = etr_buf->size;
	else
		etr_buf->len = ((w_offset < r_offset) ? etr_buf->size : 0) +
				w_offset - r_offset;
	tmc_sg_table_sync_data_range(table, r_offset, etr_buf->len);
}
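/*
 * Wrap-around example (hypothetical values): for a 1MB buffer with
 * r_offset = 0xf0000 and w_offset = 0x10000 and no overflow,
 * len = 0x100000 + 0x10000 - 0xf0000 = 0x20000 bytes of trace data.
 */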
static const struct etr_buf_operations etr_sg_buf_ops = {
	.alloc = tmc_etr_alloc_sg_buf,
	.free = tmc_etr_free_sg_buf,
	.sync = tmc_etr_sync_sg_buf,
	.get_data = tmc_etr_get_data_sg_buf,
};
/*
 * The TMC ETR may be connected to a CATU device, which can provide an
 * address translation service. This is represented by the output port
 * of the TMC (ETR) being connected to the input port of the CATU.
 *
 * Returns	: coresight_device ptr for the CATU device if a CATU is found.
 *		: NULL otherwise.
 */
struct coresight_device *
tmc_etr_get_catu_device(struct tmc_drvdata *drvdata)
{
	int i;
	struct coresight_device *tmp, *etr = drvdata->csdev;

	if (!IS_ENABLED(CONFIG_CORESIGHT_CATU))
		return NULL;

	for (i = 0; i < etr->nr_outport; i++) {
		tmp = etr->conns[i].child_dev;
		if (tmp && coresight_is_catu_device(tmp))
			return tmp;
	}
	return NULL;
}
static inline void tmc_etr_enable_catu(struct tmc_drvdata *drvdata)
{
	struct coresight_device *catu = tmc_etr_get_catu_device(drvdata);

	if (catu && helper_ops(catu)->enable)
		helper_ops(catu)->enable(catu, drvdata->etr_buf);
}

static inline void tmc_etr_disable_catu(struct tmc_drvdata *drvdata)
{
	struct coresight_device *catu = tmc_etr_get_catu_device(drvdata);

	if (catu && helper_ops(catu)->disable)
		helper_ops(catu)->disable(catu, drvdata->etr_buf);
}
static const struct etr_buf_operations *etr_buf_ops[] = {
	[ETR_MODE_FLAT] = &etr_flat_buf_ops,
	[ETR_MODE_ETR_SG] = &etr_sg_buf_ops,
	[ETR_MODE_CATU] = IS_ENABLED(CONFIG_CORESIGHT_CATU)
						? &etr_catu_buf_ops : NULL,
};
static inline int tmc_etr_mode_alloc_buf(int mode,
					 struct tmc_drvdata *drvdata,
					 struct etr_buf *etr_buf, int node,
					 void **pages)
{
	int rc = -EINVAL;

	switch (mode) {
	case ETR_MODE_FLAT:
	case ETR_MODE_ETR_SG:
	case ETR_MODE_CATU:
		if (etr_buf_ops[mode] && etr_buf_ops[mode]->alloc)
			rc = etr_buf_ops[mode]->alloc(drvdata, etr_buf,
						      node, pages);
		if (!rc)
			etr_buf->ops = etr_buf_ops[mode];
		return rc;
	default:
		return -EINVAL;
	}
}
/*
 * tmc_alloc_etr_buf: Allocate a buffer for use by the ETR.
 * @drvdata	: ETR device details.
 * @size	: size of the requested buffer.
 * @flags	: Required properties for the buffer.
 * @node	: Node for memory allocations.
 * @pages	: An optional list of pages.
 */
static struct etr_buf *tmc_alloc_etr_buf(struct tmc_drvdata *drvdata,
					 ssize_t size, int flags,
					 int node, void **pages)
{
	int rc = -ENOMEM;
	bool has_etr_sg, has_iommu;
	bool has_sg, has_catu;
	struct etr_buf *etr_buf;

	has_etr_sg = tmc_etr_has_cap(drvdata, TMC_ETR_SG);
	has_iommu = iommu_get_domain_for_dev(drvdata->dev);
	has_catu = !!tmc_etr_get_catu_device(drvdata);

	has_sg = has_catu || has_etr_sg;

	etr_buf = kzalloc(sizeof(*etr_buf), GFP_KERNEL);
	if (!etr_buf)
		return ERR_PTR(-ENOMEM);

	etr_buf->size = size;

	/*
	 * If we have to use an existing list of pages, we cannot reliably
	 * use contiguous DMA memory (even if we have an IOMMU). Otherwise,
	 * we use contiguous DMA memory if at least one of the following
	 * conditions is true:
	 * a) The ETR cannot use Scatter-Gather.
	 * b) We have a backing IOMMU.
	 * c) The requested memory size is smaller (< 1M).
	 *
	 * Fall back to the available mechanisms otherwise.
	 */
	if (!pages &&
	    (!has_sg || has_iommu || size < SZ_1M))
		rc = tmc_etr_mode_alloc_buf(ETR_MODE_FLAT, drvdata,
					    etr_buf, node, pages);
	if (rc && has_etr_sg)
		rc = tmc_etr_mode_alloc_buf(ETR_MODE_ETR_SG, drvdata,
					    etr_buf, node, pages);
	if (rc && has_catu)
		rc = tmc_etr_mode_alloc_buf(ETR_MODE_CATU, drvdata,
					    etr_buf, node, pages);
	if (rc) {
		kfree(etr_buf);
		return ERR_PTR(rc);
	}

	dev_dbg(drvdata->dev, "allocated buffer of size %ldKB in mode %d\n",
		(unsigned long)size >> 10, etr_buf->mode);
	return etr_buf;
}
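/*
 * For example (illustrative): a 2MB request with no explicit page list on a
 * system without an IOMMU but with ETR Scatter-Gather support skips the
 * FLAT attempt (has_sg && !has_iommu && size >= 1MB) and is served in
 * ETR_MODE_ETR_SG instead.
 */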
static void tmc_free_etr_buf(struct etr_buf *etr_buf)
{
	WARN_ON(!etr_buf->ops || !etr_buf->ops->free);
	etr_buf->ops->free(etr_buf);
	kfree(etr_buf);
}
/*
 * tmc_etr_buf_get_data: Get the pointer to the trace data at @offset,
 * with a maximum of @len bytes.
 * Returns: The size of the linear data available at @offset, with *bufpp
 * updated to point to the buffer.
 */
static ssize_t tmc_etr_buf_get_data(struct etr_buf *etr_buf,
				    u64 offset, size_t len, char **bufpp)
{
	/* Adjust the length to limit this transaction to the end of buffer */
	len = (len < (etr_buf->size - offset)) ? len : etr_buf->size - offset;

	return etr_buf->ops->get_data(etr_buf, (u64)offset, len, bufpp);
}
static inline s64
tmc_etr_buf_insert_barrier_packet(struct etr_buf *etr_buf, u64 offset)
{
	ssize_t len;
	char *bufp;

	len = tmc_etr_buf_get_data(etr_buf, offset,
				   CORESIGHT_BARRIER_PKT_SIZE, &bufp);
	if (WARN_ON(len < CORESIGHT_BARRIER_PKT_SIZE))
		return -EINVAL;
	coresight_insert_barrier_packet(bufp);
	return offset + CORESIGHT_BARRIER_PKT_SIZE;
}
/*
 * tmc_sync_etr_buf: Sync the trace buffer availability with drvdata.
 * Makes sure the trace data is synced to the memory for consumption.
 * @etr_buf->offset will hold the offset to the beginning of the trace data
 * within the buffer, with @etr_buf->len bytes to consume.
 */
static void tmc_sync_etr_buf(struct tmc_drvdata *drvdata)
{
	struct etr_buf *etr_buf = drvdata->etr_buf;
	u64 rrp, rwp;
	u32 status;

	rrp = tmc_read_rrp(drvdata);
	rwp = tmc_read_rwp(drvdata);
	status = readl_relaxed(drvdata->base + TMC_STS);
	etr_buf->full = status & TMC_STS_FULL;

	WARN_ON(!etr_buf->ops || !etr_buf->ops->sync);

	etr_buf->ops->sync(etr_buf, rrp, rwp);

	/* Insert barrier packets at the beginning, if there was an overflow */
	if (etr_buf->full)
		tmc_etr_buf_insert_barrier_packet(etr_buf, etr_buf->offset);
}
static void tmc_etr_enable_hw(struct tmc_drvdata *drvdata,
			      struct etr_buf *etr_buf)
{
	u32 axictl, sts;

	/* Callers should provide an appropriate buffer for use */
	if (WARN_ON(!etr_buf || drvdata->etr_buf))
		return;
	drvdata->etr_buf = etr_buf;

	/*
	 * If this ETR is connected to a CATU, enable it before we turn
	 * this on.
	 */
	tmc_etr_enable_catu(drvdata);

	CS_UNLOCK(drvdata->base);

	/* Wait for TMCSReady bit to be set */
	tmc_wait_for_tmcready(drvdata);

	writel_relaxed(etr_buf->size / 4, drvdata->base + TMC_RSZ);
	writel_relaxed(TMC_MODE_CIRCULAR_BUFFER, drvdata->base + TMC_MODE);

	axictl = readl_relaxed(drvdata->base + TMC_AXICTL);
	axictl &= ~TMC_AXICTL_CLEAR_MASK;
	axictl |= (TMC_AXICTL_PROT_CTL_B1 | TMC_AXICTL_WR_BURST_16);
	axictl |= TMC_AXICTL_AXCACHE_OS;

	if (tmc_etr_has_cap(drvdata, TMC_ETR_AXI_ARCACHE)) {
		axictl &= ~TMC_AXICTL_ARCACHE_MASK;
		axictl |= TMC_AXICTL_ARCACHE_OS;
	}

	if (etr_buf->mode == ETR_MODE_ETR_SG) {
		if (WARN_ON(!tmc_etr_has_cap(drvdata, TMC_ETR_SG)))
			return;
		axictl |= TMC_AXICTL_SCT_GAT_MODE;
	}

	writel_relaxed(axictl, drvdata->base + TMC_AXICTL);
	tmc_write_dba(drvdata, etr_buf->hwaddr);
	/*
	 * If the TMC pointers must be programmed before the session,
	 * we have to set them properly (i.e., RRP/RWP to base address and
	 * STS to "not full").
	 */
	if (tmc_etr_has_cap(drvdata, TMC_ETR_SAVE_RESTORE)) {
		tmc_write_rrp(drvdata, etr_buf->hwaddr);
		tmc_write_rwp(drvdata, etr_buf->hwaddr);
		sts = readl_relaxed(drvdata->base + TMC_STS) & ~TMC_STS_FULL;
		writel_relaxed(sts, drvdata->base + TMC_STS);
	}

	writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI |
		       TMC_FFCR_FON_FLIN | TMC_FFCR_FON_TRIG_EVT |
		       TMC_FFCR_TRIGON_TRIGIN,
		       drvdata->base + TMC_FFCR);
	writel_relaxed(drvdata->trigger_cntr, drvdata->base + TMC_TRG);
	tmc_enable_hw(drvdata);

	CS_LOCK(drvdata->base);
}
/*
 * Return the available trace data in the buffer (starts at etr_buf->offset,
 * limited by etr_buf->len) from @pos, with a maximum limit of @len,
 * also updating the @bufpp on where to find it. Since the trace data
 * can start anywhere in the buffer, depending on the RRP, we adjust the
 * @len returned to handle the buffer wrapping around.
 *
 * We are protected here by drvdata->reading != 0, which ensures the
 * sysfs_buf stays alive.
 */
ssize_t tmc_etr_get_sysfs_trace(struct tmc_drvdata *drvdata,
				loff_t pos, size_t len, char **bufpp)
{
	s64 offset;
	ssize_t actual = len;
	struct etr_buf *etr_buf = drvdata->sysfs_buf;

	if (pos + actual > etr_buf->len)
		actual = etr_buf->len - pos;
	if (actual <= 0)
		return actual;

	/* Compute the offset from which we read the data */
	offset = etr_buf->offset + pos;
	if (offset >= etr_buf->size)
		offset -= etr_buf->size;
	return tmc_etr_buf_get_data(etr_buf, offset, actual, bufpp);
}
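/*
 * Read example (hypothetical values): with a 1MB buffer, etr_buf->offset =
 * 0xff000 and etr_buf->len = 0x8000, a read at pos = 0x1000 maps to buffer
 * offset 0x100000, which wraps back to offset 0x0, with at most 0x7000
 * bytes left to read.
 */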
static struct etr_buf *
tmc_etr_setup_sysfs_buf(struct tmc_drvdata *drvdata)
{
	return tmc_alloc_etr_buf(drvdata, drvdata->size,
				 0, cpu_to_node(0), NULL);
}

static void
tmc_etr_free_sysfs_buf(struct etr_buf *buf)
{
	if (buf)
		tmc_free_etr_buf(buf);
}
static void tmc_etr_sync_sysfs_buf(struct tmc_drvdata *drvdata)
{
	struct etr_buf *etr_buf = drvdata->etr_buf;

	if (WARN_ON(drvdata->sysfs_buf != etr_buf)) {
		tmc_etr_free_sysfs_buf(drvdata->sysfs_buf);
		drvdata->sysfs_buf = NULL;
	} else {
		tmc_sync_etr_buf(drvdata);
	}
}
static void tmc_etr_disable_hw(struct tmc_drvdata *drvdata)
{
	CS_UNLOCK(drvdata->base);

	tmc_flush_and_stop(drvdata);
	/*
	 * When operating in sysFS mode the content of the buffer needs to be
	 * read before the TMC is disabled.
	 */
	if (drvdata->mode == CS_MODE_SYSFS)
		tmc_etr_sync_sysfs_buf(drvdata);

	tmc_disable_hw(drvdata);

	CS_LOCK(drvdata->base);

	/* Disable the CATU device if this ETR is connected to one */
	tmc_etr_disable_catu(drvdata);

	/* Reset the ETR buf used by hardware */
	drvdata->etr_buf = NULL;
}
static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev)
{
	int ret = 0;
	unsigned long flags;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
	struct etr_buf *sysfs_buf = NULL, *new_buf = NULL, *free_buf = NULL;

	/*
	 * If we are enabling the ETR from disabled state, we need to make
	 * sure we have a buffer with the right size. The etr_buf is not reset
	 * immediately after we stop the tracing in SYSFS mode as we wait for
	 * the user to collect the data. We may be able to reuse the existing
	 * buffer, provided the size matches. Any allocation has to be done
	 * with the lock released.
	 */
	spin_lock_irqsave(&drvdata->spinlock, flags);
	sysfs_buf = READ_ONCE(drvdata->sysfs_buf);
	if (!sysfs_buf || (sysfs_buf->size != drvdata->size)) {
		spin_unlock_irqrestore(&drvdata->spinlock, flags);

		/* Allocate memory with the locks released */
		free_buf = new_buf = tmc_etr_setup_sysfs_buf(drvdata);
		if (IS_ERR(new_buf))
			return PTR_ERR(new_buf);

		/* Let's try again */
		spin_lock_irqsave(&drvdata->spinlock, flags);
	}

	if (drvdata->reading || drvdata->mode == CS_MODE_PERF) {
		ret = -EBUSY;
		goto out;
	}

	/*
	 * In sysFS mode we can have multiple writers per sink. Since this
	 * sink is already enabled no memory is needed and the HW need not be
	 * touched, even if the buffer size has changed.
	 */
	if (drvdata->mode == CS_MODE_SYSFS)
		goto out;

	/*
	 * If we don't have a buffer or it doesn't match the requested size,
	 * use the buffer allocated above. Otherwise reuse the existing buffer.
	 */
	sysfs_buf = READ_ONCE(drvdata->sysfs_buf);
	if (!sysfs_buf || (new_buf && sysfs_buf->size != new_buf->size)) {
		free_buf = sysfs_buf;
		drvdata->sysfs_buf = new_buf;
	}

	drvdata->mode = CS_MODE_SYSFS;
	tmc_etr_enable_hw(drvdata, drvdata->sysfs_buf);
out:
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	/* Free memory outside the spinlock if need be */
	if (free_buf)
		tmc_etr_free_sysfs_buf(free_buf);

	if (!ret)
		dev_info(drvdata->dev, "TMC-ETR enabled\n");

	return ret;
}

static int tmc_enable_etr_sink_perf(struct coresight_device *csdev)
{
	/* We don't support perf mode yet! */
	return -EINVAL;
}

static int tmc_enable_etr_sink(struct coresight_device *csdev, u32 mode)
{
	switch (mode) {
	case CS_MODE_SYSFS:
		return tmc_enable_etr_sink_sysfs(csdev);
	case CS_MODE_PERF:
		return tmc_enable_etr_sink_perf(csdev);
	}

	/* We shouldn't be here */
	return -EINVAL;
}
static void tmc_disable_etr_sink(struct coresight_device *csdev)
{
	unsigned long flags;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	spin_lock_irqsave(&drvdata->spinlock, flags);
	if (drvdata->reading) {
		spin_unlock_irqrestore(&drvdata->spinlock, flags);
		return;
	}

	/* Disable the TMC only if it needs to be disabled */
	if (drvdata->mode != CS_MODE_DISABLED) {
		tmc_etr_disable_hw(drvdata);
		drvdata->mode = CS_MODE_DISABLED;
	}

	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	dev_info(drvdata->dev, "TMC-ETR disabled\n");
}
static const struct coresight_ops_sink tmc_etr_sink_ops = {
	.enable		= tmc_enable_etr_sink,
	.disable	= tmc_disable_etr_sink,
};

const struct coresight_ops tmc_etr_cs_ops = {
	.sink_ops	= &tmc_etr_sink_ops,
};
int tmc_read_prepare_etr(struct tmc_drvdata *drvdata)
{
	int ret = 0;
	unsigned long flags;

	/* config types are set at boot time and never change */
	if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETR))
		return -EINVAL;

	spin_lock_irqsave(&drvdata->spinlock, flags);
	if (drvdata->reading) {
		ret = -EBUSY;
		goto out;
	}

	/* Don't interfere if operated from Perf */
	if (drvdata->mode == CS_MODE_PERF) {
		ret = -EINVAL;
		goto out;
	}

	/* If sysfs_buf is NULL the trace data has been read already */
	if (!drvdata->sysfs_buf) {
		ret = -EINVAL;
		goto out;
	}

	/* Disable the TMC if we are trying to read from a running session */
	if (drvdata->mode == CS_MODE_SYSFS)
		tmc_etr_disable_hw(drvdata);

	drvdata->reading = true;
out:
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	return ret;
}
int tmc_read_unprepare_etr(struct tmc_drvdata *drvdata)
{
	unsigned long flags;
	struct etr_buf *sysfs_buf = NULL;

	/* config types are set at boot time and never change */
	if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETR))
		return -EINVAL;

	spin_lock_irqsave(&drvdata->spinlock, flags);

	/* RE-enable the TMC if need be */
	if (drvdata->mode == CS_MODE_SYSFS) {
		/*
		 * The trace run will continue with the same allocated trace
		 * buffer. Since the tracer is still enabled the buffer can't
		 * be NULL.
		 */
		tmc_etr_enable_hw(drvdata, drvdata->sysfs_buf);
	} else {
		/*
		 * The ETR is not tracing and the buffer was just read.
		 * As such prepare to free the trace buffer.
		 */
		sysfs_buf = drvdata->sysfs_buf;
		drvdata->sysfs_buf = NULL;
	}

	drvdata->reading = false;
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	/* Free allocated memory outside of the spinlock */
	if (sysfs_buf)
		tmc_etr_free_sysfs_buf(sysfs_buf);

	return 0;
}