Add support for using reserved memory as the CoreSight ETR trace buffer.
Introduce a new ETR buffer mode, ETR_MODE_RESRV, which becomes available when the ETR device tree node points at a valid reserved memory region through the "memory-region" property.
ETR_MODE_RESRV can be selected only by explicit user request.
$ echo resrv >/sys/bus/coresight/devices/tmc_etr<N>/buf_mode_preferred
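As an illustrative check (the sysfs path mirrors the one above; the exact output depends on the platform), the new mode only shows up in buf_modes_available when a usable region was found at probe time:

# 'resrv' is listed only when the memory-region was mapped successfully
$ cat /sys/bus/coresight/devices/tmc_etr<N>/buf_modes_available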
Signed-off-by: Anil Kumar Reddy <areddy3@marvell.com>
Signed-off-by: Linu Cherian <lcherian@marvell.com>
---
 .../hwtracing/coresight/coresight-tmc-core.c | 39 ++++++++++
 .../hwtracing/coresight/coresight-tmc-etr.c  | 74 ++++++++++++++++++-
 drivers/hwtracing/coresight/coresight-tmc.h  | 24 ++++++
 3 files changed, 136 insertions(+), 1 deletion(-)
diff --git a/drivers/hwtracing/coresight/coresight-tmc-core.c b/drivers/hwtracing/coresight/coresight-tmc-core.c
index 7ec5365e2b64..c37dc629408b 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-core.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-core.c
@@ -22,6 +22,7 @@
 #include <linux/spinlock.h>
 #include <linux/pm_runtime.h>
 #include <linux/of.h>
+#include <linux/of_address.h>
 #include <linux/coresight.h>
 #include <linux/amba/bus.h>
@@ -370,6 +371,42 @@ static inline bool tmc_etr_has_non_secure_access(struct tmc_drvdata *drvdata)
 	return (auth & TMC_AUTH_NSID_MASK) == 0x3;
 }
 
+static void tmc_get_reserved_region(struct device *parent)
+{
+	struct tmc_drvdata *drvdata = dev_get_drvdata(parent);
+	struct device_node *node;
+	struct resource res;
+	int rc;
+
+	node = of_parse_phandle(parent->of_node, "memory-region", 0);
+	if (!node) {
+		dev_dbg(parent, "No reserved trace buffer specified\n");
+		goto out;
+	}
+
+	rc = of_address_to_resource(node, 0, &res);
+	of_node_put(node);
+	if (rc || res.start == 0 || resource_size(&res) == 0) {
+		dev_err(parent, "Reserved trace buffer memory is invalid\n");
+		goto out;
+	}
+
+	drvdata->resrv_buf.vaddr = memremap(res.start,
+					    resource_size(&res),
+					    MEMREMAP_WB);
+	if (!drvdata->resrv_buf.vaddr) {
+		dev_err(parent, "Reserved trace buffer mapping failed\n");
+		rc = -ENOMEM;
+		goto out;
+	}
+
+	drvdata->resrv_buf.paddr = res.start;
+	drvdata->resrv_buf.size = resource_size(&res);
+
+out:
+	return;
+}
+
 /* Detect and initialise the capabilities of a TMC ETR */
 static int tmc_etr_setup_caps(struct device *parent, u32 devid, void *dev_caps)
 {
@@ -482,6 +519,8 @@ static int tmc_probe(struct amba_device *adev, const struct amba_id *id)
 		drvdata->size = readl_relaxed(drvdata->base + TMC_RSZ) * 4;
 	}
 
+	tmc_get_reserved_region(dev);
+
 	desc.dev = dev;
 
 	switch (drvdata->config_type) {
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etr.c b/drivers/hwtracing/coresight/coresight-tmc-etr.c
index cd2d85a86b68..334226cd7201 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etr.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etr.c
@@ -30,6 +30,7 @@ struct etr_buf_hw {
 	bool	has_iommu;
 	bool	has_etr_sg;
 	bool	has_catu;
+	bool	has_resrv;
 };
 
 /*
@@ -692,6 +693,61 @@ static const struct etr_buf_operations etr_flat_buf_ops = {
 	.get_data = tmc_etr_get_data_flat_buf,
 };
 
+/*
+ * tmc_etr_alloc_resrv_buf: Allocate a contiguous DMA buffer from reserved region.
+ */
+static int tmc_etr_alloc_resrv_buf(struct tmc_drvdata *drvdata,
+				   struct etr_buf *etr_buf, int node,
+				   void **pages)
+{
+	struct etr_flat_buf *resrv_buf;
+	struct device *real_dev = drvdata->csdev->dev.parent;
+
+	/* We cannot reuse existing pages for resrv buf */
+	if (pages)
+		return -EINVAL;
+
+	resrv_buf = kzalloc(sizeof(*resrv_buf), GFP_KERNEL);
+	if (!resrv_buf)
+		return -ENOMEM;
+
+	resrv_buf->daddr = dma_map_resource(real_dev, drvdata->resrv_buf.paddr,
+					    etr_buf->size, DMA_FROM_DEVICE, 0);
+	if (dma_mapping_error(real_dev, resrv_buf->daddr)) {
+		dev_err(real_dev, "failed to map source buffer address\n");
+		kfree(resrv_buf);
+		return -ENOMEM;
+	}
+
+	resrv_buf->vaddr = drvdata->resrv_buf.vaddr;
+	resrv_buf->size = etr_buf->size;
+	resrv_buf->dev = &drvdata->csdev->dev;
+	etr_buf->hwaddr = resrv_buf->daddr;
+	etr_buf->mode = ETR_MODE_RESRV;
+	etr_buf->private = resrv_buf;
+	return 0;
+}
+
+static void tmc_etr_free_resrv_buf(struct etr_buf *etr_buf)
+{
+	struct etr_flat_buf *resrv_buf = etr_buf->private;
+
+	if (resrv_buf && resrv_buf->daddr) {
+		struct device *real_dev = resrv_buf->dev->parent;
+
+		dma_unmap_resource(real_dev, resrv_buf->daddr,
+				   resrv_buf->size, DMA_FROM_DEVICE, 0);
+	}
+	kfree(resrv_buf);
+}
+
+static const struct etr_buf_operations etr_resrv_buf_ops = {
+	.alloc = tmc_etr_alloc_resrv_buf,
+	.free = tmc_etr_free_resrv_buf,
+	.sync = tmc_etr_sync_flat_buf,
+	.get_data = tmc_etr_get_data_flat_buf,
+};
+
 /*
  * tmc_etr_alloc_sg_buf: Allocate an SG buf @etr_buf. Setup the parameters
  * appropriately.
@@ -798,6 +854,7 @@ static const struct etr_buf_operations *etr_buf_ops[] = {
 	[ETR_MODE_FLAT] = &etr_flat_buf_ops,
 	[ETR_MODE_ETR_SG] = &etr_sg_buf_ops,
 	[ETR_MODE_CATU] = NULL,
+	[ETR_MODE_RESRV] = &etr_resrv_buf_ops
 };
 
 void tmc_etr_set_catu_ops(const struct etr_buf_operations *catu)
@@ -823,6 +880,7 @@ static inline int tmc_etr_mode_alloc_buf(int mode,
 	case ETR_MODE_FLAT:
 	case ETR_MODE_ETR_SG:
 	case ETR_MODE_CATU:
+	case ETR_MODE_RESRV:
 		if (etr_buf_ops[mode] && etr_buf_ops[mode]->alloc)
 			rc = etr_buf_ops[mode]->alloc(drvdata, etr_buf,
 						      node, pages);
@@ -841,6 +899,7 @@ static void get_etr_buf_hw(struct device *dev, struct etr_buf_hw *buf_hw)
 	buf_hw->has_iommu = iommu_get_domain_for_dev(dev->parent);
 	buf_hw->has_etr_sg = tmc_etr_has_cap(drvdata, TMC_ETR_SG);
 	buf_hw->has_catu = !!tmc_etr_get_catu_device(drvdata);
+	buf_hw->has_resrv = is_tmc_reserved_region_valid(dev->parent);
 }
 
 static bool etr_can_use_flat_mode(struct etr_buf_hw *buf_hw, ssize_t etr_buf_size)
@@ -872,13 +931,19 @@ static struct etr_buf *tmc_alloc_etr_buf(struct tmc_drvdata *drvdata,
 	if (!etr_buf)
 		return ERR_PTR(-ENOMEM);
 
-	etr_buf->size = size;
+	/* Override the buffer size here for reserved mode */
+	etr_buf->size = (drvdata->etr_mode == ETR_MODE_RESRV) ?
+			drvdata->resrv_buf.size : size;
 
 	/* If there is user directive for buffer mode, try that first */
 	if (drvdata->etr_mode != ETR_MODE_AUTO)
 		rc = tmc_etr_mode_alloc_buf(drvdata->etr_mode, drvdata,
 					    etr_buf, node, pages);
 
+	/* Fallback mechanism is not valid for reserved mode */
+	if (rc && (drvdata->etr_mode == ETR_MODE_RESRV))
+		goto done;
+
 	/*
 	 * If we have to use an existing list of pages, we cannot reliably
 	 * use a contiguous DMA memory (even if we have an IOMMU). Otherwise,
@@ -900,6 +965,7 @@ static struct etr_buf *tmc_alloc_etr_buf(struct tmc_drvdata *drvdata,
 	if (rc && buf_hw.has_catu)
 		rc = tmc_etr_mode_alloc_buf(ETR_MODE_CATU, drvdata,
 					    etr_buf, node, pages);
+done:
 	if (rc) {
 		kfree(etr_buf);
 		return ERR_PTR(rc);
@@ -1827,6 +1893,7 @@ static const char *const buf_modes_str[] = {
 	[ETR_MODE_FLAT] = "flat",
 	[ETR_MODE_ETR_SG] = "tmc-sg",
 	[ETR_MODE_CATU] = "catu",
+	[ETR_MODE_RESRV] = "resrv",
 	[ETR_MODE_AUTO] = "auto",
 };
@@ -1845,6 +1912,9 @@ static ssize_t buf_modes_available_show(struct device *dev,
 	if (buf_hw.has_catu)
 		size += sysfs_emit_at(buf, size, "%s ", buf_modes_str[ETR_MODE_CATU]);
 
+	if (buf_hw.has_resrv)
+		size += sysfs_emit_at(buf, size, "%s ", buf_modes_str[ETR_MODE_RESRV]);
+
 	size += sysfs_emit_at(buf, size, "\n");
 	return size;
 }
@@ -1872,6 +1942,8 @@ static ssize_t buf_mode_preferred_store(struct device *dev,
 		drvdata->etr_mode = ETR_MODE_ETR_SG;
 	else if (sysfs_streq(buf, buf_modes_str[ETR_MODE_CATU]) && buf_hw.has_catu)
 		drvdata->etr_mode = ETR_MODE_CATU;
+	else if (sysfs_streq(buf, buf_modes_str[ETR_MODE_RESRV]) && buf_hw.has_resrv)
+		drvdata->etr_mode = ETR_MODE_RESRV;
 	else if (sysfs_streq(buf, buf_modes_str[ETR_MODE_AUTO]))
 		drvdata->etr_mode = ETR_MODE_AUTO;
 	else
diff --git a/drivers/hwtracing/coresight/coresight-tmc.h b/drivers/hwtracing/coresight/coresight-tmc.h
index e59d423a9749..942fcd960586 100644
--- a/drivers/hwtracing/coresight/coresight-tmc.h
+++ b/drivers/hwtracing/coresight/coresight-tmc.h
@@ -135,6 +135,7 @@ enum etr_mode {
 	ETR_MODE_FLAT,		/* Uses contiguous flat buffer */
 	ETR_MODE_ETR_SG,	/* Uses in-built TMC ETR SG mechanism */
 	ETR_MODE_CATU,		/* Use SG mechanism in CATU */
+	ETR_MODE_RESRV,		/* Use reserved region contiguous buffer */
 	ETR_MODE_AUTO,		/* Use the default mechanism */
 };
@@ -164,6 +165,17 @@ struct etr_buf {
 	void			*private;
 };
 
+/**
+ * @paddr	: Start address of reserved memory region.
+ * @vaddr	: Corresponding CPU virtual address.
+ * @size	: Size of reserved memory region.
+ */
+struct tmc_resrv_buf {
+	phys_addr_t	paddr;
+	void		*vaddr;
+	size_t		size;
+};
+
 /**
  * struct tmc_drvdata - specifics associated to an TMC component
  * @base:	memory mapped base address for this component.
@@ -188,6 +200,7 @@ struct etr_buf {
  * @idr_mutex:	Access serialisation for idr.
  * @sysfs_buf:	SYSFS buffer for ETR.
  * @perf_buf:	PERF buffer for ETR.
+ * @resrv_buf:	Reserved Memory for trace data buffer. Used by ETR/ETF.
  */
 struct tmc_drvdata {
 	void __iomem		*base;
@@ -213,6 +226,7 @@ struct tmc_drvdata {
 	struct mutex		idr_mutex;
 	struct etr_buf		*sysfs_buf;
 	struct etr_buf		*perf_buf;
+	struct tmc_resrv_buf	resrv_buf;
 };
 
 struct etr_buf_operations {
@@ -330,6 +344,16 @@ tmc_sg_table_buf_size(struct tmc_sg_table *sg_table)
 	return sg_table->data_pages.nr_pages << PAGE_SHIFT;
 }
 
+static inline bool is_tmc_reserved_region_valid(struct device *dev)
+{
+	struct tmc_drvdata *drvdata = dev_get_drvdata(dev);
+
+	if (drvdata->resrv_buf.paddr &&
+	    drvdata->resrv_buf.size)
+		return true;
+	return false;
+}
+
 struct coresight_device *tmc_etr_get_catu_device(struct tmc_drvdata *drvdata);
 
 void tmc_etr_set_catu_ops(const struct etr_buf_operations *catu);