Hi Mathieu, Daniel,
On Thu, 26 Aug 2021 at 18:33, Mathieu Poirier <mathieu.poirier@linaro.org> wrote:
On Tue, Jul 13, 2021 at 02:15:32PM +0200, Daniel Kiss wrote:
ETR might fill up the buffer sooner than an event makes perf trigger the synchronisation, especially in system-wide trace. Polling runs periodically to sync the ETR buffer. The period is configurable via sysfs; polling is disabled by default.
Signed-off-by: Daniel Kiss <daniel.kiss@arm.com>
Signed-off-by: Branislav Rankov <Branislav.Rankov@arm.com>
Tested-by: Denis Nikitin <denik@chromium.org>
 .../testing/sysfs-bus-coresight-devices-tmc   |   8 +
 drivers/hwtracing/coresight/Kconfig           |  12 +
 drivers/hwtracing/coresight/Makefile          |   1 +
 .../hwtracing/coresight/coresight-etm-perf.c  |   8 +
 .../coresight/coresight-etr-perf-polling.c    | 275 ++++++++++++++++++
 .../coresight/coresight-etr-perf-polling.h    |  38 +++
 .../hwtracing/coresight/coresight-tmc-core.c  |   4 +
 .../hwtracing/coresight/coresight-tmc-etr.c   |  13 +
 8 files changed, 359 insertions(+)
 create mode 100644 drivers/hwtracing/coresight/coresight-etr-perf-polling.c
 create mode 100644 drivers/hwtracing/coresight/coresight-etr-perf-polling.h
diff --git a/Documentation/ABI/testing/sysfs-bus-coresight-devices-tmc b/Documentation/ABI/testing/sysfs-bus-coresight-devices-tmc
index 6aa527296c710..3b411e8a6f417 100644
--- a/Documentation/ABI/testing/sysfs-bus-coresight-devices-tmc
+++ b/Documentation/ABI/testing/sysfs-bus-coresight-devices-tmc
@@ -91,3 +91,11 @@ Contact:	Mathieu Poirier <mathieu.poirier@linaro.org>
Description:	(RW) Size of the trace buffer for TMC-ETR when used in SYSFS
		mode. Writable only for TMC-ETR configurations. The value
		should be aligned to the kernel pagesize.
+What:		/sys/bus/coresight/devices/<memory_map>.tmc/polling/period
+Date:		July 2021
+KernelVersion:	5.14
+Contact:	Daniel Kiss <daniel.kiss@arm.com>
+Description:	(RW) Time in milliseconds when the TMC-ETR is synced.
+		Default value is 0, which means the feature is disabled.
+		Writable only for TMC-ETR configurations.
diff --git a/drivers/hwtracing/coresight/Kconfig b/drivers/hwtracing/coresight/Kconfig
index 84530fd80998c..4e91fb98849f4 100644
--- a/drivers/hwtracing/coresight/Kconfig
+++ b/drivers/hwtracing/coresight/Kconfig
@@ -44,6 +44,18 @@ config CORESIGHT_LINK_AND_SINK_TMC
	  To compile this driver as a module, choose M here: the
	  module will be called coresight-tmc.
+config CORESIGHT_ETR_PERF_POLL
bool "Coresight ETR Perf Polling"
depends on CORESIGHT_LINK_AND_SINK_TMC
help
Enable the support for software periodic synchronization of the ETR buffer.
ETR might fill up the buffer sooner than an event makes perf trigger
the synchronization, especially in system wide trace. Polling runs
periodically to sync the ETR buffer. Period is configurable via sysfs,
disabled by default.
config CORESIGHT_CATU
	tristate "Coresight Address Translation Unit (CATU) driver"
	depends on CORESIGHT_LINK_AND_SINK_TMC
diff --git a/drivers/hwtracing/coresight/Makefile b/drivers/hwtracing/coresight/Makefile
index d60816509755c..6baac328eea87 100644
--- a/drivers/hwtracing/coresight/Makefile
+++ b/drivers/hwtracing/coresight/Makefile
@@ -5,6 +5,7 @@ obj-$(CONFIG_CORESIGHT) += coresight.o
coresight-y := coresight-core.o coresight-etm-perf.o coresight-platform.o \
		coresight-sysfs.o
+coresight-$(CONFIG_CORESIGHT_ETR_PERF_POLL) += coresight-etr-perf-polling.o
obj-$(CONFIG_CORESIGHT_LINK_AND_SINK_TMC) += coresight-tmc.o
coresight-tmc-y := coresight-tmc-core.o coresight-tmc-etf.o \
		coresight-tmc-etr.o
diff --git a/drivers/hwtracing/coresight/coresight-etm-perf.c b/drivers/hwtracing/coresight/coresight-etm-perf.c
index a3f4c07f5bf8b..3095840a567c4 100644
--- a/drivers/hwtracing/coresight/coresight-etm-perf.c
+++ b/drivers/hwtracing/coresight/coresight-etm-perf.c
@@ -19,6 +19,7 @@
#include <linux/workqueue.h>
#include "coresight-etm-perf.h" +#include "coresight-etr-perf-polling.h" #include "coresight-priv.h"
static struct pmu etm_pmu;
@@ -438,6 +439,8 @@ static void etm_event_start(struct perf_event *event, int flags)
	/* Tell the perf core the event is alive */
	event->hw.state = 0;
etr_perf_polling_event_start(event, event_data, handle);
	/* Finally enable the tracer */
	if (source_ops(csdev)->enable(csdev, event, CS_MODE_PERF))
		goto fail_disable_path;
@@ -497,6 +500,8 @@ static void etm_event_stop(struct perf_event *event, int mode)
	if (!sink)
		return;
etr_perf_polling_event_stop(event, event_data);
	/* stop tracer */
	source_ops(csdev)->disable(csdev, event);
@@ -741,6 +746,8 @@ int __init etm_perf_init(void)
	etm_pmu.addr_filters_validate = etm_addr_filters_validate;
	etm_pmu.nr_addr_filters = ETM_ADDR_CMP_MAX;
etr_perf_polling_init();
The problem here is that a function specific to an ETR is inserted in code that should be generic. So if we want to do the same for ETB, ETF and any kind of sink that's out there, we'd have to duplicate code and it would quickly get messy. What is needed is a generic solution.
Moreover what is proposed here is centered on sinks, while the entire coresight framework is centered on sources. The polling function, although specific to sinks, should emanate from and be driven by the generic code that handles sources. There should also be a single event responsible for doing the polling rather than all of them, as is the case in this patchset. In per-thread mode where a single thread is traced it isn't a problem. In CPU-wide scenarios the first event should do the polling. There will be a gap between the time that event stops and all the other events in the trace session stop, but that is a limitation we will have to live with.
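To make the direction concrete, here is a rough and untested sketch of what I have in mind for the generic etm-perf side - note that the poll_owner field and the poll_start() sink op are made-up names that don't exist today, this is purely illustrative:

static void etm_perf_poll_start(struct etm_event_data *event_data,
				struct coresight_device *sink,
				struct perf_output_handle *handle)
{
	/*
	 * Only the first event of the session becomes the polling owner;
	 * every other event returns early.  poll_owner would be a new
	 * atomic_t in struct etm_event_data.
	 */
	if (atomic_cmpxchg(&event_data->poll_owner, 0, 1))
		return;

	/*
	 * The sink decides what polling means for its hardware behind a
	 * new sink op, so nothing ETR specific lives in the generic code.
	 */
	if (sink_ops(sink)->poll_start)
		sink_ops(sink)->poll_start(sink, handle);
}

The timer management would then stay in the sink driver behind that op, while the choice of which event drives the polling stays in the generic layer.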
In my previous reviews of this I have suggested that this must be ETR specific - the object of the feature is to use polling to mimic the interrupt we get with something like TRBE and get more consistent output across the entire trace run, which has been backed up by evidence from those that have tried it out. Granted, TRBE is per core and thus can only be operating on a single source, but the guiding idea remains the same - save data whenever the sink hardware is full (or artificially stopped in the case of polling). TRBE does not, as far as I recall, wait for an event to stop before servicing the full interrupt.
The worry is that if this is source-event driven then any advantages would be wiped out by waiting for events in the trace session to stop.
Regards
Mike
That leaves us with per-thread sessions where child threads are spun off. You will have to look into how events are handled in that case. Suzuki has done work in that area and might remember a few things.
There are other design problems with this set that I won't get into since it needs a refactoring anyway.
Regards, Mathieu
	ret = perf_pmu_register(&etm_pmu, CORESIGHT_ETM_PMU_NAME, -1);
	if (ret == 0)
		etm_perf_up = true;
@@ -750,5 +757,6 @@ int __init etm_perf_init(void)
void __exit etm_perf_exit(void)
{
	etr_perf_polling_exit();
	perf_pmu_unregister(&etm_pmu);
}
diff --git a/drivers/hwtracing/coresight/coresight-etr-perf-polling.c b/drivers/hwtracing/coresight/coresight-etr-perf-polling.c
new file mode 100644
index 0000000000000..87e6bc42a62de
--- /dev/null
+++ b/drivers/hwtracing/coresight/coresight-etr-perf-polling.c
@@ -0,0 +1,275 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright(C) 2021 Arm Limited. All rights reserved.
+ * Author: Daniel Kiss <daniel.kiss@arm.com>
+ */
+#include <linux/coresight.h>
+#include <linux/coresight-pmu.h>
+#include <linux/cpumask.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/percpu-defs.h>
+#include <linux/perf_event.h>
+#include <linux/slab.h>
+#include <linux/stringhash.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+#include "coresight-etr-perf-polling.h" +#include "coresight-priv.h" +#include "coresight-tmc.h"
+struct polling_event_list {
struct perf_event *perf_event;
struct etm_event_data *etm_event_data;
struct perf_output_handle *ctx_handle;
void (*tmc_etr_reset_hw)(struct tmc_drvdata *data);
struct list_head list;
+};
+struct polling {
int cpu;
struct polling_event_list *polled_event;
struct delayed_work delayed_work;
+};
+static atomic_t period;
+static spinlock_t spinlock_re;
+static struct list_head registered_events;
+static DEFINE_PER_CPU(struct polling, polling);
+static ssize_t period_show(struct device *dev, struct device_attribute *attr,
char *buf)
+{
int temp;
temp = atomic_read(&period);
return sprintf(buf, "%i\n", temp);
+}
+static ssize_t period_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
+{
int temp = 0;
if (!kstrtoint(buf, 10, &temp) && (temp >= 0))
atomic_set(&period, temp);
return count;
+}
+static DEVICE_ATTR_RW(period);
+static struct attribute *coresight_tmc_polling_attrs[] = {
&dev_attr_period.attr,
NULL,
+};
+const struct attribute_group coresight_tmc_polling_group = {
.attrs = coresight_tmc_polling_attrs,
.name = "polling",
+};
+EXPORT_SYMBOL_GPL(coresight_tmc_polling_group);
+static inline void polling_sched_worker(struct polling *p)
+{
int tickrate = atomic_read(&period);
if (tickrate > 0)
schedule_delayed_work_on(p->cpu, &p->delayed_work,
msecs_to_jiffies(tickrate));
+}
+static inline bool is_etr_related(struct etm_event_data *etm_event_data, int cpu)
+{
struct list_head *path;
struct coresight_device *sink;
struct tmc_drvdata *drvdata;
path = etm_event_cpu_path(etm_event_data, cpu);
if (WARN_ON(!path))
return false;
sink = coresight_get_sink(path);
if (WARN_ON(!sink))
return false;
drvdata = dev_get_drvdata(sink->dev.parent);
if (drvdata->config_type != TMC_CONFIG_TYPE_ETR)
return false;
return true;
+}
+/*
+ * Adds the event to the polled events list.
+ */
+void etr_perf_polling_event_start(struct perf_event *event,
struct etm_event_data *etm_event_data,
struct perf_output_handle *ctx_handle)
+{
int cpu = smp_processor_id();
struct polling *p = per_cpu_ptr(&polling, cpu);
struct polling_event_list *element, *tmp;
if (!is_etr_related(etm_event_data, cpu))
return;
spin_lock(&spinlock_re);
	list_for_each_entry_safe(element, tmp, &registered_events, list) {
if (element->ctx_handle == ctx_handle) {
element->perf_event = event;
element->etm_event_data = etm_event_data;
spin_unlock(&spinlock_re);
p->polled_event = element;
polling_sched_worker(p);
return;
}
}
spin_unlock(&spinlock_re);
+}
+/*
+ * Removes the event from the list of events to be polled.
+ */
+void etr_perf_polling_event_stop(struct perf_event *event,
struct etm_event_data *etm_event_data)
+{
int cpu = smp_processor_id();
struct polling *p = per_cpu_ptr(&polling, cpu);
if (!is_etr_related(etm_event_data, cpu))
return;
if (p->polled_event) {
struct polling_event_list *element = p->polled_event;
if (element->perf_event == event) {
p->polled_event = NULL;
element->perf_event = NULL;
element->etm_event_data = NULL;
cancel_delayed_work(&p->delayed_work);
return;
}
}
+}
+/*
+ * The polling worker is a workqueue job which is periodically
+ * woken up to update the perf aux buffer from the ETR sink.
+ */
+static void etr_perf_polling_worker(struct work_struct *work)
+{
unsigned long flags;
struct list_head *path;
struct coresight_device *sink;
int size;
int cpu = smp_processor_id();
struct polling *p = per_cpu_ptr(&polling, cpu);
if (!atomic_read(&period))
return;
if (!p->polled_event)
return;
/*
* Scheduling would do the same from the perf hooks,
* this should be done in one go.
*/
local_irq_save(flags);
polling_sched_worker(p);
path = etm_event_cpu_path(p->polled_event->etm_event_data, cpu);
sink = coresight_get_sink(path);
size = sink_ops(sink)->update_buffer(
sink, p->polled_event->ctx_handle,
p->polled_event->etm_event_data->snk_config);
/*
* Restart the trace.
*/
if (p->polled_event->tmc_etr_reset_hw)
p->polled_event->tmc_etr_reset_hw(dev_get_drvdata(sink->dev.parent));
WARN_ON(size < 0);
if (size > 0) {
struct etm_event_data *new_event_data;
perf_aux_output_end(p->polled_event->ctx_handle, size);
new_event_data = perf_aux_output_begin(
p->polled_event->ctx_handle,
p->polled_event->perf_event);
if (WARN_ON(new_event_data == NULL)) {
local_irq_restore(flags);
return;
}
p->polled_event->etm_event_data = new_event_data;
WARN_ON(new_event_data->snk_config !=
p->polled_event->etm_event_data->snk_config);
}
local_irq_restore(flags);
+}
+void etr_perf_polling_handle_register(struct perf_output_handle *handle,
void (*tmc_etr_reset_hw)(struct tmc_drvdata *drvdata))
+{
struct polling_event_list *element;
element = kmalloc(sizeof(*element), GFP_ATOMIC);
if (WARN_ON(!element))
return;
memset(element, 0, sizeof(*element));
element->ctx_handle = handle;
element->tmc_etr_reset_hw = tmc_etr_reset_hw;
spin_lock(&spinlock_re);
	list_add(&element->list, &registered_events);
spin_unlock(&spinlock_re);
+}
+EXPORT_SYMBOL_GPL(etr_perf_polling_handle_register);
+void etr_perf_polling_handle_deregister(struct perf_output_handle *handle)
+{
struct polling_event_list *element, *tmp;
spin_lock(&spinlock_re);
	list_for_each_entry_safe(element, tmp, &registered_events, list) {
if (element->ctx_handle == handle) {
list_del(&element->list);
spin_unlock(&spinlock_re);
kfree(element);
return;
}
}
spin_unlock(&spinlock_re);
+}
+EXPORT_SYMBOL_GPL(etr_perf_polling_handle_deregister);
+void etr_perf_polling_init(void)
+{
int cpu;
spin_lock_init(&spinlock_re);
INIT_LIST_HEAD(®istered_events);
atomic_set(&period, 0);
for_each_possible_cpu(cpu) {
struct polling *p = per_cpu_ptr(&polling, cpu);
p->cpu = cpu;
p->polled_event = NULL;
INIT_DELAYED_WORK(&p->delayed_work, etr_perf_polling_worker);
}
+}
+void etr_perf_polling_exit(void)
+{
int cpu;
for_each_possible_cpu(cpu) {
struct polling *p = per_cpu_ptr(&polling, cpu);
cancel_delayed_work_sync(&p->delayed_work);
WARN_ON(p->polled_event);
}
WARN_ON(!list_empty(®istered_events));
+}
diff --git a/drivers/hwtracing/coresight/coresight-etr-perf-polling.h b/drivers/hwtracing/coresight/coresight-etr-perf-polling.h
new file mode 100644
index 0000000000000..d47b4424594e6
--- /dev/null
+++ b/drivers/hwtracing/coresight/coresight-etr-perf-polling.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright(C) 2021 Arm Limited. All rights reserved.
+ * Author: Daniel Kiss <daniel.kiss@arm.com>
+ */
+#ifndef _CORESIGHT_ETM_PERF_POLLING_H
+#define _CORESIGHT_ETM_PERF_POLLING_H
+#include <linux/coresight.h>
+#include <linux/perf_event.h>
+#include "coresight-etm-perf.h"
+#include "coresight-tmc.h"
+#ifdef CONFIG_CORESIGHT_ETR_PERF_POLL
+void etr_perf_polling_init(void);
+void etr_perf_polling_exit(void);
+void etr_perf_polling_handle_register(struct perf_output_handle *handle,
void (*tmc_etr_reset_hw)(struct tmc_drvdata *drvdata));
+void etr_perf_polling_handle_deregister(struct perf_output_handle *handle);
+void etr_perf_polling_event_start(struct perf_event *event,
struct etm_event_data *etm_event_data,
struct perf_output_handle *ctx_handle);
+void etr_perf_polling_event_stop(struct perf_event *event,
struct etm_event_data *etm_event_data);
+extern const struct attribute_group coresight_tmc_polling_group;
+#else /* !CONFIG_CORESIGHT_ETR_PERF_POLL */
+#define etr_perf_polling_init()
+#define etr_perf_polling_exit()
+#define etr_perf_polling_handle_register(...)
+#define etr_perf_polling_handle_deregister(...)
+#define etr_perf_polling_event_start(...)
+#define etr_perf_polling_event_stop(...)
+#endif
+#endif
diff --git a/drivers/hwtracing/coresight/coresight-tmc-core.c b/drivers/hwtracing/coresight/coresight-tmc-core.c
index 74c6323d4d6ab..dbcdba162bd38 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-core.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-core.c
@@ -26,6 +26,7 @@
#include "coresight-priv.h" #include "coresight-tmc.h" +#include "coresight-etr-perf-polling.h"
DEFINE_CORESIGHT_DEVLIST(etb_devs, "tmc_etb");
DEFINE_CORESIGHT_DEVLIST(etf_devs, "tmc_etf");
@@ -365,6 +366,9 @@ static const struct attribute_group coresight_tmc_mgmt_group = {
static const struct attribute_group *coresight_tmc_groups[] = {
	&coresight_tmc_group,
	&coresight_tmc_mgmt_group,
+#ifdef CONFIG_CORESIGHT_ETR_PERF_POLL
&coresight_tmc_polling_group,
+#endif
	NULL,
};
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etr.c b/drivers/hwtracing/coresight/coresight-tmc-etr.c
index 55c9b5fd9f832..67cd4bdcda71b 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etr.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etr.c
@@ -16,6 +16,7 @@
#include <linux/vmalloc.h>
#include "coresight-catu.h"
#include "coresight-etm-perf.h"
+#include "coresight-etr-perf-polling.h"
#include "coresight-priv.h"
#include "coresight-tmc.h"
@@ -1137,6 +1138,16 @@ void tmc_etr_disable_hw(struct tmc_drvdata *drvdata)
	drvdata->etr_buf = NULL;
}
+#ifdef CONFIG_CORESIGHT_ETR_PERF_POLL
+static void tmc_etr_reset_hw(struct tmc_drvdata *drvdata)
+{
__tmc_etr_disable_hw(drvdata);
__tmc_etr_enable_hw(drvdata);
+}
+#endif
static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev)
{
	int ret = 0;
@@ -1620,6 +1631,7 @@ static int tmc_enable_etr_sink_perf(struct coresight_device *csdev, void *data)
		drvdata->mode = CS_MODE_PERF;
		drvdata->perf_buf = etr_perf->etr_buf;
		drvdata->perf_handle = handle;
		etr_perf_polling_handle_register(handle, tmc_etr_reset_hw);
		atomic_inc(csdev->refcnt);
	}
@@ -1667,6 +1679,7 @@ static int tmc_disable_etr_sink(struct coresight_device *csdev)
	drvdata->mode = CS_MODE_DISABLED;
	/* Reset perf specific data */
	drvdata->perf_buf = NULL;
	etr_perf_polling_handle_deregister(drvdata->perf_handle);
	drvdata->perf_handle = NULL;
	spin_unlock_irqrestore(&drvdata->spinlock, flags);
--
2.25.1