When coresight_unregister() is called, check whether the device is used by any perf event. Stop and release any such perf events to avoid accessing the removed coresight device's data afterwards. While the topology update is in flight, etm_perf_disable_with() reschedules the running ETM events on each CPU (stopping them and disabling their trace paths), releases the event data of every session whose trace path includes the device being removed, and rejects requests for new trace paths until etm_perf_disable_done() allows them again.
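For illustration, this is the kind of race being closed (a simplified sketch; the exact call chains depend on the configuration):

    CPU0 (perf session)                  CPU1 (device removal)
    etm_event_start(event)               coresight_unregister(csdev)
      path = etm_event_cpu_path(...)       coresight_remove_conns(csdev)
      coresight_enable_path(path, ...)      device_unregister(&csdev->dev)
        dereferences csdev on the path        csdev data freed
                                              => use-after-free on CPU0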
Signed-off-by: Tingwei Zhang <tingwei@codeaurora.org>
---
 drivers/hwtracing/coresight/coresight-core.c  |   7 +
 .../hwtracing/coresight/coresight-etm-perf.c  | 157 +++++++++++++++++-
 .../hwtracing/coresight/coresight-etm-perf.h  |   2 +
 drivers/hwtracing/coresight/coresight-priv.h  |   4 +-
 kernel/events/core.c                          |   1 +
 5 files changed, 164 insertions(+), 7 deletions(-)
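Note: coresight_dev_on_path() is only declared by this patch; its body is expected to come from an earlier patch in the series. A minimal sketch of the intended semantics, assuming the path holds struct coresight_node entries as built by coresight_build_path():

    bool coresight_dev_on_path(struct list_head *path,
                               struct coresight_device *csdev)
    {
        struct coresight_node *nd;

        /* Walk the nodes of the path, checking for the given device */
        list_for_each_entry(nd, path, link)
            if (nd->csdev == csdev)
                return true;

        return false;
    }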
diff --git a/drivers/hwtracing/coresight/coresight-core.c b/drivers/hwtracing/coresight/coresight-core.c
index 55ff45a9729e..8261d527c8fb 100644
--- a/drivers/hwtracing/coresight/coresight-core.c
+++ b/drivers/hwtracing/coresight/coresight-core.c
@@ -1475,9 +1475,16 @@ void coresight_unregister(struct coresight_device *csdev)
 	mutex_lock(&coresight_mutex);
 	coresight_disable_with(csdev);
+	/*
+	 * Disable all perf events that have a trace path related to csdev.
+	 * Deny any request to create a new trace path.
+	 */
+	etm_perf_disable_with(csdev);
 	/* Remove references of that device in the topology */
 	coresight_remove_conns(csdev);
 	coresight_release_platform_data(csdev->pdata);
+	/* New trace paths in perf can be established again */
+	etm_perf_disable_done();
 	mutex_unlock(&coresight_mutex);
 	device_unregister(&csdev->dev);
 }
diff --git a/drivers/hwtracing/coresight/coresight-etm-perf.c b/drivers/hwtracing/coresight/coresight-etm-perf.c
index b466162d8254..95aeb1ca807c 100644
--- a/drivers/hwtracing/coresight/coresight-etm-perf.c
+++ b/drivers/hwtracing/coresight/coresight-etm-perf.c
@@ -23,9 +23,22 @@
 static struct pmu etm_pmu;
 static bool etm_perf_up;
 
+/* Count of ongoing tasks which are changing the coresight topology */
+static atomic_t *cs_updating_cnt;
+
+static DEFINE_MUTEX(cs_path_mutex);
 static DEFINE_PER_CPU(struct perf_output_handle, ctx_handle);
 static DEFINE_PER_CPU(struct coresight_device *, csdev_src);
+static DEFINE_PER_CPU(bool, csdev_src_removing);
+
+struct ev_data {
+	struct etm_event_data *event_data;
+	struct list_head link;
+};
+
+/* List of all events */
+static LIST_HEAD(ev_list);
 
 /* ETMv3.5/PTM's ETMCR is 'config' */
 PMU_FORMAT_ATTR(cycacc,		"config:" __stringify(ETM_OPT_CYCACC));
@@ -64,6 +77,8 @@ static const struct attribute_group *etm_pmu_attr_groups[] = {
 	NULL,
 };
 
+static void update_cs_cfg(void *ignored);
+
 static inline struct list_head **
 etm_event_cpu_path_ptr(struct etm_event_data *data, int cpu)
 {
@@ -135,15 +150,35 @@ static void free_sink_buffer(struct etm_event_data *event_data)
 	cpu = cpumask_first(mask);
 	sink = coresight_get_sink(etm_event_cpu_path(event_data, cpu));
 	sink_ops(sink)->free_buffer(event_data->snk_config);
+	event_data->snk_config = NULL;
 }
 
-static void free_event_data(struct work_struct *work)
+static void del_event_from_list(struct etm_event_data *event_data,
+				struct ev_data *del_ev_data)
+{
+	struct ev_data *ev_data = NULL;
+	struct ev_data *ev_data_next = NULL;
+
+	if (IS_ERR_OR_NULL(del_ev_data)) {
+		list_for_each_entry_safe(ev_data, ev_data_next, &ev_list, link) {
+			if (ev_data->event_data == event_data) {
+				del_ev_data = ev_data;
+				break;
+			}
+		}
+	}
+	if (!IS_ERR_OR_NULL(del_ev_data)) {
+		list_del(&del_ev_data->link);
+		kfree(del_ev_data);
+	}
+}
+
+static void _free_event_data(struct etm_event_data *event_data,
+			     struct ev_data *ev_data)
 {
 	int cpu;
 	cpumask_t *mask;
-	struct etm_event_data *event_data;
 
-	event_data = container_of(work, struct etm_event_data, work);
 	mask = &event_data->mask;
 
 	/* Free the sink buffers, if there are any */
@@ -159,6 +194,20 @@ static void free_event_data(struct work_struct *work)
 	}
 
 	free_percpu(event_data->path);
+	del_event_from_list(event_data, ev_data);
+}
+
+static void free_event_data(struct work_struct *work)
+{
+	struct etm_event_data *event_data;
+
+	event_data = container_of(work, struct etm_event_data, work);
+
+	mutex_lock(&cs_path_mutex);
+	if (event_data->snk_config)
+		_free_event_data(event_data, NULL);
+	mutex_unlock(&cs_path_mutex);
+
 	kfree(event_data);
 }
@@ -212,7 +261,10 @@ static void *etm_setup_aux(struct perf_event *event, void **pages,
 	cpumask_t *mask;
 	struct coresight_device *sink;
 	struct etm_event_data *event_data = NULL;
+	struct ev_data *ev_data;
 
+	if (atomic_read(cs_updating_cnt))
+		return NULL;
 	event_data = alloc_event_data(cpu);
 	if (!event_data)
 		return NULL;
@@ -231,6 +283,7 @@ static void *etm_setup_aux(struct perf_event *event, void **pages,
 	mask = &event_data->mask;
 
+	mutex_lock(&cs_path_mutex);
 	/*
 	 * Setup the path for each CPU in a trace session. We try to build
 	 * trace path for each CPU in the mask. If we don't find an ETM
@@ -282,10 +335,19 @@ static void *etm_setup_aux(struct perf_event *event, void **pages,
 	if (!event_data->snk_config)
 		goto err;
 
+	ev_data = kzalloc(sizeof(struct ev_data), GFP_KERNEL);
+	if (!ev_data) {
+		*etm_event_cpu_path_ptr(event_data, cpu) = NULL;
+		goto err;
+	}
+	ev_data->event_data = event_data;
+	list_add(&ev_data->link, &ev_list);
+	mutex_unlock(&cs_path_mutex);
 out:
 	return event_data;
 
 err:
+	mutex_unlock(&cs_path_mutex);
 	etm_free_aux(event_data);
 	event_data = NULL;
 	goto out;
@@ -299,7 +361,7 @@ static void etm_event_start(struct perf_event *event, int flags)
 	struct coresight_device *sink, *csdev = per_cpu(csdev_src, cpu);
 	struct list_head *path;
 
-	if (!csdev)
+	if (!csdev || atomic_read(cs_updating_cnt))
 		goto fail;
 
 	/*
@@ -310,6 +372,9 @@ static void etm_event_start(struct perf_event *event, int flags)
 	if (!event_data)
 		goto fail;
 
+	if (!event_data->snk_config)
+		goto fail;
+
 	path = etm_event_cpu_path(event_data, cpu);
 	/* We need a sink, no need to continue without one */
 	sink = coresight_get_sink(path);
@@ -391,7 +456,7 @@ static int etm_event_add(struct perf_event *event, int mode)
 	int ret = 0;
 	struct hw_perf_event *hwc = &event->hw;
 
-	if (mode & PERF_EF_START) {
+	if (mode & PERF_EF_START && !atomic_read(cs_updating_cnt)) {
 		etm_event_start(event, 0);
 		if (hwc->state & PERF_HES_STOPPED)
 			ret = -EINVAL;
@@ -499,9 +564,14 @@ int etm_perf_symlink(struct coresight_device *csdev, bool link)
 		if (ret)
 			return ret;
 		per_cpu(csdev_src, cpu) = csdev;
+		per_cpu(csdev_src_removing, cpu) = false;
 	} else {
 		sysfs_remove_link(&pmu_dev->kobj, entry);
-		per_cpu(csdev_src, cpu) = NULL;
+		/*
+		 * Set csdev_src to NULL later, when the device is
+		 * unregistered, to avoid conflicts with an ongoing event.
+		 */
+		per_cpu(csdev_src_removing, cpu) = true;
 	}
 
 	return 0;
@@ -580,10 +650,79 @@ void etm_perf_del_symlink_sink(struct coresight_device *csdev)
 	csdev->ea = NULL;
 }
 
+static void update_cs_cfg(void *ignored)
+{
+	/*
+	 * Reschedule running events. Events will be stopped and their
+	 * coresight paths will be disabled. The paths won't be enabled
+	 * again until the coresight update is done.
+	 */
+	perf_pmu_resched(&etm_pmu);
+}
+
+static void update_cs_path(struct coresight_device *csdev)
+{
+	struct ev_data *ev_data = NULL;
+	struct ev_data *ev_data_next = NULL;
+	struct etm_event_data *event_data;
+	unsigned int cpu;
+	cpumask_t *mask;
+	bool ret = false;
+
+	mutex_lock(&cs_path_mutex);
+
+	list_for_each_entry_safe(ev_data, ev_data_next, &ev_list, link) {
+		event_data = ev_data->event_data;
+		if (!event_data->snk_config)
+			continue;
+
+		mask = &event_data->mask;
+		/* Reset for each event so one match can't free them all */
+		ret = false;
+		for_each_cpu(cpu, mask) {
+			struct list_head **ppath;
+
+			ppath = etm_event_cpu_path_ptr(event_data, cpu);
+			if (!IS_ERR_OR_NULL(*ppath)) {
+				ret = coresight_dev_on_path(*ppath, csdev);
+				if (ret)
+					break;
+			}
+		}
+
+		if (ret)
+			_free_event_data(event_data, ev_data);
+	}
+
+	mutex_unlock(&cs_path_mutex);
+	for_each_possible_cpu(cpu) {
+		if (per_cpu(csdev_src_removing, cpu)) {
+			per_cpu(csdev_src, cpu) = NULL;
+			per_cpu(csdev_src_removing, cpu) = false;
+		}
+	}
+}
+
+void etm_perf_disable_with(struct coresight_device *csdev)
+{
+	atomic_inc(cs_updating_cnt);
+	get_online_cpus();
+	on_each_cpu(update_cs_cfg, NULL, 1);
+	put_online_cpus();
+	/* Free events whose trace path includes csdev */
+	update_cs_path(csdev);
+}
+
+void etm_perf_disable_done(void)
+{
+	atomic_dec(cs_updating_cnt);
+}
+
 int __init etm_perf_init(void)
 {
 	int ret;
 
+	etm_pmu.module = THIS_MODULE;
 	etm_pmu.capabilities		= (PERF_PMU_CAP_EXCLUSIVE |
 					   PERF_PMU_CAP_ITRACE);
 
@@ -601,6 +740,11 @@ int __init etm_perf_init(void)
 	etm_pmu.addr_filters_validate	= etm_addr_filters_validate;
 	etm_pmu.nr_addr_filters		= ETM_ADDR_CMP_MAX;
 
+	cs_updating_cnt = kcalloc(1, sizeof(*cs_updating_cnt), GFP_KERNEL);
+	if (!cs_updating_cnt)
+		return -ENOMEM;
+	atomic_set(cs_updating_cnt, 0);
+
 	ret = perf_pmu_register(&etm_pmu, CORESIGHT_ETM_PMU_NAME, -1);
 	if (ret == 0)
 		etm_perf_up = true;
@@ -610,5 +754,6 @@ int __init etm_perf_init(void)
 
 void __exit etm_perf_exit(void)
 {
 	perf_pmu_unregister(&etm_pmu);
+	kfree(cs_updating_cnt);
 }
diff --git a/drivers/hwtracing/coresight/coresight-etm-perf.h b/drivers/hwtracing/coresight/coresight-etm-perf.h
index 3e4f2ad5e193..221831732cb1 100644
--- a/drivers/hwtracing/coresight/coresight-etm-perf.h
+++ b/drivers/hwtracing/coresight/coresight-etm-perf.h
@@ -61,6 +61,8 @@ struct etm_event_data {
 int etm_perf_symlink(struct coresight_device *csdev, bool link);
 int etm_perf_add_symlink_sink(struct coresight_device *csdev);
 void etm_perf_del_symlink_sink(struct coresight_device *csdev);
+void etm_perf_disable_with(struct coresight_device *csdev);
+void etm_perf_disable_done(void);
 static inline void *etm_perf_sink_config(struct perf_output_handle *handle)
 {
 	struct etm_event_data *data = perf_get_aux(handle);
diff --git a/drivers/hwtracing/coresight/coresight-priv.h b/drivers/hwtracing/coresight/coresight-priv.h
index 56d677473be4..f91fff8267be 100644
--- a/drivers/hwtracing/coresight/coresight-priv.h
+++ b/drivers/hwtracing/coresight/coresight-priv.h
@@ -151,8 +151,10 @@ struct coresight_device *coresight_get_sink(struct list_head *path);
 struct coresight_device *coresight_get_enabled_sink(bool reset);
 struct coresight_device *coresight_get_sink_by_id(u32 id);
 struct list_head *coresight_build_path(struct coresight_device *csdev,
-				       struct coresight_device *sink);
+					struct coresight_device *sink);
 void coresight_release_path(struct list_head *path);
+bool coresight_dev_on_path(struct list_head *path,
+			   struct coresight_device *csdev);
 
 #if IS_ENABLED(CONFIG_CORESIGHT_SOURCE_ETM3X)
 extern int etm_readl_cp14(u32 off, unsigned int *val);
diff --git a/kernel/events/core.c b/kernel/events/core.c
index e296c5c59c6f..cce7a2b82a4b 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -2723,6 +2723,7 @@ void perf_pmu_resched(struct pmu *pmu)
 	ctx_resched(cpuctx, task_ctx, EVENT_ALL|EVENT_CPU);
 	perf_ctx_unlock(cpuctx, task_ctx);
 }
+EXPORT_SYMBOL_GPL(perf_pmu_resched);
 
 /*
  * Cross CPU call to install and enable a performance event