6.1-stable review patch. If anyone has any objections, please let me know.
------------------
From: Namhyung Kim <namhyung@kernel.org>
[ Upstream commit e68b1c0098b959cb88afce5c93dd6a9324e6da78 ]
The work_atoms should be freed after use. Add free_work_atoms() to make sure they are all released. Use list_splice_init() when merging atoms to prevent accessing invalid pointers.
Fixes: b1ffe8f3e0c96f552 ("perf sched: Finish latency => atom rename and misc cleanups")
Reviewed-by: Ian Rogers <irogers@google.com>
Tested-by: Ian Rogers <irogers@google.com>
Link: https://lore.kernel.org/r/20250703014942.1369397-8-namhyung@kernel.org
Signed-off-by: Namhyung Kim <namhyung@kernel.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 tools/perf/builtin-sched.c | 27 ++++++++++++++++++++++++---
 1 file changed, 24 insertions(+), 3 deletions(-)
diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c
index 244f9c6f61ae..3ffb41fa82b8 100644
--- a/tools/perf/builtin-sched.c
+++ b/tools/perf/builtin-sched.c
@@ -1125,6 +1125,21 @@ add_sched_in_event(struct work_atoms *atoms, u64 timestamp)
 	atoms->nb_atoms++;
 }
 
+static void free_work_atoms(struct work_atoms *atoms)
+{
+	struct work_atom *atom, *tmp;
+
+	if (atoms == NULL)
+		return;
+
+	list_for_each_entry_safe(atom, tmp, &atoms->work_list, list) {
+		list_del(&atom->list);
+		free(atom);
+	}
+	thread__zput(atoms->thread);
+	free(atoms);
+}
+
 static int latency_switch_event(struct perf_sched *sched,
 				struct evsel *evsel,
 				struct perf_sample *sample,
@@ -3180,13 +3195,13 @@ static void __merge_work_atoms(struct rb_root_cached *root, struct work_atoms *d
 			this->total_runtime += data->total_runtime;
 			this->nb_atoms += data->nb_atoms;
 			this->total_lat += data->total_lat;
-			list_splice(&data->work_list, &this->work_list);
+			list_splice_init(&data->work_list, &this->work_list);
 			if (this->max_lat < data->max_lat) {
 				this->max_lat = data->max_lat;
 				this->max_lat_start = data->max_lat_start;
 				this->max_lat_end = data->max_lat_end;
 			}
-			zfree(&data);
+			free_work_atoms(data);
 			return;
 		}
 	}
@@ -3265,7 +3280,6 @@ static int perf_sched__lat(struct perf_sched *sched)
 		work_list = rb_entry(next, struct work_atoms, node);
 		output_lat_thread(sched, work_list);
 		next = rb_next(next);
-		thread__zput(work_list->thread);
 	}
 
 	printf(" -----------------------------------------------------------------------------------------------------------------\n");
@@ -3279,6 +3293,13 @@ static int perf_sched__lat(struct perf_sched *sched)
 
 	rc = 0;
 
+	while ((next = rb_first_cached(&sched->sorted_atom_root))) {
+		struct work_atoms *data;
+
+		data = rb_entry(next, struct work_atoms, node);
+		rb_erase_cached(next, &sched->sorted_atom_root);
+		free_work_atoms(data);
+	}
 out_free_cpus_switch_event:
 	free_cpus_switch_event(sched);
 	return rc;
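For reviewers, a minimal standalone sketch (not part of the patch) of why the switch from list_splice() to list_splice_init() matters once the donor list may be walked and freed later. It assumes the kernel-style list API from tools/include/linux/list.h (list_splice_init, list_for_each_entry_safe, list_del, list_add_tail, LIST_HEAD); struct item and merge_then_drop_source() are made-up names for illustration, not the perf ones.

/*
 * Sketch only: after list_splice(src, dst), src->next/prev still point at
 * entries that are now linked into dst, so a later walk of src (like
 * free_work_atoms() walking data->work_list) follows stale pointers and
 * can free nodes that belong to dst.  list_splice_init() additionally
 * re-initialises src, leaving it observably empty.
 */
#include <stdlib.h>
#include <linux/list.h>

struct item {
	struct list_head list;
};

static void merge_then_drop_source(struct list_head *dst, struct list_head *src)
{
	struct item *it, *tmp;

	/* Move every entry from src to dst and re-initialise src. */
	list_splice_init(src, dst);

	/* Safe: src is now empty, so nothing here frees an entry owned by dst. */
	list_for_each_entry_safe(it, tmp, src, list) {
		list_del(&it->list);
		free(it);
	}
}

int main(void)
{
	LIST_HEAD(dst);
	LIST_HEAD(src);
	struct item *it, *tmp;

	it = calloc(1, sizeof(*it));
	if (it == NULL)
		return 1;
	list_add_tail(&it->list, &src);

	merge_then_drop_source(&dst, &src);

	/* The entry survived the merge and is still reachable via dst. */
	list_for_each_entry_safe(it, tmp, &dst, list) {
		list_del(&it->list);
		free(it);
	}
	return 0;
}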