From: Ravi Bangoria <ravi.bangoria@amd.com>
[ Upstream commit 2fad201fe38ff9a692acedb1990ece2c52a29f95 ]
Although IBS pmus can be invoked via their own interface, indirect IBS
invocation via core pmu events is also supported with a fixed set of
events, for user convenience: cpu-cycles:p, r076:p (same as
cpu-cycles:p) and r0C1:p (micro-ops).
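As an illustration only (not part of this patch), the same request can
be made programmatically from user space; open_precise_cycles() below
is a hypothetical helper name, shown just to point out the
perf_event_attr fields involved:

  /* Request r076:p (cpu-cycles, precise) via perf_event_open(2). */
  #include <linux/perf_event.h>
  #include <string.h>
  #include <sys/syscall.h>
  #include <unistd.h>

  static int open_precise_cycles(int cpu)
  {
          struct perf_event_attr attr;

          memset(&attr, 0, sizeof(attr));
          attr.size          = sizeof(attr);
          attr.type          = PERF_TYPE_RAW;    /* r076: same as cpu-cycles */
          attr.config        = 0x76;
          attr.precise_ip    = 1;                /* serviced by IBS on AMD */
          attr.sample_period = 100000;

          /* pid = -1, cpu = <cpu>, group_fd = -1, flags = 0 */
          return syscall(__NR_perf_event_open, &attr, -1, cpu, -1, 0);
  }
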
This indirect IBS invocation has been broken since commit 66d258c5b048
("perf/core: Optimize perf_init_event()"), which added the RAW pmu to
the 'pmu_idr' list; since then, if event_init() fails with the RAW pmu,
perf_init_event() returns that error instead of trying other pmus.
Forward precise events from the core pmu to IBS by overwriting 'type'
and 'config' in the kernel copy of perf_event_attr. Overwriting causes
perf_init_event() to retry with the updated 'type' and 'config', which
automatically forwards the event to the IBS pmu.
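For reference, the new helper at the center of the change is
reproduced here from the diff below, with explanatory comments added:

  int forward_event_to_ibs(struct perf_event *event)
  {
          u64 config = 0;

          /* IBS can only service precise levels 1 and 2. */
          if (!event->attr.precise_ip || event->attr.precise_ip > 2)
                  return -EOPNOTSUPP;

          /* Map cpu-cycles/r076/r0C1 onto the IBS op config. */
          if (!core_pmu_ibs_config(event, &config)) {
                  event->attr.type = perf_ibs_op.pmu.type;
                  event->attr.config = config;
          }
          /* -ENOENT makes perf_init_event() retry with the rewritten attr. */
          return -ENOENT;
  }
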
Without patch:
  $ sudo ./perf record -C 0 -e r076:p -- sleep 1
  Error:
  The r076:p event is not supported.

With patch:
  $ sudo ./perf record -C 0 -e r076:p -- sleep 1
  [ perf record: Woken up 1 times to write data ]
  [ perf record: Captured and wrote 0.341 MB perf.data (37 samples) ]

Fixes: 66d258c5b048 ("perf/core: Optimize perf_init_event()")
Reported-by: Stephane Eranian <eranian@google.com>
Signed-off-by: Ravi Bangoria <ravi.bangoria@amd.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20230504110003.2548-3-ravi.bangoria@amd.com
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 arch/x86/events/amd/core.c        |  2 +-
 arch/x86/events/amd/ibs.c         | 53 +++++++++++++++----------------
 arch/x86/include/asm/perf_event.h |  2 ++
 3 files changed, 29 insertions(+), 28 deletions(-)
diff --git a/arch/x86/events/amd/core.c b/arch/x86/events/amd/core.c
index 52eba415928a3..afc955340f81c 100644
--- a/arch/x86/events/amd/core.c
+++ b/arch/x86/events/amd/core.c
@@ -364,7 +364,7 @@ static int amd_pmu_hw_config(struct perf_event *event)
 
 	/* pass precise event sampling to ibs: */
 	if (event->attr.precise_ip && get_ibs_caps())
-		return -ENOENT;
+		return forward_event_to_ibs(event);
 
 	if (has_branch_stack(event))
 		return -EOPNOTSUPP;
diff --git a/arch/x86/events/amd/ibs.c b/arch/x86/events/amd/ibs.c
index 8a85658a24cc1..354d52e17ef55 100644
--- a/arch/x86/events/amd/ibs.c
+++ b/arch/x86/events/amd/ibs.c
@@ -202,7 +202,7 @@ static struct perf_ibs *get_ibs_pmu(int type)
 }
 
 /*
- * Use IBS for precise event sampling:
+ * core pmu config -> IBS config
  *
  *  perf record -a -e cpu-cycles:p ...    # use ibs op counting cycle count
  *  perf record -a -e r076:p ...          # same as -e cpu-cycles:p
@@ -211,25 +211,9 @@ static struct perf_ibs *get_ibs_pmu(int type)
  * IbsOpCntCtl (bit 19) of IBS Execution Control Register (IbsOpCtl,
  * MSRC001_1033) is used to select either cycle or micro-ops counting
  * mode.
- *
- * The rip of IBS samples has skid 0. Thus, IBS supports precise
- * levels 1 and 2 and the PERF_EFLAGS_EXACT is set. In rare cases the
- * rip is invalid when IBS was not able to record the rip correctly.
- * We clear PERF_EFLAGS_EXACT and take the rip from pt_regs then.
- *
 */
-static int perf_ibs_precise_event(struct perf_event *event, u64 *config)
+static int core_pmu_ibs_config(struct perf_event *event, u64 *config)
 {
-	switch (event->attr.precise_ip) {
-	case 0:
-		return -ENOENT;
-	case 1:
-	case 2:
-		break;
-	default:
-		return -EOPNOTSUPP;
-	}
-
 	switch (event->attr.type) {
 	case PERF_TYPE_HARDWARE:
 		switch (event->attr.config) {
@@ -255,22 +239,37 @@ static int perf_ibs_precise_event(struct perf_event *event, u64 *config)
 	return -EOPNOTSUPP;
 }
 
+/*
+ * The rip of IBS samples has skid 0. Thus, IBS supports precise
+ * levels 1 and 2 and the PERF_EFLAGS_EXACT is set. In rare cases the
+ * rip is invalid when IBS was not able to record the rip correctly.
+ * We clear PERF_EFLAGS_EXACT and take the rip from pt_regs then.
+ */
+int forward_event_to_ibs(struct perf_event *event)
+{
+	u64 config = 0;
+
+	if (!event->attr.precise_ip || event->attr.precise_ip > 2)
+		return -EOPNOTSUPP;
+
+	if (!core_pmu_ibs_config(event, &config)) {
+		event->attr.type = perf_ibs_op.pmu.type;
+		event->attr.config = config;
+	}
+	return -ENOENT;
+}
+
 static int perf_ibs_init(struct perf_event *event)
 {
 	struct hw_perf_event *hwc = &event->hw;
 	struct perf_ibs *perf_ibs;
 	u64 max_cnt, config;
-	int ret;
 
 	perf_ibs = get_ibs_pmu(event->attr.type);
-	if (perf_ibs) {
-		config = event->attr.config;
-	} else {
-		perf_ibs = &perf_ibs_op;
-		ret = perf_ibs_precise_event(event, &config);
-		if (ret)
-			return ret;
-	}
+	if (!perf_ibs)
+		return -ENOENT;
+
+	config = event->attr.config;
 
 	if (event->pmu != &perf_ibs->pmu)
 		return -ENOENT;
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
index b9a7fd0a27e2d..a4e4bbb7795d3 100644
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -412,8 +412,10 @@ struct pebs_xmm {
 
 #ifdef CONFIG_X86_LOCAL_APIC
 extern u32 get_ibs_caps(void);
+extern int forward_event_to_ibs(struct perf_event *event);
 #else
 static inline u32 get_ibs_caps(void) { return 0; }
+static inline int forward_event_to_ibs(struct perf_event *event) { return -ENOENT; }
 #endif
 
 #ifdef CONFIG_PERF_EVENTS