From: Kan Liang <kan.liang@linux.intel.com>
Implement the switch_guest_ctx interface for the x86 PMU: switch the PMI to the dedicated KVM_GUEST_PMI_VECTOR at perf guest enter, and switch it back to NMI at perf guest exit.
Signed-off-by: Xiong Zhang <xiong.y.zhang@linux.intel.com>
Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
Tested-by: Yongwei Ma <yongwei.ma@intel.com>
Signed-off-by: Mingwei Zhang <mizhang@google.com>
---
 arch/x86/events/core.c | 12 ++++++++++++
 1 file changed, 12 insertions(+)
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 8f218ac0d445..28161d6ff26d 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -2677,6 +2677,16 @@ static bool x86_pmu_filter(struct pmu *pmu, int cpu)
 	return ret;
 }
 
+static void x86_pmu_switch_guest_ctx(bool enter, void *data)
+{
+	u32 guest_lvtpc = *(u32 *)data;
+
+	if (enter)
+		apic_write(APIC_LVTPC, guest_lvtpc);
+	else
+		apic_write(APIC_LVTPC, APIC_DM_NMI);
+}
+
 static struct pmu pmu = {
 	.pmu_enable		= x86_pmu_enable,
 	.pmu_disable		= x86_pmu_disable,
@@ -2706,6 +2716,8 @@ static struct pmu pmu = {
 	.aux_output_match	= x86_pmu_aux_output_match,
 
 	.filter			= x86_pmu_filter,
+
+	.switch_guest_ctx	= x86_pmu_switch_guest_ctx,
 };
 
 void arch_perf_update_userpage(struct perf_event *event,
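
For reviewers, a minimal caller sketch (not part of this patch) of how the new .switch_guest_ctx callback could be driven around guest entry and exit. The helper names below are hypothetical; KVM_GUEST_PMI_VECTOR is assumed to be defined elsewhere in this series, and the actual wiring lives in perf core/KVM rather than here.

	/*
	 * Hypothetical illustration only: drive pmu->switch_guest_ctx()
	 * around guest entry/exit.  The x86 callback above dereferences
	 * @data unconditionally, so a valid pointer is passed on both paths.
	 */
	static void example_perf_guest_enter(struct pmu *pmu)
	{
		/* Assumed: deliver guest PMIs as a fixed-mode interrupt on the KVM vector. */
		u32 guest_lvtpc = APIC_DM_FIXED | KVM_GUEST_PMI_VECTOR;

		if (pmu->switch_guest_ctx)
			pmu->switch_guest_ctx(true, &guest_lvtpc);
	}

	static void example_perf_guest_exit(struct pmu *pmu)
	{
		u32 unused = 0;

		/* enter == false restores APIC_DM_NMI regardless of @data. */
		if (pmu->switch_guest_ctx)
			pmu->switch_guest_ctx(false, &unused);
	}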