On Tue, Dec 09, 2025 at 08:51:11PM +0000, Colton Lewis wrote:
> diff --git a/arch/arm64/include/asm/arm_pmuv3.h b/arch/arm64/include/asm/arm_pmuv3.h
> index 3e25c0313263c..41ec6730ebc62 100644
> --- a/arch/arm64/include/asm/arm_pmuv3.h
> +++ b/arch/arm64/include/asm/arm_pmuv3.h
> @@ -39,6 +39,16 @@ static inline unsigned long read_pmevtypern(int n)
>  	return 0;
>  }
>  
> +static inline void write_pmxevcntr(u64 val)
> +{
> +	write_sysreg(val, pmxevcntr_el0);
> +}
> +
> +static inline u64 read_pmxevcntr(void)
> +{
> +	return read_sysreg(pmxevcntr_el0);
> +}
> +
>  static inline unsigned long read_pmmir(void)
>  {
>  	return read_cpuid(PMMIR_EL1);
> @@ -105,21 +115,41 @@ static inline void write_pmcntenset(u64 val)
>  	write_sysreg(val, pmcntenset_el0);
>  }
>  
> +static inline u64 read_pmcntenset(void)
> +{
> +	return read_sysreg(pmcntenset_el0);
> +}
> +
>  static inline void write_pmcntenclr(u64 val)
>  {
>  	write_sysreg(val, pmcntenclr_el0);
>  }
>  
> +static inline u64 read_pmcntenclr(void)
> +{
> +	return read_sysreg(pmcntenclr_el0);
> +}
> +
>  static inline void write_pmintenset(u64 val)
>  {
>  	write_sysreg(val, pmintenset_el1);
>  }
>  
> +static inline u64 read_pmintenset(void)
> +{
> +	return read_sysreg(pmintenset_el1);
> +}
> +
>  static inline void write_pmintenclr(u64 val)
>  {
>  	write_sysreg(val, pmintenclr_el1);
>  }
>  
> +static inline u64 read_pmintenclr(void)
> +{
> +	return read_sysreg(pmintenclr_el1);
> +}
> +
>  static inline void write_pmccfiltr(u64 val)
>  {
>  	write_sysreg(val, pmccfiltr_el0);
> @@ -160,11 +190,16 @@ static inline u64 read_pmovsclr(void)
>  	return read_sysreg(pmovsclr_el0);
>  }
>  
> -static inline void write_pmuserenr(u32 val)
> +static inline void write_pmuserenr(u64 val)
>  {
>  	write_sysreg(val, pmuserenr_el0);
>  }
>  
> +static inline u64 read_pmuserenr(void)
> +{
> +	return read_sysreg(pmuserenr_el0);
> +}
> +
>  static inline void write_pmuacr(u64 val)
>  {
>  	write_sysreg_s(val, SYS_PMUACR_EL1);
I wouldn't bother with adding accessors to the PMUv3 driver header; it gets a bit confusing when the 32-bit counterpart lacks matching definitions. Additionally, the part of KVM you're modifying is very much an "internal" part meant to run in a not-quite-kernel context.
Considering this, I'd prefer KVM directly accessed the PMU registers to avoid the possibility of taking some instrumented codepath in the future.
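Untested sketch of what I mean, i.e. open-code the accesses where KVM needs them rather than go through the shared header (names here are made up):

	/* Touch the hardware directly, not via instrumentable helpers. */
	static u64 kvm_read_pmxevcntr(void)
	{
		return read_sysreg(pmxevcntr_el0);
	}

	static void kvm_write_pmxevcntr(u64 val)
	{
		write_sysreg(val, pmxevcntr_el0);
	}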
> +	if (!kvm_vcpu_pmu_is_partitioned(vcpu)
> +	    || pmu_access_el0_disabled(vcpu))
Always put operators on the preceding line for line continuations.
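i.e.:

	if (!kvm_vcpu_pmu_is_partitioned(vcpu) ||
	    pmu_access_el0_disabled(vcpu))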
Also, don't call pmu_access_el0_disabled() from here. It can potentially do a full emulated exception entry even though the vCPU is in an extremely inconsistent state (i.e. we haven't returned to kernel context yet). Isn't the current value of PMUSERENR_EL0 on the CPU instead of in the vCPU's saved context anyway?
The fast-path accessors really need to be *just* accessors, reading state from the CPU in the unfortunate situation that a TPM trap has been forced upon us.
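Something like the following (untested, name made up, and assuming the guest's PMUSERENR_EL0 value really is live on the CPU at this point):

	/* Read the trap-relevant state straight off the CPU; no injection here. */
	static bool kvm_pmu_el0_access_enabled(struct kvm_vcpu *vcpu)
	{
		return vcpu_mode_priv(vcpu) ||
		       (read_sysreg(pmuserenr_el0) & ARMV8_PMU_USERENR_EN);
	}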
> +	case SYS_PMXEVCNTR_EL0:
> +		idx = FIELD_GET(PMSELR_EL0_SEL, read_pmselr());
> +
> +		if (pmu_access_event_counter_el0_disabled(vcpu))
> +			return false;
> +
> +		if (!pmu_counter_idx_valid(vcpu, idx))
> +			return false;
> +
> +		ret = handle_pmu_reg(vcpu, &p, PMEVCNTR0_EL0 + idx, rt, val,
> +				     &read_pmxevcntr, &write_pmxevcntr);
> +		break;
It is a bit odd to handle the muxing to find the in-memory value yet use the selector-based register for the hardware access.
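Since you've already decoded idx, you may as well use the indexed accessors for the hardware side too, e.g. (sketch):

	if (p.is_write)
		write_pmevcntrn(idx, val);
	else
		vcpu_set_reg(vcpu, rt, read_pmevcntrn(idx));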
> +	case SYS_PMEVCNTRn_EL0(0) ... SYS_PMEVCNTRn_EL0(30):
> +		idx = ((p.CRm & 3) << 3) | (p.Op2 & 7);
> +
> +		if (pmu_access_event_counter_el0_disabled(vcpu))
> +			return false;
> +
> +		if (!pmu_counter_idx_valid(vcpu, idx))
> +			return false;
> +
> +		if (p.is_write) {
> +			write_pmevcntrn(idx, val);
> +			__vcpu_assign_sys_reg(vcpu, PMEVCNTR0_EL0 + idx, val);
> +		} else {
> +			vcpu_set_reg(vcpu, rt, read_pmevcntrn(idx));
> +		}
> +
> +		ret = true;
> +		break;
Can't both of these cases share a helper once you've worked out the index? Same for PMEVTYPERn_EL0.
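Untested and the name is made up, but something along these lines (keeping your checks as-is for illustration):

	static bool handle_pmevcntr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
				    u64 idx, int rt, u64 val)
	{
		if (pmu_access_event_counter_el0_disabled(vcpu))
			return false;

		if (!pmu_counter_idx_valid(vcpu, idx))
			return false;

		if (p->is_write) {
			/* Update the hardware and the shadowed register together. */
			write_pmevcntrn(idx, val);
			__vcpu_assign_sys_reg(vcpu, PMEVCNTR0_EL0 + idx, val);
		} else {
			vcpu_set_reg(vcpu, rt, read_pmevcntrn(idx));
		}

		return true;
	}

Then each case boils down to computing idx followed by 'ret = handle_pmevcntr(vcpu, &p, idx, rt, val); break;'.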
> +	default:
> +		ret = false;
> +	}
> +
> +	if (ret)
> +		__kvm_skip_instr(vcpu);
> +
> +	return ret;
> +}
>  static inline bool kvm_hyp_handle_sysreg(struct kvm_vcpu *vcpu, u64 *exit_code)
>  {
>  	if (cpus_have_final_cap(ARM64_WORKAROUND_CAVIUM_TX2_219_TVM) &&
> @@ -785,6 +983,9 @@ static inline bool kvm_hyp_handle_sysreg(struct kvm_vcpu *vcpu, u64 *exit_code)
>  	if (kvm_handle_cntxct(vcpu))
>  		return true;
>  
> +	if (kvm_hyp_handle_pmu_regs(vcpu))
> +		return true;
Since the whole partitioned PMU feature is constrained to VHE, you should call this from kvm_hyp_handle_sysreg_vhe().
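Roughly (sketch, ignoring whatever else already lives in that function):

	static bool kvm_hyp_handle_sysreg_vhe(struct kvm_vcpu *vcpu, u64 *exit_code)
	{
		if (kvm_hyp_handle_pmu_regs(vcpu))
			return true;

		return kvm_hyp_handle_sysreg(vcpu, exit_code);
	}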
> +bool check_pmu_access_disabled(struct kvm_vcpu *vcpu, u64 flags)
> +{
> +	u64 reg = __vcpu_sys_reg(vcpu, PMUSERENR_EL0);
> +	bool enabled = (reg & flags) || vcpu_mode_priv(vcpu);
> +
> +	if (!enabled)
> +		kvm_inject_undefined(vcpu);
> +
> +	return !enabled;
> +}
> +
> +bool pmu_access_el0_disabled(struct kvm_vcpu *vcpu)
> +{
> +	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_EN);
> +}
> +
> +bool pmu_counter_idx_valid(struct kvm_vcpu *vcpu, u64 idx)
> +{
> +	u64 pmcr, val;
> +
> +	pmcr = kvm_vcpu_read_pmcr(vcpu);
> +	val = FIELD_GET(ARMV8_PMU_PMCR_N, pmcr);
> +	if (idx >= val && idx != ARMV8_PMU_CYCLE_IDX) {
> +		kvm_inject_undefined(vcpu);
> +		return false;
> +	}
> +
> +	return true;
> +}
> +
> +bool pmu_access_cycle_counter_el0_disabled(struct kvm_vcpu *vcpu)
> +{
> +	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_CR | ARMV8_PMU_USERENR_EN);
> +}
> +
> +bool pmu_access_event_counter_el0_disabled(struct kvm_vcpu *vcpu)
> +{
> +	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_ER | ARMV8_PMU_USERENR_EN);
> +}
Refactorings need to happen in a separate patch.
Thanks,
Oliver