Isaku Yamahata <isaku.yamahata@gmail.com> wrote on Wed, Jul 19, 2023 at 09:21:
On Mon, Jul 17, 2023 at 02:23:42PM +0800, Jinrong Liang <ljr.kernel@gmail.com> wrote:
From: Jinrong Liang <cloudliang@tencent.com>
Add tests to cover that pmu event_filter works as expected when it's applied to fixed performance counters, even if no fixed counter exists (e.g. Intel guest pmu version=1 or AMD guest).
Signed-off-by: Jinrong Liang <cloudliang@tencent.com>
---
 .../kvm/x86_64/pmu_event_filter_test.c | 80 +++++++++++++++++++
 1 file changed, 80 insertions(+)

diff --git a/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c b/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c
index 63f85f583ef8..1872b848f734 100644
--- a/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c
+++ b/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c
@@ -27,6 +27,7 @@
 #define ARCH_PERFMON_BRANCHES_RETIRED 5
 
 #define NUM_BRANCHES 42
+#define INTEL_PMC_IDX_FIXED 32
 
 /* Matches KVM_PMU_EVENT_FILTER_MAX_EVENTS in pmu.c */
 #define MAX_FILTER_EVENTS 300
@@ -805,6 +806,84 @@ static void test_filter_ioctl(struct kvm_vcpu *vcpu)
 	TEST_ASSERT(!r, "Masking non-existent fixed counters should be allowed");
 }
 
+static void intel_run_fixed_counter_guest_code(uint8_t fixed_ctr_idx)
+{
+	for (;;) {
+		wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0);
+		wrmsr(MSR_CORE_PERF_FIXED_CTR0 + fixed_ctr_idx, 0);
+
+		/* Only OS_EN bit is enabled for fixed counter[idx]. */
+		wrmsr(MSR_CORE_PERF_FIXED_CTR_CTRL, BIT_ULL(4 * fixed_ctr_idx));
+		wrmsr(MSR_CORE_PERF_GLOBAL_CTRL,
+		      BIT_ULL(INTEL_PMC_IDX_FIXED + fixed_ctr_idx));
+		__asm__ __volatile__("loop ." : "+c"((int){NUM_BRANCHES}));
+		wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0);
+
+		GUEST_SYNC(rdmsr(MSR_CORE_PERF_FIXED_CTR0 + fixed_ctr_idx));
+	}
+}
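(Aside for readers of the thread, not part of the patch: the constants in the guest code above come from the Intel SDM's fixed-counter layout. IA32_FIXED_CTR_CTRL carries a 4-bit control field per fixed counter, whose lowest bit is the ring-0 (OS) enable, and the fixed counters' global enable bits start at bit 32 of IA32_PERF_GLOBAL_CTRL, which is why INTEL_PMC_IDX_FIXED is 32. A minimal standalone sketch, with hypothetical helper names chosen only for illustration:

#include <stdint.h>

/* Hypothetical helpers, mirroring the wrmsr() values used above. */
static inline uint64_t fixed_ctrl_os_enable(uint8_t idx)
{
	/* 4-bit field per fixed counter; bit 0 of the field enables ring 0. */
	return 1ULL << (4 * idx);
}

static inline uint64_t global_ctrl_fixed_enable(uint8_t idx)
{
	/* Fixed-counter enables live at bits 32+idx of IA32_PERF_GLOBAL_CTRL. */
	return 1ULL << (32 + idx);
}
)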
+static uint64_t test_with_fixed_counter_filter(struct kvm_vcpu *vcpu,
+					       uint32_t action, uint32_t bitmap)
+{
+	struct __kvm_pmu_event_filter f = {
+		.action = action,
+		.fixed_counter_bitmap = bitmap,
+	};
+
+	do_vcpu_set_pmu_event_filter(vcpu, &f);
+
+	return run_vcpu_to_sync(vcpu);
+}
+static void __test_fixed_counter_bitmap(struct kvm_vcpu *vcpu, uint8_t idx,
+					uint8_t nr_fixed_counters)
+{
+	unsigned int i;
+	uint32_t bitmap;
+	uint64_t count;
+
+	TEST_ASSERT(nr_fixed_counters < sizeof(bitmap),
sizeof(bitmap) * 8?
Thank you for pointing this out. You are right that the assertion should
compare nr_fixed_counters against the number of bits in the bitmap
variable, not its size in bytes. I will update the test as follows:

	TEST_ASSERT(nr_fixed_counters < sizeof(bitmap) * 8,
		    "Invalid nr_fixed_counters");

This ensures that nr_fixed_counters does not exceed the number of bits the
bitmap variable can represent (i.e., 32).
+
+	/*
+	 * Check the fixed performance counter can count normally when KVM
+	 * userspace doesn't set any pmu filter.
+	 */
+	count = run_vcpu_to_sync(vcpu);
+	TEST_ASSERT(count, "Unexpected count value: %ld\n", count);
+
+	for (i = 0; i < BIT(nr_fixed_counters); i++) {
+		bitmap = BIT(i);
+		count = test_with_fixed_counter_filter(vcpu, KVM_PMU_EVENT_ALLOW,
+						       bitmap);
+		ASSERT_EQ(!!count, !!(bitmap & BIT(idx)));
+
+		count = test_with_fixed_counter_filter(vcpu, KVM_PMU_EVENT_DENY,
+						       bitmap);
+		ASSERT_EQ(!!count, !(bitmap & BIT(idx)));
+	}
+}
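(Aside, spelling out the expectation the two ASSERT_EQ()s above encode, using idx = 1 as an example: an ALLOW filter with bitmap = BIT(1) should let fixed counter 1 count, i.e. a non-zero value is read back, while ALLOW with any other single-bit bitmap should leave it at zero; a DENY filter flips that, so bitmap = BIT(1) yields zero and any other bitmap lets the counter count.)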
+static void test_fixed_counter_bitmap(void)
+{
+	uint8_t nr_fixed_counters = kvm_cpu_property(X86_PROPERTY_PMU_NR_FIXED_COUNTERS);
+	struct kvm_vm *vm;
+	struct kvm_vcpu *vcpu;
+	uint8_t idx;
+
+	/*
+	 * Check that pmu_event_filter works as expected when it's applied to
+	 * fixed performance counters.
+	 */
+	for (idx = 0; idx < nr_fixed_counters; idx++) {
+		vm = vm_create_with_one_vcpu(&vcpu,
+					     intel_run_fixed_counter_guest_code);
+		vcpu_args_set(vcpu, 1, idx);
+		__test_fixed_counter_bitmap(vcpu, idx, nr_fixed_counters);
+		kvm_vm_free(vm);
+	}
+}
 int main(int argc, char *argv[])
 {
 	void (*guest_code)(void);
@@ -848,6 +927,7 @@ int main(int argc, char *argv[])
 	kvm_vm_free(vm);
 
 	test_pmu_config_disable(guest_code);
+	test_fixed_counter_bitmap();
 
 	return 0;
 }
--
2.39.3
--
Isaku Yamahata <isaku.yamahata@gmail.com>