From: Jinrong Liang <cloudliang@tencent.com>
Add a fixed_counter_bitmap parameter to create_pmu_event_filter() so that the same helper can also be used to control the guest's fixed counters.
No functional change intended.
Signed-off-by: Jinrong Liang <cloudliang@tencent.com>
---
 .../kvm/x86_64/pmu_event_filter_test.c | 31 ++++++++++++-------
 1 file changed, 19 insertions(+), 12 deletions(-)
diff --git a/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c b/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c
index c0521fc9e8f6..4e87eea6986b 100644
--- a/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c
+++ b/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c
@@ -192,19 +192,22 @@ static struct kvm_pmu_event_filter *alloc_pmu_event_filter(uint32_t nevents)
 	return f;
 }
 
-
 static struct kvm_pmu_event_filter *
 create_pmu_event_filter(const uint64_t event_list[], uint32_t nevents,
-			uint32_t action, uint32_t flags)
+			uint32_t action, uint32_t flags,
+			uint32_t fixed_counter_bitmap)
 {
 	struct kvm_pmu_event_filter *f;
 	int i;
 
 	f = alloc_pmu_event_filter(nevents);
 	f->action = action;
+	f->fixed_counter_bitmap = fixed_counter_bitmap;
 	f->flags = flags;
-	for (i = 0; i < nevents; i++)
-		f->events[i] = event_list[i];
+	if (f->nevents) {
+		for (i = 0; i < f->nevents; i++)
+			f->events[i] = event_list[i];
+	}
 
 	return f;
 }
@@ -213,7 +216,7 @@ static struct kvm_pmu_event_filter *event_filter(uint32_t action)
 {
 	return create_pmu_event_filter(event_list,
 				       ARRAY_SIZE(event_list),
-				       action, 0);
+				       action, 0, 0);
 }
 
 /*
@@ -260,7 +263,7 @@ static void test_amd_deny_list(struct kvm_vcpu *vcpu)
 	struct kvm_pmu_event_filter *f;
 	uint64_t count;
 
-	f = create_pmu_event_filter(&event, 1, KVM_PMU_EVENT_DENY, 0);
+	f = create_pmu_event_filter(&event, 1, KVM_PMU_EVENT_DENY, 0, 0);
 	count = test_with_filter(vcpu, f);
 
 	free(f);
@@ -544,7 +547,7 @@ static struct perf_counter run_masked_events_test(struct kvm_vcpu *vcpu,
 
 	f = create_pmu_event_filter(masked_events, nmasked_events,
 				    KVM_PMU_EVENT_ALLOW,
-				    KVM_PMU_EVENT_FLAG_MASKED_EVENTS);
+				    KVM_PMU_EVENT_FLAG_MASKED_EVENTS, 0);
 	r.raw = test_with_filter(vcpu, f);
 	free(f);
 
@@ -726,12 +729,14 @@ static void test_masked_events(struct kvm_vcpu *vcpu)
 }
 
 static int run_filter_test(struct kvm_vcpu *vcpu, const uint64_t *events,
-			   uint32_t nevents, uint32_t flags)
+			   uint32_t nevents, uint32_t flags, uint32_t action,
+			   uint32_t fixed_counter_bitmap)
 {
 	struct kvm_pmu_event_filter *f;
 	int r;
 
-	f = create_pmu_event_filter(events, nevents, KVM_PMU_EVENT_ALLOW, flags);
+	f = create_pmu_event_filter(events, nevents, action, flags,
+				    fixed_counter_bitmap);
 	r = __vm_ioctl(vcpu->vm, KVM_SET_PMU_EVENT_FILTER, f);
 	free(f);
 
@@ -747,14 +752,16 @@ static void test_filter_ioctl(struct kvm_vcpu *vcpu)
 	 * Unfortunately having invalid bits set in event data is expected to
 	 * pass when flags == 0 (bits other than eventsel+umask).
 	 */
-	r = run_filter_test(vcpu, &e, 1, 0);
+	r = run_filter_test(vcpu, &e, 1, 0, KVM_PMU_EVENT_ALLOW, 0);
 	TEST_ASSERT(r == 0, "Valid PMU Event Filter is failing");
 
-	r = run_filter_test(vcpu, &e, 1, KVM_PMU_EVENT_FLAG_MASKED_EVENTS);
+	r = run_filter_test(vcpu, &e, 1, KVM_PMU_EVENT_FLAG_MASKED_EVENTS,
+			    KVM_PMU_EVENT_ALLOW, 0);
 	TEST_ASSERT(r != 0, "Invalid PMU Event Filter is expected to fail");
 
 	e = KVM_PMU_ENCODE_MASKED_ENTRY(0xff, 0xff, 0xff, 0xf);
-	r = run_filter_test(vcpu, &e, 1, KVM_PMU_EVENT_FLAG_MASKED_EVENTS);
+	r = run_filter_test(vcpu, &e, 1, KVM_PMU_EVENT_FLAG_MASKED_EVENTS,
+			    KVM_PMU_EVENT_ALLOW, 0);
 	TEST_ASSERT(r == 0, "Valid PMU Event Filter is failing");
 }
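
As a usage illustration only (not part of the patch, and the helper name
set_fixed_counter_filter() is hypothetical): with the f->nevents check in
place, a caller can pass an empty event list and rely solely on
fixed_counter_bitmap to restrict which guest fixed counters may count,
along the lines of:

static void set_fixed_counter_filter(struct kvm_vcpu *vcpu,
				     uint32_t fixed_counter_bitmap)
{
	struct kvm_pmu_event_filter *f;

	/*
	 * No general-purpose events: with nevents == 0 the copy loop is
	 * skipped, so only the fixed counter bitmap constrains the guest PMU.
	 */
	f = create_pmu_event_filter(NULL, 0, KVM_PMU_EVENT_ALLOW, 0,
				    fixed_counter_bitmap);
	vm_ioctl(vcpu->vm, KVM_SET_PMU_EVENT_FILTER, f);
	free(f);
}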