Add a selftest that loads itself into guest_memfd (via GUEST_MEMFD_FLAG_MMAP) and triggers an MMIO exit when executed. This exercises the x86 MMIO emulation code inside KVM for guest_memfd-backed memslots whose guest_memfd folios have been removed from the direct map. In particular, it validates that the x86 MMIO emulation code (guest page table walks + instruction fetch) correctly accesses gmem through the VMA that has been reflected into the memslot's userspace_addr field, instead of attempting direct map accesses.
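For context, the userspace side of the mechanism under test looks roughly like the sketch below. This is illustrative only, not part of this patch, and the helper name back_memslot_with_gmem() is made up: a guest_memfd is created with GUEST_MEMFD_FLAG_MMAP, mmap()ed, and the resulting mapping is what ends up in the memslot's userspace_addr. KVM_CREATE_GUEST_MEMFD and KVM_SET_USER_MEMORY_REGION2 are existing uAPI; GUEST_MEMFD_FLAG_MMAP comes from the guest_memfd mmap series this test builds on. Error handling is omitted for brevity.

#include <stdint.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/kvm.h>

static void *back_memslot_with_gmem(int vm_fd, uint64_t gpa, uint64_t size)
{
	struct kvm_create_guest_memfd gmem = {
		.size = size,
		.flags = GUEST_MEMFD_FLAG_MMAP,	/* from the gmem mmap series */
	};
	int gmem_fd = ioctl(vm_fd, KVM_CREATE_GUEST_MEMFD, &gmem);

	/* The VMA that gets reflected into the memslot's userspace_addr. */
	void *uaddr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
			   gmem_fd, 0);

	struct kvm_userspace_memory_region2 region = {
		.slot = 0,
		.flags = KVM_MEM_GUEST_MEMFD,
		.guest_phys_addr = gpa,
		.memory_size = size,
		.userspace_addr = (uint64_t)uaddr,
		.guest_memfd = gmem_fd,
	};
	ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION2, &region);

	return uaddr;
}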
Signed-off-by: Patrick Roy <roypat@amazon.co.uk>
---
 .../selftests/kvm/set_memory_region_test.c | 50 +++++++++++++++++--
 1 file changed, 46 insertions(+), 4 deletions(-)
diff --git a/tools/testing/selftests/kvm/set_memory_region_test.c b/tools/testing/selftests/kvm/set_memory_region_test.c
index ce3ac0fd6dfb..cb3bc642d376 100644
--- a/tools/testing/selftests/kvm/set_memory_region_test.c
+++ b/tools/testing/selftests/kvm/set_memory_region_test.c
@@ -603,6 +603,41 @@ static void test_mmio_during_vectoring(void)
 	kvm_vm_free(vm);
 }
+
+static void guest_code_trigger_mmio(void)
+{
+	/*
+	 * Read some GPA that is not backed by a memslot. KVM considers this
+	 * MMIO and tells userspace to emulate the read.
+	 */
+	READ_ONCE(*((uint64_t *)MEM_REGION_GPA));
+
+	GUEST_DONE();
+}
+
+static void test_guest_memfd_mmio(void)
+{
+	struct kvm_vm *vm;
+	struct kvm_vcpu *vcpu;
+	struct vm_shape shape = {
+		.mode = VM_MODE_DEFAULT,
+		.src_type = VM_MEM_SRC_GUEST_MEMFD_NO_DIRECT_MAP,
+	};
+	pthread_t vcpu_thread;
+
+	pr_info("Testing MMIO emulation for instructions in gmem\n");
+
+	vm = __vm_create_shape_with_one_vcpu(shape, &vcpu, 0, guest_code_trigger_mmio);
+
+	virt_map(vm, MEM_REGION_GPA, MEM_REGION_GPA, 1);
+
+	pthread_create(&vcpu_thread, NULL, vcpu_worker, vcpu);
+
+	/* If the MMIO read was successfully emulated, the vcpu thread will exit. */
+	pthread_join(vcpu_thread, NULL);
+
+	kvm_vm_free(vm);
+}
 #endif
 int main(int argc, char *argv[])
@@ -626,10 +661,17 @@ int main(int argc, char *argv[])
 	test_add_max_memory_regions();
 #ifdef __x86_64__
-	if (kvm_has_cap(KVM_CAP_GUEST_MEMFD) &&
-	    (kvm_check_cap(KVM_CAP_VM_TYPES) & BIT(KVM_X86_SW_PROTECTED_VM))) {
-		test_add_private_memory_region();
-		test_add_overlapping_private_memory_regions();
+	if (kvm_has_cap(KVM_CAP_GUEST_MEMFD)) {
+		if (kvm_check_cap(KVM_CAP_VM_TYPES) & BIT(KVM_X86_SW_PROTECTED_VM)) {
+			test_add_private_memory_region();
+			test_add_overlapping_private_memory_regions();
+		}
+
+		if (kvm_has_cap(KVM_CAP_GUEST_MEMFD_MMAP) &&
+		    kvm_has_cap(KVM_CAP_GUEST_MEMFD_NO_DIRECT_MAP))
+			test_guest_memfd_mmio();
+		else
+			pr_info("Skipping tests requiring KVM_CAP_GUEST_MEMFD_MMAP | KVM_CAP_GUEST_MEMFD_NO_DIRECT_MAP\n");
 	} else {
 		pr_info("Skipping tests for KVM_MEM_GUEST_MEMFD memory regions\n");
 	}
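A note on the vcpu_worker() helper the new test reuses: it is the existing worker in set_memory_region_test.c, which re-enters the guest on MMIO exits and returns once the guest signals UCALL_DONE, which is why pthread_join() returning implies the MMIO read was emulated successfully. A simplified sketch of such a worker (assumed shape for illustration, not the verbatim helper; assumes the usual kvm selftest headers):

#include <string.h>
#include "kvm_util.h"

static void *vcpu_worker_sketch(void *data)
{
	struct kvm_vcpu *vcpu = data;
	struct kvm_run *run = vcpu->run;
	struct ucall uc;

	for (;;) {
		vcpu_run(vcpu);

		if (run->exit_reason == KVM_EXIT_MMIO) {
			/* Satisfy the emulated read with dummy data. */
			if (!run->mmio.is_write)
				memset(run->mmio.data, 0, run->mmio.len);
			continue;
		}

		if (get_ucall(vcpu, &uc) == UCALL_DONE)
			break;
	}

	return NULL;
}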