On Sat, Apr 08, 2023, Nicholas Piggin wrote:
powerpc will require this to allocate MMU tables in guest memory that are aligned and larger than the base page size.
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
.../selftests/kvm/include/kvm_util_base.h | 2 + tools/testing/selftests/kvm/lib/kvm_util.c | 44 ++++++++++++------- 2 files changed, 29 insertions(+), 17 deletions(-)
diff --git a/tools/testing/selftests/kvm/include/kvm_util_base.h b/tools/testing/selftests/kvm/include/kvm_util_base.h index 16425da16861..8a27bd4111ff 100644 --- a/tools/testing/selftests/kvm/include/kvm_util_base.h +++ b/tools/testing/selftests/kvm/include/kvm_util_base.h @@ -679,6 +679,8 @@ const char *exit_reason_str(unsigned int exit_reason); vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min, uint32_t memslot); +vm_paddr_t vm_phy_pages_alloc_align(struct kvm_vm *vm, size_t num, size_t align,
vm_paddr_t paddr_min, uint32_t memslot);
vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num, vm_paddr_t paddr_min, uint32_t memslot); vm_paddr_t vm_alloc_page_table(struct kvm_vm *vm); diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c index 8ec20ac33de0..4f15bbbb8f5e 100644 --- a/tools/testing/selftests/kvm/lib/kvm_util.c +++ b/tools/testing/selftests/kvm/lib/kvm_util.c @@ -1898,6 +1898,7 @@ const char *exit_reason_str(unsigned int exit_reason)
- Input Args:
- vm - Virtual Machine
- num - number of pages
- align - pages alignment
- paddr_min - Physical address minimum
- memslot - Memory region to allocate page from
@@ -1911,7 +1912,7 @@ const char *exit_reason_str(unsigned int exit_reason)
- and their base address is returned. A TEST_ASSERT failure occurs if
- not enough pages are available at or above paddr_min.
*/ -vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num, +vm_paddr_t vm_phy_pages_alloc_align(struct kvm_vm *vm, size_t num, size_t align,
I'd prefer to use double underscores, even though they are imperfect, because appending a single specifier always seems to result in the name becoming stale sooner or later, e.g. when another param with a default is added.
And IIUC, PPC requires the page tables to be naturally aligned, so rather than expose the inner helper and copy+paste the rather odd KVM_GUEST_PAGE_TABLE_MIN_PADDR and vm->memslots[MEM_REGION_PT] stuff, what if we instead have vm_alloc_page_table() deal with the alignment? And provide a PPC-specific wrapper so that other architectures don't need to manually specify '1' page?
E.g.
--- .../selftests/kvm/include/kvm_util_base.h | 18 +++++++++++++++--- tools/testing/selftests/kvm/lib/kvm_util.c | 14 ++++++++------ .../selftests/kvm/lib/powerpc/processor.c | 8 ++------ 3 files changed, 25 insertions(+), 15 deletions(-)
diff --git a/tools/testing/selftests/kvm/include/kvm_util_base.h b/tools/testing/selftests/kvm/include/kvm_util_base.h index f14a059f58fb..e52405c9fa8b 100644 --- a/tools/testing/selftests/kvm/include/kvm_util_base.h +++ b/tools/testing/selftests/kvm/include/kvm_util_base.h @@ -700,11 +700,23 @@ const char *exit_reason_str(unsigned int exit_reason);
vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min, uint32_t memslot); -vm_paddr_t vm_phy_pages_alloc_align(struct kvm_vm *vm, size_t num, size_t align, - vm_paddr_t paddr_min, uint32_t memslot); vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num, vm_paddr_t paddr_min, uint32_t memslot); -vm_paddr_t vm_alloc_page_table(struct kvm_vm *vm); + +vm_paddr_t __vm_alloc_page_table(struct kvm_vm *vm, size_t nr_pages); + +/* + * PowerPC conditionally needs to allocate multiple pages for each page table, + * all other architectures consume exactly one page per table. + */ +#if defined(__powerpc64__) +#define vm_alloc_page_table __vm_alloc_page_table +#else +static inline vm_paddr_t vm_alloc_page_table(struct kvm_vm *vm) +{ + return __vm_alloc_page_table(vm, 1); +} +#endif
/* * ____vm_create() does KVM_CREATE_VM and little else. __vm_create() also diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c index 70f792ba444c..ffd18afe9725 100644 --- a/tools/testing/selftests/kvm/lib/kvm_util.c +++ b/tools/testing/selftests/kvm/lib/kvm_util.c @@ -1946,8 +1946,9 @@ const char *exit_reason_str(unsigned int exit_reason) * and their base address is returned. A TEST_ASSERT failure occurs if * not enough pages are available at or above paddr_min. */ -vm_paddr_t vm_phy_pages_alloc_align(struct kvm_vm *vm, size_t num, size_t align, - vm_paddr_t paddr_min, uint32_t memslot) +static vm_paddr_t __vm_phy_pages_alloc(struct kvm_vm *vm, size_t num, + size_t align, vm_paddr_t paddr_min, + uint32_t memslot) { struct userspace_mem_region *region; sparsebit_idx_t pg, base; @@ -1992,7 +1993,7 @@ vm_paddr_t vm_phy_pages_alloc_align(struct kvm_vm *vm, size_t num, size_t align, vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num, vm_paddr_t paddr_min, uint32_t memslot) { - return vm_phy_pages_alloc_align(vm, num, 1, paddr_min, memslot); + return __vm_phy_pages_alloc(vm, num, 1, paddr_min, memslot); }
vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min, @@ -2001,10 +2002,11 @@ vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min, return vm_phy_pages_alloc(vm, 1, paddr_min, memslot); }
-vm_paddr_t vm_alloc_page_table(struct kvm_vm *vm) +vm_paddr_t __vm_alloc_page_table(struct kvm_vm *vm, size_t nr_pages) { - return vm_phy_page_alloc(vm, KVM_GUEST_PAGE_TABLE_MIN_PADDR, - vm->memslots[MEM_REGION_PT]); + return __vm_phy_pages_alloc(vm, nr_pages, nr_pages, + KVM_GUEST_PAGE_TABLE_MIN_PADDR, + vm->memslots[MEM_REGION_PT]); }
/* diff --git a/tools/testing/selftests/kvm/lib/powerpc/processor.c b/tools/testing/selftests/kvm/lib/powerpc/processor.c index 7052ce9b5029..57d64d281467 100644 --- a/tools/testing/selftests/kvm/lib/powerpc/processor.c +++ b/tools/testing/selftests/kvm/lib/powerpc/processor.c @@ -44,9 +44,7 @@ void virt_arch_pgd_alloc(struct kvm_vm *vm) pgd_pages = (1UL << (RADIX_PGD_INDEX_SIZE + 3)) >> vm->page_shift; if (!pgd_pages) pgd_pages = 1; - pgtb = vm_phy_pages_alloc_align(vm, pgd_pages, pgd_pages, - KVM_GUEST_PAGE_TABLE_MIN_PADDR, - vm->memslots[MEM_REGION_PT]); + pgtb = vm_alloc_page_table(vm, pgd_pages); vm->pgd = pgtb;
/* Set the base page directory in the proc table */ @@ -168,9 +166,7 @@ void virt_arch_pg_map(struct kvm_vm *vm, uint64_t gva, uint64_t gpa) pt_pages = (1ULL << (nls + 3)) >> vm->page_shift; if (!pt_pages) pt_pages = 1; - pt = vm_phy_pages_alloc_align(vm, pt_pages, pt_pages, - KVM_GUEST_PAGE_TABLE_MIN_PADDR, - vm->memslots[MEM_REGION_PT]); + pt = vm_alloc_page_table(vm, pt_pages); pde = PDE_VALID | nls | pt; *pdep = cpu_to_be64(pde); }
base-commit: 15a281f5c83f34d4d1808e5f790403b0770c5e78