Add vm_phy_pages_alloc_align(), a variant of vm_phy_pages_alloc() that allocates a run of guest physical pages at a requested alignment (specified in units of pages), and make vm_phy_pages_alloc() a wrapper that passes an alignment of 1. powerpc will require this to allocate MMU tables in guest memory that are larger than the base page size and must be placed at aligned addresses.
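For illustration only (not something this patch adds), a powerpc test built on kvm_util.h could call the new helper along these lines. The wrapper name, the table size and memslot 0 are made-up example values; KVM_GUEST_PAGE_TABLE_MIN_PADDR is the existing minimum-paddr constant from the selftest headers:

  /*
   * Hypothetical caller: allocate a multi-page table in guest memory whose
   * base address is aligned to its own size.  Passing the same value for
   * num and align (both in units of vm->page_size) yields a naturally
   * aligned block of guest physical pages.
   */
  static vm_paddr_t alloc_guest_mmu_table(struct kvm_vm *vm, size_t table_size)
  {
          size_t nr_pages = table_size / vm->page_size;

          return vm_phy_pages_alloc_align(vm, nr_pages, nr_pages,
                                          KVM_GUEST_PAGE_TABLE_MIN_PADDR, 0);
  }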
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
---
 .../selftests/kvm/include/kvm_util_base.h  |  2 +
 tools/testing/selftests/kvm/lib/kvm_util.c | 44 ++++++++++++-------
 2 files changed, 29 insertions(+), 17 deletions(-)
diff --git a/tools/testing/selftests/kvm/include/kvm_util_base.h b/tools/testing/selftests/kvm/include/kvm_util_base.h
index 16425da16861..8a27bd4111ff 100644
--- a/tools/testing/selftests/kvm/include/kvm_util_base.h
+++ b/tools/testing/selftests/kvm/include/kvm_util_base.h
@@ -679,6 +679,8 @@ const char *exit_reason_str(unsigned int exit_reason);
 
 vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min,
                              uint32_t memslot);
+vm_paddr_t vm_phy_pages_alloc_align(struct kvm_vm *vm, size_t num, size_t align,
+                                    vm_paddr_t paddr_min, uint32_t memslot);
 vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
                               vm_paddr_t paddr_min, uint32_t memslot);
 vm_paddr_t vm_alloc_page_table(struct kvm_vm *vm);
diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c
index 8ec20ac33de0..4f15bbbb8f5e 100644
--- a/tools/testing/selftests/kvm/lib/kvm_util.c
+++ b/tools/testing/selftests/kvm/lib/kvm_util.c
@@ -1898,6 +1898,7 @@ const char *exit_reason_str(unsigned int exit_reason)
  * Input Args:
  *   vm - Virtual Machine
  *   num - number of pages
+ *   align - pages alignment
  *   paddr_min - Physical address minimum
  *   memslot - Memory region to allocate page from
  *
@@ -1911,7 +1912,7 @@ const char *exit_reason_str(unsigned int exit_reason)
  * and their base address is returned. A TEST_ASSERT failure occurs if
  * not enough pages are available at or above paddr_min.
  */
-vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
+vm_paddr_t vm_phy_pages_alloc_align(struct kvm_vm *vm, size_t num, size_t align,
                               vm_paddr_t paddr_min, uint32_t memslot)
 {
         struct userspace_mem_region *region;
@@ -1925,24 +1926,27 @@ vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
                 paddr_min, vm->page_size);
 
         region = memslot2region(vm, memslot);
-        base = pg = paddr_min >> vm->page_shift;
-
-        do {
-                for (; pg < base + num; ++pg) {
-                        if (!sparsebit_is_set(region->unused_phy_pages, pg)) {
-                                base = pg = sparsebit_next_set(region->unused_phy_pages, pg);
-                                break;
+        base = paddr_min >> vm->page_shift;
+
+again:
+        base = (base + align - 1) & ~(align - 1);
+        for (pg = base; pg < base + num; ++pg) {
+                if (!sparsebit_is_set(region->unused_phy_pages, pg)) {
+                        base = sparsebit_next_set(region->unused_phy_pages, pg);
+                        if (!base) {
+                                fprintf(stderr, "No guest physical pages "
+                                        "available, paddr_min: 0x%lx "
+                                        "page_size: 0x%x memslot: %u "
+                                        "num_pages: %lu align: %lu\n",
+                                        paddr_min, vm->page_size, memslot,
+                                        num, align);
+                                fputs("---- vm dump ----\n", stderr);
+                                vm_dump(stderr, vm, 2);
+                                TEST_ASSERT(false, "false");
+                                abort();
                         }
+                        goto again;
                 }
-        } while (pg && pg != base + num);
-
-        if (pg == 0) {
-                fprintf(stderr, "No guest physical page available, "
-                        "paddr_min: 0x%lx page_size: 0x%x memslot: %u\n",
-                        paddr_min, vm->page_size, memslot);
-                fputs("---- vm dump ----\n", stderr);
-                vm_dump(stderr, vm, 2);
-                abort();
         }
 
         for (pg = base; pg < base + num; ++pg)
@@ -1951,6 +1955,12 @@ vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
         return base * vm->page_size;
 }
 
+vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
+                              vm_paddr_t paddr_min, uint32_t memslot)
+{
+        return vm_phy_pages_alloc_align(vm, num, 1, paddr_min, memslot);
+}
+
 vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min,
                              uint32_t memslot)
 {
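One note on the rounding used in vm_phy_pages_alloc_align(): base = (base + align - 1) & ~(align - 1) rounds up correctly only when align is a power of two (the align = 1 passed by the vm_phy_pages_alloc() wrapper trivially satisfies this). A minimal standalone sketch of that idiom, with names of my own choosing:

  #include <assert.h>
  #include <inttypes.h>
  #include <stdio.h>

  /* Round x up to the next multiple of align; align must be a power of two. */
  static uint64_t align_up(uint64_t x, uint64_t align)
  {
          assert(align && !(align & (align - 1)));
          return (x + align - 1) & ~(align - 1);
  }

  int main(void)
  {
          /* Page frame number 0x1801 rounded up to a 16-page boundary is 0x1810. */
          printf("0x%" PRIx64 "\n", align_up(0x1801, 0x10));
          assert(align_up(0x1801, 0x10) == 0x1810);
          return 0;
  }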