From: Jack Thomson <jackabt@amazon.com>
Add kvm_arch_vcpu_pre_fault_memory() for arm64. The implementation hands the stage-2 faulting logic off to either __gmem_abort() or __user_mem_abort().
Update __gmem_abort() and __user_mem_abort() to take a new pre_fault parameter. When it is set, the write-fault and exec-fault checks are short-circuited to false, since a pre-fault must be treated as a read fault.
This closely follows the implementation on x86.
Signed-off-by: Jack Thomson <jackabt@amazon.com>
---
 arch/arm64/kvm/Kconfig |  1 +
 arch/arm64/kvm/arm.c   |  1 +
 arch/arm64/kvm/mmu.c   | 71 ++++++++++++++++++++++++++++++++++++------
 3 files changed, 64 insertions(+), 9 deletions(-)
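For context, userspace drives this path through the existing KVM_PRE_FAULT_MEMORY vCPU ioctl. A minimal usage sketch follows; vcpu_fd and the gpa/size values are placeholders, and the retry loop assumes the generic UAPI behaviour that gpa/size are advanced to reflect partial progress before -EINTR/-EAGAIN is returned:

	#include <errno.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	/* Pre-fault 2MiB of guest memory at a placeholder GPA. */
	struct kvm_pre_fault_memory range = {
		.gpa  = 0x80000000,
		.size = 0x200000,
		/* .flags and padding must remain zero */
	};

	while (range.size) {
		if (ioctl(vcpu_fd, KVM_PRE_FAULT_MEMORY, &range) == 0)
			break;		/* remaining range fully populated */
		if (errno != EINTR && errno != EAGAIN)
			break;		/* genuine failure */
		/* partial progress: gpa/size were updated, retry */
	}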
diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig
index bff62e75d681..1ac0605f86cb 100644
--- a/arch/arm64/kvm/Kconfig
+++ b/arch/arm64/kvm/Kconfig
@@ -25,6 +25,7 @@ menuconfig KVM
 	select HAVE_KVM_CPU_RELAX_INTERCEPT
 	select KVM_MMIO
 	select KVM_GENERIC_DIRTYLOG_READ_PROTECT
+	select KVM_GENERIC_PRE_FAULT_MEMORY
 	select KVM_XFER_TO_GUEST_WORK
 	select KVM_VFIO
 	select HAVE_KVM_DIRTY_RING_ACQ_REL
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index 888f7c7abf54..65654a742864 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -322,6 +322,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
 	case KVM_CAP_IRQFD_RESAMPLE:
 	case KVM_CAP_COUNTER_OFFSET:
 	case KVM_CAP_ARM_WRITABLE_IMP_ID_REGS:
+	case KVM_CAP_PRE_FAULT_MEMORY:
 		r = 1;
 		break;
 	case KVM_CAP_SET_GUEST_DEBUG2:
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 082e7d8ae655..002f564c6ac7 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -1523,7 +1523,8 @@ static void adjust_nested_fault_perms(struct kvm_s2_trans *nested,
 
 static int __gmem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 			struct kvm_s2_trans *nested,
-			struct kvm_memory_slot *memslot, bool is_perm)
+			struct kvm_memory_slot *memslot, bool is_perm,
+			bool pre_fault)
 {
 	bool write_fault, exec_fault, writable;
 	enum kvm_pgtable_walk_flags flags = KVM_PGTABLE_WALK_MEMABORT_FLAGS;
@@ -1537,6 +1538,9 @@ static int __gmem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	gfn_t gfn;
 	int ret;
 
+	if (pre_fault)
+		flags |= KVM_PGTABLE_WALK_PRE_FAULT;
+
 	ret = prepare_mmu_memcache(vcpu, true, &memcache);
 	if (ret)
 		return ret;
@@ -1546,8 +1550,8 @@ static int __gmem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	else
 		gfn = fault_ipa >> PAGE_SHIFT;
 
-	write_fault = kvm_is_write_fault(vcpu);
-	exec_fault = kvm_vcpu_trap_is_exec_fault(vcpu);
+	write_fault = !pre_fault && kvm_is_write_fault(vcpu);
+	exec_fault = !pre_fault && kvm_vcpu_trap_is_exec_fault(vcpu);
 
 	VM_WARN_ON_ONCE(write_fault && exec_fault);
 
@@ -1599,7 +1603,7 @@ static int gmem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 		      struct kvm_s2_trans *nested,
 		      struct kvm_memory_slot *memslot, bool is_perm)
 {
-	int ret = __gmem_abort(vcpu, fault_ipa, nested, memslot, is_perm);
+	int ret = __gmem_abort(vcpu, fault_ipa, nested, memslot, is_perm, false);
 	return ret != -EAGAIN ? ret : 0;
 }
 
@@ -1607,7 +1611,7 @@ static int __user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 			    struct kvm_s2_trans *nested,
 			    struct kvm_memory_slot *memslot, long *page_size,
 			    unsigned long hva,
-			    bool fault_is_perm)
+			    bool fault_is_perm, bool pre_fault)
 {
 	int ret = 0;
 	bool topup_memcache;
@@ -1631,10 +1635,13 @@ static int __user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	vm_flags_t vm_flags;
 	enum kvm_pgtable_walk_flags flags = KVM_PGTABLE_WALK_MEMABORT_FLAGS;
 
+	if (pre_fault)
+		flags |= KVM_PGTABLE_WALK_PRE_FAULT;
+
 	if (fault_is_perm)
 		fault_granule = kvm_vcpu_trap_get_perm_fault_granule(vcpu);
-	write_fault = kvm_is_write_fault(vcpu);
-	exec_fault = kvm_vcpu_trap_is_exec_fault(vcpu);
+	write_fault = !pre_fault && kvm_is_write_fault(vcpu);
+	exec_fault = !pre_fault && kvm_vcpu_trap_is_exec_fault(vcpu);
 	VM_WARN_ON_ONCE(write_fault && exec_fault);
 
 	/*
@@ -1895,8 +1902,8 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 			  struct kvm_memory_slot *memslot, unsigned long hva,
 			  bool fault_is_perm)
 {
-	int ret = __user_mem_abort(vcpu, fault_ipa, nested, memslot, NULL,
-				   hva, fault_is_perm);
+	int ret = __user_mem_abort(vcpu, fault_ipa, nested, memslot, NULL, hva,
+				   fault_is_perm, false);
 	return ret != -EAGAIN ? ret : 0;
 }
 
@@ -2468,3 +2475,49 @@ void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled)
 
 	trace_kvm_toggle_cache(*vcpu_pc(vcpu), was_enabled, now_enabled);
 }
+
+long kvm_arch_vcpu_pre_fault_memory(struct kvm_vcpu *vcpu,
+				    struct kvm_pre_fault_memory *range)
+{
+	int r;
+	hva_t hva;
+	phys_addr_t end;
+	long page_size;
+	struct kvm_memory_slot *memslot;
+	phys_addr_t ipa = range->gpa;
+	gfn_t gfn = gpa_to_gfn(range->gpa);
+
+	while (true) {
+		page_size = PAGE_SIZE;
+		memslot = gfn_to_memslot(vcpu->kvm, gfn);
+		if (!memslot)
+			return -ENOENT;
+
+		if (kvm_slot_has_gmem(memslot)) {
+			r = __gmem_abort(vcpu, ipa, NULL, memslot, false, true);
+		} else {
+			hva = gfn_to_hva_memslot_prot(memslot, gfn, NULL);
+			if (kvm_is_error_hva(hva))
+				return -EFAULT;
+			r = __user_mem_abort(vcpu, ipa, NULL, memslot, &page_size, hva, false,
+					     true);
+		}
+
+		if (r != -EAGAIN)
+			break;
+
+		if (signal_pending(current))
+			return -EINTR;
+
+		if (kvm_check_request(KVM_REQ_VM_DEAD, vcpu))
+			return -EIO;
+
+		cond_resched();
+	};
+
+	if (r < 0)
+		return r;
+
+	end = (range->gpa & ~(page_size - 1)) + page_size;
+	return min(range->size, end - range->gpa);
+}
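
As a worked illustration of the final calculation (made-up values, 4KiB pages): for a request with gpa = 0x10000800 and size = 0x10000 that ends up mapped with a 4KiB page, end = (0x10000800 & ~0xfff) + 0x1000 = 0x10001000, so the function returns min(0x10000, 0x800) = 0x800, and the generic pre-fault loop advances gpa/size by that amount before calling kvm_arch_vcpu_pre_fault_memory() again.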