From: Jack Thomson <jackabt@amazon.com>
Add __gmem_abort() and __user_mem_abort() helpers that preserve -EAGAIN results. These will be used by the pre-fault implementation, which needs to retry on -EAGAIN.

Also add an optional page_size output parameter to __user_mem_abort() to return the VMA page size, which will be needed for pre-faulting.

No functional change intended.
Signed-off-by: Jack Thomson <jackabt@amazon.com>
---
 arch/arm64/kvm/mmu.c | 36 +++++++++++++++++++++++++++++-------
 1 file changed, 29 insertions(+), 7 deletions(-)
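For context, a rough sketch of how a follow-up pre-fault patch could consume these helpers. It is illustrative only and not part of this patch: apart from the __user_mem_abort() call, the function name, its arguments and the loop structure are made up for the example, and real code would also bound the -EAGAIN retries and check for pending signals.

/*
 * Hypothetical pre-fault loop (illustration only, not in this patch):
 * map the requested range one mapping at a time, retrying the same GPA
 * when the helper reports -EAGAIN instead of swallowing it the way
 * user_mem_abort() does.
 */
static int pre_fault_sketch(struct kvm_vcpu *vcpu, phys_addr_t gpa, u64 size,
			    struct kvm_memory_slot *memslot, unsigned long hva)
{
	while (size) {
		long page_size = PAGE_SIZE;
		int ret;

		/* -EAGAIN is passed through, so the caller can retry */
		ret = __user_mem_abort(vcpu, gpa, NULL, memslot, &page_size,
				       hva, false);
		if (ret == -EAGAIN)
			continue;
		if (ret)
			return ret;

		/* page_size reports the size of the mapping that was made */
		gpa += page_size;
		hva += page_size;
		size -= min(size, (u64)page_size);
	}

	return 0;
}

The same pattern would apply to __gmem_abort() for guest_memfd-backed slots.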
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index a36426ccd9b5..082e7d8ae655 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -1521,9 +1521,9 @@ static void adjust_nested_fault_perms(struct kvm_s2_trans *nested,
 #define KVM_PGTABLE_WALK_MEMABORT_FLAGS (KVM_PGTABLE_WALK_HANDLE_FAULT | KVM_PGTABLE_WALK_SHARED)
 
-static int gmem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
-		      struct kvm_s2_trans *nested,
-		      struct kvm_memory_slot *memslot, bool is_perm)
+static int __gmem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
+			struct kvm_s2_trans *nested,
+			struct kvm_memory_slot *memslot, bool is_perm)
 {
 	bool write_fault, exec_fault, writable;
 	enum kvm_pgtable_walk_flags flags = KVM_PGTABLE_WALK_MEMABORT_FLAGS;
@@ -1592,13 +1592,22 @@ static int gmem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	if (writable && !ret)
 		mark_page_dirty_in_slot(kvm, memslot, gfn);
 
+	return ret;
+}
+
+static int gmem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
+		      struct kvm_s2_trans *nested,
+		      struct kvm_memory_slot *memslot, bool is_perm)
+{
+	int ret = __gmem_abort(vcpu, fault_ipa, nested, memslot, is_perm);
 	return ret != -EAGAIN ? ret : 0;
 }
 
-static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
-			  struct kvm_s2_trans *nested,
-			  struct kvm_memory_slot *memslot, unsigned long hva,
-			  bool fault_is_perm)
+static int __user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
+			    struct kvm_s2_trans *nested,
+			    struct kvm_memory_slot *memslot,
+			    long *page_size, unsigned long hva,
+			    bool fault_is_perm)
 {
 	int ret = 0;
 	bool topup_memcache;
@@ -1871,10 +1880,23 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	kvm_release_faultin_page(kvm, page, !!ret, writable);
 	kvm_fault_unlock(kvm);
 
+	if (page_size)
+		*page_size = vma_pagesize;
+
 	/* Mark the page dirty only if the fault is handled successfully */
 	if (writable && !ret)
 		mark_page_dirty_in_slot(kvm, memslot, gfn);
 
+	return ret;
+}
+
+static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
+			  struct kvm_s2_trans *nested,
+			  struct kvm_memory_slot *memslot, unsigned long hva,
+			  bool fault_is_perm)
+{
+	int ret = __user_mem_abort(vcpu, fault_ipa, nested, memslot, NULL,
+				   hva, fault_is_perm);
 	return ret != -EAGAIN ? ret : 0;
 }