From: "Mike Rapoport (Microsoft)" rppt@kernel.org
Hi,
These patches allow guest_memfd to notify userspace about minor page faults using userfaultfd and let userspace resolve these page faults using UFFDIO_CONTINUE.
To allow UFFDIO_CONTINUE outside of the core mm, I added a get_shared_folio() callback to vm_ops that allows the address space backing a VMA to return a folio that exists in its page cache (patch 2).
In order for guest_memfd to notify userspace about page faults, there is a new VM_FAULT_UFFD_MINOR fault reason that a ->fault() handler can return to inform the core page fault handler that it needs to call handle_userfault() to complete the fault (patch 3).
Patch 4 plumbs these new goodies into guest_memfd.
This series is the minimal change I've been able to come up with to allow integration of guest_memfd with uffd. While refactoring uffd and making the mfill_atomic() flow more linear would have been a nice improvement, it is way out of scope for enabling uffd with guest_memfd.
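For reference, the flow on the userspace side looks roughly like the sketch below. This is only an illustration, not part of the patches; it mirrors what the selftest in patch 5 does. Error handling is omitted, and gmem_fd, len, src, offset and page_size are assumed to be set up elsewhere:

	int uffd = syscall(__NR_userfaultfd, O_CLOEXEC);
	struct uffdio_api api = {
		.api = UFFD_API,
		.features = UFFD_FEATURE_MINOR_GENERIC,
	};
	ioctl(uffd, UFFDIO_API, &api);

	/* the mapping that takes the minor faults ... */
	char *mem = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, gmem_fd, 0);
	/* ... and a second, unregistered mapping used to populate pages */
	char *mem_nofault = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, gmem_fd, 0);

	struct uffdio_register reg = {
		.range = { .start = (unsigned long)mem, .len = len },
		.mode = UFFDIO_REGISTER_MODE_MINOR,
	};
	ioctl(uffd, UFFDIO_REGISTER, &reg);

	/* for every minor fault event read from uffd: fill the page through
	 * the second mapping, then let the faulting thread continue */
	memcpy(mem_nofault + offset, src + offset, page_size);
	struct uffdio_continue cont = {
		.range = { .start = (unsigned long)mem + offset, .len = page_size },
	};
	ioctl(uffd, UFFDIO_CONTINUE, &cont);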
v2 changes:
* Introduce VM_FAULT_UFFD_MINOR to avoid exporting handle_userfault()
* Simplify vma_can_mfill_atomic()
* Rename get_pagecache_folio() to get_shared_folio() and use inode instead of
  vma as its argument
v1: https://lore.kernel.org/all/20251117114631.2029447-1-rppt@kernel.org
Mike Rapoport (Microsoft) (4):
  userfaultfd: move vma_can_userfault out of line
  userfaultfd, shmem: use a VMA callback to handle UFFDIO_CONTINUE
  mm: introduce VM_FAULT_UFFD_MINOR fault reason
  guest_memfd: add support for userfaultfd minor mode

Nikita Kalyazin (1):
  KVM: selftests: test userfaultfd minor for guest_memfd

 include/linux/mm.h                            |   9 ++
 include/linux/mm_types.h                      |   3 +
 include/linux/userfaultfd_k.h                 |  36 +-----
 mm/memory.c                                   |   2 +
 mm/shmem.c                                    |  21 +++-
 mm/userfaultfd.c                              |  80 +++++++++++---
 .../testing/selftests/kvm/guest_memfd_test.c  | 103 ++++++++++++++++++
 virt/kvm/guest_memfd.c                        |  29 +++++
 8 files changed, 232 insertions(+), 51 deletions(-)
base-commit: 6a23ae0a96a600d1d12557add110e0bb6e32730c
From: "Mike Rapoport (Microsoft)" rppt@kernel.org
vma_can_userfault() has grown pretty big and it is not called on a performance-critical path.
Move it out of line.
No functional changes.
Reviewed-by: David Hildenbrand (Red Hat) david@kernel.org
Signed-off-by: Mike Rapoport (Microsoft) rppt@kernel.org
---
 include/linux/userfaultfd_k.h | 36 ++---------------------------------
 mm/userfaultfd.c              | 34 +++++++++++++++++++++++++++++++++
 2 files changed, 36 insertions(+), 34 deletions(-)
diff --git a/include/linux/userfaultfd_k.h b/include/linux/userfaultfd_k.h
index c0e716aec26a..e4f43e7b063f 100644
--- a/include/linux/userfaultfd_k.h
+++ b/include/linux/userfaultfd_k.h
@@ -208,40 +208,8 @@ static inline bool userfaultfd_armed(struct vm_area_struct *vma)
 	return vma->vm_flags & __VM_UFFD_FLAGS;
 }
 
-static inline bool vma_can_userfault(struct vm_area_struct *vma,
-				     vm_flags_t vm_flags,
-				     bool wp_async)
-{
-	vm_flags &= __VM_UFFD_FLAGS;
-
-	if (vma->vm_flags & VM_DROPPABLE)
-		return false;
-
-	if ((vm_flags & VM_UFFD_MINOR) &&
-	    (!is_vm_hugetlb_page(vma) && !vma_is_shmem(vma)))
-		return false;
-
-	/*
-	 * If wp async enabled, and WP is the only mode enabled, allow any
-	 * memory type.
-	 */
-	if (wp_async && (vm_flags == VM_UFFD_WP))
-		return true;
-
-#ifndef CONFIG_PTE_MARKER_UFFD_WP
-	/*
-	 * If user requested uffd-wp but not enabled pte markers for
-	 * uffd-wp, then shmem & hugetlbfs are not supported but only
-	 * anonymous.
-	 */
-	if ((vm_flags & VM_UFFD_WP) && !vma_is_anonymous(vma))
-		return false;
-#endif
-
-	/* By default, allow any of anon|shmem|hugetlb */
-	return vma_is_anonymous(vma) || is_vm_hugetlb_page(vma) ||
-	       vma_is_shmem(vma);
-}
+bool vma_can_userfault(struct vm_area_struct *vma, vm_flags_t vm_flags,
+		       bool wp_async);
 
 static inline bool vma_has_uffd_without_event_remap(struct vm_area_struct *vma)
 {
diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
index af61b95c89e4..8dc964389b0d 100644
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -1977,6 +1977,40 @@ ssize_t move_pages(struct userfaultfd_ctx *ctx, unsigned long dst_start,
 	return moved ? moved : err;
 }
 
+bool vma_can_userfault(struct vm_area_struct *vma, vm_flags_t vm_flags,
+		       bool wp_async)
+{
+	vm_flags &= __VM_UFFD_FLAGS;
+
+	if (vma->vm_flags & VM_DROPPABLE)
+		return false;
+
+	if ((vm_flags & VM_UFFD_MINOR) &&
+	    (!is_vm_hugetlb_page(vma) && !vma_is_shmem(vma)))
+		return false;
+
+	/*
+	 * If wp async enabled, and WP is the only mode enabled, allow any
+	 * memory type.
+	 */
+	if (wp_async && (vm_flags == VM_UFFD_WP))
+		return true;
+
+#ifndef CONFIG_PTE_MARKER_UFFD_WP
+	/*
+	 * If user requested uffd-wp but not enabled pte markers for
+	 * uffd-wp, then shmem & hugetlbfs are not supported but only
+	 * anonymous.
+	 */
+	if ((vm_flags & VM_UFFD_WP) && !vma_is_anonymous(vma))
+		return false;
+#endif
+
+	/* By default, allow any of anon|shmem|hugetlb */
+	return vma_is_anonymous(vma) || is_vm_hugetlb_page(vma) ||
+	       vma_is_shmem(vma);
+}
+
 static void userfaultfd_set_vm_flags(struct vm_area_struct *vma,
 				     vm_flags_t vm_flags)
 {
From: "Mike Rapoport (Microsoft)" rppt@kernel.org
When userspace resolves a page fault in a shmem VMA with UFFDIO_CONTINUE, the kernel needs to get a folio that already exists in the page cache backing that VMA.
Instead of using shmem_get_folio() for that, add a get_shared_folio() method to 'struct vm_operations_struct' that returns a folio if it exists in the VMA's page cache at the given pgoff.
Implement the get_shared_folio() method for shmem and slightly refactor userfaultfd's mfill_atomic() and mfill_atomic_pte_continue() to support this new API.
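For reference, a backend that keeps its pages in the regular page cache could implement the callback along the lines of the sketch below. This is only an illustration, not part of this patch: myfs_get_shared_folio() is a made-up name, and using filemap_lock_folio() is just one way to satisfy the contract of returning a locked folio with a reference held, or an ERR_PTR when the folio is not present:

	static struct folio *myfs_get_shared_folio(struct inode *inode, pgoff_t pgoff)
	{
		struct folio *folio;

		/* look up an existing folio, do not allocate a new one */
		folio = filemap_lock_folio(inode->i_mapping, pgoff);
		if (IS_ERR(folio))
			return folio;

		/* returned locked and with a reference held, as the caller expects */
		return folio;
	}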
Signed-off-by: Mike Rapoport (Microsoft) rppt@kernel.org
---
 include/linux/mm.h |  9 ++++++++
 mm/shmem.c         | 19 +++++++++++++++++
 mm/userfaultfd.c   | 52 +++++++++++++++++++++++++++++-----------------
 3 files changed, 61 insertions(+), 19 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 7c79b3369b82..a5747c306cc2 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -690,6 +690,15 @@ struct vm_operations_struct {
 	struct page *(*find_normal_page)(struct vm_area_struct *vma,
 					 unsigned long addr);
 #endif /* CONFIG_FIND_NORMAL_PAGE */
+#ifdef CONFIG_USERFAULTFD
+	/*
+	 * Called by userfault to resolve UFFDIO_CONTINUE request.
+	 * Should return the folio found at pgoff in the VMA's pagecache if it
+	 * exists or ERR_PTR otherwise.
+	 * The returned folio is locked and with reference held.
+	 */
+	struct folio *(*get_shared_folio)(struct inode *inode, pgoff_t pgoff);
+#endif
 };
 
 #ifdef CONFIG_NUMA_BALANCING
diff --git a/mm/shmem.c b/mm/shmem.c
index 58701d14dd96..aaa21bb60f51 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -3263,6 +3263,19 @@ int shmem_mfill_atomic_pte(pmd_t *dst_pmd,
 	shmem_inode_unacct_blocks(inode, 1);
 	return ret;
 }
+
+static struct folio *shmem_get_shared_folio(struct inode *inode,
+					    pgoff_t pgoff)
+{
+	struct folio *folio;
+	int err;
+
+	err = shmem_get_folio(inode, pgoff, 0, &folio, SGP_NOALLOC);
+	if (err)
+		return ERR_PTR(err);
+
+	return folio;
+}
 #endif /* CONFIG_USERFAULTFD */
 
 #ifdef CONFIG_TMPFS
@@ -5295,6 +5308,9 @@ static const struct vm_operations_struct shmem_vm_ops = {
 	.set_policy = shmem_set_policy,
 	.get_policy = shmem_get_policy,
 #endif
+#ifdef CONFIG_USERFAULTFD
+	.get_shared_folio = shmem_get_shared_folio,
+#endif
 };
 
 static const struct vm_operations_struct shmem_anon_vm_ops = {
@@ -5304,6 +5320,9 @@ static const struct vm_operations_struct shmem_anon_vm_ops = {
 	.set_policy = shmem_set_policy,
 	.get_policy = shmem_get_policy,
 #endif
+#ifdef CONFIG_USERFAULTFD
+	.get_shared_folio = shmem_get_shared_folio,
+#endif
 };
 
 int shmem_init_fs_context(struct fs_context *fc)
diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
index 8dc964389b0d..04563f88aab5 100644
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -388,15 +388,12 @@ static int mfill_atomic_pte_continue(pmd_t *dst_pmd,
 	struct page *page;
 	int ret;
 
-	ret = shmem_get_folio(inode, pgoff, 0, &folio, SGP_NOALLOC);
+	folio = dst_vma->vm_ops->get_shared_folio(inode, pgoff);
 	/* Our caller expects us to return -EFAULT if we failed to find folio */
-	if (ret == -ENOENT)
-		ret = -EFAULT;
-	if (ret)
-		goto out;
-	if (!folio) {
-		ret = -EFAULT;
-		goto out;
+	if (IS_ERR_OR_NULL(folio)) {
+		if (PTR_ERR(folio) == -ENOENT || !folio)
+			return -EFAULT;
+		return PTR_ERR(folio);
 	}
 
 	page = folio_file_page(folio, pgoff);
@@ -411,13 +408,12 @@ static int mfill_atomic_pte_continue(pmd_t *dst_pmd,
 		goto out_release;
 
 	folio_unlock(folio);
-	ret = 0;
-out:
-	return ret;
+	return 0;
+
 out_release:
 	folio_unlock(folio);
 	folio_put(folio);
-	goto out;
+	return ret;
 }
 
 /* Handles UFFDIO_POISON for all non-hugetlb VMAs. */
@@ -694,6 +690,15 @@ static __always_inline ssize_t mfill_atomic_pte(pmd_t *dst_pmd,
 	return err;
 }
 
+static __always_inline bool vma_can_mfill_atomic(struct vm_area_struct *vma,
+						 uffd_flags_t flags)
+{
+	if (uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE))
+		return vma->vm_ops && vma->vm_ops->get_shared_folio;
+
+	return vma_is_anonymous(vma) || vma_is_shmem(vma);
+}
+
 static __always_inline ssize_t mfill_atomic(struct userfaultfd_ctx *ctx,
 					    unsigned long dst_start,
 					    unsigned long src_start,
@@ -766,10 +771,7 @@ static __always_inline ssize_t mfill_atomic(struct userfaultfd_ctx *ctx,
 		return mfill_atomic_hugetlb(ctx, dst_vma, dst_start,
 					    src_start, len, flags);
 
-	if (!vma_is_anonymous(dst_vma) && !vma_is_shmem(dst_vma))
-		goto out_unlock;
-	if (!vma_is_shmem(dst_vma) &&
-	    uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE))
+	if (!vma_can_mfill_atomic(dst_vma, flags))
 		goto out_unlock;
 
 	while (src_addr < src_start + len) {
@@ -1985,9 +1987,21 @@ bool vma_can_userfault(struct vm_area_struct *vma, vm_flags_t vm_flags,
 	if (vma->vm_flags & VM_DROPPABLE)
 		return false;
 
-	if ((vm_flags & VM_UFFD_MINOR) &&
-	    (!is_vm_hugetlb_page(vma) && !vma_is_shmem(vma)))
-		return false;
+	if (vm_flags & VM_UFFD_MINOR) {
+		/*
+		 * If only MINOR mode is requested and we can request an
+		 * existing folio from VMA's page cache, allow it
+		 */
+		if (vm_flags == VM_UFFD_MINOR && vma->vm_ops &&
+		    vma->vm_ops->get_shared_folio)
+			return true;
+		/*
+		 * Only hugetlb and shmem can support MINOR mode in combination
+		 * with other modes
+		 */
+		if (!is_vm_hugetlb_page(vma) && !vma_is_shmem(vma))
+			return false;
+	}
 
 	/*
 	 * If wp async enabled, and WP is the only mode enabled, allow any
From: "Mike Rapoport (Microsoft)" rppt@kernel.org
When a VMA is registered with userfaultfd in minor mode, its ->fault() method should check whether a folio exists in the page cache and, if it does, ->fault() should call handle_userfault(VM_UFFD_MINOR).
Instead of calling handle_userfault() directly from a specific ->fault() implementation, introduce a new fault reason, VM_FAULT_UFFD_MINOR, that notifies the core page fault handler that it should call handle_userfault(VM_UFFD_MINOR) to complete the page fault.
Replace the call to handle_userfault(VM_UFFD_MINOR) in shmem with the new VM_FAULT_UFFD_MINOR.
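With this in place, a ->fault() handler for a backend that supports minor mode can follow a pattern like the sketch below. This is only an illustration, not part of this patch: my_fault() is a made-up name, the filemap_lock_folio() lookup is an assumption about how such a backend would find its folio, and the non-uffd path is elided. The real conversions are the shmem change below and the guest_memfd change in the next patch:

	static vm_fault_t my_fault(struct vm_fault *vmf)
	{
		struct inode *inode = file_inode(vmf->vma->vm_file);
		struct folio *folio;

		/* look up an existing folio; do not allocate */
		folio = filemap_lock_folio(inode->i_mapping, vmf->pgoff);
		if (!IS_ERR(folio) && userfaultfd_minor(vmf->vma)) {
			folio_unlock(folio);
			folio_put(folio);
			/* core fault handler calls handle_userfault(VM_UFFD_MINOR) */
			return VM_FAULT_UFFD_MINOR;
		}

		/* ... normal fault handling continues here ... */
		return VM_FAULT_SIGBUS;
	}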
Suggested-by: David Hildenbrand (Red Hat) david@kernel.org
Signed-off-by: Mike Rapoport (Microsoft) rppt@kernel.org
---
 include/linux/mm_types.h | 3 +++
 mm/memory.c              | 2 ++
 mm/shmem.c               | 2 +-
 3 files changed, 6 insertions(+), 1 deletion(-)
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 90e5790c318f..eb135369940f 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -1523,6 +1523,8 @@ typedef __bitwise unsigned int vm_fault_t;
  *			fsync() to complete (for synchronous page faults
  *			in DAX)
  * @VM_FAULT_COMPLETED:	->fault completed, meanwhile mmap lock released
+ * @VM_FAULT_UFFD_MINOR: ->fault did not modify page tables and needs
+ *			handle_userfault(VM_UFFD_MINOR) to complete
  * @VM_FAULT_HINDEX_MASK: mask HINDEX value
  *
  */
@@ -1540,6 +1542,7 @@ enum vm_fault_reason {
 	VM_FAULT_DONE_COW = (__force vm_fault_t)0x001000,
 	VM_FAULT_NEEDDSYNC = (__force vm_fault_t)0x002000,
 	VM_FAULT_COMPLETED = (__force vm_fault_t)0x004000,
+	VM_FAULT_UFFD_MINOR = (__force vm_fault_t)0x008000,
 	VM_FAULT_HINDEX_MASK = (__force vm_fault_t)0x0f0000,
 };
 
diff --git a/mm/memory.c b/mm/memory.c
index b59ae7ce42eb..94acbac8cefb 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -5279,6 +5279,8 @@ static vm_fault_t __do_fault(struct vm_fault *vmf)
 	}
 
 	ret = vma->vm_ops->fault(vmf);
+	if (unlikely(ret & VM_FAULT_UFFD_MINOR))
+		return handle_userfault(vmf, VM_UFFD_MINOR);
 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY |
 			    VM_FAULT_DONE_COW)))
 		return ret;
diff --git a/mm/shmem.c b/mm/shmem.c
index aaa21bb60f51..6dcb73b52bcc 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -2461,7 +2461,7 @@ static int shmem_get_folio_gfp(struct inode *inode, pgoff_t index,
 	if (folio && vma && userfaultfd_minor(vma)) {
 		if (!xa_is_value(folio))
 			folio_put(folio);
-		*fault_type = handle_userfault(vmf, VM_UFFD_MINOR);
+		*fault_type = VM_FAULT_UFFD_MINOR;
 		return 0;
 	}
From: "Mike Rapoport (Microsoft)" rppt@kernel.org
userfaultfd notifications about minor page faults are used for live migration and snapshotting of VMs with memory backed by shared hugetlbfs or tmpfs mappings, as described in detail in commit 7677f7fd8be7 ("userfaultfd: add minor fault registration mode").
To use the same mechanism for VMs that use guest_memfd to map their memory, guest_memfd should support userfaultfd minor mode.
Extend the ->fault() method of guest_memfd with the ability to notify the core page fault handler that a page fault requires handle_userfault(VM_UFFD_MINOR) to complete, and add an implementation of ->get_shared_folio() to guest_memfd's vm_ops.
Signed-off-by: Mike Rapoport (Microsoft) rppt@kernel.org
---
 virt/kvm/guest_memfd.c | 29 +++++++++++++++++++++++++++++
 1 file changed, 29 insertions(+)
diff --git a/virt/kvm/guest_memfd.c b/virt/kvm/guest_memfd.c
index ffadc5ee8e04..bc8337f104ce 100644
--- a/virt/kvm/guest_memfd.c
+++ b/virt/kvm/guest_memfd.c
@@ -4,6 +4,7 @@
 #include <linux/kvm_host.h>
 #include <linux/pagemap.h>
 #include <linux/anon_inodes.h>
+#include <linux/userfaultfd_k.h>
 
 #include "kvm_mm.h"
 
@@ -369,6 +370,12 @@ static vm_fault_t kvm_gmem_fault_user_mapping(struct vm_fault *vmf)
 		return vmf_error(err);
 	}
 
+	if (userfaultfd_minor(vmf->vma)) {
+		folio_unlock(folio);
+		folio_put(folio);
+		return VM_FAULT_UFFD_MINOR;
+	}
+
 	if (WARN_ON_ONCE(folio_test_large(folio))) {
 		ret = VM_FAULT_SIGBUS;
 		goto out_folio;
@@ -390,8 +397,30 @@ static vm_fault_t kvm_gmem_fault_user_mapping(struct vm_fault *vmf)
 	return ret;
 }
 
+#ifdef CONFIG_USERFAULTFD
+static struct folio *kvm_gmem_get_shared_folio(struct inode *inode,
+					       pgoff_t pgoff)
+{
+	struct folio *folio;
+
+	folio = kvm_gmem_get_folio(inode, pgoff);
+	if (IS_ERR_OR_NULL(folio))
+		return folio;
+
+	if (!folio_test_uptodate(folio)) {
+		clear_highpage(folio_page(folio, 0));
+		kvm_gmem_mark_prepared(folio);
+	}
+
+	return folio;
+}
+#endif
+
 static const struct vm_operations_struct kvm_gmem_vm_ops = {
 	.fault = kvm_gmem_fault_user_mapping,
+#ifdef CONFIG_USERFAULTFD
+	.get_shared_folio = kvm_gmem_get_shared_folio,
+#endif
 };
 
 static int kvm_gmem_mmap(struct file *file, struct vm_area_struct *vma)
From: Nikita Kalyazin kalyazin@amazon.com
The test demonstrates that a minor userfaultfd event in guest_memfd can be resolved via a memcpy followed by a UFFDIO_CONTINUE ioctl.
Signed-off-by: Nikita Kalyazin kalyazin@amazon.com
Co-developed-by: Mike Rapoport (Microsoft) rppt@kernel.org
Signed-off-by: Mike Rapoport (Microsoft) rppt@kernel.org
---
 .../testing/selftests/kvm/guest_memfd_test.c | 103 ++++++++++++++++++
 1 file changed, 103 insertions(+)
diff --git a/tools/testing/selftests/kvm/guest_memfd_test.c b/tools/testing/selftests/kvm/guest_memfd_test.c
index e7d9aeb418d3..a5d3ed21d7bb 100644
--- a/tools/testing/selftests/kvm/guest_memfd_test.c
+++ b/tools/testing/selftests/kvm/guest_memfd_test.c
@@ -10,13 +10,17 @@
 #include <errno.h>
 #include <stdio.h>
 #include <fcntl.h>
+#include <pthread.h>
 
 #include <linux/bitmap.h>
 #include <linux/falloc.h>
 #include <linux/sizes.h>
+#include <linux/userfaultfd.h>
 #include <sys/mman.h>
 #include <sys/types.h>
 #include <sys/stat.h>
+#include <sys/syscall.h>
+#include <sys/ioctl.h>
 
 #include "kvm_util.h"
 #include "test_util.h"
@@ -254,6 +258,104 @@ static void test_guest_memfd_flags(struct kvm_vm *vm)
 	}
 }
 
+struct fault_args {
+	char *addr;
+	volatile char value;
+};
+
+static void *fault_thread_fn(void *arg)
+{
+	struct fault_args *args = arg;
+
+	/* Trigger page fault */
+	args->value = *args->addr;
+	return NULL;
+}
+
+static void test_uffd_minor(int fd, size_t total_size)
+{
+	struct uffdio_api uffdio_api = {
+		.api = UFFD_API,
+		.features = UFFD_FEATURE_MINOR_GENERIC,
+	};
+	struct uffdio_register uffd_reg;
+	struct uffdio_continue uffd_cont;
+	struct uffd_msg msg;
+	struct fault_args args;
+	pthread_t fault_thread;
+	void *mem, *mem_nofault, *buf = NULL;
+	int uffd, ret;
+	off_t offset = page_size;
+	void *fault_addr;
+
+	ret = posix_memalign(&buf, page_size, total_size);
+	TEST_ASSERT_EQ(ret, 0);
+
+	memset(buf, 0xaa, total_size);
+
+	uffd = syscall(__NR_userfaultfd, O_CLOEXEC);
+	TEST_ASSERT(uffd != -1, "userfaultfd creation should succeed");
+
+	ret = ioctl(uffd, UFFDIO_API, &uffdio_api);
+	TEST_ASSERT(ret != -1, "ioctl(UFFDIO_API) should succeed");
+
+	mem = mmap(NULL, total_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
+	TEST_ASSERT(mem != MAP_FAILED, "mmap should succeed");
+
+	mem_nofault = mmap(NULL, total_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
+	TEST_ASSERT(mem_nofault != MAP_FAILED, "mmap should succeed");
+
+	uffd_reg.range.start = (unsigned long)mem;
+	uffd_reg.range.len = total_size;
+	uffd_reg.mode = UFFDIO_REGISTER_MODE_MINOR;
+	ret = ioctl(uffd, UFFDIO_REGISTER, &uffd_reg);
+	TEST_ASSERT(ret != -1, "ioctl(UFFDIO_REGISTER) should succeed");
+
+	ret = fallocate(fd, FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE,
+			offset, page_size);
+	TEST_ASSERT(!ret, "fallocate(PUNCH_HOLE) should succeed");
+
+	fault_addr = mem + offset;
+	args.addr = fault_addr;
+
+	ret = pthread_create(&fault_thread, NULL, fault_thread_fn, &args);
+	TEST_ASSERT(ret == 0, "pthread_create should succeed");
+
+	ret = read(uffd, &msg, sizeof(msg));
+	TEST_ASSERT(ret != -1, "read from userfaultfd should succeed");
+	TEST_ASSERT(msg.event == UFFD_EVENT_PAGEFAULT, "event type should be pagefault");
+	TEST_ASSERT((void *)(msg.arg.pagefault.address & ~(page_size - 1)) == fault_addr,
+		    "pagefault should occur at expected address");
+
+	memcpy(mem_nofault + offset, buf + offset, page_size);
+
+	uffd_cont.range.start = (unsigned long)fault_addr;
+	uffd_cont.range.len = page_size;
+	uffd_cont.mode = 0;
+	ret = ioctl(uffd, UFFDIO_CONTINUE, &uffd_cont);
+	TEST_ASSERT(ret != -1, "ioctl(UFFDIO_CONTINUE) should succeed");
+
+	/*
+	 * wait for fault_thread to finish to make sure fault happened and was
+	 * resolved before we verify the values
+	 */
+	ret = pthread_join(fault_thread, NULL);
+	TEST_ASSERT(ret == 0, "pthread_join should succeed");
+
+	TEST_ASSERT(args.value == *(char *)(mem_nofault + offset),
+		    "memory should contain the value that was copied");
+	TEST_ASSERT(args.value == *(char *)(mem + offset),
+		    "no further fault is expected");
+
+	ret = munmap(mem_nofault, total_size);
+	TEST_ASSERT(!ret, "munmap should succeed");
+
+	ret = munmap(mem, total_size);
+	TEST_ASSERT(!ret, "munmap should succeed");
+	free(buf);
+	close(uffd);
+}
+
 #define gmem_test(__test, __vm, __flags)				\
 do {									\
 	int fd = vm_create_guest_memfd(__vm, page_size * 4, __flags);	\
@@ -273,6 +375,7 @@ static void __test_guest_memfd(struct kvm_vm *vm, uint64_t flags)
 	if (flags & GUEST_MEMFD_FLAG_INIT_SHARED) {
 		gmem_test(mmap_supported, vm, flags);
 		gmem_test(fault_overflow, vm, flags);
+		gmem_test(uffd_minor, vm, flags);
 	} else {
 		gmem_test(fault_private, vm, flags);
 	}