On Tue, Apr 20, 2021 at 3:08 PM Axel Rasmussen <axelrasmussen@google.com> wrote:
With this change, userspace can resolve a minor fault within a shmem-backed area with a UFFDIO_CONTINUE ioctl. The semantics match those of the existing hugetlbfs support: we look up the existing page in the page cache, and install a PTE for it.
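To illustrate the intended use from userspace (a minimal sketch, not part of this patch; it assumes the shmem region was already registered with UFFDIO_REGISTER_MODE_MINOR and that a minor-fault event was read from the uffd):

	#include <linux/userfaultfd.h>
	#include <sys/ioctl.h>

	/*
	 * Resolve a minor fault: the contents are already in the page
	 * cache, so just ask the kernel to install PTEs for the range.
	 * mode == 0 also wakes the faulting thread(s) when done.
	 */
	static int uffd_continue(int uffd, unsigned long addr, unsigned long len)
	{
		struct uffdio_continue cont = {
			.range = { .start = addr, .len = len },
			.mode = 0,
		};

		return ioctl(uffd, UFFDIO_CONTINUE, &cont);
	}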
This commit introduces a new helper: mcopy_atomic_install_pte.
Why handle UFFDIO_CONTINUE for shmem in mm/userfaultfd.c, instead of in shmem.c? The existing userfault implementation relies on shmem.c only for VM_SHARED VMAs; however, minor fault handling / CONTINUE works just fine for !VM_SHARED VMAs as well. We'd prefer to handle CONTINUE for shmem in one place, regardless of shared/private, to reduce code duplication.
Why add a new mcopy_atomic_install_pte helper? A problem we have with CONTINUE is that shmem_mcopy_atomic_pte() and mcopy_atomic_pte() are *close* to what we want, but not exactly: we do want to set up the PTEs in a CONTINUE operation, but we don't want to e.g. allocate a new page, charge it (e.g. to the shmem inode), manipulate various flags, etc. There is also the problem stated above: shmem_mcopy_atomic_pte() and mcopy_atomic_pte() each handle only one half (shared vs. private) of the cases CONTINUE cares about. So, introduce mcontinue_atomic_pte() to handle all of the shmem CONTINUE cases, built on the new helper so that it doesn't duplicate code with mcopy_atomic_pte().
In a future commit, shmem_mcopy_atomic_pte() will also be modified to use this new helper. However, since that is a bigger refactor, it seems clearest to do it as a separate change.
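For orientation, a sketch (not part of the commit message) of how mfill_atomic_pte() dispatches after this patch, as can be read off the diff below:

	mfill_atomic_pte()
	  mode == MCOPY_ATOMIC_CONTINUE (shmem, shared or private)
	    -> mcontinue_atomic_pte() -> mcopy_atomic_install_pte()
	  !VM_SHARED && mode == MCOPY_ATOMIC_NORMAL
	    -> mcopy_atomic_pte() -> mcopy_atomic_install_pte()
	  !VM_SHARED && mode == MCOPY_ATOMIC_ZEROPAGE
	    -> mfill_zeropage_pte()
	  VM_SHARED (non-CONTINUE)
	    -> shmem_mcopy_atomic_pte()  (converted to the helper in that later commit)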
Signed-off-by: Axel Rasmussen <axelrasmussen@google.com>
---
 mm/userfaultfd.c | 172 ++++++++++++++++++++++++++++++++++-------------
 1 file changed, 127 insertions(+), 45 deletions(-)
diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
index 23fa2583bbd1..51d8c0127161 100644
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -48,6 +48,83 @@ struct vm_area_struct *find_dst_vma(struct mm_struct *dst_mm,
 	return dst_vma;
 }
 
+/*
+ * Install PTEs, to map dst_addr (within dst_vma) to page.
+ *
+ * This function handles MCOPY_ATOMIC_CONTINUE (which is always file-backed),
+ * whether or not dst_vma is VM_SHARED. It also handles the more general
+ * MCOPY_ATOMIC_NORMAL case, when dst_vma is *not* VM_SHARED (it may be file
+ * backed, or not).
+ *
+ * Note that MCOPY_ATOMIC_NORMAL for a VM_SHARED dst_vma is handled by
+ * shmem_mcopy_atomic_pte instead.
+ */
+static int mcopy_atomic_install_pte(struct mm_struct *dst_mm, pmd_t *dst_pmd,
+				    struct vm_area_struct *dst_vma,
+				    unsigned long dst_addr, struct page *page,
+				    bool newly_allocated, bool wp_copy)
+{
+	int ret;
+	pte_t _dst_pte, *dst_pte;
+	bool writable = dst_vma->vm_flags & VM_WRITE;
+	bool vm_shared = dst_vma->vm_flags & VM_SHARED;
+	bool page_in_cache = page->mapping;
+	spinlock_t *ptl;
+	struct inode *inode;
+	pgoff_t offset, max_off;
+
+	_dst_pte = mk_pte(page, dst_vma->vm_page_prot);
+	if (page_in_cache && !vm_shared)
+		writable = false;
+	if (writable || !page_in_cache)
+		_dst_pte = pte_mkdirty(_dst_pte);
+	if (writable) {
+		if (wp_copy)
+			_dst_pte = pte_mkuffd_wp(_dst_pte);
+		else
+			_dst_pte = pte_mkwrite(_dst_pte);
+	}
+
+	dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
+
+	if (vma_is_shmem(dst_vma)) {
+		/* serialize against truncate with the page table lock */
+		inode = dst_vma->vm_file->f_inode;
+		offset = linear_page_index(dst_vma, dst_addr);
+		max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
+		ret = -EFAULT;
+		if (unlikely(offset >= max_off))
+			goto out_unlock;
+	}
+
+	ret = -EEXIST;
+	if (!pte_none(*dst_pte))
+		goto out_unlock;
+
+	if (page_in_cache)
+		page_add_file_rmap(page, false);
+	else
+		page_add_new_anon_rmap(page, dst_vma, dst_addr, false);
+
+	/*
+	 * Must happen after rmap, as mm_counter() checks mapping (via
+	 * PageAnon()), which is set by __page_set_anon_rmap().
+	 */
+	inc_mm_counter(dst_mm, mm_counter(page));
Actually, I've noticed that this is still slightly incorrect.
As Hugh pointed out, this works for the anon case, because page_add_new_anon_rmap() sets page->mapping.
But for the page_in_cache case, it doesn't work: unlike its anon counterpart, page_add_file_rmap() *does not* set page->mapping. So, I think this line needs to become:
	inc_mm_counter(dst_mm, page_in_cache ? mm_counter_file(page) : mm_counter(page));
I'll include this fix in a v5 as well.
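(For reference, and not part of the patch: the counter-selection helpers in include/linux/mm.h look roughly like this in the v5.12-era tree, which is why the result hinges on page->mapping:

	static inline int mm_counter_file(struct page *page)
	{
		if (PageSwapBacked(page))
			return MM_SHMEMPAGES;
		return MM_FILEPAGES;
	}

	static inline int mm_counter(struct page *page)
	{
		if (PageAnon(page))	/* tests the anon bit in page->mapping */
			return MM_ANONPAGES;
		return mm_counter_file(page);
	}

With the ternary above, the page_in_cache path no longer depends on what the preceding rmap call did to page->mapping.)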
+
+	if (newly_allocated)
+		lru_cache_add_inactive_or_unevictable(page, dst_vma);
+
+	set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
+
+	/* No need to invalidate - it was non-present before */
+	update_mmu_cache(dst_vma, dst_addr, dst_pte);
+
+	ret = 0;
+out_unlock:
+	pte_unmap_unlock(dst_pte, ptl);
+	return ret;
+}
+
 static int mcopy_atomic_pte(struct mm_struct *dst_mm,
 			    pmd_t *dst_pmd,
 			    struct vm_area_struct *dst_vma,
@@ -56,13 +133,9 @@ static int mcopy_atomic_pte(struct mm_struct *dst_mm,
 			    struct page **pagep,
 			    bool wp_copy)
 {
-	pte_t _dst_pte, *dst_pte;
-	spinlock_t *ptl;
 	void *page_kaddr;
 	int ret;
 	struct page *page;
-	pgoff_t offset, max_off;
-	struct inode *inode;
 
 	if (!*pagep) {
 		ret = -ENOMEM;
@@ -99,43 +172,12 @@ static int mcopy_atomic_pte(struct mm_struct *dst_mm,
 	if (mem_cgroup_charge(page, dst_mm, GFP_KERNEL))
 		goto out_release;
 
-	_dst_pte = pte_mkdirty(mk_pte(page, dst_vma->vm_page_prot));
-	if (dst_vma->vm_flags & VM_WRITE) {
-		if (wp_copy)
-			_dst_pte = pte_mkuffd_wp(_dst_pte);
-		else
-			_dst_pte = pte_mkwrite(_dst_pte);
-	}
-
-	dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
-	if (dst_vma->vm_file) {
-		/* the shmem MAP_PRIVATE case requires checking the i_size */
-		inode = dst_vma->vm_file->f_inode;
-		offset = linear_page_index(dst_vma, dst_addr);
-		max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
-		ret = -EFAULT;
-		if (unlikely(offset >= max_off))
-			goto out_release_uncharge_unlock;
-	}
-
-	ret = -EEXIST;
-	if (!pte_none(*dst_pte))
-		goto out_release_uncharge_unlock;
-
-	inc_mm_counter(dst_mm, MM_ANONPAGES);
-	page_add_new_anon_rmap(page, dst_vma, dst_addr, false);
-	lru_cache_add_inactive_or_unevictable(page, dst_vma);
-
-	set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
-
-	/* No need to invalidate - it was non-present before */
-	update_mmu_cache(dst_vma, dst_addr, dst_pte);
-
-	pte_unmap_unlock(dst_pte, ptl);
-	ret = 0;
+	ret = mcopy_atomic_install_pte(dst_mm, dst_pmd, dst_vma, dst_addr,
+				       page, true, wp_copy);
+	if (ret)
+		goto out_release;
+
 out:
 	return ret;
-out_release_uncharge_unlock:
-	pte_unmap_unlock(dst_pte, ptl);
 out_release:
 	put_page(page);
 	goto out;
@@ -176,6 +218,41 @@ static int mfill_zeropage_pte(struct mm_struct *dst_mm,
 	return ret;
 }
 
+/* Handles UFFDIO_CONTINUE for all shmem VMAs (shared or private). */
+static int mcontinue_atomic_pte(struct mm_struct *dst_mm,
+				pmd_t *dst_pmd,
+				struct vm_area_struct *dst_vma,
+				unsigned long dst_addr,
+				bool wp_copy)
+{
+	struct inode *inode = file_inode(dst_vma->vm_file);
+	pgoff_t pgoff = linear_page_index(dst_vma, dst_addr);
+	struct page *page;
+	int ret;
+
+	ret = shmem_getpage(inode, pgoff, &page, SGP_READ);
+	if (ret)
+		goto out;
+	if (!page) {
+		ret = -EFAULT;
+		goto out;
+	}
+
+	ret = mcopy_atomic_install_pte(dst_mm, dst_pmd, dst_vma, dst_addr,
+				       page, false, wp_copy);
+	if (ret)
+		goto out_release;
+
+	unlock_page(page);
+	ret = 0;
+out:
+	return ret;
+out_release:
+	unlock_page(page);
+	put_page(page);
+	goto out;
+}
+
 static pmd_t *mm_alloc_pmd(struct mm_struct *mm, unsigned long address)
 {
 	pgd_t *pgd;
@@ -415,11 +492,16 @@ static __always_inline ssize_t mfill_atomic_pte(struct mm_struct *dst_mm,
 						unsigned long dst_addr,
 						unsigned long src_addr,
 						struct page **page,
-						bool zeropage,
+						enum mcopy_atomic_mode mode,
 						bool wp_copy)
 {
 	ssize_t err;
 
+	if (mode == MCOPY_ATOMIC_CONTINUE) {
+		return mcontinue_atomic_pte(dst_mm, dst_pmd, dst_vma, dst_addr,
+					    wp_copy);
+	}
+
 	/*
 	 * The normal page fault path for a shmem will invoke the
 	 * fault, fill the hole in the file and COW it right away. The
@@ -431,7 +513,7 @@ static __always_inline ssize_t mfill_atomic_pte(struct mm_struct *dst_mm,
 	 * and not in the radix tree.
 	 */
 	if (!(dst_vma->vm_flags & VM_SHARED)) {
-		if (!zeropage)
+		if (mode == MCOPY_ATOMIC_NORMAL)
 			err = mcopy_atomic_pte(dst_mm, dst_pmd, dst_vma,
 					       dst_addr, src_addr, page,
 					       wp_copy);
@@ -441,7 +523,8 @@ static __always_inline ssize_t mfill_atomic_pte(struct mm_struct *dst_mm,
 	} else {
 		VM_WARN_ON_ONCE(wp_copy);
 		err = shmem_mcopy_atomic_pte(dst_mm, dst_pmd, dst_vma,
-					     dst_addr, src_addr, zeropage,
+					     dst_addr, src_addr,
+					     mode != MCOPY_ATOMIC_NORMAL,
 					     page);
 	}
@@ -463,7 +546,6 @@ static __always_inline ssize_t __mcopy_atomic(struct mm_struct *dst_mm,
 	long copied;
 	struct page *page;
 	bool wp_copy;
-	bool zeropage = (mcopy_mode == MCOPY_ATOMIC_ZEROPAGE);
 
 	/*
 	 * Sanitize the command parameters:
@@ -526,7 +608,7 @@ static __always_inline ssize_t __mcopy_atomic(struct mm_struct *dst_mm,
 	if (!vma_is_anonymous(dst_vma) && !vma_is_shmem(dst_vma))
 		goto out_unlock;
 
-	if (mcopy_mode == MCOPY_ATOMIC_CONTINUE)
+	if (!vma_is_shmem(dst_vma) && mcopy_mode == MCOPY_ATOMIC_CONTINUE)
 		goto out_unlock;
 
 	/*
@@ -574,7 +656,7 @@ static __always_inline ssize_t __mcopy_atomic(struct mm_struct *dst_mm,
 		BUG_ON(pmd_trans_huge(*dst_pmd));
 
 		err = mfill_atomic_pte(dst_mm, dst_pmd, dst_vma, dst_addr,
-				       src_addr, &page, zeropage, wp_copy);
+				       src_addr, &page, mcopy_mode, wp_copy);
 		cond_resched();
 
 		if (unlikely(err == -ENOENT)) {
-- 
2.31.1.368.gbe11c130af-goog