Kernel drivers that pin pages should account these pages against
either user->locked_vm or mm->pinned_vm and fail the pinning if
RLIMIT_MEMLOCK is exceeded and CAP_IPC_LOCK isn't held.
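
Today that check is typically open-coded. Roughly the following
pattern, sketched here for illustration rather than taken from any one
driver:

	unsigned long lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	unsigned long locked;

	locked = atomic64_add_return(npages, &current->mm->pinned_vm);
	if (locked > lock_limit && !capable(CAP_IPC_LOCK)) {
		atomic64_sub(npages, &current->mm->pinned_vm);
		return -ENOMEM;
	}
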
Currently drivers open-code this accounting and use various methods to
update the atomic variables and check against the limits, leading to
bugs and inconsistencies. To fix this, introduce a standard interface
for charging pinned and locked memory. As this involves taking
references on kernel objects such as the mm_struct or user_struct,
introduce a new vm_account struct to hold these references. Several
helper functions are then introduced to take those references and
check the limits, as sketched below.
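
With the new interface a driver would instead do something like the
following (again just a sketch; error handling elided):

	struct vm_account acct;

	vm_account_init_current(&acct);
	if (vm_account_pinned(&acct, npages)) {
		vm_account_release(&acct);
		return -ENOMEM;
	}
	/* ... pin the pages ... */

	/* and on teardown, in reverse: */
	vm_unaccount_pinned(&acct, npages);
	vm_account_release(&acct);
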
As the way these limits are charged and enforced is visible to
userspace, we need to be careful not to break existing applications by
charging to different counters. As a result the vm_account functions
support accounting to different counters as required.

A future change will extend this to also account pinned pages against
a cgroup.

Signed-off-by: Alistair Popple <apopple@nvidia.com>
Cc: linux-kernel@vger.kernel.org
Cc: linuxppc-dev@lists.ozlabs.org
Cc: linux-fpga@vger.kernel.org
Cc: linux-rdma@vger.kernel.org
Cc: virtualization@lists.linux-foundation.org
Cc: kvm@vger.kernel.org
Cc: netdev@vger.kernel.org
Cc: cgroups@vger.kernel.org
Cc: io-uring@vger.kernel.org
Cc: linux-mm@kvack.org
Cc: bpf@vger.kernel.org
Cc: rds-devel@oss.oracle.com
Cc: linux-kselftest@vger.kernel.org
---
 include/linux/mm_types.h | 87 ++++++++++++++++++++++++++++++++++++++++++++
 mm/util.c                | 89 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 176 insertions(+)
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 9757067..7de2168 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -1085,4 +1085,91 @@ enum fault_flag {
typedef unsigned int __bitwise zap_flags_t;
+/**
+ * enum vm_account_flags - Determine how pinned/locked memory is accounted.
+ * @VM_ACCOUNT_TASK: Account pinned memory to mm->pinned_vm.
+ * @VM_ACCOUNT_BYPASS: Don't enforce rlimit on any charges.
+ * @VM_ACCOUNT_USER: Account locked memory to user->locked_vm.
+ *
+ * Determines which statistic pinned/locked memory is accounted
+ * against. All limits will be enforced against RLIMIT_MEMLOCK and the
+ * pins cgroup if CONFIG_CGROUP_PINS is enabled.
+ *
+ * New drivers should use VM_ACCOUNT_TASK. VM_ACCOUNT_USER is used by
+ * pre-existing drivers to maintain existing accounting against
+ * user->locked_vm rather than mm->pinned_vm.
+ *
+ * VM_ACCOUNT_BYPASS may also be specified to bypass rlimit
+ * checks. Typically this is used to cache CAP_IPC_LOCK from when a
+ * driver is first initialised. Note that this does not bypass cgroup
+ * limit checks.
+ */
+enum vm_account_flags {
+ VM_ACCOUNT_TASK = 0,
+ VM_ACCOUNT_BYPASS = 1,
+ VM_ACCOUNT_USER = 2,
+};
+
+struct vm_account {
+ struct task_struct *task;
+ union {
+ struct mm_struct *mm;
+ struct user_struct *user;
+ } a;
+ enum vm_account_flags flags;
+};
+
+/**
+ * vm_account_init - Initialise a new struct vm_account.
+ * @vm_account: pointer to uninitialised vm_account.
+ * @task: task to charge against.
+ * @user: user to charge against. Must be non-NULL for VM_ACCOUNT_USER.
+ * @flags: flags to use when charging to vm_account.
+ *
+ * Initialise an uninitialised struct vm_account. Takes references
+ * on the task/mm/user/cgroup as required, although callers must
+ * ensure any references passed in remain valid for the duration of
+ * this call.
+ */
+void vm_account_init(struct vm_account *vm_account, struct task_struct *task,
+ struct user_struct *user, enum vm_account_flags flags);
+/**
+ * vm_account_init_current - Initialise a new struct vm_account.
+ * @vm_account: pointer to uninitialised vm_account.
+ *
+ * Helper to initialise a vm_account for the common case of charging
+ * with VM_ACCOUNT_TASK against current.
+ */
+void vm_account_init_current(struct vm_account *vm_account);
+
+/**
+ * vm_account_release - Release references held by a vm_account.
+ * @vm_account: pointer to initialised vm_account.
+ *
+ * Drop any object references obtained by vm_account_init(). The
+ * vm_account must not be used after calling this unless reinitialised
+ * with vm_account_init().
+ */
+void vm_account_release(struct vm_account *vm_account);
+
+/**
+ * vm_account_pinned - Charge pinned or locked memory to the vm_account.
+ * @vm_account: pointer to an initialised vm_account.
+ * @npages: number of pages to charge.
+ *
+ * Return: 0 on success, -ENOMEM if a limit would be exceeded.
+ *
+ * Note: All pages must be explicitly uncharged with
+ * vm_unaccount_pinned() prior to releasing the vm_account with
+ * vm_account_release().
+ */
+int vm_account_pinned(struct vm_account *vm_account, unsigned long npages);
+
+/**
+ * vm_unaccount_pinned - Uncharge pinned or locked memory from the vm_account.
+ * @vm_account: pointer to an initialised vm_account.
+ * @npages: number of pages to uncharge.
+ */
+void vm_unaccount_pinned(struct vm_account *vm_account, unsigned long npages);
+
#endif /* _LINUX_MM_TYPES_H */
diff --git a/mm/util.c b/mm/util.c
index b56c92f..af40b1e 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -430,6 +430,95 @@ void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
}
#endif
+void vm_account_init(struct vm_account *vm_account, struct task_struct *task,
+ struct user_struct *user, enum vm_account_flags flags)
+{
+ vm_account->task = get_task_struct(task);
+
+ if (flags & VM_ACCOUNT_USER) {
+ vm_account->a.user = get_uid(user);
+ } else {
+ mmgrab(task->mm);
+ vm_account->a.mm = task->mm;
+ }
+
+ vm_account->flags = flags;
+}
+EXPORT_SYMBOL_GPL(vm_account_init);
+
+void vm_account_init_current(struct vm_account *vm_account)
+{
+ vm_account_init(vm_account, current, NULL, VM_ACCOUNT_TASK);
+}
+EXPORT_SYMBOL_GPL(vm_account_init_current);
+
+void vm_account_release(struct vm_account *vm_account)
+{
+ put_task_struct(vm_account->task);
+ if (vm_account->flags & VM_ACCOUNT_USER)
+ free_uid(vm_account->a.user);
+ else
+ mmdrop(vm_account->a.mm);
+}
+EXPORT_SYMBOL_GPL(vm_account_release);
+
+/*
+ * Charge pages with an atomic compare and swap. Returns -ENOMEM on
+ * failure, 1 on success, and 0 when the cmpxchg raced (caller retries).
+ */
+static int vm_account_cmpxchg(struct vm_account *vm_account,
+ unsigned long npages, unsigned long lock_limit)
+{
+ u64 cur_pages, new_pages;
+
+ if (vm_account->flags & VM_ACCOUNT_USER)
+ cur_pages = atomic_long_read(&vm_account->a.user->locked_vm);
+ else
+ cur_pages = atomic64_read(&vm_account->a.mm->pinned_vm);
+
+ new_pages = cur_pages + npages;
+ if (lock_limit != RLIM_INFINITY && new_pages > lock_limit)
+ return -ENOMEM;
+
+ if (vm_account->flags & VM_ACCOUNT_USER) {
+ return atomic_long_cmpxchg(&vm_account->a.user->locked_vm,
+ cur_pages, new_pages) == cur_pages;
+ } else {
+ return atomic64_cmpxchg(&vm_account->a.mm->pinned_vm,
+ cur_pages, new_pages) == cur_pages;
+ }
+}
+
+int vm_account_pinned(struct vm_account *vm_account, unsigned long npages)
+{
+ unsigned long lock_limit = RLIM_INFINITY;
+ int ret;
+
+ if (!(vm_account->flags & VM_ACCOUNT_BYPASS) && !capable(CAP_IPC_LOCK))
+ lock_limit = task_rlimit(vm_account->task,
+ RLIMIT_MEMLOCK) >> PAGE_SHIFT;
+
+ while (true) {
+ ret = vm_account_cmpxchg(vm_account, npages, lock_limit);
+ if (ret > 0)
+ break;
+ else if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(vm_account_pinned);
+
+void vm_unaccount_pinned(struct vm_account *vm_account, unsigned long npages)
+{
+ if (vm_account->flags & VM_ACCOUNT_USER)
+ atomic_long_sub(npages, &vm_account->a.user->locked_vm);
+ else
+ atomic64_sub(npages, &vm_account->a.mm->pinned_vm);
+}
+EXPORT_SYMBOL_GPL(vm_unaccount_pinned);
+
/**
* __account_locked_vm - account locked pages to an mm's locked_vm
* @mm: mm to account against
--
git-series 0.9.1

From: Brendan Higgins <brendan.higgins@linux.dev>
Looks like kunit_test_init_section_suites(...) was messed up in a merge
conflict. This fixes it.
kunit_test_init_section_suites(...) was not updated to avoid the extra
level of indirection when .kunit_test_suites was flattened. Given no-one
was actively using it, this went unnoticed for a long period of time.
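
For reference, the macro is meant to be used like this (hypothetical
suite name, sketched from how the non-init variant is used):

	static struct kunit_suite my_init_suite = {
		.name = "my-init-suite",
		.test_cases = my_init_test_cases,
	};

	kunit_test_init_section_suites(&my_init_suite);
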
Fixes: e5857d396f35 ("kunit: flatten kunit_suite*** to kunit_suite** in .kunit_test_suites")
Signed-off-by: Brendan Higgins <brendan.higgins@linux.dev>
Signed-off-by: David Gow <davidgow@google.com>
---
include/kunit/test.h | 1 -
1 file changed, 1 deletion(-)
diff --git a/include/kunit/test.h b/include/kunit/test.h
index 87ea90576b50..716deaeef3dd 100644
--- a/include/kunit/test.h
+++ b/include/kunit/test.h
@@ -303,7 +303,6 @@ static inline int kunit_run_all_tests(void)
*/
#define kunit_test_init_section_suites(__suites...) \
__kunit_test_suites(CONCATENATE(__UNIQUE_ID(array), _probe), \
- CONCATENATE(__UNIQUE_ID(suites), _probe), \
##__suites)
#define kunit_test_init_section_suite(suite) \
--
2.39.1.456.gfc5497dd1b-goog
On Fri, Jan 27, 2023 at 11:47:14AM +0500, Muhammad Usama Anjum wrote:
> >> diff --git a/mm/memory.c b/mm/memory.c
> >> index 4000e9f017e0..8c03b133d483 100644
> >> --- a/mm/memory.c
> >> +++ b/mm/memory.c
> >> @@ -3351,6 +3351,18 @@ static vm_fault_t do_wp_page(struct vm_fault *vmf)
> >>
> >> if (likely(!unshare)) {
> >> if (userfaultfd_pte_wp(vma, *vmf->pte)) {
> >> + if (userfaultfd_wp_async(vma)) {
> >> + /*
> >> + * Nothing needed (cache flush, TLB invalidations,
> >> + * etc.) because we're only removing the uffd-wp bit,
> >> + * which is completely invisible to the user. This
> >> + * falls through to possible CoW.
> >
> > Here it says it falls through to CoW, but..
> >
> >> + */
> >> + pte_unmap_unlock(vmf->pte, vmf->ptl);
> >> + set_pte_at(vma->vm_mm, vmf->address, vmf->pte,
> >> + pte_clear_uffd_wp(*vmf->pte));
> >> + return 0;
> >
> > ... it's not doing so. The original lines should do:
> >
> > https://lore.kernel.org/all/Y8qq0dKIJBshua+X@x1n/
[1]
> >
> > Side note: you cannot modify pgtable after releasing the pgtable lock.
> > It's racy.
> If I don't unlock and return after removing the UFFD_WP flag in the
> async wp case, the target just gets stuck. Maybe the pte lock is not
> unlocked in some path.
>
> If I unlock and don't return, the crash happens.
>
> So I've put the unlock and return here. Please comment on the patch
> below and on what you think should be done instead; I must have missed
> something.
Have you tried using exactly what I suggested in [1]? I'll paste it
again:
---8<---
diff --git a/mm/memory.c b/mm/memory.c
index 4000e9f017e0..09aab434654c 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3351,8 +3351,20 @@ static vm_fault_t do_wp_page(struct vm_fault *vmf)
if (likely(!unshare)) {
if (userfaultfd_pte_wp(vma, *vmf->pte)) {
- pte_unmap_unlock(vmf->pte, vmf->ptl);
- return handle_userfault(vmf, VM_UFFD_WP);
+ if (userfaultfd_uffd_wp_async(vma)) {
+ /*
+ * Nothing needed (cache flush, TLB
+ * invalidations, etc.) because we're only
+ * removing the uffd-wp bit, which is
+ * completely invisible to the user.
+ * This falls through to possible CoW.
+ */
+ set_pte_at(vma->vm_mm, vmf->address, vmf->pte,
+ pte_clear_uffd_wp(*vmf->pte));
+ } else {
+ pte_unmap_unlock(vmf->pte, vmf->ptl);
+ return handle_userfault(vmf, VM_UFFD_WP);
+ }
}
---8<---
Note that there's no "return", nor the unlock. The lock is used in the
follow-up write fault resolution and is released later.
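
In other words, the pattern in this path is roughly (a sketch, not the
actual code):

	vmf->pte = pte_offset_map_lock(mm, vmf->pmd, vmf->address,
				       &vmf->ptl);
	/* *vmf->pte may only be read or written while ptl is held */
	set_pte_at(mm, vmf->address, vmf->pte,
		   pte_clear_uffd_wp(*vmf->pte));
	/*
	 * Fall through: the write fault path keeps using vmf->pte and
	 * only calls pte_unmap_unlock(vmf->pte, vmf->ptl) once it is
	 * done with the page table.
	 */
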
Meanwhile, please fully digest how the pgtable lock is used in this
path before moving forward with any such changes.
>
> >
> >> + }
> >> pte_unmap_unlock(vmf->pte, vmf->ptl);
> >> return handle_userfault(vmf, VM_UFFD_WP);
> >> }
> >> @@ -4812,8 +4824,21 @@ static inline vm_fault_t wp_huge_pmd(struct vm_fault *vmf)
> >>
> >> if (vma_is_anonymous(vmf->vma)) {
> >> if (likely(!unshare) &&
> >> - userfaultfd_huge_pmd_wp(vmf->vma, vmf->orig_pmd))
> >> - return handle_userfault(vmf, VM_UFFD_WP);
> >> + userfaultfd_huge_pmd_wp(vmf->vma, vmf->orig_pmd)) {
> >> + if (userfaultfd_wp_async(vmf->vma)) {
> >> + /*
> >> + * Nothing needed (cache flush, TLB invalidations,
> >> + * etc.) because we're only removing the uffd-wp bit,
> >> + * which is completely invisible to the user. This
> >> + * falls through to possible CoW.
> >> + */
> >> + set_pmd_at(vmf->vma->vm_mm, vmf->address, vmf->pmd,
> >> + pmd_clear_uffd_wp(*vmf->pmd));
> >
> > This is for THP, not hugetlb.
> >
> > Clearing uffd-wp bit here for the whole pmd is wrong to me, because we
> > track writes in small page sizes only. We should just split.
> By detecting if the fault is async wp, just splitting the PMD doesn't
> work. The snippet below is working right now. But definitely the fault
> for the whole PMD is being resolved, which it would be highly desirable
> to bypass by splitting correctly. Can you please take a look at the
> UFFD side and suggest the changes? It would be much appreciated. I'm
> attaching WIP v9 patches for you to apply on next (next-20230105); the
> pagemap_ioctl selftest can be run to test things after making changes.
Can you elaborate on why the THP split didn't work? Or if you want, I
can look into this and provide a patch to enable uffd async mode.
Thanks,
--
Peter Xu