From: Zi Yan <ziy@nvidia.com>
Hi all,
File folios support any order, and people would like to support flexible orders for anonymous folios[1] too. Currently, split_huge_page() only splits a huge page to order-0 pages, but splitting to orders higher than 0 is also useful. This patchset adds support for splitting a huge page to any lower order and uses it during folio truncate operations.
The patchset is on top of mm-everything-2023-03-27-21-20.
Changelog from v1
===
1. Changed the split_page_memcg() and split_page_owner() parameter to use order.
2. Used folio_test_pmd_mappable() in place of the equivalent code.
Details
===
* Patch 1 changes split_page_memcg() to use order instead of nr_pages.
* Patch 2 changes split_page_owner() to use order instead of nr_pages.
* Patches 3 and 4 add a new_order parameter to split_page_memcg() and split_page_owner() and prepare for the upcoming changes.
* Patch 5 adds split_huge_page_to_list_to_order() to split a huge page to any lower order. The original split_huge_page_to_list() calls split_huge_page_to_list_to_order() with new_order = 0.
* Patch 6 uses split_huge_page_to_list_to_order() in large pagecache folio truncation instead of splitting the large folio all the way down to order-0.
* Patch 7 adds a test API to debugfs and test cases to the split_huge_page_test selftests.
Comments and/or suggestions are welcome.
[1] https://lore.kernel.org/linux-mm/Y%2FblF0GIunm+pRIC@casper.infradead.org/
Zi Yan (7):
  mm/memcg: use order instead of nr in split_page_memcg()
  mm/page_owner: use order instead of nr in split_page_owner()
  mm: memcg: make memcg huge page split support any order split.
  mm: page_owner: add support for splitting to any order in split page_owner.
  mm: thp: split huge page to any lower order pages.
  mm: truncate: split huge page cache page to a non-zero order if possible.
  mm: huge_memory: enable debugfs to split huge pages to any order.
 include/linux/huge_mm.h                        |  10 +-
 include/linux/memcontrol.h                     |   4 +-
 include/linux/page_owner.h                     |  10 +-
 mm/huge_memory.c                               | 137 ++++++++---
 mm/memcontrol.c                                |  10 +-
 mm/page_alloc.c                                |   8 +-
 mm/page_owner.c                                |  10 +-
 mm/truncate.c                                  |  21 +-
 .../selftests/mm/split_huge_page_test.c        | 225 +++++++++++++++++-
 9 files changed, 366 insertions(+), 69 deletions(-)
From: Zi Yan <ziy@nvidia.com>
We do not have non-power-of-two pages, so passing nr is error-prone: a caller could pass a value that is not a power of two. Use the page order instead.
Signed-off-by: Zi Yan <ziy@nvidia.com>
---
 include/linux/memcontrol.h | 4 ++--
 mm/huge_memory.c           | 3 ++-
 mm/memcontrol.c            | 3 ++-
 mm/page_alloc.c            | 4 ++--
 4 files changed, 8 insertions(+), 6 deletions(-)
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index aa69ea98e2d8..e06a61ea4fc1 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -1151,7 +1151,7 @@ static inline void memcg_memory_event_mm(struct mm_struct *mm, rcu_read_unlock(); }
-void split_page_memcg(struct page *head, unsigned int nr); +void split_page_memcg(struct page *head, int order);
unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order, gfp_t gfp_mask, @@ -1588,7 +1588,7 @@ void count_memcg_event_mm(struct mm_struct *mm, enum vm_event_item idx) { }
-static inline void split_page_memcg(struct page *head, unsigned int nr) +static inline void split_page_memcg(struct page *head, int order) { }
diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 81a5689806af..3bb003eb80a3 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -2512,10 +2512,11 @@ static void __split_huge_page(struct page *page, struct list_head *list, struct address_space *swap_cache = NULL; unsigned long offset = 0; unsigned int nr = thp_nr_pages(head); + int order = folio_order(folio); int i;
/* complete memcg works before add pages to LRU */ - split_page_memcg(head, nr); + split_page_memcg(head, order);
if (PageAnon(head) && PageSwapCache(head)) { swp_entry_t entry = { .val = page_private(head) }; diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 681e7528a714..cab2828e188d 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -3414,11 +3414,12 @@ void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size) /* * Because page_memcg(head) is not set on tails, set it now. */ -void split_page_memcg(struct page *head, unsigned int nr) +void split_page_memcg(struct page *head, int order) { struct folio *folio = page_folio(head); struct mem_cgroup *memcg = folio_memcg(folio); int i; + unsigned int nr = 1 << order;
if (mem_cgroup_disabled() || !memcg) return; diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 0767dd6bc5ba..d84b121d1e03 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -2781,7 +2781,7 @@ void split_page(struct page *page, unsigned int order) for (i = 1; i < (1 << order); i++) set_page_refcounted(page + i); split_page_owner(page, 1 << order); - split_page_memcg(page, 1 << order); + split_page_memcg(page, order); } EXPORT_SYMBOL_GPL(split_page);
@@ -4997,7 +4997,7 @@ static void *make_alloc_exact(unsigned long addr, unsigned int order, struct page *last = page + nr;
split_page_owner(page, 1 << order); - split_page_memcg(page, 1 << order); + split_page_memcg(page, order); while (page < --last) set_page_refcounted(last);
From: Zi Yan <ziy@nvidia.com>
We do not have non-power-of-two pages, so passing nr is error-prone: a caller could pass a value that is not a power of two. Use the page order instead.
Signed-off-by: Zi Yan <ziy@nvidia.com>
---
 include/linux/page_owner.h | 8 ++++----
 mm/huge_memory.c           | 2 +-
 mm/page_alloc.c            | 4 ++--
 mm/page_owner.c            | 3 ++-
 4 files changed, 9 insertions(+), 8 deletions(-)
diff --git a/include/linux/page_owner.h b/include/linux/page_owner.h index 119a0c9d2a8b..d7878523adfc 100644 --- a/include/linux/page_owner.h +++ b/include/linux/page_owner.h @@ -11,7 +11,7 @@ extern struct page_ext_operations page_owner_ops; extern void __reset_page_owner(struct page *page, unsigned short order); extern void __set_page_owner(struct page *page, unsigned short order, gfp_t gfp_mask); -extern void __split_page_owner(struct page *page, unsigned int nr); +extern void __split_page_owner(struct page *page, int order); extern void __folio_copy_owner(struct folio *newfolio, struct folio *old); extern void __set_page_owner_migrate_reason(struct page *page, int reason); extern void __dump_page_owner(const struct page *page); @@ -31,10 +31,10 @@ static inline void set_page_owner(struct page *page, __set_page_owner(page, order, gfp_mask); }
-static inline void split_page_owner(struct page *page, unsigned int nr) +static inline void split_page_owner(struct page *page, int order) { if (static_branch_unlikely(&page_owner_inited)) - __split_page_owner(page, nr); + __split_page_owner(page, order); } static inline void folio_copy_owner(struct folio *newfolio, struct folio *old) { @@ -60,7 +60,7 @@ static inline void set_page_owner(struct page *page, { } static inline void split_page_owner(struct page *page, - unsigned short order) + int order) { } static inline void folio_copy_owner(struct folio *newfolio, struct folio *folio) diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 3bb003eb80a3..a21921c90b21 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -2557,7 +2557,7 @@ static void __split_huge_page(struct page *page, struct list_head *list, unlock_page_lruvec(lruvec); /* Caller disabled irqs, so they are still disabled here */
- split_page_owner(head, nr); + split_page_owner(head, order);
/* See comment in __split_huge_page_tail() */ if (PageAnon(head)) { diff --git a/mm/page_alloc.c b/mm/page_alloc.c index d84b121d1e03..d537828bc4be 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -2780,7 +2780,7 @@ void split_page(struct page *page, unsigned int order)
for (i = 1; i < (1 << order); i++) set_page_refcounted(page + i); - split_page_owner(page, 1 << order); + split_page_owner(page, order); split_page_memcg(page, order); } EXPORT_SYMBOL_GPL(split_page); @@ -4996,7 +4996,7 @@ static void *make_alloc_exact(unsigned long addr, unsigned int order, struct page *page = virt_to_page((void *)addr); struct page *last = page + nr;
- split_page_owner(page, 1 << order); + split_page_owner(page, order); split_page_memcg(page, order); while (page < --last) set_page_refcounted(last); diff --git a/mm/page_owner.c b/mm/page_owner.c index 31169b3e7f06..64233b5b09d5 100644 --- a/mm/page_owner.c +++ b/mm/page_owner.c @@ -211,11 +211,12 @@ void __set_page_owner_migrate_reason(struct page *page, int reason) page_ext_put(page_ext); }
-void __split_page_owner(struct page *page, unsigned int nr) +void __split_page_owner(struct page *page, int order) { int i; struct page_ext *page_ext = page_ext_get(page); struct page_owner *page_owner; + unsigned int nr = 1 << order;
if (unlikely(!page_ext)) return;
From: Zi Yan <ziy@nvidia.com>
split_page_memcg() sets memcg information for the pages after the split. Add a new parameter, new_order, to tell the order of the subpages in the new page; it is always 0 for now. This prepares for upcoming changes to support splitting a huge page to any lower order. For example, splitting an order-9 page to order-2 subpages produces 1 << (9 - 2) = 128 subpages, so only old_nr / new_nr - 1 = 127 extra memcg references are needed instead of one per base page.
Signed-off-by: Zi Yan <ziy@nvidia.com>
---
 include/linux/memcontrol.h |  4 ++--
 mm/huge_memory.c           |  2 +-
 mm/memcontrol.c            | 11 ++++++-----
 mm/page_alloc.c            |  4 ++--
 4 files changed, 11 insertions(+), 10 deletions(-)
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index e06a61ea4fc1..1633c00fe393 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -1151,7 +1151,7 @@ static inline void memcg_memory_event_mm(struct mm_struct *mm, rcu_read_unlock(); }
-void split_page_memcg(struct page *head, int order); +void split_page_memcg(struct page *head, int old_order, int new_order);
unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order, gfp_t gfp_mask, @@ -1588,7 +1588,7 @@ void count_memcg_event_mm(struct mm_struct *mm, enum vm_event_item idx) { }
-static inline void split_page_memcg(struct page *head, int order) +static inline void split_page_memcg(struct page *head, int old_order, int new_order) { }
diff --git a/mm/huge_memory.c b/mm/huge_memory.c index a21921c90b21..106cde74d933 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -2516,7 +2516,7 @@ static void __split_huge_page(struct page *page, struct list_head *list, int i;
/* complete memcg works before add pages to LRU */ - split_page_memcg(head, order); + split_page_memcg(head, order, 0);
if (PageAnon(head) && PageSwapCache(head)) { swp_entry_t entry = { .val = page_private(head) }; diff --git a/mm/memcontrol.c b/mm/memcontrol.c index cab2828e188d..93ae37f90c84 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -3414,23 +3414,24 @@ void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size) /* * Because page_memcg(head) is not set on tails, set it now. */ -void split_page_memcg(struct page *head, int order) +void split_page_memcg(struct page *head, int old_order, int new_order) { struct folio *folio = page_folio(head); struct mem_cgroup *memcg = folio_memcg(folio); int i; - unsigned int nr = 1 << order; + unsigned int old_nr = 1 << old_order; + unsigned int new_nr = 1 << new_order;
if (mem_cgroup_disabled() || !memcg) return;
- for (i = 1; i < nr; i++) + for (i = new_nr; i < old_nr; i += new_nr) folio_page(folio, i)->memcg_data = folio->memcg_data;
if (folio_memcg_kmem(folio)) - obj_cgroup_get_many(__folio_objcg(folio), nr - 1); + obj_cgroup_get_many(__folio_objcg(folio), old_nr / new_nr - 1); else - css_get_many(&memcg->css, nr - 1); + css_get_many(&memcg->css, old_nr / new_nr - 1); }
#ifdef CONFIG_SWAP diff --git a/mm/page_alloc.c b/mm/page_alloc.c index d537828bc4be..ef559795525b 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -2781,7 +2781,7 @@ void split_page(struct page *page, unsigned int order) for (i = 1; i < (1 << order); i++) set_page_refcounted(page + i); split_page_owner(page, order); - split_page_memcg(page, order); + split_page_memcg(page, order, 0); } EXPORT_SYMBOL_GPL(split_page);
@@ -4997,7 +4997,7 @@ static void *make_alloc_exact(unsigned long addr, unsigned int order, struct page *last = page + nr;
split_page_owner(page, order); - split_page_memcg(page, order); + split_page_memcg(page, order, 0); while (page < --last) set_page_refcounted(last);
From: Zi Yan <ziy@nvidia.com>
Add a new_order parameter to set the new page order in page_owner. This prepares for upcoming changes to support splitting a huge page to any lower order.
Signed-off-by: Zi Yan <ziy@nvidia.com>
---
 include/linux/page_owner.h | 10 +++++-----
 mm/huge_memory.c           |  2 +-
 mm/page_alloc.c            |  4 ++--
 mm/page_owner.c            | 11 ++++++-----
 4 files changed, 14 insertions(+), 13 deletions(-)
diff --git a/include/linux/page_owner.h b/include/linux/page_owner.h index d7878523adfc..a784ba69f67f 100644 --- a/include/linux/page_owner.h +++ b/include/linux/page_owner.h @@ -11,7 +11,7 @@ extern struct page_ext_operations page_owner_ops; extern void __reset_page_owner(struct page *page, unsigned short order); extern void __set_page_owner(struct page *page, unsigned short order, gfp_t gfp_mask); -extern void __split_page_owner(struct page *page, int order); +extern void __split_page_owner(struct page *page, int old_order, int new_order); extern void __folio_copy_owner(struct folio *newfolio, struct folio *old); extern void __set_page_owner_migrate_reason(struct page *page, int reason); extern void __dump_page_owner(const struct page *page); @@ -31,10 +31,10 @@ static inline void set_page_owner(struct page *page, __set_page_owner(page, order, gfp_mask); }
-static inline void split_page_owner(struct page *page, int order) +static inline void split_page_owner(struct page *page, int old_order, int new_order) { if (static_branch_unlikely(&page_owner_inited)) - __split_page_owner(page, order); + __split_page_owner(page, old_order, new_order); } static inline void folio_copy_owner(struct folio *newfolio, struct folio *old) { @@ -56,11 +56,11 @@ static inline void reset_page_owner(struct page *page, unsigned short order) { } static inline void set_page_owner(struct page *page, - unsigned int order, gfp_t gfp_mask) + unsigned short order, gfp_t gfp_mask) { } static inline void split_page_owner(struct page *page, - int order) + int old_order, int new_order) { } static inline void folio_copy_owner(struct folio *newfolio, struct folio *folio) diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 106cde74d933..f8a8a72b207d 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -2557,7 +2557,7 @@ static void __split_huge_page(struct page *page, struct list_head *list, unlock_page_lruvec(lruvec); /* Caller disabled irqs, so they are still disabled here */
- split_page_owner(head, order); + split_page_owner(head, order, 0);
/* See comment in __split_huge_page_tail() */ if (PageAnon(head)) { diff --git a/mm/page_alloc.c b/mm/page_alloc.c index ef559795525b..4845ff6c4223 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -2780,7 +2780,7 @@ void split_page(struct page *page, unsigned int order)
for (i = 1; i < (1 << order); i++) set_page_refcounted(page + i); - split_page_owner(page, order); + split_page_owner(page, order, 0); split_page_memcg(page, order, 0); } EXPORT_SYMBOL_GPL(split_page); @@ -4996,7 +4996,7 @@ static void *make_alloc_exact(unsigned long addr, unsigned int order, struct page *page = virt_to_page((void *)addr); struct page *last = page + nr;
- split_page_owner(page, order); + split_page_owner(page, order, 0); split_page_memcg(page, order, 0); while (page < --last) set_page_refcounted(last); diff --git a/mm/page_owner.c b/mm/page_owner.c index 64233b5b09d5..347861fe9c50 100644 --- a/mm/page_owner.c +++ b/mm/page_owner.c @@ -211,20 +211,21 @@ void __set_page_owner_migrate_reason(struct page *page, int reason) page_ext_put(page_ext); }
-void __split_page_owner(struct page *page, int order) +void __split_page_owner(struct page *page, int old_order, int new_order) { int i; struct page_ext *page_ext = page_ext_get(page); struct page_owner *page_owner; - unsigned int nr = 1 << order; + unsigned int old_nr = 1 << old_order; + unsigned int new_nr = 1 << new_order;
if (unlikely(!page_ext)) return;
- for (i = 0; i < nr; i++) { + for (i = 0; i < old_nr; i += new_nr) { + page_ext = lookup_page_ext(page + i); page_owner = get_page_owner(page_ext); - page_owner->order = 0; - page_ext = page_ext_next(page_ext); + page_owner->order = new_order; } page_ext_put(page_ext); }
Hi Zi,
Thank you for the patch! Yet something to improve:
[auto build test ERROR on akpm-mm/mm-everything]
[also build test ERROR on linus/master v6.3-rc4 next-20230329]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch#_base_tree_information]

url:    https://github.com/intel-lab-lkp/linux/commits/Zi-Yan/mm-memcg-use-order-ins...
base:   https://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm.git mm-everything
patch link:    https://lore.kernel.org/r/20230329011712.3242298-5-zi.yan%40sent.com
patch subject: [PATCH v2 4/7] mm: page_owner: add support for splitting to any order in split page_owner.
config: i386-randconfig-m021 (https://download.01.org/0day-ci/archive/20230329/202303291732.7OqWI96E-lkp@i...)
compiler: gcc-11 (Debian 11.3.0-8) 11.3.0
reproduce (this is a W=1 build):
        # https://github.com/intel-lab-lkp/linux/commit/6d1831c0e01a1a742e026454fe6e56...
        git remote add linux-review https://github.com/intel-lab-lkp/linux
        git fetch --no-tags linux-review Zi-Yan/mm-memcg-use-order-instead-of-nr-in-split_page_memcg/20230329-091809
        git checkout 6d1831c0e01a1a742e026454fe6e5643e08c5985
        # save the config file
        mkdir build_dir && cp config build_dir/.config
        make W=1 O=build_dir ARCH=i386 olddefconfig
        make W=1 O=build_dir ARCH=i386 SHELL=/bin/bash

If you fix the issue, kindly add following tag where applicable
| Reported-by: kernel test robot <lkp@intel.com>
| Link: https://lore.kernel.org/oe-kbuild-all/202303291732.7OqWI96E-lkp@intel.com/
All errors (new ones prefixed by >>):
   mm/page_owner.c: In function '__split_page_owner':
>> mm/page_owner.c:226:28: error: implicit declaration of function 'lookup_page_ext' [-Werror=implicit-function-declaration]
     226 |                 page_ext = lookup_page_ext(page + i);
         |                            ^~~~~~~~~~~~~~~
   mm/page_owner.c:226:26: warning: assignment to 'struct page_ext *' from 'int' makes pointer from integer without a cast [-Wint-conversion]
     226 |                 page_ext = lookup_page_ext(page + i);
         |                          ^
   cc1: some warnings being treated as errors


vim +/lookup_page_ext +226 mm/page_owner.c

   213
   214  void __split_page_owner(struct page *page, int old_order, int new_order)
   215  {
   216          int i;
   217          struct page_ext *page_ext = page_ext_get(page);
   218          struct page_owner *page_owner;
   219          unsigned int old_nr = 1 << old_order;
   220          unsigned int new_nr = 1 << new_order;
   221
   222          if (unlikely(!page_ext))
   223                  return;
   224
   225          for (i = 0; i < old_nr; i += new_nr) {
 > 226                  page_ext = lookup_page_ext(page + i);
   227                  page_owner = get_page_owner(page_ext);
   228                  page_owner->order = new_order;
   229          }
   230          page_ext_put(page_ext);
   231  }
   232
On 29 Mar 2023, at 5:58, kernel test robot wrote:
mm/page_owner.c:226:28: error: implicit declaration of function 'lookup_page_ext' [-Werror=implicit-function-declaration]
It can be fixed by the patch below. I will fix it in the next version. Thanks.
diff --git a/mm/page_owner.c b/mm/page_owner.c
index 347861fe9c50..72244a4f1a31 100644
--- a/mm/page_owner.c
+++ b/mm/page_owner.c
@@ -223,9 +223,9 @@ void __split_page_owner(struct page *page, int old_order, int new_order)
 		return;
 
 	for (i = 0; i < old_nr; i += new_nr) {
-		page_ext = lookup_page_ext(page + i);
 		page_owner = get_page_owner(page_ext);
 		page_owner->order = new_order;
+		page_ext = page_ext_next(page_ext);
 	}
 	page_ext_put(page_ext);
 }
--
Best Regards,
Yan, Zi
Hi Zi,
Thank you for the patch! Yet something to improve:
[auto build test ERROR on akpm-mm/mm-everything]
[also build test ERROR on linus/master v6.3-rc4 next-20230329]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch#_base_tree_information]

url:    https://github.com/intel-lab-lkp/linux/commits/Zi-Yan/mm-memcg-use-order-ins...
base:   https://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm.git mm-everything
patch link:    https://lore.kernel.org/r/20230329011712.3242298-5-zi.yan%40sent.com
patch subject: [PATCH v2 4/7] mm: page_owner: add support for splitting to any order in split page_owner.
config: x86_64-randconfig-a016 (https://download.01.org/0day-ci/archive/20230329/202303292237.pg39cTKv-lkp@i...)
compiler: clang version 14.0.6 (https://github.com/llvm/llvm-project f28c006a5895fc0e329fe15fead81e37457cb1d1)
reproduce (this is a W=1 build):
        wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
        chmod +x ~/bin/make.cross
        # https://github.com/intel-lab-lkp/linux/commit/6d1831c0e01a1a742e026454fe6e56...
        git remote add linux-review https://github.com/intel-lab-lkp/linux
        git fetch --no-tags linux-review Zi-Yan/mm-memcg-use-order-instead-of-nr-in-split_page_memcg/20230329-091809
        git checkout 6d1831c0e01a1a742e026454fe6e5643e08c5985
        # save the config file
        mkdir build_dir && cp config build_dir/.config
        COMPILER_INSTALL_PATH=$HOME/0day COMPILER=clang make.cross W=1 O=build_dir ARCH=x86_64 olddefconfig
        COMPILER_INSTALL_PATH=$HOME/0day COMPILER=clang make.cross W=1 O=build_dir ARCH=x86_64 SHELL=/bin/bash

If you fix the issue, kindly add following tag where applicable
| Reported-by: kernel test robot <lkp@intel.com>
| Link: https://lore.kernel.org/oe-kbuild-all/202303292237.pg39cTKv-lkp@intel.com/
All error/warnings (new ones prefixed by >>):
>> mm/page_owner.c:226:14: error: implicit declaration of function 'lookup_page_ext' is invalid in C99 [-Werror,-Wimplicit-function-declaration]
                   page_ext = lookup_page_ext(page + i);
                              ^
>> mm/page_owner.c:226:12: warning: incompatible integer to pointer conversion assigning to 'struct page_ext *' from 'int' [-Wint-conversion]
                   page_ext = lookup_page_ext(page + i);
                            ^ ~~~~~~~~~~~~~~~~~~~~~~~~~
   1 warning and 1 error generated.


vim +/lookup_page_ext +226 mm/page_owner.c

   213
   214  void __split_page_owner(struct page *page, int old_order, int new_order)
   215  {
   216          int i;
   217          struct page_ext *page_ext = page_ext_get(page);
   218          struct page_owner *page_owner;
   219          unsigned int old_nr = 1 << old_order;
   220          unsigned int new_nr = 1 << new_order;
   221
   222          if (unlikely(!page_ext))
   223                  return;
   224
   225          for (i = 0; i < old_nr; i += new_nr) {
 > 226                  page_ext = lookup_page_ext(page + i);
   227                  page_owner = get_page_owner(page_ext);
   228                  page_owner->order = new_order;
   229          }
   230          page_ext_put(page_ext);
   231  }
   232
Hi Zi,
Thank you for the patch! Yet something to improve:
[auto build test ERROR on akpm-mm/mm-everything]
[also build test ERROR on linus/master v6.3-rc4 next-20230329]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch#_base_tree_information]

url:    https://github.com/intel-lab-lkp/linux/commits/Zi-Yan/mm-memcg-use-order-ins...
base:   https://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm.git mm-everything
patch link:    https://lore.kernel.org/r/20230329011712.3242298-5-zi.yan%40sent.com
patch subject: [PATCH v2 4/7] mm: page_owner: add support for splitting to any order in split page_owner.
config: riscv-buildonly-randconfig-r006-20230329 (https://download.01.org/0day-ci/archive/20230330/202303300056.N12iGUqy-lkp@i...)
compiler: clang version 17.0.0 (https://github.com/llvm/llvm-project 67409911353323ca5edf2049ef0df54132fa1ca7)
reproduce (this is a W=1 build):
        wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
        chmod +x ~/bin/make.cross
        # install riscv cross compiling tool for clang build
        # apt-get install binutils-riscv64-linux-gnu
        # https://github.com/intel-lab-lkp/linux/commit/6d1831c0e01a1a742e026454fe6e56...
        git remote add linux-review https://github.com/intel-lab-lkp/linux
        git fetch --no-tags linux-review Zi-Yan/mm-memcg-use-order-instead-of-nr-in-split_page_memcg/20230329-091809
        git checkout 6d1831c0e01a1a742e026454fe6e5643e08c5985
        # save the config file
        mkdir build_dir && cp config build_dir/.config
        COMPILER_INSTALL_PATH=$HOME/0day COMPILER=clang make.cross W=1 O=build_dir ARCH=riscv olddefconfig
        COMPILER_INSTALL_PATH=$HOME/0day COMPILER=clang make.cross W=1 O=build_dir ARCH=riscv SHELL=/bin/bash

If you fix the issue, kindly add following tag where applicable
| Reported-by: kernel test robot <lkp@intel.com>
| Link: https://lore.kernel.org/oe-kbuild-all/202303300056.N12iGUqy-lkp@intel.com/
All errors (new ones prefixed by >>):
>> mm/page_owner.c:226:14: error: call to undeclared function 'lookup_page_ext'; ISO C99 and later do not support implicit function declarations [-Wimplicit-function-declaration]
                   page_ext = lookup_page_ext(page + i);
                              ^
>> mm/page_owner.c:226:12: error: incompatible integer to pointer conversion assigning to 'struct page_ext *' from 'int' [-Wint-conversion]
                   page_ext = lookup_page_ext(page + i);
                            ^ ~~~~~~~~~~~~~~~~~~~~~~~~~
   2 errors generated.


vim +/lookup_page_ext +226 mm/page_owner.c

   213
   214  void __split_page_owner(struct page *page, int old_order, int new_order)
   215  {
   216          int i;
   217          struct page_ext *page_ext = page_ext_get(page);
   218          struct page_owner *page_owner;
   219          unsigned int old_nr = 1 << old_order;
   220          unsigned int new_nr = 1 << new_order;
   221
   222          if (unlikely(!page_ext))
   223                  return;
   224
   225          for (i = 0; i < old_nr; i += new_nr) {
 > 226                  page_ext = lookup_page_ext(page + i);
   227                  page_owner = get_page_owner(page_ext);
   228                  page_owner->order = new_order;
   229          }
   230          page_ext_put(page_ext);
   231  }
   232
From: Zi Yan <ziy@nvidia.com>
To split a THP to any lower order, we need to reform THPs on the subpages at the given order and add page refcounts based on the new page order. We also need to reinitialize page_deferred_list after removing the page from the split_queue; otherwise a subsequent split will see list corruption when it checks page_deferred_list again.

This has many uses, such as minimizing the number of pages after truncating a huge pagecache page. For anonymous THPs, we can still only split them to order-0, as before, until we add support for any-size anonymous THPs.
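For reference, a minimal sketch (not part of this patch; the helper name is made up) of how a caller might use the new interface, following the locking and refcount requirements documented in the function comment below:

/*
 * Hypothetical helper, for illustration only: split a locked, pinned
 * pagecache folio down to @new_order folios. Order-1 is not a valid
 * target and anonymous folios only support new_order == 0.
 */
static int split_pagecache_folio_to_order(struct folio *folio,
                                          unsigned int new_order)
{
        VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);

        if (new_order == 1)
                new_order = 0;  /* no order-1 THPs, fall back to order-0 */

        /* @list == NULL: resulting tail folios are put back on the LRU */
        return split_huge_page_to_list_to_order(&folio->page, NULL, new_order);
}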
Signed-off-by: Zi Yan <ziy@nvidia.com>
---
 include/linux/huge_mm.h |  10 ++--
 mm/huge_memory.c        | 102 +++++++++++++++++++++++++++++-----------
 2 files changed, 81 insertions(+), 31 deletions(-)
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index 20284387b841..32c91e1b59cd 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h @@ -147,10 +147,11 @@ void prep_transhuge_page(struct page *page); void free_transhuge_page(struct page *page);
bool can_split_folio(struct folio *folio, int *pextra_pins); -int split_huge_page_to_list(struct page *page, struct list_head *list); +int split_huge_page_to_list_to_order(struct page *page, struct list_head *list, + unsigned int new_order); static inline int split_huge_page(struct page *page) { - return split_huge_page_to_list(page, NULL); + return split_huge_page_to_list_to_order(page, NULL, 0); } void deferred_split_folio(struct folio *folio);
@@ -297,7 +298,8 @@ can_split_folio(struct folio *folio, int *pextra_pins) return false; } static inline int -split_huge_page_to_list(struct page *page, struct list_head *list) +split_huge_page_to_list_to_order(struct page *page, struct list_head *list, + unsigned int new_order) { return 0; } @@ -397,7 +399,7 @@ static inline bool thp_migration_supported(void) static inline int split_folio_to_list(struct folio *folio, struct list_head *list) { - return split_huge_page_to_list(&folio->page, list); + return split_huge_page_to_list_to_order(&folio->page, list, 0); }
static inline int split_folio(struct folio *folio) diff --git a/mm/huge_memory.c b/mm/huge_memory.c index f8a8a72b207d..619d25278340 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -2359,11 +2359,13 @@ void vma_adjust_trans_huge(struct vm_area_struct *vma,
static void unmap_folio(struct folio *folio) { - enum ttu_flags ttu_flags = TTU_RMAP_LOCKED | TTU_SPLIT_HUGE_PMD | - TTU_SYNC; + enum ttu_flags ttu_flags = TTU_RMAP_LOCKED | TTU_SYNC;
VM_BUG_ON_FOLIO(!folio_test_large(folio), folio);
+ if (folio_test_pmd_mappable(folio)) + ttu_flags |= TTU_SPLIT_HUGE_PMD; + /* * Anon pages need migration entries to preserve them, but file * pages can simply be left unmapped, then faulted back on demand. @@ -2395,7 +2397,6 @@ static void lru_add_page_tail(struct page *head, struct page *tail, struct lruvec *lruvec, struct list_head *list) { VM_BUG_ON_PAGE(!PageHead(head), head); - VM_BUG_ON_PAGE(PageCompound(tail), head); VM_BUG_ON_PAGE(PageLRU(tail), head); lockdep_assert_held(&lruvec->lru_lock);
@@ -2416,7 +2417,7 @@ static void lru_add_page_tail(struct page *head, struct page *tail, }
static void __split_huge_page_tail(struct page *head, int tail, - struct lruvec *lruvec, struct list_head *list) + struct lruvec *lruvec, struct list_head *list, unsigned int new_order) { struct page *page_tail = head + tail;
@@ -2483,10 +2484,15 @@ static void __split_huge_page_tail(struct page *head, int tail, * which needs correct compound_head(). */ clear_compound_head(page_tail); + if (new_order) { + prep_compound_page(page_tail, new_order); + prep_transhuge_page(page_tail); + }
/* Finally unfreeze refcount. Additional reference from page cache. */ - page_ref_unfreeze(page_tail, 1 + (!PageAnon(head) || - PageSwapCache(head))); + page_ref_unfreeze(page_tail, 1 + ((!PageAnon(head) || + PageSwapCache(head)) ? + thp_nr_pages(page_tail) : 0));
if (page_is_young(head)) set_page_young(page_tail); @@ -2504,7 +2510,7 @@ static void __split_huge_page_tail(struct page *head, int tail, }
static void __split_huge_page(struct page *page, struct list_head *list, - pgoff_t end) + pgoff_t end, unsigned int new_order) { struct folio *folio = page_folio(page); struct page *head = &folio->page; @@ -2512,11 +2518,12 @@ static void __split_huge_page(struct page *page, struct list_head *list, struct address_space *swap_cache = NULL; unsigned long offset = 0; unsigned int nr = thp_nr_pages(head); + unsigned int new_nr = 1 << new_order; int order = folio_order(folio); int i;
/* complete memcg works before add pages to LRU */ - split_page_memcg(head, order, 0); + split_page_memcg(head, order, new_order);
if (PageAnon(head) && PageSwapCache(head)) { swp_entry_t entry = { .val = page_private(head) }; @@ -2531,14 +2538,14 @@ static void __split_huge_page(struct page *page, struct list_head *list,
ClearPageHasHWPoisoned(head);
- for (i = nr - 1; i >= 1; i--) { - __split_huge_page_tail(head, i, lruvec, list); + for (i = nr - new_nr; i >= new_nr; i -= new_nr) { + __split_huge_page_tail(head, i, lruvec, list, new_order); /* Some pages can be beyond EOF: drop them from page cache */ if (head[i].index >= end) { struct folio *tail = page_folio(head + i);
if (shmem_mapping(head->mapping)) - shmem_uncharge(head->mapping->host, 1); + shmem_uncharge(head->mapping->host, new_nr); else if (folio_test_clear_dirty(tail)) folio_account_cleaned(tail, inode_to_wb(folio->mapping->host)); @@ -2548,29 +2555,38 @@ static void __split_huge_page(struct page *page, struct list_head *list, __xa_store(&head->mapping->i_pages, head[i].index, head + i, 0); } else if (swap_cache) { + /* + * split anonymous THPs (including swapped out ones) to + * non-zero order not supported + */ + VM_WARN_ONCE(new_order, + "Split swap-cached anon folio to non-0 order not supported"); __xa_store(&swap_cache->i_pages, offset + i, head + i, 0); } }
- ClearPageCompound(head); + if (!new_order) + ClearPageCompound(head); + else + set_compound_order(head, new_order); unlock_page_lruvec(lruvec); /* Caller disabled irqs, so they are still disabled here */
- split_page_owner(head, order, 0); + split_page_owner(head, order, new_order);
/* See comment in __split_huge_page_tail() */ if (PageAnon(head)) { /* Additional pin to swap cache */ if (PageSwapCache(head)) { - page_ref_add(head, 2); + page_ref_add(head, 1 + new_nr); xa_unlock(&swap_cache->i_pages); } else { page_ref_inc(head); } } else { /* Additional pin to page cache */ - page_ref_add(head, 2); + page_ref_add(head, 1 + new_nr); xa_unlock(&head->mapping->i_pages); } local_irq_enable(); @@ -2583,7 +2599,15 @@ static void __split_huge_page(struct page *page, struct list_head *list, split_swap_cluster(entry); }
- for (i = 0; i < nr; i++) { + /* + * set page to its compound_head when split to non order-0 pages, so + * we can skip unlocking it below, since PG_locked is transferred to + * the compound_head of the page and the caller will unlock it. + */ + if (new_order) + page = compound_head(page); + + for (i = 0; i < nr; i += new_nr) { struct page *subpage = head + i; if (subpage == page) continue; @@ -2617,29 +2641,31 @@ bool can_split_folio(struct folio *folio, int *pextra_pins) }
/* - * This function splits huge page into normal pages. @page can point to any - * subpage of huge page to split. Split doesn't change the position of @page. + * This function splits huge page into pages in @new_order. @page can point to + * any subpage of huge page to split. Split doesn't change the position of + * @page. * * Only caller must hold pin on the @page, otherwise split fails with -EBUSY. * The huge page must be locked. * * If @list is null, tail pages will be added to LRU list, otherwise, to @list. * - * Both head page and tail pages will inherit mapping, flags, and so on from - * the hugepage. + * Pages in new_order will inherit mapping, flags, and so on from the hugepage. * - * GUP pin and PG_locked transferred to @page. Rest subpages can be freed if - * they are not mapped. + * GUP pin and PG_locked transferred to @page or the compound page @page belongs + * to. Rest subpages can be freed if they are not mapped. * * Returns 0 if the hugepage is split successfully. * Returns -EBUSY if the page is pinned or if anon_vma disappeared from under * us. */ -int split_huge_page_to_list(struct page *page, struct list_head *list) +int split_huge_page_to_list_to_order(struct page *page, struct list_head *list, + unsigned int new_order) { struct folio *folio = page_folio(page); struct deferred_split *ds_queue = get_deferred_split_queue(folio); - XA_STATE(xas, &folio->mapping->i_pages, folio->index); + /* reset xarray order to new order after split */ + XA_STATE_ORDER(xas, &folio->mapping->i_pages, folio->index, new_order); struct anon_vma *anon_vma = NULL; struct address_space *mapping = NULL; int extra_pins, ret; @@ -2649,6 +2675,18 @@ int split_huge_page_to_list(struct page *page, struct list_head *list) VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); VM_BUG_ON_FOLIO(!folio_test_large(folio), folio);
+ /* Cannot split THP to order-1 (no order-1 THPs) */ + if (new_order == 1) { + VM_WARN_ONCE(1, "Cannot split to order-1 folio"); + return -EINVAL; + } + + /* Split anonymous folio to non-zero order not support */ + if (folio_test_anon(folio) && new_order) { + VM_WARN_ONCE(1, "Split anon folio to non-0 order not support"); + return -EINVAL; + } + is_hzp = is_huge_zero_page(&folio->page); VM_WARN_ON_ONCE_FOLIO(is_hzp, folio); if (is_hzp) @@ -2744,7 +2782,13 @@ int split_huge_page_to_list(struct page *page, struct list_head *list) if (folio_ref_freeze(folio, 1 + extra_pins)) { if (!list_empty(&folio->_deferred_list)) { ds_queue->split_queue_len--; - list_del(&folio->_deferred_list); + /* + * Reinitialize page_deferred_list after removing the + * page from the split_queue, otherwise a subsequent + * split will see list corruption when checking the + * page_deferred_list. + */ + list_del_init(&folio->_deferred_list); } spin_unlock(&ds_queue->split_queue_lock); if (mapping) { @@ -2754,14 +2798,18 @@ int split_huge_page_to_list(struct page *page, struct list_head *list) if (folio_test_swapbacked(folio)) { __lruvec_stat_mod_folio(folio, NR_SHMEM_THPS, -nr); - } else { + } else if (!new_order) { + /* + * Decrease THP stats only if split to normal + * pages + */ __lruvec_stat_mod_folio(folio, NR_FILE_THPS, -nr); filemap_nr_thps_dec(mapping); } }
- __split_huge_page(page, list, end); + __split_huge_page(page, list, end, new_order); ret = 0; } else { spin_unlock(&ds_queue->split_queue_lock);
On 29.03.23 03:17, Zi Yan wrote:
From: Zi Yan ziy@nvidia.com
To split a THP to any lower order pages, we need to reform THPs on subpages at given order and add page refcount based on the new page order. Also we need to reinitialize page_deferred_list after removing the page from the split_queue, otherwise a subsequent split will see list corruption when checking the page_deferred_list again.
It has many uses, like minimizing the number of pages after truncating a huge pagecache page. For anonymous THPs, we can only split them to order-0 like before until we add support for any size anonymous THPs.
Because I'm currently looking into something that would also not be compatible with order-1 for now:
You should make it clear that order-1 is not supported, like:
"mm: thp: split huge page to any lower order pages (except order 1)"
And clarify in the subject why that is the case.
On 8 Aug 2023, at 5:01, David Hildenbrand wrote:
Because I'm currently looking into something that would also not be compatible with order-1 for now:
You should make it clear that order-1 is not supported, like:
"mm: thp: split huge page to any lower order pages (except order 1)"
And clarify in the subject why that is the case.
Sure.
I will add below to the commit message and the comment of split_huge_page_to_list_to_order():
Order-1 folios are not supported because _deferred_list, which is used by partially mapped folios, is stored in subpage 2, and an order-1 folio only has subpages 0 and 1.
--
Best Regards,
Yan, Zi
From: Zi Yan <ziy@nvidia.com>
To minimize the number of pages after a huge page truncation, we do not need to split it all the way down to order-0. The huge page has at most three parts: the part before the offset, the part to be truncated, and the part remaining at the end. Find the greatest common divisor of the three sizes and calculate the new page order from it, so we can split the huge page to this order and keep the remaining pages as large and as few as possible.
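As a worked example of that computation, here is a standalone userspace sketch that mirrors the gcd()/ilog2() logic in the hunk below; the sample sizes are made up for illustration:

#include <stdio.h>

/* Euclid's algorithm, standing in for the kernel's gcd(). */
static unsigned long gcd(unsigned long a, unsigned long b)
{
        while (b) {
                unsigned long t = a % b;

                a = b;
                b = t;
        }
        return a;
}

int main(void)
{
        unsigned long page_size = 4096;
        unsigned long folio_size = 2UL << 20;   /* 2MB PMD-sized folio */
        unsigned long offset = 64UL << 10;      /* part kept before the truncated range */
        unsigned long length = 256UL << 10;     /* part being truncated */
        unsigned long remaining = folio_size - offset - length;
        unsigned long piece = gcd(gcd(offset, length), remaining);
        unsigned int new_order = 0;

        if (piece < page_size)
                piece = page_size;      /* avoid ilog2(0), like the patch's round_up() */
        while ((page_size << (new_order + 1)) <= piece)
                new_order++;            /* ilog2(piece / PAGE_SIZE) */
        if (new_order == 1)
                new_order = 0;          /* order-1 THP not supported */

        printf("split to order %u (%lu kB pieces)\n",
               new_order, (page_size << new_order) >> 10);
        return 0;
}

With these numbers, gcd(64K, 256K, 1728K) = 64K, so the folio is split to order-4 (64 kB) pieces.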
Signed-off-by: Zi Yan <ziy@nvidia.com>
---
 mm/truncate.c | 21 +++++++++++++++++++--
 1 file changed, 19 insertions(+), 2 deletions(-)
diff --git a/mm/truncate.c b/mm/truncate.c index 86de31ed4d32..817efd5e94b4 100644 --- a/mm/truncate.c +++ b/mm/truncate.c @@ -22,6 +22,7 @@ #include <linux/buffer_head.h> /* grr. try_to_release_page */ #include <linux/shmem_fs.h> #include <linux/rmap.h> +#include <linux/gcd.h> #include "internal.h"
/* @@ -211,7 +212,8 @@ int truncate_inode_folio(struct address_space *mapping, struct folio *folio) bool truncate_inode_partial_folio(struct folio *folio, loff_t start, loff_t end) { loff_t pos = folio_pos(folio); - unsigned int offset, length; + unsigned int offset, length, remaining; + unsigned int new_order = folio_order(folio);
if (pos < start) offset = start - pos; @@ -222,6 +224,7 @@ bool truncate_inode_partial_folio(struct folio *folio, loff_t start, loff_t end) length = length - offset; else length = end + 1 - pos - offset; + remaining = folio_size(folio) - offset - length;
folio_wait_writeback(folio); if (length == folio_size(folio)) { @@ -236,11 +239,25 @@ bool truncate_inode_partial_folio(struct folio *folio, loff_t start, loff_t end) */ folio_zero_range(folio, offset, length);
+ /* + * Use the greatest common divisor of offset, length, and remaining + * as the smallest page size and compute the new order from it. So we + * can truncate a subpage as large as possible. Round up gcd to + * PAGE_SIZE, otherwise ilog2 can give -1 when gcd/PAGE_SIZE is 0. + */ + new_order = ilog2(round_up(gcd(gcd(offset, length), remaining), + PAGE_SIZE) / PAGE_SIZE); + + /* order-1 THP not supported, downgrade to order-0 */ + if (new_order == 1) + new_order = 0; + + if (folio_has_private(folio)) folio_invalidate(folio, offset, length); if (!folio_test_large(folio)) return true; - if (split_folio(folio) == 0) + if (split_huge_page_to_list_to_order(&folio->page, NULL, new_order) == 0) return true; if (folio_test_dirty(folio)) return false;
From: Zi Yan <ziy@nvidia.com>
This is used to test split_huge_page_to_list_to_order() for pagecache THPs. Also add test cases for split_huge_page_to_list_to_order() via debugfs, via truncating a file, and via punching holes in a file.
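For reference, a minimal userspace sketch of the extended debugfs syntax: the path and the "<pid>,0x<vaddr_start>,0x<vaddr_end>,<new_order>" format come from this patch and match PID_FMT in the selftest below; the helper itself is hypothetical and requires root with debugfs mounted:

#include <stdio.h>
#include <unistd.h>

/* Ask the kernel to split this process's THPs in [start, end) to new_order. */
static int split_range_to_order(unsigned long start, unsigned long end,
                                int new_order)
{
        FILE *f = fopen("/sys/kernel/debug/split_huge_pages", "w");
        int ret;

        if (!f)
                return -1;
        /* same format as PID_FMT ("%d,0x%lx,0x%lx,%d") in split_huge_page_test.c */
        ret = fprintf(f, "%d,0x%lx,0x%lx,%d", getpid(), start, end, new_order);
        fclose(f);
        return ret < 0 ? -1 : 0;
}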
Signed-off-by: Zi Yan <ziy@nvidia.com>
---
 mm/huge_memory.c                              |  34 ++-
 .../selftests/mm/split_huge_page_test.c       | 225 +++++++++++++++++-
 2 files changed, 242 insertions(+), 17 deletions(-)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 619d25278340..ad5b29558a51 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -3023,7 +3023,7 @@ static inline bool vma_not_suitable_for_thp_split(struct vm_area_struct *vma) }
static int split_huge_pages_pid(int pid, unsigned long vaddr_start, - unsigned long vaddr_end) + unsigned long vaddr_end, unsigned int new_order) { int ret = 0; struct task_struct *task; @@ -3085,13 +3085,19 @@ static int split_huge_pages_pid(int pid, unsigned long vaddr_start, goto next;
total++; - if (!can_split_folio(page_folio(page), NULL)) + /* + * For folios with private, split_huge_page_to_list_to_order() + * will try to drop it before split and then check if the folio + * can be split or not. So skip the check here. + */ + if (!folio_test_private(page_folio(page)) && + !can_split_folio(page_folio(page), NULL)) goto next;
if (!trylock_page(page)) goto next;
- if (!split_huge_page(page)) + if (!split_huge_page_to_list_to_order(page, NULL, new_order)) split++;
unlock_page(page); @@ -3109,7 +3115,7 @@ static int split_huge_pages_pid(int pid, unsigned long vaddr_start, }
static int split_huge_pages_in_file(const char *file_path, pgoff_t off_start, - pgoff_t off_end) + pgoff_t off_end, unsigned int new_order) { struct filename *file; struct file *candidate; @@ -3148,7 +3154,7 @@ static int split_huge_pages_in_file(const char *file_path, pgoff_t off_start, if (!folio_trylock(folio)) goto next;
- if (!split_folio(folio)) + if (!split_huge_page_to_list_to_order(&folio->page, NULL, new_order)) split++;
folio_unlock(folio); @@ -3173,10 +3179,14 @@ static ssize_t split_huge_pages_write(struct file *file, const char __user *buf, { static DEFINE_MUTEX(split_debug_mutex); ssize_t ret; - /* hold pid, start_vaddr, end_vaddr or file_path, off_start, off_end */ + /* + * hold pid, start_vaddr, end_vaddr, new_order or + * file_path, off_start, off_end, new_order + */ char input_buf[MAX_INPUT_BUF_SZ]; int pid; unsigned long vaddr_start, vaddr_end; + unsigned int new_order = 0;
ret = mutex_lock_interruptible(&split_debug_mutex); if (ret) @@ -3205,29 +3215,29 @@ static ssize_t split_huge_pages_write(struct file *file, const char __user *buf, goto out; }
- ret = sscanf(buf, "0x%lx,0x%lx", &off_start, &off_end); - if (ret != 2) { + ret = sscanf(buf, "0x%lx,0x%lx,%d", &off_start, &off_end, &new_order); + if (ret != 2 && ret != 3) { ret = -EINVAL; goto out; } - ret = split_huge_pages_in_file(file_path, off_start, off_end); + ret = split_huge_pages_in_file(file_path, off_start, off_end, new_order); if (!ret) ret = input_len;
goto out; }
- ret = sscanf(input_buf, "%d,0x%lx,0x%lx", &pid, &vaddr_start, &vaddr_end); + ret = sscanf(input_buf, "%d,0x%lx,0x%lx,%d", &pid, &vaddr_start, &vaddr_end, &new_order); if (ret == 1 && pid == 1) { split_huge_pages_all(); ret = strlen(input_buf); goto out; - } else if (ret != 3) { + } else if (ret != 3 && ret != 4) { ret = -EINVAL; goto out; }
- ret = split_huge_pages_pid(pid, vaddr_start, vaddr_end); + ret = split_huge_pages_pid(pid, vaddr_start, vaddr_end, new_order); if (!ret) ret = strlen(input_buf); out: diff --git a/tools/testing/selftests/mm/split_huge_page_test.c b/tools/testing/selftests/mm/split_huge_page_test.c index b8558c7f1a39..cbb5e6893cbf 100644 --- a/tools/testing/selftests/mm/split_huge_page_test.c +++ b/tools/testing/selftests/mm/split_huge_page_test.c @@ -16,6 +16,7 @@ #include <sys/mount.h> #include <malloc.h> #include <stdbool.h> +#include <time.h> #include "vm_util.h"
uint64_t pagesize; @@ -23,10 +24,12 @@ unsigned int pageshift; uint64_t pmd_pagesize;
#define SPLIT_DEBUGFS "/sys/kernel/debug/split_huge_pages" +#define SMAP_PATH "/proc/self/smaps" +#define THP_FS_PATH "/mnt/thp_fs" #define INPUT_MAX 80
-#define PID_FMT "%d,0x%lx,0x%lx" -#define PATH_FMT "%s,0x%lx,0x%lx" +#define PID_FMT "%d,0x%lx,0x%lx,%d" +#define PATH_FMT "%s,0x%lx,0x%lx,%d"
#define PFN_MASK ((1UL<<55)-1) #define KPF_THP (1UL<<22) @@ -113,7 +116,7 @@ void split_pmd_thp(void)
/* split all THPs */ write_debugfs(PID_FMT, getpid(), (uint64_t)one_page, - (uint64_t)one_page + len); + (uint64_t)one_page + len, 0);
for (i = 0; i < len; i++) if (one_page[i] != (char)i) { @@ -203,7 +206,7 @@ void split_pte_mapped_thp(void)
/* split all remapped THPs */ write_debugfs(PID_FMT, getpid(), (uint64_t)pte_mapped, - (uint64_t)pte_mapped + pagesize * 4); + (uint64_t)pte_mapped + pagesize * 4, 0);
/* smap does not show THPs after mremap, use kpageflags instead */ thp_size = 0; @@ -269,7 +272,7 @@ void split_file_backed_thp(void) }
/* split the file-backed THP */ - write_debugfs(PATH_FMT, testfile, pgoff_start, pgoff_end); + write_debugfs(PATH_FMT, testfile, pgoff_start, pgoff_end, 0);
status = unlink(testfile); if (status) @@ -290,20 +293,232 @@ void split_file_backed_thp(void) printf("file-backed THP split test done, please check dmesg for more information\n"); }
+void create_pagecache_thp_and_fd(const char *testfile, size_t fd_size, int *fd, char **addr) +{ + size_t i; + int dummy; + + srand(time(NULL)); + + *fd = open(testfile, O_CREAT | O_RDWR, 0664); + if (*fd == -1) { + perror("Failed to create a file at "THP_FS_PATH); + exit(EXIT_FAILURE); + } + + for (i = 0; i < fd_size; i++) { + unsigned char byte = (unsigned char)i; + + write(*fd, &byte, sizeof(byte)); + } + close(*fd); + sync(); + *fd = open("/proc/sys/vm/drop_caches", O_WRONLY); + if (*fd == -1) { + perror("open drop_caches"); + goto err_out_unlink; + } + if (write(*fd, "3", 1) != 1) { + perror("write to drop_caches"); + goto err_out_unlink; + } + close(*fd); + + *fd = open(testfile, O_RDWR); + if (*fd == -1) { + perror("Failed to open a file at "THP_FS_PATH); + goto err_out_unlink; + } + + *addr = mmap(NULL, fd_size, PROT_READ|PROT_WRITE, MAP_SHARED, *fd, 0); + if (*addr == (char *)-1) { + perror("cannot mmap"); + goto err_out_close; + } + madvise(*addr, fd_size, MADV_HUGEPAGE); + + for (size_t i = 0; i < fd_size; i++) + dummy += *(*addr + i); + + if (!check_huge_file(*addr, fd_size / pmd_pagesize, pmd_pagesize)) { + printf("No pagecache THP generated, please mount a filesystem supporting pagecache THP at "THP_FS_PATH"\n"); + goto err_out_close; + } + return; +err_out_close: + close(*fd); +err_out_unlink: + unlink(testfile); + exit(EXIT_FAILURE); +} + +void split_thp_in_pagecache_to_order(size_t fd_size, int order) +{ + int fd; + char *addr; + size_t i; + const char testfile[] = THP_FS_PATH "/test"; + int err = 0; + + create_pagecache_thp_and_fd(testfile, fd_size, &fd, &addr); + + printf("split %ld kB PMD-mapped pagecache page to order %d ... ", fd_size >> 10, order); + write_debugfs(PID_FMT, getpid(), (uint64_t)addr, (uint64_t)addr + fd_size, order); + + for (i = 0; i < fd_size; i++) + if (*(addr + i) != (char)i) { + printf("%lu byte corrupted in the file\n", i); + err = EXIT_FAILURE; + goto out; + } + + if (!check_huge_file(addr, 0, pmd_pagesize)) { + printf("Still FilePmdMapped not split\n"); + err = EXIT_FAILURE; + goto out; + } + + printf("done\n"); +out: + close(fd); + unlink(testfile); + if (err) + exit(err); +} + +void truncate_thp_in_pagecache_to_order(size_t fd_size, int order) +{ + int fd; + char *addr; + size_t i; + const char testfile[] = THP_FS_PATH "/test"; + int err = 0; + + create_pagecache_thp_and_fd(testfile, fd_size, &fd, &addr); + + printf("truncate %ld kB PMD-mapped pagecache page to size %lu kB ... 
", + fd_size >> 10, 4UL << order); + ftruncate(fd, pagesize << order); + + for (i = 0; i < (pagesize << order); i++) + if (*(addr + i) != (char)i) { + printf("%lu byte corrupted in the file\n", i); + err = EXIT_FAILURE; + goto out; + } + + if (!check_huge_file(addr, 0, pmd_pagesize)) { + printf("Still FilePmdMapped not split after truncate\n"); + err = EXIT_FAILURE; + goto out; + } + + printf("done\n"); +out: + close(fd); + unlink(testfile); + if (err) + exit(err); +} + +void punch_hole_in_pagecache_thp(size_t fd_size, off_t offset[], off_t len[], + int n, int num_left_thps) +{ + int fd, j; + char *addr; + size_t i; + const char testfile[] = THP_FS_PATH "/test"; + int err = 0; + + create_pagecache_thp_and_fd(testfile, fd_size, &fd, &addr); + + for (j = 0; j < n; j++) { + printf("punch a hole to %ld kB PMD-mapped pagecache page at addr: %lx, offset %ld, and len %ld ...\n", + fd_size >> 10, (unsigned long)addr, offset[j], len[j]); + fallocate(fd, FALLOC_FL_PUNCH_HOLE|FALLOC_FL_KEEP_SIZE, offset[j], len[j]); + } + + for (i = 0; i < fd_size; i++) { + int in_hole = 0; + + for (j = 0; j < n; j++) + if (i >= offset[j] && i < (offset[j] + len[j])) { + in_hole = 1; + break; + } + + if (in_hole) { + if (*(addr + i)) { + printf("%lu byte non-zero after punch\n", i); + err = EXIT_FAILURE; + goto out; + } + continue; + } + if (*(addr + i) != (char)i) { + printf("%lu byte corrupted in the file\n", i); + err = EXIT_FAILURE; + goto out; + } + } + + if (!check_huge_file(addr, num_left_thps, pmd_pagesize)) { + printf("Still FilePmdMapped not split after punch\n"); + goto out; + } + printf("done\n"); +out: + close(fd); + unlink(testfile); + if (err) + exit(err); +} + int main(int argc, char **argv) { + int i; + size_t fd_size; + off_t offset[2], len[2]; + if (geteuid() != 0) { printf("Please run the benchmark as root\n"); exit(EXIT_FAILURE); }
+ setbuf(stdout, NULL); + pagesize = getpagesize(); pageshift = ffs(pagesize) - 1; pmd_pagesize = read_pmd_pagesize(); + fd_size = 2 * pmd_pagesize;
split_pmd_thp(); split_pte_mapped_thp(); split_file_backed_thp();
+ for (i = 8; i >= 0; i--) + if (i != 1) + split_thp_in_pagecache_to_order(fd_size, i); + + /* + * for i is 1, truncate code in the kernel should create order-0 pages + * instead of order-1 THPs, since order-1 THP is not supported. No error + * is expected. + */ + for (i = 8; i >= 0; i--) + truncate_thp_in_pagecache_to_order(fd_size, i); + + offset[0] = 123; + offset[1] = 4 * pagesize; + len[0] = 200 * pagesize; + len[1] = 16 * pagesize; + punch_hole_in_pagecache_thp(fd_size, offset, len, 2, 1); + + offset[0] = 259 * pagesize + pagesize / 2; + offset[1] = 33 * pagesize; + len[0] = 129 * pagesize; + len[1] = 16 * pagesize; + punch_hole_in_pagecache_thp(fd_size, offset, len, 2, 1); + return 0; }