From: Zi Yan <ziy@nvidia.com>
split_page_memcg() sets the memcg information for the pages after the split. A new parameter, new_order, is added to specify the order of the subpages produced by the split; for now it is always 0. This prepares for upcoming changes to support splitting a huge page to any lower order.
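For illustration, a minimal standalone model of the new loop and reference math, using hypothetical orders (old_order = 4, new_order = 2); this is not kernel code. After the split, the head of each new order-new_order piece inherits memcg_data, and because the original head already holds one reference, only old_nr / new_nr - 1 extra references are needed:

	#include <assert.h>
	#include <stdio.h>

	int main(void)
	{
		unsigned int old_order = 4, new_order = 2; /* hypothetical orders */
		unsigned int old_nr = 1u << old_order;     /* 16 base pages */
		unsigned int new_nr = 1u << new_order;     /* 4 pages per new piece */

		/* Mirrors the new loop: the head of each order-new_order
		 * piece (page offsets new_nr, 2*new_nr, ...) inherits
		 * memcg_data from the original head. */
		for (unsigned int i = new_nr; i < old_nr; i += new_nr)
			printf("page offset %u inherits memcg_data\n", i);

		/* The original head keeps its existing reference, so the
		 * split takes old_nr / new_nr - 1 extra ones (here: 3). */
		assert(old_nr / new_nr - 1 == 3);
		return 0;
	}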
Signed-off-by: Zi Yan <ziy@nvidia.com>
---
 include/linux/memcontrol.h |  4 ++--
 mm/huge_memory.c           |  2 +-
 mm/memcontrol.c            | 11 ++++++-----
 mm/page_alloc.c            |  4 ++--
 4 files changed, 11 insertions(+), 10 deletions(-)
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index e06a61ea4fc1..1633c00fe393 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -1151,7 +1151,7 @@ static inline void memcg_memory_event_mm(struct mm_struct *mm,
 	rcu_read_unlock();
 }
 
-void split_page_memcg(struct page *head, int order);
+void split_page_memcg(struct page *head, int old_order, int new_order);
 
 unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
 						gfp_t gfp_mask,
@@ -1588,7 +1588,7 @@ void count_memcg_event_mm(struct mm_struct *mm, enum vm_event_item idx)
 {
 }
 
-static inline void split_page_memcg(struct page *head, int order)
+static inline void split_page_memcg(struct page *head, int old_order, int new_order)
 {
 }
 
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index a21921c90b21..106cde74d933 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2516,7 +2516,7 @@ static void __split_huge_page(struct page *page, struct list_head *list,
 	int i;
 
 	/* complete memcg works before add pages to LRU */
-	split_page_memcg(head, order);
+	split_page_memcg(head, order, 0);
 
 	if (PageAnon(head) && PageSwapCache(head)) {
 		swp_entry_t entry = { .val = page_private(head) };
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index cab2828e188d..93ae37f90c84 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -3414,23 +3414,24 @@ void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size)
 /*
  * Because page_memcg(head) is not set on tails, set it now.
  */
-void split_page_memcg(struct page *head, int order)
+void split_page_memcg(struct page *head, int old_order, int new_order)
 {
 	struct folio *folio = page_folio(head);
 	struct mem_cgroup *memcg = folio_memcg(folio);
 	int i;
-	unsigned int nr = 1 << order;
+	unsigned int old_nr = 1 << old_order;
+	unsigned int new_nr = 1 << new_order;
 
 	if (mem_cgroup_disabled() || !memcg)
 		return;
 
-	for (i = 1; i < nr; i++)
+	for (i = new_nr; i < old_nr; i += new_nr)
 		folio_page(folio, i)->memcg_data = folio->memcg_data;
 
 	if (folio_memcg_kmem(folio))
-		obj_cgroup_get_many(__folio_objcg(folio), nr - 1);
+		obj_cgroup_get_many(__folio_objcg(folio), old_nr / new_nr - 1);
 	else
-		css_get_many(&memcg->css, nr - 1);
+		css_get_many(&memcg->css, old_nr / new_nr - 1);
 }
 
 #ifdef CONFIG_SWAP
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index d537828bc4be..ef559795525b 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2781,7 +2781,7 @@ void split_page(struct page *page, unsigned int order)
 	for (i = 1; i < (1 << order); i++)
 		set_page_refcounted(page + i);
 	split_page_owner(page, order);
-	split_page_memcg(page, order);
+	split_page_memcg(page, order, 0);
 }
 EXPORT_SYMBOL_GPL(split_page);
 
@@ -4997,7 +4997,7 @@ static void *make_alloc_exact(unsigned long addr, unsigned int order,
 		struct page *last = page + nr;
 
 		split_page_owner(page, order);
-		split_page_memcg(page, order);
+		split_page_memcg(page, order, 0);
 		while (page < --last)
 			set_page_refcounted(last);
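A quick standalone sanity check (not part of the patch; old_order = 9 is used here only as an example, matching a 2MB THP with 4KB base pages) that new_order == 0 reduces to the old behaviour: new_nr is 1, so the new loop strides over every tail page exactly as the old "for (i = 1; i < nr; i++)" did, and old_nr / new_nr - 1 equals the old "nr - 1" reference count:

	#include <assert.h>

	int main(void)
	{
		unsigned int old_order = 9, new_order = 0; /* example orders */
		unsigned int old_nr = 1u << old_order;     /* 512 */
		unsigned int new_nr = 1u << new_order;     /* 1 */
		unsigned int visited = 0;

		/* New loop with new_order == 0: one step per tail page. */
		for (unsigned int i = new_nr; i < old_nr; i += new_nr)
			visited++;

		assert(visited == old_nr - 1);             /* 511 tail pages */
		assert(old_nr / new_nr - 1 == old_nr - 1); /* old "nr - 1" count */
		return 0;
	}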