The quilt patch titled
     Subject: mm/page_alloc: ensure try_alloc_pages() plays well with unaccepted memory
has been removed from the -mm tree.  Its filename was
     mm-page_alloc-ensure-try_alloc_pages-plays-well-with-unaccepted-memory.patch
This patch was dropped because it was merged into the mm-hotfixes-stable branch of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
------------------------------------------------------
From: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Subject: mm/page_alloc: ensure try_alloc_pages() plays well with unaccepted memory
Date: Tue, 6 May 2025 14:25:08 +0300
try_alloc_pages() will not attempt to allocate memory if the system has *any* unaccepted memory. Since memory is accepted only as needed, unaccepted memory can remain in the system indefinitely, causing the interface to always fail.
Rather than immediately giving up, attempt to use already accepted memory on free lists.
Pass 'alloc_flags' to cond_accept_memory() and do not accept new memory for ALLOC_TRYLOCK requests.
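As a rough illustration of the intended behaviour, the sketch below models the reworked check in plain C. It is not kernel code: the ALLOC_TRYLOCK value, struct zone_model and cond_accept_memory_model() are simplified stand-ins; only the decision shape mirrors the patch, i.e. a trylock request skips acceptance (and so never needs the lock) but is still free to use pages that were accepted earlier.

/*
 * Standalone model of the reworked policy, not kernel code: zone_model,
 * the flag value and cond_accept_memory_model() are simplified stand-ins.
 */
#include <stdbool.h>
#include <stdio.h>

#define ALLOC_TRYLOCK 0x400		/* stand-in value, not the kernel's */

struct zone_model {
	bool has_unaccepted_pages;	/* models a non-empty zone->unaccepted_pages list */
};

/*
 * Mirrors the shape of the patched cond_accept_memory(): trylock callers
 * bail out before the point where a lock would be needed to accept memory,
 * while ordinary allocations may still accept more.
 */
static bool cond_accept_memory_model(const struct zone_model *zone,
				     int alloc_flags)
{
	if (!zone->has_unaccepted_pages)
		return false;
	if (alloc_flags & ALLOC_TRYLOCK)
		return false;	/* skip acceptance; use already accepted pages */
	return true;		/* a real kernel would accept a chunk here */
}

int main(void)
{
	struct zone_model zone = { .has_unaccepted_pages = true };

	printf("trylock request accepts memory:  %d\n",
	       cond_accept_memory_model(&zone, ALLOC_TRYLOCK));
	printf("ordinary request accepts memory: %d\n",
	       cond_accept_memory_model(&zone, 0));
	return 0;
}

Running the model prints 0 for the trylock case and 1 for the ordinary case, matching the new early return in the hunk for cond_accept_memory() below.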
Found via code inspection - only BPF uses this interface at present, and it is unclear whether the problem can be triggered at runtime.
Link: https://lkml.kernel.org/r/20250506112509.905147-2-kirill.shutemov@linux.inte...
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Fixes: 97769a53f117 ("mm, bpf: Introduce try_alloc_pages() for opportunistic page allocation")
Cc: Alexei Starovoitov <ast@kernel.org>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Brendan Jackman <jackmanb@google.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: stable@vger.kernel.org
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
---
 mm/page_alloc.c |   28 +++++++++++++++-------------
 1 file changed, 15 insertions(+), 13 deletions(-)
--- a/mm/page_alloc.c~mm-page_alloc-ensure-try_alloc_pages-plays-well-with-unaccepted-memory
+++ a/mm/page_alloc.c
@@ -290,7 +290,8 @@ EXPORT_SYMBOL(nr_online_nodes);
 #endif
 
 static bool page_contains_unaccepted(struct page *page, unsigned int order);
-static bool cond_accept_memory(struct zone *zone, unsigned int order);
+static bool cond_accept_memory(struct zone *zone, unsigned int order,
+			       int alloc_flags);
 static bool __free_unaccepted(struct page *page);
 
 int page_group_by_mobility_disabled __read_mostly;
@@ -3611,7 +3612,7 @@ retry:
 			}
 		}
 
-		cond_accept_memory(zone, order);
+		cond_accept_memory(zone, order, alloc_flags);
 
 		/*
 		 * Detect whether the number of free pages is below high
@@ -3638,7 +3639,7 @@ check_alloc_wmark:
 				       gfp_mask)) {
 			int ret;
 
-			if (cond_accept_memory(zone, order))
+			if (cond_accept_memory(zone, order, alloc_flags))
 				goto try_this_zone;
 
 			/*
@@ -3691,7 +3692,7 @@ try_this_zone:
 
 			return page;
 		} else {
-			if (cond_accept_memory(zone, order))
+			if (cond_accept_memory(zone, order, alloc_flags))
 				goto try_this_zone;
 
 			/* Try again if zone has deferred pages */
@@ -4844,7 +4845,7 @@ unsigned long alloc_pages_bulk_noprof(gf
 			goto failed;
 		}
 
-		cond_accept_memory(zone, 0);
+		cond_accept_memory(zone, 0, alloc_flags);
 retry_this_zone:
 		mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK) + nr_pages;
 		if (zone_watermark_fast(zone, 0, mark,
@@ -4853,7 +4854,7 @@ retry_this_zone:
 			break;
 		}
 
-		if (cond_accept_memory(zone, 0))
+		if (cond_accept_memory(zone, 0, alloc_flags))
 			goto retry_this_zone;
 
 		/* Try again if zone has deferred pages */
@@ -7281,7 +7282,8 @@ static inline bool has_unaccepted_memory
 	return static_branch_unlikely(&zones_with_unaccepted_pages);
 }
 
-static bool cond_accept_memory(struct zone *zone, unsigned int order)
+static bool cond_accept_memory(struct zone *zone, unsigned int order,
+			       int alloc_flags)
 {
 	long to_accept, wmark;
 	bool ret = false;
@@ -7292,6 +7294,10 @@ static bool cond_accept_memory(struct zo
 	if (list_empty(&zone->unaccepted_pages))
 		return false;
 
+	/* Bailout, since try_to_accept_memory_one() needs to take a lock */
+	if (alloc_flags & ALLOC_TRYLOCK)
+		return false;
+
 	wmark = promo_wmark_pages(zone);
 
 	/*
@@ -7348,7 +7354,8 @@ static bool page_contains_unaccepted(str
 	return false;
 }
 
-static bool cond_accept_memory(struct zone *zone, unsigned int order)
+static bool cond_accept_memory(struct zone *zone, unsigned int order,
+			       int alloc_flags)
 {
 	return false;
 }
@@ -7419,11 +7426,6 @@ struct page *try_alloc_pages_noprof(int
 	if (!pcp_allowed_order(order))
 		return NULL;
 
-#ifdef CONFIG_UNACCEPTED_MEMORY
-	/* Bailout, since try_to_accept_memory_one() needs to take a lock */
-	if (has_unaccepted_memory())
-		return NULL;
-#endif
 	/* Bailout, since _deferred_grow_zone() needs to take a lock */
 	if (deferred_pages_enabled())
 		return NULL;
_
Patches currently in -mm which might be from kirill.shutemov@linux.intel.com are