The patch below does not apply to the 6.14-stable tree. If someone wants it applied there, or to any other stable or longterm tree, then please email the backport, including the original git commit id to stable@vger.kernel.org.
To reproduce the conflict and resubmit, you may use the following commands:
git fetch https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/ linux-6.14.y
git checkout FETCH_HEAD
git cherry-pick -x be8250786ca94952a19ce87f98ad9906448bc9ef
# <resolve conflicts, build, test, etc.>
git commit -s
git send-email --to 'stable@vger.kernel.org' --in-reply-to '2025050521-provable-extent-4108@gregkh' --subject-prefix 'PATCH 6.14.y' HEAD^..
Possible dependencies:
thanks,
greg k-h
------------------ original commit in Linus's tree ------------------
From be8250786ca94952a19ce87f98ad9906448bc9ef Mon Sep 17 00:00:00 2001
From: Zhenhua Huang <quic_zhenhuah@quicinc.com>
Date: Mon, 21 Apr 2025 15:52:32 +0800
Subject: [PATCH] mm, slab: clean up slab->obj_exts always
When memory allocation profiling is disabled at runtime or due to an error, shutdown_mem_profiling() is called, but any slab->obj_exts that was allocated earlier remains in place. It won't be cleared by unaccount_slab() because mem_alloc_profiling_enabled() is no longer true. This is incorrect: slab->obj_exts should always be cleaned up in unaccount_slab() to avoid the following error:
[...]BUG: Bad page state in process...
..
[...]page dumped because: page still charged to cgroup
[andriy.shevchenko@linux.intel.com: fold need_slab_obj_ext() into its only user]
Fixes: 21c690a349ba ("mm: introduce slabobj_ext to support slab object extensions")
Cc: stable@vger.kernel.org
Signed-off-by: Zhenhua Huang <quic_zhenhuah@quicinc.com>
Acked-by: David Rientjes <rientjes@google.com>
Acked-by: Harry Yoo <harry.yoo@oracle.com>
Tested-by: Harry Yoo <harry.yoo@oracle.com>
Acked-by: Suren Baghdasaryan <surenb@google.com>
Link: https://patch.msgid.link/20250421075232.2165527-1-quic_zhenhuah@quicinc.com
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
diff --git a/mm/slub.c b/mm/slub.c
index dc9e729e1d26..be8b09e09d30 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2028,8 +2028,7 @@ int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
 	return 0;
 }
 
-/* Should be called only if mem_alloc_profiling_enabled() */
-static noinline void free_slab_obj_exts(struct slab *slab)
+static inline void free_slab_obj_exts(struct slab *slab)
 {
 	struct slabobj_ext *obj_exts;
 
@@ -2049,18 +2048,6 @@ static noinline void free_slab_obj_exts(struct slab *slab)
 	slab->obj_exts = 0;
 }
 
-static inline bool need_slab_obj_ext(void)
-{
-	if (mem_alloc_profiling_enabled())
-		return true;
-
-	/*
-	 * CONFIG_MEMCG creates vector of obj_cgroup objects conditionally
-	 * inside memcg_slab_post_alloc_hook. No other users for now.
-	 */
-	return false;
-}
-
 #else /* CONFIG_SLAB_OBJ_EXT */
 
 static inline void init_slab_obj_exts(struct slab *slab)
@@ -2077,11 +2064,6 @@ static inline void free_slab_obj_exts(struct slab *slab)
 {
 }
 
-static inline bool need_slab_obj_ext(void)
-{
-	return false;
-}
-
 #endif /* CONFIG_SLAB_OBJ_EXT */
 
 #ifdef CONFIG_MEM_ALLOC_PROFILING
@@ -2129,7 +2111,7 @@ __alloc_tagging_slab_alloc_hook(struct kmem_cache *s, void *object, gfp_t flags)
 static inline void
 alloc_tagging_slab_alloc_hook(struct kmem_cache *s, void *object, gfp_t flags)
 {
-	if (need_slab_obj_ext())
+	if (mem_alloc_profiling_enabled())
 		__alloc_tagging_slab_alloc_hook(s, object, flags);
 }
 
@@ -2601,8 +2583,12 @@ static __always_inline void account_slab(struct slab *slab, int order,
 static __always_inline void unaccount_slab(struct slab *slab, int order,
 					   struct kmem_cache *s)
 {
-	if (memcg_kmem_online() || need_slab_obj_ext())
-		free_slab_obj_exts(slab);
+	/*
+	 * The slab object extensions should now be freed regardless of
+	 * whether mem_alloc_profiling_enabled() or not because profiling
+	 * might have been disabled after slab->obj_exts got allocated.
+	 */
+	free_slab_obj_exts(slab);
 
 	mod_node_page_state(slab_pgdat(slab), cache_vmstat_idx(s),
 			    -(PAGE_SIZE << order));
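For anyone who wants to see the failure mode in isolation, here is a minimal user-space model of the change. This is not kernel code: struct slab, the profiling static key, and the obj_exts flag bits are reduced to plain C stand-ins, the memcg_kmem_online() half of the old condition is omitted, and the names only mirror the functions touched by the diff. It just illustrates why the old conditional call leaves slab->obj_exts populated once profiling has been disabled, while the unconditional call always cleans it up:

#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>

/* toy stand-ins for the kernel objects involved */
struct slabobj_ext { int unused; };
struct slab { struct slabobj_ext *obj_exts; };	/* in the kernel: an unsigned long with flag bits */

/* models the memory allocation profiling static key */
static bool profiling_enabled = true;

static bool mem_alloc_profiling_enabled(void)
{
	return profiling_enabled;
}

static void free_slab_obj_exts(struct slab *slab)
{
	if (!slab->obj_exts)	/* nothing was allocated: calling this is a cheap no-op */
		return;
	free(slab->obj_exts);	/* drop the extension vector */
	slab->obj_exts = NULL;	/* and clear the field */
}

/* old behaviour: only free while profiling is (still) considered enabled */
static void unaccount_slab_old(struct slab *slab)
{
	if (mem_alloc_profiling_enabled())
		free_slab_obj_exts(slab);
}

/* new behaviour: always free, regardless of the current profiling state */
static void unaccount_slab_new(struct slab *slab)
{
	free_slab_obj_exts(slab);
}

int main(void)
{
	struct slab a = { .obj_exts = malloc(sizeof(struct slabobj_ext)) };
	struct slab b = { .obj_exts = malloc(sizeof(struct slabobj_ext)) };

	/* profiling is switched off after the vectors were allocated ... */
	profiling_enabled = false;

	/* ... so the old code skips the cleanup and a.obj_exts stays set,
	 * which is what later shows up as "Bad page state ... still charged" */
	unaccount_slab_old(&a);
	unaccount_slab_new(&b);

	printf("old: obj_exts=%p  new: obj_exts=%p\n",
	       (void *)a.obj_exts, (void *)b.obj_exts);
	return 0;
}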
On Mon, May 5, 2025 at 12:55 AM <gregkh@linuxfoundation.org> wrote:
> The patch below does not apply to the 6.14-stable tree. If someone wants it applied there, or to any other stable or longterm tree, then please email the backport, including the original git commit id to stable@vger.kernel.org.
I'll work on the backport. Thanks!
On Mon, May 5, 2025 at 10:30 AM Suren Baghdasaryan <surenb@google.com> wrote:
> On Mon, May 5, 2025 at 12:55 AM <gregkh@linuxfoundation.org> wrote:
> > The patch below does not apply to the 6.14-stable tree. If someone wants it applied there, or to any other stable or longterm tree, then please email the backport, including the original git commit id to stable@vger.kernel.org.
> I'll work on the backport. Thanks!
Posted both 6.12 and 6.14 backports.
From: Zhenhua Huang <quic_zhenhuah@quicinc.com>
When memory allocation profiling is disabled at runtime or due to an error, shutdown_mem_profiling() is called, but any slab->obj_exts that was allocated earlier remains in place. It won't be cleared by unaccount_slab() because mem_alloc_profiling_enabled() is no longer true. This is incorrect: slab->obj_exts should always be cleaned up in unaccount_slab() to avoid the following error:
[...]BUG: Bad page state in process...
..
[...]page dumped because: page still charged to cgroup
[andriy.shevchenko@linux.intel.com: fold need_slab_obj_ext() into its only user]
Fixes: 21c690a349ba ("mm: introduce slabobj_ext to support slab object extensions")
Cc: stable@vger.kernel.org
Signed-off-by: Zhenhua Huang <quic_zhenhuah@quicinc.com>
Acked-by: David Rientjes <rientjes@google.com>
Acked-by: Harry Yoo <harry.yoo@oracle.com>
Tested-by: Harry Yoo <harry.yoo@oracle.com>
Acked-by: Suren Baghdasaryan <surenb@google.com>
Link: https://patch.msgid.link/20250421075232.2165527-1-quic_zhenhuah@quicinc.com
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
(cherry picked from commit be8250786ca94952a19ce87f98ad9906448bc9ef)
[surenb: fixed trivial merge conflict in alloc_tagging_slab_alloc_hook(),
 skipped inlining free_slab_obj_exts() as it's already inline in 6.14]
Signed-off-by: Suren Baghdasaryan <surenb@google.com>
---
 mm/slub.c | 27 +++++++--------------------
 1 file changed, 7 insertions(+), 20 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index 96babca6b330..87f3edf9acb8 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2025,18 +2025,6 @@ static inline void free_slab_obj_exts(struct slab *slab)
 	slab->obj_exts = 0;
 }
 
-static inline bool need_slab_obj_ext(void)
-{
-	if (mem_alloc_profiling_enabled())
-		return true;
-
-	/*
-	 * CONFIG_MEMCG creates vector of obj_cgroup objects conditionally
-	 * inside memcg_slab_post_alloc_hook. No other users for now.
-	 */
-	return false;
-}
-
 #else /* CONFIG_SLAB_OBJ_EXT */
 
 static inline void init_slab_obj_exts(struct slab *slab)
@@ -2053,11 +2041,6 @@ static inline void free_slab_obj_exts(struct slab *slab)
 {
 }
 
-static inline bool need_slab_obj_ext(void)
-{
-	return false;
-}
-
 #endif /* CONFIG_SLAB_OBJ_EXT */
 
 #ifdef CONFIG_MEM_ALLOC_PROFILING
@@ -2089,7 +2072,7 @@ prepare_slab_obj_exts_hook(struct kmem_cache *s, gfp_t flags, void *p)
 static inline void
 alloc_tagging_slab_alloc_hook(struct kmem_cache *s, void *object, gfp_t flags)
 {
-	if (need_slab_obj_ext()) {
+	if (mem_alloc_profiling_enabled()) {
 		struct slabobj_ext *obj_exts;
 
 		obj_exts = prepare_slab_obj_exts_hook(s, flags, object);
@@ -2565,8 +2548,12 @@ static __always_inline void account_slab(struct slab *slab, int order,
 static __always_inline void unaccount_slab(struct slab *slab, int order,
 					   struct kmem_cache *s)
 {
-	if (memcg_kmem_online() || need_slab_obj_ext())
-		free_slab_obj_exts(slab);
+	/*
+	 * The slab object extensions should now be freed regardless of
+	 * whether mem_alloc_profiling_enabled() or not because profiling
+	 * might have been disabled after slab->obj_exts got allocated.
+	 */
+	free_slab_obj_exts(slab);
 
 	mod_node_page_state(slab_pgdat(slab), cache_vmstat_idx(s),
 			    -(PAGE_SIZE << order));