From 01db0e1a48345aa1937f3bdfc7c7108d03ebcf7e Mon Sep 17 00:00:00 2001
From: Zhenhua Huang
Date: Mon, 21 Apr 2025 15:52:32 +0800
Subject: [PATCH] mm, slab: clean up slab->obj_exts always

commit be8250786ca94952a19ce87f98ad9906448bc9ef upstream.

When memory allocation profiling is disabled at runtime or due to an
error, shutdown_mem_profiling() is called: the slab->obj_exts that was
previously allocated remains. It won't be cleared by unaccount_slab()
because mem_alloc_profiling_enabled() is no longer true. This is
incorrect; slab->obj_exts should always be cleaned up in
unaccount_slab() to avoid the following error:

[...]BUG: Bad page state in process...
..
[...]page dumped because: page still charged to cgroup

[andriy.shevchenko@linux.intel.com: fold need_slab_obj_ext() into its only user]
Fixes: 21c690a349ba ("mm: introduce slabobj_ext to support slab object extensions")
Cc: stable@vger.kernel.org
Signed-off-by: Zhenhua Huang
Acked-by: David Rientjes
Acked-by: Harry Yoo
Tested-by: Harry Yoo
Acked-by: Suren Baghdasaryan
Link: https://patch.msgid.link/20250421075232.2165527-1-quic_zhenhuah@quicinc.com
Signed-off-by: Vlastimil Babka
[surenb: fixed trivial merge conflict in alloc_tagging_slab_alloc_hook(),
 skipped inlining free_slab_obj_exts() as it's already inline in 6.14]
Signed-off-by: Suren Baghdasaryan
Signed-off-by: Greg Kroah-Hartman
---
 mm/slub.c | 27 +++++++--------------------
 1 file changed, 7 insertions(+), 20 deletions(-)

diff --git a/mm/slub.c b/mm/slub.c
index 96babca6b330..87f3edf9acb8 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2025,18 +2025,6 @@ static inline void free_slab_obj_exts(struct slab *slab)
 	slab->obj_exts = 0;
 }
 
-static inline bool need_slab_obj_ext(void)
-{
-	if (mem_alloc_profiling_enabled())
-		return true;
-
-	/*
-	 * CONFIG_MEMCG creates vector of obj_cgroup objects conditionally
-	 * inside memcg_slab_post_alloc_hook. No other users for now.
-	 */
-	return false;
-}
-
 #else /* CONFIG_SLAB_OBJ_EXT */
 
 static inline void init_slab_obj_exts(struct slab *slab)
@@ -2053,11 +2041,6 @@ static inline void free_slab_obj_exts(struct slab *slab)
 {
 }
 
-static inline bool need_slab_obj_ext(void)
-{
-	return false;
-}
-
 #endif /* CONFIG_SLAB_OBJ_EXT */
 
 #ifdef CONFIG_MEM_ALLOC_PROFILING
@@ -2089,7 +2072,7 @@ prepare_slab_obj_exts_hook(struct kmem_cache *s, gfp_t flags, void *p)
 static inline void
 alloc_tagging_slab_alloc_hook(struct kmem_cache *s, void *object, gfp_t flags)
 {
-	if (need_slab_obj_ext()) {
+	if (mem_alloc_profiling_enabled()) {
 		struct slabobj_ext *obj_exts;
 
 		obj_exts = prepare_slab_obj_exts_hook(s, flags, object);
@@ -2565,8 +2548,12 @@ static __always_inline void account_slab(struct slab *slab, int order,
 static __always_inline void unaccount_slab(struct slab *slab, int order,
 					   struct kmem_cache *s)
 {
-	if (memcg_kmem_online() || need_slab_obj_ext())
-		free_slab_obj_exts(slab);
+	/*
+	 * The slab object extensions should now be freed regardless of
+	 * whether mem_alloc_profiling_enabled() or not because profiling
+	 * might have been disabled after slab->obj_exts got allocated.
+	 */
+	free_slab_obj_exts(slab);
 
 	mod_node_page_state(slab_pgdat(slab), cache_vmstat_idx(s),
 			    -(PAGE_SIZE << order));