Skip to content

Commit

Permalink
mm, slab: clean up slab->obj_exts always
Browse files Browse the repository at this point in the history
When memory allocation profiling is disabled at runtime or due to an
error, shutdown_mem_profiling() is called: a slab->obj_exts that was
previously allocated remains allocated.
It won't be cleared by unaccount_slab() because
mem_alloc_profiling_enabled() is no longer true. This is incorrect;
slab->obj_exts should always be cleaned up in unaccount_slab() to avoid
the following error:

[...]BUG: Bad page state in process...
..
[...]page dumped because: page still charged to cgroup

[andriy.shevchenko@linux.intel.com: fold need_slab_obj_ext() into its only user]
Fixes: 21c690a ("mm: introduce slabobj_ext to support slab object extensions")
Cc: stable@vger.kernel.org
Signed-off-by: Zhenhua Huang <quic_zhenhuah@quicinc.com>
Acked-by: David Rientjes <rientjes@google.com>
Acked-by: Harry Yoo <harry.yoo@oracle.com>
Tested-by: Harry Yoo <harry.yoo@oracle.com>
Acked-by: Suren Baghdasaryan <surenb@google.com>
Link: https://patch.msgid.link/20250421075232.2165527-1-quic_zhenhuah@quicinc.com
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
  • Loading branch information
Zhenhua Huang authored and Vlastimil Babka committed Apr 24, 2025
1 parent d2f5819 commit be82507
Showing 1 changed file with 8 additions and 22 deletions.
30 changes: 8 additions & 22 deletions mm/slub.c
Original file line number Diff line number Diff line change
Expand Up @@ -2028,8 +2028,7 @@ int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
return 0;
}

/* Should be called only if mem_alloc_profiling_enabled() */
static noinline void free_slab_obj_exts(struct slab *slab)
static inline void free_slab_obj_exts(struct slab *slab)
{
struct slabobj_ext *obj_exts;

Expand All @@ -2049,18 +2048,6 @@ static noinline void free_slab_obj_exts(struct slab *slab)
slab->obj_exts = 0;
}

static inline bool need_slab_obj_ext(void)
{
if (mem_alloc_profiling_enabled())
return true;

/*
* CONFIG_MEMCG creates vector of obj_cgroup objects conditionally
* inside memcg_slab_post_alloc_hook. No other users for now.
*/
return false;
}

#else /* CONFIG_SLAB_OBJ_EXT */

static inline void init_slab_obj_exts(struct slab *slab)
Expand All @@ -2077,11 +2064,6 @@ static inline void free_slab_obj_exts(struct slab *slab)
{
}

static inline bool need_slab_obj_ext(void)
{
return false;
}

#endif /* CONFIG_SLAB_OBJ_EXT */

#ifdef CONFIG_MEM_ALLOC_PROFILING
Expand Down Expand Up @@ -2129,7 +2111,7 @@ __alloc_tagging_slab_alloc_hook(struct kmem_cache *s, void *object, gfp_t flags)
static inline void
alloc_tagging_slab_alloc_hook(struct kmem_cache *s, void *object, gfp_t flags)
{
if (need_slab_obj_ext())
if (mem_alloc_profiling_enabled())
__alloc_tagging_slab_alloc_hook(s, object, flags);
}

Expand Down Expand Up @@ -2601,8 +2583,12 @@ static __always_inline void account_slab(struct slab *slab, int order,
static __always_inline void unaccount_slab(struct slab *slab, int order,
struct kmem_cache *s)
{
if (memcg_kmem_online() || need_slab_obj_ext())
free_slab_obj_exts(slab);
/*
* The slab object extensions should now be freed regardless of
* whether mem_alloc_profiling_enabled() or not because profiling
* might have been disabled after slab->obj_exts got allocated.
*/
free_slab_obj_exts(slab);

mod_node_page_state(slab_pgdat(slab), cache_vmstat_idx(s),
-(PAGE_SIZE << order));
Expand Down

0 comments on commit be82507

Please sign in to comment.