mm/slub: move free_debug_processing() further
In the following patch, the function free_debug_processing() will be
calling add_partial(), remove_partial() and discard_slab(), so move it
below their definitions to avoid forward declarations. To make review
easier, separate the move from functional changes.
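
(For illustration only, not part of this patch: a minimal sketch of how
definition order avoids a forward declaration in C. The names helper()
and caller() are hypothetical.)

	/* callee defined first... */
	static void helper(void)
	{
	}

	/* ...so the caller below needs no forward declaration of helper() */
	static void caller(void)
	{
		helper();
	}

If caller() were defined above helper(), a prototype such as
"static void helper(void);" would have to appear before it.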

Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Reviewed-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Acked-by: David Rientjes <rientjes@google.com>
Vlastimil Babka committed Aug 25, 2022
1 parent 1c23f9e commit a579b05
114 changes: 57 additions & 57 deletions mm/slub.c
@@ -1385,63 +1385,6 @@ static inline int free_consistency_checks(struct kmem_cache *s,
	return 1;
}

/* Supports checking bulk free of a constructed freelist */
static noinline int free_debug_processing(
	struct kmem_cache *s, struct slab *slab,
	void *head, void *tail, int bulk_cnt,
	unsigned long addr)
{
	struct kmem_cache_node *n = get_node(s, slab_nid(slab));
	void *object = head;
	int cnt = 0;
	unsigned long flags, flags2;
	int ret = 0;
	depot_stack_handle_t handle = 0;

	if (s->flags & SLAB_STORE_USER)
		handle = set_track_prepare();

	spin_lock_irqsave(&n->list_lock, flags);
	slab_lock(slab, &flags2);

	if (s->flags & SLAB_CONSISTENCY_CHECKS) {
		if (!check_slab(s, slab))
			goto out;
	}

next_object:
	cnt++;

	if (s->flags & SLAB_CONSISTENCY_CHECKS) {
		if (!free_consistency_checks(s, slab, object, addr))
			goto out;
	}

	if (s->flags & SLAB_STORE_USER)
		set_track_update(s, object, TRACK_FREE, addr, handle);
	trace(s, slab, object, 0);
	/* Freepointer not overwritten by init_object(), SLAB_POISON moved it */
	init_object(s, object, SLUB_RED_INACTIVE);

	/* Reached end of constructed freelist yet? */
	if (object != tail) {
		object = get_freepointer(s, object);
		goto next_object;
	}
	ret = 1;

out:
	if (cnt != bulk_cnt)
		slab_err(s, slab, "Bulk freelist count(%d) invalid(%d)\n",
			 bulk_cnt, cnt);

	slab_unlock(slab, &flags2);
	spin_unlock_irqrestore(&n->list_lock, flags);
	if (!ret)
		slab_fix(s, "Object at 0x%p not freed", object);
	return ret;
}

/*
 * Parse a block of slub_debug options. Blocks are delimited by ';'
 *
@@ -2788,6 +2731,63 @@ static inline unsigned long node_nr_objs(struct kmem_cache_node *n)
{
	return atomic_long_read(&n->total_objects);
}

/* Supports checking bulk free of a constructed freelist */
static noinline int free_debug_processing(
	struct kmem_cache *s, struct slab *slab,
	void *head, void *tail, int bulk_cnt,
	unsigned long addr)
{
	struct kmem_cache_node *n = get_node(s, slab_nid(slab));
	void *object = head;
	int cnt = 0;
	unsigned long flags, flags2;
	int ret = 0;
	depot_stack_handle_t handle = 0;

	if (s->flags & SLAB_STORE_USER)
		handle = set_track_prepare();

	spin_lock_irqsave(&n->list_lock, flags);
	slab_lock(slab, &flags2);

	if (s->flags & SLAB_CONSISTENCY_CHECKS) {
		if (!check_slab(s, slab))
			goto out;
	}

next_object:
	cnt++;

	if (s->flags & SLAB_CONSISTENCY_CHECKS) {
		if (!free_consistency_checks(s, slab, object, addr))
			goto out;
	}

	if (s->flags & SLAB_STORE_USER)
		set_track_update(s, object, TRACK_FREE, addr, handle);
	trace(s, slab, object, 0);
	/* Freepointer not overwritten by init_object(), SLAB_POISON moved it */
	init_object(s, object, SLUB_RED_INACTIVE);

	/* Reached end of constructed freelist yet? */
	if (object != tail) {
		object = get_freepointer(s, object);
		goto next_object;
	}
	ret = 1;

out:
	if (cnt != bulk_cnt)
		slab_err(s, slab, "Bulk freelist count(%d) invalid(%d)\n",
			 bulk_cnt, cnt);

	slab_unlock(slab, &flags2);
	spin_unlock_irqrestore(&n->list_lock, flags);
	if (!ret)
		slab_fix(s, "Object at 0x%p not freed", object);
	return ret;
}
#endif /* CONFIG_SLUB_DEBUG */

#if defined(CONFIG_SLUB_DEBUG) || defined(CONFIG_SYSFS)