slub: Extract hooks for memory checkers from hotpaths
Extract the code that memory checkers and other verification tools use from
the hotpaths. This makes it easier to add new checkers and reduces the
disturbance of the hotpaths.

Signed-off-by: Christoph Lameter <cl@linux-foundation.org>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
Christoph Lameter authored and Pekka Enberg committed Oct 2, 2010
1 parent 51df114 commit c016b0b
Showing 1 changed file: mm/slub.c (38 additions, 11 deletions)
@@ -790,6 +790,37 @@ static void trace(struct kmem_cache *s, struct page *page, void *object,
 	}
 }
 
+/*
+ * Hooks for other subsystems that check memory allocations. In a typical
+ * production configuration these hooks all should produce no code at all.
+ */
+static inline int slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags)
+{
+	lockdep_trace_alloc(flags);
+	might_sleep_if(flags & __GFP_WAIT);
+
+	return should_failslab(s->objsize, flags, s->flags);
+}
+
+static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags, void *object)
+{
+	kmemcheck_slab_alloc(s, flags, object, s->objsize);
+	kmemleak_alloc_recursive(object, s->objsize, 1, s->flags, flags);
+}
+
+static inline void slab_free_hook(struct kmem_cache *s, void *x)
+{
+	kmemleak_free_recursive(x, s->flags);
+}
+
+static inline void slab_free_hook_irq(struct kmem_cache *s, void *object)
+{
+	kmemcheck_slab_free(s, object, s->objsize);
+	debug_check_no_locks_freed(object, s->objsize);
+	if (!(s->flags & SLAB_DEBUG_OBJECTS))
+		debug_check_no_obj_freed(object, s->objsize);
+}
+
 /*
  * Tracking of fully allocated slabs for debugging purposes.
  */
@@ -1696,10 +1727,7 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
 
 	gfpflags &= gfp_allowed_mask;
 
-	lockdep_trace_alloc(gfpflags);
-	might_sleep_if(gfpflags & __GFP_WAIT);
-
-	if (should_failslab(s->objsize, gfpflags, s->flags))
+	if (slab_pre_alloc_hook(s, gfpflags))
 		return NULL;
 
 	local_irq_save(flags);
@@ -1718,8 +1746,7 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
 	if (unlikely(gfpflags & __GFP_ZERO) && object)
 		memset(object, 0, s->objsize);
 
-	kmemcheck_slab_alloc(s, gfpflags, object, s->objsize);
-	kmemleak_alloc_recursive(object, s->objsize, 1, s->flags, gfpflags);
+	slab_post_alloc_hook(s, gfpflags, object);
 
 	return object;
 }
@@ -1849,13 +1876,13 @@ static __always_inline void slab_free(struct kmem_cache *s,
 	struct kmem_cache_cpu *c;
 	unsigned long flags;
 
-	kmemleak_free_recursive(x, s->flags);
+	slab_free_hook(s, x);
+
 	local_irq_save(flags);
 	c = __this_cpu_ptr(s->cpu_slab);
-	kmemcheck_slab_free(s, object, s->objsize);
-	debug_check_no_locks_freed(object, s->objsize);
-	if (!(s->flags & SLAB_DEBUG_OBJECTS))
-		debug_check_no_obj_freed(object, s->objsize);
+
+	slab_free_hook_irq(s, x);
+
 	if (likely(page == c->page && c->node >= 0)) {
 		set_freepointer(s, object, c->freelist);
 		c->freelist = object;
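
A note on the comment introduced above ("these hooks all should produce no code at all"): each checker's header supplies an empty static inline stub when its config option is disabled, so the extracted hooks, themselves static inline, collapse to nothing at the call site. Below is a minimal, self-contained sketch of that pattern; CONFIG_MY_CHECKER, my_check_alloc() and my_post_alloc_hook() are hypothetical stand-ins for real options and calls such as CONFIG_KMEMCHECK and kmemcheck_slab_alloc(), not names from this patch.

#include <stddef.h>

/* Hypothetical checker interface: a real implementation exists only
 * when the checker is configured in. */
#ifdef CONFIG_MY_CHECKER
void my_check_alloc(void *object, size_t size);	/* defined by the checker */
#else
static inline void my_check_alloc(void *object, size_t size)
{
	/* Empty stub: the compiler inlines this and eliminates the call. */
}
#endif

/*
 * The hook mirrors slab_post_alloc_hook() above: it is static inline,
 * so when every checker it calls is stubbed out, it contributes no
 * instructions to the hot path.
 */
static inline void my_post_alloc_hook(void *object, size_t size)
{
	my_check_alloc(object, size);
}

The split into slab_free_hook() and slab_free_hook_irq() preserves the pre-patch ordering visible in the last hunk: kmemcheck_slab_free() and the debug_check_* calls previously ran after local_irq_save(), so the _irq variant is invoked only inside the interrupts-disabled section.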