slab: add hooks for kmemcheck
We now have SLAB support for kmemcheck! This means that it doesn't matter
whether one chooses SLAB or SLUB, or indeed whether Linus chooses to chuck
SLAB or SLUB... ;-)

Cc: Ingo Molnar <mingo@elte.hu>
Cc: Christoph Lameter <clameter@sgi.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>

[rebased for mainline inclusion]
Signed-off-by: Vegard Nossum <vegard.nossum@gmail.com>
Author:    Pekka Enberg <penberg@cs.helsinki.fi>
Committer: Vegard Nossum <vegard.nossum@gmail.com>
Date:      Jun 15, 2009
Commit:    c175eea (parent: 5a896d9)

 mm/slab.c | 20 ++++++++++++++++++--
 1 file changed, 18 insertions(+), 2 deletions(-)
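For illustration (a sketch, not taken from the patch): with these hooks in place, memory handed out by the SLAB allocator is shadow-tracked, so on a CONFIG_KMEMCHECK kernel a read of never-initialized slab memory should now be reported under SLAB just as it already was under SLUB. The module, struct, and field names below are hypothetical:

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>

struct foo {
	int a;
	int b;
};

static int __init kmemcheck_demo_init(void)
{
	struct foo *f = kmalloc(sizeof(*f), GFP_KERNEL);

	if (!f)
		return -ENOMEM;
	f->a = 1;
	/*
	 * f->b was never written. With the hooks below, SLAB-backed
	 * kmalloc() memory is shadow-tracked, so kmemcheck should flag
	 * this load as a read of uninitialized memory.
	 */
	pr_info("b = %d\n", f->b);
	kfree(f);
	return 0;
}
module_init(kmemcheck_demo_init);

MODULE_LICENSE("GPL");

Loading such a module on a kmemcheck-enabled kernel should produce a warning pointing at the pr_info() read.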
diff --git a/mm/slab.c b/mm/slab.c
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -114,6 +114,7 @@
 #include <linux/rtmutex.h>
 #include <linux/reciprocal_div.h>
 #include <linux/debugobjects.h>
+#include <linux/kmemcheck.h>
 
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
@@ -179,13 +180,13 @@
 			 SLAB_STORE_USER | \
 			 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
 			 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
-			 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE)
+			 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
 #else
 # define CREATE_MASK	(SLAB_HWCACHE_ALIGN | \
 			 SLAB_CACHE_DMA | \
 			 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
 			 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
-			 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE)
+			 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
 #endif
 
 /*
@@ -1624,6 +1625,10 @@ static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 			NR_SLAB_UNRECLAIMABLE, nr_pages);
 	for (i = 0; i < nr_pages; i++)
 		__SetPageSlab(page + i);
+
+	if (kmemcheck_enabled && !(cachep->flags & SLAB_NOTRACK))
+		kmemcheck_alloc_shadow(cachep, flags, nodeid, page, cachep->gfporder);
+
 	return page_address(page);
 }
 
@@ -1636,6 +1641,9 @@ static void kmem_freepages(struct kmem_cache *cachep, void *addr)
 	struct page *page = virt_to_page(addr);
 	const unsigned long nr_freed = i;
 
+	if (kmemcheck_page_is_tracked(page))
+		kmemcheck_free_shadow(cachep, page, cachep->gfporder);
+
 	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
 		sub_zone_page_state(page_zone(page),
 				NR_SLAB_RECLAIMABLE, nr_freed);
@@ -3309,6 +3317,9 @@ __cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
 	kmemleak_alloc_recursive(ptr, obj_size(cachep), 1, cachep->flags,
 				 flags);
 
+	if (likely(ptr))
+		kmemcheck_slab_alloc(cachep, flags, ptr, obj_size(cachep));
+
 	if (unlikely((flags & __GFP_ZERO) && ptr))
 		memset(ptr, 0, obj_size(cachep));
 
@@ -3367,6 +3378,9 @@ __cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller)
 				 flags);
 	prefetchw(objp);
 
+	if (likely(objp))
+		kmemcheck_slab_alloc(cachep, flags, objp, obj_size(cachep));
+
 	if (unlikely((flags & __GFP_ZERO) && objp))
 		memset(objp, 0, obj_size(cachep));
 
@@ -3483,6 +3497,8 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp)
 	kmemleak_free_recursive(objp, cachep->flags);
 	objp = cache_free_debugcheck(cachep, objp, __builtin_return_address(0));
 
+	kmemcheck_slab_free(cachep, objp, obj_size(cachep));
+
 	/*
 	 * Skip calling cache_free_alien() when the platform is not numa.
 	 * This will avoid cache misses that happen while accessing slabp (which

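A usage sketch, not part of the patch: since SLAB_NOTRACK is now accepted in CREATE_MASK, kmem_cache_create() can be passed that flag to opt a cache out of kmemcheck tracking entirely; the kmemcheck_enabled check added to kmem_getpages() then skips shadow allocation for it. The cache and struct names here are made up:

#include <linux/module.h>
#include <linux/slab.h>

struct my_obj {
	unsigned long state;
};

static struct kmem_cache *my_cachep;

static int __init my_cache_init(void)
{
	/*
	 * SLAB_NOTRACK asks kmemcheck not to allocate shadow pages for
	 * this cache at all; its objects are never tracked.
	 */
	my_cachep = kmem_cache_create("my_obj_cache", sizeof(struct my_obj),
				      0, SLAB_NOTRACK, NULL);
	if (!my_cachep)
		return -ENOMEM;
	return 0;
}
module_init(my_cache_init);

MODULE_LICENSE("GPL");

For a single allocation rather than a whole cache, the companion __GFP_NOTRACK GFP flag provides the same opt-out.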