kmemtrace: SLAB hooks.
This adds hooks for the SLAB allocator, to allow tracing with kmemtrace.

We also convert some inline functions to __always_inline to make sure
_RET_IP_, which expands to __builtin_return_address(0), always works
as expected.

Signed-off-by: Eduard - Gabriel Munteanu <eduard.munteanu@linux360.ro>
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
Eduard - Gabriel Munteanu authored and Pekka Enberg committed Dec 29, 2008
1 parent aa46a7e commit 3655575
Showing 2 changed files with 123 additions and 16 deletions.
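
Why the __always_inline conversions matter: plain "inline" is only a hint, and if gcc decides to emit one of these wrappers out of line, __builtin_return_address(0) resolves to a location inside the wrapper rather than at the real allocation site, so every trace event would report the same meaningless call site. A minimal userspace sketch of the effect (not kernel code; the __always_inline macro is spelled out here using the attribute the kernel's definition expands to):

	#include <stdio.h>

	#define __always_inline inline __attribute__((__always_inline__))

	static __always_inline void *call_site(void)
	{
		/* Evaluated in whatever stack frame this body is inlined into. */
		return __builtin_return_address(0);
	}

	static void *wrapper(void)
	{
		return call_site();	/* inlined: reports wrapper's own caller */
	}

	int main(void)
	{
		printf("call site seen by a tracer: %p\n", wrapper());
		return 0;
	}

With the attribute in place, the printed address falls inside main(), the true call site; an uninlined call_site() would instead report an address inside wrapper(), which is useless to a tracer.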
68 changes: 59 additions & 9 deletions include/linux/slab_def.h
@@ -14,6 +14,7 @@
 #include <asm/page.h>		/* kmalloc_sizes.h needs PAGE_SIZE */
 #include <asm/cache.h>		/* kmalloc_sizes.h needs L1_CACHE_BYTES */
 #include <linux/compiler.h>
+#include <linux/kmemtrace.h>
 
 /* Size description struct for general caches. */
 struct cache_sizes {
@@ -28,8 +29,26 @@ extern struct cache_sizes malloc_sizes[];
 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
 void *__kmalloc(size_t size, gfp_t flags);
 
-static inline void *kmalloc(size_t size, gfp_t flags)
+#ifdef CONFIG_KMEMTRACE
+extern void *kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags);
+extern size_t slab_buffer_size(struct kmem_cache *cachep);
+#else
+static __always_inline void *
+kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags)
+{
+	return kmem_cache_alloc(cachep, flags);
+}
+static inline size_t slab_buffer_size(struct kmem_cache *cachep)
+{
+	return 0;
+}
+#endif
+
+static __always_inline void *kmalloc(size_t size, gfp_t flags)
 {
+	struct kmem_cache *cachep;
+	void *ret;
+
 	if (__builtin_constant_p(size)) {
 		int i = 0;
 
@@ -50,10 +69,17 @@ static inline void *kmalloc(size_t size, gfp_t flags)
 found:
 #ifdef CONFIG_ZONE_DMA
 		if (flags & GFP_DMA)
-			return kmem_cache_alloc(malloc_sizes[i].cs_dmacachep,
-						flags);
+			cachep = malloc_sizes[i].cs_dmacachep;
+		else
 #endif
-		return kmem_cache_alloc(malloc_sizes[i].cs_cachep, flags);
+			cachep = malloc_sizes[i].cs_cachep;
+
+		ret = kmem_cache_alloc_notrace(cachep, flags);
+
+		kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC, _THIS_IP_, ret,
+				     size, slab_buffer_size(cachep), flags);
+
+		return ret;
 	}
 	return __kmalloc(size, flags);
 }
@@ -62,8 +88,25 @@ static inline void *kmalloc(size_t size, gfp_t flags)
 extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
 extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
 
-static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
+#ifdef CONFIG_KMEMTRACE
+extern void *kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
+					   gfp_t flags,
+					   int nodeid);
+#else
+static __always_inline void *
+kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
+			      gfp_t flags,
+			      int nodeid)
+{
+	return kmem_cache_alloc_node(cachep, flags, nodeid);
+}
+#endif
+
+static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 {
+	struct kmem_cache *cachep;
+	void *ret;
+
 	if (__builtin_constant_p(size)) {
 		int i = 0;
 
@@ -84,11 +127,18 @@ static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 found:
 #ifdef CONFIG_ZONE_DMA
 		if (flags & GFP_DMA)
-			return kmem_cache_alloc_node(malloc_sizes[i].cs_dmacachep,
-						     flags, node);
+			cachep = malloc_sizes[i].cs_dmacachep;
+		else
 #endif
-		return kmem_cache_alloc_node(malloc_sizes[i].cs_cachep,
-					     flags, node);
+			cachep = malloc_sizes[i].cs_cachep;
+
+		ret = kmem_cache_alloc_node_notrace(cachep, flags, node);
+
+		kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC, _THIS_IP_,
+					  ret, size, slab_buffer_size(cachep),
+					  flags, node);
+
+		return ret;
 	}
 	return __kmalloc_node(size, flags, node);
 }
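
For reference, the kmemtrace_mark_*() calls and KMEMTRACE_TYPE_* ids used above come from the new <linux/kmemtrace.h>, which is not part of this diff. The sketch below shows the shape of that interface as recalled from the kmemtrace series, so treat the exact signatures as assumptions; the point to take away is that with CONFIG_KMEMTRACE disabled the markers are empty inlines, which is why the kmalloc() fast path can call them unconditionally:

	enum kmemtrace_type_id {
		KMEMTRACE_TYPE_KMALLOC = 0,	/* kmalloc() / kfree() */
		KMEMTRACE_TYPE_CACHE,		/* kmem_cache_alloc() / kmem_cache_free() */
		KMEMTRACE_TYPE_PAGES,		/* page allocations */
	};

	#ifdef CONFIG_KMEMTRACE
	extern void kmemtrace_mark_alloc_node(enum kmemtrace_type_id type_id,
					      unsigned long call_site,
					      const void *ptr,
					      size_t bytes_req,
					      size_t bytes_alloc,
					      gfp_t gfp_flags,
					      int node);
	extern void kmemtrace_mark_free(enum kmemtrace_type_id type_id,
					unsigned long call_site,
					const void *ptr);
	#else
	static inline void kmemtrace_mark_alloc_node(enum kmemtrace_type_id type_id,
						     unsigned long call_site,
						     const void *ptr,
						     size_t bytes_req,
						     size_t bytes_alloc,
						     gfp_t gfp_flags,
						     int node)
	{
	}
	static inline void kmemtrace_mark_free(enum kmemtrace_type_id type_id,
					       unsigned long call_site,
					       const void *ptr)
	{
	}
	#endif

	/* The non-node variant just forwards with node == -1. */
	static inline void kmemtrace_mark_alloc(enum kmemtrace_type_id type_id,
						unsigned long call_site,
						const void *ptr,
						size_t bytes_req,
						size_t bytes_alloc,
						gfp_t gfp_flags)
	{
		kmemtrace_mark_alloc_node(type_id, call_site, ptr,
					  bytes_req, bytes_alloc, gfp_flags, -1);
	}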
71 changes: 64 additions & 7 deletions mm/slab.c
@@ -112,6 +112,7 @@
 #include	<linux/rtmutex.h>
 #include	<linux/reciprocal_div.h>
 #include	<linux/debugobjects.h>
+#include	<linux/kmemtrace.h>
 
 #include	<asm/cacheflush.h>
 #include	<asm/tlbflush.h>
@@ -568,6 +569,14 @@ static void **dbg_userword(struct kmem_cache *cachep, void *objp)

 #endif
 
+#ifdef CONFIG_KMEMTRACE
+size_t slab_buffer_size(struct kmem_cache *cachep)
+{
+	return cachep->buffer_size;
+}
+EXPORT_SYMBOL(slab_buffer_size);
+#endif
+
 /*
  * Do not go above this order unless 0 objects fit into the slab.
  */
@@ -3613,10 +3622,23 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp)
  */
 void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
 {
-	return __cache_alloc(cachep, flags, __builtin_return_address(0));
+	void *ret = __cache_alloc(cachep, flags, __builtin_return_address(0));
+
+	kmemtrace_mark_alloc(KMEMTRACE_TYPE_CACHE, _RET_IP_, ret,
+			     obj_size(cachep), cachep->buffer_size, flags);
+
+	return ret;
 }
 EXPORT_SYMBOL(kmem_cache_alloc);
 
+#ifdef CONFIG_KMEMTRACE
+void *kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags)
+{
+	return __cache_alloc(cachep, flags, __builtin_return_address(0));
+}
+EXPORT_SYMBOL(kmem_cache_alloc_notrace);
+#endif
+
 /**
  * kmem_ptr_validate - check if an untrusted pointer might be a slab entry.
  * @cachep: the cache we're checking against
@@ -3661,23 +3683,47 @@ int kmem_ptr_validate(struct kmem_cache *cachep, const void *ptr)
 #ifdef CONFIG_NUMA
 void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 {
-	return __cache_alloc_node(cachep, flags, nodeid,
-			__builtin_return_address(0));
+	void *ret = __cache_alloc_node(cachep, flags, nodeid,
+				       __builtin_return_address(0));
+
+	kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_CACHE, _RET_IP_, ret,
+				  obj_size(cachep), cachep->buffer_size,
+				  flags, nodeid);
+
+	return ret;
 }
 EXPORT_SYMBOL(kmem_cache_alloc_node);
 
+#ifdef CONFIG_KMEMTRACE
+void *kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
+				    gfp_t flags,
+				    int nodeid)
+{
+	return __cache_alloc_node(cachep, flags, nodeid,
+				  __builtin_return_address(0));
+}
+EXPORT_SYMBOL(kmem_cache_alloc_node_notrace);
+#endif
+
 static __always_inline void *
 __do_kmalloc_node(size_t size, gfp_t flags, int node, void *caller)
 {
 	struct kmem_cache *cachep;
+	void *ret;
 
 	cachep = kmem_find_general_cachep(size, flags);
 	if (unlikely(ZERO_OR_NULL_PTR(cachep)))
 		return cachep;
-	return kmem_cache_alloc_node(cachep, flags, node);
+	ret = kmem_cache_alloc_node_notrace(cachep, flags, node);
+
+	kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC,
+				  (unsigned long) caller, ret,
+				  size, cachep->buffer_size, flags, node);
+
+	return ret;
 }
 
-#ifdef CONFIG_DEBUG_SLAB
+#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_KMEMTRACE)
 void *__kmalloc_node(size_t size, gfp_t flags, int node)
 {
 	return __do_kmalloc_node(size, flags, node,
@@ -3710,6 +3756,7 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
 					  void *caller)
 {
 	struct kmem_cache *cachep;
+	void *ret;
 
 	/* If you want to save a few bytes .text space: replace
 	 * __ with kmem_.
@@ -3719,11 +3766,17 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
 	cachep = __find_general_cachep(size, flags);
 	if (unlikely(ZERO_OR_NULL_PTR(cachep)))
 		return cachep;
-	return __cache_alloc(cachep, flags, caller);
+	ret = __cache_alloc(cachep, flags, caller);
+
+	kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC,
+			     (unsigned long) caller, ret,
+			     size, cachep->buffer_size, flags);
+
+	return ret;
 }
 
 
-#ifdef CONFIG_DEBUG_SLAB
+#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_KMEMTRACE)
 void *__kmalloc(size_t size, gfp_t flags)
 {
 	return __do_kmalloc(size, flags, __builtin_return_address(0));
@@ -3762,6 +3815,8 @@ void kmem_cache_free(struct kmem_cache *cachep, void *objp)
 	debug_check_no_obj_freed(objp, obj_size(cachep));
 	__cache_free(cachep, objp);
 	local_irq_restore(flags);
+
+	kmemtrace_mark_free(KMEMTRACE_TYPE_CACHE, _RET_IP_, objp);
 }
 EXPORT_SYMBOL(kmem_cache_free);
 
@@ -3788,6 +3843,8 @@ void kfree(const void *objp)
 	debug_check_no_obj_freed(objp, obj_size(c));
 	__cache_free(c, (void *)objp);
 	local_irq_restore(flags);
+
+	kmemtrace_mark_free(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, objp);
 }
 EXPORT_SYMBOL(kfree);
 
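
Putting the two halves together: with CONFIG_KMEMTRACE enabled, a constant-size call such as kmalloc(100, GFP_KERNEL) now inlines to roughly the fragment below (illustrative only; that the compile-time size lookup lands on a 128-byte general cache is an assumption for the example). Routing the allocation through kmem_cache_alloc_notrace() is what keeps each object from being reported twice: the KMEMTRACE_TYPE_KMALLOC event is emitted here, and the KMEMTRACE_TYPE_CACHE event inside kmem_cache_alloc() is skipped.

	struct kmem_cache *cachep = malloc_sizes[i].cs_cachep; /* assumed: size-128 cache */
	void *ret = kmem_cache_alloc_notrace(cachep, GFP_KERNEL);

	/* call_site = _THIS_IP_ in the inlined body, i.e. inside the caller;
	 * bytes_req = 100 as requested, bytes_alloc = what the slab hands out. */
	kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC, _THIS_IP_, ret,
			     100, slab_buffer_size(cachep), /* == 128 here */
			     GFP_KERNEL);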
