
Commit

---
r: 147734
b: refs/heads/master
c: d5cff63
h: refs/heads/master
v: v3
Catalin Marinas committed Jun 11, 2009
1 parent 514ba7a commit 059f234
Showing 3 changed files with 33 additions and 3 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
-refs/heads/master: 04f70336c80c43a15e617b36c2043dfa0ad6ed0f
+refs/heads/master: d5cff635290aec9ad7e6ee546aa4fae895361cbb
2 changes: 2 additions & 0 deletions trunk/include/linux/slab.h
@@ -62,6 +62,8 @@
# define SLAB_DEBUG_OBJECTS 0x00000000UL
#endif

+#define SLAB_NOLEAKTRACE 0x00800000UL /* Avoid kmemleak tracing */
+
/* The following flags affect the page allocator grouping pages by mobility */
#define SLAB_RECLAIM_ACCOUNT 0x00020000UL /* Objects are reclaimable */
#define SLAB_TEMPORARY SLAB_RECLAIM_ACCOUNT /* Objects are short-lived */
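For illustration (not part of this diff), a cache whose objects should stay invisible to kmemleak could pass the new flag at creation time; the cache name, object size, and init function below are hypothetical:

	#include <linux/errno.h>
	#include <linux/init.h>
	#include <linux/slab.h>

	static struct kmem_cache *meta_cache;	/* hypothetical cache */

	static int __init meta_cache_init(void)
	{
		/* SLAB_NOLEAKTRACE: objects from this cache are neither
		 * reported to nor scanned by kmemleak, which avoids
		 * recursion when the tracer allocates its own metadata. */
		meta_cache = kmem_cache_create("meta_cache", 128, 0,
					       SLAB_NOLEAKTRACE, NULL);
		return meta_cache ? 0 : -ENOMEM;
	}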
32 changes: 30 additions & 2 deletions trunk/mm/slab.c
@@ -107,6 +107,7 @@
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/nodemask.h>
+#include <linux/kmemleak.h>
#include <linux/mempolicy.h>
#include <linux/mutex.h>
#include <linux/fault-inject.h>
@@ -178,13 +179,13 @@
SLAB_STORE_USER | \
SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
-	SLAB_DEBUG_OBJECTS)
+	SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE)
#else
# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \
SLAB_CACHE_DMA | \
SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
-	SLAB_DEBUG_OBJECTS)
+	SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE)
#endif

/*
@@ -964,6 +965,14 @@ static struct array_cache *alloc_arraycache(int node, int entries,
struct array_cache *nc = NULL;

nc = kmalloc_node(memsize, GFP_KERNEL, node);
+	/*
+	 * The array_cache structures contain pointers to free objects.
+	 * However, when such objects are allocated or transferred to another
+	 * cache the pointers are not cleared and they could be counted as
+	 * valid references during a kmemleak scan. Therefore, kmemleak must
+	 * not scan such objects.
+	 */
+	kmemleak_no_scan(nc);
if (nc) {
nc->avail = 0;
nc->limit = entries;
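The same pattern applies to any bookkeeping structure that stashes pointers to free objects. A minimal sketch with a hypothetical structure (kmemleak_no_scan() tolerates a NULL argument, which is why the hunk above calls it before checking nc):

	#include <linux/kmemleak.h>
	#include <linux/slab.h>

	/* Hypothetical per-node stash of free object pointers. */
	struct obj_stash {
		unsigned int avail;
		void *slots[16];
	};

	static struct obj_stash *alloc_stash(void)
	{
		struct obj_stash *stash = kmalloc(sizeof(*stash), GFP_KERNEL);

		/* Stale entries in slots[] would otherwise be counted as
		 * live references during a kmemleak scan. */
		kmemleak_no_scan(stash);
		return stash;
	}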
@@ -2621,6 +2630,14 @@ static struct slab *alloc_slabmgmt(struct kmem_cache *cachep, void *objp,
/* Slab management obj is off-slab. */
slabp = kmem_cache_alloc_node(cachep->slabp_cache,
local_flags, nodeid);
+		/*
+		 * If the first object in the slab is leaked (it's allocated
+		 * but no one has a reference to it), we want to make sure
+		 * kmemleak does not treat the ->s_mem pointer as a reference
+		 * to the object. Otherwise we will not report the leak.
+		 */
+		kmemleak_scan_area(slabp, offsetof(struct slab, list),
+				   sizeof(struct list_head), local_flags);
if (!slabp)
return NULL;
} else {
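The call narrows kmemleak scanning of the off-slab descriptor to its list linkage only, so the ->s_mem field never masks a leak of the first object. A sketch of the same idea against a hypothetical descriptor:

	#include <linux/gfp.h>
	#include <linux/kmemleak.h>
	#include <linux/list.h>
	#include <linux/stddef.h>

	/* Hypothetical descriptor: only 'list' holds real references. */
	struct desc {
		struct list_head list;	/* descriptor linkage: scanned */
		void *s_mem;		/* points into the slab: not scanned */
	};

	static void desc_limit_scan(struct desc *d, gfp_t gfp)
	{
		/* Tell kmemleak to scan only the 'list' member of *d. */
		kmemleak_scan_area(d, offsetof(struct desc, list),
				   sizeof(struct list_head), gfp);
	}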
@@ -3141,6 +3158,12 @@ static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags)
STATS_INC_ALLOCMISS(cachep);
objp = cache_alloc_refill(cachep, flags);
}
+	/*
+	 * To avoid a false negative, if an object that is in one of the
+	 * per-CPU caches is leaked, we need to make sure kmemleak doesn't
+	 * treat the array pointer as a reference to the object.
+	 */
+	kmemleak_erase(&ac->entry[ac->avail]);
return objp;
}
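kmemleak_erase() clears a single pointer slot. A sketch of the fast-path hand-out it protects, with the structure trimmed to the fields used here:

	#include <linux/kmemleak.h>

	/* Trimmed-down view of the per-CPU array cache used above. */
	struct array_cache_sketch {
		unsigned int avail;
		void *entry[];
	};

	static void *pop_object(struct array_cache_sketch *ac)
	{
		void *objp = ac->entry[--ac->avail];

		/* NULL out the vacated slot so a scan of the array does not
		 * see a lingering reference to the object just handed out. */
		kmemleak_erase(&ac->entry[ac->avail]);
		return objp;
	}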

@@ -3360,6 +3383,8 @@ __cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
out:
local_irq_restore(save_flags);
ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller);
+	kmemleak_alloc_recursive(ptr, obj_size(cachep), 1, cachep->flags,
+				 flags);

if (unlikely((flags & __GFP_ZERO) && ptr))
memset(ptr, 0, obj_size(cachep));
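The _recursive variants used in these hunks exist so that caches created with SLAB_NOLEAKTRACE, including kmemleak's own internal caches, do not feed back into the tracer. A sketch of what such a wrapper amounts to, assuming the kmemleak_alloc()/kmemleak_free() primitives (illustrative, not the exact header source):

	static inline void kmemleak_alloc_recursive(const void *ptr,
						    size_t size, int min_count,
						    unsigned long flags,
						    gfp_t gfp)
	{
		/* Skip tracing for caches flagged SLAB_NOLEAKTRACE. */
		if (!(flags & SLAB_NOLEAKTRACE))
			kmemleak_alloc(ptr, size, min_count, gfp);
	}

	static inline void kmemleak_free_recursive(const void *ptr,
						   unsigned long flags)
	{
		if (!(flags & SLAB_NOLEAKTRACE))
			kmemleak_free(ptr);
	}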
@@ -3415,6 +3440,8 @@ __cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller)
objp = __do_cache_alloc(cachep, flags);
local_irq_restore(save_flags);
objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller);
+	kmemleak_alloc_recursive(objp, obj_size(cachep), 1, cachep->flags,
+				 flags);
prefetchw(objp);

if (unlikely((flags & __GFP_ZERO) && objp))
@@ -3530,6 +3557,7 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp)
struct array_cache *ac = cpu_cache_get(cachep);

check_irq_off();
+	kmemleak_free_recursive(objp, cachep->flags);
objp = cache_free_debugcheck(cachep, objp, __builtin_return_address(0));

/*
