Commit 8f047d7

---
r: 88401
b: refs/heads/master
c: 0f389ec
h: refs/heads/master
i:
  88399: 354f1d4
v: v3
Christoph Lameter authored and Pekka Enberg committed Apr 14, 2008
1 parent 4f7c525 commit 8f047d7
Showing 4 changed files with 43 additions and 14 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 49bd5221ce8fb55d12c04a3ffd375201c5bbfb7a
+refs/heads/master: 0f389ec63077521166f071e1e970aed36147fd45
2 changes: 1 addition & 1 deletion trunk/include/linux/slub_def.h
@@ -45,9 +45,9 @@ struct kmem_cache_cpu {
 struct kmem_cache_node {
         spinlock_t list_lock; /* Protect partial list and nr_partial */
         unsigned long nr_partial;
-        atomic_long_t nr_slabs;
         struct list_head partial;
 #ifdef CONFIG_SLUB_DEBUG
+        atomic_long_t nr_slabs;
         struct list_head full;
 #endif
 };
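
This slub_def.h hunk is the core of the change: the per-node nr_slabs counter moves under CONFIG_SLUB_DEBUG, and the slabs_node()/inc_slabs_node()/dec_slabs_node() helpers added in mm/slub.c below hide the #ifdef from every caller. A minimal, self-contained sketch of that pattern, written as plain userspace C with illustrative names (MY_DEBUG, struct node_stats), not kernel code:

/* Sketch of the patch's pattern: a counter that only exists in debug builds,
 * wrapped in tiny inline accessors so callers need no #ifdefs. */
#include <stdio.h>

struct node_stats {
        unsigned long nr_partial;       /* always present */
#ifdef MY_DEBUG
        long nr_slabs;                  /* only present in debug builds */
#endif
};

#ifdef MY_DEBUG
static inline long slabs_node(struct node_stats *n) { return n->nr_slabs; }
static inline void inc_slabs_node(struct node_stats *n) { n->nr_slabs++; }
static inline void dec_slabs_node(struct node_stats *n) { n->nr_slabs--; }
#else
/* Stubs: callers compile unchanged and the counter costs nothing. */
static inline long slabs_node(struct node_stats *n) { (void)n; return 0; }
static inline void inc_slabs_node(struct node_stats *n) { (void)n; }
static inline void dec_slabs_node(struct node_stats *n) { (void)n; }
#endif

int main(void)
{
        struct node_stats n = { .nr_partial = 0 };

        inc_slabs_node(&n);     /* what new_slab() does per allocated slab */
        inc_slabs_node(&n);
        dec_slabs_node(&n);     /* what discard_slab() does */
        printf("slabs: %ld\n", slabs_node(&n)); /* 1 with -DMY_DEBUG, 0 without */
        return 0;
}

Built with -DMY_DEBUG the counter is real; without it the stubs compile away, which is the effect the patch achieves for !CONFIG_SLUB_DEBUG kernels.
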
2 changes: 1 addition & 1 deletion trunk/init/Kconfig
@@ -763,7 +763,7 @@ endmenu # General setup
 config SLABINFO
         bool
         depends on PROC_FS
-        depends on SLAB || SLUB
+        depends on SLAB || SLUB_DEBUG
         default y
 
 config RT_MUTEXES
51 changes: 40 additions & 11 deletions trunk/mm/slub.c
@@ -837,6 +837,35 @@ static void remove_full(struct kmem_cache *s, struct page *page)
         spin_unlock(&n->list_lock);
 }
 
+/* Tracking of the number of slabs for debugging purposes */
+static inline unsigned long slabs_node(struct kmem_cache *s, int node)
+{
+        struct kmem_cache_node *n = get_node(s, node);
+
+        return atomic_long_read(&n->nr_slabs);
+}
+
+static inline void inc_slabs_node(struct kmem_cache *s, int node)
+{
+        struct kmem_cache_node *n = get_node(s, node);
+
+        /*
+         * May be called early in order to allocate a slab for the
+         * kmem_cache_node structure. Solve the chicken-egg
+         * dilemma by deferring the increment of the count during
+         * bootstrap (see early_kmem_cache_node_alloc).
+         */
+        if (!NUMA_BUILD || n)
+                atomic_long_inc(&n->nr_slabs);
+}
+static inline void dec_slabs_node(struct kmem_cache *s, int node)
+{
+        struct kmem_cache_node *n = get_node(s, node);
+
+        atomic_long_dec(&n->nr_slabs);
+}
+
+/* Object debug checks for alloc/free paths */
 static void setup_object_debug(struct kmem_cache *s, struct page *page,
                         void *object)
 {
@@ -1028,6 +1057,11 @@ static inline unsigned long kmem_cache_flags(unsigned long objsize,
         return flags;
 }
 #define slub_debug 0
+
+static inline unsigned long slabs_node(struct kmem_cache *s, int node)
+                        { return 0; }
+static inline void inc_slabs_node(struct kmem_cache *s, int node) {}
+static inline void dec_slabs_node(struct kmem_cache *s, int node) {}
 #endif
 /*
  * Slab allocation and freeing
@@ -1066,7 +1100,6 @@ static void setup_object(struct kmem_cache *s, struct page *page,
 static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
 {
         struct page *page;
-        struct kmem_cache_node *n;
         void *start;
         void *last;
         void *p;
@@ -1078,9 +1111,7 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
         if (!page)
                 goto out;
 
-        n = get_node(s, page_to_nid(page));
-        if (n)
-                atomic_long_inc(&n->nr_slabs);
+        inc_slabs_node(s, page_to_nid(page));
         page->slab = s;
         page->flags |= 1 << PG_slab;
         if (s->flags & (SLAB_DEBUG_FREE | SLAB_RED_ZONE | SLAB_POISON |
@@ -1153,9 +1184,7 @@ static void free_slab(struct kmem_cache *s, struct page *page)
 
 static void discard_slab(struct kmem_cache *s, struct page *page)
 {
-        struct kmem_cache_node *n = get_node(s, page_to_nid(page));
-
-        atomic_long_dec(&n->nr_slabs);
+        dec_slabs_node(s, page_to_nid(page));
         free_slab(s, page);
 }
 
@@ -1894,10 +1923,10 @@ static void init_kmem_cache_cpu(struct kmem_cache *s,
 static void init_kmem_cache_node(struct kmem_cache_node *n)
 {
         n->nr_partial = 0;
-        atomic_long_set(&n->nr_slabs, 0);
         spin_lock_init(&n->list_lock);
         INIT_LIST_HEAD(&n->partial);
 #ifdef CONFIG_SLUB_DEBUG
+        atomic_long_set(&n->nr_slabs, 0);
         INIT_LIST_HEAD(&n->full);
 #endif
 }
@@ -2066,7 +2095,7 @@ static struct kmem_cache_node *early_kmem_cache_node_alloc(gfp_t gfpflags,
         init_tracking(kmalloc_caches, n);
 #endif
         init_kmem_cache_node(n);
-        atomic_long_inc(&n->nr_slabs);
+        inc_slabs_node(kmalloc_caches, node);
 
         /*
          * lockdep requires consistent irq usage for each lock
@@ -2379,7 +2408,7 @@ static inline int kmem_cache_close(struct kmem_cache *s)
                 struct kmem_cache_node *n = get_node(s, node);
 
                 n->nr_partial -= free_list(s, n, &n->partial);
-                if (atomic_long_read(&n->nr_slabs))
+                if (slabs_node(s, node))
                         return 1;
         }
         free_kmem_cache_nodes(s);
@@ -2801,7 +2830,7 @@ static void slab_mem_offline_callback(void *arg)
                          * and offline_pages() function shoudn't call this
                          * callback. So, we must fail.
                          */
-                        BUG_ON(atomic_long_read(&n->nr_slabs));
+                        BUG_ON(slabs_node(s, offline_node));
 
                         s->node[offline_node] = NULL;
                         kmem_cache_free(kmalloc_caches, n);
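
The comment added in inc_slabs_node() points at a bootstrap subtlety visible in the early_kmem_cache_node_alloc() hunk: on NUMA builds, the very first slab allocated for a node carries that node's struct kmem_cache_node itself, so get_node() still returns NULL when new_slab() runs; the guard skips the increment and early_kmem_cache_node_alloc() performs it once the structure is in place. A standalone sketch of that deferred-increment idea (simplified: the real check is !NUMA_BUILD || n, and the names node_counter, nodes[], bootstrap_node_alloc are illustrative, not kernel code):

/* Sketch of the chicken-and-egg handling: skip the increment while the
 * tracking structure does not exist yet, and let the bootstrap path do it
 * once the structure has been carved out of the new slab. */
#include <stdio.h>
#include <stdlib.h>

struct node_counter {
        long nr_slabs;
};

static struct node_counter *nodes[2];   /* NULL until "allocated" */

static void inc_slabs_node(int node)
{
        struct node_counter *n = nodes[node];

        /* Defer the increment during bootstrap, as the patch does. */
        if (n)
                n->nr_slabs++;
}

/* Plays the role of early_kmem_cache_node_alloc(): set up the tracking
 * structure from the freshly allocated slab, then count that slab. */
static void bootstrap_node_alloc(int node)
{
        inc_slabs_node(node);           /* new_slab(): skipped, nodes[node] is NULL */
        nodes[node] = calloc(1, sizeof(*nodes[node]));
        inc_slabs_node(node);           /* deferred increment, now counted */
}

int main(void)
{
        bootstrap_node_alloc(0);
        inc_slabs_node(0);              /* ordinary allocation after boot */
        printf("node 0 slabs: %ld\n", nodes[0]->nr_slabs);     /* prints 2 */
        return 0;
}
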
