SLUB: Out-of-memory diagnostics
As suggested by Mel Gorman, add out-of-memory diagnostics to the SLUB allocator
to make debugging OOM conditions easier. This patch helped hunt down a nasty
OOM issue that popped up every now and then and was caused by SLUB debugging
code which forced 4096-byte allocations to use order-1 pages even in the
fallback case.

An example printout looks like this:

  <snip page allocator out-of-memory message>
  SLUB: Unable to allocate memory on node -1 (gfp=20)
    cache: kmalloc-4096, object size: 4096, buffer size: 4168, default order: 3, min order: 1
    node 0: slabs: 95, objs: 665, free: 0
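
The "min order: 1" in the example follows directly from the buffer size: with
SLUB debugging enabled, each 4096-byte object carries extra metadata, so the
per-object buffer (4168 bytes here) no longer fits in a single 4096-byte page
and the allocation has to fall back to an order-1 (8 KB) page. A small
user-space sketch of that arithmetic (not part of the patch; PAGE_SIZE and
min_order_for() are illustrative names):

  /* Standalone illustration, not kernel code. */
  #include <stdio.h>

  #define PAGE_SIZE 4096UL

  /* Smallest order such that (PAGE_SIZE << order) >= size. */
  static int min_order_for(unsigned long size)
  {
          int order = 0;

          while ((PAGE_SIZE << order) < size)
                  order++;
          return order;
  }

  int main(void)
  {
          unsigned long object_size = 4096;       /* kmalloc-4096 object */
          unsigned long buffer_size = 4168;       /* object plus SLUB debug metadata */

          printf("object size %lu -> order %d\n", object_size, min_order_for(object_size));
          printf("buffer size %lu -> order %d\n", buffer_size, min_order_for(buffer_size));
          return 0;
  }

This prints order 0 for the bare object but order 1 for the debug-padded
buffer, which is exactly the mismatch the diagnostics above helped expose.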

Acked-by: Christoph Lameter <cl@linux-foundation.org>
Acked-by: Mel Gorman <mel@csn.ul.ie>
Tested-by: Larry Finger <Larry.Finger@lwfinger.net>
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
Pekka Enberg committed Jun 11, 2009
1 parent 59a3759 commit 781b2ba
Showing 1 changed file with 51 additions and 19 deletions.
mm/slub.c
@@ -1484,6 +1484,56 @@ static inline int node_match(struct kmem_cache_cpu *c, int node)
	return 1;
}

static int count_free(struct page *page)
{
	return page->objects - page->inuse;
}

static unsigned long count_partial(struct kmem_cache_node *n,
					int (*get_count)(struct page *))
{
	unsigned long flags;
	unsigned long x = 0;
	struct page *page;

	spin_lock_irqsave(&n->list_lock, flags);
	list_for_each_entry(page, &n->partial, lru)
		x += get_count(page);
	spin_unlock_irqrestore(&n->list_lock, flags);
	return x;
}

static noinline void
slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid)
{
	int node;

	printk(KERN_WARNING
		"SLUB: Unable to allocate memory on node %d (gfp=0x%x)\n",
		nid, gfpflags);
	printk(KERN_WARNING "  cache: %s, object size: %d, buffer size: %d, "
		"default order: %d, min order: %d\n", s->name, s->objsize,
		s->size, oo_order(s->oo), oo_order(s->min));

	for_each_online_node(node) {
		struct kmem_cache_node *n = get_node(s, node);
		unsigned long nr_slabs;
		unsigned long nr_objs;
		unsigned long nr_free;

		if (!n)
			continue;

		nr_slabs = atomic_long_read(&n->nr_slabs);
		nr_objs = atomic_long_read(&n->total_objects);
		nr_free = count_partial(n, count_free);

		printk(KERN_WARNING
			"  node %d: slabs: %ld, objs: %ld, free: %ld\n",
			node, nr_slabs, nr_objs, nr_free);
	}
}

/*
* Slow path. The lockless freelist is empty or we need to perform
* debugging duties.
@@ -1565,6 +1615,7 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
		c->page = new;
		goto load_freelist;
	}
	slab_out_of_memory(s, gfpflags, node);
	return NULL;
debug:
	if (!alloc_debug_processing(s, c->page, object, addr))
@@ -3318,20 +3369,6 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
}

#ifdef CONFIG_SLUB_DEBUG
static unsigned long count_partial(struct kmem_cache_node *n,
					int (*get_count)(struct page *))
{
	unsigned long flags;
	unsigned long x = 0;
	struct page *page;

	spin_lock_irqsave(&n->list_lock, flags);
	list_for_each_entry(page, &n->partial, lru)
		x += get_count(page);
	spin_unlock_irqrestore(&n->list_lock, flags);
	return x;
}

static int count_inuse(struct page *page)
{
	return page->inuse;
@@ -3342,11 +3379,6 @@ static int count_total(struct page *page)
	return page->objects;
}

static int count_free(struct page *page)
{
	return page->objects - page->inuse;
}

static int validate_slab(struct kmem_cache *s, struct page *page,
						unsigned long *map)
{
