SLUB: Do not allocate object bit array on stack
The objects per slab increase with the current patches in -mm, since we allow up to order-3 allocations by default.  Further patches in -mm allow the use of 2M or larger slabs.  Slab validation needs a per-object bitmap in order to check a slab, and with up to 64k objects per slab that bitmap can require up to 8K of stack space.  That does not look good.

Allocate the bit arrays via kmalloc.

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Christoph Lameter authored and Linus Torvalds committed Jul 17, 2007
1 parent 94f6030 commit 434e245
Showing 1 changed file with 25 additions and 14 deletions.
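
As a rough aside (not part of the patch itself): the 8K figure in the changelog is just the size of a per-object bitmap for the worst case of 64k objects.  Below is a standalone userspace sketch of that arithmetic, using local copies of the kernel's BITS_PER_LONG/BITS_TO_LONGS helpers; names and values here are only illustrative.

#include <stdio.h>
#include <stddef.h>

/*
 * Userspace sketch, not kernel code: local stand-ins for the kernel's
 * BITS_PER_LONG and BITS_TO_LONGS helpers, used only to reproduce the
 * changelog's size arithmetic.
 */
#define BITS_PER_LONG    (8 * sizeof(unsigned long))
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

int main(void)
{
        unsigned long objects = 65536;  /* worst case cited in the changelog */
        size_t bytes = BITS_TO_LONGS(objects) * sizeof(unsigned long);

        /* On a 64-bit system: 65536 bits -> 1024 longs -> 8192 bytes. */
        printf("bitmap for %lu objects: %zu bytes\n", objects, bytes);
        return 0;
}

On a 64-bit build this prints 8192 bytes, which is why the patch below moves the bitmap from DECLARE_BITMAP() on the stack to kmalloc().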
mm/slub.c: 25 additions & 14 deletions

@@ -2764,11 +2764,11 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 }

 #if defined(CONFIG_SYSFS) && defined(CONFIG_SLUB_DEBUG)
-static int validate_slab(struct kmem_cache *s, struct page *page)
+static int validate_slab(struct kmem_cache *s, struct page *page,
+                                                unsigned long *map)
 {
         void *p;
         void *addr = page_address(page);
-        DECLARE_BITMAP(map, s->objects);

         if (!check_slab(s, page) ||
                         !on_freelist(s, page, NULL))
@@ -2790,10 +2790,11 @@ static int validate_slab(struct kmem_cache *s, struct page *page)
         return 1;
 }

-static void validate_slab_slab(struct kmem_cache *s, struct page *page)
+static void validate_slab_slab(struct kmem_cache *s, struct page *page,
+                                                unsigned long *map)
 {
         if (slab_trylock(page)) {
-                validate_slab(s, page);
+                validate_slab(s, page, map);
                 slab_unlock(page);
         } else
                 printk(KERN_INFO "SLUB %s: Skipped busy slab 0x%p\n",
@@ -2810,7 +2811,8 @@ static void validate_slab_slab(struct kmem_cache *s, struct page *page)
         }
 }

-static int validate_slab_node(struct kmem_cache *s, struct kmem_cache_node *n)
+static int validate_slab_node(struct kmem_cache *s,
+                struct kmem_cache_node *n, unsigned long *map)
 {
         unsigned long count = 0;
         struct page *page;
@@ -2819,7 +2821,7 @@ static int validate_slab_node(struct kmem_cache *s, struct kmem_cache_node *n)
         spin_lock_irqsave(&n->list_lock, flags);

         list_for_each_entry(page, &n->partial, lru) {
-                validate_slab_slab(s, page);
+                validate_slab_slab(s, page, map);
                 count++;
         }
         if (count != n->nr_partial)
@@ -2830,7 +2832,7 @@ static int validate_slab_node(struct kmem_cache *s, struct kmem_cache_node *n)
                 goto out;

         list_for_each_entry(page, &n->full, lru) {
-                validate_slab_slab(s, page);
+                validate_slab_slab(s, page, map);
                 count++;
         }
         if (count != atomic_long_read(&n->nr_slabs))
@@ -2843,17 +2845,23 @@ static int validate_slab_node(struct kmem_cache *s, struct kmem_cache_node *n)
         return count;
 }

-static unsigned long validate_slab_cache(struct kmem_cache *s)
+static long validate_slab_cache(struct kmem_cache *s)
 {
         int node;
         unsigned long count = 0;
+        unsigned long *map = kmalloc(BITS_TO_LONGS(s->objects) *
+                                sizeof(unsigned long), GFP_KERNEL);
+
+        if (!map)
+                return -ENOMEM;

         flush_all(s);
         for_each_online_node(node) {
                 struct kmem_cache_node *n = get_node(s, node);

-                count += validate_slab_node(s, n);
+                count += validate_slab_node(s, n, map);
         }
+        kfree(map);
         return count;
 }

@@ -3467,11 +3475,14 @@ static ssize_t validate_show(struct kmem_cache *s, char *buf)
 static ssize_t validate_store(struct kmem_cache *s,
                                 const char *buf, size_t length)
 {
-        if (buf[0] == '1')
-                validate_slab_cache(s);
-        else
-                return -EINVAL;
-        return length;
+        int ret = -EINVAL;
+
+        if (buf[0] == '1') {
+                ret = validate_slab_cache(s);
+                if (ret >= 0)
+                        ret = length;
+        }
+        return ret;
 }
 SLAB_ATTR(validate);

