Skip to content

Commit

Permalink
---
Browse files Browse the repository at this point in the history
yaml
---
r: 54206
b: refs/heads/master
c: 643b113
h: refs/heads/master
v: v3
  • Loading branch information
Christoph Lameter authored and Linus Torvalds committed May 7, 2007
1 parent 478de77 commit 95b06a4
Show file tree
Hide file tree
Showing 3 changed files with 42 additions and 2 deletions.
2 changes: 1 addition & 1 deletion [refs]
Original file line number Diff line number Diff line change
@@ -1,2 +1,2 @@
---
refs/heads/master: 77c5e2d01af871f4bfbe08feefa3d5118cb1001b
refs/heads/master: 643b113849d8faa68c9f01c3c9d929bfbffd50bd
1 change: 1 addition & 0 deletions trunk/include/linux/slub_def.h
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,7 @@ struct kmem_cache_node {
unsigned long nr_partial;
atomic_long_t nr_slabs;
struct list_head partial;
struct list_head full;
};

/*
Expand Down
41 changes: 40 additions & 1 deletion trunk/mm/slub.c
Original file line number Diff line number Diff line change
Expand Up @@ -661,6 +661,40 @@ static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
return search == NULL;
}

/*
 * Tracking of fully allocated slabs for debugging.
 *
 * Add @page to the per-node list of full slabs so that debugging code
 * (SLAB_STORE_USER) can later walk all in-use objects.  A no-op unless
 * the cache has SLAB_STORE_USER set.  The per-node list_lock is taken
 * without disabling interrupts, so the caller must already have
 * interrupts disabled -- asserted via VM_BUG_ON below.
 */
static void add_full(struct kmem_cache *s, struct page *page)
{
	struct kmem_cache_node *n;

	/* list_lock is taken without irqsave; irqs must already be off. */
	VM_BUG_ON(!irqs_disabled());

	if (!(s->flags & SLAB_STORE_USER))
		return;

	/* Find the node state for the node this page resides on. */
	n = get_node(s, page_to_nid(page));
	spin_lock(&n->list_lock);
	list_add(&page->lru, &n->full);
	spin_unlock(&n->list_lock);
}

/*
 * Undo add_full(): take @page off its node's list of full slabs.
 * Only relevant when the cache tracks full slabs (SLAB_STORE_USER);
 * otherwise the function returns immediately.
 */
static void remove_full(struct kmem_cache *s, struct page *page)
{
	struct kmem_cache_node *node;

	if (!(s->flags & SLAB_STORE_USER))
		return;

	/* Locate the node state for the node holding this page. */
	node = get_node(s, page_to_nid(page));

	spin_lock(&node->list_lock);
	list_del(&page->lru);
	spin_unlock(&node->list_lock);
}

static int alloc_object_checks(struct kmem_cache *s, struct page *page,
void *object)
{
Expand Down Expand Up @@ -1090,6 +1124,8 @@ static void putback_slab(struct kmem_cache *s, struct page *page)
if (page->inuse) {
if (page->freelist)
add_partial(s, page);
else if (PageError(page))
add_full(s, page);
slab_unlock(page);
} else {
slab_unlock(page);
Expand Down Expand Up @@ -1302,7 +1338,7 @@ static void slab_free(struct kmem_cache *s, struct page *page,
slab_empty:
if (prior)
/*
* Partially used slab that is on the partial list.
* Slab on the partial list.
*/
remove_partial(s, page);

Expand All @@ -1314,6 +1350,8 @@ static void slab_free(struct kmem_cache *s, struct page *page,
debug:
if (!free_object_checks(s, page, x))
goto out_unlock;
if (!PageActive(page) && !page->freelist)
remove_full(s, page);
if (s->flags & SLAB_STORE_USER)
set_track(s, x, TRACK_FREE, addr);
goto checks_ok;
Expand Down Expand Up @@ -1466,6 +1504,7 @@ static void init_kmem_cache_node(struct kmem_cache_node *n)
atomic_long_set(&n->nr_slabs, 0);
spin_lock_init(&n->list_lock);
INIT_LIST_HEAD(&n->partial);
INIT_LIST_HEAD(&n->full);
}

#ifdef CONFIG_NUMA
Expand Down

0 comments on commit 95b06a4

Please sign in to comment.