Skip to content

Commit

Permalink
---
Browse files Browse the repository at this point in the history
yaml
---
r: 54208
b: refs/heads/master
c: e95eed5
h: refs/heads/master
v: v3
  • Loading branch information
Christoph Lameter authored and Linus Torvalds committed May 7, 2007
1 parent 738a4eb commit c2547c1
Show file tree
Hide file tree
Showing 2 changed files with 37 additions and 20 deletions.
2 changes: 1 addition & 1 deletion [refs]
Original file line number Diff line number Diff line change
@@ -1,2 +1,2 @@
---
refs/heads/master: 53e15af03be4fdaaf20802d78f141487d7272985
refs/heads/master: e95eed571e85d7ad4cde73576296c615f305f59f
55 changes: 36 additions & 19 deletions trunk/mm/slub.c
Original file line number Diff line number Diff line change
Expand Up @@ -133,6 +133,9 @@
*/
#define SLUB_UNIMPLEMENTED (SLAB_DEBUG_INITIAL)

/* Minimum number of partial slabs */
#define MIN_PARTIAL 2

#define DEBUG_DEFAULT_FLAGS (SLAB_DEBUG_FREE | SLAB_RED_ZONE | \
SLAB_POISON | SLAB_STORE_USER)
/*
Expand Down Expand Up @@ -664,16 +667,8 @@ static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
/*
* Tracking of fully allocated slabs for debugging
*/
static void add_full(struct kmem_cache *s, struct page *page)
static void add_full(struct kmem_cache_node *n, struct page *page)
{
struct kmem_cache_node *n;

VM_BUG_ON(!irqs_disabled());

if (!(s->flags & SLAB_STORE_USER))
return;

n = get_node(s, page_to_nid(page));
spin_lock(&n->list_lock);
list_add(&page->lru, &n->full);
spin_unlock(&n->list_lock);
Expand Down Expand Up @@ -982,10 +977,16 @@ static __always_inline int slab_trylock(struct page *page)
/*
* Management of partially allocated slabs
*/
static void add_partial(struct kmem_cache *s, struct page *page)
/*
 * Queue a slab page at the *tail* of the node's partial list, under the
 * node's list_lock.
 *
 * Tail insertion keeps this page behind the other partial slabs, so
 * allocation (which scans from the head) prefers pages that are already
 * partially filled — presumably to maximize defragmentation of empty
 * pages parked here (see the MIN_PARTIAL path in putback_slab).
 *
 * @n:    per-node bookkeeping structure whose ->partial list and
 *        ->nr_partial count are updated
 * @page: slab page to add; its ->lru linkage is used for list membership
 */
static void add_partial_tail(struct kmem_cache_node *n, struct page *page)
{
	spin_lock(&n->list_lock);
	n->nr_partial++;
	list_add_tail(&page->lru, &n->partial);
	spin_unlock(&n->list_lock);
}

static void add_partial(struct kmem_cache_node *n, struct page *page)
{
spin_lock(&n->list_lock);
n->nr_partial++;
list_add(&page->lru, &n->partial);
Expand Down Expand Up @@ -1085,7 +1086,7 @@ static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags)
n = get_node(s, zone_to_nid(*z));

if (n && cpuset_zone_allowed_hardwall(*z, flags) &&
n->nr_partial > 2) {
n->nr_partial > MIN_PARTIAL) {
page = get_partial_node(n);
if (page)
return page;
Expand Down Expand Up @@ -1119,15 +1120,31 @@ static struct page *get_partial(struct kmem_cache *s, gfp_t flags, int node)
*/
static void putback_slab(struct kmem_cache *s, struct page *page)
{
struct kmem_cache_node *n = get_node(s, page_to_nid(page));

if (page->inuse) {

if (page->freelist)
add_partial(s, page);
else if (PageError(page))
add_full(s, page);
add_partial(n, page);
else if (PageError(page) && (s->flags & SLAB_STORE_USER))
add_full(n, page);
slab_unlock(page);

} else {
slab_unlock(page);
discard_slab(s, page);
if (n->nr_partial < MIN_PARTIAL) {
/*
* Adding an empty page to the partial slabs in order
* to avoid page allocator overhead. This page needs to
* come after all the others that are not fully empty
* in order to make sure that we do maximum
* defragmentation.
*/
add_partial_tail(n, page);
slab_unlock(page);
} else {
slab_unlock(page);
discard_slab(s, page);
}
}
}

Expand Down Expand Up @@ -1326,7 +1343,7 @@ static void slab_free(struct kmem_cache *s, struct page *page,
* then add it.
*/
if (unlikely(!prior))
add_partial(s, page);
add_partial(get_node(s, page_to_nid(page)), page);

out_unlock:
slab_unlock(page);
Expand Down Expand Up @@ -1535,7 +1552,7 @@ static struct kmem_cache_node * __init early_kmem_cache_node_alloc(gfp_t gfpflag
init_object(kmalloc_caches, n, 1);
init_kmem_cache_node(n);
atomic_long_inc(&n->nr_slabs);
add_partial(kmalloc_caches, page);
add_partial(n, page);
return n;
}

Expand Down

0 comments on commit c2547c1

Please sign in to comment.