SLUB: Make slub statistics use this_cpu_inc
this_cpu_inc() translates into a single instruction on x86 and does not
need any register. So use it in stat(). We also want to avoid the
calculation of the per cpu kmem_cache_cpu structure pointer. So pass
a kmem_cache pointer instead of a kmem_cache_cpu pointer.

Signed-off-by: Christoph Lameter <cl@linux-foundation.org>
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
Christoph Lameter authored and Pekka Enberg committed Dec 20, 2009
1 parent ff12059 commit 84e554e
Showing 1 changed file with 20 additions and 23 deletions.
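For readers less familiar with the kernel's per-CPU API, the whole change reduces to the before/after sketch below. This is only an illustrative paraphrase of the first hunk (kernel-style code, not buildable outside the tree; the _old/_new suffixes are added here for contrast), assuming CONFIG_SLUB_STATS is enabled:

/* Before: every caller first resolves the per-CPU kmem_cache_cpu pointer
 * (e.g. via this_cpu_ptr(s->cpu_slab)) and then bumps the counter through it. */
static inline void stat_old(struct kmem_cache_cpu *c, enum stat_item si)
{
#ifdef CONFIG_SLUB_STATS
        c->stat[si]++;
#endif
}

/* After: callers pass the kmem_cache itself and __this_cpu_inc() increments
 * the per-CPU counter in place; per the commit message this is a single
 * instruction on x86 and needs no temporary pointer. */
static inline void stat_new(struct kmem_cache *s, enum stat_item si)
{
#ifdef CONFIG_SLUB_STATS
        __this_cpu_inc(s->cpu_slab->stat[si]);
#endif
}

The remaining hunks are the mechanical follow-up: call sites change from stat(c, ...) to stat(s, ...), and the now-unneeded kmem_cache_cpu lookups are dropped.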
43 changes: 20 additions & 23 deletions mm/slub.c
@@ -217,10 +217,10 @@ static inline void sysfs_slab_remove(struct kmem_cache *s)

#endif

-static inline void stat(struct kmem_cache_cpu *c, enum stat_item si)
+static inline void stat(struct kmem_cache *s, enum stat_item si)
{
#ifdef CONFIG_SLUB_STATS
-c->stat[si]++;
+__this_cpu_inc(s->cpu_slab->stat[si]);
#endif
}

@@ -1108,7 +1108,7 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
if (!page)
return NULL;

-stat(this_cpu_ptr(s->cpu_slab), ORDER_FALLBACK);
+stat(s, ORDER_FALLBACK);
}

if (kmemcheck_enabled
@@ -1406,23 +1406,22 @@ static struct page *get_partial(struct kmem_cache *s, gfp_t flags, int node)
static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail)
{
struct kmem_cache_node *n = get_node(s, page_to_nid(page));
-struct kmem_cache_cpu *c = this_cpu_ptr(s->cpu_slab);

__ClearPageSlubFrozen(page);
if (page->inuse) {

if (page->freelist) {
add_partial(n, page, tail);
-stat(c, tail ? DEACTIVATE_TO_TAIL : DEACTIVATE_TO_HEAD);
+stat(s, tail ? DEACTIVATE_TO_TAIL : DEACTIVATE_TO_HEAD);
} else {
-stat(c, DEACTIVATE_FULL);
+stat(s, DEACTIVATE_FULL);
if (SLABDEBUG && PageSlubDebug(page) &&
(s->flags & SLAB_STORE_USER))
add_full(n, page);
}
slab_unlock(page);
} else {
-stat(c, DEACTIVATE_EMPTY);
+stat(s, DEACTIVATE_EMPTY);
if (n->nr_partial < s->min_partial) {
/*
* Adding an empty slab to the partial slabs in order
@@ -1438,7 +1437,7 @@ static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail)
slab_unlock(page);
} else {
slab_unlock(page);
-stat(__this_cpu_ptr(s->cpu_slab), FREE_SLAB);
+stat(s, FREE_SLAB);
discard_slab(s, page);
}
}
@@ -1453,7 +1452,7 @@ static void deactivate_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
int tail = 1;

if (page->freelist)
-stat(c, DEACTIVATE_REMOTE_FREES);
+stat(s, DEACTIVATE_REMOTE_FREES);
/*
* Merge cpu freelist into slab freelist. Typically we get here
* because both freelists are empty. So this is unlikely
@@ -1479,7 +1478,7 @@ static void deactivate_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)

static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
{
-stat(c, CPUSLAB_FLUSH);
+stat(s, CPUSLAB_FLUSH);
slab_lock(c->page);
deactivate_slab(s, c);
}
@@ -1619,7 +1618,7 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
if (unlikely(!node_match(c, node)))
goto another_slab;

-stat(c, ALLOC_REFILL);
+stat(s, ALLOC_REFILL);

load_freelist:
object = c->page->freelist;
@@ -1634,7 +1633,7 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
c->node = page_to_nid(c->page);
unlock_out:
slab_unlock(c->page);
-stat(c, ALLOC_SLOWPATH);
+stat(s, ALLOC_SLOWPATH);
return object;

another_slab:
@@ -1644,7 +1643,7 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
new = get_partial(s, gfpflags, node);
if (new) {
c->page = new;
-stat(c, ALLOC_FROM_PARTIAL);
+stat(s, ALLOC_FROM_PARTIAL);
goto load_freelist;
}

@@ -1658,7 +1657,7 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,

if (new) {
c = __this_cpu_ptr(s->cpu_slab);
-stat(c, ALLOC_SLAB);
+stat(s, ALLOC_SLAB);
if (c->page)
flush_slab(s, c);
slab_lock(new);
@@ -1713,7 +1712,7 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,

else {
c->freelist = get_freepointer(s, object);
-stat(c, ALLOC_FASTPATH);
+stat(s, ALLOC_FASTPATH);
}
local_irq_restore(flags);

@@ -1780,10 +1779,8 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
{
void *prior;
void **object = (void *)x;
-struct kmem_cache_cpu *c;

-c = __this_cpu_ptr(s->cpu_slab);
-stat(c, FREE_SLOWPATH);
+stat(s, FREE_SLOWPATH);
slab_lock(page);

if (unlikely(SLABDEBUG && PageSlubDebug(page)))
@@ -1796,7 +1793,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
page->inuse--;

if (unlikely(PageSlubFrozen(page))) {
-stat(c, FREE_FROZEN);
+stat(s, FREE_FROZEN);
goto out_unlock;
}

@@ -1809,7 +1806,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
*/
if (unlikely(!prior)) {
add_partial(get_node(s, page_to_nid(page)), page, 1);
-stat(c, FREE_ADD_PARTIAL);
+stat(s, FREE_ADD_PARTIAL);
}

out_unlock:
@@ -1822,10 +1819,10 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
* Slab still on the partial list.
*/
remove_partial(s, page);
-stat(c, FREE_REMOVE_PARTIAL);
+stat(s, FREE_REMOVE_PARTIAL);
}
slab_unlock(page);
-stat(c, FREE_SLAB);
+stat(s, FREE_SLAB);
discard_slab(s, page);
return;

@@ -1863,7 +1860,7 @@ static __always_inline void slab_free(struct kmem_cache *s,
if (likely(page == c->page && c->node >= 0)) {
set_freepointer(s, object, c->freelist);
c->freelist = object;
-stat(c, FREE_FASTPATH);
+stat(s, FREE_FASTPATH);
} else
__slab_free(s, page, x, addr);
