slub: move kmem_cache_node into its own cacheline
This patch is meant to improve the performance of SLUB by moving the local
kmem_cache_node lock into its own cacheline, separate from kmem_cache.
This is accomplished by simply removing the local_node when NUMA is enabled.

On my system with 2 nodes I saw around a 5% performance increase, with
hackbench times dropping from 6.2 seconds to 5.9 seconds on average.  I
suspect the performance gain would grow as the number of nodes increases,
but I do not currently have the data to back that up.

Bugzilla-Reference: http://bugzilla.kernel.org/show_bug.cgi?id=15713
Cc: <stable@kernel.org>
Reported-by: Alex Shi <alex.shi@intel.com>
Tested-by: Alex Shi <alex.shi@intel.com>
Acked-by: Yanmin Zhang <yanmin_zhang@linux.intel.com>
Acked-by: Christoph Lameter <cl@linux-foundation.org>
Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
Alexander Duyck authored and Pekka Enberg committed May 24, 2010
1 parent 7e125f7 commit 73367bd
Showing 2 changed files with 14 additions and 28 deletions.
9 changes: 3 additions & 6 deletions include/linux/slub_def.h
@@ -75,12 +75,6 @@ struct kmem_cache {
 	int offset;		/* Free pointer offset. */
 	struct kmem_cache_order_objects oo;
 
-	/*
-	 * Avoid an extra cache line for UP, SMP and for the node local to
-	 * struct kmem_cache.
-	 */
-	struct kmem_cache_node local_node;
-
 	/* Allocation and freeing of slabs */
 	struct kmem_cache_order_objects max;
 	struct kmem_cache_order_objects min;
@@ -102,6 +96,9 @@ struct kmem_cache {
 	 */
 	int remote_node_defrag_ratio;
 	struct kmem_cache_node *node[MAX_NUMNODES];
+#else
+	/* Avoid an extra cache line for UP */
+	struct kmem_cache_node local_node;
 #endif
 };
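
Editor's note on the header change: with CONFIG_NUMA the local node is now reached
through the s->node[] pointer array like every other node, so an embedded local_node
only remains useful in the !NUMA build. The sketch below is a minimal user-space model
of that layout decision, not the kernel's definitions; USE_NUMA stands in for
CONFIG_NUMA and the struct/field names are simplified stand-ins. The accessor mirrors
the shape of slub's get_node() helper of that era.

/* Minimal user-space model of the layout choice; not the kernel's structs.
 * USE_NUMA stands in for CONFIG_NUMA, names are illustrative only. */
#include <stdio.h>

#define MAX_NUMNODES 4

struct node_model {			/* stand-in for kmem_cache_node */
	unsigned long nr_partial;	/* node-local state behind its own lock */
};

struct cache_model {			/* stand-in for kmem_cache */
	int objsize;			/* hot, read-mostly fields */
	int offset;
#ifdef USE_NUMA
	/* Every node, including the local one, is reached via a pointer,
	 * so node state never shares a cacheline with the hot fields. */
	struct node_model *node[MAX_NUMNODES];
#else
	/* UP build: keep one embedded copy and avoid an extra allocation. */
	struct node_model local_node;
#endif
};

/* Mirrors the shape of slub's get_node() helper of that era. */
static struct node_model *get_node_model(struct cache_model *s, int node)
{
#ifdef USE_NUMA
	return s->node[node];
#else
	(void)node;
	return &s->local_node;
#endif
}

int main(void)
{
	struct cache_model s = { .objsize = 64, .offset = 0 };

	printf("node 0 state lives at %p\n", (void *)get_node_model(&s, 0));
	return 0;
}

Compiling with and without -DUSE_NUMA shows the two shapes the #ifdef in
slub_def.h now selects between.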

33 changes: 11 additions & 22 deletions mm/slub.c
@@ -2133,7 +2133,7 @@ static void free_kmem_cache_nodes(struct kmem_cache *s)
 
 	for_each_node_state(node, N_NORMAL_MEMORY) {
 		struct kmem_cache_node *n = s->node[node];
-		if (n && n != &s->local_node)
+		if (n)
 			kmem_cache_free(kmalloc_caches, n);
 		s->node[node] = NULL;
 	}
@@ -2142,33 +2142,22 @@ static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags)
 static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags)
 {
 	int node;
-	int local_node;
-
-	if (slab_state >= UP && (s < kmalloc_caches ||
-			s >= kmalloc_caches + KMALLOC_CACHES))
-		local_node = page_to_nid(virt_to_page(s));
-	else
-		local_node = 0;
 
 	for_each_node_state(node, N_NORMAL_MEMORY) {
 		struct kmem_cache_node *n;
 
-		if (local_node == node)
-			n = &s->local_node;
-		else {
-			if (slab_state == DOWN) {
-				early_kmem_cache_node_alloc(gfpflags, node);
-				continue;
-			}
-			n = kmem_cache_alloc_node(kmalloc_caches,
-							gfpflags, node);
-
-			if (!n) {
-				free_kmem_cache_nodes(s);
-				return 0;
-			}
+		if (slab_state == DOWN) {
+			early_kmem_cache_node_alloc(gfpflags, node);
+			continue;
 		}
+		n = kmem_cache_alloc_node(kmalloc_caches,
+						gfpflags, node);
+
+		if (!n) {
+			free_kmem_cache_nodes(s);
+			return 0;
+		}
+
 		s->node[node] = n;
 		init_kmem_cache_node(n, s);
 	}
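
To make the "own cacheline" motivation concrete, the stand-alone sketch below
(hypothetical structs, not the kernel's, assuming 64-byte cachelines) shows why node
state embedded in the cache descriptor lands on the same cacheline as the descriptor's
read-mostly fields, whereas a pointer to separately allocated state does not.

/* Hypothetical user-space illustration; these are not the kernel structs.
 * Assumes 64-byte cachelines. */
#include <stdio.h>
#include <stddef.h>

#define CACHELINE_BYTES 64

struct node_state {			/* stand-in for kmem_cache_node */
	unsigned long list_lock;	/* written by CPUs freeing remote objects */
	unsigned long nr_partial;
};

struct cache_embedded {			/* pre-patch shape: node state inline */
	unsigned long flags;		/* read-mostly hot fields */
	int objsize;
	int offset;
	struct node_state local_node;	/* sits right after the hot fields */
};

struct cache_pointer {			/* post-patch NUMA shape: pointer only */
	unsigned long flags;
	int objsize;
	int offset;
	struct node_state *node;	/* the state itself lives elsewhere */
};

int main(void)
{
	size_t off = offsetof(struct cache_embedded, local_node);

	printf("embedded local_node starts at byte %zu: %s cacheline as hot fields\n",
	       off, off < CACHELINE_BYTES ? "same" : "different");
	printf("pointer version keeps only %zu bytes of node state in the descriptor\n",
	       sizeof(struct cache_pointer) - offsetof(struct cache_pointer, node));
	return 0;
}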
