---
r: 42658
b: refs/heads/master
c: 3395ee0
h: refs/heads/master
v: v3
Paul Menage authored and Linus Torvalds committed Dec 7, 2006
1 parent 4bada93 commit b2ca2bb
Showing 3 changed files with 34 additions and 10 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
refs/heads/master: 8f5be20bf87da7c7c59c5cc84f630a1eca5cc99c
refs/heads/master: 3395ee0588795b0b3bd889c260e55959cf2b61f5
4 changes: 4 additions & 0 deletions trunk/Documentation/kernel-parameters.txt
@@ -1012,6 +1012,10 @@ and is between 256 and 4096 characters. It is defined in the file
emulation library even if a 387 maths coprocessor
is present.

noaliencache [MM, NUMA] Disables the allocation of alien caches in
the slab allocator. Saves per-node memory, but will
impact performance on real NUMA hardware.

noalign [KNL,ARM]

noapic [SMP,APIC] Tells the kernel to not make use of any
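Boot options documented in kernel-parameters.txt, such as the noaliencache entry added above, are typically wired up with the kernel's __setup() macro: an early handler runs while the command line is parsed and flips a flag before the subsystem consults it. The fragment below is a minimal sketch of that general pattern using an invented flag name (noexamplecache); the real noaliencache handler appears in the mm/slab.c diff that follows.

#include <linux/init.h>
#include <linux/cache.h>

/*
 * Hypothetical illustration of the __setup() pattern only; the flag
 * "noexamplecache" does not exist.  The real noaliencache handler is
 * shown in the mm/slab.c hunk below.
 */
static int use_example_cache __read_mostly = 1;

static int __init noexamplecache_setup(char *s)
{
	use_example_cache = 0;	/* "noexamplecache" on the boot line turns it off */
	return 1;		/* non-zero: option recognised and consumed */
}
__setup("noexamplecache", noexamplecache_setup);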
38 changes: 29 additions & 9 deletions trunk/mm/slab.c
@@ -869,6 +869,22 @@ static void __slab_error(const char *function, struct kmem_cache *cachep,
dump_stack();
}

/*
* By default on NUMA we use alien caches to stage the freeing of
* objects allocated from other nodes. This causes massive memory
* inefficiencies when using a fake NUMA setup to split memory into a
* large number of small nodes, so it can be disabled on the command
* line.
*/

static int use_alien_caches __read_mostly = 1;
static int __init noaliencache_setup(char *s)
{
use_alien_caches = 0;
return 1;
}
__setup("noaliencache", noaliencache_setup);

#ifdef CONFIG_NUMA
/*
* Special reaping functions for NUMA systems called from cache_reap().
@@ -1117,7 +1133,7 @@ static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
* Make sure we are not freeing an object from another node to the array
* cache on this cpu.
*/
if (likely(slabp->nodeid == node))
if (likely(slabp->nodeid == node) || unlikely(!use_alien_caches))
return 0;

l3 = cachep->nodelists[node];
@@ -1195,7 +1211,7 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb,
list_for_each_entry(cachep, &cache_chain, next) {
struct array_cache *nc;
struct array_cache *shared;
struct array_cache **alien;
struct array_cache **alien = NULL;

nc = alloc_arraycache(node, cachep->limit,
cachep->batchcount);
@@ -1207,9 +1223,11 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb,
if (!shared)
goto bad;

alien = alloc_alien_cache(node, cachep->limit);
if (!alien)
goto bad;
if (use_alien_caches) {
alien = alloc_alien_cache(node, cachep->limit);
if (!alien)
goto bad;
}
cachep->array[cpu] = nc;
l3 = cachep->nodelists[node];
BUG_ON(!l3);
@@ -3590,13 +3608,15 @@ static int alloc_kmemlist(struct kmem_cache *cachep)
int node;
struct kmem_list3 *l3;
struct array_cache *new_shared;
struct array_cache **new_alien;
struct array_cache **new_alien = NULL;

for_each_online_node(node) {

new_alien = alloc_alien_cache(node, cachep->limit);
if (!new_alien)
goto fail;
if (use_alien_caches) {
new_alien = alloc_alien_cache(node, cachep->limit);
if (!new_alien)
goto fail;
}

new_shared = alloc_arraycache(node,
cachep->shared*cachep->batchcount,
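To summarise the mm/slab.c hunks above: the new use_alien_caches flag is consulted in two kinds of places. The free path (cache_free_alien) bails out of the alien-cache machinery early, and the setup paths (cpuup_callback, alloc_kmemlist) skip allocating the per-node alien arrays, leaving the alien pointers NULL. The standalone helper below is an invented illustration of the free-path decision, not code from this commit; the trade-off is the one the documentation entry states, namely less per-node memory under fake NUMA at the cost of remote-free behaviour on real NUMA hardware.

/*
 * Invented helper, not part of the patch: the predicate that
 * cache_free_alien() effectively evaluates after this commit.
 * 'enabled' stands in for the use_alien_caches flag added above.
 */
static inline int should_use_alien_cache(int obj_node, int this_node, int enabled)
{
	/*
	 * Same-node frees never go through the alien cache, and a kernel
	 * booted with "noaliencache" never does either.
	 */
	if (obj_node == this_node || !enabled)
		return 0;
	return 1;
}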
