From fdd42ce4219189db92711458df3f9c7d1b803912 Mon Sep 17 00:00:00 2001
From: Christoph Lameter
Date: Wed, 6 Dec 2006 20:33:24 -0800
Subject: [PATCH]

--- yaml ---
r: 42697
b: refs/heads/master
c: 5bcd234d881d83ac0259c6d42d98f134e31c60a8
h: refs/heads/master
i:
  42695: 8495ead8eb370de90dd5d634e544a9a1e0aa4994
v: v3
---
 [refs]          |  2 +-
 trunk/mm/slab.c | 40 ++++++++++++++++++++++++++++------------
 2 files changed, 29 insertions(+), 13 deletions(-)

diff --git a/[refs] b/[refs]
index bf13be417bd6..b3f6c61f462f 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 1b1cec4bbc59feac89670d5d6d222a02545bac94
+refs/heads/master: 5bcd234d881d83ac0259c6d42d98f134e31c60a8

diff --git a/trunk/mm/slab.c b/trunk/mm/slab.c
index bb831ba63e1e..6da554fd3f6a 100644
--- a/trunk/mm/slab.c
+++ b/trunk/mm/slab.c
@@ -3459,29 +3459,45 @@ int fastcall kmem_ptr_validate(struct kmem_cache *cachep, void *ptr)
  * @flags: See kmalloc().
  * @nodeid: node number of the target node.
  *
- * Identical to kmem_cache_alloc, except that this function is slow
- * and can sleep. And it will allocate memory on the given node, which
- * can improve the performance for cpu bound structures.
- * New and improved: it will now make sure that the object gets
- * put on the correct node list so that there is no false sharing.
+ * Identical to kmem_cache_alloc but it will allocate memory on the given
+ * node, which can improve the performance for cpu bound structures.
+ *
+ * Fallback to other node is possible if __GFP_THISNODE is not set.
  */
 static __always_inline void *
 __cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
 		void *caller)
 {
 	unsigned long save_flags;
-	void *ptr;
+	void *ptr = NULL;
 
 	cache_alloc_debugcheck_before(cachep, flags);
 	local_irq_save(save_flags);
 
-	if (nodeid == -1 || nodeid == numa_node_id() ||
-	    !cachep->nodelists[nodeid])
-		ptr = ____cache_alloc(cachep, flags);
-	else
-		ptr = ____cache_alloc_node(cachep, flags, nodeid);
-	local_irq_restore(save_flags);
+	if (unlikely(nodeid == -1))
+		nodeid = numa_node_id();
+	if (likely(cachep->nodelists[nodeid])) {
+		if (nodeid == numa_node_id()) {
+			/*
+			 * Use the locally cached objects if possible.
+			 * However ____cache_alloc does not allow fallback
+			 * to other nodes. It may fail while we still have
+			 * objects on other nodes available.
+			 */
+			ptr = ____cache_alloc(cachep, flags);
+		}
+		if (!ptr) {
+			/* ___cache_alloc_node can fall back to other nodes */
+			ptr = ____cache_alloc_node(cachep, flags, nodeid);
+		}
+	} else {
+		/* Node not bootstrapped yet */
+		if (!(flags & __GFP_THISNODE))
+			ptr = fallback_alloc(cachep, flags);
+	}
+
+	local_irq_restore(save_flags);
 	ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller);
 	return ptr;
 }
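
For readers following the control flow above, here is a minimal userspace
sketch of the decision ladder this patch introduces. The names in it
(node_ready, alloc_local, alloc_on_node, alloc_any_node, THISNODE,
this_node) are hypothetical stand-ins for cachep->nodelists[nodeid],
____cache_alloc(), ____cache_alloc_node(), fallback_alloc(),
__GFP_THISNODE and numa_node_id(); only the branch structure is taken
from the patch.

/*
 * Hypothetical stand-ins: node_ready[] models cachep->nodelists[nodeid],
 * alloc_local() models ____cache_alloc() (per-cpu cache, no cross-node
 * fallback), alloc_on_node() models ____cache_alloc_node() (may fall
 * back to other nodes) and alloc_any_node() models fallback_alloc().
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>

#define THISNODE	0x1	/* models __GFP_THISNODE: forbid fallback */
#define MAX_NODES	4

static bool node_ready[MAX_NODES] = { true, true, false, false };
static int this_node;		/* models numa_node_id(); node 0 here */

static void *alloc_local(void)      { return malloc(32); }
static void *alloc_on_node(int nid) { (void)nid; return malloc(32); }
static void *alloc_any_node(void)   { return malloc(32); }

/* Mirrors the branch structure of the patched __cache_alloc_node(). */
static void *cache_alloc_node(int nodeid, int flags)
{
	void *ptr = NULL;

	if (nodeid == -1)		/* "any node" becomes the local node */
		nodeid = this_node;

	if (node_ready[nodeid]) {
		if (nodeid == this_node)
			/* fast path; may fail with objects left elsewhere */
			ptr = alloc_local();
		if (!ptr)
			/* slower path that is allowed to fall back */
			ptr = alloc_on_node(nodeid);
	} else if (!(flags & THISNODE)) {
		/* node not bootstrapped yet: take memory from any node */
		ptr = alloc_any_node();
	}
	return ptr;	/* NULL when THISNODE forbade the fallback */
}

int main(void)
{
	printf("local node:     %p\n", cache_alloc_node(-1, 0));
	printf("unready node 2: %p\n", cache_alloc_node(2, 0));
	printf("strict node 2:  %p\n", cache_alloc_node(2, THISNODE));
	return 0;
}

Under these assumptions the third call returns NULL, matching the patched
behavior of leaving ptr NULL when __GFP_THISNODE forbids falling back to
another node.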