
Commit 4daf3f3

---
r: 47394
b: refs/heads/master
c: 8c8cc2c
h: refs/heads/master
v: v3
Pekka Enberg authored and Linus Torvalds committed Feb 11, 2007
1 parent 886daf7 commit 4daf3f3
Showing 2 changed files with 111 additions and 91 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
refs/heads/master: 6e40e73097a5e4ad1b9f92fa21757343fdd6a682
refs/heads/master: 8c8cc2c10c21943dd3499a2df7aac835093b37f3
200 changes: 110 additions & 90 deletions trunk/mm/slab.c
@@ -3189,35 +3189,6 @@ static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags)
return objp;
}

static __always_inline void *__cache_alloc(struct kmem_cache *cachep,
gfp_t flags, void *caller)
{
unsigned long save_flags;
void *objp = NULL;

cache_alloc_debugcheck_before(cachep, flags);

local_irq_save(save_flags);

if (unlikely(NUMA_BUILD &&
current->flags & (PF_SPREAD_SLAB | PF_MEMPOLICY)))
objp = alternate_node_alloc(cachep, flags);

if (!objp)
objp = ____cache_alloc(cachep, flags);
/*
* We may just have run out of memory on the local node.
* ____cache_alloc_node() knows how to locate memory on other nodes
*/
if (NUMA_BUILD && !objp)
objp = ____cache_alloc_node(cachep, flags, numa_node_id());
local_irq_restore(save_flags);
objp = cache_alloc_debugcheck_after(cachep, flags, objp,
caller);
prefetchw(objp);
return objp;
}

#ifdef CONFIG_NUMA
/*
* Try allocating on another node if PF_SPREAD_SLAB|PF_MEMPOLICY.
@@ -3249,14 +3220,20 @@ static void *alternate_node_alloc(struct kmem_cache *cachep, gfp_t flags)
* allocator to do its reclaim / fallback magic. We then insert the
* slab into the proper nodelist and then allocate from it.
*/
void *fallback_alloc(struct kmem_cache *cache, gfp_t flags)
static void *fallback_alloc(struct kmem_cache *cache, gfp_t flags)
{
struct zonelist *zonelist = &NODE_DATA(slab_node(current->mempolicy))
->node_zonelists[gfp_zone(flags)];
struct zonelist *zonelist;
gfp_t local_flags;
struct zone **z;
void *obj = NULL;
int nid;
gfp_t local_flags = (flags & GFP_LEVEL_MASK);

if (flags & __GFP_THISNODE)
return NULL;

zonelist = &NODE_DATA(slab_node(current->mempolicy))
->node_zonelists[gfp_zone(flags)];
local_flags = (flags & GFP_LEVEL_MASK);

retry:
/*
@@ -3366,16 +3343,110 @@ static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
if (x)
goto retry;

if (!(flags & __GFP_THISNODE))
/* Unable to grow the cache. Fall back to other nodes. */
return fallback_alloc(cachep, flags);

return NULL;
return fallback_alloc(cachep, flags);

done:
return obj;
}
#endif

/**
* kmem_cache_alloc_node - Allocate an object on the specified node
* @cachep: The cache to allocate from.
* @flags: See kmalloc().
* @nodeid: node number of the target node.
* @caller: return address of caller, used for debug information
*
* Identical to kmem_cache_alloc but it will allocate memory on the given
* node, which can improve the performance for cpu bound structures.
*
* Fallback to other node is possible if __GFP_THISNODE is not set.
*/
static __always_inline void *
__cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
void *caller)
{
unsigned long save_flags;
void *ptr;

cache_alloc_debugcheck_before(cachep, flags);
local_irq_save(save_flags);

if (unlikely(nodeid == -1))
nodeid = numa_node_id();

if (unlikely(!cachep->nodelists[nodeid])) {
/* Node not bootstrapped yet */
ptr = fallback_alloc(cachep, flags);
goto out;
}

if (nodeid == numa_node_id()) {
/*
* Use the locally cached objects if possible.
* However ____cache_alloc does not allow fallback
* to other nodes. It may fail while we still have
* objects on other nodes available.
*/
ptr = ____cache_alloc(cachep, flags);
if (ptr)
goto out;
}
/* ___cache_alloc_node can fall back to other nodes */
ptr = ____cache_alloc_node(cachep, flags, nodeid);
out:
local_irq_restore(save_flags);
ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller);

return ptr;
}

static __always_inline void *
__do_cache_alloc(struct kmem_cache *cache, gfp_t flags)
{
void *objp;

if (unlikely(current->flags & (PF_SPREAD_SLAB | PF_MEMPOLICY))) {
objp = alternate_node_alloc(cache, flags);
if (objp)
goto out;
}
objp = ____cache_alloc(cache, flags);

/*
* We may just have run out of memory on the local node.
* ____cache_alloc_node() knows how to locate memory on other nodes
*/
if (!objp)
objp = ____cache_alloc_node(cache, flags, numa_node_id());

out:
return objp;
}
#else

static __always_inline void *
__do_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
{
return ____cache_alloc(cachep, flags);
}

#endif /* CONFIG_NUMA */

static __always_inline void *
__cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller)
{
unsigned long save_flags;
void *objp;

cache_alloc_debugcheck_before(cachep, flags);
local_irq_save(save_flags);
objp = __do_cache_alloc(cachep, flags);
local_irq_restore(save_flags);
objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller);
prefetchw(objp);

return objp;
}

/*
* Caller needs to acquire correct kmem_list's list_lock
@@ -3574,57 +3645,6 @@ int kmem_ptr_validate(struct kmem_cache *cachep, const void *ptr)
}

#ifdef CONFIG_NUMA
/**
* kmem_cache_alloc_node - Allocate an object on the specified node
* @cachep: The cache to allocate from.
* @flags: See kmalloc().
* @nodeid: node number of the target node.
* @caller: return address of caller, used for debug information
*
* Identical to kmem_cache_alloc but it will allocate memory on the given
* node, which can improve the performance for cpu bound structures.
*
* Fallback to other node is possible if __GFP_THISNODE is not set.
*/
static __always_inline void *
__cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
int nodeid, void *caller)
{
unsigned long save_flags;
void *ptr = NULL;

cache_alloc_debugcheck_before(cachep, flags);
local_irq_save(save_flags);

if (unlikely(nodeid == -1))
nodeid = numa_node_id();

if (likely(cachep->nodelists[nodeid])) {
if (nodeid == numa_node_id()) {
/*
* Use the locally cached objects if possible.
* However ____cache_alloc does not allow fallback
* to other nodes. It may fail while we still have
* objects on other nodes available.
*/
ptr = ____cache_alloc(cachep, flags);
}
if (!ptr) {
/* ___cache_alloc_node can fall back to other nodes */
ptr = ____cache_alloc_node(cachep, flags, nodeid);
}
} else {
/* Node not bootstrapped yet */
if (!(flags & __GFP_THISNODE))
ptr = fallback_alloc(cachep, flags);
}

local_irq_restore(save_flags);
ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller);

return ptr;
}

void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
{
return __cache_alloc_node(cachep, flags, nodeid,
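The heart of this patch is the split of the old __cache_alloc() into a common wrapper plus a per-configuration __do_cache_alloc(): the irq-save and debug-check bookkeeping shared by the NUMA and non-NUMA paths stays in one place, while the placement policy is selected at compile time. Below is a minimal user-space sketch of that compile-time dispatch pattern; all names (cache_alloc, do_alloc, alloc_local, alloc_other_node, USE_NUMA) are hypothetical stubs for illustration, not kernel code.

/*
 * Minimal user-space sketch of the compile-time dispatch pattern used by
 * this commit: a single entry point does the common bookkeeping, and the
 * policy-specific work is selected with an #ifdef, so exactly one
 * definition of do_alloc() is compiled in.  All names are illustrative.
 *
 * Build: cc -DUSE_NUMA sketch.c   (or without -DUSE_NUMA for the UMA path)
 */
#include <stdio.h>
#include <stdlib.h>

struct cache { const char *name; };

/* Stub for the local fast path (____cache_alloc in the patch). */
static void *alloc_local(struct cache *c)
{
	return malloc(32);
}

#ifdef USE_NUMA
/* Stub for the cross-node fallback (____cache_alloc_node in the patch). */
static void *alloc_other_node(struct cache *c)
{
	return malloc(32);
}

/* NUMA flavour: try the local node first, then fall back to other nodes. */
static void *do_alloc(struct cache *c)
{
	void *obj = alloc_local(c);

	if (!obj)
		obj = alloc_other_node(c);
	return obj;
}
#else
/* UMA flavour: the local path is the only path. */
static void *do_alloc(struct cache *c)
{
	return alloc_local(c);
}
#endif

/* Common wrapper: shared bookkeeping surrounds the policy-specific call. */
static void *cache_alloc(struct cache *c)
{
	void *obj;

	/* before-hooks go here (debug checks, irq save in the kernel) */
	obj = do_alloc(c);
	/* after-hooks go here (debug checks, irq restore, prefetch) */
	return obj;
}

int main(void)
{
	struct cache c = { "example" };
	void *obj = cache_alloc(&c);

	printf("%s: got %p\n", c.name, obj);
	free(obj);
	return 0;
}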

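For context on the kernel-doc moved by this patch: kmem_cache_alloc_node() is used like kmem_cache_alloc(), with an extra node id, and may fall back to other nodes unless __GFP_THISNODE is passed. A hedged caller-side sketch follows; struct foo, foo_cache, and foo_init_on_node() are hypothetical, the cache is assumed to be created elsewhere with kmem_cache_create(), and the signatures are the 2.6.20-era slab API.

/*
 * Illustrative caller-side sketch (not part of this commit): placing a
 * per-node structure on a specific NUMA node with kmem_cache_alloc_node().
 */
#include <linux/slab.h>
#include <linux/errno.h>

struct foo {
	int node;
	/* per-node state would live here */
};

static struct kmem_cache *foo_cache;	/* assumed: kmem_cache_create() elsewhere */

static int foo_init_on_node(int nid, struct foo **out)
{
	struct foo *f;

	/* Prefer memory on node nid; may fall back unless __GFP_THISNODE is set. */
	f = kmem_cache_alloc_node(foo_cache, GFP_KERNEL, nid);
	if (!f)
		return -ENOMEM;

	f->node = nid;
	*out = f;
	return 0;
}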