---
r: 42699
b: refs/heads/master
c: 3c517a6
h: refs/heads/master
i:
  42697: fdd42ce
  42695: 8495ead
v: v3
Christoph Lameter authored and Linus Torvalds committed Dec 7, 2006
1 parent 89e7e10 commit f1af589
Showing 2 changed files with 58 additions and 23 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 952f3b51beb592f3f1de15adcdef802fc086ea91
+refs/heads/master: 3c517a6132098ca37e122a2980fc64a9e798b0d7
79 changes: 57 additions & 22 deletions trunk/mm/slab.c
@@ -1605,12 +1605,7 @@ static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 	flags |= __GFP_COMP;
 #endif
 
-	/*
-	 * Under NUMA we want memory on the indicated node. We will handle
-	 * the needed fallback ourselves since we want to serve from our
-	 * per node object lists first for other nodes.
-	 */
-	flags |= cachep->gfpflags | GFP_THISNODE;
+	flags |= cachep->gfpflags;
 
 	page = alloc_pages_node(nodeid, flags, cachep->gfporder);
 	if (!page)
@@ -2567,7 +2562,7 @@ static struct slab *alloc_slabmgmt(struct kmem_cache *cachep, void *objp,
 	if (OFF_SLAB(cachep)) {
 		/* Slab management obj is off-slab. */
 		slabp = kmem_cache_alloc_node(cachep->slabp_cache,
-					      local_flags, nodeid);
+					      local_flags & ~GFP_THISNODE, nodeid);
 		if (!slabp)
 			return NULL;
 	} else {
@@ -2708,10 +2703,10 @@ static void slab_map_pages(struct kmem_cache *cache, struct slab *slab,
  * Grow (by 1) the number of slabs within a cache. This is called by
  * kmem_cache_alloc() when there are no active objs left in a cache.
  */
-static int cache_grow(struct kmem_cache *cachep, gfp_t flags, int nodeid)
+static int cache_grow(struct kmem_cache *cachep,
+		gfp_t flags, int nodeid, void *objp)
 {
 	struct slab *slabp;
-	void *objp;
 	size_t offset;
 	gfp_t local_flags;
 	unsigned long ctor_flags;
@@ -2763,12 +2758,14 @@ static int cache_grow(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 	 * Get mem for the objs. Attempt to allocate a physical page from
 	 * 'nodeid'.
 	 */
-	objp = kmem_getpages(cachep, flags, nodeid);
+	if (!objp)
+		objp = kmem_getpages(cachep, flags, nodeid);
 	if (!objp)
 		goto failed;
 
 	/* Get slab management. */
-	slabp = alloc_slabmgmt(cachep, objp, offset, local_flags, nodeid);
+	slabp = alloc_slabmgmt(cachep, objp, offset,
+			local_flags & ~GFP_THISNODE, nodeid);
 	if (!slabp)
 		goto opps1;
 
@@ -3006,7 +3003,7 @@ static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags)
 
 	if (unlikely(!ac->avail)) {
 		int x;
-		x = cache_grow(cachep, flags, node);
+		x = cache_grow(cachep, flags | GFP_THISNODE, node, NULL);
 
 		/* cache_grow can reenable interrupts, then ac could change. */
 		ac = cpu_cache_get(cachep);
@@ -3166,25 +3163,63 @@ static void *alternate_node_alloc(struct kmem_cache *cachep, gfp_t flags)
 
 /*
  * Fallback function if there was no memory available and no objects on a
- * certain node and we are allowed to fall back. We mimick the behavior of
- * the page allocator. We fall back according to a zonelist determined by
- * the policy layer while obeying cpuset constraints.
+ * certain node and fall back is permitted. First we scan all the
+ * available nodelists for available objects. If that fails then we
+ * perform an allocation without specifying a node. This allows the page
+ * allocator to do its reclaim / fallback magic. We then insert the
+ * slab into the proper nodelist and then allocate from it.
  */
 void *fallback_alloc(struct kmem_cache *cache, gfp_t flags)
 {
 	struct zonelist *zonelist = &NODE_DATA(slab_node(current->mempolicy))
 					->node_zonelists[gfp_zone(flags)];
 	struct zone **z;
 	void *obj = NULL;
+	int nid;
 
+retry:
+	/*
+	 * Look through allowed nodes for objects available
+	 * from existing per node queues.
+	 */
 	for (z = zonelist->zones; *z && !obj; z++) {
-		int nid = zone_to_nid(*z);
+		nid = zone_to_nid(*z);
+
+		if (cpuset_zone_allowed(*z, flags) &&
+			cache->nodelists[nid] &&
+			cache->nodelists[nid]->free_objects)
+				obj = ____cache_alloc_node(cache,
+					flags | GFP_THISNODE, nid);
+	}
 
-		if (zone_idx(*z) <= ZONE_NORMAL &&
-				cpuset_zone_allowed(*z, flags) &&
-				cache->nodelists[nid])
-			obj = ____cache_alloc_node(cache,
-					flags | __GFP_THISNODE, nid);
+	if (!obj) {
+		/*
+		 * This allocation will be performed within the constraints
+		 * of the current cpuset / memory policy requirements.
+		 * We may trigger various forms of reclaim on the allowed
+		 * set and go into memory reserves if necessary.
+		 */
+		obj = kmem_getpages(cache, flags, -1);
+		if (obj) {
+			/*
+			 * Insert into the appropriate per node queues
+			 */
+			nid = page_to_nid(virt_to_page(obj));
+			if (cache_grow(cache, flags, nid, obj)) {
+				obj = ____cache_alloc_node(cache,
+					flags | GFP_THISNODE, nid);
+				if (!obj)
+					/*
+					 * Another processor may allocate the
+					 * objects in the slab since we are
+					 * not holding any locks.
+					 */
+					goto retry;
+			} else {
+				kmem_freepages(cache, obj);
+				obj = NULL;
+			}
+		}
 	}
 	return obj;
 }
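The comment added above spells out the new allocation order: scan the existing per-node queues for a free object, otherwise let the page allocator pick any allowed node, grow the cache on whichever node the page landed on, and rescan if another CPU empties the fresh slab first. The short standalone C program below is only a toy model of that ordering, not kernel code; every name in it (toy_cache, alloc_from_existing_queues, get_page_any_node, grow_cache_on_node, toy_fallback_alloc) is invented for illustration, and the details that matter in the kernel (cpuset checks, GFP_THISNODE masking, locking) are left out.

/*
 * Toy model of the fallback ordering, for illustration only.
 * Ordinary userspace C, not kernel code.
 */
#include <stdio.h>
#include <stdlib.h>

#define MAX_NODES 4
#define OBJS_PER_SLAB 8

struct toy_cache {
	int free_objects[MAX_NODES];	/* per-node object queues */
};

/* Step 1: serve from an existing per-node queue if any node has objects. */
static int alloc_from_existing_queues(struct toy_cache *c)
{
	for (int nid = 0; nid < MAX_NODES; nid++) {
		if (c->free_objects[nid] > 0) {
			c->free_objects[nid]--;
			return nid;
		}
	}
	return -1;
}

/* Step 2: stand-in for kmem_getpages(cache, flags, -1): any node will do. */
static int get_page_any_node(void)
{
	return rand() % MAX_NODES;
}

/* Step 3: stand-in for cache_grow(): queue the new slab's objects on nid. */
static void grow_cache_on_node(struct toy_cache *c, int nid)
{
	c->free_objects[nid] += OBJS_PER_SLAB;
}

/* Returns the node the object was served from. */
static int toy_fallback_alloc(struct toy_cache *c)
{
	int nid;

retry:
	nid = alloc_from_existing_queues(c);
	if (nid >= 0)
		return nid;

	nid = get_page_any_node();
	grow_cache_on_node(c, nid);

	/*
	 * In the kernel another CPU may drain the new slab before we
	 * allocate from it, so fallback_alloc() jumps back to the queue
	 * scan; the toy keeps the same shape.
	 */
	if (c->free_objects[nid] == 0)
		goto retry;

	c->free_objects[nid]--;
	return nid;
}

int main(void)
{
	struct toy_cache cache = { .free_objects = { 0 } };

	for (int i = 0; i < 3; i++)
		printf("object served from node %d\n", toy_fallback_alloc(&cache));
	return 0;
}

The goto retry in the toy mirrors the retry label the patch introduces in fallback_alloc() itself.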
@@ -3241,7 +3276,7 @@ static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
 
 must_grow:
 	spin_unlock(&l3->list_lock);
-	x = cache_grow(cachep, flags, nodeid);
+	x = cache_grow(cachep, flags | GFP_THISNODE, nodeid, NULL);
 	if (x)
 		goto retry;
 