
Commit 3b66f5d

---
r: 36152
b: refs/heads/master
c: 765c450
h: refs/heads/master
v: v3
Christoph Lameter authored and Linus Torvalds committed Sep 27, 2006
1 parent a2161c9 commit 3b66f5d
Showing 3 changed files with 82 additions and 31 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 77f700dab4c05f8ee17584ec869672796d7bcb87
+refs/heads/master: 765c4507af71c39aba21006bbd3ec809fe9714ff
4 changes: 3 additions & 1 deletion trunk/mm/mempolicy.c
@@ -1136,7 +1136,9 @@ static unsigned interleave_nodes(struct mempolicy *policy)
  */
 unsigned slab_node(struct mempolicy *policy)
 {
-        switch (policy->policy) {
+        int pol = policy ? policy->policy : MPOL_DEFAULT;
+
+        switch (pol) {
         case MPOL_INTERLEAVE:
                 return interleave_nodes(policy);
 
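A brief aside on this hunk (an illustration, not part of the commit): the new slab fallback path in mm/slab.c calls slab_node(current->mempolicy) directly, and most tasks never install a memory policy, so that pointer is routinely NULL. Mapping NULL to MPOL_DEFAULT makes slab_node() return the local node in that case instead of dereferencing a NULL pointer. A minimal sketch, with a hypothetical helper name:

#include <linux/mempolicy.h>
#include <linux/sched.h>

/* Sketch only: how the guarded slab_node() gets used by the slab fallback
 * path. current->mempolicy is NULL for the common case of a task without
 * an explicit policy; that now maps to MPOL_DEFAULT, i.e. the local node. */
static unsigned sketch_fallback_start_node(void)
{
        return slab_node(current->mempolicy);
}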
107 changes: 78 additions & 29 deletions trunk/mm/slab.c
@@ -972,7 +972,39 @@ static int transfer_objects(struct array_cache *to,
         return nr;
 }
 
-#ifdef CONFIG_NUMA
+#ifndef CONFIG_NUMA
+
+#define drain_alien_cache(cachep, alien) do { } while (0)
+#define reap_alien(cachep, l3) do { } while (0)
+
+static inline struct array_cache **alloc_alien_cache(int node, int limit)
+{
+        return (struct array_cache **)BAD_ALIEN_MAGIC;
+}
+
+static inline void free_alien_cache(struct array_cache **ac_ptr)
+{
+}
+
+static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
+{
+        return 0;
+}
+
+static inline void *alternate_node_alloc(struct kmem_cache *cachep,
+                gfp_t flags)
+{
+        return NULL;
+}
+
+static inline void *__cache_alloc_node(struct kmem_cache *cachep,
+                gfp_t flags, int nodeid)
+{
+        return NULL;
+}
+
+#else  /* CONFIG_NUMA */
+
 static void *__cache_alloc_node(struct kmem_cache *, gfp_t, int);
 static void *alternate_node_alloc(struct kmem_cache *, gfp_t);
 
@@ -1101,26 +1133,6 @@ static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
         }
         return 1;
 }
-
-#else
-
-#define drain_alien_cache(cachep, alien) do { } while (0)
-#define reap_alien(cachep, l3) do { } while (0)
-
-static inline struct array_cache **alloc_alien_cache(int node, int limit)
-{
-        return (struct array_cache **)BAD_ALIEN_MAGIC;
-}
-
-static inline void free_alien_cache(struct array_cache **ac_ptr)
-{
-}
-
-static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
-{
-        return 0;
-}
-
 #endif
 
 static int __cpuinit cpuup_callback(struct notifier_block *nfb,
@@ -1564,7 +1576,13 @@ static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid)
          */
         flags |= __GFP_COMP;
 #endif
-        flags |= cachep->gfpflags;
+
+        /*
+         * Under NUMA we want memory on the indicated node. We will handle
+         * the needed fallback ourselves since we want to serve from our
+         * per node object lists first for other nodes.
+         */
+        flags |= cachep->gfpflags | GFP_THISNODE;
 
         page = alloc_pages_node(nodeid, flags, cachep->gfporder);
         if (!page)
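A hedged note on the GFP_THISNODE line above: the flag tells the page allocator to satisfy the request from the indicated node only and to fail quickly rather than spill onto other nodes (its exact composition, roughly __GFP_THISNODE plus __GFP_NORETRY and __GFP_NOWARN, is an assumption to be checked against include/linux/gfp.h at this point in the tree). A NULL return from alloc_pages_node() therefore now means "this node has no pages", and the slab layer does the cross-node fallback itself, which is what fallback_alloc() further down provides. A minimal sketch with a hypothetical wrapper name:

#include <linux/gfp.h>
#include <linux/mm.h>

/* Sketch only: the node-bound page request that kmem_getpages() now issues. */
static struct page *sketch_grow_slab_on_node(gfp_t cachep_gfpflags, int order,
                                             int nodeid)
{
        /* GFP_THISNODE: serve from 'nodeid' or return NULL, never fall back */
        return alloc_pages_node(nodeid, cachep_gfpflags | GFP_THISNODE, order);
}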
@@ -3051,13 +3069,18 @@ static __always_inline void *__cache_alloc(struct kmem_cache *cachep,
 
         local_irq_save(save_flags);
 
-#ifdef CONFIG_NUMA
-        if (unlikely(current->flags & (PF_SPREAD_SLAB | PF_MEMPOLICY)))
+        if (unlikely(NUMA_BUILD &&
+                        current->flags & (PF_SPREAD_SLAB | PF_MEMPOLICY)))
                 objp = alternate_node_alloc(cachep, flags);
-#endif
 
         if (!objp)
                 objp = ____cache_alloc(cachep, flags);
+        /*
+         * We may just have run out of memory on the local node.
+         * __cache_alloc_node() knows how to locate memory on other nodes
+         */
+        if (NUMA_BUILD && !objp)
+                objp = __cache_alloc_node(cachep, flags, numa_node_id());
         local_irq_restore(save_flags);
         objp = cache_alloc_debugcheck_after(cachep, flags, objp,
                                             caller);
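Restating the hunk above as a whole (an illustrative summary that assumes it sits inside mm/slab.c; the helper name is hypothetical, the callees are the real ones from the diff): __cache_alloc() now tries up to three sources in order before giving up.

/* Sketch only: the allocation order __cache_alloc() follows after this patch. */
static void *sketch_cache_alloc_order(struct kmem_cache *cachep, gfp_t flags)
{
        void *objp = NULL;

        /* 1. tasks with a mempolicy or cpuset slab spreading pick a node first */
        if (NUMA_BUILD && (current->flags & (PF_SPREAD_SLAB | PF_MEMPOLICY)))
                objp = alternate_node_alloc(cachep, flags);
        /* 2. the common fast path: the local per-CPU / per-node object lists */
        if (!objp)
                objp = ____cache_alloc(cachep, flags);
        /* 3. local node exhausted: try the local node's slab lists and, via
         *    fallback_alloc(), the zones of other nodes */
        if (NUMA_BUILD && !objp)
                objp = __cache_alloc_node(cachep, flags, numa_node_id());
        return objp;
}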
@@ -3076,7 +3099,7 @@ static void *alternate_node_alloc(struct kmem_cache *cachep, gfp_t flags)
 {
         int nid_alloc, nid_here;
 
-        if (in_interrupt())
+        if (in_interrupt() || (flags & __GFP_THISNODE))
                 return NULL;
         nid_alloc = nid_here = numa_node_id();
         if (cpuset_do_slab_mem_spread() && (cachep->flags & SLAB_MEM_SPREAD))
@@ -3088,6 +3111,28 @@ static void *alternate_node_alloc(struct kmem_cache *cachep, gfp_t flags)
         return NULL;
 }
 
+/*
+ * Fallback function if there was no memory available and no objects on a
+ * certain node and we are allowed to fall back. We mimick the behavior of
+ * the page allocator. We fall back according to a zonelist determined by
+ * the policy layer while obeying cpuset constraints.
+ */
+void *fallback_alloc(struct kmem_cache *cache, gfp_t flags)
+{
+        struct zonelist *zonelist = &NODE_DATA(slab_node(current->mempolicy))
+                                        ->node_zonelists[gfp_zone(flags)];
+        struct zone **z;
+        void *obj = NULL;
+
+        for (z = zonelist->zones; *z && !obj; z++)
+                if (zone_idx(*z) <= ZONE_NORMAL &&
+                                cpuset_zone_allowed(*z, flags))
+                        obj = __cache_alloc_node(cache,
+                                        flags | __GFP_THISNODE,
+                                        zone_to_nid(*z));
+        return obj;
+}
+
 /*
  * A interface to enable slab creation on nodeid
  */
@@ -3141,11 +3186,15 @@ static void *__cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
 must_grow:
         spin_unlock(&l3->list_lock);
         x = cache_grow(cachep, flags, nodeid);
+        if (x)
+                goto retry;
 
-        if (!x)
-                return NULL;
+        if (!(flags & __GFP_THISNODE))
+                /* Unable to grow the cache. Fall back to other nodes. */
+                return fallback_alloc(cachep, flags);
+
+        return NULL;
 
-        goto retry;
 done:
         return obj;
 }
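One property of the last two hunks worth spelling out (an observation, not text from the commit): fallback_alloc() and __cache_alloc_node() cannot bounce between each other indefinitely, because fallback_alloc() always ORs __GFP_THISNODE into the flags it passes down, and __cache_alloc_node() only calls fallback_alloc() when that flag is absent. A sketch of the guard, assuming it sits inside mm/slab.c, with a hypothetical try_this_node() standing in for the per-node free-list and cache_grow() path:

/* Sketch only: the recursion guard between __cache_alloc_node() and
 * fallback_alloc(). try_this_node() is a hypothetical stand-in for the
 * node-local part of __cache_alloc_node(). */
static void *try_this_node(struct kmem_cache *cachep, gfp_t flags, int nodeid);

static void *sketch_alloc_node_or_fallback(struct kmem_cache *cachep,
                                           gfp_t flags, int nodeid)
{
        void *obj = try_this_node(cachep, flags, nodeid);

        if (obj)
                return obj;
        if (flags & __GFP_THISNODE)
                return NULL;            /* node-bound request: give up here */
        /* every nested attempt is node-bound, so it cannot fall back again */
        return fallback_alloc(cachep, flags);
}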
