---
yaml
---
r: 198871
b: refs/heads/master
c: 7d6e6d0
h: refs/heads/master
i:
  198869: fef8c01
  198867: 72f01d7
  198863: d779a24
v: v3
Lee Schermerhorn authored and Linus Torvalds committed May 27, 2010
1 parent c9e260b commit e482fe3
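
This commit converts the slab allocator's per-CPU node lookups from numa_node_id()/cpu_to_node() to numa_mem_id()/cpu_to_mem(), so per-node slab structures are indexed by the nearest node that actually has memory rather than by the CPU's own, possibly memoryless, node. The fragment below is only an illustrative sketch of that distinction and is not part of the commit; example_pick_slab_node() is a made-up name, while numa_node_id() and numa_mem_id() are the topology helpers (declared in <linux/topology.h>) that the diff relies on.

#include <linux/topology.h>
#include <linux/printk.h>

/* Illustrative sketch only: contrast the two node lookups swapped by this diff. */
static void example_pick_slab_node(void)
{
	int cpu_node  = numa_node_id();	/* node this CPU sits on */
	int slab_node = numa_mem_id();	/* nearest node that has memory */

	/* On systems without memoryless nodes the two values are the same. */
	if (cpu_node != slab_node)
		pr_debug("CPU node %d is memoryless; using node %d for slab\n",
			 cpu_node, slab_node);
}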
Showing 2 changed files with 23 additions and 22 deletions.
[refs]: 2 changes (1 addition, 1 deletion)
@@ -1,2 +1,2 @@
---
- refs/heads/master: fd1197f1131a1f1d8bc192f9cfbbe17e305f17f3
+ refs/heads/master: 7d6e6d09de82cf6cff7fecdba55198b9f47b381c
trunk/mm/slab.c: 43 changes (22 additions, 21 deletions)
@@ -821,7 +821,7 @@ static void init_reap_node(int cpu)
{
int node;

- node = next_node(cpu_to_node(cpu), node_online_map);
+ node = next_node(cpu_to_mem(cpu), node_online_map);
if (node == MAX_NUMNODES)
node = first_node(node_online_map);

@@ -1050,7 +1050,7 @@ static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
struct array_cache *alien = NULL;
int node;

- node = numa_node_id();
+ node = numa_mem_id();

/*
* Make sure we are not freeing a object from another node to the array
@@ -1129,7 +1129,7 @@ static void __cpuinit cpuup_canceled(long cpu)
{
struct kmem_cache *cachep;
struct kmem_list3 *l3 = NULL;
- int node = cpu_to_node(cpu);
+ int node = cpu_to_mem(cpu);
const struct cpumask *mask = cpumask_of_node(node);

list_for_each_entry(cachep, &cache_chain, next) {
@@ -1194,7 +1194,7 @@ static int __cpuinit cpuup_prepare(long cpu)
{
struct kmem_cache *cachep;
struct kmem_list3 *l3 = NULL;
- int node = cpu_to_node(cpu);
+ int node = cpu_to_mem(cpu);
int err;

/*
@@ -1479,7 +1479,7 @@ void __init kmem_cache_init(void)
* 6) Resize the head arrays of the kmalloc caches to their final sizes.
*/

- node = numa_node_id();
+ node = numa_mem_id();

/* 1) create the cache_cache */
INIT_LIST_HEAD(&cache_chain);
@@ -2121,7 +2121,7 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
}
}
}
- cachep->nodelists[numa_node_id()]->next_reap =
+ cachep->nodelists[numa_mem_id()]->next_reap =
jiffies + REAPTIMEOUT_LIST3 +
((unsigned long)cachep) % REAPTIMEOUT_LIST3;

@@ -2452,7 +2452,7 @@ static void check_spinlock_acquired(struct kmem_cache *cachep)
{
#ifdef CONFIG_SMP
check_irq_off();
- assert_spin_locked(&cachep->nodelists[numa_node_id()]->list_lock);
+ assert_spin_locked(&cachep->nodelists[numa_mem_id()]->list_lock);
#endif
}

@@ -2479,7 +2479,7 @@ static void do_drain(void *arg)
{
struct kmem_cache *cachep = arg;
struct array_cache *ac;
- int node = numa_node_id();
+ int node = numa_mem_id();

check_irq_off();
ac = cpu_cache_get(cachep);
@@ -3012,7 +3012,7 @@ static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags)

retry:
check_irq_off();
- node = numa_node_id();
+ node = numa_mem_id();
ac = cpu_cache_get(cachep);
batchcount = ac->batchcount;
if (!ac->touched && batchcount > BATCHREFILL_LIMIT) {
@@ -3216,7 +3216,7 @@ static void *alternate_node_alloc(struct kmem_cache *cachep, gfp_t flags)

if (in_interrupt() || (flags & __GFP_THISNODE))
return NULL;
- nid_alloc = nid_here = numa_node_id();
+ nid_alloc = nid_here = numa_mem_id();
get_mems_allowed();
if (cpuset_do_slab_mem_spread() && (cachep->flags & SLAB_MEM_SPREAD))
nid_alloc = cpuset_slab_spread_node();
@@ -3281,7 +3281,7 @@ static void *fallback_alloc(struct kmem_cache *cache, gfp_t flags)
if (local_flags & __GFP_WAIT)
local_irq_enable();
kmem_flagcheck(cache, flags);
- obj = kmem_getpages(cache, local_flags, numa_node_id());
+ obj = kmem_getpages(cache, local_flags, numa_mem_id());
if (local_flags & __GFP_WAIT)
local_irq_disable();
if (obj) {
@@ -3389,6 +3389,7 @@ __cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
{
unsigned long save_flags;
void *ptr;
+ int slab_node = numa_mem_id();

flags &= gfp_allowed_mask;

@@ -3401,15 +3402,15 @@ __cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
local_irq_save(save_flags);

if (nodeid == -1)
- nodeid = numa_node_id();
+ nodeid = slab_node;

if (unlikely(!cachep->nodelists[nodeid])) {
/* Node not bootstrapped yet */
ptr = fallback_alloc(cachep, flags);
goto out;
}

- if (nodeid == numa_node_id()) {
+ if (nodeid == slab_node) {
/*
* Use the locally cached objects if possible.
* However ____cache_alloc does not allow fallback
@@ -3453,8 +3454,8 @@ __do_cache_alloc(struct kmem_cache *cache, gfp_t flags)
* We may just have run out of memory on the local node.
* ____cache_alloc_node() knows how to locate memory on other nodes
*/
- if (!objp)
- objp = ____cache_alloc_node(cache, flags, numa_node_id());
+ if (!objp)
+ objp = ____cache_alloc_node(cache, flags, numa_mem_id());

out:
return objp;
@@ -3551,7 +3552,7 @@ static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
{
int batchcount;
struct kmem_list3 *l3;
- int node = numa_node_id();
+ int node = numa_mem_id();

batchcount = ac->batchcount;
#if DEBUG
@@ -3985,7 +3986,7 @@ static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
return -ENOMEM;

for_each_online_cpu(i) {
- new->new[i] = alloc_arraycache(cpu_to_node(i), limit,
+ new->new[i] = alloc_arraycache(cpu_to_mem(i), limit,
batchcount, gfp);
if (!new->new[i]) {
for (i--; i >= 0; i--)
@@ -4007,9 +4008,9 @@ static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
struct array_cache *ccold = new->new[i];
if (!ccold)
continue;
- spin_lock_irq(&cachep->nodelists[cpu_to_node(i)]->list_lock);
- free_block(cachep, ccold->entry, ccold->avail, cpu_to_node(i));
- spin_unlock_irq(&cachep->nodelists[cpu_to_node(i)]->list_lock);
+ spin_lock_irq(&cachep->nodelists[cpu_to_mem(i)]->list_lock);
+ free_block(cachep, ccold->entry, ccold->avail, cpu_to_mem(i));
+ spin_unlock_irq(&cachep->nodelists[cpu_to_mem(i)]->list_lock);
kfree(ccold);
}
kfree(new);
@@ -4115,7 +4116,7 @@ static void cache_reap(struct work_struct *w)
{
struct kmem_cache *searchp;
struct kmem_list3 *l3;
- int node = numa_node_id();
+ int node = numa_mem_id();
struct delayed_work *work = to_delayed_work(w);

if (!mutex_trylock(&cache_chain_mutex))
