
Commit 40f5f16

---
r: 9231
b: refs/heads/master
c: ff69416
h: refs/heads/master
i:
  9229: cff2f3f
  9227: ec6a2a4
  9223: 08417ea
  9215: a1ace4a
v: v3
Christoph Lameter authored and Linus Torvalds committed Sep 23, 2005
1 parent 8fe7122 commit 40f5f16
Showing 2 changed files with 20 additions and 20 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
-refs/heads/master: 57487f4376e16558ccbe45a5b41d8cb5192309a4
+refs/heads/master: ff69416e6323fe9d38c42a06ebdefeb58bbe9336
38 changes: 19 additions & 19 deletions trunk/mm/slab.c
@@ -640,7 +640,7 @@ static enum {

static DEFINE_PER_CPU(struct work_struct, reap_work);

-static void free_block(kmem_cache_t* cachep, void** objpp, int len);
+static void free_block(kmem_cache_t* cachep, void** objpp, int len, int node);
static void enable_cpucache (kmem_cache_t *cachep);
static void cache_reap (void *unused);
static int __node_shrink(kmem_cache_t *cachep, int node);
@@ -805,7 +805,7 @@ static inline void __drain_alien_cache(kmem_cache_t *cachep, struct array_cache

if (ac->avail) {
spin_lock(&rl3->list_lock);
-free_block(cachep, ac->entry, ac->avail);
+free_block(cachep, ac->entry, ac->avail, node);
ac->avail = 0;
spin_unlock(&rl3->list_lock);
}
@@ -926,7 +926,7 @@ static int __devinit cpuup_callback(struct notifier_block *nfb,
/* Free limit for this kmem_list3 */
l3->free_limit -= cachep->batchcount;
if (nc)
-free_block(cachep, nc->entry, nc->avail);
+free_block(cachep, nc->entry, nc->avail, node);

if (!cpus_empty(mask)) {
spin_unlock(&l3->list_lock);
@@ -935,7 +935,7 @@ static int __devinit cpuup_callback(struct notifier_block *nfb,

if (l3->shared) {
free_block(cachep, l3->shared->entry,
-l3->shared->avail);
+l3->shared->avail, node);
kfree(l3->shared);
l3->shared = NULL;
}
@@ -1883,12 +1883,13 @@ static void do_drain(void *arg)
{
kmem_cache_t *cachep = (kmem_cache_t*)arg;
struct array_cache *ac;
+int node = numa_node_id();

check_irq_off();
ac = ac_data(cachep);
-spin_lock(&cachep->nodelists[numa_node_id()]->list_lock);
-free_block(cachep, ac->entry, ac->avail);
-spin_unlock(&cachep->nodelists[numa_node_id()]->list_lock);
+spin_lock(&cachep->nodelists[node]->list_lock);
+free_block(cachep, ac->entry, ac->avail, node);
+spin_unlock(&cachep->nodelists[node]->list_lock);
ac->avail = 0;
}

@@ -2609,7 +2610,7 @@ static void *__cache_alloc_node(kmem_cache_t *cachep, int flags, int nodeid)
/*
* Caller needs to acquire correct kmem_list's list_lock
*/
-static void free_block(kmem_cache_t *cachep, void **objpp, int nr_objects)
+static void free_block(kmem_cache_t *cachep, void **objpp, int nr_objects, int node)
{
int i;
struct kmem_list3 *l3;
@@ -2618,14 +2619,12 @@ static void free_block(kmem_cache_t *cachep, void **objpp, int nr_objects)
void *objp = objpp[i];
struct slab *slabp;
unsigned int objnr;
-int nodeid = 0;

slabp = GET_PAGE_SLAB(virt_to_page(objp));
-nodeid = slabp->nodeid;
-l3 = cachep->nodelists[nodeid];
+l3 = cachep->nodelists[node];
list_del(&slabp->list);
objnr = (objp - slabp->s_mem) / cachep->objsize;
-check_spinlock_acquired_node(cachep, nodeid);
+check_spinlock_acquired_node(cachep, node);
check_slabp(cachep, slabp);


@@ -2665,13 +2664,14 @@ static void cache_flusharray(kmem_cache_t *cachep, struct array_cache *ac)
{
int batchcount;
struct kmem_list3 *l3;
+int node = numa_node_id();

batchcount = ac->batchcount;
#if DEBUG
BUG_ON(!batchcount || batchcount > ac->avail);
#endif
check_irq_off();
-l3 = cachep->nodelists[numa_node_id()];
+l3 = cachep->nodelists[node];
spin_lock(&l3->list_lock);
if (l3->shared) {
struct array_cache *shared_array = l3->shared;
@@ -2687,7 +2687,7 @@ static void cache_flusharray(kmem_cache_t *cachep, struct array_cache *ac)
}
}

-free_block(cachep, ac->entry, batchcount);
+free_block(cachep, ac->entry, batchcount, node);
free_done:
#if STATS
{
@@ -2752,7 +2752,7 @@ static inline void __cache_free(kmem_cache_t *cachep, void *objp)
} else {
spin_lock(&(cachep->nodelists[nodeid])->
list_lock);
-free_block(cachep, &objp, 1);
+free_block(cachep, &objp, 1, nodeid);
spin_unlock(&(cachep->nodelists[nodeid])->
list_lock);
}
@@ -2845,7 +2845,7 @@ void *kmem_cache_alloc_node(kmem_cache_t *cachep, unsigned int __nocast flags, i
unsigned long save_flags;
void *ptr;

-if (nodeid == numa_node_id() || nodeid == -1)
+if (nodeid == -1)
return __cache_alloc(cachep, flags);

if (unlikely(!cachep->nodelists[nodeid])) {
@@ -3080,7 +3080,7 @@ static int alloc_kmemlist(kmem_cache_t *cachep)

if ((nc = cachep->nodelists[node]->shared))
free_block(cachep, nc->entry,
-nc->avail);
+nc->avail, node);

l3->shared = new;
if (!cachep->nodelists[node]->alien) {
@@ -3161,7 +3161,7 @@ static int do_tune_cpucache(kmem_cache_t *cachep, int limit, int batchcount,
if (!ccold)
continue;
spin_lock_irq(&cachep->nodelists[cpu_to_node(i)]->list_lock);
-free_block(cachep, ccold->entry, ccold->avail);
+free_block(cachep, ccold->entry, ccold->avail, cpu_to_node(i));
spin_unlock_irq(&cachep->nodelists[cpu_to_node(i)]->list_lock);
kfree(ccold);
}
@@ -3241,7 +3241,7 @@ static void drain_array_locked(kmem_cache_t *cachep,
if (tofree > ac->avail) {
tofree = (ac->avail+1)/2;
}
-free_block(cachep, ac->entry, tofree);
+free_block(cachep, ac->entry, tofree, node);
ac->avail -= tofree;
memmove(ac->entry, &(ac->entry[tofree]),
sizeof(void*)*ac->avail);
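Taken together, the slab.c hunks change free_block()'s calling convention: instead of re-deriving the node from each object's slab (slabp->nodeid), the caller, which already holds the correct kmem_list3 list_lock, passes the node explicitly; kmem_cache_alloc_node() also stops short-circuiting local-node requests to __cache_alloc(), and do_tune_cpucache() passes cpu_to_node(i) for each per-CPU array it drains. Below is a minimal user-space C sketch of that caller pattern only; apart from free_block(), the structure and function names (node_list, drain, MAX_NODES) are hypothetical stand-ins, not the kernel's real ones.

/*
 * Hypothetical user-space sketch of the new calling convention: the caller
 * takes the per-node list lock and passes the node id down to free_block(),
 * which no longer looks the node up per object (cf. the slab.c comment
 * "Caller needs to acquire correct kmem_list's list_lock").
 */
#include <pthread.h>
#include <stdio.h>

#define MAX_NODES 4

struct node_list {                      /* stand-in for struct kmem_list3 */
	pthread_mutex_t list_lock;
	int freed;                      /* objects returned to this node */
};

static struct node_list nodelists[MAX_NODES];

/* After the patch: node is an explicit argument; nodelists[node].list_lock
 * must already be held by the caller. */
static void free_block(void **objpp, int nr_objects, int node)
{
	for (int i = 0; i < nr_objects; i++)
		nodelists[node].freed++;        /* bookkeeping only in this sketch */
	(void)objpp;
}

/* Caller pattern mirroring do_drain()/cache_flusharray() after the patch:
 * pick the node once, lock that node's list, then free into it. */
static void drain(void **objs, int avail, int node)
{
	pthread_mutex_lock(&nodelists[node].list_lock);
	free_block(objs, avail, node);
	pthread_mutex_unlock(&nodelists[node].list_lock);
}

int main(void)
{
	void *objs[8] = { 0 };

	for (int n = 0; n < MAX_NODES; n++)
		pthread_mutex_init(&nodelists[n].list_lock, NULL);

	drain(objs, 8, 0);              /* pretend this CPU sits on node 0 */
	printf("objects accounted to node 0: %d\n", nodelists[0].freed);
	return 0;
}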
