Commit f9cfe1b

---
r: 175599
b: refs/heads/master
c: f3d8b53
h: refs/heads/master
i:
  175597: 4a39a3f
  175595: e586038
  175591: 27cabfa
  175583: 458168c
v: v3
J. R. Okajima authored and Pekka Enberg committed Dec 6, 2009
1 parent abbb8b7 commit f9cfe1b
Showing 2 changed files with 51 additions and 64 deletions.
[refs] (2 changes: 1 addition & 1 deletion)

@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 8e15b79cf4bd20c6afb4663d98a39cd004eee672
+refs/heads/master: f3d8b53a3abbfd0b74fa5dfaa690870d9619fad9
trunk/mm/slab.c (113 changes: 50 additions & 63 deletions)

@@ -604,26 +604,6 @@ static struct kmem_cache cache_cache = {

 #define BAD_ALIEN_MAGIC 0x01020304ul
 
-/*
- * chicken and egg problem: delay the per-cpu array allocation
- * until the general caches are up.
- */
-static enum {
-        NONE,
-        PARTIAL_AC,
-        PARTIAL_L3,
-        EARLY,
-        FULL
-} g_cpucache_up;
-
-/*
- * used by boot code to determine if it can use slab based allocator
- */
-int slab_is_available(void)
-{
-        return g_cpucache_up >= EARLY;
-}
-
 #ifdef CONFIG_LOCKDEP
 
 /*
@@ -640,52 +620,40 @@ int slab_is_available(void)
 static struct lock_class_key on_slab_l3_key;
 static struct lock_class_key on_slab_alc_key;
 
-static void init_node_lock_keys(int q)
+static inline void init_lock_keys(void)
+
 {
+        int q;
         struct cache_sizes *s = malloc_sizes;
 
         if (g_cpucache_up != FULL)
                 return;
 
-        for (s = malloc_sizes; s->cs_size != ULONG_MAX; s++) {
-                struct array_cache **alc;
-                struct kmem_list3 *l3;
-                int r;
-
-                l3 = s->cs_cachep->nodelists[q];
-                if (!l3 || OFF_SLAB(s->cs_cachep))
-                        return;
-                lockdep_set_class(&l3->list_lock, &on_slab_l3_key);
-                alc = l3->alien;
-                /*
-                 * FIXME: This check for BAD_ALIEN_MAGIC
-                 * should go away when common slab code is taught to
-                 * work even without alien caches.
-                 * Currently, non NUMA code returns BAD_ALIEN_MAGIC
-                 * for alloc_alien_cache,
-                 */
-                if (!alc || (unsigned long)alc == BAD_ALIEN_MAGIC)
-                        return;
-                for_each_node(r) {
-                        if (alc[r])
-                                lockdep_set_class(&alc[r]->lock,
-                                                &on_slab_alc_key);
+        while (s->cs_size != ULONG_MAX) {
+                for_each_node(q) {
+                        struct array_cache **alc;
+                        int r;
+                        struct kmem_list3 *l3 = s->cs_cachep->nodelists[q];
+                        if (!l3 || OFF_SLAB(s->cs_cachep))
+                                continue;
+                        lockdep_set_class(&l3->list_lock, &on_slab_l3_key);
+                        alc = l3->alien;
+                        /*
+                         * FIXME: This check for BAD_ALIEN_MAGIC
+                         * should go away when common slab code is taught to
+                         * work even without alien caches.
+                         * Currently, non NUMA code returns BAD_ALIEN_MAGIC
+                         * for alloc_alien_cache,
+                         */
+                        if (!alc || (unsigned long)alc == BAD_ALIEN_MAGIC)
+                                continue;
+                        for_each_node(r) {
+                                if (alc[r])
+                                        lockdep_set_class(&alc[r]->lock,
+                                                        &on_slab_alc_key);
+                        }
                 }
+                s++;
         }
 }
 
-static inline void init_lock_keys(void)
-{
-        int node;
-
-        for_each_node(node)
-                init_node_lock_keys(node);
-}
 #else
-static void init_node_lock_keys(int q)
-{
-}
-
 static inline void init_lock_keys(void)
 {
 }
@@ -697,6 +665,26 @@ static inline void init_lock_keys(void)
 static DEFINE_MUTEX(cache_chain_mutex);
 static struct list_head cache_chain;
 
+/*
+ * chicken and egg problem: delay the per-cpu array allocation
+ * until the general caches are up.
+ */
+static enum {
+        NONE,
+        PARTIAL_AC,
+        PARTIAL_L3,
+        EARLY,
+        FULL
+} g_cpucache_up;
+
+/*
+ * used by boot code to determine if it can use slab based allocator
+ */
+int slab_is_available(void)
+{
+        return g_cpucache_up >= EARLY;
+}
+
 static DEFINE_PER_CPU(struct delayed_work, reap_work);
 
 static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
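Note on the hunk above: it only relocates g_cpucache_up and slab_is_available() within the file; the logic is unchanged. The enum stages the bootstrap so early boot code can ask whether the slab allocator is usable yet. Below is a minimal userspace model of that gate; the stage names come straight from the diff, while main() and its init sequence are invented for illustration.

/* Userspace model of the g_cpucache_up bootstrap gate; not kernel code. */
#include <stdio.h>

static enum {
        NONE,
        PARTIAL_AC,
        PARTIAL_L3,
        EARLY,
        FULL
} g_cpucache_up;

/* Mirrors slab_is_available(): usable once the general caches are up. */
static int slab_is_available(void)
{
        return g_cpucache_up >= EARLY;
}

int main(void)
{
        printf("before init: %d\n", slab_is_available()); /* 0 */
        g_cpucache_up = EARLY;  /* general caches usable, not finalized */
        printf("early init:  %d\n", slab_is_available()); /* 1 */
        g_cpucache_up = FULL;
        printf("full init:   %d\n", slab_is_available()); /* 1 */
        return 0;
}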
@@ -1266,8 +1254,6 @@ static int __cpuinit cpuup_prepare(long cpu)
                 kfree(shared);
                 free_alien_cache(alien);
         }
-        init_node_lock_keys(node);
-
         return 0;
 bad:
         cpuup_canceled(cpu);
@@ -3123,7 +3109,8 @@ static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags)
          * per-CPU caches is leaked, we need to make sure kmemleak doesn't
          * treat the array pointers as a reference to the object.
          */
-        kmemleak_erase(&ac->entry[ac->avail]);
+        if (objp)
+                kmemleak_erase(&ac->entry[ac->avail]);
         return objp;
 }
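Note on the hunk above: the new `if (objp)` guard makes sure the vacated per-CPU entry slot is only scrubbed when an object was actually handed out; after a failed allocation there is no freshly vacated slot to erase. The following is a small userspace model of that pattern; struct array_cache and the names echo mm/slab.c, but cache_get(), kmemleak_erase_model() and main() are simplified stand-ins, not kernel code.

/* Userspace model of the ____cache_alloc() fast path; not kernel code. */
#include <stddef.h>
#include <stdio.h>

struct array_cache {
        unsigned int avail;   /* number of cached objects */
        void *entry[8];       /* per-CPU object slots */
};

/* Pop from the per-CPU cache; NULL when empty (the real code would
 * fall back to cache_alloc_refill() here). */
static void *cache_get(struct array_cache *ac)
{
        if (ac->avail == 0)
                return NULL;
        return ac->entry[--ac->avail];
}

/* Stand-in for kmemleak_erase(): forget a stale reference slot. */
static void kmemleak_erase_model(void **slot)
{
        *slot = NULL;
}

static void *cache_alloc(struct array_cache *ac)
{
        void *objp = cache_get(ac);

        /*
         * Only scrub the vacated slot when an object was really handed
         * out; on failure ac->avail was never moved, so there is no
         * stale slot to erase (this mirrors the guard in the hunk above).
         */
        if (objp)
                kmemleak_erase_model(&ac->entry[ac->avail]);
        return objp;
}

int main(void)
{
        int x = 42;
        struct array_cache ac = { .avail = 1, .entry = { &x } };

        printf("first:  %p\n", cache_alloc(&ac)); /* &x, slot erased */
        printf("second: %p\n", cache_alloc(&ac)); /* NULL, no erase */
        return 0;
}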

@@ -3320,7 +3307,7 @@ __cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
         cache_alloc_debugcheck_before(cachep, flags);
         local_irq_save(save_flags);
 
-        if (nodeid == -1)
+        if (unlikely(nodeid == -1))
                 nodeid = numa_node_id();
 
         if (unlikely(!cachep->nodelists[nodeid])) {
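Note on the final hunk: wrapping the nodeid check in unlikely() is purely a branch-prediction hint, with no change in behavior; in the kernel, likely()/unlikely() boil down to __builtin_expect(). A minimal userspace sketch of the same idiom follows; the macro definitions are the textbook GCC form, and resolve_node(), the numa_node_id() stub, and main() are invented for illustration.

/* Userspace sketch of the kernel's likely()/unlikely() hints (GCC/Clang). */
#include <stdio.h>

#define likely(x)   __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)

/* Stand-in for numa_node_id(): pretend we always run on node 0. */
static int numa_node_id(void)
{
        return 0;
}

static int resolve_node(int nodeid)
{
        /*
         * Mirrors the hunk above: callers almost always pass a real
         * node, so the compiler is told to lay out the -1 fallback
         * off the hot path.
         */
        if (unlikely(nodeid == -1))
                nodeid = numa_node_id();
        return nodeid;
}

int main(void)
{
        printf("%d\n", resolve_node(-1)); /* falls back to node 0 */
        printf("%d\n", resolve_node(3));  /* common case, passes through */
        return 0;
}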
