Commit b388f19

---
r: 35614
b: refs/heads/master
c: 056c624
h: refs/heads/master
v: v3
Ravikiran G Thirumalai authored and Linus Torvalds committed Sep 26, 2006
1 parent 1405dc8 commit b388f19
Showing 2 changed files with 44 additions and 15 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 2ed3a4ef95ef1a13a424378c34ebd9b7e593f212
+refs/heads/master: 056c62418cc639bf2fe962c6a6ee56054b838bc7
57 changes: 43 additions & 14 deletions trunk/mm/slab.c
@@ -674,6 +674,8 @@ static struct kmem_cache cache_cache = {
 #endif
 };
 
+#define BAD_ALIEN_MAGIC 0x01020304ul
+
 #ifdef CONFIG_LOCKDEP
 
 /*
@@ -682,29 +684,53 @@ static struct kmem_cache cache_cache = {
  * The locking for this is tricky in that it nests within the locks
  * of all other slabs in a few places; to deal with this special
  * locking we put on-slab caches into a separate lock-class.
+ *
+ * We set lock class for alien array caches which are up during init.
+ * The lock annotation will be lost if all cpus of a node goes down and
+ * then comes back up during hotplug
  */
-static struct lock_class_key on_slab_key;
+static struct lock_class_key on_slab_l3_key;
+static struct lock_class_key on_slab_alc_key;
+
+static inline void init_lock_keys(void)
 
-static inline void init_lock_keys(struct cache_sizes *s)
 {
 	int q;
-
-	for (q = 0; q < MAX_NUMNODES; q++) {
-		if (!s->cs_cachep->nodelists[q] || OFF_SLAB(s->cs_cachep))
-			continue;
-		lockdep_set_class(&s->cs_cachep->nodelists[q]->list_lock,
-				  &on_slab_key);
+	struct cache_sizes *s = malloc_sizes;
+
+	while (s->cs_size != ULONG_MAX) {
+		for_each_node(q) {
+			struct array_cache **alc;
+			int r;
+			struct kmem_list3 *l3 = s->cs_cachep->nodelists[q];
+			if (!l3 || OFF_SLAB(s->cs_cachep))
+				continue;
+			lockdep_set_class(&l3->list_lock, &on_slab_l3_key);
+			alc = l3->alien;
+			/*
+			 * FIXME: This check for BAD_ALIEN_MAGIC
+			 * should go away when common slab code is taught to
+			 * work even without alien caches.
+			 * Currently, non NUMA code returns BAD_ALIEN_MAGIC
+			 * for alloc_alien_cache,
+			 */
+			if (!alc || (unsigned long)alc == BAD_ALIEN_MAGIC)
+				continue;
+			for_each_node(r) {
+				if (alc[r])
+					lockdep_set_class(&alc[r]->lock,
+						&on_slab_alc_key);
+			}
+		}
+		s++;
 	}
 }
-
 #else
-static inline void init_lock_keys(struct cache_sizes *s)
+static inline void init_lock_keys(void)
 {
 }
 #endif
 
-
-
 /* Guard access to the cache-chain. */
 static DEFINE_MUTEX(cache_chain_mutex);
 static struct list_head cache_chain;
@@ -1091,7 +1117,7 @@ static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
 
 static inline struct array_cache **alloc_alien_cache(int node, int limit)
 {
-	return (struct array_cache **) 0x01020304ul;
+	return (struct array_cache **)BAD_ALIEN_MAGIC;
 }
 
 static inline void free_alien_cache(struct array_cache **ac_ptr)
@@ -1421,7 +1447,6 @@ void __init kmem_cache_init(void)
 					ARCH_KMALLOC_FLAGS|SLAB_PANIC,
 					NULL, NULL);
 	}
-	init_lock_keys(sizes);
 
 	sizes->cs_dmacachep = kmem_cache_create(names->name_dma,
 					sizes->cs_size,
@@ -1495,6 +1520,10 @@ void __init kmem_cache_init(void)
 		mutex_unlock(&cache_chain_mutex);
 	}
 
+	/* Annotate slab for lockdep -- annotate the malloc caches */
+	init_lock_keys();
+
+
 	/* Done! */
 	g_cpucache_up = FULL;
 
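For context on the annotation pattern this patch relies on: lockdep tracks locks by class rather than by instance, and every lock initialized at the same source location shares one class, so locks that legitimately nest within their own group must be re-classed with explicit keys. The sketch below is illustrative only and not part of this commit (the demo_* names are invented); it shows how a static struct lock_class_key plus lockdep_set_class() moves a group of locks into a class of its own, which is how the patch keeps nested l3 list_locks and alien-cache locks from being flagged as recursive locking:

#include <linux/spinlock.h>
#include <linux/lockdep.h>

/*
 * One key per logical group of locks; every lock set to the same key
 * forms a single lockdep class. Giving the outer and inner locks
 * distinct keys lets them nest without a false
 * "possible recursive locking detected" report.
 */
static struct lock_class_key demo_outer_key;	/* hypothetical */
static struct lock_class_key demo_inner_key;	/* hypothetical */

static void demo_init(spinlock_t *outer, spinlock_t *inner)
{
	spin_lock_init(outer);
	spin_lock_init(inner);
	/* Re-class each lock so nesting inner under outer is legal. */
	lockdep_set_class(outer, &demo_outer_key);
	lockdep_set_class(inner, &demo_inner_key);
}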
