Skip to content

Commit

Permalink
[PATCH] lockdep: annotate mm/slab.c
Browse files Browse the repository at this point in the history
mm/slab.c uses nested locking when dealing with 'off-slab'
caches, in which case it allocates the slab header from the
(on-slab) kmalloc caches. Teach the lock validator about
this by putting all on-slab caches into a separate class.

This patch has no effect on non-lockdep kernels.

Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
  • Loading branch information
Arjan van de Ven authored and Linus Torvalds committed Jul 13, 2006
1 parent 873623d commit f1aaee5
Showing 1 changed file with 32 additions and 0 deletions.
32 changes: 32 additions & 0 deletions mm/slab.c
Original file line number Diff line number Diff line change
Expand Up @@ -674,6 +674,37 @@ static struct kmem_cache cache_cache = {
#endif
};

#ifdef CONFIG_LOCKDEP

/*
 * The slab allocator sometimes stores a slab's management header
 * "off slab", inside one of the generic kmalloc caches.  Taking the
 * kmalloc cache's list_lock while already holding another cache's
 * list_lock would look like recursive locking to lockdep, so give
 * every on-slab cache's per-node list_lock a dedicated lock class.
 */
static struct lock_class_key on_slab_key;

static inline void init_lock_keys(struct cache_sizes *s)
{
	int node;

	for (node = 0; node < MAX_NUMNODES; node++) {
		/* Only annotate nodes that exist, and only on-slab caches. */
		if (s->cs_cachep->nodelists[node] && !OFF_SLAB(s->cs_cachep))
			lockdep_set_class(
				&s->cs_cachep->nodelists[node]->list_lock,
				&on_slab_key);
	}
}

#else
/* Without CONFIG_LOCKDEP the annotation compiles away to nothing. */
static inline void init_lock_keys(struct cache_sizes *s)
{
}
#endif



/* Guard access to the cache-chain. */
static DEFINE_MUTEX(cache_chain_mutex);
static struct list_head cache_chain;
Expand Down Expand Up @@ -1391,6 +1422,7 @@ void __init kmem_cache_init(void)
ARCH_KMALLOC_FLAGS|SLAB_PANIC,
NULL, NULL);
}
init_lock_keys(sizes);

sizes->cs_dmacachep = kmem_cache_create(names->name_dma,
sizes->cs_size,
Expand Down

0 comments on commit f1aaee5

Please sign in to comment.