slub: use lockdep_assert_held

Instead of using comments in an attempt at getting the locking right,
use proper assertions that actively warn you if you got it wrong.

Also add extra braces in a few sites to comply with coding-style.

Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
Peter Zijlstra authored and Pekka Enberg committed Jan 13, 2014
1 parent 8afb147 commit c65c187
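For illustration, here is a minimal sketch of the pattern this commit applies: a "lock must be held" comment replaced by a lockdep_assert_held() call. The foo_lock, foo_list, and foo_add names are hypothetical and not taken from mm/slub.c; with lockdep enabled the assertion warns when the lock is not held, and it compiles away otherwise.

#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(foo_lock);
static LIST_HEAD(foo_list);

/*
 * Before: the locking rule lives only in a comment that nothing checks.
 *
 * foo_lock must be held.
 */
static void foo_add_commented(struct list_head *item)
{
        list_add(item, &foo_list);
}

/* After: the rule is an active assertion on the same lock. */
static void foo_add(struct list_head *item)
{
        lockdep_assert_held(&foo_lock);

        list_add(item, &foo_list);
}

Callers still take foo_lock with spin_lock()/spin_unlock() around foo_add(); lockdep now flags any path that forgets to.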
Showing 1 changed file with 20 additions and 20 deletions.
mm/slub.c
@@ -985,23 +985,22 @@ static inline void slab_free_hook(struct kmem_cache *s, void *x)
 
 /*
  * Tracking of fully allocated slabs for debugging purposes.
- *
- * list_lock must be held.
  */
 static void add_full(struct kmem_cache *s,
         struct kmem_cache_node *n, struct page *page)
 {
+        lockdep_assert_held(&n->list_lock);
+
         if (!(s->flags & SLAB_STORE_USER))
                 return;
 
         list_add(&page->lru, &n->full);
 }
 
-/*
- * list_lock must be held.
- */
-static void remove_full(struct kmem_cache *s, struct page *page)
+static void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, struct page *page)
 {
+        lockdep_assert_held(&n->list_lock);
+
         if (!(s->flags & SLAB_STORE_USER))
                 return;
 
@@ -1250,7 +1249,8 @@ static inline int check_object(struct kmem_cache *s, struct page *page,
                         void *object, u8 val) { return 1; }
 static inline void add_full(struct kmem_cache *s, struct kmem_cache_node *n,
                                         struct page *page) {}
-static inline void remove_full(struct kmem_cache *s, struct page *page) {}
+static inline void remove_full(struct kmem_cache *s, struct kmem_cache_node *n,
+                                        struct page *page) {}
 static inline unsigned long kmem_cache_flags(unsigned long object_size,
         unsigned long flags, const char *name,
         void (*ctor)(void *))
@@ -1504,25 +1504,24 @@ static void discard_slab(struct kmem_cache *s, struct page *page)
 
 /*
  * Management of partially allocated slabs.
- *
- * list_lock must be held.
  */
 static inline void add_partial(struct kmem_cache_node *n,
                                 struct page *page, int tail)
 {
+        lockdep_assert_held(&n->list_lock);
+
         n->nr_partial++;
         if (tail == DEACTIVATE_TO_TAIL)
                 list_add_tail(&page->lru, &n->partial);
         else
                 list_add(&page->lru, &n->partial);
 }
 
-/*
- * list_lock must be held.
- */
 static inline void remove_partial(struct kmem_cache_node *n,
                                         struct page *page)
 {
+        lockdep_assert_held(&n->list_lock);
+
         list_del(&page->lru);
         n->nr_partial--;
 }
@@ -1532,8 +1531,6 @@ static inline void remove_partial(struct kmem_cache_node *n,
  * return the pointer to the freelist.
  *
  * Returns a list of objects or NULL if it fails.
- *
- * Must hold list_lock since we modify the partial list.
  */
 static inline void *acquire_slab(struct kmem_cache *s,
                 struct kmem_cache_node *n, struct page *page,
@@ -1543,6 +1540,8 @@ static inline void *acquire_slab(struct kmem_cache *s,
         unsigned long counters;
         struct page new;
 
+        lockdep_assert_held(&n->list_lock);
+
         /*
          * Zap the freelist and set the frozen bit.
          * The old freelist is the list of objects for the
@@ -1887,7 +1886,7 @@ static void deactivate_slab(struct kmem_cache *s, struct page *page,
 
                 else if (l == M_FULL)
 
-                        remove_full(s, page);
+                        remove_full(s, n, page);
 
                 if (m == M_PARTIAL) {
 
@@ -2541,7 +2540,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
                 new.inuse--;
                 if ((!new.inuse || !prior) && !was_frozen) {
 
-                        if (kmem_cache_has_cpu_partial(s) && !prior)
+                        if (kmem_cache_has_cpu_partial(s) && !prior) {
 
                                 /*
                                  * Slab was on no list before and will be
@@ -2551,7 +2550,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
                                  */
                                 new.frozen = 1;
 
-                        else { /* Needs to be taken off a list */
+                        } else { /* Needs to be taken off a list */
 
                                 n = get_node(s, page_to_nid(page));
                                 /*
@@ -2600,7 +2599,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
          */
         if (!kmem_cache_has_cpu_partial(s) && unlikely(!prior)) {
                 if (kmem_cache_debug(s))
-                        remove_full(s, page);
+                        remove_full(s, n, page);
                 add_partial(n, page, DEACTIVATE_TO_TAIL);
                 stat(s, FREE_ADD_PARTIAL);
         }
@@ -2614,9 +2613,10 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
                  */
                 remove_partial(n, page);
                 stat(s, FREE_REMOVE_PARTIAL);
-        } else
+        } else {
                 /* Slab must be on the full list */
-                remove_full(s, page);
+                remove_full(s, n, page);
+        }
 
         spin_unlock_irqrestore(&n->list_lock, flags);
         stat(s, FREE_SLAB);