slub: disable interrupts in cmpxchg_double_slab when falling back to pagelock

Split cmpxchg_double_slab() into two functions: one for the case where we
know that interrupts are disabled (so the fallback does not need to disable
interrupts itself), and one for all other cases, where the fallback also
disables interrupts.

This fixes the issue that __slab_free() called cmpxchg_double_slab() in
some scenarios without disabling interrupts.

Tested-by: Hugh Dickins <hughd@google.com>
Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
Christoph Lameter authored and Pekka Enberg committed Jul 18, 2011
1 parent 013e896 commit 1d07171
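
Why the fallback path must run with interrupts off (a reading inferred
from the patch, not spelled out in it): slab_lock() is a bit spinlock on
PG_locked, and __slab_free() can be reached from interrupt context, e.g.
via a kfree() issued by an interrupt handler. The trace below is an
illustrative sketch of the same-CPU interleaving that the local_irq_save()
added to cmpxchg_double_slab() rules out; it is not code from the commit:

	__slab_free()                      /* process context, IRQs on */
	  cmpxchg_double_slab()            /* pre-fix fallback path */
	    slab_lock(page);               /* takes PG_locked bit spinlock */
	      <hard IRQ arrives on this CPU>
	      kfree()                      /* from the interrupt handler */
	        __slab_free()
	          cmpxchg_double_slab()
	            slab_lock(page);       /* spins forever: the lock holder
	                                      is this same CPU, interrupted
	                                      and unable to release it */

With interrupts masked around the fallback's slab_lock()/slab_unlock()
section, the interrupt can only arrive before or after the critical
section, never inside it, so this self-deadlock cannot occur.
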
Showing 1 changed file with 45 additions and 4 deletions.

mm/slub.c
@@ -354,6 +354,42 @@ static __always_inline void slab_unlock(struct page *page)
 	__bit_spin_unlock(PG_locked, &page->flags);
 }
 
+/* Interrupts must be disabled (for the fallback code to work right) */
+static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
+		void *freelist_old, unsigned long counters_old,
+		void *freelist_new, unsigned long counters_new,
+		const char *n)
+{
+	VM_BUG_ON(!irqs_disabled());
+#ifdef CONFIG_CMPXCHG_DOUBLE
+	if (s->flags & __CMPXCHG_DOUBLE) {
+		if (cmpxchg_double(&page->freelist,
+			freelist_old, counters_old,
+			freelist_new, counters_new))
+			return 1;
+	} else
+#endif
+	{
+		slab_lock(page);
+		if (page->freelist == freelist_old && page->counters == counters_old) {
+			page->freelist = freelist_new;
+			page->counters = counters_new;
+			slab_unlock(page);
+			return 1;
+		}
+		slab_unlock(page);
+	}
+
+	cpu_relax();
+	stat(s, CMPXCHG_DOUBLE_FAIL);
+
+#ifdef SLUB_DEBUG_CMPXCHG
+	printk(KERN_INFO "%s %s: cmpxchg double redo ", n, s->name);
+#endif
+
+	return 0;
+}
+
 static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
 		void *freelist_old, unsigned long counters_old,
 		void *freelist_new, unsigned long counters_new,
@@ -368,14 +404,19 @@ static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
 	} else
 #endif
 	{
+		unsigned long flags;
+
+		local_irq_save(flags);
 		slab_lock(page);
 		if (page->freelist == freelist_old && page->counters == counters_old) {
 			page->freelist = freelist_new;
 			page->counters = counters_new;
 			slab_unlock(page);
+			local_irq_restore(flags);
 			return 1;
 		}
 		slab_unlock(page);
+		local_irq_restore(flags);
 	}
 
 	cpu_relax();
@@ -1471,7 +1512,7 @@ static inline int acquire_slab(struct kmem_cache *s,
 		VM_BUG_ON(new.frozen);
 		new.frozen = 1;
 
-	} while (!cmpxchg_double_slab(s, page,
+	} while (!__cmpxchg_double_slab(s, page,
 			freelist, counters,
 			NULL, new.counters,
 			"lock and freeze"));
@@ -1709,7 +1750,7 @@ static void deactivate_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
 		new.inuse--;
 		VM_BUG_ON(!new.frozen);
 
-	} while (!cmpxchg_double_slab(s, page,
+	} while (!__cmpxchg_double_slab(s, page,
 			prior, counters,
 			freelist, new.counters,
 			"drain percpu freelist"));
@@ -1798,7 +1839,7 @@ static void deactivate_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
 	}
 
 	l = m;
-	if (!cmpxchg_double_slab(s, page,
+	if (!__cmpxchg_double_slab(s, page,
 				old.freelist, old.counters,
 				new.freelist, new.counters,
 				"unfreezing slab"))
@@ -1992,7 +2033,7 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 		new.inuse = page->objects;
 		new.frozen = object != NULL;
 
-	} while (!cmpxchg_double_slab(s, page,
+	} while (!__cmpxchg_double_slab(s, page,
 			object, counters,
 			NULL, new.counters,
 			"__slab_alloc"));
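
Taken together, the converted call sites encode a simple convention:
paths that already run with interrupts disabled (acquire_slab(),
deactivate_slab(), and __slab_alloc() above, as the VM_BUG_ON(!irqs_disabled())
now enforces) call the bare __cmpxchg_double_slab(), while all other
callers, __slab_free() among them, keep cmpxchg_double_slab(), which masks
interrupts itself around the pagelock fallback. As a sketch of the
resulting usage, a hypothetical new call site might look as follows;
example_update() and its locals are illustrative only, not part of this
commit:

	/*
	 * Hypothetical caller, shown only to illustrate variant selection;
	 * it would sit alongside the functions above in mm/slub.c.
	 */
	static void example_update(struct kmem_cache *s, struct page *page,
			void *new_freelist, unsigned long new_counters)
	{
		unsigned long flags;
		void *old_freelist;
		unsigned long old_counters;

		local_irq_save(flags);	/* IRQs now known to be off, so the */
		do {			/* double-underscore variant is safe */
			old_freelist = page->freelist;
			old_counters = page->counters;
		} while (!__cmpxchg_double_slab(s, page,
				old_freelist, old_counters,
				new_freelist, new_counters,
				"example_update"));
		local_irq_restore(flags);
	}

Without the surrounding local_irq_save()/local_irq_restore() pair, the
loop would have to call cmpxchg_double_slab() instead.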
