
Commit 2d3a933

---
r: 84426
b: refs/heads/master
c: 1f84260
h: refs/heads/master
v: v3
Christoph Lameter authored and Christoph Lameter committed Feb 8, 2008
1 parent fe39d46 commit 2d3a933
Showing 3 changed files with 93 additions and 6 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
refs/heads/master: 683d0baad3d6e18134927f8c28ee804dbe10fe71
refs/heads/master: 1f84260c8ce3b1ce26d4c1d6dedc2f33a3a29c0c
4 changes: 4 additions & 0 deletions trunk/arch/x86/Kconfig
@@ -52,6 +52,10 @@ config HAVE_LATENCYTOP_SUPPORT
config SEMAPHORE_SLEEPERS
def_bool y

config FAST_CMPXCHG_LOCAL
bool
default y

config MMU
def_bool y

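The new option simply advertises that the architecture has a cheap cmpxchg_local(). Roughly (this note and the helper below are illustrative and not part of the commit): cmpxchg_local() is a compare-and-exchange that is atomic with respect to interrupts on the local CPU but omits the cross-CPU LOCK prefix, so it is only safe on data that no other CPU writes, such as a per-cpu freelist. The slub.c changes below build a lock-free allocation and free fastpath on top of it; pcpu_counter_inc() is a made-up name, and the snippet assumes the includes already present in mm/slub.c.

/*
 * Hypothetical helper, for illustration only: bump a counter that is
 * written only by its owning CPU (process context plus interrupts).
 * No irq disabling and no LOCK prefix are needed; if an interrupt
 * modifies the counter between the load and the exchange, the
 * cmpxchg_local() fails and the loop retries.
 */
static void pcpu_counter_inc(unsigned long *counter)
{
	unsigned long old;

	do {
		old = *counter;
	} while (cmpxchg_local(counter, old, old + 1) != old);
}

/*
 * With CONFIG_PREEMPT the task could migrate to another CPU between
 * the load and the exchange and then race with the counter's real
 * owner, which is why the fastpath in slub.c below also requires
 * !CONFIG_PREEMPT.
 */
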
93 changes: 88 additions & 5 deletions trunk/mm/slub.c
@@ -149,6 +149,13 @@ static inline void ClearSlabDebug(struct page *page)
/* Enable to test recovery from slab corruption on boot */
#undef SLUB_RESILIENCY_TEST

/*
* Currently fastpath is not supported if preemption is enabled.
*/
#if defined(CONFIG_FAST_CMPXCHG_LOCAL) && !defined(CONFIG_PREEMPT)
#define SLUB_FASTPATH
#endif

#if PAGE_SHIFT <= 12

/*
@@ -1493,7 +1500,11 @@ static void *__slab_alloc(struct kmem_cache *s,
{
void **object;
struct page *new;
#ifdef SLUB_FASTPATH
unsigned long flags;

local_irq_save(flags);
#endif
if (!c->page)
goto new_slab;

@@ -1512,7 +1523,12 @@ static void *__slab_alloc(struct kmem_cache *s,
c->page->inuse = s->objects;
c->page->freelist = c->page->end;
c->node = page_to_nid(c->page);
unlock_out:
slab_unlock(c->page);
out:
#ifdef SLUB_FASTPATH
local_irq_restore(flags);
#endif
return object;

another_slab:
@@ -1542,7 +1558,8 @@ static void *__slab_alloc(struct kmem_cache *s,
c->page = new;
goto load_freelist;
}
return NULL;
object = NULL;
goto out;
debug:
object = c->page->freelist;
if (!alloc_debug_processing(s, c->page, object, addr))
@@ -1551,8 +1568,7 @@ static void *__slab_alloc(struct kmem_cache *s,
c->page->inuse++;
c->page->freelist = object[c->offset];
c->node = -1;
slab_unlock(c->page);
return object;
goto unlock_out;
}

/*
@@ -1569,9 +1585,36 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
gfp_t gfpflags, int node, void *addr)
{
void **object;
unsigned long flags;
struct kmem_cache_cpu *c;

/*
* The SLUB_FASTPATH path is provisional and is currently disabled if the
* kernel is compiled with preemption or if the arch does not support
* fast cmpxchg operations. There are a couple of coming changes that will
* simplify matters and allow preemption. Ultimately we may end up making
* SLUB_FASTPATH the default.
*
* 1. The introduction of the per cpu allocator will avoid array lookups
* through get_cpu_slab(). A special register can be used instead.
*
* 2. The introduction of per cpu atomic operations (cpu_ops) means that
* we can realize the logic here entirely with per cpu atomics. The
* per cpu atomic ops will take care of the preemption issues.
*/

#ifdef SLUB_FASTPATH
c = get_cpu_slab(s, raw_smp_processor_id());
do {
object = c->freelist;
if (unlikely(is_end(object) || !node_match(c, node))) {
object = __slab_alloc(s, gfpflags, node, addr, c);
break;
}
} while (cmpxchg_local(&c->freelist, object, object[c->offset])
!= object);
#else
unsigned long flags;

local_irq_save(flags);
c = get_cpu_slab(s, smp_processor_id());
if (unlikely(is_end(c->freelist) || !node_match(c, node)))
@@ -1583,6 +1626,7 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
c->freelist = object[c->offset];
}
local_irq_restore(flags);
#endif

if (unlikely((gfpflags & __GFP_ZERO) && object))
memset(object, 0, c->objsize);
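
Condensed for readability only (slab_alloc_fastpath_sketch() is a made-up name and this is not literal patch code): the shape of the allocation fastpath added above. Interrupts stay enabled; the per-cpu freelist head is popped with cmpxchg_local(), and an interrupt that touches the freelist in between simply makes the exchange fail and the loop retry. Note that __slab_alloc() now performs its own local_irq_save()/local_irq_restore() under SLUB_FASTPATH, because this caller no longer disables interrupts before taking the slow path.

static void *slab_alloc_fastpath_sketch(struct kmem_cache *s, gfp_t gfpflags,
					int node, void *addr)
{
	struct kmem_cache_cpu *c = get_cpu_slab(s, raw_smp_processor_id());
	void **object;

	do {
		object = c->freelist;		/* irqs remain enabled */
		if (unlikely(is_end(object) || !node_match(c, node)))
			/* empty freelist or wrong node: slow path */
			return __slab_alloc(s, gfpflags, node, addr, c);
		/* retry if an interrupt changed c->freelist meanwhile */
	} while (cmpxchg_local(&c->freelist, object, object[c->offset])
								!= object);

	return object;	/* __GFP_ZERO handling omitted here */
}
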
@@ -1618,6 +1662,11 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
void *prior;
void **object = (void *)x;

#ifdef SLUB_FASTPATH
unsigned long flags;

local_irq_save(flags);
#endif
slab_lock(page);

if (unlikely(SlabDebug(page)))
@@ -1643,6 +1692,9 @@ static void __slab_free(struct kmem_cache *s, struct page *page,

out_unlock:
slab_unlock(page);
#ifdef SLUB_FASTPATH
local_irq_restore(flags);
#endif
return;

slab_empty:
@@ -1653,6 +1705,9 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
remove_partial(s, page);

slab_unlock(page);
#ifdef SLUB_FASTPATH
local_irq_restore(flags);
#endif
discard_slab(s, page);
return;

@@ -1677,9 +1732,36 @@ static __always_inline void slab_free(struct kmem_cache *s,
struct page *page, void *x, void *addr)
{
void **object = (void *)x;
unsigned long flags;
struct kmem_cache_cpu *c;

#ifdef SLUB_FASTPATH
void **freelist;

c = get_cpu_slab(s, raw_smp_processor_id());
debug_check_no_locks_freed(object, s->objsize);
do {
freelist = c->freelist;
barrier();
/*
* If the compiler would reorder the retrieval of c->page to
* come before c->freelist then an interrupt could
* change the cpu slab before we retrieve c->freelist. We
* could be matching on a page no longer active and put the
* object onto the freelist of the wrong slab.
*
* On the other hand: If we already have the freelist pointer
* then any change of cpu_slab will cause the cmpxchg to fail
* since the freelist pointers are unique per slab.
*/
if (unlikely(page != c->page || c->node < 0)) {
__slab_free(s, page, x, addr, c->offset);
break;
}
object[c->offset] = freelist;
} while (cmpxchg_local(&c->freelist, freelist, object) != freelist);
#else
unsigned long flags;

local_irq_save(flags);
debug_check_no_locks_freed(object, s->objsize);
c = get_cpu_slab(s, smp_processor_id());
@@ -1690,6 +1772,7 @@ static __always_inline void slab_free(struct kmem_cache *s,
__slab_free(s, page, x, addr, c->offset);

local_irq_restore(flags);
#endif
}

void kmem_cache_free(struct kmem_cache *s, void *x)
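
For completeness, a similarly condensed view of the free fastpath (again with a made-up function name and the debug hooks omitted; not literal patch code). The barrier() forces c->freelist to be read before c->page is compared, which is the ordering the in-tree comment above relies on; if an interrupt switches the cpu slab after the checks, the final cmpxchg_local() fails because freelist pointers are unique per slab, and the loop retries.

static void slab_free_fastpath_sketch(struct kmem_cache *s, struct page *page,
					void *x, void *addr)
{
	struct kmem_cache_cpu *c = get_cpu_slab(s, raw_smp_processor_id());
	void **object = (void *)x;
	void **freelist;

	do {
		freelist = c->freelist;
		barrier();	/* read c->freelist before testing c->page */
		if (unlikely(page != c->page || c->node < 0)) {
			/* not the cpu slab, or debugging active */
			__slab_free(s, page, x, addr, c->offset);
			return;
		}
		object[c->offset] = freelist;	/* link object in front */
		/* fails and retries if an interrupt changed c->freelist
		 * after the reads above */
	} while (cmpxchg_local(&c->freelist, freelist, object) != freelist);
}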
