---
yaml
---
r: 186177
b: refs/heads/master
c: ff12059
h: refs/heads/master
i:
  186175: 237ac15
v: v3
Christoph Lameter authored and Pekka Enberg committed Dec 20, 2009
1 parent 40f42ae commit 2bd0859
Showing 3 changed files with 18 additions and 62 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 756dee75872a2a764b478e18076360b8a4ec9045
+refs/heads/master: ff12059ed14b0773d7bbef86f98218ada6c20770
2 changes: 0 additions & 2 deletions trunk/include/linux/slub_def.h
@@ -38,8 +38,6 @@ struct kmem_cache_cpu {
 	void **freelist;	/* Pointer to first free per cpu object */
 	struct page *page;	/* The slab from which we are allocating */
 	int node;		/* The node of the page (or -1 for debug) */
-	unsigned int offset;	/* Freepointer offset (in word units) */
-	unsigned int objsize;	/* Size of an object (from kmem_cache) */
 #ifdef CONFIG_SLUB_STATS
 	unsigned stat[NR_SLUB_STAT_ITEMS];
 #endif
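With offset and objsize removed, struct kmem_cache_cpu shrinks to the following (a sketch of the resulting definition, assembled from the unchanged lines of the hunk above):

struct kmem_cache_cpu {
	void **freelist;	/* Pointer to first free per cpu object */
	struct page *page;	/* The slab from which we are allocating */
	int node;		/* The node of the page (or -1 for debug) */
#ifdef CONFIG_SLUB_STATS
	unsigned stat[NR_SLUB_STAT_ITEMS];
#endif
};

The freepointer offset and the object size are now taken from struct kmem_cache itself on the hot paths; see the mm/slub.c changes below.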
76 changes: 17 additions & 59 deletions trunk/mm/slub.c
@@ -260,13 +260,6 @@ static inline int check_valid_pointer(struct kmem_cache *s,
 	return 1;
 }
 
-/*
- * Slow version of get and set free pointer.
- *
- * This version requires touching the cache lines of kmem_cache which
- * we avoid to do in the fast alloc free paths. There we obtain the offset
- * from the page struct.
- */
 static inline void *get_freepointer(struct kmem_cache *s, void *object)
 {
 	return *(void **)(object + s->offset);
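The deleted comment described these helpers as the slow versions, to be avoided on the fast paths; with offset gone from kmem_cache_cpu they are now used on the fast paths as well. For reference, the matching setter (not visible in this hunk) is the mirror image of get_freepointer() shown above, roughly (a sketch of the mm/slub.c helper of this period):

static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
{
	*(void **)(object + s->offset) = fp;
}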
@@ -1473,10 +1466,10 @@ static void deactivate_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
 
 		/* Retrieve object from cpu_freelist */
 		object = c->freelist;
-		c->freelist = c->freelist[c->offset];
+		c->freelist = get_freepointer(s, c->freelist);
 
 		/* And put onto the regular freelist */
-		object[c->offset] = page->freelist;
+		set_freepointer(s, object, page->freelist);
 		page->freelist = object;
 		page->inuse--;
 	}
@@ -1635,7 +1628,7 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 	if (unlikely(SLABDEBUG && PageSlubDebug(c->page)))
 		goto debug;
 
-	c->freelist = object[c->offset];
+	c->freelist = get_freepointer(s, object);
 	c->page->inuse = c->page->objects;
 	c->page->freelist = NULL;
 	c->node = page_to_nid(c->page);
@@ -1681,7 +1674,7 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 		goto another_slab;
 
 	c->page->inuse++;
-	c->page->freelist = object[c->offset];
+	c->page->freelist = get_freepointer(s, object);
 	c->node = -1;
 	goto unlock_out;
 }
@@ -1702,7 +1695,6 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
 	void **object;
 	struct kmem_cache_cpu *c;
 	unsigned long flags;
-	unsigned long objsize;
 
 	gfpflags &= gfp_allowed_mask;
 
@@ -1715,22 +1707,21 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
 	local_irq_save(flags);
 	c = __this_cpu_ptr(s->cpu_slab);
 	object = c->freelist;
-	objsize = c->objsize;
 	if (unlikely(!object || !node_match(c, node)))
 
 		object = __slab_alloc(s, gfpflags, node, addr, c);
 
 	else {
-		c->freelist = object[c->offset];
+		c->freelist = get_freepointer(s, object);
 		stat(c, ALLOC_FASTPATH);
 	}
 	local_irq_restore(flags);
 
 	if (unlikely(gfpflags & __GFP_ZERO) && object)
-		memset(object, 0, objsize);
+		memset(object, 0, s->objsize);
 
-	kmemcheck_slab_alloc(s, gfpflags, object, c->objsize);
-	kmemleak_alloc_recursive(object, objsize, 1, s->flags, gfpflags);
+	kmemcheck_slab_alloc(s, gfpflags, object, s->objsize);
+	kmemleak_alloc_recursive(object, s->objsize, 1, s->flags, gfpflags);
 
 	return object;
 }
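The removed fast-path expression object[c->offset] treated the object as an array of void * and used a word offset that init_kmem_cache_cpu() (removed further down in this diff) derived as s->offset / sizeof(void *); get_freepointer() applies the byte offset from struct kmem_cache directly. A standalone userspace sketch of that equivalence, with fake_kmem_cache standing in for the real structure:

/* Standalone illustration, not kernel code: the word-indexed access the patch
 * removes and the byte-offset access used by get_freepointer() select the
 * same slot inside an object. */
#include <assert.h>
#include <stdio.h>

struct fake_kmem_cache {		/* stand-in for the fields used here */
	unsigned long offset;		/* free pointer offset in bytes (s->offset) */
};

static void *get_freepointer(struct fake_kmem_cache *s, void *object)
{
	return *(void **)((char *)object + s->offset);
}

int main(void)
{
	void *slots[8] = { 0 };
	struct fake_kmem_cache s = { .offset = 3 * sizeof(void *) };
	unsigned int word_offset = s.offset / sizeof(void *);	/* what c->offset cached */
	void **object = slots;

	slots[3] = &slots[5];		/* pretend this is the next free object */

	assert(object[word_offset] == get_freepointer(&s, object));
	printf("old and new access agree: %p\n", object[word_offset]);
	return 0;
}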
@@ -1785,7 +1776,7 @@ EXPORT_SYMBOL(kmem_cache_alloc_node_notrace);
  * handling required then we can return immediately.
  */
 static void __slab_free(struct kmem_cache *s, struct page *page,
-			void *x, unsigned long addr, unsigned int offset)
+			void *x, unsigned long addr)
 {
 	void *prior;
 	void **object = (void *)x;
@@ -1799,7 +1790,8 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 		goto debug;
 
 checks_ok:
-	prior = object[offset] = page->freelist;
+	prior = page->freelist;
+	set_freepointer(s, object, prior);
 	page->freelist = object;
 	page->inuse--;
 
@@ -1864,16 +1856,16 @@ static __always_inline void slab_free(struct kmem_cache *s,
 	kmemleak_free_recursive(x, s->flags);
 	local_irq_save(flags);
 	c = __this_cpu_ptr(s->cpu_slab);
-	kmemcheck_slab_free(s, object, c->objsize);
-	debug_check_no_locks_freed(object, c->objsize);
+	kmemcheck_slab_free(s, object, s->objsize);
+	debug_check_no_locks_freed(object, s->objsize);
 	if (!(s->flags & SLAB_DEBUG_OBJECTS))
-		debug_check_no_obj_freed(object, c->objsize);
+		debug_check_no_obj_freed(object, s->objsize);
 	if (likely(page == c->page && c->node >= 0)) {
-		object[c->offset] = c->freelist;
+		set_freepointer(s, object, c->freelist);
 		c->freelist = object;
 		stat(c, FREE_FASTPATH);
 	} else
-		__slab_free(s, page, x, addr, c->offset);
+		__slab_free(s, page, x, addr);
 
 	local_irq_restore(flags);
 }
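Both fast paths treat the per-cpu freelist as a LIFO chain threaded through the free objects themselves: the free path pushes the object by pointing its free pointer at the old head, and the allocation path pops the head and follows its free pointer. A compact userspace sketch of that pattern (the fake_* names are stand-ins, not the kernel API):

/* Standalone illustration of the freelist handling in the fast paths:
 * free  -> set_freepointer(s, object, c->freelist); c->freelist = object;
 * alloc -> object = c->freelist; c->freelist = get_freepointer(s, object);
 */
#include <assert.h>
#include <stddef.h>

struct fake_cache {
	unsigned long offset;			/* byte offset of the free pointer inside an object */
};

static void *get_fp(struct fake_cache *s, void *object)
{
	return *(void **)((char *)object + s->offset);
}

static void set_fp(struct fake_cache *s, void *object, void *fp)
{
	*(void **)((char *)object + s->offset) = fp;
}

int main(void)
{
	void *obj_a[8] = { 0 }, *obj_b[8] = { 0 };	/* two fake objects */
	struct fake_cache s = { .offset = 0 };		/* free pointer stored at offset 0 */
	void *freelist = NULL;				/* plays the role of c->freelist */

	/* Free fast path: push obj_a, then obj_b. */
	set_fp(&s, obj_a, freelist);
	freelist = obj_a;
	set_fp(&s, obj_b, freelist);
	freelist = obj_b;

	/* Alloc fast path: pop in LIFO order. */
	void *object = freelist;
	freelist = get_fp(&s, object);
	assert(object == (void *)obj_b);
	object = freelist;
	freelist = get_fp(&s, object);
	assert(object == (void *)obj_a && freelist == NULL);
	return 0;
}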
@@ -2060,19 +2052,6 @@ static unsigned long calculate_alignment(unsigned long flags,
 	return ALIGN(align, sizeof(void *));
 }
 
-static void init_kmem_cache_cpu(struct kmem_cache *s,
-			struct kmem_cache_cpu *c)
-{
-	c->page = NULL;
-	c->freelist = NULL;
-	c->node = 0;
-	c->offset = s->offset / sizeof(void *);
-	c->objsize = s->objsize;
-#ifdef CONFIG_SLUB_STATS
-	memset(c->stat, 0, NR_SLUB_STAT_ITEMS * sizeof(unsigned));
-#endif
-}
-
 static void
 init_kmem_cache_node(struct kmem_cache_node *n, struct kmem_cache *s)
 {
@@ -2090,8 +2069,6 @@ static DEFINE_PER_CPU(struct kmem_cache_cpu, kmalloc_percpu[SLUB_PAGE_SHIFT]);
 
 static inline int alloc_kmem_cache_cpus(struct kmem_cache *s, gfp_t flags)
 {
-	int cpu;
-
 	if (s < kmalloc_caches + KMALLOC_CACHES && s >= kmalloc_caches)
 		/*
 		 * Boot time creation of the kmalloc array. Use static per cpu data
@@ -2104,8 +2081,6 @@ static inline int alloc_kmem_cache_cpus(struct kmem_cache *s, gfp_t flags)
 	if (!s->cpu_slab)
 		return 0;
 
-	for_each_possible_cpu(cpu)
-		init_kmem_cache_cpu(s, per_cpu_ptr(s->cpu_slab, cpu));
 	return 1;
 }
 
@@ -2391,6 +2366,7 @@ static int kmem_cache_open(struct kmem_cache *s, gfp_t gfpflags,
 
 	if (alloc_kmem_cache_cpus(s, gfpflags & ~SLUB_DMA))
 		return 1;
+
 	free_kmem_cache_nodes(s);
 error:
 	if (flags & SLAB_PANIC)
@@ -3247,22 +3223,12 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
 	down_write(&slub_lock);
 	s = find_mergeable(size, align, flags, name, ctor);
 	if (s) {
-		int cpu;
-
 		s->refcount++;
 		/*
 		 * Adjust the object sizes so that we clear
 		 * the complete object on kzalloc.
 		 */
 		s->objsize = max(s->objsize, (int)size);
-
-		/*
-		 * And then we need to update the object size in the
-		 * per cpu structures
-		 */
-		for_each_online_cpu(cpu)
-			per_cpu_ptr(s->cpu_slab, cpu)->objsize = s->objsize;
-
 		s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
 		up_write(&slub_lock);
 
@@ -3316,14 +3282,6 @@ static int __cpuinit slab_cpuup_callback(struct notifier_block *nfb,
 	unsigned long flags;
 
 	switch (action) {
-	case CPU_UP_PREPARE:
-	case CPU_UP_PREPARE_FROZEN:
-		down_read(&slub_lock);
-		list_for_each_entry(s, &slab_caches, list)
-			init_kmem_cache_cpu(s, per_cpu_ptr(s->cpu_slab, cpu));
-		up_read(&slub_lock);
-		break;
-
 	case CPU_UP_CANCELED:
 	case CPU_UP_CANCELED_FROZEN:
 	case CPU_DEAD:
