slab: Common Kmalloc cache determination
Extract the optimized lookup functions from slub and put them into
slab_common.c. Then make slab use these functions as well.

Joonsoo notes that this fixes some issues with constant folding which
also reduces the code size for slub.

https://lkml.org/lkml/2012/10/20/82

Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
Christoph Lameter authored and Pekka Enberg committed Feb 1, 2013
1 parent 9e5e8de commit 2c59dd6
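
For orientation before reading the diff: the lookup that both allocators end up sharing is the kmalloc_slab() helper added to mm/slab_common.c below. Condensed here for reference (the inline annotations are added for this summary and are not in the patch):

        /* Condensed from the mm/slab_common.c hunk below; annotations added. */
        struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags)
        {
                int index;

                if (size <= 192) {
                        if (!size)
                                return ZERO_SIZE_PTR;   /* kmalloc(0) sentinel */

                        /* table lookup covers the non-power-of-two 96/192 byte caches */
                        index = size_index[size_index_elem(size)];
                } else
                        index = fls(size - 1);          /* power-of-two sizes */

                /* (the full version can also return from kmalloc_dma_caches[] under CONFIG_ZONE_DMA) */
                return kmalloc_caches[index];
        }

Per the commit message, this is slub's optimized lookup moved to common code; the mm/slab.c hunks below switch slab's __do_kmalloc()/__do_kmalloc_node() and its off-slab management allocation over to it as well.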
Showing 5 changed files with 124 additions and 173 deletions.
41 changes: 10 additions & 31 deletions include/linux/slub_def.h
@@ -115,29 +115,6 @@ struct kmem_cache {
 	struct kmem_cache_node *node[MAX_NUMNODES];
 };

-#ifdef CONFIG_ZONE_DMA
-#define SLUB_DMA __GFP_DMA
-#else
-/* Disable DMA functionality */
-#define SLUB_DMA (__force gfp_t)0
-#endif
-
-/*
- * Find the slab cache for a given combination of allocation flags and size.
- *
- * This ought to end up with a global pointer to the right cache
- * in kmalloc_caches.
- */
-static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
-{
-	int index = kmalloc_index(size);
-
-	if (index == 0)
-		return NULL;
-
-	return kmalloc_caches[index];
-}
-
 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
 void *__kmalloc(size_t size, gfp_t flags);

@@ -195,13 +172,14 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
 		if (size > KMALLOC_MAX_CACHE_SIZE)
 			return kmalloc_large(size, flags);

-		if (!(flags & SLUB_DMA)) {
-			struct kmem_cache *s = kmalloc_slab(size);
+		if (!(flags & GFP_DMA)) {
+			int index = kmalloc_index(size);

-			if (!s)
+			if (!index)
 				return ZERO_SIZE_PTR;

-			return kmem_cache_alloc_trace(s, flags, size);
+			return kmem_cache_alloc_trace(kmalloc_caches[index],
+					flags, size);
 		}
 	}
 	return __kmalloc(size, flags);
@@ -228,13 +206,14 @@ kmem_cache_alloc_node_trace(struct kmem_cache *s,
 static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 {
 	if (__builtin_constant_p(size) &&
-		size <= KMALLOC_MAX_CACHE_SIZE && !(flags & SLUB_DMA)) {
-		struct kmem_cache *s = kmalloc_slab(size);
+		size <= KMALLOC_MAX_CACHE_SIZE && !(flags & GFP_DMA)) {
+		int index = kmalloc_index(size);

-		if (!s)
+		if (!index)
 			return ZERO_SIZE_PTR;

-		return kmem_cache_alloc_node_trace(s, flags, node, size);
+		return kmem_cache_alloc_node_trace(kmalloc_caches[index],
+				flags, node, size);
 	}
 	return __kmalloc_node(size, flags, node);
 }
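
A quick illustration of Joonsoo's constant-folding point from the commit message (this snippet is not part of the patch, and the caller is made up): with the reworked inline kmalloc() above, a constant-size request lets the compiler fold the cache index and load the cache pointer directly.

        #include <linux/slab.h>

        /* Illustration only -- hypothetical caller, not from the patch. */
        static void *grab_small_buffer(void)
        {
                /*
                 * 64 is a compile-time constant, so kmalloc_index(64) folds to 6
                 * and this call reduces to
                 * kmem_cache_alloc_trace(kmalloc_caches[6], GFP_KERNEL, 64),
                 * with no runtime size-to-cache lookup.
                 */
                return kmalloc(64, GFP_KERNEL);
        }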
40 changes: 3 additions & 37 deletions mm/slab.c
@@ -656,40 +656,6 @@ static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
 	return cachep->array[smp_processor_id()];
 }

-static inline struct kmem_cache *__find_general_cachep(size_t size,
-							gfp_t gfpflags)
-{
-	int i;
-
-#if DEBUG
-	/* This happens if someone tries to call
-	 * kmem_cache_create(), or __kmalloc(), before
-	 * the generic caches are initialized.
-	 */
-	BUG_ON(kmalloc_caches[INDEX_AC] == NULL);
-#endif
-	if (!size)
-		return ZERO_SIZE_PTR;
-
-	i = kmalloc_index(size);
-
-	/*
-	 * Really subtle: The last entry with cs->cs_size==ULONG_MAX
-	 * has cs_{dma,}cachep==NULL. Thus no special case
-	 * for large kmalloc calls required.
-	 */
-#ifdef CONFIG_ZONE_DMA
-	if (unlikely(gfpflags & GFP_DMA))
-		return kmalloc_dma_caches[i];
-#endif
-	return kmalloc_caches[i];
-}
-
-static struct kmem_cache *kmem_find_general_cachep(size_t size, gfp_t gfpflags)
-{
-	return __find_general_cachep(size, gfpflags);
-}
-
 static size_t slab_mgmt_size(size_t nr_objs, size_t align)
 {
 	return ALIGN(sizeof(struct slab)+nr_objs*sizeof(kmem_bufctl_t), align);
@@ -2426,7 +2392,7 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
 	cachep->reciprocal_buffer_size = reciprocal_value(size);

 	if (flags & CFLGS_OFF_SLAB) {
-		cachep->slabp_cache = kmem_find_general_cachep(slab_size, 0u);
+		cachep->slabp_cache = kmalloc_slab(slab_size, 0u);
 		/*
 		 * This is a possibility for one of the malloc_sizes caches.
 		 * But since we go off slab only for object size greater than
@@ -3729,7 +3695,7 @@ __do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller)
 {
 	struct kmem_cache *cachep;

-	cachep = kmem_find_general_cachep(size, flags);
+	cachep = kmalloc_slab(size, flags);
 	if (unlikely(ZERO_OR_NULL_PTR(cachep)))
 		return cachep;
 	return kmem_cache_alloc_node_trace(cachep, flags, node, size);
@@ -3774,7 +3740,7 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
 	 * Then kmalloc uses the uninlined functions instead of the inline
 	 * functions.
 	 */
-	cachep = __find_general_cachep(size, flags);
+	cachep = kmalloc_slab(size, flags);
 	if (unlikely(ZERO_OR_NULL_PTR(cachep)))
 		return cachep;
 	ret = slab_alloc(cachep, flags, caller);
3 changes: 3 additions & 0 deletions mm/slab.h
@@ -38,6 +38,9 @@ unsigned long calculate_alignment(unsigned long flags,
 #ifndef CONFIG_SLOB
 /* Kmalloc array related functions */
 void create_kmalloc_caches(unsigned long);
+
+/* Find the kmalloc slab corresponding for a certain size */
+struct kmem_cache *kmalloc_slab(size_t, gfp_t);
 #endif

105 changes: 103 additions & 2 deletions mm/slab_common.c
@@ -327,6 +327,68 @@ struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
 EXPORT_SYMBOL(kmalloc_dma_caches);
 #endif

+/*
+ * Conversion table for small slabs sizes / 8 to the index in the
+ * kmalloc array. This is necessary for slabs < 192 since we have non power
+ * of two cache sizes there. The size of larger slabs can be determined using
+ * fls.
+ */
+static s8 size_index[24] = {
+	3,	/* 8 */
+	4,	/* 16 */
+	5,	/* 24 */
+	5,	/* 32 */
+	6,	/* 40 */
+	6,	/* 48 */
+	6,	/* 56 */
+	6,	/* 64 */
+	1,	/* 72 */
+	1,	/* 80 */
+	1,	/* 88 */
+	1,	/* 96 */
+	7,	/* 104 */
+	7,	/* 112 */
+	7,	/* 120 */
+	7,	/* 128 */
+	2,	/* 136 */
+	2,	/* 144 */
+	2,	/* 152 */
+	2,	/* 160 */
+	2,	/* 168 */
+	2,	/* 176 */
+	2,	/* 184 */
+	2	/* 192 */
+};
+
+static inline int size_index_elem(size_t bytes)
+{
+	return (bytes - 1) / 8;
+}
+
+/*
+ * Find the kmem_cache structure that serves a given size of
+ * allocation
+ */
+struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags)
+{
+	int index;
+
+	if (size <= 192) {
+		if (!size)
+			return ZERO_SIZE_PTR;
+
+		index = size_index[size_index_elem(size)];
+	} else
+		index = fls(size - 1);
+
+#ifdef CONFIG_ZONE_DMA
+	if (unlikely((flags & SLAB_CACHE_DMA)))
+		return kmalloc_dma_caches[index];
+
+#endif
+	return kmalloc_caches[index];
+}
+
 /*
  * Create the kmalloc array. Some of the regular kmalloc arrays
  * may already have been created because they were needed to
@@ -336,6 +398,47 @@ void __init create_kmalloc_caches(unsigned long flags)
 {
 	int i;

+	/*
+	 * Patch up the size_index table if we have strange large alignment
+	 * requirements for the kmalloc array. This is only the case for
+	 * MIPS it seems. The standard arches will not generate any code here.
+	 *
+	 * Largest permitted alignment is 256 bytes due to the way we
+	 * handle the index determination for the smaller caches.
+	 *
+	 * Make sure that nothing crazy happens if someone starts tinkering
+	 * around with ARCH_KMALLOC_MINALIGN
+	 */
+	BUILD_BUG_ON(KMALLOC_MIN_SIZE > 256 ||
+		(KMALLOC_MIN_SIZE & (KMALLOC_MIN_SIZE - 1)));
+
+	for (i = 8; i < KMALLOC_MIN_SIZE; i += 8) {
+		int elem = size_index_elem(i);
+
+		if (elem >= ARRAY_SIZE(size_index))
+			break;
+		size_index[elem] = KMALLOC_SHIFT_LOW;
+	}
+
+	if (KMALLOC_MIN_SIZE >= 64) {
+		/*
+		 * The 96 byte size cache is not used if the alignment
+		 * is 64 byte.
+		 */
+		for (i = 64 + 8; i <= 96; i += 8)
+			size_index[size_index_elem(i)] = 7;
+
+	}
+
+	if (KMALLOC_MIN_SIZE >= 128) {
+		/*
+		 * The 192 byte sized cache is not used if the alignment
+		 * is 128 byte. Redirect kmalloc to use the 256 byte cache
+		 * instead.
+		 */
+		for (i = 128 + 8; i <= 192; i += 8)
+			size_index[size_index_elem(i)] = 8;
+	}
 	/* Caches that are not of the two-to-the-power-of size */
 	if (KMALLOC_MIN_SIZE <= 32 && !kmalloc_caches[1])
 		kmalloc_caches[1] = create_kmalloc_cache(NULL, 96, flags);
@@ -379,8 +482,6 @@ void __init create_kmalloc_caches(unsigned long flags)
 	}
 #endif
 }
-
-
 #endif /* !CONFIG_SLOB */

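
To see how the size_index table and fls() in the new kmalloc_slab() turn a request size into a cache index, here is a small stand-alone userspace sketch (not part of the patch; kernel types and helpers are replaced with plain C) with a few worked sizes:

        #include <stddef.h>
        #include <stdio.h>

        /* Same values as the size_index[] table added above: (size - 1) / 8 -> cache index. */
        static const signed char size_index[24] = {
                3, 4, 5, 5, 6, 6, 6, 6,         /*   8 ..  64 bytes */
                1, 1, 1, 1, 7, 7, 7, 7,         /*  72 .. 128 bytes */
                2, 2, 2, 2, 2, 2, 2, 2          /* 136 .. 192 bytes */
        };

        /* Userspace stand-in for the kernel's fls(): 1-based index of the highest set bit. */
        static int fls_(unsigned long x)
        {
                int r = 0;

                while (x) {
                        x >>= 1;
                        r++;
                }
                return r;
        }

        /* Mirrors the index selection in kmalloc_slab(); -1 stands in for ZERO_SIZE_PTR. */
        static int kmalloc_cache_index(size_t size)
        {
                if (size <= 192)
                        return size ? size_index[(size - 1) / 8] : -1;
                return fls_(size - 1);
        }

        int main(void)
        {
                /* 90 -> 1 (96-byte cache), 100 -> 7 (128-byte), 1000 -> 10 (1024-byte). */
                printf("%d %d %d\n",
                       kmalloc_cache_index(90),
                       kmalloc_cache_index(100),
                       kmalloc_cache_index(1000));
                return 0;
        }

The patch-up loops in create_kmalloc_caches() above only rewrite this table when KMALLOC_MIN_SIZE is larger than 8, for example redirecting the 72-96 byte entries to the 128-byte cache when the minimum size is 64.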
[diff for the fifth changed file did not load in this view]
