SLUB: Get rid of dynamic DMA kmalloc cache allocation
Dynamic DMA kmalloc cache allocation is troublesome since the
new percpu allocator does not support allocations in atomic contexts.
Reserve some statically allocated kmalloc_cpu structures instead.

Signed-off-by: Christoph Lameter <cl@linux-foundation.org>
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
Christoph Lameter authored and Pekka Enberg committed Dec 20, 2009
1 parent 9dfc6e6 commit 756dee7
Showing 2 changed files with 21 additions and 22 deletions.
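
Before the diffs, a minimal userspace sketch of the scheme may help. It is illustration only, under assumptions that are not in the commit: the struct, array, and claim_dma_slot() helper are made up rather than the kernel's types or API, and PAGE_SHIFT is taken to be 12 (4 KiB pages). It models what dma_kmalloc_cache() does after this patch: rather than allocating a fresh kmem_cache at runtime, it scans a statically sized cache array for an unused slot (size == 0) and claims it, so nothing has to be allocated in atomic context.

/* sketch.c - illustration only; simplified, non-kernel types. */
#include <assert.h>
#include <stdio.h>

#define PAGE_SHIFT	12				/* assumed: 4 KiB pages */
#define SLUB_PAGE_SHIFT	(PAGE_SHIFT + 2)		/* 14 regular caches */
#define KMALLOC_CACHES	(2 * SLUB_PAGE_SHIFT - 6)	/* 22 = 14 + 8 spare DMA slots */

struct fake_kmem_cache {
	unsigned int size;	/* 0 means "slot unused", as in the patch */
	char name[32];
};

/* Statically reserved storage; nothing is allocated at runtime. */
static struct fake_kmem_cache caches[KMALLOC_CACHES];

/* Claim one of the reserved slots for a new DMA size class. */
static struct fake_kmem_cache *claim_dma_slot(unsigned int object_size)
{
	int i;

	for (i = 0; i < KMALLOC_CACHES; i++)
		if (!caches[i].size)
			break;

	assert(i < KMALLOC_CACHES);	/* mirrors the BUG_ON() in the patch */
	caches[i].size = object_size;
	snprintf(caches[i].name, sizeof(caches[i].name),
		 "kmalloc_dma-%u", object_size);
	return &caches[i];
}

int main(void)
{
	int i;

	/* Pretend the regular kmalloc caches already occupy the first slots. */
	for (i = 0; i < SLUB_PAGE_SHIFT; i++)
		caches[i].size = 1u << i;

	struct fake_kmem_cache *s = claim_dma_slot(64);
	printf("claimed slot %td for %s\n", s - caches, s->name);
	return 0;
}

The point of the scheme is that storage for every slot exists from boot, so claiming one never calls into an allocator and therefore cannot trip over the percpu allocator's lack of support for atomic allocations.
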
include/linux/slub_def.h: 19 changes (11 additions, 8 deletions)
@@ -131,11 +131,21 @@ struct kmem_cache {
 
 #define SLUB_PAGE_SHIFT (PAGE_SHIFT + 2)
 
+#ifdef CONFIG_ZONE_DMA
+#define SLUB_DMA __GFP_DMA
+/* Reserve extra caches for potential DMA use */
+#define KMALLOC_CACHES (2 * SLUB_PAGE_SHIFT - 6)
+#else
+/* Disable DMA functionality */
+#define SLUB_DMA (__force gfp_t)0
+#define KMALLOC_CACHES SLUB_PAGE_SHIFT
+#endif
+
 /*
  * We keep the general caches in an array of slab caches that are used for
  * 2^x bytes of allocations.
  */
-extern struct kmem_cache kmalloc_caches[SLUB_PAGE_SHIFT];
+extern struct kmem_cache kmalloc_caches[KMALLOC_CACHES];
 
 /*
  * Sorry that the following has to be that ugly but some versions of GCC
@@ -203,13 +213,6 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
 	return &kmalloc_caches[index];
 }
 
-#ifdef CONFIG_ZONE_DMA
-#define SLUB_DMA __GFP_DMA
-#else
-/* Disable DMA functionality */
-#define SLUB_DMA (__force gfp_t)0
-#endif
-
 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
 void *__kmalloc(size_t size, gfp_t flags);
 
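For a sense of scale, assuming 4 KiB pages (PAGE_SHIFT = 12): SLUB_PAGE_SHIFT is 14, so KMALLOC_CACHES works out to 2 * 14 - 6 = 22, i.e. the 14 regular kmalloc caches plus 8 statically reserved slots that dma_kmalloc_cache() can hand out at runtime. Without CONFIG_ZONE_DMA the array stays at SLUB_PAGE_SHIFT entries, as before.
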
mm/slub.c: 24 changes (10 additions, 14 deletions)
@@ -2092,7 +2092,7 @@ static inline int alloc_kmem_cache_cpus(struct kmem_cache *s, gfp_t flags)
 {
 	int cpu;
 
-	if (s < kmalloc_caches + SLUB_PAGE_SHIFT && s >= kmalloc_caches)
+	if (s < kmalloc_caches + KMALLOC_CACHES && s >= kmalloc_caches)
 		/*
 		 * Boot time creation of the kmalloc array. Use static per cpu data
 		 * since the per cpu allocator is not available yet.
@@ -2539,7 +2539,7 @@ EXPORT_SYMBOL(kmem_cache_destroy);
  *		Kmalloc subsystem
  *******************************************************************/
 
-struct kmem_cache kmalloc_caches[SLUB_PAGE_SHIFT] __cacheline_aligned;
+struct kmem_cache kmalloc_caches[KMALLOC_CACHES] __cacheline_aligned;
 EXPORT_SYMBOL(kmalloc_caches);
 
 static int __init setup_slub_min_order(char *str)
@@ -2629,6 +2629,7 @@ static noinline struct kmem_cache *dma_kmalloc_cache(int index, gfp_t flags)
 	char *text;
 	size_t realsize;
 	unsigned long slabflags;
+	int i;
 
 	s = kmalloc_caches_dma[index];
 	if (s)
@@ -2649,18 +2650,13 @@ static noinline struct kmem_cache *dma_kmalloc_cache(int index, gfp_t flags)
 	text = kasprintf(flags & ~SLUB_DMA, "kmalloc_dma-%d",
 			 (unsigned int)realsize);
 
-	if (flags & __GFP_WAIT)
-		s = kmalloc(kmem_size, flags & ~SLUB_DMA);
-	else {
-		int i;
+	s = NULL;
+	for (i = 0; i < KMALLOC_CACHES; i++)
+		if (!kmalloc_caches[i].size)
+			break;
 
-		s = NULL;
-		for (i = 0; i < SLUB_PAGE_SHIFT; i++)
-			if (kmalloc_caches[i].size) {
-				s = kmalloc_caches + i;
-				break;
-			}
-	}
+	BUG_ON(i >= KMALLOC_CACHES);
+	s = kmalloc_caches + i;
 
 	/*
 	 * Must defer sysfs creation to a workqueue because we don't know
@@ -2674,7 +2670,7 @@ static noinline struct kmem_cache *dma_kmalloc_cache(int index, gfp_t flags)
 
 	if (!s || !text || !kmem_cache_open(s, flags, text,
 			realsize, ARCH_KMALLOC_MINALIGN, slabflags, NULL)) {
-		kfree(s);
+		s->size = 0;
 		kfree(text);
 		goto unlock_out;
 	}
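
One consequence of the slot reuse shows up in the failure path of the last hunk: s now points into the static kmalloc_caches[] array rather than at a kmalloc()ed object, so it can no longer be handed to kfree(); clearing s->size instead appears to mark the slot unused again, which is exactly the condition the scan above checks for when picking a free slot.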
