Commit 40f42ae

---
r: 186176
b: refs/heads/master
c: 756dee7
h: refs/heads/master
v: v3
Christoph Lameter authored and Pekka Enberg committed Dec 20, 2009
1 parent 237ac15 commit 40f42ae
Showing 3 changed files with 22 additions and 23 deletions.
[refs] (2 changes: 1 addition & 1 deletion)
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 9dfc6e68bfe6ee452efb1a4e9ca26a9007f2b864
+refs/heads/master: 756dee75872a2a764b478e18076360b8a4ec9045
trunk/include/linux/slub_def.h (19 changes: 11 additions & 8 deletions)
@@ -131,11 +131,21 @@ struct kmem_cache {
 
 #define SLUB_PAGE_SHIFT (PAGE_SHIFT + 2)
 
+#ifdef CONFIG_ZONE_DMA
+#define SLUB_DMA __GFP_DMA
+/* Reserve extra caches for potential DMA use */
+#define KMALLOC_CACHES (2 * SLUB_PAGE_SHIFT - 6)
+#else
+/* Disable DMA functionality */
+#define SLUB_DMA (__force gfp_t)0
+#define KMALLOC_CACHES SLUB_PAGE_SHIFT
+#endif
+
 /*
  * We keep the general caches in an array of slab caches that are used for
  * 2^x bytes of allocations.
  */
-extern struct kmem_cache kmalloc_caches[SLUB_PAGE_SHIFT];
+extern struct kmem_cache kmalloc_caches[KMALLOC_CACHES];
 
 /*
  * Sorry that the following has to be that ugly but some versions of GCC
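With CONFIG_ZONE_DMA enabled, KMALLOC_CACHES oversizes the kmalloc_caches array, leaving SLUB_PAGE_SHIFT - 6 spare slots from which DMA caches can later be carved without any runtime allocation. A quick worked example of the sizing, as a standalone userspace sketch (PAGE_SHIFT == 12, i.e. 4 KiB pages, is an assumption for illustration, not part of the patch):

	/* Sizing sketch; assumes PAGE_SHIFT == 12 (4 KiB pages). */
	#include <stdio.h>

	#define PAGE_SHIFT	12
	#define SLUB_PAGE_SHIFT	(PAGE_SHIFT + 2)		/* 14 regular caches */
	#define KMALLOC_CACHES	(2 * SLUB_PAGE_SHIFT - 6)	/* 22 slots in total */

	int main(void)
	{
		printf("regular %d, total %d, spare for DMA %d\n",
		       SLUB_PAGE_SHIFT, KMALLOC_CACHES,
		       KMALLOC_CACHES - SLUB_PAGE_SHIFT);	/* 14, 22, 8 */
		return 0;
	}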
@@ -203,13 +213,6 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
 	return &kmalloc_caches[index];
 }
 
-#ifdef CONFIG_ZONE_DMA
-#define SLUB_DMA __GFP_DMA
-#else
-/* Disable DMA functionality */
-#define SLUB_DMA (__force gfp_t)0
-#endif
-
 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
 void *__kmalloc(size_t size, gfp_t flags);
 
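For context, this is roughly how the allocation path consumes SLUB_DMA in mm/slub.c of this era (paraphrased for illustration; not part of this diff). Since SLUB_DMA is a compile-time zero without CONFIG_ZONE_DMA, the DMA branch disappears entirely on such configurations:

	/* Paraphrased from 2.6.32-era mm/slub.c; illustration only. */
	static struct kmem_cache *get_slab(size_t size, gfp_t flags)
	{
		int index = kmalloc_index(size);

		if (!index)
			return ZERO_SIZE_PTR;

	#ifdef CONFIG_ZONE_DMA
		if (unlikely(flags & SLUB_DMA))
			return dma_kmalloc_cache(index, flags);
	#endif
		return &kmalloc_caches[index];
	}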
trunk/mm/slub.c (24 changes: 10 additions & 14 deletions)
@@ -2092,7 +2092,7 @@ static inline int alloc_kmem_cache_cpus(struct kmem_cache *s, gfp_t flags)
 {
 	int cpu;
 
-	if (s < kmalloc_caches + SLUB_PAGE_SHIFT && s >= kmalloc_caches)
+	if (s < kmalloc_caches + KMALLOC_CACHES && s >= kmalloc_caches)
 		/*
 		 * Boot time creation of the kmalloc array. Use static per cpu data
 		 * since the per cpu allocator is not available yet.
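The rewritten bounds test identifies boot-time caches by address: a kmem_cache embedded in the static kmalloc_caches[] array must lie within its extent, and those caches have to use static per cpu data. The same pointer-range check, pulled out as a standalone predicate (the helper name is hypothetical, shown only to make the test explicit):

	/* Hypothetical helper, not in the patch: true if s is one of the
	 * statically allocated boot-time kmalloc caches. */
	static inline int is_boot_kmalloc_cache(struct kmem_cache *s)
	{
		return s >= kmalloc_caches && s < kmalloc_caches + KMALLOC_CACHES;
	}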
@@ -2539,7 +2539,7 @@ EXPORT_SYMBOL(kmem_cache_destroy);
  *		Kmalloc subsystem
  *******************************************************************/
 
-struct kmem_cache kmalloc_caches[SLUB_PAGE_SHIFT] __cacheline_aligned;
+struct kmem_cache kmalloc_caches[KMALLOC_CACHES] __cacheline_aligned;
 EXPORT_SYMBOL(kmalloc_caches);
 
 static int __init setup_slub_min_order(char *str)
@@ -2629,6 +2629,7 @@ static noinline struct kmem_cache *dma_kmalloc_cache(int index, gfp_t flags)
 	char *text;
 	size_t realsize;
 	unsigned long slabflags;
+	int i;
 
 	s = kmalloc_caches_dma[index];
 	if (s)
@@ -2649,18 +2650,13 @@ static noinline struct kmem_cache *dma_kmalloc_cache(int index, gfp_t flags)
 	text = kasprintf(flags & ~SLUB_DMA, "kmalloc_dma-%d",
 			 (unsigned int)realsize);
 
-	if (flags & __GFP_WAIT)
-		s = kmalloc(kmem_size, flags & ~SLUB_DMA);
-	else {
-		int i;
+	s = NULL;
+	for (i = 0; i < KMALLOC_CACHES; i++)
+		if (!kmalloc_caches[i].size)
+			break;
 
-		s = NULL;
-		for (i = 0; i < SLUB_PAGE_SHIFT; i++)
-			if (kmalloc_caches[i].size) {
-				s = kmalloc_caches + i;
-				break;
-			}
-	}
+	BUG_ON(i >= KMALLOC_CACHES);
+	s = kmalloc_caches + i;
 
 	/*
 	 * Must defer sysfs creation to a workqueue because we don't know
@@ -2674,7 +2670,7 @@ static noinline struct kmem_cache *dma_kmalloc_cache(int index, gfp_t flags)
 
 	if (!s || !text || !kmem_cache_open(s, flags, text,
 			realsize, ARCH_KMALLOC_MINALIGN, slabflags, NULL)) {
-		kfree(s);
+		s->size = 0;
 		kfree(text);
 		goto unlock_out;
 	}
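Note the matching change on the failure path: the old code kfree()d a dynamically allocated kmem_cache, but static array slots must not be freed, so the patch resets s->size to 0, which the empty-slot scan treats as unused, handing the slot back to the reserve. A condensed sketch of that claim/release discipline (helper names are hypothetical; illustration only):

	/* Hypothetical helpers condensing the patch's slot discipline. */
	static struct kmem_cache *claim_free_slot(void)
	{
		int i;

		for (i = 0; i < KMALLOC_CACHES; i++)
			if (!kmalloc_caches[i].size)	/* size == 0: unused */
				return kmalloc_caches + i;
		return NULL;				/* reserve exhausted */
	}

	static void release_slot(struct kmem_cache *s)
	{
		s->size = 0;				/* slot reusable again */
	}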