Commit ee83f57
---
r: 69602
b: refs/heads/master
c: aadb4bc
h: refs/heads/master
v: v3
Christoph Lameter authored and Linus Torvalds committed Oct 16, 2007
1 parent 2ba7be0 commit ee83f57
Showing 3 changed files with 63 additions and 59 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 57f6b96c09c30e444e0d3fc3080feba037657a7b
+refs/heads/master: aadb4bc4a1f9108c1d0fbd121827c936c2ed4217
57 changes: 24 additions & 33 deletions trunk/include/linux/slub_def.h
@@ -72,7 +72,7 @@ struct kmem_cache {
  * We keep the general caches in an array of slab caches that are used for
  * 2^x bytes of allocations.
  */
-extern struct kmem_cache kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
+extern struct kmem_cache kmalloc_caches[PAGE_SHIFT];
 
 /*
  * Sorry that the following has to be that ugly but some versions of GCC
@@ -83,9 +83,6 @@ static __always_inline int kmalloc_index(size_t size)
 	if (!size)
 		return 0;
 
-	if (size > KMALLOC_MAX_SIZE)
-		return -1;
-
 	if (size <= KMALLOC_MIN_SIZE)
 		return KMALLOC_SHIFT_LOW;
 
@@ -102,20 +99,20 @@ static __always_inline int kmalloc_index(size_t size)
 	if (size <= 512) return 9;
 	if (size <= 1024) return 10;
 	if (size <= 2 * 1024) return 11;
+/*
+ * The following is only needed to support architectures with a larger page
+ * size than 4k.
+ */
 	if (size <= 4 * 1024) return 12;
 	if (size <= 8 * 1024) return 13;
 	if (size <= 16 * 1024) return 14;
 	if (size <= 32 * 1024) return 15;
 	if (size <= 64 * 1024) return 16;
 	if (size <= 128 * 1024) return 17;
 	if (size <= 256 * 1024) return 18;
 	if (size <= 512 * 1024) return 19;
 	if (size <= 1024 * 1024) return 20;
 	if (size <= 2 * 1024 * 1024) return 21;
-	if (size <= 4 * 1024 * 1024) return 22;
-	if (size <= 8 * 1024 * 1024) return 23;
-	if (size <= 16 * 1024 * 1024) return 24;
-	if (size <= 32 * 1024 * 1024) return 25;
 	return -1;
 
 /*
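
Note for readers (not part of the patch): kmalloc_index() is an unrolled, compile-time map from an allocation size to the index of the smallest power-of-two kmalloc cache that fits it, i.e. the ceiling of log2(size). A minimal userspace sketch of the same mapping; index_for() is an invented name for illustration:

	#include <assert.h>
	#include <stddef.h>

	/* Smallest x such that 2^x >= size; mirrors the unrolled chain above. */
	static int index_for(size_t size)
	{
		int index = 0;

		while (((size_t)1 << index) < size)
			index++;
		return index;
	}

	int main(void)
	{
		assert(index_for(512) == 9);	/* matches "if (size <= 512) return 9;" */
		assert(index_for(513) == 10);	/* rounds up to the 1024-byte cache */
		return 0;
	}

Per the comment added above, the entries from 4 KB upward remain only for architectures whose page size is larger than 4k; anything bigger than PAGE_SIZE / 2 now bypasses the slab entirely (see the kmalloc() hunk below).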
@@ -140,19 +137,6 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
 	if (index == 0)
 		return NULL;
 
-	/*
-	 * This function only gets expanded if __builtin_constant_p(size), so
-	 * testing it here shouldn't be needed. But some versions of gcc need
-	 * help.
-	 */
-	if (__builtin_constant_p(size) && index < 0) {
-		/*
-		 * Generate a link failure. Would be great if we could
-		 * do something to stop the compile here.
-		 */
-		extern void __kmalloc_size_too_large(void);
-		__kmalloc_size_too_large();
-	}
 	return &kmalloc_caches[index];
 }
 
@@ -168,15 +152,21 @@ void *__kmalloc(size_t size, gfp_t flags);
 
 static __always_inline void *kmalloc(size_t size, gfp_t flags)
 {
-	if (__builtin_constant_p(size) && !(flags & SLUB_DMA)) {
-		struct kmem_cache *s = kmalloc_slab(size);
+	if (__builtin_constant_p(size)) {
+		if (size > PAGE_SIZE / 2)
+			return (void *)__get_free_pages(flags | __GFP_COMP,
+							get_order(size));
 
-		if (!s)
-			return ZERO_SIZE_PTR;
+		if (!(flags & SLUB_DMA)) {
+			struct kmem_cache *s = kmalloc_slab(size);
 
-		return kmem_cache_alloc(s, flags);
-	} else
-		return __kmalloc(size, flags);
+			if (!s)
+				return ZERO_SIZE_PTR;
+
+			return kmem_cache_alloc(s, flags);
+		}
+	}
+	return __kmalloc(size, flags);
 }
 
 #ifdef CONFIG_NUMA
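
Note for readers (not part of the patch): because the new size check sits behind __builtin_constant_p(size), a call such as kmalloc(8192, GFP_KERNEL) with a literal size compiles straight to __get_free_pages() with no runtime branch. A hedged userspace sketch of that folding trick; MY_PAGE_SIZE and alloc_path() are invented for illustration:

	#include <stdio.h>

	#define MY_PAGE_SIZE 4096	/* assumption: 4 KiB pages */

	static inline const char *alloc_path(unsigned long size)
	{
		/*
		 * After inlining with -O2, a literal argument makes
		 * __builtin_constant_p(size) evaluate to 1 and the whole
		 * branch folds away at compile time.
		 */
		if (__builtin_constant_p(size) && size > MY_PAGE_SIZE / 2)
			return "page allocator";
		return "slab cache";
	}

	int main(void)
	{
		printf("%s\n", alloc_path(8192));	/* "page allocator" at -O2 */
		printf("%s\n", alloc_path(64));		/* "slab cache" */
		return 0;
	}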
@@ -185,15 +175,16 @@ void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
 
 static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 {
-	if (__builtin_constant_p(size) && !(flags & SLUB_DMA)) {
-		struct kmem_cache *s = kmalloc_slab(size);
+	if (__builtin_constant_p(size) &&
+		size <= PAGE_SIZE / 2 && !(flags & SLUB_DMA)) {
+			struct kmem_cache *s = kmalloc_slab(size);
 
 		if (!s)
 			return ZERO_SIZE_PTR;
 
 		return kmem_cache_alloc_node(s, flags, node);
-	} else
-		return __kmalloc_node(size, flags, node);
+	}
+	return __kmalloc_node(size, flags, node);
 }
 #endif
 
63 changes: 38 additions & 25 deletions trunk/mm/slub.c
@@ -2227,11 +2227,11 @@ EXPORT_SYMBOL(kmem_cache_destroy);
  *		Kmalloc subsystem
  *******************************************************************/
 
-struct kmem_cache kmalloc_caches[KMALLOC_SHIFT_HIGH + 1] __cacheline_aligned;
+struct kmem_cache kmalloc_caches[PAGE_SHIFT] __cacheline_aligned;
 EXPORT_SYMBOL(kmalloc_caches);
 
 #ifdef CONFIG_ZONE_DMA
-static struct kmem_cache *kmalloc_caches_dma[KMALLOC_SHIFT_HIGH + 1];
+static struct kmem_cache *kmalloc_caches_dma[PAGE_SHIFT];
 #endif
 
 static int __init setup_slub_min_order(char *str)
@@ -2397,12 +2397,8 @@ static struct kmem_cache *get_slab(size_t size, gfp_t flags)
 			return ZERO_SIZE_PTR;
 
 		index = size_index[(size - 1) / 8];
-	} else {
-		if (size > KMALLOC_MAX_SIZE)
-			return NULL;
-
+	} else
 		index = fls(size - 1);
-	}
 
 #ifdef CONFIG_ZONE_DMA
 	if (unlikely((flags & SLUB_DMA)))
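
Note for readers (not part of the patch): with the KMALLOC_MAX_SIZE guard gone, get_slab() can use fls(size - 1) unconditionally, since oversized requests are now diverted before reaching it. fls() returns the 1-based position of the most significant set bit, so fls(size - 1) is the same ceil(log2(size)) index that kmalloc_index() computes at compile time. A userspace stand-in, assuming a 64-bit unsigned long; fls_approx() is an invented name:

	#include <assert.h>

	/* 1-based index of the highest set bit; 0 for x == 0. */
	static int fls_approx(unsigned long x)
	{
		return x ? 64 - __builtin_clzl(x) : 0;
	}

	int main(void)
	{
		assert(fls_approx(4096 - 1) == 12);	/* kmalloc(4096) -> index 12 */
		assert(fls_approx(4097 - 1) == 13);	/* 4097 bytes round up to 8192 */
		return 0;
	}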
@@ -2414,9 +2410,15 @@ static struct kmem_cache *get_slab(size_t size, gfp_t flags)
 
 void *__kmalloc(size_t size, gfp_t flags)
 {
-	struct kmem_cache *s = get_slab(size, flags);
+	struct kmem_cache *s;
 
-	if (ZERO_OR_NULL_PTR(s))
+	if (unlikely(size > PAGE_SIZE / 2))
+		return (void *)__get_free_pages(flags | __GFP_COMP,
+						get_order(size));
+
+	s = get_slab(size, flags);
+
+	if (unlikely(ZERO_OR_NULL_PTR(s)))
 		return s;
 
 	return slab_alloc(s, flags, -1, __builtin_return_address(0));
@@ -2426,9 +2428,15 @@ EXPORT_SYMBOL(__kmalloc);
 #ifdef CONFIG_NUMA
 void *__kmalloc_node(size_t size, gfp_t flags, int node)
 {
-	struct kmem_cache *s = get_slab(size, flags);
+	struct kmem_cache *s;
 
-	if (ZERO_OR_NULL_PTR(s))
+	if (unlikely(size > PAGE_SIZE / 2))
+		return (void *)__get_free_pages(flags | __GFP_COMP,
+						get_order(size));
+
+	s = get_slab(size, flags);
+
+	if (unlikely(ZERO_OR_NULL_PTR(s)))
 		return s;
 
 	return slab_alloc(s, flags, node, __builtin_return_address(0));
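
Note for readers (not part of the patch): get_order() converts a byte count into a buddy-allocator order, the smallest order such that 2^order pages cover the request. A userspace equivalent under the assumption of 4 KiB pages; MY_PAGE_SHIFT and order_for() are invented names:

	#include <assert.h>
	#include <stddef.h>

	#define MY_PAGE_SHIFT 12	/* assumption: 4 KiB pages */

	/* Smallest order with (1 << order) pages >= size bytes. */
	static int order_for(size_t size)
	{
		int order = 0;

		size = (size - 1) >> MY_PAGE_SHIFT;
		while (size) {
			order++;
			size >>= 1;
		}
		return order;
	}

	int main(void)
	{
		assert(order_for(4096) == 0);	/* one page */
		assert(order_for(4097) == 1);	/* two pages */
		assert(order_for(16384) == 2);	/* four pages */
		return 0;
	}

So on 4 KiB pages, __kmalloc(8192, ...) above becomes an order-1 compound-page allocation instead of a slab object.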
@@ -2473,22 +2481,17 @@ EXPORT_SYMBOL(ksize);
 
 void kfree(const void *x)
 {
-	struct kmem_cache *s;
 	struct page *page;
 
-	/*
-	 * This has to be an unsigned comparison. According to Linus
-	 * some gcc version treat a pointer as a signed entity. Then
-	 * this comparison would be true for all "negative" pointers
-	 * (which would cover the whole upper half of the address space).
-	 */
 	if (ZERO_OR_NULL_PTR(x))
 		return;
 
 	page = virt_to_head_page(x);
-	s = page->slab;
-
-	slab_free(s, page, (void *)x, __builtin_return_address(0));
+	if (unlikely(!PageSlab(page))) {
+		put_page(page);
+		return;
+	}
+	slab_free(page->slab, page, (void *)x, __builtin_return_address(0));
 }
 EXPORT_SYMBOL(kfree);
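
Note for readers (not part of the patch): this dispatch works because the pass-through allocations are taken with __GFP_COMP, so each multi-page block is a compound page whose head page does not carry PG_slab; virt_to_head_page() on any address inside the block resolves to that head, and put_page() releases it through the page allocator. A kernel-context sketch of the invariant (illustrative only, error handling omitted):

	void *p = (void *)__get_free_pages(GFP_KERNEL | __GFP_COMP,
					   get_order(8192));
	struct page *page = virt_to_head_page(p + 100);	/* interior pointer */

	WARN_ON(PageSlab(page));	/* pass-through pages are not slab pages */
	put_page(page);			/* the branch kfree() now takes */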

@@ -2602,7 +2605,7 @@ void __init kmem_cache_init(void)
 		caches++;
 	}
 
-	for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
+	for (i = KMALLOC_SHIFT_LOW; i < PAGE_SHIFT; i++) {
 		create_kmalloc_cache(&kmalloc_caches[i],
 			"kmalloc", 1 << i, GFP_KERNEL);
 		caches++;
@@ -2629,7 +2632,7 @@ void __init kmem_cache_init(void)
 	slab_state = UP;
 
 	/* Provide the correct kmalloc names now that the caches are up */
-	for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++)
+	for (i = KMALLOC_SHIFT_LOW; i < PAGE_SHIFT; i++)
 		kmalloc_caches[i]. name =
 			kasprintf(GFP_KERNEL, "kmalloc-%d", 1 << i);
 
@@ -2790,7 +2793,12 @@ static struct notifier_block __cpuinitdata slab_notifier =
 
 void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, void *caller)
 {
-	struct kmem_cache *s = get_slab(size, gfpflags);
+	struct kmem_cache *s;
+
+	if (unlikely(size > PAGE_SIZE / 2))
+		return (void *)__get_free_pages(gfpflags | __GFP_COMP,
+						get_order(size));
+	s = get_slab(size, gfpflags);
 
 	if (ZERO_OR_NULL_PTR(s))
 		return s;
@@ -2801,7 +2809,12 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, void *caller)
 void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 					int node, void *caller)
 {
-	struct kmem_cache *s = get_slab(size, gfpflags);
+	struct kmem_cache *s;
+
+	if (unlikely(size > PAGE_SIZE / 2))
+		return (void *)__get_free_pages(gfpflags | __GFP_COMP,
+						get_order(size));
+	s = get_slab(size, gfpflags);
 
 	if (ZERO_OR_NULL_PTR(s))
 		return s;
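
Net effect, from a caller's perspective (hypothetical kernel snippet; kmalloc(), kfree() and GFP_KERNEL are the real APIs, the sizes are illustrative and assume 4 KiB pages):

	#include <linux/slab.h>

	static void demo(void)
	{
		void *small = kmalloc(64, GFP_KERNEL);	/* kmalloc-64 slab, as before */
		void *big = kmalloc(8192, GFP_KERNEL);	/* > PAGE_SIZE / 2: served
							 * directly by the page
							 * allocator as a compound
							 * page, no slab metadata */

		kfree(small);	/* PageSlab() true  -> slab_free() */
		kfree(big);	/* PageSlab() false -> put_page() */
	}

Requests at or below half a page keep hitting the kmalloc caches unchanged; everything larger skips slab bookkeeping entirely, which is what lets the kmalloc_caches arrays shrink from KMALLOC_SHIFT_HIGH + 1 to PAGE_SHIFT entries.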
