From ca377f1b7f9c7a21083489b2f5f04e19b62b4aa3 Mon Sep 17 00:00:00 2001
From: Frederic Weisbecker
Date: Fri, 20 Feb 2009 17:34:06 +0100
Subject: [PATCH]

--- yaml ---
r: 140781
b: refs/heads/master
c: 91f73f90d97fa67effbb49e0a79c50cf26dfe324
h: refs/heads/master
i:
  140779: bb58e80cb797242f5228ee42c2774754b3e66ced
v: v3
---
 [refs]                         |  2 +-
 trunk/include/linux/slub_def.h | 19 +++----------------
 trunk/init/Kconfig             |  2 +-
 trunk/mm/slub.c                | 16 ++++++++--------
 4 files changed, 13 insertions(+), 26 deletions(-)

diff --git a/[refs] b/[refs]
index 4070b7316222..3adb0b851ecd 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 057685cf57066bc8aaed68de1b1970e12f0075d2
+refs/heads/master: 91f73f90d97fa67effbb49e0a79c50cf26dfe324
diff --git a/trunk/include/linux/slub_def.h b/trunk/include/linux/slub_def.h
index 9e3a575b2c30..6b657f7dcb2b 100644
--- a/trunk/include/linux/slub_def.h
+++ b/trunk/include/linux/slub_def.h
@@ -121,24 +121,11 @@ struct kmem_cache {
 
 #define KMALLOC_SHIFT_LOW ilog2(KMALLOC_MIN_SIZE)
 
-/*
- * Maximum kmalloc object size handled by SLUB. Larger object allocations
- * are passed through to the page allocator. The page allocator "fastpath"
- * is relatively slow so we need this value sufficiently high so that
- * performance critical objects are allocated through the SLUB fastpath.
- *
- * This should be dropped to PAGE_SIZE / 2 once the page allocator
- * "fastpath" becomes competitive with the slab allocator fastpaths.
- */
-#define SLUB_MAX_SIZE (PAGE_SIZE)
-
-#define SLUB_PAGE_SHIFT (PAGE_SHIFT + 1)
-
 /*
  * We keep the general caches in an array of slab caches that are used for
  * 2^x bytes of allocations.
  */
-extern struct kmem_cache kmalloc_caches[SLUB_PAGE_SHIFT];
+extern struct kmem_cache kmalloc_caches[PAGE_SHIFT + 1];
 
 /*
  * Sorry that the following has to be that ugly but some versions of GCC
@@ -244,7 +231,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
 	void *ret;
 
 	if (__builtin_constant_p(size)) {
-		if (size > SLUB_MAX_SIZE)
+		if (size > PAGE_SIZE)
 			return kmalloc_large(size, flags);
 
 		if (!(flags & SLUB_DMA)) {
@@ -288,7 +275,7 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 	void *ret;
 
 	if (__builtin_constant_p(size) &&
-		size <= SLUB_MAX_SIZE && !(flags & SLUB_DMA)) {
+		size <= PAGE_SIZE && !(flags & SLUB_DMA)) {
 			struct kmem_cache *s = kmalloc_slab(size);
 
 		if (!s)
diff --git a/trunk/init/Kconfig b/trunk/init/Kconfig
index f068071fcc5d..26b5bab6f6e8 100644
--- a/trunk/init/Kconfig
+++ b/trunk/init/Kconfig
@@ -945,7 +945,7 @@ config TRACEPOINTS
 
 config MARKERS
 	bool "Activate markers"
-	depends on TRACEPOINTS
+	select TRACEPOINTS
 	help
 	  Place an empty function call at each marker site. Can be
 	  dynamically changed for a probe function.
diff --git a/trunk/mm/slub.c b/trunk/mm/slub.c
index 6de5e07c8850..3525e7b21d19 100644
--- a/trunk/mm/slub.c
+++ b/trunk/mm/slub.c
@@ -2506,7 +2506,7 @@ EXPORT_SYMBOL(kmem_cache_destroy);
  *		Kmalloc subsystem
  *******************************************************************/
 
-struct kmem_cache kmalloc_caches[SLUB_PAGE_SHIFT] __cacheline_aligned;
+struct kmem_cache kmalloc_caches[PAGE_SHIFT + 1] __cacheline_aligned;
 EXPORT_SYMBOL(kmalloc_caches);
 
 static int __init setup_slub_min_order(char *str)
@@ -2568,7 +2568,7 @@ static struct kmem_cache *create_kmalloc_cache(struct kmem_cache *s,
 }
 
 #ifdef CONFIG_ZONE_DMA
-static struct kmem_cache *kmalloc_caches_dma[SLUB_PAGE_SHIFT];
+static struct kmem_cache *kmalloc_caches_dma[PAGE_SHIFT + 1];
 
 static void sysfs_add_func(struct work_struct *w)
 {
@@ -2690,7 +2690,7 @@ void *__kmalloc(size_t size, gfp_t flags)
 	struct kmem_cache *s;
 	void *ret;
 
-	if (unlikely(size > SLUB_MAX_SIZE))
+	if (unlikely(size > PAGE_SIZE))
 		return kmalloc_large(size, flags);
 
 	s = get_slab(size, flags);
@@ -2724,7 +2724,7 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
 	struct kmem_cache *s;
 	void *ret;
 
-	if (unlikely(size > SLUB_MAX_SIZE)) {
+	if (unlikely(size > PAGE_SIZE)) {
 		ret = kmalloc_large_node(size, flags, node);
 
 		kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC,
@@ -3039,7 +3039,7 @@ void __init kmem_cache_init(void)
 		caches++;
 	}
 
-	for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
+	for (i = KMALLOC_SHIFT_LOW; i <= PAGE_SHIFT; i++) {
 		create_kmalloc_cache(&kmalloc_caches[i], "kmalloc", 1 << i, GFP_KERNEL);
 		caches++;
 	}
@@ -3076,7 +3076,7 @@ void __init kmem_cache_init(void)
 	slab_state = UP;
 
 	/* Provide the correct kmalloc names now that the caches are up */
-	for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++)
+	for (i = KMALLOC_SHIFT_LOW; i <= PAGE_SHIFT; i++)
 		kmalloc_caches[i]. name =
 			kasprintf(GFP_KERNEL, "kmalloc-%d", 1 << i);
 
@@ -3277,7 +3277,7 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
 	struct kmem_cache *s;
 	void *ret;
 
-	if (unlikely(size > SLUB_MAX_SIZE))
+	if (unlikely(size > PAGE_SIZE))
 		return kmalloc_large(size, gfpflags);
 
 	s = get_slab(size, gfpflags);
@@ -3300,7 +3300,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 	struct kmem_cache *s;
 	void *ret;
 
-	if (unlikely(size > SLUB_MAX_SIZE))
+	if (unlikely(size > PAGE_SIZE))
 		return kmalloc_large_node(size, gfpflags, node);
 
 	s = get_slab(size, gfpflags);
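
Note for readers (illustration only, not part of the patch): every hunk in slub_def.h and mm/slub.c adjusts the same dispatch rule in SLUB's kmalloc path. Requests up to PAGE_SIZE are served from the kmalloc_caches[] array, indexed by the power-of-two order of the requested size, which is why the array is declared with PAGE_SHIFT + 1 slots and the setup loops run with i <= PAGE_SHIFT; anything larger falls through to the page allocator via kmalloc_large(). The standalone user-space sketch below mimics only that routing decision under the assumption of 4 KiB pages; PAGE_SHIFT_SIM, KMALLOC_SHIFT_LOW_SIM, order_for() and route() are invented names for the example and are not kernel symbols.

/*
 * Illustrative sketch only -- mirrors the size check touched by this patch:
 * requests up to PAGE_SIZE come from a per-order cache array with
 * PAGE_SHIFT + 1 slots, anything larger goes to the page allocator
 * (the kmalloc_large() path in the kernel).  All names are invented
 * for the example; they are not kernel symbols.
 */
#include <stdio.h>
#include <stddef.h>

#define PAGE_SHIFT_SIM		12		/* assume 4 KiB pages */
#define PAGE_SIZE_SIM		(1UL << PAGE_SHIFT_SIM)
#define KMALLOC_SHIFT_LOW_SIM	3		/* smallest cache: 8 bytes */

/* Smallest power-of-two order whose cache can hold the request. */
static int order_for(size_t size)
{
	int order = KMALLOC_SHIFT_LOW_SIM;

	while ((1UL << order) < size)
		order++;
	return order;
}

/* Decide which backend a kmalloc()-style request would hit. */
static void route(size_t size)
{
	if (size > PAGE_SIZE_SIM) {
		printf("%4zu bytes -> page allocator (kmalloc_large path)\n",
		       size);
		return;
	}
	printf("%4zu bytes -> kmalloc_caches[%d] (%lu-byte objects)\n",
	       size, order_for(size), 1UL << order_for(size));
}

int main(void)
{
	route(24);	/* fits the 32-byte cache                   */
	route(4096);	/* exactly PAGE_SIZE: last cache slot       */
	route(8192);	/* over PAGE_SIZE: handed to page allocator */
	return 0;
}

Compiled and run, the sketch prints which backend each request size would use: 24 bytes lands in the 32-byte cache (index 5), 4096 bytes in the final cache slot (index 12 with 4 KiB pages), and 8192 bytes is routed to the page allocator, which is the boundary the PAGE_SIZE comparisons in the hunks above encode.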