From f34bda53e4fea5fa720bd0dee4243dfbb6d150a8 Mon Sep 17 00:00:00 2001
From: Christoph Lameter
Date: Thu, 26 Aug 2010 09:41:19 -0500
Subject: [PATCH]

--- yaml ---
r: 215943
b: refs/heads/master
c: db210e70e5f191710a3b1d09f653b44885d397ea
h: refs/heads/master
i:
  215941: 1511f74d20e61138e8f68a7c8f55bcf1cfb137ad
  215939: b819eb4812a630bba62db621fb329aa019258260
  215935: 497a1c2ca90ac25373b0fe63f5ec5a0e67e6b1be
v: v3
---
 [refs]          |  2 +-
 trunk/mm/slub.c | 16 ++++++++++++++++
 2 files changed, 17 insertions(+), 1 deletion(-)

diff --git a/[refs] b/[refs]
index c6f7252dcdcf..7df7eef04350 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: a016471a16b5c4d4ec8f5221575e603a3d11e5e9
+refs/heads/master: db210e70e5f191710a3b1d09f653b44885d397ea
diff --git a/trunk/mm/slub.c b/trunk/mm/slub.c
index 4c5a76f505ea..05674aac9294 100644
--- a/trunk/mm/slub.c
+++ b/trunk/mm/slub.c
@@ -2103,8 +2103,24 @@ init_kmem_cache_node(struct kmem_cache_node *n, struct kmem_cache *s)
 
 static inline int alloc_kmem_cache_cpus(struct kmem_cache *s)
 {
+#ifdef CONFIG_SMP
+	/*
+	 * Will use reserve that does not require slab operation during
+	 * early boot.
+	 */
 	BUILD_BUG_ON(PERCPU_DYNAMIC_EARLY_SIZE <
 			SLUB_PAGE_SHIFT * sizeof(struct kmem_cache_cpu));
+#else
+	/*
+	 * Special hack for UP mode. allocpercpu() falls back to kmalloc
+	 * operations. So we cannot use that before the slab allocator is up.
+	 * Simply get the smallest possible compound page. The page will be
+	 * released via kfree() when the cpu caches are resized later.
+	 */
+	if (slab_state < UP)
+		s->cpu_slab = (__percpu void *)kmalloc_large(PAGE_SIZE << 1, GFP_NOWAIT);
+	else
+#endif
 	s->cpu_slab = alloc_percpu(struct kmem_cache_cpu);