From dc753b9208d9188eba80540efac953b079c68634 Mon Sep 17 00:00:00 2001
From: Pekka Enberg
Date: Fri, 12 Jun 2009 15:58:59 +0300
Subject: [PATCH]

--- yaml ---
r: 148173
b: refs/heads/master
c: 8429db5c6336083594036c30f49401405d536911
h: refs/heads/master
i:
  148171: 53b95eec37bc5d99b47cf3e144a69c57d3892fb8
v: v3
---
 [refs]          |  2 +-
 trunk/mm/slab.c | 37 +++++++++++++++++++------------------
 2 files changed, 20 insertions(+), 19 deletions(-)

diff --git a/[refs] b/[refs]
index 072a0b2a98ad..ff67ba45ca19 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 7e85ee0c1d15ca5f8bff0f514f158eba1742dd87
+refs/heads/master: 8429db5c6336083594036c30f49401405d536911
diff --git a/trunk/mm/slab.c b/trunk/mm/slab.c
index 453efcb1c980..18e3164de09a 100644
--- a/trunk/mm/slab.c
+++ b/trunk/mm/slab.c
@@ -759,6 +759,7 @@ static enum {
 	NONE,
 	PARTIAL_AC,
 	PARTIAL_L3,
+	EARLY,
 	FULL
 } g_cpucache_up;
 
@@ -767,7 +768,7 @@ static enum {
  */
 int slab_is_available(void)
 {
-	return g_cpucache_up == FULL;
+	return g_cpucache_up >= EARLY;
 }
 
 static DEFINE_PER_CPU(struct delayed_work, reap_work);
@@ -1631,19 +1632,27 @@ void __init kmem_cache_init(void)
 		}
 	}
 
-	/* 6) resize the head arrays to their final sizes */
-	{
-		struct kmem_cache *cachep;
-		mutex_lock(&cache_chain_mutex);
-		list_for_each_entry(cachep, &cache_chain, next)
-			if (enable_cpucache(cachep, GFP_NOWAIT))
-				BUG();
-		mutex_unlock(&cache_chain_mutex);
-	}
+	g_cpucache_up = EARLY;
 
 	/* Annotate slab for lockdep -- annotate the malloc caches */
 	init_lock_keys();
+}
+
+void __init kmem_cache_init_late(void)
+{
+	struct kmem_cache *cachep;
+
+	/*
+	 * Interrupts are enabled now so all GFP allocations are safe.
+	 */
+	slab_gfp_mask = __GFP_BITS_MASK;
+	/* 6) resize the head arrays to their final sizes */
+	mutex_lock(&cache_chain_mutex);
+	list_for_each_entry(cachep, &cache_chain, next)
+		if (enable_cpucache(cachep, GFP_NOWAIT))
+			BUG();
+	mutex_unlock(&cache_chain_mutex);
 
 	/* Done! */
 	g_cpucache_up = FULL;
@@ -1660,14 +1669,6 @@ void __init kmem_cache_init(void)
 	 */
 }
 
-void __init kmem_cache_init_late(void)
-{
-	/*
-	 * Interrupts are enabled now so all GFP allocations are safe.
-	 */
-	slab_gfp_mask = __GFP_BITS_MASK;
-}
-
 static int __init cpucache_init(void)
 {
 	int cpu;
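
--
Note on the two-stage init this patch creates: kmem_cache_init() now
stops at the new EARLY state (slab usable, but per-CPU head arrays
still at their small boot-time sizes), and the head-array resize moves
into kmem_cache_init_late(), which runs once interrupts are enabled.
The sketch below shows the intended calling sequence from
start_kernel(); it is a simplified illustration, not part of this
patch, and the surrounding code in init/main.c is abbreviated.

	asmlinkage void __init start_kernel(void)
	{
		/* ... early boot, interrupts still disabled ... */

		/*
		 * Stage 1: after this, slab_is_available() returns true
		 * (g_cpucache_up >= EARLY) and kmalloc() works, but the
		 * per-CPU head arrays keep their boot-time sizes.
		 */
		kmem_cache_init();

		/* ... */
		local_irq_enable();
		/* ... */

		/*
		 * Stage 2: interrupts are on, so enable_cpucache() can
		 * safely allocate while resizing the head arrays to
		 * their final sizes (g_cpucache_up becomes FULL).
		 */
		kmem_cache_init_late();

		/* ... rest of boot ... */
	}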