From dfc44c7b93bcc57184d94103c63d86cb01fc1d90 Mon Sep 17 00:00:00 2001
From: Christoph Lameter
Date: Sun, 6 May 2007 14:50:17 -0700
Subject: [PATCH]

--- yaml ---
r: 54242
b: refs/heads/master
c: cfce66047f1893cb7d3abb0d53e65cbbd8d605f0
h: refs/heads/master
v: v3
---
 [refs]                    | 2 +-
 trunk/include/linux/gfp.h | 3 +--
 trunk/mm/slab.c           | 6 ++----
 trunk/mm/slub.c           | 3 ---
 4 files changed, 4 insertions(+), 10 deletions(-)

diff --git a/[refs] b/[refs]
index 5f65af1b3fb3..f5d85797c1d4 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 4f104934591ed98534b3a4c3d17d972b790e9c42
+refs/heads/master: cfce66047f1893cb7d3abb0d53e65cbbd8d605f0
diff --git a/trunk/include/linux/gfp.h b/trunk/include/linux/gfp.h
index 2a7d15bcde46..97a36c3d96e2 100644
--- a/trunk/include/linux/gfp.h
+++ b/trunk/include/linux/gfp.h
@@ -40,7 +40,6 @@ struct vm_area_struct;
 #define __GFP_REPEAT	((__force gfp_t)0x400u)	/* Retry the allocation.  Might fail */
 #define __GFP_NOFAIL	((__force gfp_t)0x800u)	/* Retry for ever.  Cannot fail */
 #define __GFP_NORETRY	((__force gfp_t)0x1000u)/* Do not retry.  Might fail */
-#define __GFP_NO_GROW	((__force gfp_t)0x2000u)/* Slab internal usage */
 #define __GFP_COMP	((__force gfp_t)0x4000u)/* Add compound page metadata */
 #define __GFP_ZERO	((__force gfp_t)0x8000u)/* Return zeroed page on success */
 #define __GFP_NOMEMALLOC ((__force gfp_t)0x10000u) /* Don't use emergency reserves */
@@ -53,7 +52,7 @@ struct vm_area_struct;
 /* if you forget to add the bitmask here kernel will crash, period */
 #define GFP_LEVEL_MASK (__GFP_WAIT|__GFP_HIGH|__GFP_IO|__GFP_FS| \
 			__GFP_COLD|__GFP_NOWARN|__GFP_REPEAT| \
-			__GFP_NOFAIL|__GFP_NORETRY|__GFP_NO_GROW|__GFP_COMP| \
+			__GFP_NOFAIL|__GFP_NORETRY|__GFP_COMP| \
 			__GFP_NOMEMALLOC|__GFP_HARDWALL|__GFP_THISNODE)
 
 /* This equals 0, but use constants in case they ever change */
diff --git a/trunk/mm/slab.c b/trunk/mm/slab.c
index 52ecf7599a7b..5920a412b377 100644
--- a/trunk/mm/slab.c
+++ b/trunk/mm/slab.c
@@ -2746,9 +2746,7 @@ static int cache_grow(struct kmem_cache *cachep,
 	 * Be lazy and only check for valid flags here, keeping it out of the
 	 * critical path in kmem_cache_alloc().
 	 */
-	BUG_ON(flags & ~(GFP_DMA | GFP_LEVEL_MASK | __GFP_NO_GROW));
-	if (flags & __GFP_NO_GROW)
-		return 0;
+	BUG_ON(flags & ~(GFP_DMA | GFP_LEVEL_MASK));
 
 	ctor_flags = SLAB_CTOR_CONSTRUCTOR;
 	local_flags = (flags & GFP_LEVEL_MASK);
@@ -3252,7 +3250,7 @@ static void *fallback_alloc(struct kmem_cache *cache, gfp_t flags)
 					flags | GFP_THISNODE, nid);
 	}
 
-	if (!obj && !(flags & __GFP_NO_GROW)) {
+	if (!obj) {
 		/*
 		 * This allocation will be performed within the constraints
 		 * of the current cpuset / memory policy requirements.
diff --git a/trunk/mm/slub.c b/trunk/mm/slub.c
index 347e44821bcb..a6323484dd3e 100644
--- a/trunk/mm/slub.c
+++ b/trunk/mm/slub.c
@@ -815,9 +815,6 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
 	void *last;
 	void *p;
 
-	if (flags & __GFP_NO_GROW)
-		return NULL;
-
 	BUG_ON(flags & ~(GFP_DMA | GFP_LEVEL_MASK));
 
 	if (flags & __GFP_WAIT)
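
After the patch, the only remaining guard in cache_grow() and new_slab() is the single BUG_ON mask check: any gfp bit outside GFP_DMA | GFP_LEVEL_MASK trips it, which is how a stray __GFP_NO_GROW caller would now be caught at runtime. Below is a minimal userspace sketch of that validation pattern; the FAKE_* constants are hypothetical stand-ins for illustration, not the real gfp.h values, and assert() plays the role of BUG_ON.

	/*
	 * Illustrative userspace analogue (not kernel code) of the mask
	 * check left behind by the patch. Flag values are made up.
	 */
	#include <assert.h>
	#include <stdio.h>

	typedef unsigned int gfp_t;

	#define FAKE_GFP_WAIT    0x10u
	#define FAKE_GFP_IO      0x40u
	#define FAKE_GFP_FS      0x80u
	#define FAKE_GFP_DMA     0x01u
	#define FAKE_GFP_NO_GROW 0x2000u	/* the bit the patch removes */

	/* stand-in for GFP_LEVEL_MASK: bits the slab may pass to the page allocator */
	#define FAKE_GFP_LEVEL_MASK (FAKE_GFP_WAIT | FAKE_GFP_IO | FAKE_GFP_FS)

	static void check_grow_flags(gfp_t flags)
	{
		/* mirrors BUG_ON(flags & ~(GFP_DMA | GFP_LEVEL_MASK)) */
		assert(!(flags & ~(FAKE_GFP_DMA | FAKE_GFP_LEVEL_MASK)));
		printf("flags 0x%x accepted\n", flags);
	}

	int main(void)
	{
		check_grow_flags(FAKE_GFP_WAIT | FAKE_GFP_IO | FAKE_GFP_FS);	/* passes */
		check_grow_flags(FAKE_GFP_WAIT | FAKE_GFP_NO_GROW);		/* aborts */
		return 0;
	}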