Skip to content

Commit

Permalink
mm: slab: remove ZONE_DMA_FLAG
Browse files Browse the repository at this point in the history
Now that we have the IS_ENABLED helper to check whether a Kconfig option is
enabled, ZONE_DMA_FLAG is no longer useful.

Furthermore, according to Johannes Weiner's comment [1], the use of
ZONE_DMA_FLAG in slab is pointless, so remove it; ORing the passed-in flags
with the cache's gfp flags is already done in kmem_getpages().

[1] https://lkml.org/lkml/2014/9/25/553

Link: http://lkml.kernel.org/r/1462381297-11009-1-git-send-email-yang.shi@linaro.org
Signed-off-by: Yang Shi <yang.shi@linaro.org>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
  • Loading branch information
Yang Shi authored and Linus Torvalds committed May 20, 2016
1 parent c7ce4f6 commit a3187e4
Show file tree
Hide file tree
Showing 2 changed files with 1 addition and 27 deletions.
5 changes: 0 additions & 5 deletions mm/Kconfig
Original file line number Diff line number Diff line change
Expand Up @@ -268,11 +268,6 @@ config ARCH_ENABLE_HUGEPAGE_MIGRATION
config PHYS_ADDR_T_64BIT
def_bool 64BIT || ARCH_PHYS_ADDR_T_64BIT

# Helper symbol mirroring ZONE_DMA as an int: 1 when ZONE_DMA is enabled,
# 0 otherwise. Deleted by this commit — IS_ENABLED(CONFIG_ZONE_DMA) makes
# a separate flag symbol redundant.
config ZONE_DMA_FLAG
int
default "0" if !ZONE_DMA
default "1"

config BOUNCE
bool "Enable bounce buffers"
default y
Expand Down
23 changes: 1 addition & 22 deletions mm/slab.c
Original file line number Diff line number Diff line change
Expand Up @@ -2236,7 +2236,7 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
cachep->freelist_size = cachep->num * sizeof(freelist_idx_t);
cachep->flags = flags;
cachep->allocflags = __GFP_COMP;
if (CONFIG_ZONE_DMA_FLAG && (flags & SLAB_CACHE_DMA))
if (flags & SLAB_CACHE_DMA)
cachep->allocflags |= GFP_DMA;
cachep->size = size;
cachep->reciprocal_buffer_size = reciprocal_value(size);
Expand Down Expand Up @@ -2664,16 +2664,6 @@ static void cache_init_objs(struct kmem_cache *cachep,
}
}

/*
 * Debug sanity check (removed by this commit): verify that the GFP_DMA bit
 * of an allocation request agrees with the cache's configured allocflags —
 * a DMA request must target a DMA cache and vice versa. Compiled out when
 * CONFIG_ZONE_DMA_FLAG is 0 (i.e. ZONE_DMA not configured).
 */
static void kmem_flagcheck(struct kmem_cache *cachep, gfp_t flags)
{
if (CONFIG_ZONE_DMA_FLAG) {
if (flags & GFP_DMA)
/* cache must have been created with SLAB_CACHE_DMA */
BUG_ON(!(cachep->allocflags & GFP_DMA));
else
/* non-DMA request must not hit a DMA cache */
BUG_ON(cachep->allocflags & GFP_DMA);
}
}

static void *slab_get_obj(struct kmem_cache *cachep, struct page *page)
{
void *objp;
Expand Down Expand Up @@ -2752,14 +2742,6 @@ static struct page *cache_grow_begin(struct kmem_cache *cachep,
if (gfpflags_allow_blocking(local_flags))
local_irq_enable();

/*
* The test for missing atomic flag is performed here, rather than
* the more obvious place, simply to reduce the critical path length
* in kmem_cache_alloc(). If a caller is seriously mis-behaving they
* will eventually be caught here (where it matters).
*/
kmem_flagcheck(cachep, flags);

/*
* Get mem for the objs. Attempt to allocate a physical page from
* 'nodeid'.
Expand Down Expand Up @@ -3145,9 +3127,6 @@ static inline void cache_alloc_debugcheck_before(struct kmem_cache *cachep,
gfp_t flags)
{
might_sleep_if(gfpflags_allow_blocking(flags));
#if DEBUG
kmem_flagcheck(cachep, flags);
#endif
}

#if DEBUG
Expand Down

0 comments on commit a3187e4

Please sign in to comment.