diff --git a/[refs] b/[refs]
index e767444c888e..0eed93f8cf02 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: d0725992c8a6fb63a16bc9e8b2a50094cc4db3cd
+refs/heads/master: bd453cd487ac7116a269517779b83c1061debbec
diff --git a/trunk/mm/page_alloc.c b/trunk/mm/page_alloc.c
index aecc9cdfdfce..5d714f8fb303 100644
--- a/trunk/mm/page_alloc.c
+++ b/trunk/mm/page_alloc.c
@@ -1153,10 +1153,10 @@ struct page *buffered_rmqueue(struct zone *preferred_zone,
 		 * properly detect and handle allocation failures.
 		 *
 		 * We most definitely don't want callers attempting to
-		 * allocate greater than single-page units with
+		 * allocate greater than order-1 page units with
 		 * __GFP_NOFAIL.
 		 */
-		WARN_ON_ONCE(order > 0);
+		WARN_ON_ONCE(order > 1);
 	}
 	spin_lock_irqsave(&zone->lock, flags);
 	page = __rmqueue(zone, order, migratetype);
diff --git a/trunk/mm/slub.c b/trunk/mm/slub.c
index ce62b770e2fc..819f056b39c6 100644
--- a/trunk/mm/slub.c
+++ b/trunk/mm/slub.c
@@ -1085,11 +1085,17 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 {
 	struct page *page;
 	struct kmem_cache_order_objects oo = s->oo;
+	gfp_t alloc_gfp;
 
 	flags |= s->allocflags;
 
-	page = alloc_slab_page(flags | __GFP_NOWARN | __GFP_NORETRY, node,
-									oo);
+	/*
+	 * Let the initial higher-order allocation fail under memory pressure
+	 * so we fall-back to the minimum order allocation.
+	 */
+	alloc_gfp = (flags | __GFP_NOWARN | __GFP_NORETRY) & ~__GFP_NOFAIL;
+
+	page = alloc_slab_page(alloc_gfp, node, oo);
 	if (unlikely(!page)) {
 		oo = s->min;
 		/*
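
Note on the mm/slub.c hunk above: allocate_slab() now masks __GFP_NOFAIL out of the first, opportunistic higher-order attempt (which also carries __GFP_NOWARN | __GFP_NORETRY, so it fails fast and quietly under memory pressure), and only the later fallback to the s->min order keeps the caller's original flags. The plain-C sketch below illustrates that try-big-then-fall-back pattern outside the kernel; the GFP_* macros, fake_alloc_pages() and allocate_slab_like() are hypothetical stand-ins for illustration only, not kernel APIs.

/*
 * Userspace sketch of the allocation strategy in the slub.c hunk.
 * Flag values and helpers are made up; only the control flow mirrors
 * the patch: try the preferred order without the "no fail" flag, then
 * fall back to the minimum order with the caller's original flags.
 */
#include <stdio.h>
#include <stdlib.h>

#define GFP_NOWARN  0x1u
#define GFP_NORETRY 0x2u
#define GFP_NOFAIL  0x4u

/* Pretend contiguous memory is scarce: only order <= 1 succeeds. */
static void *fake_alloc_pages(unsigned int gfp, unsigned int order)
{
	(void)gfp;
	if (order > 1)
		return NULL;	/* higher orders fail under pressure */
	return malloc((size_t)4096 << order);
}

static void *allocate_slab_like(unsigned int flags,
				unsigned int oo_order, unsigned int min_order)
{
	/* First try: best-case order, quietly, and never with "no fail". */
	unsigned int alloc_gfp = (flags | GFP_NOWARN | GFP_NORETRY) & ~GFP_NOFAIL;
	void *page = fake_alloc_pages(alloc_gfp, oo_order);

	if (!page) {
		/* Fallback: minimum order, with the caller's original flags. */
		page = fake_alloc_pages(flags, min_order);
	}
	return page;
}

int main(void)
{
	void *p = allocate_slab_like(GFP_NOFAIL, 3, 1);

	printf("got %s\n", p ? "fallback-order slab" : "nothing");
	free(p);
	return 0;
}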