diff --git a/[refs] b/[refs]
index 738d92b0f64d..9f23264ab395 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: c82e6d450fda56cb2d4f68534173d3cd11b32f9f
+refs/heads/master: 4923abf9f1a4c1864af438a57c1f3686548230e9
diff --git a/trunk/mm/page_alloc.c b/trunk/mm/page_alloc.c
index aecc9cdfdfce..5d714f8fb303 100644
--- a/trunk/mm/page_alloc.c
+++ b/trunk/mm/page_alloc.c
@@ -1153,10 +1153,10 @@ struct page *buffered_rmqueue(struct zone *preferred_zone,
 			 * properly detect and handle allocation failures.
 			 *
 			 * We most definitely don't want callers attempting to
-			 * allocate greater than single-page units with
+			 * allocate greater than order-1 page units with
 			 * __GFP_NOFAIL.
 			 */
-			WARN_ON_ONCE(order > 0);
+			WARN_ON_ONCE(order > 1);
 		}
 		spin_lock_irqsave(&zone->lock, flags);
 		page = __rmqueue(zone, order, migratetype);
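
For context only (not part of the patch): a minimal, hypothetical caller sketch of the kind of allocation the relaxed check now targets. alloc_pages(), GFP_KERNEL and __GFP_NOFAIL are existing kernel APIs; the helper name grab_pages_nofail() is made up for illustration. After this change an order-0 or order-1 __GFP_NOFAIL allocation no longer triggers the warning in buffered_rmqueue(), while anything larger still does.

	/* Hypothetical illustration only -- not part of the patch above. */
	#include <linux/gfp.h>
	#include <linux/mm_types.h>

	static struct page *grab_pages_nofail(void)
	{
		/*
		 * order = 2 asks for 1 << 2 = 4 physically contiguous pages.
		 * Combined with __GFP_NOFAIL the page allocator may loop
		 * indefinitely trying to satisfy the request, which is why
		 * buffered_rmqueue() warns once for order > 1 after this
		 * patch (it previously warned for any order > 0).
		 */
		return alloc_pages(GFP_KERNEL | __GFP_NOFAIL, 2);
	}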