From 69d87ad98fc1eeb13cda3afba5897391aa927e16 Mon Sep 17 00:00:00 2001
From: Mel Gorman
Date: Mon, 17 Dec 2007 16:20:05 -0800
Subject: [PATCH]

--- yaml ---
r: 74984
b: refs/heads/master
c: 81eabcbe0b991ddef5216f30ae91c4b226d54b6d
h: refs/heads/master
v: v3
---
 [refs]                |  2 +-
 trunk/mm/page_alloc.c | 11 +++++++++++
 2 files changed, 12 insertions(+), 1 deletion(-)

diff --git a/[refs] b/[refs]
index 15786a9b1b8a..7d65639043b1 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 8d936626dd00bd47cf574add458fea8a23b79611
+refs/heads/master: 81eabcbe0b991ddef5216f30ae91c4b226d54b6d
diff --git a/trunk/mm/page_alloc.c b/trunk/mm/page_alloc.c
index b5a58d476c1a..d73bfad1c32f 100644
--- a/trunk/mm/page_alloc.c
+++ b/trunk/mm/page_alloc.c
@@ -847,8 +847,19 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
 		struct page *page = __rmqueue(zone, order, migratetype);
 		if (unlikely(page == NULL))
 			break;
+
+		/*
+		 * Split buddy pages returned by expand() are received here
+		 * in physical page order. The page is added to the caller's
+		 * list and the list head then moves forward. From the
+		 * caller's perspective, the linked list is ordered by page
+		 * number under some conditions. This is useful for IO
+		 * devices that can merge IO requests if the physical pages
+		 * are ordered properly.
+		 */
 		list_add(&page->lru, list);
 		set_page_private(page, migratetype);
+		list = &page->lru;
 	}
 	spin_unlock(&zone->lock);
 	return i;