diff --git a/[refs] b/[refs]
index 15786a9b1b8a..7d65639043b1 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 8d936626dd00bd47cf574add458fea8a23b79611
+refs/heads/master: 81eabcbe0b991ddef5216f30ae91c4b226d54b6d
diff --git a/trunk/mm/page_alloc.c b/trunk/mm/page_alloc.c
index b5a58d476c1a..d73bfad1c32f 100644
--- a/trunk/mm/page_alloc.c
+++ b/trunk/mm/page_alloc.c
@@ -847,8 +847,19 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
 		struct page *page = __rmqueue(zone, order, migratetype);
 		if (unlikely(page == NULL))
 			break;
+
+		/*
+		 * Split buddy pages returned by expand() are received here
+		 * in physical page order. The page is added to the caller's
+		 * list and the list head then moves forward. From the
+		 * caller's perspective, the linked list is ordered by page
+		 * number under some conditions. This is useful for IO
+		 * devices that can merge IO requests if the physical pages
+		 * are ordered properly.
+		 */
 		list_add(&page->lru, list);
 		set_page_private(page, migratetype);
+		list = &page->lru;
 	}
 	spin_unlock(&zone->lock);
 	return i;
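
The comment added by the patch explains why the insertion point is advanced after each list_add(). Below is a minimal, self-contained sketch (not kernel code; all names are local to the example) that mimics the kernel's struct list_head and list_add() to show the effect: with a fixed head, successive adds land the pages in reverse order, while advancing the insertion point, as the patch's "list = &page->lru" line does, keeps them in ascending physical order, which is what lets IO devices merge requests for adjacent pages.

#include <stddef.h>
#include <stdio.h>

/* Minimal circular doubly-linked list, modelled on the kernel's list_head. */
struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

/* Insert 'entry' immediately after 'head', like the kernel's list_add(). */
static void list_add(struct list_head *entry, struct list_head *head)
{
	entry->prev = head;
	entry->next = head->next;
	head->next->prev = entry;
	head->next = entry;
}

/* Stand-in for struct page: just a page frame number plus the lru link. */
struct page { unsigned long pfn; struct list_head lru; };

/* Link n pages (already in ascending pfn order, as expand() hands them
 * back) onto 'head', optionally advancing the insertion point each time. */
static void fill_list(struct page *pages, int n, struct list_head *head,
                      int advance)
{
	struct list_head *list = head;
	int i;

	INIT_LIST_HEAD(head);
	for (i = 0; i < n; i++) {
		list_add(&pages[i].lru, list);
		if (advance)
			list = &pages[i].lru;  /* the "list = &page->lru" step */
	}
}

static void print_list(const char *label, struct list_head *head)
{
	struct list_head *pos;

	printf("%s:", label);
	for (pos = head->next; pos != head; pos = pos->next) {
		struct page *pg = (struct page *)
			((char *)pos - offsetof(struct page, lru));
		printf(" %lu", pg->pfn);
	}
	printf("\n");
}

int main(void)
{
	struct page pages[4] = {
		{ .pfn = 10 }, { .pfn = 11 }, { .pfn = 12 }, { .pfn = 13 }
	};
	struct list_head head;

	fill_list(pages, 4, &head, 0);
	print_list("fixed head     ", &head);  /* prints: 13 12 11 10 */

	fill_list(pages, 4, &head, 1);
	print_list("advancing point", &head);  /* prints: 10 11 12 13 */
	return 0;
}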