From bf213f212792935a3332a32221c921fe7fe76b3c Mon Sep 17 00:00:00 2001
From: Mel Gorman
Date: Tue, 16 Jun 2009 15:32:13 -0700
Subject: [PATCH]

--- yaml ---
r: 151542
b: refs/heads/master
c: f2260e6b1f4eba0f5b5906795117791b5c660154
h: refs/heads/master
v: v3
---
 [refs]                |  2 +-
 trunk/mm/page_alloc.c | 13 +++++++------
 2 files changed, 8 insertions(+), 7 deletions(-)

diff --git a/[refs] b/[refs]
index 6c610eb98983..44dfc2652a25 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 418589663d6011de9006425b6c5721e1544fb47a
+refs/heads/master: f2260e6b1f4eba0f5b5906795117791b5c660154
diff --git a/trunk/mm/page_alloc.c b/trunk/mm/page_alloc.c
index abe26003124d..d56e377ad085 100644
--- a/trunk/mm/page_alloc.c
+++ b/trunk/mm/page_alloc.c
@@ -456,7 +456,6 @@ static inline void __free_one_page(struct page *page,
 		int migratetype)
 {
 	unsigned long page_idx;
-	int order_size = 1 << order;
 
 	if (unlikely(PageCompound(page)))
 		if (unlikely(destroy_compound_page(page, order)))
@@ -466,10 +465,9 @@ static inline void __free_one_page(struct page *page,
 
 	page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1);
 
-	VM_BUG_ON(page_idx & (order_size - 1));
+	VM_BUG_ON(page_idx & ((1 << order) - 1));
 	VM_BUG_ON(bad_range(zone, page));
 
-	__mod_zone_page_state(zone, NR_FREE_PAGES, order_size);
 	while (order < MAX_ORDER-1) {
 		unsigned long combined_idx;
 		struct page *buddy;
@@ -524,6 +522,8 @@ static void free_pages_bulk(struct zone *zone, int count,
 	spin_lock(&zone->lock);
 	zone_clear_flag(zone, ZONE_ALL_UNRECLAIMABLE);
 	zone->pages_scanned = 0;
+
+	__mod_zone_page_state(zone, NR_FREE_PAGES, count << order);
 	while (count--) {
 		struct page *page;
 
@@ -542,6 +542,8 @@ static void free_one_page(struct zone *zone, struct page *page, int order,
 	spin_lock(&zone->lock);
 	zone_clear_flag(zone, ZONE_ALL_UNRECLAIMABLE);
 	zone->pages_scanned = 0;
+
+	__mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order);
 	__free_one_page(page, zone, order, migratetype);
 	spin_unlock(&zone->lock);
 }
@@ -686,7 +688,6 @@ struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
 		list_del(&page->lru);
 		rmv_page_order(page);
 		area->nr_free--;
-		__mod_zone_page_state(zone, NR_FREE_PAGES, - (1UL << order));
 		expand(zone, page, order, current_order, area, migratetype);
 		return page;
 	}
@@ -826,8 +827,6 @@ __rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
 			/* Remove the page from the freelists */
 			list_del(&page->lru);
 			rmv_page_order(page);
-			__mod_zone_page_state(zone, NR_FREE_PAGES,
-							-(1UL << order));
 
 			if (current_order == pageblock_order)
 				set_pageblock_migratetype(page,
@@ -900,6 +899,7 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
 		set_page_private(page, migratetype);
 		list = &page->lru;
 	}
+	__mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order));
 	spin_unlock(&zone->lock);
 	return i;
 }
@@ -1129,6 +1129,7 @@ struct page *buffered_rmqueue(struct zone *preferred_zone,
 	} else {
 		spin_lock_irqsave(&zone->lock, flags);
 		page = __rmqueue(zone, order, migratetype);
+		__mod_zone_page_state(zone, NR_FREE_PAGES, -(1 << order));
 		spin_unlock(&zone->lock);
 		if (!page)
 			goto failed;
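
For illustration only, not part of the patch: the hunks above move the NR_FREE_PAGES update out of the per-page paths (__free_one_page, __rmqueue_smallest, __rmqueue_fallback) and into callers that already hold zone->lock, so the counter is adjusted once per batch rather than once per page. Below is a minimal userspace C sketch of that batching pattern; the names (toy_zone, nr_free, free_bulk) are made up for the example and a pthread mutex stands in for zone->lock.

/*
 * Toy model of the accounting change: the per-page helper does no counter
 * work; the batch-level caller adjusts the counter once under the lock.
 */
#include <pthread.h>
#include <stdio.h>

struct toy_zone {
	pthread_mutex_t lock;	/* stands in for zone->lock */
	long nr_free;		/* stands in for NR_FREE_PAGES */
};

/* Per-page work (buddy merging etc. in the real code); no counter update. */
static void free_one(struct toy_zone *zone, unsigned int order)
{
	(void)zone;
	(void)order;
}

/* Batch-level caller: one counter update for the whole batch. */
static void free_bulk(struct toy_zone *zone, int count, unsigned int order)
{
	pthread_mutex_lock(&zone->lock);
	zone->nr_free += (long)count << order;	/* once, not 'count' times */
	while (count--)
		free_one(zone, order);
	pthread_mutex_unlock(&zone->lock);
}

int main(void)
{
	struct toy_zone zone = { PTHREAD_MUTEX_INITIALIZER, 0 };

	free_bulk(&zone, 16, 0);	/* 16 order-0 "pages" */
	free_bulk(&zone, 4, 2);		/* 4 order-2 "pages" = 16 base pages */
	printf("nr_free = %ld\n", zone.nr_free);	/* prints 32 */
	return 0;
}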