From ca6a51fc5c58ff30b63a160fd62488ff7d657d95 Mon Sep 17 00:00:00 2001
From: Nick Piggin
Date: Sun, 13 Nov 2005 16:06:45 -0800
Subject: [PATCH]

--- yaml ---
r: 14044
b: refs/heads/master
c: 669ed17521b9b78cdbeac8a53c30599aca9527ce
h: refs/heads/master
v: v3
---
 [refs]                |  2 +-
 trunk/mm/page_alloc.c | 27 ++++++++++++++-------------
 2 files changed, 15 insertions(+), 14 deletions(-)

diff --git a/[refs] b/[refs]
index 26bc1db2dd3e..6f1f0975d2cf 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 2d6c666e8704cf06267f29a4fa3d2cf823469c38
+refs/heads/master: 669ed17521b9b78cdbeac8a53c30599aca9527ce
diff --git a/trunk/mm/page_alloc.c b/trunk/mm/page_alloc.c
index 845b91749a42..3c5cf664abd2 100644
--- a/trunk/mm/page_alloc.c
+++ b/trunk/mm/page_alloc.c
@@ -2397,13 +2397,18 @@ void setup_per_zone_pages_min(void)
 	}
 
 	for_each_zone(zone) {
+		unsigned long tmp;
 		spin_lock_irqsave(&zone->lru_lock, flags);
+		tmp = (pages_min * zone->present_pages) / lowmem_pages;
 		if (is_highmem(zone)) {
 			/*
-			 * Often, highmem doesn't need to reserve any pages.
-			 * But the pages_min/low/high values are also used for
-			 * batching up page reclaim activity so we need a
-			 * decent value here.
+			 * __GFP_HIGH and PF_MEMALLOC allocations usually don't
+			 * need highmem pages, so cap pages_min to a small
+			 * value here.
+			 *
+			 * The (pages_high-pages_low) and (pages_low-pages_min)
+			 * deltas controls asynch page reclaim, and so should
+			 * not be capped for highmem.
 			 */
 			int min_pages;
 
@@ -2414,19 +2419,15 @@ void setup_per_zone_pages_min(void)
 				min_pages = 128;
 			zone->pages_min = min_pages;
 		} else {
-			/* if it's a lowmem zone, reserve a number of pages
+			/*
+			 * If it's a lowmem zone, reserve a number of pages
 			 * proportionate to the zone's size.
 			 */
-			zone->pages_min = (pages_min * zone->present_pages) /
-					  lowmem_pages;
+			zone->pages_min = tmp;
 		}
 
-		/*
-		 * When interpreting these watermarks, just keep in mind that:
-		 * zone->pages_min == (zone->pages_min * 4) / 4;
-		 */
-		zone->pages_low = (zone->pages_min * 5) / 4;
-		zone->pages_high = (zone->pages_min * 6) / 4;
+		zone->pages_low = zone->pages_min + tmp / 4;
+		zone->pages_high = zone->pages_min + tmp / 2;
 		spin_unlock_irqrestore(&zone->lru_lock, flags);
 	}
 }
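
A minimal standalone sketch of the watermark arithmetic the hunks above introduce, for illustration only: the values chosen for pages_min, lowmem_pages and present_pages are assumptions, and the highmem clamp is simplified to the 128-page cap visible in the hunk context. The point it demonstrates is that pages_low and pages_high keep deltas proportional to the zone's share (tmp) even when pages_min itself is capped for a highmem zone.

#include <stdio.h>

/* Illustrative only: the numbers and the simplified highmem clamp below are
 * assumptions for this sketch, not values taken from the kernel. */
int main(void)
{
	unsigned long pages_min = 1024;      /* assumed global pages_min */
	unsigned long lowmem_pages = 262144; /* assumed total lowmem pages */
	unsigned long present_pages = 65536; /* assumed size of this zone */
	int highmem = 0;                     /* treat the zone as lowmem */

	/* the zone's proportional share of pages_min, as computed in the patch */
	unsigned long tmp = (pages_min * present_pages) / lowmem_pages;
	unsigned long min, low, high;

	if (highmem)
		min = tmp > 128 ? 128 : tmp; /* cap pages_min for highmem */
	else
		min = tmp;                   /* lowmem: proportional reserve */

	/* low/high deltas stay proportional to the zone size (tmp),
	 * regardless of whether pages_min was capped */
	low = min + tmp / 4;
	high = min + tmp / 2;

	printf("pages_min=%lu pages_low=%lu pages_high=%lu\n", min, low, high);
	return 0;
}

With the assumed numbers, tmp works out to 256, giving pages_min=256, pages_low=320, pages_high=384 for the lowmem case; flipping the zone to highmem caps pages_min at 128 while the low/high offsets stay at 64 and 128 pages.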