From 21f80ac7543ddd0959b0fda0a6fa3015227cd2b6 Mon Sep 17 00:00:00 2001
From: Christoph Lameter
Date: Mon, 25 Sep 2006 23:31:51 -0700
Subject: [PATCH]

--- yaml ---
r: 35626
b: refs/heads/master
c: 8417bba4b151346ed475fcc923693c9e3be89063
h: refs/heads/master
v: v3
---
 [refs]                       | 2 +-
 trunk/include/linux/mmzone.h | 2 +-
 trunk/mm/page_alloc.c        | 4 ++--
 trunk/mm/vmscan.c            | 2 +-
 4 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/[refs] b/[refs]
index dd3dcd723b79..66c678caea14 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: d00bcc98d7ec2c87391c9d9e1cca519ef64d33ef
+refs/heads/master: 8417bba4b151346ed475fcc923693c9e3be89063
diff --git a/trunk/include/linux/mmzone.h b/trunk/include/linux/mmzone.h
index 7fe317164b73..a703527e2b45 100644
--- a/trunk/include/linux/mmzone.h
+++ b/trunk/include/linux/mmzone.h
@@ -169,7 +169,7 @@ struct zone {
 	/*
 	 * zone reclaim becomes active if more unmapped pages exist.
 	 */
-	unsigned long		min_unmapped_ratio;
+	unsigned long		min_unmapped_pages;
 	struct per_cpu_pageset	*pageset[NR_CPUS];
 #else
 	struct per_cpu_pageset	pageset[NR_CPUS];
diff --git a/trunk/mm/page_alloc.c b/trunk/mm/page_alloc.c
index f7ea020c23ea..5da6bc4e0a6b 100644
--- a/trunk/mm/page_alloc.c
+++ b/trunk/mm/page_alloc.c
@@ -2002,7 +2002,7 @@ static void __meminit free_area_init_core(struct pglist_data *pgdat,
 		zone->spanned_pages = size;
 		zone->present_pages = realsize;
 #ifdef CONFIG_NUMA
-		zone->min_unmapped_ratio = (realsize*sysctl_min_unmapped_ratio)
+		zone->min_unmapped_pages = (realsize*sysctl_min_unmapped_ratio)
 						/ 100;
 #endif
 		zone->name = zone_names[j];
@@ -2313,7 +2313,7 @@ int sysctl_min_unmapped_ratio_sysctl_handler(ctl_table *table, int write,
 		return rc;
 
 	for_each_zone(zone)
-		zone->min_unmapped_ratio = (zone->present_pages *
+		zone->min_unmapped_pages = (zone->present_pages *
 				sysctl_min_unmapped_ratio) / 100;
 	return 0;
 }
diff --git a/trunk/mm/vmscan.c b/trunk/mm/vmscan.c
index 8f35d7d585cb..5154c25e8440 100644
--- a/trunk/mm/vmscan.c
+++ b/trunk/mm/vmscan.c
@@ -1618,7 +1618,7 @@ int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
 	 * unmapped file backed pages.
 	 */
 	if (zone_page_state(zone, NR_FILE_PAGES) -
-	    zone_page_state(zone, NR_FILE_MAPPED) <= zone->min_unmapped_ratio)
+	    zone_page_state(zone, NR_FILE_MAPPED) <= zone->min_unmapped_pages)
 		return 0;
 
 	/*
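
Note (not part of the patch): the renamed field holds an absolute page count derived
from the vm.min_unmapped_ratio sysctl, not a percentage, which is what the rename to
min_unmapped_pages reflects. Below is a minimal standalone userspace sketch of the
same arithmetic and the threshold test that zone_reclaim() performs; the struct name,
variable names, and all numbers are illustrative only and are not taken from the
kernel sources.

/* sketch.c - illustrative only, not kernel code */
#include <stdio.h>

struct zone_sketch {
	unsigned long present_pages;      /* pages managed by the zone */
	unsigned long min_unmapped_pages; /* threshold, stored as a page count */
};

int main(void)
{
	unsigned long sysctl_min_unmapped_ratio = 1;            /* percent (illustrative value) */
	struct zone_sketch zone = { .present_pages = 262144 };  /* hypothetical zone size */

	/* Same conversion the patch performs in free_area_init_core() and the
	 * sysctl handler: percentage of present pages -> absolute page count. */
	zone.min_unmapped_pages =
		(zone.present_pages * sysctl_min_unmapped_ratio) / 100;

	/* Same shape of test as zone_reclaim(): reclaim only proceeds while the
	 * zone has more unmapped file-backed pages than the threshold. */
	unsigned long nr_file_pages  = 10000;  /* hypothetical NR_FILE_PAGES  */
	unsigned long nr_file_mapped = 4000;   /* hypothetical NR_FILE_MAPPED */

	printf("threshold: %lu pages\n", zone.min_unmapped_pages);
	if (nr_file_pages - nr_file_mapped <= zone.min_unmapped_pages)
		printf("zone_reclaim() would return 0 (skip reclaim)\n");
	else
		printf("zone_reclaim() would proceed\n");
	return 0;
}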