From a2698c3363313ed478cc385c2ab059a92bd003c3 Mon Sep 17 00:00:00 2001
From: Johannes Weiner
Date: Fri, 22 Feb 2013 16:32:15 -0800
Subject: [PATCH]

--- yaml ---
r: 356488
b: refs/heads/master
c: 11d16c25bbf7a3b7a43d7472e175cdd52961757d
h: refs/heads/master
v: v3
---
 [refs]            |  2 +-
 trunk/mm/vmscan.c | 12 +++++++-----
 2 files changed, 8 insertions(+), 6 deletions(-)

diff --git a/[refs] b/[refs]
index 58fe5ca794bd..24060b8b9d2d 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 10316b313cbde4b778c3d1b4b2fe2adbcbe84a48
+refs/heads/master: 11d16c25bbf7a3b7a43d7472e175cdd52961757d
diff --git a/trunk/mm/vmscan.c b/trunk/mm/vmscan.c
index 68586c887611..259f8208a388 100644
--- a/trunk/mm/vmscan.c
+++ b/trunk/mm/vmscan.c
@@ -1713,13 +1713,15 @@ static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc,
 	file  = get_lru_size(lruvec, LRU_ACTIVE_FILE) +
 		get_lru_size(lruvec, LRU_INACTIVE_FILE);
 
+	/*
+	 * If it's foreseeable that reclaiming the file cache won't be
+	 * enough to get the zone back into a desirable shape, we have
+	 * to swap.  Better start now and leave the - probably heavily
+	 * thrashing - remaining file pages alone.
+	 */
 	if (global_reclaim(sc)) {
-		free  = zone_page_state(zone, NR_FREE_PAGES);
+		free = zone_page_state(zone, NR_FREE_PAGES);
 		if (unlikely(file + free <= high_wmark_pages(zone))) {
-			/*
-			 * If we have very few page cache pages, force-scan
-			 * anon pages.
-			 */
 			fraction[0] = 1;
 			fraction[1] = 0;
 			denominator = 1;
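
The hunk above only rewords the comment around an existing heuristic in get_scan_count(): when even reclaiming every remaining file page could not lift the zone back above its high watermark, file reclaim is pointless and the scan is forced onto the anonymous LRUs. The following standalone C sketch illustrates just that file + free <= high_wmark_pages(zone) test; the helper name must_scan_anon and the page counts are made up for demonstration and are not kernel code.

#include <stdio.h>
#include <stdbool.h>

/*
 * Illustrative sketch only: the decision documented by the new comment.
 * The kernel derives these values from per-zone statistics; here they
 * are plain parameters.
 */
static bool must_scan_anon(unsigned long file_pages,
			   unsigned long free_pages,
			   unsigned long high_wmark)
{
	/*
	 * If dropping every remaining file page still would not bring the
	 * zone above its high watermark, file reclaim alone cannot help;
	 * force the scan onto anonymous pages (i.e. start swapping) and
	 * leave the probably-thrashing file pages alone.
	 */
	return file_pages + free_pages <= high_wmark;
}

int main(void)
{
	/* Hypothetical zone state, in pages. */
	unsigned long file = 512, free = 1024, high_wmark = 4096;

	if (must_scan_anon(file, free, high_wmark))
		printf("file cache too small: scan anon only\n");
	else
		printf("keep balancing anon and file scanning\n");
	return 0;
}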