From 04e8cff3809b99417df8768083b8d3cb8fbe4f87 Mon Sep 17 00:00:00 2001
From: Minchan Kim
Date: Mon, 8 Oct 2012 16:33:48 -0700
Subject: [PATCH] --- yaml ---

r: 332469
b: refs/heads/master
c: e46a28790e594c0876d1a84270926abf75460f61
h: refs/heads/master
i: 332467: b038238076c4b775f7b194aa1cd0ea91ab076432
v: v3
---
 [refs]                       | 2 +-
 trunk/include/linux/mmzone.h | 2 ++
 trunk/mm/compaction.c        | 8 ++++++--
 trunk/mm/internal.h          | 2 +-
 trunk/mm/page_alloc.c        | 2 +-
 trunk/mm/vmscan.c            | 4 ++--
 6 files changed, 13 insertions(+), 7 deletions(-)

diff --git a/[refs] b/[refs]
index f8a484f2a89f..42196d6d0595 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 7a71932d5676b7410ab64d149bad8bde6b0d8632
+refs/heads/master: e46a28790e594c0876d1a84270926abf75460f61
diff --git a/trunk/include/linux/mmzone.h b/trunk/include/linux/mmzone.h
index a5578871d033..50aaca81f63d 100644
--- a/trunk/include/linux/mmzone.h
+++ b/trunk/include/linux/mmzone.h
@@ -218,6 +218,8 @@ struct lruvec {
 #define ISOLATE_UNMAPPED	((__force isolate_mode_t)0x2)
 /* Isolate for asynchronous migration */
 #define ISOLATE_ASYNC_MIGRATE	((__force isolate_mode_t)0x4)
+/* Isolate unevictable pages */
+#define ISOLATE_UNEVICTABLE	((__force isolate_mode_t)0x8)
 
 /* LRU Isolation modes. */
 typedef unsigned __bitwise__ isolate_mode_t;
diff --git a/trunk/mm/compaction.c b/trunk/mm/compaction.c
index d8187f9cabbf..2c4ce17651d8 100644
--- a/trunk/mm/compaction.c
+++ b/trunk/mm/compaction.c
@@ -461,6 +461,7 @@ static bool too_many_isolated(struct zone *zone)
  * @cc:		Compaction control structure.
  * @low_pfn:	The first PFN of the range.
  * @end_pfn:	The one-past-the-last PFN of the range.
+ * @unevictable: true if it allows to isolate unevictable pages
  *
  * Isolate all pages that can be migrated from the range specified by
  * [low_pfn, end_pfn). Returns zero if there is a fatal signal
@@ -476,7 +477,7 @@ static bool too_many_isolated(struct zone *zone)
  */
 unsigned long
 isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
-			   unsigned long low_pfn, unsigned long end_pfn)
+			   unsigned long low_pfn, unsigned long end_pfn, bool unevictable)
 {
 	unsigned long last_pageblock_nr = 0, pageblock_nr;
 	unsigned long nr_scanned = 0, nr_isolated = 0;
@@ -602,6 +603,9 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
 		if (!cc->sync)
 			mode |= ISOLATE_ASYNC_MIGRATE;
 
+		if (unevictable)
+			mode |= ISOLATE_UNEVICTABLE;
+
 		lruvec = mem_cgroup_page_lruvec(page, zone);
 
 		/* Try isolate the page */
@@ -807,7 +811,7 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone,
 	}
 
 	/* Perform the isolation */
-	low_pfn = isolate_migratepages_range(zone, cc, low_pfn, end_pfn);
+	low_pfn = isolate_migratepages_range(zone, cc, low_pfn, end_pfn, false);
 	if (!low_pfn || cc->contended)
 		return ISOLATE_ABORT;
 
diff --git a/trunk/mm/internal.h b/trunk/mm/internal.h
index 4dc93e2fe69e..f5f295fe11e1 100644
--- a/trunk/mm/internal.h
+++ b/trunk/mm/internal.h
@@ -138,7 +138,7 @@ isolate_freepages_range(struct compact_control *cc,
 			unsigned long start_pfn, unsigned long end_pfn);
 unsigned long
 isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
-			   unsigned long low_pfn, unsigned long end_pfn);
+			   unsigned long low_pfn, unsigned long end_pfn, bool unevictable);
 
 #endif
 
diff --git a/trunk/mm/page_alloc.c b/trunk/mm/page_alloc.c
index 5485f0ef4ec3..fd86c47de86f 100644
--- a/trunk/mm/page_alloc.c
+++ b/trunk/mm/page_alloc.c
@@ -5690,7 +5690,7 @@ static int __alloc_contig_migrate_range(struct compact_control *cc,
 		if (list_empty(&cc->migratepages)) {
 			cc->nr_migratepages = 0;
 			pfn = isolate_migratepages_range(cc->zone, cc,
-							 pfn, end);
+							 pfn, end, true);
 			if (!pfn) {
 				ret = -EINTR;
 				break;
diff --git a/trunk/mm/vmscan.c b/trunk/mm/vmscan.c
index 8b627309dd44..2624edcfb420 100644
--- a/trunk/mm/vmscan.c
+++ b/trunk/mm/vmscan.c
@@ -1009,8 +1009,8 @@ int __isolate_lru_page(struct page *page, isolate_mode_t mode)
 	if (!PageLRU(page))
 		return ret;
 
-	/* Do not give back unevictable pages for compaction */
-	if (PageUnevictable(page))
+	/* Compaction should not handle unevictable pages but CMA can do so */
+	if (PageUnevictable(page) && !(mode & ISOLATE_UNEVICTABLE))
 		return ret;
 
 	ret = -EBUSY;
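
Note on the change above, with an illustrative sketch: the new ISOLATE_UNEVICTABLE bit only relaxes the check in __isolate_lru_page(), so regular compaction, which passes unevictable=false into isolate_migratepages_range(), keeps skipping unevictable (e.g. mlocked) pages, while CMA's __alloc_contig_migrate_range() passes true and is allowed to isolate them. The standalone C program below is a minimal userspace sketch of that gating only; the isolate_mode_t typedef, the struct page fields, and the may_isolate() helper are simplified stand-ins invented for illustration, not the kernel API, and its return codes do not match __isolate_lru_page() exactly.

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

typedef unsigned int isolate_mode_t;

/* Same bit layout as the isolate modes in the mmzone.h hunk above. */
#define ISOLATE_CLEAN		((isolate_mode_t)0x1)
#define ISOLATE_UNMAPPED	((isolate_mode_t)0x2)
#define ISOLATE_ASYNC_MIGRATE	((isolate_mode_t)0x4)
#define ISOLATE_UNEVICTABLE	((isolate_mode_t)0x8)

/* Simplified stand-in for struct page: only the state the check needs. */
struct page {
	bool on_lru;
	bool unevictable;
};

/*
 * Mirrors the gating changed in the mm/vmscan.c hunk: an unevictable page
 * is skipped unless the caller set ISOLATE_UNEVICTABLE in its mode.
 */
static int may_isolate(const struct page *page, isolate_mode_t mode)
{
	if (!page->on_lru)
		return -EINVAL;

	if (page->unevictable && !(mode & ISOLATE_UNEVICTABLE))
		return -EINVAL;

	return 0;
}

int main(void)
{
	struct page mlocked = { .on_lru = true, .unevictable = true };

	/* Compaction path: isolate_migratepages_range(..., false) */
	isolate_mode_t compaction = ISOLATE_ASYNC_MIGRATE;
	/* CMA path: isolate_migratepages_range(..., true) */
	isolate_mode_t cma = ISOLATE_ASYNC_MIGRATE | ISOLATE_UNEVICTABLE;

	printf("compaction: %d (page skipped)\n", may_isolate(&mlocked, compaction));
	printf("cma:        %d (page may be isolated)\n", may_isolate(&mlocked, cma));
	return 0;
}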