diff --git a/[refs] b/[refs]
index 710873c004fc..6283e9939409 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 21ee9f398be209ccbb62929d35961ca1ed48eec3
+refs/heads/master: e0887c19b2daa140f20ca8104bdc5740f39dbb86
diff --git a/trunk/mm/vmscan.c b/trunk/mm/vmscan.c
index f51a33e8ed89..7e0f05797388 100644
--- a/trunk/mm/vmscan.c
+++ b/trunk/mm/vmscan.c
@@ -2125,6 +2125,22 @@ static void shrink_zones(int priority, struct zonelist *zonelist,
 			continue;
 		if (zone->all_unreclaimable && priority != DEF_PRIORITY)
 			continue;	/* Let kswapd poll it */
+		if (COMPACTION_BUILD) {
+			/*
+			 * If we already have plenty of memory
+			 * free for compaction, don't free any
+			 * more. Even though compaction is
+			 * invoked for any non-zero order,
+			 * only frequent costly order
+			 * reclamation is disruptive enough to
+			 * become a noticeable problem, like
+			 * transparent huge page allocations.
+			 */
+			if (sc->order > PAGE_ALLOC_COSTLY_ORDER &&
+			    (compaction_suitable(zone, sc->order) ||
+			     compaction_deferred(zone)))
+				continue;
+		}
 		/*
 		 * This steals pages from memory cgroups over softlimit
 		 * and returns the number of reclaimed pages and
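
For readers following the control flow, below is a minimal userspace sketch of the gate this patch adds to shrink_zones(). The struct definitions and the bodies of compaction_suitable() and compaction_deferred() here are hypothetical mock-ups for illustration, not the real mm/ implementations; only the shape of the predicate (skip reclaim of a zone for costly orders when compaction is already viable or was recently deferred) is taken from the patch.

#include <stdbool.h>
#include <stdio.h>

#define PAGE_ALLOC_COSTLY_ORDER 3   /* same threshold the kernel uses */
#define COMPACTION_BUILD 1          /* pretend CONFIG_COMPACTION=y */

/* Mock zone state; the real struct zone is far richer. */
struct zone {
	bool enough_free_for_compaction; /* stand-in for compaction_suitable() */
	bool compaction_recently_failed; /* stand-in for compaction_deferred() */
};

/* Mock scan control; sc->order drives the decision in the patch. */
struct scan_control {
	int order; /* allocation order of the request that triggered reclaim */
};

static bool compaction_suitable(const struct zone *zone, int order)
{
	(void)order; /* the real check compares free pages against a watermark */
	return zone->enough_free_for_compaction;
}

static bool compaction_deferred(const struct zone *zone)
{
	return zone->compaction_recently_failed;
}

/*
 * Mirrors the new check in shrink_zones(): for costly orders
 * (order > PAGE_ALLOC_COSTLY_ORDER), skip freeing more pages in a
 * zone when compaction already has enough free memory to work with,
 * or when compaction was recently deferred there.
 */
static bool should_skip_zone(const struct zone *zone,
			     const struct scan_control *sc)
{
	if (!COMPACTION_BUILD)
		return false;
	return sc->order > PAGE_ALLOC_COSTLY_ORDER &&
	       (compaction_suitable(zone, sc->order) ||
		compaction_deferred(zone));
}

int main(void)
{
	struct zone zone = { .enough_free_for_compaction = true };
	struct scan_control sc = { .order = 9 }; /* THP-sized allocation */

	printf("skip reclaim: %s\n",
	       should_skip_zone(&zone, &sc) ? "yes" : "no");
	return 0;
}

The point of gating on sc->order > PAGE_ALLOC_COSTLY_ORDER is that low-order reclaim stays unchanged; only large requests such as order-9 transparent huge pages, where repeated direct reclaim is disruptive, defer to compaction instead of freeing more memory.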