Commit 3a46af9
---
r: 272164
b: refs/heads/master
c: e0c2327
h: refs/heads/master
v: v3
Mel Gorman authored and Linus Torvalds committed Nov 1, 2011
1 parent 8323fc3 commit 3a46af9
Showing 2 changed files with 22 additions and 12 deletions.
[refs]: 1 addition & 1 deletion
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: e0887c19b2daa140f20ca8104bdc5740f39dbb86
+refs/heads/master: e0c23279c9f800c403f37511484d9014ac83adec
trunk/mm/vmscan.c: 21 additions & 11 deletions
@@ -2103,14 +2103,19 @@ static void shrink_zone(int priority, struct zone *zone,
  *
  * If a zone is deemed to be full of pinned pages then just give it a light
  * scan then give up on it.
+ *
+ * This function returns true if a zone is being reclaimed for a costly
+ * high-order allocation and compaction is either ready to begin or deferred.
+ * This indicates to the caller that it should retry the allocation or fail.
  */
-static void shrink_zones(int priority, struct zonelist *zonelist,
+static bool shrink_zones(int priority, struct zonelist *zonelist,
                                         struct scan_control *sc)
 {
         struct zoneref *z;
         struct zone *zone;
         unsigned long nr_soft_reclaimed;
         unsigned long nr_soft_scanned;
+        bool should_abort_reclaim = false;
 
         for_each_zone_zonelist_nodemask(zone, z, zonelist,
                                         gfp_zone(sc->gfp_mask), sc->nodemask) {
@@ -2127,19 +2132,20 @@ static void shrink_zones(int priority, struct zonelist *zonelist,
                         continue;       /* Let kswapd poll it */
                 if (COMPACTION_BUILD) {
                         /*
-                         * If we already have plenty of memory
-                         * free for compaction, don't free any
-                         * more. Even though compaction is
-                         * invoked for any non-zero order,
-                         * only frequent costly order
-                         * reclamation is disruptive enough to
-                         * become a noticable problem, like
-                         * transparent huge page allocations.
+                         * If we already have plenty of memory free for
+                         * compaction in this zone, don't free any more.
+                         * Even though compaction is invoked for any
+                         * non-zero order, only frequent costly order
+                         * reclamation is disruptive enough to become a
+                         * noticable problem, like transparent huge page
+                         * allocations.
                          */
                         if (sc->order > PAGE_ALLOC_COSTLY_ORDER &&
                                 (compaction_suitable(zone, sc->order) ||
-                                compaction_deferred(zone)))
+                                compaction_deferred(zone))) {
+                                should_abort_reclaim = true;
                                 continue;
+                        }
                 }
                 /*
                  * This steals pages from memory cgroups over softlimit
@@ -2158,6 +2164,8 @@ static void shrink_zones(int priority, struct zonelist *zonelist,
 
                 shrink_zone(priority, zone, sc);
         }
+
+        return should_abort_reclaim;
 }
 
 static bool zone_reclaimable(struct zone *zone)
@@ -2222,7 +2230,9 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
                 sc->nr_scanned = 0;
                 if (!priority)
                         disable_swap_token(sc->mem_cgroup);
-                shrink_zones(priority, zonelist, sc);
+                if (shrink_zones(priority, zonelist, sc))
+                        break;
+
                 /*
                  * Don't shrink slabs when reclaiming memory from
                  * over limit cgroups
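Taken together, the hunks change shrink_zones() from a void scan into one that reports whether reclaim should be aborted: for a costly high-order allocation (sc->order > PAGE_ALLOC_COSTLY_ORDER), a zone where compaction is ready or deferred is skipped and flagged rather than reclaimed further, and do_try_to_free_pages() breaks out of its priority loop on that signal. The standalone C sketch below illustrates the same abort pattern under simplified assumptions; struct zone, compaction_could_proceed(), shrink_one_zone(), and the priority bounds are illustrative stand-ins, not the kernel's actual types or APIs.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins for kernel types and heuristics; these names,
 * values, and signatures are assumptions for the sketch, not kernel APIs. */
#define NZONES 3
#define PAGE_ALLOC_COSTLY_ORDER 3

struct zone {
        int id;
        bool compaction_ready;  /* stands in for compaction_suitable()/compaction_deferred() */
};

static bool compaction_could_proceed(const struct zone *z)
{
        return z->compaction_ready;
}

static void shrink_one_zone(const struct zone *z, int priority)
{
        printf("priority %d: reclaiming from zone %d\n", priority, z->id);
}

/* Mirrors the new contract of shrink_zones(): walk every zone, but for a
 * costly high-order allocation flag (rather than keep reclaiming) any zone
 * where compaction could proceed. The caller decides whether to stop. */
static bool shrink_zones_sketch(struct zone zones[], int n, int order, int priority)
{
        bool should_abort_reclaim = false;

        for (int i = 0; i < n; i++) {
                if (order > PAGE_ALLOC_COSTLY_ORDER &&
                    compaction_could_proceed(&zones[i])) {
                        should_abort_reclaim = true;
                        continue;  /* skip this zone, keep scanning the rest */
                }
                shrink_one_zone(&zones[i], priority);
        }
        return should_abort_reclaim;
}

int main(void)
{
        struct zone zones[NZONES] = { {0, false}, {1, true}, {2, false} };
        int order = PAGE_ALLOC_COSTLY_ORDER + 1;  /* a costly allocation, e.g. a THP */

        /* As in do_try_to_free_pages(): break out of the priority loop as
         * soon as one pass reports that compaction can take over. */
        for (int priority = 12; priority >= 0; priority--) {
                if (shrink_zones_sketch(zones, NZONES, order, priority))
                        break;
        }
        return 0;
}

Note that the flag is returned only after the whole zonelist walk finishes (continue rather than an early return), which matches the patch's choice: zones that still need reclaim are not starved just because one zone is already compaction-ready.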
