Commit f0fcecd

---
r: 356490
b: refs/heads/master
c: 9b4f98c
h: refs/heads/master
v: v3
Johannes Weiner authored and Linus Torvalds committed Feb 24, 2013
1 parent 9f53283 commit f0fcecd
Showing 2 changed files with 92 additions and 89 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
refs/heads/master: 9a2651140ef740b3b67ad47ea3d0af75581aacc6
refs/heads/master: 9b4f98cdac9683ee9cdb28c582a81223f0c10a3f
179 changes: 91 additions & 88 deletions trunk/mm/vmscan.c
@@ -1822,6 +1822,58 @@ static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc,
}
}

/*
* This is a basic per-zone page freer. Used by both kswapd and direct reclaim.
*/
static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
{
unsigned long nr[NR_LRU_LISTS];
unsigned long nr_to_scan;
enum lru_list lru;
unsigned long nr_reclaimed = 0;
unsigned long nr_to_reclaim = sc->nr_to_reclaim;
struct blk_plug plug;

get_scan_count(lruvec, sc, nr);

blk_start_plug(&plug);
while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
nr[LRU_INACTIVE_FILE]) {
for_each_evictable_lru(lru) {
if (nr[lru]) {
nr_to_scan = min(nr[lru], SWAP_CLUSTER_MAX);
nr[lru] -= nr_to_scan;

nr_reclaimed += shrink_list(lru, nr_to_scan,
lruvec, sc);
}
}
/*
* On large memory systems, scan >> priority can become
* really large. This is fine for the starting priority;
* we want to put equal scanning pressure on each zone.
* However, if the VM has a harder time of freeing pages,
* with multiple processes reclaiming pages, the total
* freeing target can get unreasonably large.
*/
if (nr_reclaimed >= nr_to_reclaim &&
sc->priority < DEF_PRIORITY)
break;
}
blk_finish_plug(&plug);
sc->nr_reclaimed += nr_reclaimed;

/*
* Even if we did not try to evict anon pages at all, we want to
* rebalance the anon lru active/inactive ratio.
*/
if (inactive_anon_is_low(lruvec))
shrink_active_list(SWAP_CLUSTER_MAX, lruvec,
sc, LRU_ACTIVE_ANON);

throttle_vm_writeout(sc->gfp_mask);
}

/* Use reclaim/compaction for costly allocs or under memory pressure */
static bool in_reclaim_compaction(struct scan_control *sc)
{
@@ -1840,7 +1892,7 @@ static bool in_reclaim_compaction(struct scan_control *sc)
* calls try_to_compact_zone() that it will have enough free pages to succeed.
* It will give up earlier than that if there is difficulty reclaiming pages.
*/
static inline bool should_continue_reclaim(struct lruvec *lruvec,
static inline bool should_continue_reclaim(struct zone *zone,
unsigned long nr_reclaimed,
unsigned long nr_scanned,
struct scan_control *sc)
@@ -1880,15 +1932,15 @@ static inline bool should_continue_reclaim(struct lruvec *lruvec,
* inactive lists are large enough, continue reclaiming
*/
pages_for_compaction = (2UL << sc->order);
inactive_lru_pages = get_lru_size(lruvec, LRU_INACTIVE_FILE);
inactive_lru_pages = zone_page_state(zone, NR_INACTIVE_FILE);
if (nr_swap_pages > 0)
inactive_lru_pages += get_lru_size(lruvec, LRU_INACTIVE_ANON);
inactive_lru_pages += zone_page_state(zone, NR_INACTIVE_ANON);
if (sc->nr_reclaimed < pages_for_compaction &&
inactive_lru_pages > pages_for_compaction)
return true;

/* If compaction would go ahead or the allocation would succeed, stop */
switch (compaction_suitable(lruvec_zone(lruvec), sc->order)) {
switch (compaction_suitable(zone, sc->order)) {
case COMPACT_PARTIAL:
case COMPACT_CONTINUE:
return false;
@@ -1897,98 +1949,49 @@ static inline bool should_continue_reclaim(struct lruvec *lruvec,
}
}

/*
* This is a basic per-zone page freer. Used by both kswapd and direct reclaim.
*/
static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
static void shrink_zone(struct zone *zone, struct scan_control *sc)
{
unsigned long nr[NR_LRU_LISTS];
unsigned long nr_to_scan;
enum lru_list lru;
unsigned long nr_reclaimed, nr_scanned;
unsigned long nr_to_reclaim = sc->nr_to_reclaim;
struct blk_plug plug;

restart:
nr_reclaimed = 0;
nr_scanned = sc->nr_scanned;
get_scan_count(lruvec, sc, nr);

blk_start_plug(&plug);
while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
nr[LRU_INACTIVE_FILE]) {
for_each_evictable_lru(lru) {
if (nr[lru]) {
nr_to_scan = min_t(unsigned long,
nr[lru], SWAP_CLUSTER_MAX);
nr[lru] -= nr_to_scan;

nr_reclaimed += shrink_list(lru, nr_to_scan,
lruvec, sc);
}
}
/*
* On large memory systems, scan >> priority can become
* really large. This is fine for the starting priority;
* we want to put equal scanning pressure on each zone.
* However, if the VM has a harder time of freeing pages,
* with multiple processes reclaiming pages, the total
* freeing target can get unreasonably large.
*/
if (nr_reclaimed >= nr_to_reclaim &&
sc->priority < DEF_PRIORITY)
break;
}
blk_finish_plug(&plug);
sc->nr_reclaimed += nr_reclaimed;

/*
* Even if we did not try to evict anon pages at all, we want to
* rebalance the anon lru active/inactive ratio.
*/
if (inactive_anon_is_low(lruvec))
shrink_active_list(SWAP_CLUSTER_MAX, lruvec,
sc, LRU_ACTIVE_ANON);

/* reclaim/compaction might need reclaim to continue */
if (should_continue_reclaim(lruvec, nr_reclaimed,
sc->nr_scanned - nr_scanned, sc))
goto restart;
do {
struct mem_cgroup *root = sc->target_mem_cgroup;
struct mem_cgroup_reclaim_cookie reclaim = {
.zone = zone,
.priority = sc->priority,
};
struct mem_cgroup *memcg;

throttle_vm_writeout(sc->gfp_mask);
}
nr_reclaimed = sc->nr_reclaimed;
nr_scanned = sc->nr_scanned;

static void shrink_zone(struct zone *zone, struct scan_control *sc)
{
struct mem_cgroup *root = sc->target_mem_cgroup;
struct mem_cgroup_reclaim_cookie reclaim = {
.zone = zone,
.priority = sc->priority,
};
struct mem_cgroup *memcg;
memcg = mem_cgroup_iter(root, NULL, &reclaim);
do {
struct lruvec *lruvec;

memcg = mem_cgroup_iter(root, NULL, &reclaim);
do {
struct lruvec *lruvec = mem_cgroup_zone_lruvec(zone, memcg);
lruvec = mem_cgroup_zone_lruvec(zone, memcg);

shrink_lruvec(lruvec, sc);
shrink_lruvec(lruvec, sc);

/*
* Limit reclaim has historically picked one memcg and
* scanned it with decreasing priority levels until
* nr_to_reclaim had been reclaimed. This priority
* cycle is thus over after a single memcg.
*
* Direct reclaim and kswapd, on the other hand, have
* to scan all memory cgroups to fulfill the overall
* scan target for the zone.
*/
if (!global_reclaim(sc)) {
mem_cgroup_iter_break(root, memcg);
break;
}
memcg = mem_cgroup_iter(root, memcg, &reclaim);
} while (memcg);
/*
* Limit reclaim has historically picked one
* memcg and scanned it with decreasing
* priority levels until nr_to_reclaim had
* been reclaimed. This priority cycle is
* thus over after a single memcg.
*
* Direct reclaim and kswapd, on the other
* hand, have to scan all memory cgroups to
* fulfill the overall scan target for the
* zone.
*/
if (!global_reclaim(sc)) {
mem_cgroup_iter_break(root, memcg);
break;
}
memcg = mem_cgroup_iter(root, memcg, &reclaim);
} while (memcg);
} while (should_continue_reclaim(zone, sc->nr_reclaimed - nr_reclaimed,
sc->nr_scanned - nr_scanned, sc));
}

/* Returns true if compaction should go ahead for a high-order request */
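The structural effect of the patch is easier to see outside the diff: shrink_lruvec() loses its restart loop and only frees pages for a single lruvec, while shrink_zone() now wraps the whole memcg hierarchy walk in a do/while driven by should_continue_reclaim(), which samples zone-wide counters (zone_page_state()) instead of one lruvec's LRU sizes. Below is a minimal userspace sketch of that control flow, not kernel code: the fake_zone and fake_scan_control types, the three-"memcg" array, the 32-page batch, and the reduced should_continue_reclaim() test (just the pages_for_compaction = 2UL << order check from the hunk above) are all invented stand-ins.

/*
 * Minimal userspace model of the reclaim control flow after this commit.
 * Every type and constant below is an invented stand-in, not the kernel's:
 * a "zone" is just reclaimable page counts split across a few fake memcgs,
 * and should_continue_reclaim() keeps only the pages_for_compaction test.
 */
#include <stdbool.h>
#include <stdio.h>

#define NR_FAKE_MEMCGS 3

struct fake_zone {
	unsigned long lru_pages[NR_FAKE_MEMCGS]; /* per-memcg reclaimable pages */
};

struct fake_scan_control {
	int order;                  /* allocation order reclaim is working for */
	unsigned long nr_reclaimed; /* running total, like sc->nr_reclaimed */
};

static unsigned long zone_reclaimable_pages(const struct fake_zone *zone)
{
	unsigned long sum = 0;

	for (int i = 0; i < NR_FAKE_MEMCGS; i++)
		sum += zone->lru_pages[i];
	return sum;
}

/* One per-memcg pass: free at most a small batch from a single fake memcg. */
static void shrink_lruvec(struct fake_zone *zone, int memcg,
			  struct fake_scan_control *sc)
{
	unsigned long batch = 32; /* stand-in for SWAP_CLUSTER_MAX-sized work */
	unsigned long freed = zone->lru_pages[memcg] < batch ?
			      zone->lru_pages[memcg] : batch;

	zone->lru_pages[memcg] -= freed;
	sc->nr_reclaimed += freed;
}

/*
 * Decide against the zone as a whole, mirroring the patch's switch from
 * get_lru_size(lruvec, ...) to zone_page_state(zone, ...): keep going while
 * compaction for this order still lacks pages and the zone can supply them.
 */
static bool should_continue_reclaim(const struct fake_zone *zone,
				    unsigned long nr_reclaimed_this_round,
				    const struct fake_scan_control *sc)
{
	unsigned long pages_for_compaction = 2UL << sc->order;

	if (!nr_reclaimed_this_round)
		return false; /* no forward progress, stop */
	return sc->nr_reclaimed < pages_for_compaction &&
	       zone_reclaimable_pages(zone) > pages_for_compaction;
}

/* New shape of shrink_zone(): the memcg walk sits inside the retry loop. */
static void shrink_zone(struct fake_zone *zone, struct fake_scan_control *sc)
{
	unsigned long nr_reclaimed;

	do {
		unsigned long before = sc->nr_reclaimed;

		for (int memcg = 0; memcg < NR_FAKE_MEMCGS; memcg++)
			shrink_lruvec(zone, memcg, sc);

		nr_reclaimed = sc->nr_reclaimed - before;
	} while (should_continue_reclaim(zone, nr_reclaimed, sc));
}

int main(void)
{
	struct fake_zone zone = { .lru_pages = { 400, 120, 40 } };
	struct fake_scan_control sc = { .order = 6, .nr_reclaimed = 0 };

	shrink_zone(&zone, &sc);
	printf("reclaimed %lu pages, %lu left in the zone\n",
	       sc.nr_reclaimed, zone_reclaimable_pages(&zone));
	return 0;
}

Built with a plain cc, the sketch reclaims in whole-zone rounds until either enough pages for the requested order have been freed or the zone can no longer supply enough pages for compaction, which matches the renamed parameters in the hunks above: compaction_suitable() takes a zone, so the continue/stop decision is now made with zone-wide counts rather than a single memcg's.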
