---
r: 284362
b: refs/heads/master
c: 89b5fae
h: refs/heads/master
v: v3
Johannes Weiner authored and Linus Torvalds committed Jan 13, 2012
1 parent 70f4c92 commit 03f1005
Showing 2 changed files with 38 additions and 26 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 9f3a0d0933de079665ec1b498947ffbf805b0018
+refs/heads/master: 89b5fae5368f6aec62fb09c8e19b6c61f1154603
62 changes: 37 additions & 25 deletions trunk/mm/vmscan.c
@@ -153,9 +153,25 @@ static LIST_HEAD(shrinker_list);
 static DECLARE_RWSEM(shrinker_rwsem);
 
 #ifdef CONFIG_CGROUP_MEM_RES_CTLR
-#define scanning_global_lru(sc)	(!(sc)->mem_cgroup)
+static bool global_reclaim(struct scan_control *sc)
+{
+	return !sc->mem_cgroup;
+}
+
+static bool scanning_global_lru(struct scan_control *sc)
+{
+	return !sc->mem_cgroup;
+}
 #else
-#define scanning_global_lru(sc)	(1)
+static bool global_reclaim(struct scan_control *sc)
+{
+	return true;
+}
+
+static bool scanning_global_lru(struct scan_control *sc)
+{
+	return true;
+}
 #endif
 
 static struct zone_reclaim_stat *get_reclaim_stat(struct zone *zone,
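
For readers outside the kernel tree, here is a minimal standalone sketch of the two predicates this hunk introduces; the struct is pared down to the one field they test and main() is only a driver, so this is an illustration rather than kernel code. At this point both helpers still test the same field, but they now name different questions: whether reclaim was triggered by global memory pressure, versus whether the global LRU list (rather than a memcg's) is the one being scanned.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct mem_cgroup;			/* opaque here, as in vmscan.c */

struct scan_control {
	struct mem_cgroup *mem_cgroup;	/* NULL for global reclaim */
};

/* "Was this reclaim pass triggered by global memory pressure?" */
static bool global_reclaim(struct scan_control *sc)
{
	return !sc->mem_cgroup;
}

/* "Is the global LRU, rather than a memcg LRU, being scanned?" */
static bool scanning_global_lru(struct scan_control *sc)
{
	return !sc->mem_cgroup;
}

int main(void)
{
	struct scan_control sc = { .mem_cgroup = NULL };
	printf("global_reclaim=%d scanning_global_lru=%d\n",
	       global_reclaim(&sc), scanning_global_lru(&sc));
	return 0;
}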
@@ -994,7 +1010,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 	 * back off and wait for congestion to clear because further reclaim
 	 * will encounter the same problem
 	 */
-	if (nr_dirty && nr_dirty == nr_congested && scanning_global_lru(sc))
+	if (nr_dirty && nr_dirty == nr_congested && global_reclaim(sc))
 		zone_set_flag(zone, ZONE_CONGESTED);
 
 	free_hot_cold_page_list(&free_pages, 1);
@@ -1313,7 +1329,7 @@ static int too_many_isolated(struct zone *zone, int file,
 	if (current_is_kswapd())
 		return 0;
 
-	if (!scanning_global_lru(sc))
+	if (!global_reclaim(sc))
 		return 0;
 
 	if (file) {
@@ -1491,21 +1507,19 @@ shrink_inactive_list(unsigned long nr_to_scan, struct zone *zone,
 	if (scanning_global_lru(sc)) {
 		nr_taken = isolate_pages_global(nr_to_scan, &page_list,
 			&nr_scanned, sc->order, reclaim_mode, zone, 0, file);
+	} else {
+		nr_taken = mem_cgroup_isolate_pages(nr_to_scan, &page_list,
+			&nr_scanned, sc->order, reclaim_mode, zone,
+			sc->mem_cgroup, 0, file);
+	}
+	if (global_reclaim(sc)) {
 		zone->pages_scanned += nr_scanned;
 		if (current_is_kswapd())
 			__count_zone_vm_events(PGSCAN_KSWAPD, zone,
 					       nr_scanned);
 		else
 			__count_zone_vm_events(PGSCAN_DIRECT, zone,
 					       nr_scanned);
-	} else {
-		nr_taken = mem_cgroup_isolate_pages(nr_to_scan, &page_list,
-			&nr_scanned, sc->order, reclaim_mode, zone,
-			sc->mem_cgroup, 0, file);
-		/*
-		 * mem_cgroup_isolate_pages() keeps track of
-		 * scanned pages on its own.
-		 */
 	}
 
 	if (nr_taken == 0) {
@@ -1646,18 +1660,16 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
 				&pgscanned, sc->order,
 				reclaim_mode, zone,
 				1, file);
-		zone->pages_scanned += pgscanned;
 	} else {
 		nr_taken = mem_cgroup_isolate_pages(nr_pages, &l_hold,
 				&pgscanned, sc->order,
 				reclaim_mode, zone,
 				sc->mem_cgroup, 1, file);
-		/*
-		 * mem_cgroup_isolate_pages() keeps track of
-		 * scanned pages on its own.
-		 */
 	}
 
+	if (global_reclaim(sc))
+		zone->pages_scanned += pgscanned;
+
 	reclaim_stat->recent_scanned[file] += nr_taken;
 
 	__count_zone_vm_events(PGREFILL, zone, pgscanned);
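
The two isolation hunks above share one pattern: which LRU supplies the pages is decided by scanning_global_lru(), while the zone-wide scan statistics are now guarded separately by global_reclaim(). Here is a standalone sketch of that shape, with toy stand-ins for isolate_pages_global() and mem_cgroup_isolate_pages(); the helper names and signatures are illustrative, not the kernel's.

#include <stdbool.h>
#include <stdio.h>

struct mem_cgroup;
struct scan_control { struct mem_cgroup *mem_cgroup; };
struct zone { unsigned long pages_scanned; };

static bool global_reclaim(struct scan_control *sc) { return !sc->mem_cgroup; }
static bool scanning_global_lru(struct scan_control *sc) { return !sc->mem_cgroup; }

/* Toy isolation helpers; both report pages scanned through *scanned. */
static unsigned long isolate_global(unsigned long n, unsigned long *scanned)
{ *scanned = n; return n; }
static unsigned long isolate_memcg(unsigned long n, unsigned long *scanned)
{ *scanned = n; return n; }

static unsigned long isolate(struct zone *zone, struct scan_control *sc,
			     unsigned long nr_to_scan)
{
	unsigned long nr_scanned, nr_taken;

	/* First decision: which LRU list supplies the pages. */
	if (scanning_global_lru(sc))
		nr_taken = isolate_global(nr_to_scan, &nr_scanned);
	else
		nr_taken = isolate_memcg(nr_to_scan, &nr_scanned);

	/* Second, now independent, decision: zone counters are global
	 * state, so they are only touched for globally initiated reclaim. */
	if (global_reclaim(sc))
		zone->pages_scanned += nr_scanned;

	return nr_taken;
}

int main(void)
{
	struct zone zone = { 0 };
	struct scan_control sc = { .mem_cgroup = NULL };
	printf("taken=%lu zone.pages_scanned=%lu\n",
	       isolate(&zone, &sc, 32), zone.pages_scanned);
	return 0;
}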
@@ -1827,7 +1839,7 @@ static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
 
 static int vmscan_swappiness(struct scan_control *sc)
 {
-	if (scanning_global_lru(sc))
+	if (global_reclaim(sc))
 		return vm_swappiness;
 	return mem_cgroup_swappiness(sc->mem_cgroup);
 }
@@ -1862,9 +1874,9 @@ static void get_scan_count(struct zone *zone, struct scan_control *sc,
 	 * latencies, so it's better to scan a minimum amount there as
 	 * well.
 	 */
-	if (scanning_global_lru(sc) && current_is_kswapd())
+	if (current_is_kswapd())
 		force_scan = true;
-	if (!scanning_global_lru(sc))
+	if (!global_reclaim(sc))
 		force_scan = true;
 
 	/* If we have no swap space, do not bother scanning anon pages. */
@@ -1881,7 +1893,7 @@ static void get_scan_count(struct zone *zone, struct scan_control *sc,
 	file = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_FILE) +
 		zone_nr_lru_pages(zone, sc, LRU_INACTIVE_FILE);
 
-	if (scanning_global_lru(sc)) {
+	if (global_reclaim(sc)) {
 		free = zone_page_state(zone, NR_FREE_PAGES);
 		/* If we have very few page cache pages,
 		   force-scan anon pages. */
@@ -2114,7 +2126,7 @@ static bool shrink_zones(int priority, struct zonelist *zonelist,
 		 * Take care memory controller reclaiming has small influence
 		 * to global LRU.
 		 */
-		if (scanning_global_lru(sc)) {
+		if (global_reclaim(sc)) {
 			if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
 				continue;
 			if (zone->all_unreclaimable && priority != DEF_PRIORITY)
@@ -2212,7 +2224,7 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
 	get_mems_allowed();
 	delayacct_freepages_start();
 
-	if (scanning_global_lru(sc))
+	if (global_reclaim(sc))
 		count_vm_event(ALLOCSTALL);
 
 	for (priority = DEF_PRIORITY; priority >= 0; priority--) {
@@ -2226,7 +2238,7 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
 		 * Don't shrink slabs when reclaiming memory from
 		 * over limit cgroups
 		 */
-		if (scanning_global_lru(sc)) {
+		if (global_reclaim(sc)) {
 			unsigned long lru_pages = 0;
 			for_each_zone_zonelist(zone, z, zonelist,
 					gfp_zone(sc->gfp_mask)) {
@@ -2288,7 +2300,7 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
 		return 0;
 
 	/* top priority shrink_zones still had more to do? don't OOM, then */
-	if (scanning_global_lru(sc) && !all_unreclaimable(zonelist, sc))
+	if (global_reclaim(sc) && !all_unreclaimable(zonelist, sc))
 		return 1;
 
 	return 0;
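
The split pays off once globally initiated reclaim can walk per-memcg LRU lists, which is the direction this change prepares for. Below is a hypothetical sketch of that end state: the reclaim target and the LRU currently being scanned live in separate fields, so the two predicates can disagree. The target_mem_cgroup field name is an assumption for illustration, not the kernel API as of this commit.

#include <stdbool.h>
#include <stddef.h>

struct mem_cgroup;

struct scan_control {
	struct mem_cgroup *target_mem_cgroup;	/* NULL: global pressure (assumed name) */
	struct mem_cgroup *mem_cgroup;		/* memcg whose LRU is being scanned */
};

static bool global_reclaim(struct scan_control *sc)
{
	return !sc->target_mem_cgroup;
}

static bool scanning_global_lru(struct scan_control *sc)
{
	return !sc->mem_cgroup;
}

/* Under this layout, global reclaim (target == NULL) scanning some
 * memcg's LRU gives global_reclaim() == true while
 * scanning_global_lru() == false, which is exactly why the callsites
 * above had to pick the right predicate. */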
