
Commit

---
r: 252042
b: refs/heads/master
c: 246e87a
h: refs/heads/master
v: v3
KAMEZAWA Hiroyuki authored and Linus Torvalds committed May 27, 2011
1 parent a985140 commit af5add7
Showing 4 changed files with 35 additions and 36 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 889976dbcb1218119fdd950fb7819084e37d7d37
+refs/heads/master: 246e87a9393448c20873bc5dee64be68ed559e24
5 changes: 0 additions & 5 deletions trunk/include/linux/mmzone.h
@@ -273,11 +273,6 @@ struct zone_reclaim_stat {
          */
         unsigned long recent_rotated[2];
         unsigned long recent_scanned[2];
-
-        /*
-         * accumulated for batching
-         */
-        unsigned long nr_saved_scan[NR_LRU_LISTS];
 };
 
 struct zone {
4 changes: 1 addition & 3 deletions trunk/mm/page_alloc.c
@@ -4323,10 +4323,8 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
                 zone->zone_pgdat = pgdat;
 
                 zone_pcp_init(zone);
-                for_each_lru(l) {
+                for_each_lru(l)
                         INIT_LIST_HEAD(&zone->lru[l].list);
-                        zone->reclaim_stat.nr_saved_scan[l] = 0;
-                }
                 zone->reclaim_stat.recent_rotated[0] = 0;
                 zone->reclaim_stat.recent_rotated[1] = 0;
                 zone->reclaim_stat.recent_scanned[0] = 0;
60 changes: 33 additions & 27 deletions trunk/mm/vmscan.c
@@ -1717,26 +1717,6 @@ static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
         return shrink_inactive_list(nr_to_scan, zone, sc, priority, file);
 }
 
-/*
- * Smallish @nr_to_scan's are deposited in @nr_saved_scan,
- * until we collected @swap_cluster_max pages to scan.
- */
-static unsigned long nr_scan_try_batch(unsigned long nr_to_scan,
-                                       unsigned long *nr_saved_scan)
-{
-        unsigned long nr;
-
-        *nr_saved_scan += nr_to_scan;
-        nr = *nr_saved_scan;
-
-        if (nr >= SWAP_CLUSTER_MAX)
-                *nr_saved_scan = 0;
-        else
-                nr = 0;
-
-        return nr;
-}
-
 /*
  * Determine how aggressively the anon and file LRU lists should be
  * scanned. The relative value of each set of LRU lists is determined
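
For context, the nr_scan_try_batch() helper deleted above accumulated small per-priority scan requests until they reached SWAP_CLUSTER_MAX, returning 0 in the meantime. The following is a minimal userspace sketch of that batching behavior, not kernel code: the SWAP_CLUSTER_MAX value of 32 and the demo loop are assumptions for illustration only.

```c
#include <stdio.h>

#define SWAP_CLUSTER_MAX 32UL	/* assumed batch size, mirrors the kernel default */

/* Sketch of the batching logic this commit removes. */
static unsigned long nr_scan_try_batch(unsigned long nr_to_scan,
				       unsigned long *nr_saved_scan)
{
	unsigned long nr;

	*nr_saved_scan += nr_to_scan;
	nr = *nr_saved_scan;

	if (nr >= SWAP_CLUSTER_MAX)
		*nr_saved_scan = 0;	/* batch is big enough: emit it */
	else
		nr = 0;			/* too small: keep saving, scan nothing */

	return nr;
}

int main(void)
{
	unsigned long saved = 0;

	/* A small target asking to scan 5 pages per pass: the first few
	 * passes return 0, so nothing is reclaimed and priority drops. */
	for (int pass = 1; pass <= 8; pass++) {
		unsigned long got = nr_scan_try_batch(5, &saved);
		printf("pass %d: scan %lu (saved %lu)\n", pass, got, saved);
	}
	return 0;
}
```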
@@ -1755,6 +1735,22 @@ static void get_scan_count(struct zone *zone, struct scan_control *sc,
         u64 fraction[2], denominator;
         enum lru_list l;
         int noswap = 0;
+        int force_scan = 0;
+
+
+        anon = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_ANON) +
+                zone_nr_lru_pages(zone, sc, LRU_INACTIVE_ANON);
+        file = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_FILE) +
+                zone_nr_lru_pages(zone, sc, LRU_INACTIVE_FILE);
+
+        if (((anon + file) >> priority) < SWAP_CLUSTER_MAX) {
+                /* kswapd does zone balancing and needs to scan this zone */
+                if (scanning_global_lru(sc) && current_is_kswapd())
+                        force_scan = 1;
+                /* memcg may have a small limit and needs to avoid priority drops */
+                if (!scanning_global_lru(sc))
+                        force_scan = 1;
+        }
 
         /* If we have no swap space, do not bother scanning anon pages. */
         if (!sc->may_swap || (nr_swap_pages <= 0)) {
@@ -1765,11 +1761,6 @@ static void get_scan_count(struct zone *zone, struct scan_control *sc,
                 goto out;
         }
 
-        anon = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_ANON) +
-                zone_nr_lru_pages(zone, sc, LRU_INACTIVE_ANON);
-        file = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_FILE) +
-                zone_nr_lru_pages(zone, sc, LRU_INACTIVE_FILE);
-
         if (scanning_global_lru(sc)) {
                 free = zone_page_state(zone, NR_FREE_PAGES);
                 /* If we have very few page cache pages,
@@ -1836,8 +1827,23 @@ static void get_scan_count(struct zone *zone, struct scan_control *sc,
                         scan >>= priority;
                         scan = div64_u64(scan * fraction[file], denominator);
                 }
-                nr[l] = nr_scan_try_batch(scan,
-                                          &reclaim_stat->nr_saved_scan[l]);
+
+                /*
+                 * If the zone or memcg is small, nr[l] can be 0. That means
+                 * no scan at this priority and an immediate priority drop.
+                 * Global direct reclaim can simply visit the next zone, so
+                 * it tends not to have problems. Global kswapd, however,
+                 * does zone balancing and needs to scan a small amount
+                 * here. With memcg, a priority drop can cause big latency,
+                 * so it is better to scan a small amount too.
+                 * See force_scan above.
+                 */
+                if (!scan && force_scan) {
+                        if (file)
+                                scan = SWAP_CLUSTER_MAX;
+                        else if (!noswap)
+                                scan = SWAP_CLUSTER_MAX;
+                }
+                nr[l] = scan;
         }
 }
 
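
To see the effect of the new path, here is a userspace sketch of the force_scan floor added above: when the proportional share for an LRU list rounds to zero but force_scan was set (kswapd balancing this zone, or a memcg with a small limit), the scan target is bumped to SWAP_CLUSTER_MAX instead of silently dropping a priority level. The scan_target() helper, the right-shift-only split, and all values are illustrative assumptions, not kernel code.

```c
#include <stdio.h>

#define SWAP_CLUSTER_MAX 32UL	/* assumed, mirrors the kernel default */

/*
 * Hypothetical, simplified version of the tail of get_scan_count():
 * 'scan' is the per-list target; if it rounds to zero but force_scan
 * is set, fall back to one SWAP_CLUSTER_MAX batch.
 */
static unsigned long scan_target(unsigned long lru_pages, int priority,
				 int force_scan, int file, int noswap)
{
	unsigned long scan = lru_pages >> priority;	/* proportional share */

	if (!scan && force_scan) {
		if (file)
			scan = SWAP_CLUSTER_MAX;
		else if (!noswap)
			scan = SWAP_CLUSTER_MAX;
	}
	return scan;
}

int main(void)
{
	/* A memcg with only 2000 file pages at priority 12: 2000 >> 12 == 0,
	 * so without force_scan nothing is scanned at this priority. */
	printf("without force_scan: %lu\n", scan_target(2000, 12, 0, 1, 0));
	printf("with force_scan:    %lu\n", scan_target(2000, 12, 1, 1, 0));
	return 0;
}
```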
