Commit

---
r: 198255
b: refs/heads/master
c: 5f53e76
h: refs/heads/master
i:
  198253: 1ac9666
  198251: 5e9a900
  198247: 8fe06cc
  198239: d83e277
v: v3
KOSAKI Motohiro authored and Linus Torvalds committed May 25, 2010
1 parent 723d76c commit eca6786
Showing 2 changed files with 27 additions and 16 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: bf8abe8b926f7546eb763fd2a088fe461dde6317
+refs/heads/master: 5f53e76299ceebd68bdf9495e8ff80db77711236
41 changes: 26 additions & 15 deletions trunk/mm/vmscan.c
@@ -77,6 +77,12 @@ struct scan_control {
 
 	int order;
 
+	/*
+	 * Intend to reclaim enough contiguous memory rather than just a
+	 * sufficient amount of memory; i.e., the mode for high order allocation.
+	 */
+	bool lumpy_reclaim_mode;
+
 	/* Which cgroup do we reclaim from */
 	struct mem_cgroup *mem_cgroup;
 
@@ -575,7 +581,7 @@ static enum page_references page_check_references(struct page *page,
 	referenced_page = TestClearPageReferenced(page);
 
 	/* Lumpy reclaim - ignore references */
-	if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
+	if (sc->lumpy_reclaim_mode)
 		return PAGEREF_RECLAIM;
 
 	/*
@@ -1125,7 +1131,6 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
 	unsigned long nr_scanned = 0;
 	unsigned long nr_reclaimed = 0;
 	struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
-	int lumpy_reclaim = 0;
 
 	while (unlikely(too_many_isolated(zone, file, sc))) {
 		congestion_wait(BLK_RW_ASYNC, HZ/10);
@@ -1135,17 +1140,6 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
 		return SWAP_CLUSTER_MAX;
 	}
 
-	/*
-	 * If we need a large contiguous chunk of memory, or have
-	 * trouble getting a small set of contiguous pages, we
-	 * will reclaim both active and inactive pages.
-	 *
-	 * We use the same threshold as pageout congestion_wait below.
-	 */
-	if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
-		lumpy_reclaim = 1;
-	else if (sc->order && priority < DEF_PRIORITY - 2)
-		lumpy_reclaim = 1;
 
 	pagevec_init(&pvec, 1);

@@ -1158,7 +1152,7 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
 		unsigned long nr_freed;
 		unsigned long nr_active;
 		unsigned int count[NR_LRU_LISTS] = { 0, };
-		int mode = lumpy_reclaim ? ISOLATE_BOTH : ISOLATE_INACTIVE;
+		int mode = sc->lumpy_reclaim_mode ? ISOLATE_BOTH : ISOLATE_INACTIVE;
 		unsigned long nr_anon;
 		unsigned long nr_file;

@@ -1211,7 +1205,7 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
 		 * but that should be acceptable to the caller
 		 */
 		if (nr_freed < nr_taken && !current_is_kswapd() &&
-		    lumpy_reclaim) {
+		    sc->lumpy_reclaim_mode) {
			congestion_wait(BLK_RW_ASYNC, HZ/10);
 
			/*
@@ -1639,6 +1633,21 @@ static void get_scan_count(struct zone *zone, struct scan_control *sc,
 	}
 }
 
+static void set_lumpy_reclaim_mode(int priority, struct scan_control *sc)
+{
+	/*
+	 * If we need a large contiguous chunk of memory, or have
+	 * trouble getting a small set of contiguous pages, we
+	 * will reclaim both active and inactive pages.
+	 */
+	if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
+		sc->lumpy_reclaim_mode = 1;
+	else if (sc->order && priority < DEF_PRIORITY - 2)
+		sc->lumpy_reclaim_mode = 1;
+	else
+		sc->lumpy_reclaim_mode = 0;
+}
+
 /*
  * This is a basic per-zone page freer.  Used by both kswapd and direct reclaim.
  */
@@ -1653,6 +1662,8 @@ static void shrink_zone(int priority, struct zone *zone,
 
 	get_scan_count(zone, sc, nr, priority);
 
+	set_lumpy_reclaim_mode(priority, sc);
+
 	while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
 				nr[LRU_INACTIVE_FILE]) {
 		for_each_evictable_lru(l) {
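The decision policy itself is small enough to exercise outside the kernel. Below is a minimal user-space C sketch of the logic that set_lumpy_reclaim_mode() centralizes; scan_control_stub is a hypothetical stand-in for the two fields of the kernel's struct scan_control that the policy reads, and PAGE_ALLOC_COSTLY_ORDER (3) and DEF_PRIORITY (12) are redefined locally with the values they had in kernels of this era.

#include <stdbool.h>
#include <stdio.h>

#define PAGE_ALLOC_COSTLY_ORDER 3   /* value from include/linux/mmzone.h */
#define DEF_PRIORITY            12  /* default reclaim priority at the time */

/* Hypothetical stand-in for the scan_control fields the policy uses. */
struct scan_control_stub {
	int order;                /* allocation order reclaim is working for */
	bool lumpy_reclaim_mode;  /* the field this commit introduces */
};

/* Same three-way decision as the kernel's set_lumpy_reclaim_mode() above. */
static void set_lumpy_reclaim_mode(int priority, struct scan_control_stub *sc)
{
	if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
		sc->lumpy_reclaim_mode = true;   /* costly order: lumpy from the start */
	else if (sc->order && priority < DEF_PRIORITY - 2)
		sc->lumpy_reclaim_mode = true;   /* low order, but reclaim is struggling */
	else
		sc->lumpy_reclaim_mode = false;  /* order-0 or easy reclaim: plain LRU */
}

int main(void)
{
	struct scan_control_stub sc = { .order = 2, .lumpy_reclaim_mode = false };

	/* Walk priority downward, the way the reclaim loop retries harder. */
	for (int priority = DEF_PRIORITY; priority >= 0; priority--) {
		set_lumpy_reclaim_mode(priority, &sc);
		printf("order=%d priority=%2d lumpy=%d\n",
		       sc.order, priority, (int)sc.lumpy_reclaim_mode);
	}
	return 0;
}

For an order-2 request the mode only switches on once priority drops below DEF_PRIORITY - 2, i.e. after a couple of passes have failed to find contiguous pages, while orders above PAGE_ALLOC_COSTLY_ORDER get lumpy reclaim on the first pass. That is the same asymmetry the old open-coded check in shrink_inactive_list() expressed; moving it into scan_control lets page_check_references() honor low-order lumpy reclaim as well.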
