---
yaml
---
r: 272129
b: refs/heads/master
c: 92df3a7
h: refs/heads/master
i:
  272127: f5e194b
v: v3
Mel Gorman authored and Linus Torvalds committed Nov 1, 2011
1 parent 4016423 commit 4c1a6a5
Showing 2 changed files with 40 additions and 4 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: f84f6e2b0868f198f97a32ba503d6f9f319a249a
+refs/heads/master: 92df3a723f84cdf8133560bbff950a7a99e92bc9
42 changes: 39 additions & 3 deletions trunk/mm/vmscan.c
@@ -751,14 +751,17 @@ static noinline_for_stack void free_page_list(struct list_head *free_pages)
 static unsigned long shrink_page_list(struct list_head *page_list,
                                       struct zone *zone,
                                       struct scan_control *sc,
-                                      int priority)
+                                      int priority,
+                                      unsigned long *ret_nr_dirty,
+                                      unsigned long *ret_nr_writeback)
 {
         LIST_HEAD(ret_pages);
         LIST_HEAD(free_pages);
         int pgactivate = 0;
         unsigned long nr_dirty = 0;
         unsigned long nr_congested = 0;
         unsigned long nr_reclaimed = 0;
+        unsigned long nr_writeback = 0;

         cond_resched();

@@ -795,6 +798,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
                     (PageSwapCache(page) && (sc->gfp_mask & __GFP_IO));

                 if (PageWriteback(page)) {
+                        nr_writeback++;
                         /*
                          * Synchronous reclaim cannot queue pages for
                          * writeback due to the possibility of stack overflow
@@ -1000,6 +1004,8 @@ static unsigned long shrink_page_list(struct list_head *page_list,

         list_splice(&ret_pages, page_list);
         count_vm_events(PGACTIVATE, pgactivate);
+        *ret_nr_dirty += nr_dirty;
+        *ret_nr_writeback += nr_writeback;
         return nr_reclaimed;
 }

@@ -1460,6 +1466,8 @@ shrink_inactive_list(unsigned long nr_to_scan, struct zone *zone,
         unsigned long nr_taken;
         unsigned long nr_anon;
         unsigned long nr_file;
+        unsigned long nr_dirty = 0;
+        unsigned long nr_writeback = 0;
         isolate_mode_t reclaim_mode = ISOLATE_INACTIVE;

         while (unlikely(too_many_isolated(zone, file, sc))) {
@@ -1512,12 +1520,14 @@ shrink_inactive_list(unsigned long nr_to_scan, struct zone *zone,

         spin_unlock_irq(&zone->lru_lock);

-        nr_reclaimed = shrink_page_list(&page_list, zone, sc, priority);
+        nr_reclaimed = shrink_page_list(&page_list, zone, sc, priority,
+                                        &nr_dirty, &nr_writeback);

         /* Check if we should synchronously wait for writeback */
         if (should_reclaim_stall(nr_taken, nr_reclaimed, priority, sc)) {
                 set_reclaim_mode(priority, sc, true);
-                nr_reclaimed += shrink_page_list(&page_list, zone, sc, priority);
+                nr_reclaimed += shrink_page_list(&page_list, zone, sc,
+                                        priority, &nr_dirty, &nr_writeback);
         }

         local_irq_disable();
@@ -1527,6 +1537,32 @@ shrink_inactive_list(unsigned long nr_to_scan, struct zone *zone,

         putback_lru_pages(zone, sc, nr_anon, nr_file, &page_list);

+        /*
+         * If reclaim is isolating dirty pages under writeback, it implies
+         * that the long-lived page allocation rate is exceeding the page
+         * laundering rate. Either the global limits are not being effective
+         * at throttling processes due to the page distribution throughout
+         * zones or there is heavy usage of a slow backing device. The
+         * only option is to throttle from reclaim context which is not ideal
+         * as there is no guarantee the dirtying process is throttled in the
+         * same way balance_dirty_pages() manages.
+         *
+         * This scales the number of dirty pages that must be under writeback
+         * before throttling depending on priority. It is a simple backoff
+         * function that has the most effect in the range DEF_PRIORITY to
+         * DEF_PRIORITY-2, which is the priority at which reclaim is
+         * considered to be in trouble.
+         *
+         * DEF_PRIORITY   100% isolated pages must be PageWriteback to throttle
+         * DEF_PRIORITY-1  50% must be PageWriteback
+         * DEF_PRIORITY-2  25% must be PageWriteback, kswapd in trouble
+         * ...
+         * DEF_PRIORITY-6 For SWAP_CLUSTER_MAX isolated pages, throttle if any
+         *                isolated page is PageWriteback
+         */
+        if (nr_writeback && nr_writeback >= (nr_taken >> (DEF_PRIORITY-priority)))
+                wait_iff_congested(zone, BLK_RW_ASYNC, HZ/10);
+
         trace_mm_vmscan_lru_shrink_inactive(zone->zone_pgdat->node_id,
                 zone_idx(zone),
                 nr_scanned, nr_reclaimed,
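The condition added at the end of the last hunk throttles reclaim in wait_iff_congested() once the number of isolated pages still under writeback crosses a priority-scaled threshold. As a quick illustration of that backoff, here is a minimal user-space sketch (not part of the commit) that prints the threshold implied by nr_taken >> (DEF_PRIORITY - priority) for a single SWAP_CLUSTER_MAX-sized isolation batch; DEF_PRIORITY is 12 and SWAP_CLUSTER_MAX is 32 in the mm headers of this kernel generation, and the leading nr_writeback && test in the patch means at least one writeback page is always required.

#include <stdio.h>

/* Values from the kernel headers of this era; illustration only. */
#define DEF_PRIORITY            12
#define SWAP_CLUSTER_MAX        32

int main(void)
{
        unsigned long nr_taken = SWAP_CLUSTER_MAX;      /* one isolation batch */
        int priority;

        for (priority = DEF_PRIORITY; priority >= DEF_PRIORITY - 6; priority--) {
                /* Same expression as the patch: halve the requirement per priority step. */
                unsigned long threshold = nr_taken >> (DEF_PRIORITY - priority);

                printf("priority %2d: throttle once %2lu of %lu isolated pages are under writeback\n",
                       priority, threshold, nr_taken);
        }
        return 0;
}

At DEF_PRIORITY the whole batch must be under writeback before reclaim sleeps for HZ/10; each lower priority halves that requirement, and by DEF_PRIORITY-6 the shift drives the threshold to zero, so any writeback page at all triggers the wait, matching the table in the comment above.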
