Commit 7cd7f77
---
r: 207278
b: refs/heads/master
c: e31f369
h: refs/heads/master
v: v3
Wu Fengguang authored and Linus Torvalds committed Aug 10, 2010
1 parent f687e69 commit 7cd7f77
Showing 2 changed files with 44 additions and 9 deletions.
[refs]: 2 changes (1 addition, 1 deletion)
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 51980ac9e72fb5f22c81b7798d65b691125d70ee
+refs/heads/master: e31f3698cd3499e676f6b0ea12e3528f569c4fa3
trunk/mm/vmscan.c: 51 changes (43 additions, 8 deletions)
@@ -1233,6 +1233,47 @@ static noinline_for_stack void update_isolated_counts(struct zone *zone,
 	reclaim_stat->recent_scanned[1] += *nr_file;
 }
 
+/*
+ * Returns true if the caller should wait to clean dirty/writeback pages.
+ *
+ * If we are direct reclaiming for contiguous pages and we do not reclaim
+ * everything in the list, try again and wait for writeback IO to complete.
+ * This will stall high-order allocations noticeably. Only do that when we
+ * really need to free the pages under high memory pressure.
+ */
+static inline bool should_reclaim_stall(unsigned long nr_taken,
+					unsigned long nr_freed,
+					int priority,
+					struct scan_control *sc)
+{
+	int lumpy_stall_priority;
+
+	/* kswapd should not stall on sync IO */
+	if (current_is_kswapd())
+		return false;
+
+	/* Only stall on lumpy reclaim */
+	if (!sc->lumpy_reclaim_mode)
+		return false;
+
+	/* If we have reclaimed everything on the isolated list, no stall */
+	if (nr_freed == nr_taken)
+		return false;
+
+	/*
+	 * For high-order allocations, there are two stall thresholds.
+	 * High-cost allocations stall immediately whereas lower
+	 * order allocations such as stacks require the scanning
+	 * priority to be much higher before stalling.
+	 */
+	if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
+		lumpy_stall_priority = DEF_PRIORITY;
+	else
+		lumpy_stall_priority = DEF_PRIORITY / 3;
+
+	return priority <= lumpy_stall_priority;
+}
+
 /*
  * shrink_inactive_list() is a helper for shrink_zone(). It returns the number
  * of reclaimed pages
@@ -1298,14 +1339,8 @@ shrink_inactive_list(unsigned long nr_to_scan, struct zone *zone,
 
 	nr_reclaimed = shrink_page_list(&page_list, sc, PAGEOUT_IO_ASYNC);
 
-	/*
-	 * If we are direct reclaiming for contiguous pages and we do
-	 * not reclaim everything in the list, try again and wait
-	 * for IO to complete. This will stall high-order allocations
-	 * but that should be acceptable to the caller
-	 */
-	if (nr_reclaimed < nr_taken && !current_is_kswapd() &&
-			sc->lumpy_reclaim_mode) {
+	/* Check if we should synchronously wait for writeback */
+	if (should_reclaim_stall(nr_taken, nr_reclaimed, priority, sc)) {
 		congestion_wait(BLK_RW_ASYNC, HZ/10);
 
 		/*
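
To make the two stall thresholds concrete, here is a minimal userspace sketch of the predicate above, not kernel code: it assumes DEF_PRIORITY = 12 and PAGE_ALLOC_COSTLY_ORDER = 3 (the values those kernel macros had in this era), reduces scan_control to the two fields the check reads, and models kswapd as a plain flag.

/*
 * Minimal userspace sketch of the stall decision, NOT kernel code.
 * Assumptions: DEF_PRIORITY = 12 and PAGE_ALLOC_COSTLY_ORDER = 3, the
 * values those kernel macros had in this era; scan_control is cut down
 * to the two fields the predicate reads; kswapd is modelled as a flag.
 */
#include <stdbool.h>
#include <stdio.h>

#define DEF_PRIORITY		12
#define PAGE_ALLOC_COSTLY_ORDER	3

struct scan_control {
	int order;			/* allocation order being reclaimed for */
	bool lumpy_reclaim_mode;	/* contiguous (lumpy) reclaim active? */
};

static bool should_reclaim_stall(unsigned long nr_taken,
				 unsigned long nr_freed,
				 int priority, bool is_kswapd,
				 const struct scan_control *sc)
{
	int lumpy_stall_priority;

	if (is_kswapd)			/* kswapd never stalls on sync IO */
		return false;
	if (!sc->lumpy_reclaim_mode)	/* only lumpy reclaim stalls */
		return false;
	if (nr_freed == nr_taken)	/* reclaimed everything: no stall */
		return false;

	/* Costly orders stall at any priority; cheap orders only once the
	 * scan priority has dropped to DEF_PRIORITY / 3 = 4 or below. */
	if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
		lumpy_stall_priority = DEF_PRIORITY;
	else
		lumpy_stall_priority = DEF_PRIORITY / 3;

	return priority <= lumpy_stall_priority;
}

int main(void)
{
	struct scan_control costly = { .order = 4, .lumpy_reclaim_mode = true };
	struct scan_control cheap  = { .order = 2, .lumpy_reclaim_mode = true };

	/* priority starts at DEF_PRIORITY and falls as pressure rises */
	for (int prio = DEF_PRIORITY; prio >= 0; prio -= 4)
		printf("priority %2d: order-4 stalls=%d, order-2 stalls=%d\n",
		       prio,
		       should_reclaim_stall(32, 16, prio, false, &costly),
		       should_reclaim_stall(32, 16, prio, false, &cheap));
	return 0;
}

With pages left unreclaimed (16 of 32 freed), the order-4 request is willing to wait for writeback at every priority, while the order-2 request only starts waiting once the scan priority has fallen to 4, matching the "two stall thresholds" comment in the patch.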
