
Commit

---
r: 292623
b: refs/heads/master
c: fe2c2a1
h: refs/heads/master
i:
  292621: 83ccb0b
  292619: c5e6f91
  292615: 4b5ac79
  292607: e60f2ba
v: v3
Rik van Riel authored and Linus Torvalds committed Mar 22, 2012
1 parent 0f83294 commit 04ea745
Showing 2 changed files with 31 additions and 18 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 67f96aa252e606cdf6c3cf1032952ec207ec0cf0
+refs/heads/master: fe2c2a106663130a5ab45cb0e3414b52df2fff0c
47 changes: 30 additions & 17 deletions trunk/mm/vmscan.c
@@ -1138,7 +1138,7 @@ int __isolate_lru_page(struct page *page, isolate_mode_t mode, int file)
  * @mz: The mem_cgroup_zone to pull pages from.
  * @dst: The temp list to put pages on to.
  * @nr_scanned: The number of pages that were scanned.
- * @order: The caller's attempted allocation order
+ * @sc: The scan_control struct for this reclaim session
  * @mode: One of the LRU isolation modes
  * @active: True [1] if isolating active pages
  * @file: True [1] if isolating file [!anon] pages
@@ -1147,8 +1147,8 @@ int __isolate_lru_page(struct page *page, isolate_mode_t mode, int file)
  */
 static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
                 struct mem_cgroup_zone *mz, struct list_head *dst,
-                unsigned long *nr_scanned, int order, isolate_mode_t mode,
-                int active, int file)
+                unsigned long *nr_scanned, struct scan_control *sc,
+                isolate_mode_t mode, int active, int file)
 {
         struct lruvec *lruvec;
         struct list_head *src;
@@ -1194,7 +1194,7 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
                         BUG();
                 }
 
-                if (!order)
+                if (!sc->order || !(sc->reclaim_mode & RECLAIM_MODE_LUMPYRECLAIM))
                         continue;
 
                 /*
@@ -1208,8 +1208,8 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
                  */
                 zone_id = page_zone_id(page);
                 page_pfn = page_to_pfn(page);
-                pfn = page_pfn & ~((1 << order) - 1);
-                end_pfn = pfn + (1 << order);
+                pfn = page_pfn & ~((1 << sc->order) - 1);
+                end_pfn = pfn + (1 << sc->order);
                 for (; pfn < end_pfn; pfn++) {
                         struct page *cursor_page;
 
@@ -1275,7 +1275,7 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
 
         *nr_scanned = scan;
 
-        trace_mm_vmscan_lru_isolate(order,
+        trace_mm_vmscan_lru_isolate(sc->order,
                         nr_to_scan, scan,
                         nr_taken,
                         nr_lumpy_taken, nr_lumpy_dirty, nr_lumpy_failed,
@@ -1533,9 +1533,8 @@ shrink_inactive_list(unsigned long nr_to_scan, struct mem_cgroup_zone *mz,
 
         spin_lock_irq(&zone->lru_lock);
 
-        nr_taken = isolate_lru_pages(nr_to_scan, mz, &page_list,
-                                     &nr_scanned, sc->order,
-                                     isolate_mode, 0, file);
+        nr_taken = isolate_lru_pages(nr_to_scan, mz, &page_list, &nr_scanned,
+                                     sc, isolate_mode, 0, file);
         if (global_reclaim(sc)) {
                 zone->pages_scanned += nr_scanned;
                 if (current_is_kswapd())
@@ -1711,8 +1710,7 @@ static void shrink_active_list(unsigned long nr_to_scan,
 
         spin_lock_irq(&zone->lru_lock);
 
-        nr_taken = isolate_lru_pages(nr_to_scan, mz, &l_hold,
-                                     &nr_scanned, sc->order,
+        nr_taken = isolate_lru_pages(nr_to_scan, mz, &l_hold, &nr_scanned, sc,
                                      isolate_mode, 1, file);
         if (global_reclaim(sc))
                 zone->pages_scanned += nr_scanned;
@@ -2758,7 +2756,7 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
                  */
                 for (i = 0; i <= end_zone; i++) {
                         struct zone *zone = pgdat->node_zones + i;
-                        int nr_slab;
+                        int nr_slab, testorder;
                         unsigned long balance_gap;
 
                         if (!populated_zone(zone))
@@ -2791,7 +2789,20 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
                                 (zone->present_pages +
                                         KSWAPD_ZONE_BALANCE_GAP_RATIO-1) /
                                 KSWAPD_ZONE_BALANCE_GAP_RATIO);
-                        if (!zone_watermark_ok_safe(zone, order,
+                        /*
+                         * Kswapd reclaims only single pages with compaction
+                         * enabled. Trying too hard to reclaim until contiguous
+                         * free pages have become available can hurt performance
+                         * by evicting too much useful data from memory.
+                         * Do not reclaim more than needed for compaction.
+                         */
+                        testorder = order;
+                        if (COMPACTION_BUILD && order &&
+                                compaction_suitable(zone, order) !=
+                                                        COMPACT_SKIPPED)
+                                testorder = 0;
+
+                        if (!zone_watermark_ok_safe(zone, testorder,
                                         high_wmark_pages(zone) + balance_gap,
                                         end_zone, 0)) {
                                 shrink_zone(priority, zone, &sc);
@@ -2820,7 +2831,7 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
                                 continue;
                         }
 
-                        if (!zone_watermark_ok_safe(zone, order,
+                        if (!zone_watermark_ok_safe(zone, testorder,
                                         high_wmark_pages(zone), end_zone, 0)) {
                                 all_zones_ok = 0;
                                 /*
@@ -2917,6 +2928,10 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
                 if (zone->all_unreclaimable && priority != DEF_PRIORITY)
                         continue;
 
+                /* Would compaction fail due to lack of free memory? */
+                if (compaction_suitable(zone, order) == COMPACT_SKIPPED)
+                        goto loop_again;
+
                 /* Confirm the zone is balanced for order-0 */
                 if (!zone_watermark_ok(zone, 0,
                                         high_wmark_pages(zone), 0, 0)) {
@@ -2926,8 +2941,6 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
 
                 /* If balanced, clear the congested flag */
                 zone_clear_flag(zone, ZONE_CONGESTED);
-                if (i <= *classzone_idx)
-                        balanced += zone->present_pages;
         }
 }
 
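Taken together, the diff makes two decisions: isolate_lru_pages() now receives the whole scan_control and only walks the surrounding page block when lumpy reclaim was actually requested, and balance_pgdat() checks zone watermarks at order 0 once compaction is built in and able to run, so kswapd stops reclaiming as soon as compaction can produce the higher-order page. The standalone C sketch below models only that control flow; the struct, constants, and compaction_suitable() stand-in are simplified assumptions for illustration, not the kernel's definitions.

/*
 * Standalone sketch of the two decisions added by this commit.
 * scan_control, RECLAIM_MODE_LUMPYRECLAIM, COMPACT_* and
 * compaction_suitable() are simplified stand-ins (assumptions),
 * not the kernel's versions.
 */
#include <stdbool.h>
#include <stdio.h>

#define COMPACT_SKIPPED           0    /* too little free memory to compact */
#define COMPACT_CONTINUE          1
#define RECLAIM_MODE_LUMPYRECLAIM (1u << 0)

struct scan_control {                  /* only the fields the patch reads */
        int order;
        unsigned int reclaim_mode;
};

/* stand-in: pretend compaction has enough free memory up to order 3 */
static int compaction_suitable(int order)
{
        return order > 3 ? COMPACT_SKIPPED : COMPACT_CONTINUE;
}

/*
 * Decision 1: isolate_lru_pages() scans the neighbouring page block
 * only when the caller asked for a high order *and* lumpy reclaim.
 */
static bool want_lumpy_isolation(const struct scan_control *sc)
{
        return sc->order && (sc->reclaim_mode & RECLAIM_MODE_LUMPYRECLAIM);
}

/*
 * Decision 2: when compaction is built in and would not be skipped,
 * kswapd tests the watermark at order 0 instead of the real order,
 * so it reclaims only single pages and lets compaction do the rest.
 */
static int balance_test_order(int order, bool compaction_build)
{
        if (compaction_build && order &&
            compaction_suitable(order) != COMPACT_SKIPPED)
                return 0;
        return order;
}

int main(void)
{
        struct scan_control sc = { .order = 2, .reclaim_mode = 0 };

        printf("lumpy isolation: %d\n", want_lumpy_isolation(&sc));               /* 0: lumpy not requested */
        printf("test order (compaction): %d\n", balance_test_order(2, true));     /* 0: reclaim single pages */
        printf("test order (no compaction): %d\n", balance_test_order(2, false)); /* 2: reclaim to the order */
        return 0;
}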
