From 6e5029a1a9985a511649e19eab0a0656f0363489 Mon Sep 17 00:00:00 2001
From: Rik van Riel
Date: Thu, 7 Feb 2008 00:14:08 -0800
Subject: [PATCH]

--- yaml ---
r: 83775
b: refs/heads/master
c: f1a9ee758de7de1e040de849fdef46e6802ea117
h: refs/heads/master
i:
  83773: a80e0d1e1eb1d161f562ef01fcd744ff4d4a4721
  83771: af6f2c3e28f594847e86b87a018103ee1fb48cf3
  83767: 36763e7fbb7dce464c7d77dd5f2289086ccda768
  83759: c3ed69b5ab607265757d92e65c28771b59a9040a
  83743: 6083148099cf8c1a2ca94349169cd8aa2bb7b838
  83711: c95b5eb4dbc2e17e1bc0e5e48148bce1a11b82c5
v: v3
---
 [refs]            |  2 +-
 trunk/mm/vmscan.c | 27 ++++++++++++++++++++++-----
 2 files changed, 23 insertions(+), 6 deletions(-)

diff --git a/[refs] b/[refs]
index 50a444e69233..f8c1a092d570 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: fef1bdd68c81b71882ccb6f47c70980a03182063
+refs/heads/master: f1a9ee758de7de1e040de849fdef46e6802ea117
diff --git a/trunk/mm/vmscan.c b/trunk/mm/vmscan.c
index b7d868cbca09..1b85217b528c 100644
--- a/trunk/mm/vmscan.c
+++ b/trunk/mm/vmscan.c
@@ -70,6 +70,13 @@ struct scan_control {
 
 	int order;
 
+	/*
+	 * Pages that have (or should have) IO pending. If we run into
+	 * a lot of these, we're better off waiting a little for IO to
+	 * finish rather than scanning more pages in the VM.
+	 */
+	int nr_io_pages;
+
 	/* Which cgroup do we reclaim from */
 	struct mem_cgroup *mem_cgroup;
 
@@ -499,8 +506,10 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 			 */
 			if (sync_writeback == PAGEOUT_IO_SYNC && may_enter_fs)
 				wait_on_page_writeback(page);
-			else
+			else {
+				sc->nr_io_pages++;
 				goto keep_locked;
+			}
 		}
 
 		referenced = page_referenced(page, 1, sc->mem_cgroup);
@@ -539,8 +548,10 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 		if (PageDirty(page)) {
 			if (sc->order <= PAGE_ALLOC_COSTLY_ORDER && referenced)
 				goto keep_locked;
-			if (!may_enter_fs)
+			if (!may_enter_fs) {
+				sc->nr_io_pages++;
 				goto keep_locked;
+			}
 			if (!sc->may_writepage)
 				goto keep_locked;
 
@@ -551,8 +562,10 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 			case PAGE_ACTIVATE:
 				goto activate_locked;
 			case PAGE_SUCCESS:
-				if (PageWriteback(page) || PageDirty(page))
+				if (PageWriteback(page) || PageDirty(page)) {
+					sc->nr_io_pages++;
 					goto keep;
+				}
 				/*
 				 * A synchronous write - probably a ramdisk. Go
 				 * ahead and try to reclaim the page.
@@ -1259,6 +1272,7 @@ static unsigned long do_try_to_free_pages(struct zone **zones, gfp_t gfp_mask,
 
 	for (priority = DEF_PRIORITY; priority >= 0; priority--) {
 		sc->nr_scanned = 0;
+		sc->nr_io_pages = 0;
 		if (!priority)
 			disable_swap_token();
 		nr_reclaimed += shrink_zones(priority, zones, sc);
@@ -1292,7 +1306,8 @@ static unsigned long do_try_to_free_pages(struct zone **zones, gfp_t gfp_mask,
 		}
 
 		/* Take a nap, wait for some writeback to complete */
-		if (sc->nr_scanned && priority < DEF_PRIORITY - 2)
+		if (sc->nr_scanned && priority < DEF_PRIORITY - 2 &&
+				sc->nr_io_pages > sc->swap_cluster_max)
 			congestion_wait(WRITE, HZ/10);
 	}
 	/* top priority shrink_caches still had more to do? don't OOM, then */
@@ -1424,6 +1439,7 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order)
 		if (!priority)
 			disable_swap_token();
 
+		sc.nr_io_pages = 0;
 		all_zones_ok = 1;
 
 		/*
@@ -1516,7 +1532,8 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order)
 		 * OK, kswapd is getting into trouble. Take a nap, then take
 		 * another pass across the zones.
 		 */
-		if (total_scanned && priority < DEF_PRIORITY - 2)
+		if (total_scanned && priority < DEF_PRIORITY - 2 &&
+				sc.nr_io_pages > sc.swap_cluster_max)
 			congestion_wait(WRITE, HZ/10);
 
 		/*
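
Note: as an illustration of the logic this patch adds (count the pages skipped because their IO is still pending, and only sleep in congestion_wait() when that count exceeds swap_cluster_max), here is a minimal userspace C sketch. It is not kernel code: the names page_sim, reclaim_state_sim, note_io_pending() and should_throttle() are hypothetical stand-ins for struct page, struct scan_control and the checks in do_try_to_free_pages()/balance_pgdat().

/*
 * Minimal userspace sketch (not kernel code) of the throttling decision:
 * only wait for congestion when enough of the skipped pages actually
 * have IO pending.  All names here are illustrative only.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct page_sim {
	bool writeback;			/* stands in for PageWriteback() */
	bool dirty;			/* stands in for PageDirty() */
};

struct reclaim_state_sim {
	unsigned long nr_scanned;
	unsigned long nr_io_pages;	/* pages kept back because IO is pending */
	unsigned long swap_cluster_max;	/* threshold, as in struct scan_control */
};

/* Record a page that was kept back because its IO has not finished yet. */
static void note_io_pending(struct reclaim_state_sim *sc)
{
	sc->nr_io_pages++;
}

/* Scan a list of pages; pages under writeback or still dirty are skipped. */
static void scan_pages(struct reclaim_state_sim *sc,
		       const struct page_sim *pages, size_t n)
{
	for (size_t i = 0; i < n; i++) {
		sc->nr_scanned++;
		if (pages[i].writeback || pages[i].dirty)
			note_io_pending(sc);
		/* else: the page would be reclaimed here */
	}
}

/*
 * The patched condition: nap in congestion_wait() only when the number of
 * IO-pending pages exceeds swap_cluster_max; otherwise keep scanning,
 * since waiting would not make more pages reclaimable.
 */
static bool should_throttle(const struct reclaim_state_sim *sc,
			    int priority, int def_priority)
{
	return sc->nr_scanned && priority < def_priority - 2 &&
	       sc->nr_io_pages > sc->swap_cluster_max;
}

int main(void)
{
	struct page_sim pages[] = {
		{ .writeback = true }, { .dirty = true }, { 0 }, { 0 },
	};
	struct reclaim_state_sim sc = { .swap_cluster_max = 1 };

	scan_pages(&sc, pages, sizeof(pages) / sizeof(pages[0]));
	printf("io pending: %lu, throttle: %s\n", sc.nr_io_pages,
	       should_throttle(&sc, 0, 12) ? "yes" : "no");
	return 0;
}

The threshold reflects the comment added to struct scan_control: sleeping only helps when a meaningful amount of IO is actually outstanding; with few or no IO-pending pages, the scanner is better off making another pass instead of waiting.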