---
r: 64233
b: refs/heads/master
c: c661b07
h: refs/heads/master
i:
  64231: 40d30e6
v: v3
Andy Whitcroft authored and Linus Torvalds committed Aug 23, 2007
1 parent 30a579d commit 5bccf15
Showing 2 changed files with 61 additions and 9 deletions.
[refs]: 2 changes, 1 addition & 1 deletion
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: e9187bdcbbc06090757d565a3077e1b0ecab23d2
+refs/heads/master: c661b078fd62abe06fd11fab4ac5e4eeafe26b6d
trunk/mm/vmscan.c: 68 changes, 60 additions & 8 deletions
@@ -271,6 +271,12 @@ static void handle_write_error(struct address_space *mapping,
 	unlock_page(page);
 }
 
+/* Request for sync pageout. */
+enum pageout_io {
+	PAGEOUT_IO_ASYNC,
+	PAGEOUT_IO_SYNC,
+};
+
 /* possible outcome of pageout() */
 typedef enum {
 	/* failed to write page out, page is locked */
@@ -287,7 +293,8 @@ typedef enum {
  * pageout is called by shrink_page_list() for each dirty page.
  * Calls ->writepage().
  */
-static pageout_t pageout(struct page *page, struct address_space *mapping)
+static pageout_t pageout(struct page *page, struct address_space *mapping,
+						enum pageout_io sync_writeback)
 {
 	/*
 	 * If the page is dirty, only perform writeback if that write
@@ -346,6 +353,15 @@ static pageout_t pageout(struct page *page, struct address_space *mapping)
 			ClearPageReclaim(page);
 			return PAGE_ACTIVATE;
 		}
+
+		/*
+		 * Wait on writeback if requested to. This happens when
+		 * direct reclaiming a large contiguous area and the
+		 * first attempt to free a range of pages fails.
+		 */
+		if (PageWriteback(page) && sync_writeback == PAGEOUT_IO_SYNC)
+			wait_on_page_writeback(page);
+
 		if (!PageWriteback(page)) {
 			/* synchronous write or broken a_ops? */
 			ClearPageReclaim(page);
@@ -423,7 +439,8 @@ int remove_mapping(struct address_space *mapping, struct page *page)
  * shrink_page_list() returns the number of reclaimed pages
  */
 static unsigned long shrink_page_list(struct list_head *page_list,
-					struct scan_control *sc)
+					struct scan_control *sc,
+					enum pageout_io sync_writeback)
 {
 	LIST_HEAD(ret_pages);
 	struct pagevec freed_pvec;
@@ -458,8 +475,23 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 		if (page_mapped(page) || PageSwapCache(page))
 			sc->nr_scanned++;
 
-		if (PageWriteback(page))
-			goto keep_locked;
+		may_enter_fs = (sc->gfp_mask & __GFP_FS) ||
+			(PageSwapCache(page) && (sc->gfp_mask & __GFP_IO));
+
+		if (PageWriteback(page)) {
+			/*
+			 * Synchronous reclaim is performed in two passes,
+			 * first an asynchronous pass over the list to
+			 * start parallel writeback, and a second synchronous
+			 * pass to wait for the IO to complete. Wait here
+			 * for any page for which writeback has already
+			 * started.
+			 */
+			if (sync_writeback == PAGEOUT_IO_SYNC && may_enter_fs)
+				wait_on_page_writeback(page);
+			else
+				goto keep_locked;
+		}
 
 		referenced = page_referenced(page, 1);
 		/* In active use or really unfreeable? Activate it. */
@@ -478,8 +510,6 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 #endif /* CONFIG_SWAP */
 
 		mapping = page_mapping(page);
-		may_enter_fs = (sc->gfp_mask & __GFP_FS) ||
-			(PageSwapCache(page) && (sc->gfp_mask & __GFP_IO));
 
 		/*
 		 * The page is mapped into the page tables of one or more
@@ -505,7 +535,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 				goto keep_locked;
 
 			/* Page is dirty, try to write it out here */
-			switch(pageout(page, mapping)) {
+			switch (pageout(page, mapping, sync_writeback)) {
 			case PAGE_KEEP:
 				goto keep_locked;
 			case PAGE_ACTIVATE:
@@ -786,7 +816,29 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
 		spin_unlock_irq(&zone->lru_lock);
 
 		nr_scanned += nr_scan;
-		nr_freed = shrink_page_list(&page_list, sc);
+		nr_freed = shrink_page_list(&page_list, sc, PAGEOUT_IO_ASYNC);
+
+		/*
+		 * If we are direct reclaiming for contiguous pages and we do
+		 * not reclaim everything in the list, try again and wait
+		 * for IO to complete. This will stall high-order allocations
+		 * but that should be acceptable to the caller
+		 */
+		if (nr_freed < nr_taken && !current_is_kswapd() &&
+					sc->order > PAGE_ALLOC_COSTLY_ORDER) {
+			congestion_wait(WRITE, HZ/10);
+
+			/*
+			 * The attempt at page out may have made some
+			 * of the pages active, mark them inactive again.
+			 */
+			nr_active = clear_active_flags(&page_list);
+			count_vm_events(PGDEACTIVATE, nr_active);
+
+			nr_freed += shrink_page_list(&page_list, sc,
+							PAGEOUT_IO_SYNC);
+		}
+
 		nr_reclaimed += nr_freed;
 		local_irq_disable();
 		if (current_is_kswapd()) {
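The new control flow is easier to see outside diff context. Below is a minimal, self-contained sketch of the two-pass reclaim pattern this commit adds to shrink_inactive_list(); it is an illustration, not kernel code. The names page_batch, free_pages_async, free_pages_sync and throttle_briefly are hypothetical stand-ins for shrink_page_list() called with PAGEOUT_IO_ASYNC / PAGEOUT_IO_SYNC and for congestion_wait(WRITE, HZ/10).

/*
 * Standalone sketch (assumed names, not kernel APIs) of the two-pass
 * reclaim flow: an asynchronous pass starts writeback on the batch, and
 * only a direct reclaimer servicing a costly high-order allocation makes
 * a second, synchronous pass that waits for that I/O to finish.
 */
#include <stdio.h>

struct page_batch {
	unsigned long nr_taken;   /* pages isolated from the inactive list */
	unsigned long nr_clean;   /* pages freeable without waiting for I/O */
};

/* Async pass: start writeback, free only pages that are already clean. */
static unsigned long free_pages_async(struct page_batch *b)
{
	return b->nr_clean;
}

/* Sync pass: wait for the writeback started above, then free the rest. */
static unsigned long free_pages_sync(struct page_batch *b)
{
	return b->nr_taken - b->nr_clean;
}

/* Stand-in for congestion_wait(WRITE, HZ/10): give the I/O time to run. */
static void throttle_briefly(void)
{
}

static unsigned long reclaim_batch(struct page_batch *b, int is_kswapd,
				   int order, int costly_order)
{
	/* First pass is always asynchronous so writeback runs in parallel. */
	unsigned long freed = free_pages_async(b);

	/*
	 * Only a direct reclaimer chasing a costly (large contiguous)
	 * allocation is allowed to stall for I/O; kswapd never waits here.
	 */
	if (freed < b->nr_taken && !is_kswapd && order > costly_order) {
		throttle_briefly();
		freed += free_pages_sync(b);
	}
	return freed;
}

int main(void)
{
	struct page_batch b = { .nr_taken = 32, .nr_clean = 10 };

	/* Direct reclaim for an order-4 allocation, costly order 3. */
	printf("freed %lu of %lu pages\n",
	       reclaim_batch(&b, 0, 4, 3), b.nr_taken);
	return 0;
}

The design point, per the in-line comments of the patch, is that the asynchronous pass gets writeback started on many pages at once, while only costly high-order direct reclaim pays the latency of the waiting pass, leaving kswapd and order-0 allocations unaffected.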
