Skip to content

Commit

Permalink
readahead: convert filemap invocations
Browse files Browse the repository at this point in the history
Convert filemap reads to use on-demand readahead.

The new call scheme is to:
- call readahead on a non-cached page
- call readahead on a look-ahead page (one marked with PG_readahead)
- update prev_index when finished with the read request

Signed-off-by: Fengguang Wu <wfg@mail.ustc.edu.cn>
Cc: Steven Pratt <slpratt@austin.ibm.com>
Cc: Ram Pai <linuxram@us.ibm.com>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
  • Loading branch information
Fengguang Wu authored and Linus Torvalds committed Jul 19, 2007
1 parent 122a21d commit 3ea89ee
Showing 1 changed file with 31 additions and 20 deletions.
51 changes: 31 additions & 20 deletions mm/filemap.c
Original file line number Diff line number Diff line change
Expand Up @@ -891,15 +891,20 @@ void do_generic_mapping_read(struct address_space *mapping,
unsigned long nr, ret;

cond_resched();
if (index == next_index)
next_index = page_cache_readahead(mapping, &ra, filp,
index, last_index - index);

find_page:
page = find_get_page(mapping, index);
if (unlikely(page == NULL)) {
handle_ra_miss(mapping, &ra, index);
goto no_cached_page;
if (!page) {
page_cache_readahead_ondemand(mapping,
&ra, filp, page,
index, last_index - index);
page = find_get_page(mapping, index);
if (unlikely(page == NULL))
goto no_cached_page;
}
if (PageReadahead(page)) {
page_cache_readahead_ondemand(mapping,
&ra, filp, page,
index, last_index - index);
}
if (!PageUptodate(page))
goto page_not_up_to_date;
Expand Down Expand Up @@ -1051,6 +1056,7 @@ void do_generic_mapping_read(struct address_space *mapping,

out:
*_ra = ra;
_ra->prev_index = prev_index;

*ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset;
if (cached_page)
Expand Down Expand Up @@ -1332,27 +1338,31 @@ int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
if (VM_RandomReadHint(vma))
goto no_cached_page;

/*
* The readahead code wants to be told about each and every page
* so it can build and shrink its windows appropriately
*
* For sequential accesses, we use the generic readahead logic.
*/
if (VM_SequentialReadHint(vma))
page_cache_readahead(mapping, ra, file, vmf->pgoff, 1);

/*
* Do we have something in the page cache already?
*/
retry_find:
page = find_lock_page(mapping, vmf->pgoff);
/*
* For sequential accesses, we use the generic readahead logic.
*/
if (VM_SequentialReadHint(vma)) {
if (!page) {
page_cache_readahead_ondemand(mapping, ra, file, page,
vmf->pgoff, 1);
page = find_lock_page(mapping, vmf->pgoff);
if (!page)
goto no_cached_page;
}
if (PageReadahead(page)) {
page_cache_readahead_ondemand(mapping, ra, file, page,
vmf->pgoff, 1);
}
}

if (!page) {
unsigned long ra_pages;

if (VM_SequentialReadHint(vma)) {
handle_ra_miss(mapping, ra, vmf->pgoff);
goto no_cached_page;
}
ra->mmap_miss++;

/*
Expand Down Expand Up @@ -1405,6 +1415,7 @@ int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
* Found the page and have a reference on it.
*/
mark_page_accessed(page);
ra->prev_index = page->index;
vmf->page = page;
return ret | VM_FAULT_LOCKED;

Expand Down

0 comments on commit 3ea89ee

Please sign in to comment.