yaml
---
r: 151513
b: refs/heads/master
c: 045a252
h: refs/heads/master
i:
  151511: b710231
v: v3
Wu Fengguang authored and Linus Torvalds committed Jun 17, 2009
1 parent 29d9fa6 commit 3d02cc9
Showing 2 changed files with 26 additions and 22 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: dc566127dd161b6c997466a2349ac179527ea89b
+refs/heads/master: 045a2529a3513faed2d45bd82f9013b124309d94
46 changes: 25 additions & 21 deletions trunk/mm/readahead.c
@@ -339,33 +339,25 @@ ondemand_readahead(struct address_space *mapping,
                    unsigned long req_size)
 {
         unsigned long max = max_sane_readahead(ra->ra_pages);
-        pgoff_t prev_offset;
-        int     sequential;
+
+        /*
+         * start of file
+         */
+        if (!offset)
+                goto initial_readahead;
 
         /*
          * It's the expected callback offset, assume sequential access.
          * Ramp up sizes, and push forward the readahead window.
          */
-        if (offset && (offset == (ra->start + ra->size - ra->async_size) ||
-                        offset == (ra->start + ra->size))) {
+        if ((offset == (ra->start + ra->size - ra->async_size) ||
+             offset == (ra->start + ra->size))) {
                 ra->start += ra->size;
                 ra->size = get_next_ra_size(ra, max);
                 ra->async_size = ra->size;
                 goto readit;
         }
 
-        prev_offset = ra->prev_pos >> PAGE_CACHE_SHIFT;
-        sequential = offset - prev_offset <= 1UL || req_size > max;
-
-        /*
-         * Standalone, small read.
-         * Read as is, and do not pollute the readahead state.
-         */
-        if (!hit_readahead_marker && !sequential) {
-                return __do_page_cache_readahead(mapping, filp,
-                                                offset, req_size, 0);
-        }
-
         /*
          * Hit a marked page without valid readahead state.
          * E.g. interleaved reads.
@@ -391,12 +383,24 @@ ondemand_readahead(struct address_space *mapping,
         }
 
         /*
-         * It may be one of
-         *  - first read on start of file
-         *  - sequential cache miss
-         *  - oversize random read
-         * Start readahead for it.
+         * oversize read
          */
+        if (req_size > max)
+                goto initial_readahead;
+
+        /*
+         * sequential cache miss
+         */
+        if (offset - (ra->prev_pos >> PAGE_CACHE_SHIFT) <= 1UL)
+                goto initial_readahead;
+
+        /*
+         * standalone, small random read
+         * Read as is, and do not pollute the readahead state.
+         */
+        return __do_page_cache_readahead(mapping, filp, offset, req_size, 0);
+
+initial_readahead:
         ra->start = offset;
         ra->size = get_init_ra_size(req_size, max);
         ra->async_size = ra->size > req_size ? ra->size - req_size : ra->size;
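
After this patch, ondemand_readahead() evaluates its cases in a fixed order: start of file, expected callback offset (sequential ramp-up), readahead marker hit, oversize read, sequential cache miss, and only then the standalone small random read, which is served directly without touching the readahead state. Below is a minimal user-space sketch of that decision order, not kernel code: the ra_state struct, the classify() helper, and the sample values in main() are invented for illustration, and the interleaved-read handling in the collapsed hunk is reduced to a single branch.

/*
 * Hypothetical model only: simplified stand-ins for file_ra_state and the
 * kernel helpers. Only the ordering of the checks mirrors the diff above.
 */
#include <stdio.h>

struct ra_state {
        unsigned long start;      /* first page of the current window */
        unsigned long size;       /* window size, in pages */
        unsigned long async_size; /* pages left when async readahead triggers */
        unsigned long prev_pos;   /* previous read position, already in pages */
        unsigned long max;        /* cap, i.e. what max_sane_readahead() returns */
};

enum ra_case {
        RA_INITIAL = 0,    /* start of file, oversize read, sequential cache miss */
        RA_SEQUENTIAL,     /* expected callback offset: ramp the window up */
        RA_INTERLEAVED,    /* hit a marked page without valid readahead state */
        RA_RANDOM,         /* standalone small random read: no state change */
};

/* Classify one read request in the order the patched function checks them. */
static enum ra_case classify(const struct ra_state *ra, int hit_marker,
                             unsigned long offset, unsigned long req_size)
{
        if (!offset)
                return RA_INITIAL;                      /* start of file */

        if (offset == ra->start + ra->size - ra->async_size ||
            offset == ra->start + ra->size)
                return RA_SEQUENTIAL;                   /* expected offset */

        if (hit_marker)
                return RA_INTERLEAVED;                  /* e.g. interleaved reads */

        if (req_size > ra->max)
                return RA_INITIAL;                      /* oversize read */

        if (offset - ra->prev_pos <= 1UL)
                return RA_INITIAL;                      /* sequential cache miss */

        return RA_RANDOM;                               /* small random read */
}

int main(void)
{
        struct ra_state ra = { .start = 100, .size = 16, .async_size = 16,
                               .prev_pos = 40, .max = 32 };

        printf("%d\n", classify(&ra, 0, 0, 4));    /* 0: start of file */
        printf("%d\n", classify(&ra, 0, 100, 4));  /* 1: expected callback offset */
        printf("%d\n", classify(&ra, 0, 500, 4));  /* 3: small random read */
        return 0;
}

The point of the reordering is visible in classify(): the random-read bailout becomes the last resort, so a read that starts at offset 0, exceeds the window cap, or follows prev_pos by at most one page now gets full initial readahead instead of the one-off unpolluted path.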
