readahead: improve heuristic detecting sequential reads
Introduce ra.offset and store in it an offset where the previous read
ended.  This way we can detect whether reads are really sequential (and
thus we should not mark the page as accessed repeatedly) or whether they
are random and just happen to be in the same page (and the page should
really be marked accessed again).

Signed-off-by: Jan Kara <jack@suse.cz>
Acked-by: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: WU Fengguang <wfg@mail.ustc.edu.cn>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Jan Kara authored and Linus Torvalds committed May 7, 2007
1 parent b813e93 commit ec0f163
Showing 3 changed files with 10 additions and 3 deletions.
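
As an aside before the diff: the heuristic can be modeled outside the kernel in a few lines of C. The sketch below is an illustration only, under assumed names (ra_state, should_mark_accessed) and a hard-coded 4096-byte page size; it stands in for the kernel's file_ra_state and the prev_index/prev_offset check that the patch adds to do_generic_mapping_read().

#include <stdio.h>

#define PAGE_SIZE 4096UL

/* Hypothetical stand-in for the per-file readahead state. */
struct ra_state {
	unsigned long prev_index;	/* page index where the last read ended */
	unsigned int prev_offset;	/* offset in that page where it ended */
};

/*
 * A read that starts exactly where the previous one ended (same page,
 * same offset) is part of one sequential stream and is counted once;
 * a read that merely lands in the same page at a different offset is
 * treated as a fresh access.
 */
static int should_mark_accessed(const struct ra_state *ra,
				unsigned long index, unsigned int offset)
{
	return ra->prev_index != index || ra->prev_offset != offset;
}

int main(void)
{
	struct ra_state ra = { .prev_index = -1UL, .prev_offset = 0 };
	unsigned long starts[] = { 0, 1024, 2048, 100, 3000 };	/* read start positions */
	size_t i;

	for (i = 0; i < sizeof(starts) / sizeof(starts[0]); i++) {
		unsigned long index = starts[i] / PAGE_SIZE;
		unsigned int offset = starts[i] % PAGE_SIZE;

		printf("read at %5lu: %s\n", starts[i],
		       should_mark_accessed(&ra, index, offset) ?
		       "mark page accessed" : "sequential, skip");

		/* Pretend each read consumes 1024 bytes, then record where it ended. */
		offset += 1024;
		index += offset / PAGE_SIZE;
		offset %= PAGE_SIZE;
		ra.prev_index = index;
		ra.prev_offset = offset;
	}
	return 0;
}

Running it, the reads at 1024 and 2048 continue exactly where the previous ones ended and are skipped, while the read at offset 100 lands in the same page at a different offset and is counted as a fresh access.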
1 change: 1 addition & 0 deletions include/linux/fs.h
@@ -702,6 +702,7 @@ struct file_ra_state {
 	unsigned long ra_pages;		/* Maximum readahead window */
 	unsigned long mmap_hit;		/* Cache hit stat for mmap accesses */
 	unsigned long mmap_miss;	/* Cache miss stat for mmap accesses */
+	unsigned int offset;		/* Offset where last read() ended in a page */
 };
 #define RA_FLAG_MISS		0x01	/* a cache miss occured against this file */
 #define RA_FLAG_INCACHE		0x02	/* file is already in cache */
9 changes: 6 additions & 3 deletions mm/filemap.c
@@ -868,6 +868,7 @@ void do_generic_mapping_read(struct address_space *mapping,
 	unsigned long last_index;
 	unsigned long next_index;
 	unsigned long prev_index;
+	unsigned int prev_offset;
 	loff_t isize;
 	struct page *cached_page;
 	int error;
@@ -877,6 +878,7 @@ void do_generic_mapping_read(struct address_space *mapping,
 	index = *ppos >> PAGE_CACHE_SHIFT;
 	next_index = index;
 	prev_index = ra.prev_page;
+	prev_offset = ra.offset;
 	last_index = (*ppos + desc->count + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT;
 	offset = *ppos & ~PAGE_CACHE_MASK;

@@ -924,10 +926,10 @@ void do_generic_mapping_read(struct address_space *mapping,
 		flush_dcache_page(page);
 
 		/*
-		 * When (part of) the same page is read multiple times
-		 * in succession, only mark it as accessed the first time.
+		 * When a sequential read accesses a page several times,
+		 * only mark it as accessed the first time.
 		 */
-		if (prev_index != index)
+		if (prev_index != index || offset != prev_offset)
 			mark_page_accessed(page);
 		prev_index = index;

@@ -945,6 +947,7 @@ void do_generic_mapping_read(struct address_space *mapping,
 		offset += ret;
 		index += offset >> PAGE_CACHE_SHIFT;
 		offset &= ~PAGE_CACHE_MASK;
+		prev_offset = ra.offset = offset;
 
 		page_cache_release(page);
 		if (ret == nr && desc->count)
3 changes: 3 additions & 0 deletions mm/readahead.c
@@ -207,6 +207,8 @@ static int read_pages(struct address_space *mapping, struct file *filp,
  * If page_cache_readahead sees that it is again being called for
  * a page which it just looked at, it can return immediately without
  * making any state changes.
+ * offset: Offset in the prev_page where the last read ended - used for
+ *      detection of sequential file reading.
  * ahead_start,
  * ahead_size: Together, these form the "ahead window".
  * ra_pages: The externally controlled max readahead for this fd.
@@ -473,6 +475,7 @@ page_cache_readahead(struct address_space *mapping, struct file_ra_state *ra,
 	/* Note that prev_page == -1 if it is a first read */
 	sequential = (offset == ra->prev_page + 1);
 	ra->prev_page = offset;
+	ra->offset = 0;
 
 	max = get_max_readahead(ra);
 	newsize = min(req_size, max);
