readahead: record mmap read-around states in file_ra_state
Mmap read-around now shares the same code style and data structure as the
readahead code.

This also removes do_page_cache_readahead().  Its last user, mmap
read-around, has been changed to call ra_submit().

The no-readahead-if-congested logic is dropped along the way.  Users tend to
be pretty sensitive to slow loading of executables, so it is unfavorable to
disable mmap read-around on a congested queue.

[akpm@linux-foundation.org: coding-style fixes]
Cc: Nick Piggin <npiggin@suse.de>
Signed-off-by: Fengguang Wu <wfg@mail.ustc.edu.cn>
Cc: Ying Han <yinghan@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Wu Fengguang authored and Linus Torvalds committed Jun 17, 2009
1 parent 2fad6f5 commit d30a110
Showing 3 changed files with 12 additions and 28 deletions.
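
For reference, the state that mmap read-around now records lives in struct
file_ra_state. A sketch of its definition as of this kernel version (from
include/linux/fs.h; field comments paraphrased from the source, so treat the
exact wording as an assumption):

struct file_ra_state {
	pgoff_t start;			/* where readahead started */
	unsigned int size;		/* # of readahead pages */
	unsigned int async_size;	/* do asynchronous readahead when
					   there are only # of pages ahead */
	unsigned int ra_pages;		/* maximum readahead window */
	int mmap_miss;			/* cache miss stat for mmap accesses */
	loff_t prev_pos;		/* cache last read() position */
};

Read-around fills in start, size and async_size, then hands the struct to
ra_submit(), as the mm/filemap.c hunk below shows.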
include/linux/mm.h (5 changes: 3 additions & 2 deletions)
@@ -1178,8 +1178,6 @@ void task_dirty_inc(struct task_struct *tsk);
 #define VM_MAX_READAHEAD	128	/* kbytes */
 #define VM_MIN_READAHEAD	16	/* kbytes (includes current page) */

-int do_page_cache_readahead(struct address_space *mapping, struct file *filp,
-			pgoff_t offset, unsigned long nr_to_read);
 int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
 			pgoff_t offset, unsigned long nr_to_read);

@@ -1197,6 +1195,9 @@ void page_cache_async_readahead(struct address_space *mapping,
 			unsigned long size);

 unsigned long max_sane_readahead(unsigned long nr);
+unsigned long ra_submit(struct file_ra_state *ra,
+			struct address_space *mapping,
+			struct file *filp);

 /* Do stack extension */
 extern int expand_stack(struct vm_area_struct *vma, unsigned long address);
mm/filemap.c (12 changes: 7 additions & 5 deletions)
@@ -1488,13 +1488,15 @@ static void do_sync_mmap_readahead(struct vm_area_struct *vma,
 	if (ra->mmap_miss > MMAP_LOTSAMISS)
 		return;

+	/*
+	 * mmap read-around
+	 */
 	ra_pages = max_sane_readahead(ra->ra_pages);
 	if (ra_pages) {
-		pgoff_t start = 0;
-
-		if (offset > ra_pages / 2)
-			start = offset - ra_pages / 2;
-		do_page_cache_readahead(mapping, file, start, ra_pages);
+		ra->start = max_t(long, 0, offset - ra_pages/2);
+		ra->size = ra_pages;
+		ra->async_size = 0;
+		ra_submit(ra, mapping, file);
 	}
 }

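A note on the max_t(long, 0, offset - ra_pages/2) idiom above: offset (a
pgoff_t) and ra_pages are unsigned, so for faults near the start of the file
the subtraction would wrap around; evaluating it as a signed long and clamping
at zero centers the read-around window on the faulting page without
underflowing. A standalone sketch of the same arithmetic (illustration only,
not kernel code):

#include <stdio.h>

int main(void)
{
	long ra_pages = 32;		/* read-around window size, in pages */
	long faults[] = { 5, 100 };	/* example faulting page offsets */

	for (int i = 0; i < 2; i++) {
		long offset = faults[i];
		long start = offset - ra_pages / 2;
		if (start < 0)		/* the max_t(long, 0, ...) clamp */
			start = 0;
		printf("fault at page %ld -> window [%ld, %ld)\n",
		       offset, start, start + ra_pages);
	}
	return 0;
}

This prints windows [0, 32) and [84, 116): the first fault sits near the file
head, so its window is anchored at zero rather than wrapping around.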
mm/readahead.c (23 changes: 2 additions & 21 deletions)
@@ -133,15 +133,12 @@ static int read_pages(struct address_space *mapping, struct file *filp,
 }

 /*
- * do_page_cache_readahead actually reads a chunk of disk.  It allocates all
+ * __do_page_cache_readahead() actually reads a chunk of disk.  It allocates all
  * the pages first, then submits them all for I/O. This avoids the very bad
  * behaviour which would occur if page allocations are causing VM writeback.
  * We really don't want to intermingle reads and writes like that.
  *
  * Returns the number of pages requested, or the maximum amount of I/O allowed.
- *
- * do_page_cache_readahead() returns -1 if it encountered request queue
- * congestion.
  */
 static int
 __do_page_cache_readahead(struct address_space *mapping, struct file *filp,
@@ -231,22 +228,6 @@ int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
 	return ret;
 }

-/*
- * This version skips the IO if the queue is read-congested, and will tell the
- * block layer to abandon the readahead if request allocation would block.
- *
- * force_page_cache_readahead() will ignore queue congestion and will block on
- * request queues.
- */
-int do_page_cache_readahead(struct address_space *mapping, struct file *filp,
-			pgoff_t offset, unsigned long nr_to_read)
-{
-	if (bdi_read_congested(mapping->backing_dev_info))
-		return -1;
-
-	return __do_page_cache_readahead(mapping, filp, offset, nr_to_read, 0);
-}
-
 /*
  * Given a desired number of PAGE_CACHE_SIZE readahead pages, return a
  * sensible upper limit.
@@ -260,7 +241,7 @@ unsigned long max_sane_readahead(unsigned long nr)
 /*
  * Submit IO for the read-ahead request in file_ra_state.
  */
-static unsigned long ra_submit(struct file_ra_state *ra,
+unsigned long ra_submit(struct file_ra_state *ra,
 		       struct address_space *mapping, struct file *filp)
 {
 	int actual;
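The diff elides the rest of ra_submit()'s body. For context, a sketch of the
whole function as it reads in mm/readahead.c after this change (reconstructed
from the kernel tree of this era, so treat the exact body as an assumption):

unsigned long ra_submit(struct file_ra_state *ra,
		       struct address_space *mapping, struct file *filp)
{
	int actual;

	/* submit the window previously recorded in file_ra_state */
	actual = __do_page_cache_readahead(mapping, filp,
					ra->start, ra->size, ra->async_size);

	return actual;
}

Exporting this function (instead of the removed do_page_cache_readahead()
wrapper) lets the mmap fault path submit whatever window it has recorded in
file_ra_state: one code path now serves both readahead and read-around.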
