vfs: add hooks for ext4's delayed allocation support

Export mpage_bio_submit() and __mpage_writepage() for the benefit of
ext4's delayed allocation support.  Also change __block_write_full_page()
so that buffers with the BH_Delay flag set have get_block() called to
allocate their physical blocks, just as in the !BH_Mapped case.

Signed-off-by: Alex Tomas <alex@clusterfs.com>
Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
Alex Tomas authored and Theodore Ts'o committed Jul 11, 2008
1 parent 87c89c2 commit 29a814d
Showing 3 changed files with 20 additions and 11 deletions.
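The newly exported helpers are meant to be driven from a filesystem's own ->writepages path. Below is a minimal, hypothetical sketch of how a caller such as ext4 might wire them up; it simply mirrors the existing mpage_writepages() pattern in fs/mpage.c, and the names example_da_writepages() and example_da_get_block() are illustrative rather than part of this patch.

#include <linux/fs.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/buffer_head.h>

/* Hypothetical allocating get_block_t supplied by the filesystem. */
static int example_da_get_block(struct inode *inode, sector_t iblock,
				struct buffer_head *bh_result, int create);

/* Sketch of a ->writepages implementation built on the exported helpers. */
static int example_da_writepages(struct address_space *mapping,
				 struct writeback_control *wbc)
{
	struct mpage_data mpd = {
		.bio = NULL,
		.last_block_in_bio = 0,
		.get_block = example_da_get_block,
		.use_writepage = 1,
	};
	int ret;

	/* Walk the dirty pages; __mpage_writepage() assembles the BIOs. */
	ret = write_cache_pages(mapping, wbc, __mpage_writepage, &mpd);

	/* Submit whatever BIO was still being built when the walk ended. */
	if (mpd.bio)
		mpage_bio_submit(WRITE, mpd.bio);
	return ret;
}

Reusing __mpage_writepage() as the write_cache_pages() callback lets the filesystem substitute its own get_block_t without duplicating the BIO-assembly logic.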
fs/buffer.c: 7 changes (5 additions, 2 deletions)

@@ -1691,11 +1691,13 @@ static int __block_write_full_page(struct inode *inode, struct page *page,
 			 */
 			clear_buffer_dirty(bh);
 			set_buffer_uptodate(bh);
-		} else if (!buffer_mapped(bh) && buffer_dirty(bh)) {
+		} else if ((!buffer_mapped(bh) || buffer_delay(bh)) &&
+			   buffer_dirty(bh)) {
 			WARN_ON(bh->b_size != blocksize);
 			err = get_block(inode, block, bh, 1);
 			if (err)
 				goto recover;
+			clear_buffer_delay(bh);
 			if (buffer_new(bh)) {
 				/* blockdev mappings never come here */
 				clear_buffer_new(bh);
@@ -1774,7 +1776,8 @@ static int __block_write_full_page(struct inode *inode, struct page *page,
 	bh = head;
 	/* Recovery: lock and submit the mapped buffers */
 	do {
-		if (buffer_mapped(bh) && buffer_dirty(bh)) {
+		if (buffer_mapped(bh) && buffer_dirty(bh) &&
+		    !buffer_delay(bh)) {
 			lock_buffer(bh);
 			mark_buffer_async_write(bh);
 		} else {
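For context on the new buffer_delay() checks: a delayed-allocation filesystem sets BH_Delay on a dirty buffer at buffered-write time instead of allocating a disk block immediately, deferring the allocation until writeback. The fragment below is an illustrative sketch of that write-time side only; demo_da_get_block_prep() and demo_reserve_blocks() are made-up names, not ext4's actual callbacks.

#include <linux/fs.h>
#include <linux/buffer_head.h>

/* Hypothetical space-reservation helper; it guarantees the deferred
 * allocation cannot fail later for lack of space. */
static int demo_reserve_blocks(struct inode *inode, unsigned int nr_blocks);

/* Sketch of a write-time get_block that defers the real allocation. */
static int demo_da_get_block_prep(struct inode *inode, sector_t iblock,
				  struct buffer_head *bh_result, int create)
{
	int err;

	if (!create)
		return 0;	/* unmapped: reads treat the block as a hole */

	err = demo_reserve_blocks(inode, 1);
	if (err)
		return err;

	/* No physical block yet: flag the buffer so writeback knows the
	 * allocation still has to happen. */
	set_buffer_new(bh_result);
	set_buffer_delay(bh_result);
	return 0;
}

At writeback time the modified __block_write_full_page() above sees the delayed buffer, calls the allocating get_block() it was handed, and clears BH_Delay once a physical block exists.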
fs/mpage.c: 14 changes (5 additions, 9 deletions)

@@ -82,14 +82,15 @@ static void mpage_end_io_write(struct bio *bio, int err)
 	bio_put(bio);
 }
 
-static struct bio *mpage_bio_submit(int rw, struct bio *bio)
+struct bio *mpage_bio_submit(int rw, struct bio *bio)
 {
 	bio->bi_end_io = mpage_end_io_read;
 	if (rw == WRITE)
 		bio->bi_end_io = mpage_end_io_write;
 	submit_bio(rw, bio);
 	return NULL;
 }
+EXPORT_SYMBOL(mpage_bio_submit);
 
 static struct bio *
 mpage_alloc(struct block_device *bdev,
@@ -435,15 +436,9 @@ EXPORT_SYMBOL(mpage_readpage);
  * written, so it can intelligently allocate a suitably-sized BIO. For now,
  * just allocate full-size (16-page) BIOs.
  */
-struct mpage_data {
-	struct bio *bio;
-	sector_t last_block_in_bio;
-	get_block_t *get_block;
-	unsigned use_writepage;
-};
 
-static int __mpage_writepage(struct page *page, struct writeback_control *wbc,
-		      void *data)
+int __mpage_writepage(struct page *page, struct writeback_control *wbc,
+		      void *data)
 {
 	struct mpage_data *mpd = data;
 	struct bio *bio = mpd->bio;
@@ -651,6 +646,7 @@ static int __mpage_writepage(struct page *page, struct writeback_control *wbc,
 	mpd->bio = bio;
 	return ret;
 }
+EXPORT_SYMBOL(__mpage_writepage);
 
 /**
  * mpage_writepages - walk the list of dirty pages of the given address space & writepage() all of them

include/linux/mpage.h: 10 changes (10 additions, 0 deletions)

@@ -11,11 +11,21 @@
  */
 #ifdef CONFIG_BLOCK
 
+struct mpage_data {
+	struct bio *bio;
+	sector_t last_block_in_bio;
+	get_block_t *get_block;
+	unsigned use_writepage;
+};
+
 struct writeback_control;
 
+struct bio *mpage_bio_submit(int rw, struct bio *bio);
 int mpage_readpages(struct address_space *mapping, struct list_head *pages,
 				unsigned nr_pages, get_block_t get_block);
 int mpage_readpage(struct page *page, get_block_t get_block);
+int __mpage_writepage(struct page *page, struct writeback_control *wbc,
+		      void *data);
 int mpage_writepages(struct address_space *mapping,
 		struct writeback_control *wbc, get_block_t get_block);
 int mpage_writepage(struct page *page, get_block_t *get_block,
