Skip to content

Commit

Permalink
GFS2: Optimise writepage for metadata
Browse files Browse the repository at this point in the history
This adds a GFS2 specific writepage for metadata, rather than
continuing to use the VFS function. As a result we now tag all
our metadata I/O with the correct flag so that blktraces will
now be less confusing.

Also, the generic function was checking for a number of corner
cases which cannot happen on the metadata address spaces, so
this should be faster too.

Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
  • Loading branch information
Steven Whitehouse committed May 11, 2009
1 parent c969f58 commit 4a0f9a3
Showing 1 changed file with 57 additions and 9 deletions.
66 changes: 57 additions & 9 deletions fs/gfs2/meta_io.c
Original file line number Diff line number Diff line change
Expand Up @@ -33,17 +33,65 @@
#include "util.h"
#include "ops_address.h"

static int aspace_get_block(struct inode *inode, sector_t lblock,
struct buffer_head *bh_result, int create)
static int gfs2_aspace_writepage(struct page *page, struct writeback_control *wbc)
{
gfs2_assert_warn(inode->i_sb->s_fs_info, 0);
return -EOPNOTSUPP;
}
int err;
struct buffer_head *bh, *head;
int nr_underway = 0;
int write_op = (1 << BIO_RW_META) | ((wbc->sync_mode == WB_SYNC_ALL ?
WRITE_SYNC_PLUG : WRITE));

BUG_ON(!PageLocked(page));
BUG_ON(!page_has_buffers(page));

head = page_buffers(page);
bh = head;

do {
if (!buffer_mapped(bh))
continue;
/*
* If it's a fully non-blocking write attempt and we cannot
* lock the buffer then redirty the page. Note that this can
* potentially cause a busy-wait loop from pdflush and kswapd
* activity, but those code paths have their own higher-level
* throttling.
*/
if (wbc->sync_mode != WB_SYNC_NONE || !wbc->nonblocking) {
lock_buffer(bh);
} else if (!trylock_buffer(bh)) {
redirty_page_for_writepage(wbc, page);
continue;
}
if (test_clear_buffer_dirty(bh)) {
mark_buffer_async_write(bh);
} else {
unlock_buffer(bh);
}
} while ((bh = bh->b_this_page) != head);

/*
* The page and its buffers are protected by PageWriteback(), so we can
* drop the bh refcounts early.
*/
BUG_ON(PageWriteback(page));
set_page_writeback(page);

do {
struct buffer_head *next = bh->b_this_page;
if (buffer_async_write(bh)) {
submit_bh(write_op, bh);
nr_underway++;
}
bh = next;
} while (bh != head);
unlock_page(page);

static int gfs2_aspace_writepage(struct page *page,
struct writeback_control *wbc)
{
return block_write_full_page(page, aspace_get_block, wbc);
err = 0;
if (nr_underway == 0)
end_page_writeback(page);

return err;
}

static const struct address_space_operations aspace_aops = {
Expand Down

0 comments on commit 4a0f9a3

Please sign in to comment.