From 5fb80943365afe9c51697c4f5d9b6e8b201218e9 Mon Sep 17 00:00:00 2001
From: Jaegeuk Kim
Date: Fri, 18 Jan 2013 14:54:13 +0900
Subject: [PATCH]

--- yaml ---
r: 348979
b: refs/heads/master
c: a7fdffbd3ea4b3cc2993af006bde38a423b38b72
h: refs/heads/master
i:
  348977: acde0595a1dd1accb32a23de36cee014c9ed88f1
  348975: a11519dc167d6b8a9ad777692f9eff79b7738ec4
v: v3
---
 [refs]               |  2 +-
 trunk/fs/f2fs/node.c | 17 +++++++++++------
 2 files changed, 12 insertions(+), 7 deletions(-)

diff --git a/[refs] b/[refs]
index 4062ec5b6754..dd9c4bee021e 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: c01e54b770e69c65525295eb2668be3dc0822406
+refs/heads/master: a7fdffbd3ea4b3cc2993af006bde38a423b38b72
diff --git a/trunk/fs/f2fs/node.c b/trunk/fs/f2fs/node.c
index f177c018745c..9bda63c9c166 100644
--- a/trunk/fs/f2fs/node.c
+++ b/trunk/fs/f2fs/node.c
@@ -1124,6 +1124,12 @@ static int f2fs_write_node_page(struct page *page,
 	return 0;
 }
 
+/*
+ * It is very important to gather dirty pages and write them at once, so that
+ * we can submit a big bio without interfering with other data writes.
+ * By default, 512 pages (2MB), the segment size, is quite reasonable.
+ */
+#define COLLECT_DIRTY_NODES	512
 static int f2fs_write_node_pages(struct address_space *mapping,
 			    struct writeback_control *wbc)
 {
@@ -1131,17 +1137,16 @@ static int f2fs_write_node_pages(struct address_space *mapping,
 	struct block_device *bdev = sbi->sb->s_bdev;
 	long nr_to_write = wbc->nr_to_write;
 
-	if (wbc->for_kupdate)
-		return 0;
-
-	if (get_pages(sbi, F2FS_DIRTY_NODES) == 0)
-		return 0;
-
+	/* First, check balancing of cached NAT entries */
 	if (try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK)) {
 		write_checkpoint(sbi, false, false);
 		return 0;
 	}
 
+	/* collect a number of dirty node pages and write them together */
+	if (get_pages(sbi, F2FS_DIRTY_NODES) < COLLECT_DIRTY_NODES)
+		return 0;
+
 	/* if mounting is failed, skip writing node pages */
 	wbc->nr_to_write = bio_get_nr_vecs(bdev);
 	sync_node_pages(sbi, 0, wbc);