diff --git a/[refs] b/[refs]
index 8212b5e8367f..6cea9f0dbbd4 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 9ecc2738ac2371f88dff5d48914b4e35c45203cd
+refs/heads/master: b3af9468aebf5fcb573d0a116b31d2be1d43c0e9
diff --git a/trunk/fs/fs-writeback.c b/trunk/fs/fs-writeback.c
index c6bf775e641a..52aa54540079 100644
--- a/trunk/fs/fs-writeback.c
+++ b/trunk/fs/fs-writeback.c
@@ -474,10 +474,15 @@ writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
 	spin_lock(&inode_lock);
 	inode->i_state &= ~I_SYNC;
 	if (!(inode->i_state & (I_FREEING | I_CLEAR))) {
-		if (inode->i_state & I_DIRTY) {
+		if ((inode->i_state & I_DIRTY_PAGES) && wbc->for_kupdate) {
 			/*
-			 * Someone redirtied the inode while were writing back
-			 * the pages.
+			 * More pages get dirtied by a fast dirtier.
+			 */
+			goto select_queue;
+		} else if (inode->i_state & I_DIRTY) {
+			/*
+			 * At least XFS will redirty the inode during the
+			 * writeback (delalloc) and on io completion (isize).
 			 */
 			redirty_tail(inode);
 		} else if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
@@ -502,6 +507,7 @@ writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
 				 * soon as the queue becomes uncongested.
 				 */
 				inode->i_state |= I_DIRTY_PAGES;
+select_queue:
 				if (wbc->nr_to_write <= 0) {
 					/*
 					 * slice used up: queue for next turn
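
Read together, the two hunks change the requeue decision in writeback_single_inode(): an inode that picked up new dirty pages during kupdate-driven writeback now jumps past redirty_tail() to the new select_queue label, where it is requeued according to the remaining wbc->nr_to_write budget. As a reading aid only, here is the patched region assembled from the post-images of the two hunks above; everything between them (the unchanged requeue path inside the mapping_tagged branch) is elided:

	spin_lock(&inode_lock);
	inode->i_state &= ~I_SYNC;
	if (!(inode->i_state & (I_FREEING | I_CLEAR))) {
		if ((inode->i_state & I_DIRTY_PAGES) && wbc->for_kupdate) {
			/*
			 * More pages get dirtied by a fast dirtier.
			 */
			goto select_queue;
		} else if (inode->i_state & I_DIRTY) {
			/*
			 * At least XFS will redirty the inode during the
			 * writeback (delalloc) and on io completion (isize).
			 */
			redirty_tail(inode);
		} else if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
			/* ... unchanged lines between the two hunks ... */
				inode->i_state |= I_DIRTY_PAGES;
select_queue:
				if (wbc->nr_to_write <= 0) {
					/*
					 * slice used up: queue for next turn
					 */
					/* ... (rest unchanged) ... */

The effect is that a fast dirtier no longer sends the inode through the generic I_DIRTY test into redirty_tail(); only the write budget check decides how the inode is requeued.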