Skip to content

Commit

Permalink
writeback: integrated background writeback work
Browse files Browse the repository at this point in the history
Check whether background writeback is needed after finishing each work.

When the bdi flusher thread finishes doing some work, check whether any kind
of background writeback needs to be done (either because
dirty_background_ratio is exceeded or because we need to start flushing
old inodes).  If so, just do background writeback.

This way, bdi_start_background_writeback() just needs to wake up the
flusher thread.  It will do background writeback as soon as there is no
other work.

This is a preparatory patch for the next patch which stops background
writeback as soon as there is other work to do.

Signed-off-by: Jan Kara <jack@suse.cz>
Signed-off-by: Wu Fengguang <fengguang.wu@intel.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Dave Chinner <david@fromorbit.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Jan Engelhardt <jengelh@medozas.de>
Cc: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
  • Loading branch information
Jan Kara authored and Linus Torvalds committed Jan 14, 2011
1 parent b44129b commit 6585027
Showing 1 changed file with 46 additions and 15 deletions.
61 changes: 46 additions & 15 deletions fs/fs-writeback.c
Original file line number Diff line number Diff line change
Expand Up @@ -84,29 +84,36 @@ static inline struct inode *wb_inode(struct list_head *head)
return list_entry(head, struct inode, i_wb_list);
}

static void bdi_queue_work(struct backing_dev_info *bdi,
struct wb_writeback_work *work)
/* Wakeup flusher thread or forker thread to fork it. Requires bdi->wb_lock. */
static void bdi_wakeup_flusher(struct backing_dev_info *bdi)
{
trace_writeback_queue(bdi, work);

spin_lock_bh(&bdi->wb_lock);
list_add_tail(&work->list, &bdi->work_list);
if (bdi->wb.task) {
wake_up_process(bdi->wb.task);
} else {
/*
* The bdi thread isn't there, wake up the forker thread which
* will create and run it.
*/
trace_writeback_nothread(bdi, work);
wake_up_process(default_backing_dev_info.wb.task);
}
}

/*
 * Queue a writeback work item on @bdi and make sure some thread will
 * process it: takes bdi->wb_lock, appends @work to bdi->work_list, then
 * wakes the flusher thread — or, when no flusher thread exists, the
 * forker thread — via bdi_wakeup_flusher().
 */
static void bdi_queue_work(struct backing_dev_info *bdi,
struct wb_writeback_work *work)
{
trace_writeback_queue(bdi, work);

spin_lock_bh(&bdi->wb_lock);
list_add_tail(&work->list, &bdi->work_list);
/* Trace separately when there is no flusher thread to wake directly. */
if (!bdi->wb.task)
trace_writeback_nothread(bdi, work);
bdi_wakeup_flusher(bdi);
spin_unlock_bh(&bdi->wb_lock);
}

static void
__bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
bool range_cyclic, bool for_background)
bool range_cyclic)
{
struct wb_writeback_work *work;

Expand All @@ -126,7 +133,6 @@ __bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
work->sync_mode = WB_SYNC_NONE;
work->nr_pages = nr_pages;
work->range_cyclic = range_cyclic;
work->for_background = for_background;

bdi_queue_work(bdi, work);
}
Expand All @@ -144,21 +150,28 @@ __bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
*/
void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages)
{
	/* Queue cyclic WB_SYNC_NONE writeback of @nr_pages pages. */
	__bdi_start_writeback(bdi, nr_pages, true);
}

/**
 * bdi_start_background_writeback - start background writeback
 * @bdi: the backing device to write from
 *
 * Description:
 *   This makes sure WB_SYNC_NONE background writeback happens. When
 *   this function returns, it is only guaranteed that for given BDI
 *   some IO is happening if we are over background dirty threshold.
 *   Caller need not hold sb s_umount semaphore.
 */
void bdi_start_background_writeback(struct backing_dev_info *bdi)
{
	/*
	 * We just wake up the flusher thread. It will perform background
	 * writeback as soon as there is no other work to do.
	 */
	spin_lock_bh(&bdi->wb_lock);
	bdi_wakeup_flusher(bdi);
	spin_unlock_bh(&bdi->wb_lock);
}

/*
Expand Down Expand Up @@ -718,6 +731,23 @@ static unsigned long get_nr_dirty_pages(void)
get_nr_dirty_inodes();
}

/*
 * Perform background writeback when we are over the background dirty
 * threshold; otherwise report that no pages were written.
 */
static long wb_check_background_flush(struct bdi_writeback *wb)
{
	struct wb_writeback_work work = {
		.nr_pages	= LONG_MAX,
		.sync_mode	= WB_SYNC_NONE,
		.for_background	= 1,
		.range_cyclic	= 1,
	};

	if (!over_bground_thresh())
		return 0;

	return wb_writeback(wb, &work);
}

static long wb_check_old_data_flush(struct bdi_writeback *wb)
{
unsigned long expired;
Expand Down Expand Up @@ -787,6 +817,7 @@ long wb_do_writeback(struct bdi_writeback *wb, int force_wait)
* Check for periodic writeback, kupdated() style
*/
wrote += wb_check_old_data_flush(wb);
wrote += wb_check_background_flush(wb);
clear_bit(BDI_writeback_running, &wb->bdi->state);

return wrote;
Expand Down Expand Up @@ -873,7 +904,7 @@ void wakeup_flusher_threads(long nr_pages)
list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) {
if (!bdi_has_dirty_io(bdi))
continue;
__bdi_start_writeback(bdi, nr_pages, false, false);
__bdi_start_writeback(bdi, nr_pages, false);
}
rcu_read_unlock();
}
Expand Down

0 comments on commit 6585027

Please sign in to comment.