Commit 614c4fd

---
r: 208343
b: refs/heads/master
c: 6467716
h: refs/heads/master
i:
  208341: 6667c30
  208339: 54d9248
  208335: 5760715
v: v3
---
Artem Bityutskiy authored and Jens Axboe committed Aug 7, 2010
1 parent 9ff5498 commit 614c4fd
Showing 4 changed files with 71 additions and 42 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
refs/heads/master: 253c34e9b10c30d3064be654b5b78fbc1a8b1896
refs/heads/master: 6467716a37673e8d47b4984eb19839bdad0a8353
36 changes: 11 additions & 25 deletions trunk/fs/fs-writeback.c
@@ -76,7 +76,7 @@ static void bdi_queue_work(struct backing_dev_info *bdi,
{
trace_writeback_queue(bdi, work);

spin_lock(&bdi->wb_lock);
spin_lock_bh(&bdi->wb_lock);
list_add_tail(&work->list, &bdi->work_list);
if (bdi->wb.task) {
wake_up_process(bdi->wb.task);
@@ -88,7 +88,7 @@ static void bdi_queue_work(struct backing_dev_info *bdi,
trace_writeback_nothread(bdi, work);
wake_up_process(default_backing_dev_info.wb.task);
}
spin_unlock(&bdi->wb_lock);
spin_unlock_bh(&bdi->wb_lock);
}

static void
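The two bdi_queue_work() hunks above (and the matching conversions further down) switch bdi->wb_lock from spin_lock()/spin_unlock() to the _bh variants. The reason is the new wakeup_timer_fn() added in mm/backing-dev.c: it runs in timer (softirq) context and takes the same lock, so process-context holders must keep bottom halves disabled, or a timer firing on the local CPU could spin forever on a lock that CPU already holds. A minimal sketch of the resulting rule, with hypothetical function names (the real call sites are bdi_queue_work() and wakeup_timer_fn()):

/* Sketch only: the locking pattern the _bh conversion enforces. */
static void sketch_timer_fn(unsigned long data)		/* timer softirq context */
{
	struct backing_dev_info *bdi = (struct backing_dev_info *)data;

	spin_lock_bh(&bdi->wb_lock);
	/* ... wake up whoever should handle the pending work ... */
	spin_unlock_bh(&bdi->wb_lock);
}

static void sketch_queue_work(struct backing_dev_info *bdi)	/* process context */
{
	spin_lock_bh(&bdi->wb_lock);	/* _bh: the timer softirq cannot preempt us here */
	/* ... add work to bdi->work_list ... */
	spin_unlock_bh(&bdi->wb_lock);
}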
@@ -704,13 +704,13 @@ get_next_work_item(struct backing_dev_info *bdi)
{
struct wb_writeback_work *work = NULL;

spin_lock(&bdi->wb_lock);
spin_lock_bh(&bdi->wb_lock);
if (!list_empty(&bdi->work_list)) {
work = list_entry(bdi->work_list.next,
struct wb_writeback_work, list);
list_del_init(&work->list);
}
spin_unlock(&bdi->wb_lock);
spin_unlock_bh(&bdi->wb_lock);
return work;
}

@@ -810,6 +810,12 @@ int bdi_writeback_thread(void *data)
trace_writeback_thread_start(bdi);

while (!kthread_should_stop()) {
/*
* Remove own delayed wake-up timer, since we are already awake
* and we'll take care of the periodic write-back.
*/
del_timer(&wb->wakeup_timer);

pages_written = wb_do_writeback(wb, 0);

trace_writeback_pages_written(pages_written);
@@ -868,26 +874,6 @@ void wakeup_flusher_threads(long nr_pages)
rcu_read_unlock();
}

/*
* This function is used when the first inode for this bdi is marked dirty. It
* wakes-up the corresponding bdi thread which should then take care of the
* periodic background write-out of dirty inodes.
*/
static void wakeup_bdi_thread(struct backing_dev_info *bdi)
{
spin_lock(&bdi->wb_lock);
if (bdi->wb.task)
wake_up_process(bdi->wb.task);
else
/*
* When bdi tasks are inactive for long time, they are killed.
* In this case we have to wake-up the forker thread which
* should create and run the bdi thread.
*/
wake_up_process(default_backing_dev_info.wb.task);
spin_unlock(&bdi->wb_lock);
}

static noinline void block_dump___mark_inode_dirty(struct inode *inode)
{
if (inode->i_ino || strcmp(inode->i_sb->s_id, "bdev")) {
@@ -1019,7 +1005,7 @@ void __mark_inode_dirty(struct inode *inode, int flags)
spin_unlock(&inode_lock);

if (wakeup_bdi)
wakeup_bdi_thread(bdi);
bdi_wakeup_thread_delayed(bdi);
}
EXPORT_SYMBOL(__mark_inode_dirty);

2 changes: 2 additions & 0 deletions trunk/include/linux/backing-dev.h
@@ -52,6 +52,7 @@ struct bdi_writeback {
unsigned long last_active; /* last time bdi thread was active */

struct task_struct *task; /* writeback thread */
struct timer_list wakeup_timer; /* used for delayed bdi thread wakeup */
struct list_head b_dirty; /* dirty inodes */
struct list_head b_io; /* parked for writeback */
struct list_head b_more_io; /* parked for more writeback */
@@ -105,6 +106,7 @@ void bdi_start_background_writeback(struct backing_dev_info *bdi);
int bdi_writeback_thread(void *data);
int bdi_has_dirty_io(struct backing_dev_info *bdi);
void bdi_arm_supers_timer(void);
void bdi_wakeup_thread_delayed(struct backing_dev_info *bdi);

extern spinlock_t bdi_lock;
extern struct list_head bdi_list;
73 changes: 57 additions & 16 deletions trunk/mm/backing-dev.c
@@ -248,17 +248,6 @@ static int __init default_bdi_init(void)
}
subsys_initcall(default_bdi_init);

static void bdi_wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi)
{
memset(wb, 0, sizeof(*wb));

wb->bdi = bdi;
wb->last_old_flush = jiffies;
INIT_LIST_HEAD(&wb->b_dirty);
INIT_LIST_HEAD(&wb->b_io);
INIT_LIST_HEAD(&wb->b_more_io);
}

int bdi_has_dirty_io(struct backing_dev_info *bdi)
{
return wb_has_dirty_io(&bdi->wb);
@@ -316,6 +305,43 @@ static void sync_supers_timer_fn(unsigned long unused)
bdi_arm_supers_timer();
}

static void wakeup_timer_fn(unsigned long data)
{
struct backing_dev_info *bdi = (struct backing_dev_info *)data;

spin_lock_bh(&bdi->wb_lock);
if (bdi->wb.task) {
wake_up_process(bdi->wb.task);
} else {
/*
* When bdi tasks are inactive for long time, they are killed.
* In this case we have to wake-up the forker thread which
* should create and run the bdi thread.
*/
wake_up_process(default_backing_dev_info.wb.task);
}
spin_unlock_bh(&bdi->wb_lock);
}

/*
* This function is used when the first inode for this bdi is marked dirty. It
* wakes up the corresponding bdi thread which should then take care of the
* periodic background write-out of dirty inodes. Since the write-out would
* start only 'dirty_writeback_interval' centisecs from now anyway, we just
* set up a timer which wakes the bdi thread up later.
*
* Note, we wouldn't bother setting up the timer, but this function is on the
* fast-path (used by '__mark_inode_dirty()'), so we save a few context switches
* by delaying the wake-up.
*/
void bdi_wakeup_thread_delayed(struct backing_dev_info *bdi)
{
unsigned long timeout;

timeout = msecs_to_jiffies(dirty_writeback_interval * 10);
mod_timer(&bdi->wb.wakeup_timer, jiffies + timeout);
}
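dirty_writeback_interval is expressed in centiseconds (it backs the vm.dirty_writeback_centisecs sysctl, 500 by default), which is why the timeout is multiplied by 10 to get milliseconds before msecs_to_jiffies() converts it to jiffies. A worked sketch of the arithmetic, assuming the default interval and HZ=250 (both values are assumptions, not stated in the patch):

/*
 * Sketch of the timeout computed by bdi_wakeup_thread_delayed(),
 * assuming dirty_writeback_interval == 500 centisecs and HZ == 250:
 *
 *   500 centisecs * 10       = 5000 ms
 *   msecs_to_jiffies(5000)   = 5000 * HZ / 1000 = 1250 jiffies
 *   mod_timer(&wb->wakeup_timer, jiffies + 1250)   -> fires ~5 s from now
 *
 * mod_timer() activates an inactive timer or moves the expiry of a pending
 * one, so repeated calls re-arm the single timer rather than stacking
 * wake-ups.
 */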

/*
* Calculate the longest interval (jiffies) bdi threads are allowed to be
* inactive.
@@ -353,8 +379,10 @@ static int bdi_forker_thread(void *ptr)
* Temporary measure, we want to make sure we don't see
* dirty data on the default backing_dev_info
*/
if (wb_has_dirty_io(me) || !list_empty(&me->bdi->work_list))
if (wb_has_dirty_io(me) || !list_empty(&me->bdi->work_list)) {
del_timer(&me->wakeup_timer);
wb_do_writeback(me, 0);
}

spin_lock_bh(&bdi_lock);
set_current_state(TASK_INTERRUPTIBLE);
@@ -386,7 +414,7 @@
break;
}

spin_lock(&bdi->wb_lock);
spin_lock_bh(&bdi->wb_lock);
/*
* If there is no work to do and the bdi thread was
* inactive long enough - kill it. The wb_lock is taken
@@ -403,7 +431,7 @@
action = KILL_THREAD;
break;
}
spin_unlock(&bdi->wb_lock);
spin_unlock_bh(&bdi->wb_lock);
}
spin_unlock_bh(&bdi_lock);

@@ -427,9 +455,9 @@ static int bdi_forker_thread(void *ptr)
* The spinlock makes sure we do not lose
* wake-ups when racing with 'bdi_queue_work()'.
*/
spin_lock(&bdi->wb_lock);
spin_lock_bh(&bdi->wb_lock);
bdi->wb.task = task;
spin_unlock(&bdi->wb_lock);
spin_unlock_bh(&bdi->wb_lock);
}
break;

@@ -586,6 +614,7 @@ void bdi_unregister(struct backing_dev_info *bdi)
if (bdi->dev) {
trace_writeback_bdi_unregister(bdi);
bdi_prune_sb(bdi);
del_timer_sync(&bdi->wb.wakeup_timer);

if (!bdi_cap_flush_forker(bdi))
bdi_wb_shutdown(bdi);
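Note the asymmetry in how the timer is cancelled. The bdi_writeback_thread() hunk in fs-writeback.c above uses plain del_timer(), which only deactivates a pending timer and does not wait for a handler that may already be running; that is fine there, because a spurious extra wake_up_process() on an already-running thread is harmless. bdi_unregister() instead uses del_timer_sync(), which also waits for a concurrently executing wakeup_timer_fn() to finish, so the callback cannot dereference the bdi while it is being torn down. A minimal sketch of that ordering (sketch_bdi_unregister() is a hypothetical name, surrounding code elided):

static void sketch_bdi_unregister(struct backing_dev_info *bdi)
{
	/*
	 * Deactivate the timer and wait for wakeup_timer_fn() if it is
	 * already running on another CPU; after this returns, no timer
	 * callback can touch the bdi any more.
	 */
	del_timer_sync(&bdi->wb.wakeup_timer);

	/* ... shut down the bdi writeback thread, then free the bdi ... */
}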
@@ -596,6 +625,18 @@ void bdi_unregister(struct backing_dev_info *bdi)
}
EXPORT_SYMBOL(bdi_unregister);

static void bdi_wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi)
{
memset(wb, 0, sizeof(*wb));

wb->bdi = bdi;
wb->last_old_flush = jiffies;
INIT_LIST_HEAD(&wb->b_dirty);
INIT_LIST_HEAD(&wb->b_io);
INIT_LIST_HEAD(&wb->b_more_io);
setup_timer(&wb->wakeup_timer, wakeup_timer_fn, (unsigned long)bdi);
}

int bdi_init(struct backing_dev_info *bdi)
{
int i, err;
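Taken together, the patch wires the new timer through the timer API of that era, where setup_timer() records the callback plus an unsigned long cookie that the handler casts back to its object. A hedged summary of the timer's life cycle as the hunks above establish it (call sites as in the patch, comments are mine):

setup_timer(&wb->wakeup_timer, wakeup_timer_fn,
	    (unsigned long)bdi);		/* bdi_wb_init(): register callback and cookie */
mod_timer(&bdi->wb.wakeup_timer,
	  jiffies + timeout);			/* bdi_wakeup_thread_delayed(): (re)arm the wake-up */
del_timer(&wb->wakeup_timer);			/* bdi_writeback_thread(): cheap cancel, no sync */
del_timer_sync(&bdi->wb.wakeup_timer);		/* bdi_unregister(): cancel and wait for the handler */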
