Commit 6667c30

---
r: 208341
b: refs/heads/master
c: fff5b85
h: refs/heads/master
i:
  208339: 54d9248
v: v3
Artem Bityutskiy authored and Jens Axboe committed Aug 7, 2010
1 parent ea28571 commit 6667c30
Showing 3 changed files with 71 additions and 54 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: adf392407076b85816d48714fb8eeaedb2157884
+refs/heads/master: fff5b85aa4225a7be157f208277a055822039a9e
54 changes: 12 additions & 42 deletions trunk/fs/fs-writeback.c
@@ -78,21 +78,17 @@ static void bdi_queue_work(struct backing_dev_info *bdi,
 
 	spin_lock(&bdi->wb_lock);
 	list_add_tail(&work->list, &bdi->work_list);
-	spin_unlock(&bdi->wb_lock);
-
-	/*
-	 * If the default thread isn't there, make sure we add it. When
-	 * it gets created and wakes up, we'll run this work.
-	 */
-	if (unlikely(!bdi->wb.task)) {
+	if (bdi->wb.task) {
+		wake_up_process(bdi->wb.task);
+	} else {
+		/*
+		 * The bdi thread isn't there, wake up the forker thread which
+		 * will create and run it.
+		 */
 		trace_writeback_nothread(bdi, work);
 		wake_up_process(default_backing_dev_info.wb.task);
-	} else {
-		struct bdi_writeback *wb = &bdi->wb;
-
-		if (wb->task)
-			wake_up_process(wb->task);
 	}
+	spin_unlock(&bdi->wb_lock);
 }
 
 static void
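For readability, this is roughly how bdi_queue_work() reads after the hunk above, reassembled from its added and context lines (the unchanged start of the function is elided). The point of the change is that the wb.task test and the wake-up now happen while wb_lock is still held, so wake-ups cannot be lost when racing with the forker thread, which installs and clears bdi->wb.task under the same lock (see the mm/backing-dev.c hunks below):

static void bdi_queue_work(struct backing_dev_info *bdi,
			   struct wb_writeback_work *work)
{
	/* ... unchanged lines above the hunk ... */
	spin_lock(&bdi->wb_lock);
	list_add_tail(&work->list, &bdi->work_list);
	if (bdi->wb.task) {
		wake_up_process(bdi->wb.task);
	} else {
		/*
		 * The bdi thread isn't there, wake up the forker thread which
		 * will create and run it.
		 */
		trace_writeback_nothread(bdi, work);
		wake_up_process(default_backing_dev_info.wb.task);
	}
	spin_unlock(&bdi->wb_lock);
}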
@@ -800,7 +796,6 @@ int bdi_writeback_thread(void *data)
 {
 	struct bdi_writeback *wb = data;
 	struct backing_dev_info *bdi = wb->bdi;
-	unsigned long wait_jiffies = -1UL;
 	long pages_written;
 
 	current->flags |= PF_FLUSHER | PF_SWAPWRITE;
@@ -812,13 +807,6 @@ int bdi_writeback_thread(void *data)
 	 */
 	set_user_nice(current, 0);
 
-	/*
-	 * Clear pending bit and wakeup anybody waiting to tear us down
-	 */
-	clear_bit(BDI_pending, &bdi->state);
-	smp_mb__after_clear_bit();
-	wake_up_bit(&bdi->state, BDI_pending);
-
 	trace_writeback_thread_start(bdi);
 
 	while (!kthread_should_stop()) {
@@ -828,40 +816,22 @@ int bdi_writeback_thread(void *data)
 
 		if (pages_written)
 			wb->last_active = jiffies;
-		else if (wait_jiffies != -1UL) {
-			unsigned long max_idle;
-
-			/*
-			 * Longest period of inactivity that we tolerate. If we
-			 * see dirty data again later, the thread will get
-			 * recreated automatically.
-			 */
-			max_idle = max(5UL * 60 * HZ, wait_jiffies);
-			if (time_after(jiffies, max_idle + wb->last_active))
-				break;
-		}
 
 		set_current_state(TASK_INTERRUPTIBLE);
 		if (!list_empty(&bdi->work_list)) {
 			__set_current_state(TASK_RUNNING);
 			continue;
 		}
 
-		if (dirty_writeback_interval) {
-			wait_jiffies = msecs_to_jiffies(dirty_writeback_interval * 10);
-			schedule_timeout(wait_jiffies);
-		} else
+		if (dirty_writeback_interval)
+			schedule_timeout(msecs_to_jiffies(dirty_writeback_interval * 10));
+		else
 			schedule();
 
 		try_to_freeze();
 	}
 
-	wb->task = NULL;
-
-	/*
-	 * Flush any work that raced with us exiting. No new work
-	 * will be added, since this bdi isn't discoverable anymore.
-	 */
+	/* Flush any work that raced with us exiting */
 	if (!list_empty(&bdi->work_list))
 		wb_do_writeback(wb, 1);
 
69 changes: 58 additions & 11 deletions trunk/mm/backing-dev.c
@@ -316,6 +316,18 @@ static void sync_supers_timer_fn(unsigned long unused)
 	bdi_arm_supers_timer();
 }
 
+/*
+ * Calculate the longest interval (jiffies) bdi threads are allowed to be
+ * inactive.
+ */
+static unsigned long bdi_longest_inactive(void)
+{
+	unsigned long interval;
+
+	interval = msecs_to_jiffies(dirty_writeback_interval * 10);
+	return max(5UL * 60 * HZ, interval);
+}
+
 static int bdi_forker_thread(void *ptr)
 {
 	struct bdi_writeback *me = ptr;
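As a worked example, with the default dirty_writeback_interval of 500 centiseconds (the vm.dirty_writeback_centisecs sysctl):

	interval = msecs_to_jiffies(500 * 10);	/* 5 seconds */
	max(5UL * 60 * HZ, interval);		/* 5 minutes */

so in the common case an idle bdi thread only becomes a kill candidate after five minutes without activity; the sysctl raises that limit only when set above five minutes.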
@@ -329,11 +341,12 @@ static int bdi_forker_thread(void *ptr)
 	set_user_nice(current, 0);
 
 	for (;;) {
-		struct task_struct *task;
+		struct task_struct *task = NULL;
 		struct backing_dev_info *bdi;
 		enum {
 			NO_ACTION,   /* Nothing to do */
 			FORK_THREAD, /* Fork bdi thread */
+			KILL_THREAD, /* Kill inactive bdi thread */
 		} action = NO_ACTION;
 
 		/*
@@ -346,10 +359,6 @@ static int bdi_forker_thread(void *ptr)
 		spin_lock_bh(&bdi_lock);
 		set_current_state(TASK_INTERRUPTIBLE);
 
-		/*
-		 * Check if any existing bdi's have dirty data without
-		 * a thread registered. If so, set that up.
-		 */
 		list_for_each_entry(bdi, &bdi_list, bdi_list) {
 			bool have_dirty_io;
 
@@ -376,6 +385,25 @@
 				action = FORK_THREAD;
 				break;
 			}
+
+			spin_lock(&bdi->wb_lock);
+			/*
+			 * If there is no work to do and the bdi thread was
+			 * inactive long enough - kill it. The wb_lock is taken
+			 * to make sure no-one adds more work to this bdi and
+			 * wakes the bdi thread up.
+			 */
+			if (bdi->wb.task && !have_dirty_io &&
+			    time_after(jiffies, bdi->wb.last_active +
+						bdi_longest_inactive())) {
+				task = bdi->wb.task;
+				bdi->wb.task = NULL;
+				spin_unlock(&bdi->wb_lock);
+				set_bit(BDI_pending, &bdi->state);
+				action = KILL_THREAD;
+				break;
+			}
+			spin_unlock(&bdi->wb_lock);
 		}
 		spin_unlock_bh(&bdi_lock);
 
@@ -394,8 +422,20 @@ static int bdi_forker_thread(void *ptr)
 				 * the bdi from the thread.
 				 */
 				bdi_flush_io(bdi);
-			} else
+			} else {
+				/*
+				 * The spinlock makes sure we do not lose
+				 * wake-ups when racing with 'bdi_queue_work()'.
+				 */
+				spin_lock(&bdi->wb_lock);
 				bdi->wb.task = task;
+				spin_unlock(&bdi->wb_lock);
+			}
+			break;
+
+		case KILL_THREAD:
+			__set_current_state(TASK_RUNNING);
+			kthread_stop(task);
 			break;
 
 		case NO_ACTION:
@@ -407,6 +447,13 @@
 			/* Back to the main loop */
 			continue;
 		}
+
+		/*
+		 * Clear pending bit and wakeup anybody waiting to tear us down.
+		 */
+		clear_bit(BDI_pending, &bdi->state);
+		smp_mb__after_clear_bit();
+		wake_up_bit(&bdi->state, BDI_pending);
 	}
 
 	return 0;
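Taken together, the mm/backing-dev.c hunks give bdi_forker_thread() the following overall shape. This is a condensed sketch assembled from the added and context lines above; the '...' markers and the short comments are glosses for code this diff does not show, not lines from the file:

static int bdi_forker_thread(void *ptr)
{
	struct bdi_writeback *me = ptr;
	...
	for (;;) {
		struct task_struct *task = NULL;
		struct backing_dev_info *bdi;
		enum { NO_ACTION, FORK_THREAD, KILL_THREAD } action = NO_ACTION;

		spin_lock_bh(&bdi_lock);
		set_current_state(TASK_INTERRUPTIBLE);

		list_for_each_entry(bdi, &bdi_list, bdi_list) {
			bool have_dirty_io;
			...
			/* dirty data but no bdi thread registered yet */
				action = FORK_THREAD;
				break;
			...
			spin_lock(&bdi->wb_lock);
			if (bdi->wb.task && !have_dirty_io &&
			    time_after(jiffies, bdi->wb.last_active +
						bdi_longest_inactive())) {
				task = bdi->wb.task;
				bdi->wb.task = NULL;
				spin_unlock(&bdi->wb_lock);
				set_bit(BDI_pending, &bdi->state);
				action = KILL_THREAD;
				break;
			}
			spin_unlock(&bdi->wb_lock);
		}
		spin_unlock_bh(&bdi_lock);

		switch (action) {
		case FORK_THREAD:
			... /* create the bdi thread (error path elided), then publish it: */
			spin_lock(&bdi->wb_lock);
			bdi->wb.task = task;
			spin_unlock(&bdi->wb_lock);
			break;

		case KILL_THREAD:
			__set_current_state(TASK_RUNNING);
			kthread_stop(task);
			break;

		case NO_ACTION:
			... /* sleep, then */
			continue;
		}

		/* clear BDI_pending and wake anybody waiting to tear us down */
		clear_bit(BDI_pending, &bdi->state);
		smp_mb__after_clear_bit();
		wake_up_bit(&bdi->state, BDI_pending);
	}

	return 0;
}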
@@ -490,15 +537,15 @@ static void bdi_wb_shutdown(struct backing_dev_info *bdi)
 		return;
 
 	/*
-	 * If setup is pending, wait for that to complete first
+	 * Make sure nobody finds us on the bdi_list anymore
 	 */
-	wait_on_bit(&bdi->state, BDI_pending, bdi_sched_wait,
-			TASK_UNINTERRUPTIBLE);
+	bdi_remove_from_list(bdi);
 
 	/*
-	 * Make sure nobody finds us on the bdi_list anymore
+	 * If setup is pending, wait for that to complete first
 	 */
-	bdi_remove_from_list(bdi);
+	wait_on_bit(&bdi->state, BDI_pending, bdi_sched_wait,
+			TASK_UNINTERRUPTIBLE);
 
 	/*
 	 * Finally, kill the kernel thread. We don't need to be RCU
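Net effect of this last hunk, reassembled from its added lines: bdi_wb_shutdown() now takes the bdi off bdi_list before waiting for BDI_pending to clear. Since the forker thread only acts on bdis it finds on bdi_list, and clears BDI_pending only at the bottom of its loop, removing the bdi first presumably guarantees that once the wait completes the forker can no longer pick this bdi up again and set BDI_pending behind our back, so it is then safe to stop the thread:

	bdi_remove_from_list(bdi);

	wait_on_bit(&bdi->state, BDI_pending, bdi_sched_wait,
			TASK_UNINTERRUPTIBLE);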
