Skip to content

Commit

Permalink
---
Browse files Browse the repository at this point in the history
yaml
---
r: 143819
b: refs/heads/master
c: d313d7a
h: refs/heads/master
i:
  143817: e7fc15e
  143815: de59cce
v: v3
  • Loading branch information
Chris Mason committed Apr 20, 2009
1 parent 491eea2 commit 14c6d58
Show file tree
Hide file tree
Showing 6 changed files with 57 additions and 16 deletions.
2 changes: 1 addition & 1 deletion [refs]
Original file line number Diff line number Diff line change
@@ -1,2 +1,2 @@
---
refs/heads/master: ffbd517d5a8c8e93ddd11046434fb029f3df73aa
refs/heads/master: d313d7a31a752c88f7288692bd98e66d0789779b
60 changes: 47 additions & 13 deletions trunk/fs/btrfs/async-thread.c
Original file line number Diff line number Diff line change
Expand Up @@ -25,6 +25,7 @@
#define WORK_QUEUED_BIT 0
#define WORK_DONE_BIT 1
#define WORK_ORDER_DONE_BIT 2
#define WORK_HIGH_PRIO_BIT 3

/*
* container for the kthread task pointer and the list of pending work
Expand All @@ -36,6 +37,7 @@ struct btrfs_worker_thread {

/* list of struct btrfs_work that are waiting for service */
struct list_head pending;
struct list_head prio_pending;

/* list of worker threads from struct btrfs_workers */
struct list_head worker_list;
Expand Down Expand Up @@ -103,10 +105,16 @@ static noinline int run_ordered_completions(struct btrfs_workers *workers,

spin_lock_irqsave(&workers->lock, flags);

while (!list_empty(&workers->order_list)) {
work = list_entry(workers->order_list.next,
struct btrfs_work, order_list);

while (1) {
if (!list_empty(&workers->prio_order_list)) {
work = list_entry(workers->prio_order_list.next,
struct btrfs_work, order_list);
} else if (!list_empty(&workers->order_list)) {
work = list_entry(workers->order_list.next,
struct btrfs_work, order_list);
} else {
break;
}
if (!test_bit(WORK_DONE_BIT, &work->flags))
break;

Expand Down Expand Up @@ -143,8 +151,14 @@ static int worker_loop(void *arg)
do {
spin_lock_irq(&worker->lock);
again_locked:
while (!list_empty(&worker->pending)) {
cur = worker->pending.next;
while (1) {
if (!list_empty(&worker->prio_pending))
cur = worker->prio_pending.next;
else if (!list_empty(&worker->pending))
cur = worker->pending.next;
else
break;

work = list_entry(cur, struct btrfs_work, list);
list_del(&work->list);
clear_bit(WORK_QUEUED_BIT, &work->flags);
Expand All @@ -163,7 +177,6 @@ static int worker_loop(void *arg)

spin_lock_irq(&worker->lock);
check_idle_worker(worker);

}
if (freezing(current)) {
worker->working = 0;
Expand All @@ -178,7 +191,8 @@ static int worker_loop(void *arg)
* jump_in?
*/
smp_mb();
if (!list_empty(&worker->pending))
if (!list_empty(&worker->pending) ||
!list_empty(&worker->prio_pending))
continue;

/*
Expand All @@ -191,7 +205,8 @@ static int worker_loop(void *arg)
*/
schedule_timeout(1);
smp_mb();
if (!list_empty(&worker->pending))
if (!list_empty(&worker->pending) ||
!list_empty(&worker->prio_pending))
continue;

if (kthread_should_stop())
Expand All @@ -200,7 +215,8 @@ static int worker_loop(void *arg)
/* still no more work?, sleep for real */
spin_lock_irq(&worker->lock);
set_current_state(TASK_INTERRUPTIBLE);
if (!list_empty(&worker->pending))
if (!list_empty(&worker->pending) ||
!list_empty(&worker->prio_pending))
goto again_locked;

/*
Expand Down Expand Up @@ -248,6 +264,7 @@ void btrfs_init_workers(struct btrfs_workers *workers, char *name, int max)
INIT_LIST_HEAD(&workers->worker_list);
INIT_LIST_HEAD(&workers->idle_list);
INIT_LIST_HEAD(&workers->order_list);
INIT_LIST_HEAD(&workers->prio_order_list);
spin_lock_init(&workers->lock);
workers->max_workers = max;
workers->idle_thresh = 32;
Expand All @@ -273,6 +290,7 @@ int btrfs_start_workers(struct btrfs_workers *workers, int num_workers)
}

INIT_LIST_HEAD(&worker->pending);
INIT_LIST_HEAD(&worker->prio_pending);
INIT_LIST_HEAD(&worker->worker_list);
spin_lock_init(&worker->lock);
atomic_set(&worker->num_pending, 0);
Expand Down Expand Up @@ -396,7 +414,10 @@ int btrfs_requeue_work(struct btrfs_work *work)
goto out;

spin_lock_irqsave(&worker->lock, flags);
list_add_tail(&work->list, &worker->pending);
if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags))
list_add_tail(&work->list, &worker->prio_pending);
else
list_add_tail(&work->list, &worker->pending);
atomic_inc(&worker->num_pending);

/* by definition we're busy, take ourselves off the idle
Expand All @@ -422,6 +443,11 @@ int btrfs_requeue_work(struct btrfs_work *work)
return 0;
}

/*
 * Mark a work item as high priority so that btrfs_queue_worker() and
 * btrfs_requeue_work() place it on the prio_pending / prio_order_list
 * queues, which the worker threads drain before the regular queues.
 *
 * Must be called before the work is queued; the bit is tested at
 * queue time and atomically set here via set_bit().
 */
void btrfs_set_work_high_prio(struct btrfs_work *work)
{
	set_bit(WORK_HIGH_PRIO_BIT, &work->flags);
}

/*
* places a struct btrfs_work into the pending queue of one of the kthreads
*/
Expand All @@ -438,15 +464,23 @@ int btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work)
worker = find_worker(workers);
if (workers->ordered) {
spin_lock_irqsave(&workers->lock, flags);
list_add_tail(&work->order_list, &workers->order_list);
if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags)) {
list_add_tail(&work->order_list,
&workers->prio_order_list);
} else {
list_add_tail(&work->order_list, &workers->order_list);
}
spin_unlock_irqrestore(&workers->lock, flags);
} else {
INIT_LIST_HEAD(&work->order_list);
}

spin_lock_irqsave(&worker->lock, flags);

list_add_tail(&work->list, &worker->pending);
if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags))
list_add_tail(&work->list, &worker->prio_pending);
else
list_add_tail(&work->list, &worker->pending);
atomic_inc(&worker->num_pending);
check_busy_worker(worker);

Expand Down
2 changes: 2 additions & 0 deletions trunk/fs/btrfs/async-thread.h
Original file line number Diff line number Diff line change
Expand Up @@ -85,6 +85,7 @@ struct btrfs_workers {
* of work items waiting for completion
*/
struct list_head order_list;
struct list_head prio_order_list;

/* lock for finding the next worker thread to queue on */
spinlock_t lock;
Expand All @@ -98,4 +99,5 @@ int btrfs_start_workers(struct btrfs_workers *workers, int num_workers);
int btrfs_stop_workers(struct btrfs_workers *workers);
void btrfs_init_workers(struct btrfs_workers *workers, char *name, int max);
int btrfs_requeue_work(struct btrfs_work *work);
void btrfs_set_work_high_prio(struct btrfs_work *work);
#endif
5 changes: 5 additions & 0 deletions trunk/fs/btrfs/disk-io.c
Original file line number Diff line number Diff line change
Expand Up @@ -579,6 +579,10 @@ int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
async->bio_flags = bio_flags;

atomic_inc(&fs_info->nr_async_submits);

if (rw & (1 << BIO_RW_SYNCIO))
btrfs_set_work_high_prio(&async->work);

btrfs_queue_worker(&fs_info->workers, &async->work);
#if 0
int limit = btrfs_async_submit_limit(fs_info);
Expand Down Expand Up @@ -656,6 +660,7 @@ static int btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
return btrfs_map_bio(BTRFS_I(inode)->root, rw, bio,
mirror_num, 0);
}

/*
* kthread helpers are used to submit writes so that checksumming
* can happen in parallel across all CPUs
Expand Down
2 changes: 1 addition & 1 deletion trunk/fs/btrfs/extent_io.c
Original file line number Diff line number Diff line change
Expand Up @@ -2501,7 +2501,7 @@ int extent_write_full_page(struct extent_io_tree *tree, struct page *page,
};
struct writeback_control wbc_writepages = {
.bdi = wbc->bdi,
.sync_mode = WB_SYNC_NONE,
.sync_mode = wbc->sync_mode,
.older_than_this = NULL,
.nr_to_write = 64,
.range_start = page_offset(page) + PAGE_CACHE_SIZE,
Expand Down
2 changes: 1 addition & 1 deletion trunk/fs/btrfs/file.c
Original file line number Diff line number Diff line change
Expand Up @@ -1131,7 +1131,7 @@ static ssize_t btrfs_file_write(struct file *file, const char __user *buf,
if (will_write) {
btrfs_fdatawrite_range(inode->i_mapping, pos,
pos + write_bytes - 1,
WB_SYNC_NONE);
WB_SYNC_ALL);
} else {
balance_dirty_pages_ratelimited_nr(inode->i_mapping,
num_pages);
Expand Down

0 comments on commit 14c6d58

Please sign in to comment.