
Commit

---
r: 322361
b: refs/heads/master
c: 1fa11e2
h: refs/heads/master
i:
  322359: a566128
v: v3
Arne Jansen authored and Chris Mason committed Aug 28, 2012
1 parent d4adf01 commit 0e2359e
Showing 6 changed files with 22 additions and 74 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
-refs/heads/master: 6209526531e70c080f79318ab8f50e26846c40a8
+refs/heads/master: 1fa11e265fa2562fb713171b6a58e72bb7afd276
6 changes: 0 additions & 6 deletions trunk/fs/btrfs/ctree.c
@@ -420,12 +420,6 @@ void btrfs_put_tree_mod_seq(struct btrfs_fs_info *fs_info,
}
spin_unlock(&fs_info->tree_mod_seq_lock);

-/*
-* we removed the lowest blocker from the blocker list, so there may be
-* more processible delayed refs.
-*/
-wake_up(&fs_info->tree_mod_seq_wait);
-
/*
* anything that's lower than the lowest existing (read: blocked)
* sequence number can be removed from the tree.
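
Note: the wake_up() removed above is the producer half of a wait/wake pairing; its consumer, wait_for_more_refs() in extent-tree.c, is removed further down. A minimal sketch of that pairing, assuming hypothetical demo_* names rather than the actual btrfs symbols:

#include <linux/wait.h>
#include <linux/atomic.h>

static DECLARE_WAIT_QUEUE_HEAD(demo_waitq);
static atomic_t demo_entries = ATOMIC_INIT(0);

/* producer: change the waited-on state, then wake any sleepers */
static void demo_remove_blocker(void)
{
	atomic_dec(&demo_entries);
	wake_up(&demo_waitq);
}

/* consumer: sleep until the entry count moves away from what we saw */
static void demo_wait_for_change(int seen)
{
	wait_event(demo_waitq, atomic_read(&demo_entries) != seen);
}

Every path that changes the waited-on condition has to issue the wake_up(), which is why removing the mechanism touches call sites across ctree.c, delayed-ref.c and extent-tree.c.
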
1 change: 0 additions & 1 deletion trunk/fs/btrfs/ctree.h
@@ -1252,7 +1252,6 @@ struct btrfs_fs_info {
atomic_t tree_mod_seq;
struct list_head tree_mod_seq_list;
struct seq_list tree_mod_seq_elem;
-wait_queue_head_t tree_mod_seq_wait;

/* this protects tree_mod_log */
rwlock_t tree_mod_log_lock;
8 changes: 0 additions & 8 deletions trunk/fs/btrfs/delayed-ref.c
@@ -662,9 +662,6 @@ int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
add_delayed_tree_ref(fs_info, trans, &ref->node, bytenr,
num_bytes, parent, ref_root, level, action,
for_cow);
-if (!need_ref_seq(for_cow, ref_root) &&
-waitqueue_active(&fs_info->tree_mod_seq_wait))
-wake_up(&fs_info->tree_mod_seq_wait);
spin_unlock(&delayed_refs->lock);
if (need_ref_seq(for_cow, ref_root))
btrfs_qgroup_record_ref(trans, &ref->node, extent_op);
@@ -713,9 +710,6 @@ int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
add_delayed_data_ref(fs_info, trans, &ref->node, bytenr,
num_bytes, parent, ref_root, owner, offset,
action, for_cow);
-if (!need_ref_seq(for_cow, ref_root) &&
-waitqueue_active(&fs_info->tree_mod_seq_wait))
-wake_up(&fs_info->tree_mod_seq_wait);
spin_unlock(&delayed_refs->lock);
if (need_ref_seq(for_cow, ref_root))
btrfs_qgroup_record_ref(trans, &ref->node, extent_op);
@@ -744,8 +738,6 @@ int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
num_bytes, BTRFS_UPDATE_DELAYED_HEAD,
extent_op->is_data);

-if (waitqueue_active(&fs_info->tree_mod_seq_wait))
-wake_up(&fs_info->tree_mod_seq_wait);
spin_unlock(&delayed_refs->lock);
return 0;
}
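
The three hunks above drop the same idiom: a lockless waitqueue_active() check that skips wake_up() when nobody is sleeping, saving the waitqueue lock on the fast path. A hedged sketch of that idiom, with hypothetical demo_* names; note the lockless check is only safe with a memory barrier between the state update and the check, as in the check_ref_cleanup() hunk below:

#include <linux/wait.h>
#include <linux/atomic.h>

static void demo_notify(wait_queue_head_t *wq, atomic_t *counter)
{
	atomic_inc(counter);
	smp_mb();	/* order the update against the lockless sleeper check */
	if (waitqueue_active(wq))	/* skip the wake if no one is waiting */
		wake_up(wq);
}
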
2 changes: 0 additions & 2 deletions trunk/fs/btrfs/disk-io.c
@@ -2035,8 +2035,6 @@ int open_ctree(struct super_block *sb,
fs_info->free_chunk_space = 0;
fs_info->tree_mod_log = RB_ROOT;

-init_waitqueue_head(&fs_info->tree_mod_seq_wait);
-
/* readahead state */
INIT_RADIX_TREE(&fs_info->reada_tree, GFP_NOFS & ~__GFP_WAIT);
spin_lock_init(&fs_info->reada_lock);
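
The init_waitqueue_head() removed above was required because tree_mod_seq_wait is embedded in struct btrfs_fs_info: a waitqueue inside a dynamically allocated structure must be initialized at runtime, unlike a static one declared with DECLARE_WAIT_QUEUE_HEAD(). A minimal sketch, assuming a hypothetical demo_info structure:

#include <linux/wait.h>

struct demo_info {
	wait_queue_head_t waitq;	/* embedded, so it needs runtime init */
};

static void demo_info_init(struct demo_info *info)
{
	init_waitqueue_head(&info->waitq);
}
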
77 changes: 21 additions & 56 deletions trunk/fs/btrfs/extent-tree.c
@@ -2318,12 +2318,6 @@ static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
ref->in_tree = 0;
rb_erase(&ref->rb_node, &delayed_refs->root);
delayed_refs->num_entries--;
-/*
-* we modified num_entries, but as we're currently running
-* delayed refs, skip
-* wake_up(&delayed_refs->seq_wait);
-* here.
-*/
spin_unlock(&delayed_refs->lock);

ret = run_one_delayed_ref(trans, root, ref, extent_op,
@@ -2350,22 +2344,6 @@ static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
return count;
}

-static void wait_for_more_refs(struct btrfs_fs_info *fs_info,
-struct btrfs_delayed_ref_root *delayed_refs,
-unsigned long num_refs,
-struct list_head *first_seq)
-{
-spin_unlock(&delayed_refs->lock);
-pr_debug("waiting for more refs (num %ld, first %p)\n",
-num_refs, first_seq);
-wait_event(fs_info->tree_mod_seq_wait,
-num_refs != delayed_refs->num_entries ||
-fs_info->tree_mod_seq_list.next != first_seq);
-pr_debug("done waiting for more refs (num %ld, first %p)\n",
-delayed_refs->num_entries, fs_info->tree_mod_seq_list.next);
-spin_lock(&delayed_refs->lock);
-}
-
#ifdef SCRAMBLE_DELAYED_REFS
/*
* Normally delayed refs get processed in ascending bytenr order. This
@@ -2460,13 +2438,11 @@ int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
struct btrfs_delayed_ref_root *delayed_refs;
struct btrfs_delayed_ref_node *ref;
struct list_head cluster;
-struct list_head *first_seq = NULL;
int ret;
u64 delayed_start;
int run_all = count == (unsigned long)-1;
int run_most = 0;
-unsigned long num_refs = 0;
-int consider_waiting;
+int loops;

/* We'll clean this up in btrfs_cleanup_transaction */
if (trans->aborted)
@@ -2484,7 +2460,7 @@ int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
delayed_refs = &trans->transaction->delayed_refs;
INIT_LIST_HEAD(&cluster);
again:
-consider_waiting = 0;
+loops = 0;
spin_lock(&delayed_refs->lock);

#ifdef SCRAMBLE_DELAYED_REFS
@@ -2512,31 +2488,6 @@ int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
if (ret)
break;

-if (delayed_start >= delayed_refs->run_delayed_start) {
-if (consider_waiting == 0) {
-/*
-* btrfs_find_ref_cluster looped. let's do one
-* more cycle. if we don't run any delayed ref
-* during that cycle (because we can't because
-* all of them are blocked) and if the number of
-* refs doesn't change, we avoid busy waiting.
-*/
-consider_waiting = 1;
-num_refs = delayed_refs->num_entries;
-first_seq = root->fs_info->tree_mod_seq_list.next;
-} else {
-wait_for_more_refs(root->fs_info, delayed_refs,
-num_refs, first_seq);
-/*
-* after waiting, things have changed. we
-* dropped the lock and someone else might have
-* run some refs, built new clusters and so on.
-* therefore, we restart staleness detection.
-*/
-consider_waiting = 0;
-}
-}
-
ret = run_clustered_refs(trans, root, &cluster);
if (ret < 0) {
spin_unlock(&delayed_refs->lock);
@@ -2549,9 +2500,26 @@ int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
if (count == 0)
break;

-if (ret || delayed_refs->run_delayed_start == 0) {
+if (delayed_start >= delayed_refs->run_delayed_start) {
+if (loops == 0) {
+/*
+* btrfs_find_ref_cluster looped. let's do one
+* more cycle. if we don't run any delayed ref
+* during that cycle (because we can't because
+* all of them are blocked), bail out.
+*/
+loops = 1;
+} else {
+/*
+* no runnable refs left, stop trying
+*/
+BUG_ON(run_all);
+break;
+}
+}
+if (ret) {
/* refs were run, let's reset staleness detection */
-consider_waiting = 0;
+loops = 0;
}
}
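
The replacement logic above trades sleeping for a bounded retry: when btrfs_find_ref_cluster() wraps around, the loop grants itself exactly one more pass, and if that pass runs no refs either, it bails out rather than blocking. A condensed sketch of the same staleness detection, with hypothetical demo_* stubs standing in for the cluster helpers (the real loop also handles run_all and the remaining count):

static int demo_run_cluster(void) { return 0; }	/* hypothetical stub: number of refs run */
static int demo_wrapped_around(void) { return 1; }	/* hypothetical stub: cluster search looped */

static void demo_run_until_stale(void)
{
	int loops = 0;

	for (;;) {
		int ran = demo_run_cluster();

		if (demo_wrapped_around()) {
			if (loops)
				break;		/* second idle pass: nothing runnable */
			loops = 1;
		}
		if (ran)
			loops = 0;	/* progress resets the detection */
	}
}

Bailing out instead of waiting on tree_mod_seq_wait presumably avoids stalling a transaction on a waitqueue that might never be woken again.
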

@@ -5296,9 +5264,6 @@ static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
rb_erase(&head->node.rb_node, &delayed_refs->root);

delayed_refs->num_entries--;
-smp_mb();
-if (waitqueue_active(&root->fs_info->tree_mod_seq_wait))
-wake_up(&root->fs_info->tree_mod_seq_wait);

/*
* we don't take a ref on the node because we're removing it from the
