Btrfs: make delayed ref lock logic more readable
The locking and unlocking of the delayed ref mutex happen in different
functions, and the lock helpers are not named uniformly, which hurts
readability. This patch cleans up the lock logic and makes it more
readable.

Signed-off-by: Miao Xie <miaox@cn.fujitsu.com>
Signed-off-by: Josef Bacik <jbacik@fusionio.com>
Miao Xie authored and Josef Bacik committed Feb 20, 2013
commit 093486c (1 parent 0e8c36a)
Showing 3 changed files with 38 additions and 18 deletions.
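
In short, every place that used to call mutex_unlock(&head->mutex) directly now goes through btrfs_delayed_ref_unlock(), so each btrfs_delayed_ref_lock() has an obviously named partner, and the error paths release an unfinished cluster through btrfs_release_ref_cluster(). As a rough userspace sketch of that pairing (ref_head, delayed_ref_lock, delayed_ref_unlock and run_one_head are made-up stand-ins, with a pthread mutex in place of the kernel's struct mutex):

#include <pthread.h>
#include <stdio.h>

/* Made-up stand-in for struct btrfs_delayed_ref_head, with a pthread
 * mutex where the kernel head has a struct mutex. */
struct ref_head {
	pthread_mutex_t mutex;
	int id;
};

/* The point of the patch: lock and unlock are a visible, named pair. */
static int delayed_ref_lock(struct ref_head *head)
{
	return pthread_mutex_lock(&head->mutex);
}

static void delayed_ref_unlock(struct ref_head *head)
{
	pthread_mutex_unlock(&head->mutex);
}

/* Pretend to process the head; before the patch, the real
 * run_one_delayed_ref() also dropped the mutex on its way out. */
static int run_one_head(struct ref_head *head)
{
	printf("processing head %d\n", head->id);
	return 0;
}

int main(void)
{
	struct ref_head head = { PTHREAD_MUTEX_INITIALIZER, 42 };
	int ret;

	if (delayed_ref_lock(&head))
		return 1;
	ret = run_one_head(&head);
	/* After the patch the unlock sits next to the lock in the
	 * caller, instead of hiding inside the worker function. */
	delayed_ref_unlock(&head);
	return ret;
}
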
fs/btrfs/delayed-ref.c: 8 additions, 0 deletions
@@ -426,6 +426,14 @@ int btrfs_find_ref_cluster(struct btrfs_trans_handle *trans,
 	return 1;
 }
 
+void btrfs_release_ref_cluster(struct list_head *cluster)
+{
+	struct list_head *pos, *q;
+
+	list_for_each_safe(pos, q, cluster)
+		list_del_init(pos);
+}
+
 /*
  * helper function to update an extent delayed ref in the
  * rbtree. existing and update must both have the same
fs/btrfs/delayed-ref.h: 6 additions, 0 deletions
@@ -211,8 +211,14 @@ struct btrfs_delayed_ref_head *
 btrfs_find_delayed_ref_head(struct btrfs_trans_handle *trans, u64 bytenr);
 int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans,
 			   struct btrfs_delayed_ref_head *head);
+static inline void btrfs_delayed_ref_unlock(struct btrfs_delayed_ref_head *head)
+{
+	mutex_unlock(&head->mutex);
+}
+
 int btrfs_find_ref_cluster(struct btrfs_trans_handle *trans,
 			   struct list_head *cluster, u64 search_start);
+void btrfs_release_ref_cluster(struct list_head *cluster);
 
 int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info,
 			    struct btrfs_delayed_ref_root *delayed_refs,
fs/btrfs/extent-tree.c: 24 additions, 18 deletions
@@ -2143,7 +2143,6 @@ static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
 						      node->num_bytes);
 			}
 		}
-		mutex_unlock(&head->mutex);
 		return ret;
 	}
 
@@ -2258,7 +2257,7 @@ static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
 			 * process of being added. Don't run this ref yet.
 			 */
 			list_del_init(&locked_ref->cluster);
-			mutex_unlock(&locked_ref->mutex);
+			btrfs_delayed_ref_unlock(locked_ref);
 			locked_ref = NULL;
 			delayed_refs->num_heads_ready++;
 			spin_unlock(&delayed_refs->lock);
@@ -2297,25 +2296,22 @@ static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
 				btrfs_free_delayed_extent_op(extent_op);
 
 				if (ret) {
-					list_del_init(&locked_ref->cluster);
-					mutex_unlock(&locked_ref->mutex);
-
-					printk(KERN_DEBUG "btrfs: run_delayed_extent_op returned %d\n", ret);
+					printk(KERN_DEBUG
+					       "btrfs: run_delayed_extent_op "
+					       "returned %d\n", ret);
 					spin_lock(&delayed_refs->lock);
+					btrfs_delayed_ref_unlock(locked_ref);
 					return ret;
 				}
 
 				goto next;
 			}
-
-			list_del_init(&locked_ref->cluster);
-			locked_ref = NULL;
 		}
 
 		ref->in_tree = 0;
 		rb_erase(&ref->rb_node, &delayed_refs->root);
 		delayed_refs->num_entries--;
-		if (locked_ref) {
+		if (!btrfs_delayed_ref_is_head(ref)) {
 			/*
 			 * when we play the delayed ref, also correct the
 			 * ref_mod on head
@@ -2337,20 +2333,29 @@ static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
 		ret = run_one_delayed_ref(trans, root, ref, extent_op,
 					  must_insert_reserved);
 
-		btrfs_put_delayed_ref(ref);
 		btrfs_free_delayed_extent_op(extent_op);
-		count++;
-
 		if (ret) {
-			if (locked_ref) {
-				list_del_init(&locked_ref->cluster);
-				mutex_unlock(&locked_ref->mutex);
-			}
-			printk(KERN_DEBUG "btrfs: run_one_delayed_ref returned %d\n", ret);
+			btrfs_delayed_ref_unlock(locked_ref);
+			btrfs_put_delayed_ref(ref);
+			printk(KERN_DEBUG
+			       "btrfs: run_one_delayed_ref returned %d\n", ret);
 			spin_lock(&delayed_refs->lock);
 			return ret;
 		}
 
+		/*
+		 * If this node is a head, that means all the refs in this head
+		 * have been dealt with, and we will pick the next head to deal
+		 * with, so we must unlock the head and drop it from the cluster
+		 * list before we release it.
+		 */
+		if (btrfs_delayed_ref_is_head(ref)) {
+			list_del_init(&locked_ref->cluster);
+			btrfs_delayed_ref_unlock(locked_ref);
+			locked_ref = NULL;
+		}
+		btrfs_put_delayed_ref(ref);
+		count++;
 next:
 		cond_resched();
 		spin_lock(&delayed_refs->lock);
@@ -2500,6 +2505,7 @@ int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
 
 		ret = run_clustered_refs(trans, root, &cluster);
 		if (ret < 0) {
+			btrfs_release_ref_cluster(&cluster);
 			spin_unlock(&delayed_refs->lock);
 			btrfs_abort_transaction(trans, root, ret);
 			return ret;
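
The one-line addition above is the piece that needs btrfs_release_ref_cluster(): if run_clustered_refs() fails partway through, the heads still sitting on the cluster list must be dropped before the transaction aborts. A self-contained userspace illustration of what that helper does (list_init, list_add_tail, list_del_init and release_ref_cluster are simplified stand-ins for the kernel's <linux/list.h> primitives):

#include <stdio.h>

/* Minimal intrusive circular list, in the shape of struct list_head. */
struct list_head { struct list_head *next, *prev; };

static void list_init(struct list_head *h) { h->next = h->prev = h; }

static void list_add_tail(struct list_head *n, struct list_head *h)
{
	n->prev = h->prev;
	n->next = h;
	h->prev->next = n;
	h->prev = n;
}

static void list_del_init(struct list_head *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
	list_init(n);
}

/* Same shape as the patch's btrfs_release_ref_cluster(). */
static void release_ref_cluster(struct list_head *cluster)
{
	struct list_head *pos, *q;

	/* Walk with a saved next pointer so deleting pos is safe,
	 * mirroring the kernel's list_for_each_safe(). */
	for (pos = cluster->next, q = pos->next; pos != cluster;
	     pos = q, q = pos->next)
		list_del_init(pos);
}

int main(void)
{
	struct list_head cluster, heads[3];
	int i;

	list_init(&cluster);
	for (i = 0; i < 3; i++)
		list_add_tail(&heads[i], &cluster);

	release_ref_cluster(&cluster); /* the error-path cleanup */
	printf("cluster empty after release: %s\n",
	       cluster.next == &cluster ? "yes" : "no");
	return 0;
}

The saved next pointer (q) is what makes deletion during the walk safe, which is why the kernel helper uses list_for_each_safe() rather than list_for_each().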
