Btrfs: Fix snapshot deletion to release the alloc_mutex much more often.
This lowers the impact of snapshot deletion on the rest of the FS.

Signed-off-by: Chris Mason <chris.mason@oracle.com>
Chris Mason committed Sep 25, 2008
1 parent 5cd57b2 commit 333db94
Showing 3 changed files with 23 additions and 9 deletions.
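The recurring shape of the fix: drop alloc_mutex around anything that may block on disk reads, retake it afterwards, and redo any lookup whose answer the lock was protecting. A minimal standalone sketch of that shape (not the kernel code; pthreads stand in for the kernel mutex, and slow_lookup() is a hypothetical stand-in for lookup_extent_ref()):

#include <pthread.h>

static pthread_mutex_t alloc_mutex = PTHREAD_MUTEX_INITIALIZER;

/* hypothetical stand-in for lookup_extent_ref(): pretend the first call
 * pays the random-read cost and later calls are served from cache */
static int slow_lookup(unsigned long long bytenr, unsigned int *refs)
{
    (void)bytenr;
    *refs = 1;          /* placeholder answer */
    return 0;
}

/* called with alloc_mutex held; returns with it held */
static int lookup_without_blocking_others(unsigned long long bytenr,
                                          unsigned int *refs)
{
    /* let other tasks take the allocator lock while we wait on disk */
    pthread_mutex_unlock(&alloc_mutex);
    slow_lookup(bytenr, refs);      /* warms the cache; result not trusted */
    pthread_mutex_lock(&alloc_mutex);

    /* state may have changed while the lock was dropped, so redo the
     * lookup under the lock; this time it is served from cache */
    return slow_lookup(bytenr, refs);
}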
fs/btrfs/ctree.c (2 changes: 1 addition & 1 deletion)

@@ -1255,7 +1255,7 @@ int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root
     lowest_level = p->lowest_level;
     WARN_ON(lowest_level && ins_len);
     WARN_ON(p->nodes[0] != NULL);
-    WARN_ON(root == root->fs_info->extent_root &&
+    WARN_ON(cow && root == root->fs_info->extent_root &&
         !mutex_is_locked(&root->fs_info->alloc_mutex));
     WARN_ON(root == root->fs_info->chunk_root &&
         !mutex_is_locked(&root->fs_info->chunk_mutex));
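The added cow test narrows the assertion: only searches that may COW (and therefore allocate) blocks of the extent root need alloc_mutex held. This matters for the change below, where drop_snap_lookup_refcount() performs a read-only lookup in the extent root while alloc_mutex is deliberately dropped; without the cow condition that path would trip the WARN_ON.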
fs/btrfs/disk-io.c (2 changes: 2 additions & 0 deletions)

@@ -1674,6 +1674,7 @@ void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
 
 void btrfs_throttle(struct btrfs_root *root)
 {
+#if 0
     struct backing_dev_info *bdi;
 
     bdi = &root->fs_info->bdi;
@@ -1686,6 +1687,7 @@ void btrfs_throttle(struct btrfs_root *root)
 #endif
 
     }
+#endif
 }
 
 void btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr)
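The new #if 0/#endif pair brackets the entire body of btrfs_throttle() (the #endif and closing brace shown as context belong to a conditional block that was already inside the function), so the function now compiles to a no-op and callers are no longer stalled waiting on a congested backing device.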
fs/btrfs/extent-tree.c (28 changes: 20 additions & 8 deletions)

@@ -1223,8 +1223,8 @@ printk("space info full %Lu\n", flags);
     ret = btrfs_make_block_group(trans, extent_root, 0, flags,
              BTRFS_FIRST_CHUNK_TREE_OBJECTID, start, num_bytes);
     BUG_ON(ret);
-    mutex_unlock(&extent_root->fs_info->chunk_mutex);
 out:
+    mutex_unlock(&extent_root->fs_info->chunk_mutex);
     return 0;
 }

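Moving the mutex_unlock() below the out: label also fixes an exit-path bug: an early goto out used to return with chunk_mutex still held. The shape of the fix in isolation (hypothetical names; pthreads again stand in for the kernel mutex):

#include <pthread.h>

static pthread_mutex_t chunk_mutex = PTHREAD_MUTEX_INITIALIZER;

/* hypothetical stand-in for the "space info full" early-exit test */
static int space_info_full(void) { return 1; }

static int do_chunk_alloc_sketch(void)
{
    pthread_mutex_lock(&chunk_mutex);
    if (space_info_full())
        goto out;   /* before the fix, this jump skipped the unlock */
    /* ... create and register the new block group here ... */
out:
    pthread_mutex_unlock(&chunk_mutex);   /* now covers every exit path */
    return 0;
}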
@@ -2181,17 +2181,29 @@ static void noinline reada_walk_down(struct btrfs_root *root,
                 continue;
             }
         }
+        mutex_unlock(&root->fs_info->alloc_mutex);
         ret = readahead_tree_block(root, bytenr, blocksize,
                        btrfs_node_ptr_generation(node, i));
         last = bytenr + blocksize;
         cond_resched();
+        mutex_lock(&root->fs_info->alloc_mutex);
         if (ret)
             break;
     }
 }
 
+/*
+ * we want to avoid as much random IO as we can with the alloc mutex
+ * held, so drop the lock and do the lookup, then do it again with the
+ * lock held.
+ */
+int drop_snap_lookup_refcount(struct btrfs_root *root, u64 start, u64 len,
+                  u32 *refs)
+{
+    mutex_unlock(&root->fs_info->alloc_mutex);
+    lookup_extent_ref(NULL, root, start, len, refs);
+    mutex_lock(&root->fs_info->alloc_mutex);
+    return lookup_extent_ref(NULL, root, start, len, refs);
+}
+
 /*
  * helper function for drop_snapshot, this walks down the tree dropping ref
  * counts as it goes.
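Two things happen in this hunk. First, reada_walk_down() now releases alloc_mutex around each readahead_tree_block() call and retakes it before the next loop iteration, so the function must be entered with the lock held (the caller is adjusted in the final hunk). Second, the new drop_snap_lookup_refcount() helper implements the trick described in its comment: the first lookup_extent_ref() call runs with the lock dropped and exists only to pull the needed extent-tree blocks into cache (its return value is discarded); the second call, made after retaking the lock, repeats the lookup cheaply from cache and returns the authoritative reference count.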
@@ -2215,8 +2227,7 @@ static int noinline walk_down_tree(struct btrfs_trans_handle *trans,
 
     WARN_ON(*level < 0);
     WARN_ON(*level >= BTRFS_MAX_LEVEL);
-    ret = lookup_extent_ref(trans, root,
-                path->nodes[*level]->start,
+    ret = drop_snap_lookup_refcount(root, path->nodes[*level]->start,
                 path->nodes[*level]->len, &refs);
     BUG_ON(ret);
     if (refs > 1)
@@ -2245,7 +2256,7 @@ static int noinline walk_down_tree(struct btrfs_trans_handle *trans,
         ptr_gen = btrfs_node_ptr_generation(cur, path->slots[*level]);
         blocksize = btrfs_level_size(root, *level - 1);
 
-        ret = lookup_extent_ref(trans, root, bytenr, blocksize, &refs);
+        ret = drop_snap_lookup_refcount(root, bytenr, blocksize, &refs);
         BUG_ON(ret);
         if (refs != 1) {
             parent = path->nodes[*level];
@@ -2261,15 +2272,16 @@ static int noinline walk_down_tree(struct btrfs_trans_handle *trans,
         next = btrfs_find_tree_block(root, bytenr, blocksize);
         if (!next || !btrfs_buffer_uptodate(next, ptr_gen)) {
             free_extent_buffer(next);
-            mutex_unlock(&root->fs_info->alloc_mutex);
 
             reada_walk_down(root, cur, path->slots[*level]);
 
+            mutex_unlock(&root->fs_info->alloc_mutex);
             next = read_tree_block(root, bytenr, blocksize,
                            ptr_gen);
             mutex_lock(&root->fs_info->alloc_mutex);
 
             /* we've dropped the lock, double check */
-            ret = lookup_extent_ref(trans, root, bytenr,
+            ret = drop_snap_lookup_refcount(root, bytenr,
                         blocksize, &refs);
             BUG_ON(ret);
             if (refs != 1) {
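With these last three hunks, the refcount probes in walk_down_tree() all go through the new helper, so every level of the walk becomes a point where other tasks can take alloc_mutex. The unlock ahead of read_tree_block() also moves below the reada_walk_down() call, matching that function's new expectation of being entered with the lock held, and the existing "we've dropped the lock, double check" re-lookup keeps its revalidation role through the helper as well.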
