btrfs: remove pointless loop from btrfs_update_block_group()
When an extent is allocated or freed, we call btrfs_update_block_group()
to update its block group and space info. An extent always belongs to a
single block group and can never span multiple block groups, so the loop
we have at btrfs_update_block_group() is pointless, as it always has a
single iteration. The loop was added in the very early days, in 2007, when
the block group code was added in commit 9078a3e ("Btrfs: start of
block group code"), but even back then it seemed pointless.

So remove the loop and assert that the block group containing the start
offset of the extent also contains the whole extent.

Signed-off-by: Filipe Manana <fdmanana@suse.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
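
The invariant behind the change, for illustration only: a block group covers
the logical range [start, start + length), and an extent
[bytenr, bytenr + num_bytes) allocated from it never crosses that boundary,
so a single lookup by bytenr finds the one block group affected. Below is a
minimal standalone sketch of that containment check; the struct and function
names are made up for the example and are not the kernel's.

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* Hypothetical stand-in for a block group: covers [start, start + length). */
struct example_block_group {
        uint64_t start;
        uint64_t length;
};

/*
 * Containment check: the lookup by bytenr guarantees the lower bound, and
 * the ASSERT added by the patch covers the upper bound.
 */
static bool extent_within_block_group(const struct example_block_group *bg,
                                      uint64_t bytenr, uint64_t num_bytes)
{
        return bytenr >= bg->start &&
               bytenr + num_bytes <= bg->start + bg->length;
}

int main(void)
{
        struct example_block_group bg = {
                .start = 1024ULL * 1024,                /* 1 MiB */
                .length = 256ULL * 1024 * 1024,         /* 256 MiB */
        };

        /* An extent allocated from this block group satisfies the check. */
        assert(extent_within_block_group(&bg, bg.start + 4096, 16 * 4096));
        return 0;
}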
Filipe Manana authored and David Sterba committed Oct 12, 2023
1 parent 4ebe8d4 commit 4d20c1d
Showing 1 changed file with 67 additions and 80 deletions.
fs/btrfs/block-group.c
@@ -3542,12 +3542,11 @@ int btrfs_update_block_group(struct btrfs_trans_handle *trans,
                              u64 bytenr, u64 num_bytes, bool alloc)
 {
         struct btrfs_fs_info *info = trans->fs_info;
-        struct btrfs_block_group *cache = NULL;
-        u64 total = num_bytes;
+        struct btrfs_space_info *space_info;
+        struct btrfs_block_group *cache;
         u64 old_val;
-        u64 byte_in_group;
+        bool reclaim = false;
         int factor;
-        int ret = 0;

         /* Block accounting for super block */
         spin_lock(&info->delalloc_root_lock);
@@ -3559,97 +3558,85 @@ int btrfs_update_block_group(struct btrfs_trans_handle *trans,
         btrfs_set_super_bytes_used(info->super_copy, old_val);
         spin_unlock(&info->delalloc_root_lock);

-        while (total) {
-                struct btrfs_space_info *space_info;
-                bool reclaim = false;
-
-                cache = btrfs_lookup_block_group(info, bytenr);
-                if (!cache) {
-                        ret = -ENOENT;
-                        break;
-                }
-                space_info = cache->space_info;
-                factor = btrfs_bg_type_to_factor(cache->flags);
-
-                /*
-                 * If this block group has free space cache written out, we
-                 * need to make sure to load it if we are removing space. This
-                 * is because we need the unpinning stage to actually add the
-                 * space back to the block group, otherwise we will leak space.
-                 */
-                if (!alloc && !btrfs_block_group_done(cache))
-                        btrfs_cache_block_group(cache, true);
-
-                byte_in_group = bytenr - cache->start;
-                WARN_ON(byte_in_group > cache->length);
-
-                spin_lock(&space_info->lock);
-                spin_lock(&cache->lock);
-
-                if (btrfs_test_opt(info, SPACE_CACHE) &&
-                    cache->disk_cache_state < BTRFS_DC_CLEAR)
-                        cache->disk_cache_state = BTRFS_DC_CLEAR;
-
-                old_val = cache->used;
-                num_bytes = min(total, cache->length - byte_in_group);
-                if (alloc) {
-                        old_val += num_bytes;
-                        cache->used = old_val;
-                        cache->reserved -= num_bytes;
-                        space_info->bytes_reserved -= num_bytes;
-                        space_info->bytes_used += num_bytes;
-                        space_info->disk_used += num_bytes * factor;
-                        spin_unlock(&cache->lock);
-                        spin_unlock(&space_info->lock);
-                } else {
-                        old_val -= num_bytes;
-                        cache->used = old_val;
-                        cache->pinned += num_bytes;
-                        btrfs_space_info_update_bytes_pinned(info, space_info,
-                                                             num_bytes);
-                        space_info->bytes_used -= num_bytes;
-                        space_info->disk_used -= num_bytes * factor;
-
-                        reclaim = should_reclaim_block_group(cache, num_bytes);
-
-                        spin_unlock(&cache->lock);
-                        spin_unlock(&space_info->lock);
-
-                        set_extent_bit(&trans->transaction->pinned_extents,
-                                       bytenr, bytenr + num_bytes - 1,
-                                       EXTENT_DIRTY, NULL);
-                }
-
-                spin_lock(&trans->transaction->dirty_bgs_lock);
-                if (list_empty(&cache->dirty_list)) {
-                        list_add_tail(&cache->dirty_list,
-                                      &trans->transaction->dirty_bgs);
-                        trans->delayed_ref_updates++;
-                        btrfs_get_block_group(cache);
-                }
-                spin_unlock(&trans->transaction->dirty_bgs_lock);
-
-                /*
-                 * No longer have used bytes in this block group, queue it for
-                 * deletion. We do this after adding the block group to the
-                 * dirty list to avoid races between cleaner kthread and space
-                 * cache writeout.
-                 */
-                if (!alloc && old_val == 0) {
-                        if (!btrfs_test_opt(info, DISCARD_ASYNC))
-                                btrfs_mark_bg_unused(cache);
-                } else if (!alloc && reclaim) {
-                        btrfs_mark_bg_to_reclaim(cache);
-                }
-
-                btrfs_put_block_group(cache);
-                total -= num_bytes;
-                bytenr += num_bytes;
-        }
+        cache = btrfs_lookup_block_group(info, bytenr);
+        if (!cache)
+                return -ENOENT;
+
+        /* An extent can not span multiple block groups. */
+        ASSERT(bytenr + num_bytes <= cache->start + cache->length);
+
+        space_info = cache->space_info;
+        factor = btrfs_bg_type_to_factor(cache->flags);
+
+        /*
+         * If this block group has free space cache written out, we need to make
+         * sure to load it if we are removing space. This is because we need
+         * the unpinning stage to actually add the space back to the block group,
+         * otherwise we will leak space.
+         */
+        if (!alloc && !btrfs_block_group_done(cache))
+                btrfs_cache_block_group(cache, true);
+
+        spin_lock(&space_info->lock);
+        spin_lock(&cache->lock);
+
+        if (btrfs_test_opt(info, SPACE_CACHE) &&
+            cache->disk_cache_state < BTRFS_DC_CLEAR)
+                cache->disk_cache_state = BTRFS_DC_CLEAR;
+
+        old_val = cache->used;
+        if (alloc) {
+                old_val += num_bytes;
+                cache->used = old_val;
+                cache->reserved -= num_bytes;
+                space_info->bytes_reserved -= num_bytes;
+                space_info->bytes_used += num_bytes;
+                space_info->disk_used += num_bytes * factor;
+                spin_unlock(&cache->lock);
+                spin_unlock(&space_info->lock);
+        } else {
+                old_val -= num_bytes;
+                cache->used = old_val;
+                cache->pinned += num_bytes;
+                btrfs_space_info_update_bytes_pinned(info, space_info, num_bytes);
+                space_info->bytes_used -= num_bytes;
+                space_info->disk_used -= num_bytes * factor;
+
+                reclaim = should_reclaim_block_group(cache, num_bytes);
+
+                spin_unlock(&cache->lock);
+                spin_unlock(&space_info->lock);
+
+                set_extent_bit(&trans->transaction->pinned_extents, bytenr,
+                               bytenr + num_bytes - 1, EXTENT_DIRTY, NULL);
+        }
+
+        spin_lock(&trans->transaction->dirty_bgs_lock);
+        if (list_empty(&cache->dirty_list)) {
+                list_add_tail(&cache->dirty_list, &trans->transaction->dirty_bgs);
+                trans->delayed_ref_updates++;
+                btrfs_get_block_group(cache);
+        }
+        spin_unlock(&trans->transaction->dirty_bgs_lock);
+
+        /*
+         * No longer have used bytes in this block group, queue it for deletion.
+         * We do this after adding the block group to the dirty list to avoid
+         * races between cleaner kthread and space cache writeout.
+         */
+        if (!alloc && old_val == 0) {
+                if (!btrfs_test_opt(info, DISCARD_ASYNC))
+                        btrfs_mark_bg_unused(cache);
+        } else if (!alloc && reclaim) {
+                btrfs_mark_bg_to_reclaim(cache);
+        }
+
+        btrfs_put_block_group(cache);

         /* Modified block groups are accounted for in the delayed_refs_rsv. */
         btrfs_update_delayed_refs_rsv(trans);
-        return ret;
+
+        return 0;
 }

 /*
