btrfs: refactor checksum calculations in btrfs_lookup_csums_range()
The refactoring involves the following parts:

- Introduce bytes_to_csum_size() and csum_size_to_bytes() helpers
  We have quite a few open-coded calculations, some of them even split
  into two assignments just to fit the 80 character limit.  (A standalone
  sketch of the conversion arithmetic follows this list.)

- Remove the @csum_size parameter from max_ordered_sum_bytes()
  The csum size can be fetched from @fs_info, and we will use the
  csum_size_to_bytes() helper anyway.

- Add a comment explaining how we handle the first search result

- Use the newly introduced helpers to clean up btrfs_lookup_csums_range()

- Move variable declarations to the minimal scope

- Never mix number of sectors with bytes
  There are several locations doing things like:

		size = min_t(size_t, csum_end - start,
			     max_ordered_sum_bytes(fs_info));
		...
		size >>= fs_info->sectorsize_bits;

  Or

		offset = (start - key.offset) >> fs_info->sectorsize_bits;
		offset *= csum_size;

  Make sure these variables can only represent BYTES inside the
  function, by using the bytes_to_csum_size() and csum_size_to_bytes()
  helpers introduced above.
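
  To make the arithmetic concrete, below is a standalone userspace sketch
  of the three helpers (not part of the kernel change).  The constants are
  assumptions for illustration only: 4K sectors, 4-byte crc32c checksums,
  4K pages, and a made-up 32-byte header size for struct btrfs_ordered_sum;
  the kernel versions read these values from struct btrfs_fs_info.

	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>

	#define SECTORSIZE      4096u
	#define SECTORSIZE_BITS 12u
	#define CSUM_SIZE       4u      /* crc32c */
	#define DEMO_PAGE_SIZE  4096u
	#define ORDERED_SUM_HDR 32u     /* assumed header size, for demo only */

	/* How many bytes of checksums cover @bytes of data. */
	static size_t bytes_to_csum_size(uint32_t bytes)
	{
		assert(bytes % SECTORSIZE == 0);
		return (bytes >> SECTORSIZE_BITS) * CSUM_SIZE;
	}

	/* How many bytes of data are covered by @csum_size bytes of checksums. */
	static size_t csum_size_to_bytes(uint32_t csum_size)
	{
		assert(csum_size % CSUM_SIZE == 0);
		return (csum_size / CSUM_SIZE) << SECTORSIZE_BITS;
	}

	/* Largest data range one page-backed ordered sum can describe. */
	static uint32_t max_ordered_sum_bytes(void)
	{
		/* The commit's round_down() to whole checksums, spelled out. */
		uint32_t max_csum_size =
			(DEMO_PAGE_SIZE - ORDERED_SUM_HDR) / CSUM_SIZE * CSUM_SIZE;

		return csum_size_to_bytes(max_csum_size);
	}

	int main(void)
	{
		/* 16K of data = 4 sectors = 16 bytes of crc32c checksums. */
		printf("bytes_to_csum_size(16K) = %zu\n",
		       bytes_to_csum_size(16 * 1024));
		/* ...and back again: 16 bytes of checksums cover 16K. */
		printf("csum_size_to_bytes(16)  = %zu\n", csum_size_to_bytes(16));
		printf("max_ordered_sum_bytes() = %u\n", max_ordered_sum_bytes());

		/*
		 * The "previous csum item" check: an item at offset 1M holding
		 * 64 bytes of checksums covers [1M, 1M + 64K).  A search range
		 * starting at 1M + 32K begins inside it, so the real lookup
		 * would step back one slot to pick that item up.
		 */
		uint64_t item_offset = 1024 * 1024;
		uint32_t item_size = 64;	/* btrfs_item_size() of that item */
		uint64_t start = item_offset + 32 * 1024;

		if (bytes_to_csum_size(start - item_offset) < item_size)
			printf("previous csum item covers the range start\n");
		return 0;
	}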

Signed-off-by: Qu Wenruo <wqu@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
Qu Wenruo authored and David Sterba committed Dec 5, 2022
1 parent 9f0eac0 commit cb649e8
Showing 1 changed file with 47 additions and 21 deletions.
--- a/fs/btrfs/file-item.c
+++ b/fs/btrfs/file-item.c
@@ -126,12 +126,26 @@ int btrfs_inode_clear_file_extent_range(struct btrfs_inode *inode, u64 start,
 			       start + len - 1, EXTENT_DIRTY, NULL);
 }
 
-static inline u32 max_ordered_sum_bytes(struct btrfs_fs_info *fs_info,
-					u16 csum_size)
+static size_t bytes_to_csum_size(const struct btrfs_fs_info *fs_info, u32 bytes)
 {
-	u32 ncsums = (PAGE_SIZE - sizeof(struct btrfs_ordered_sum)) / csum_size;
+	ASSERT(IS_ALIGNED(bytes, fs_info->sectorsize));
 
-	return ncsums * fs_info->sectorsize;
+	return (bytes >> fs_info->sectorsize_bits) * fs_info->csum_size;
 }
 
+static size_t csum_size_to_bytes(const struct btrfs_fs_info *fs_info, u32 csum_size)
+{
+	ASSERT(IS_ALIGNED(csum_size, fs_info->csum_size));
+
+	return (csum_size / fs_info->csum_size) << fs_info->sectorsize_bits;
+}
+
+static inline u32 max_ordered_sum_bytes(const struct btrfs_fs_info *fs_info)
+{
+	u32 max_csum_size = round_down(PAGE_SIZE - sizeof(struct btrfs_ordered_sum),
+				       fs_info->csum_size);
+
+	return csum_size_to_bytes(fs_info, max_csum_size);
+}
+
 /*
@@ -140,9 +154,7 @@ static inline u32 max_ordered_sum_bytes(struct btrfs_fs_info *fs_info,
  */
 static int btrfs_ordered_sum_size(struct btrfs_fs_info *fs_info, unsigned long bytes)
 {
-	int num_sectors = (int)DIV_ROUND_UP(bytes, fs_info->sectorsize);
-
-	return sizeof(struct btrfs_ordered_sum) + num_sectors * fs_info->csum_size;
+	return sizeof(struct btrfs_ordered_sum) + bytes_to_csum_size(fs_info, bytes);
 }
 
 int btrfs_insert_hole_extent(struct btrfs_trans_handle *trans,
@@ -526,11 +538,7 @@ int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
 	struct btrfs_ordered_sum *sums;
 	struct btrfs_csum_item *item;
 	LIST_HEAD(tmplist);
-	unsigned long offset;
 	int ret;
-	size_t size;
-	u64 csum_end;
-	const u32 csum_size = fs_info->csum_size;
 
 	ASSERT(IS_ALIGNED(start, fs_info->sectorsize) &&
 	       IS_ALIGNED(end + 1, fs_info->sectorsize));
@@ -556,16 +564,33 @@ int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
 	if (ret > 0 && path->slots[0] > 0) {
 		leaf = path->nodes[0];
 		btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1);
+
+		/*
+		 * There are two cases we can hit here for the previous csum
+		 * item:
+		 *
+		 *		|<- search range ->|
+		 *	|<- csum item ->|
+		 *
+		 * Or
+		 *				|<- search range ->|
+		 *	|<- csum item ->|
+		 *
+		 * Check if the previous csum item covers the leading part of
+		 * the search range. If so we have to start from previous csum
+		 * item.
+		 */
 		if (key.objectid == BTRFS_EXTENT_CSUM_OBJECTID &&
 		    key.type == BTRFS_EXTENT_CSUM_KEY) {
-			offset = (start - key.offset) >> fs_info->sectorsize_bits;
-			if (offset * csum_size <
+			if (bytes_to_csum_size(fs_info, start - key.offset) <
 			    btrfs_item_size(leaf, path->slots[0] - 1))
 				path->slots[0]--;
 		}
 	}
 
 	while (start <= end) {
+		u64 csum_end;
+
 		leaf = path->nodes[0];
 		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
 			ret = btrfs_next_leaf(root, path);
@@ -585,8 +610,8 @@ int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
 		if (key.offset > start)
 			start = key.offset;
 
-		size = btrfs_item_size(leaf, path->slots[0]);
-		csum_end = key.offset + (size / csum_size) * fs_info->sectorsize;
+		csum_end = key.offset + csum_size_to_bytes(fs_info,
+				btrfs_item_size(leaf, path->slots[0]));
 		if (csum_end <= start) {
 			path->slots[0]++;
 			continue;
@@ -596,8 +621,11 @@ int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
 		item = btrfs_item_ptr(path->nodes[0], path->slots[0],
 				      struct btrfs_csum_item);
 		while (start < csum_end) {
+			unsigned long offset;
+			size_t size;
+
 			size = min_t(size_t, csum_end - start,
-				     max_ordered_sum_bytes(fs_info, csum_size));
+				     max_ordered_sum_bytes(fs_info));
 			sums = kzalloc(btrfs_ordered_sum_size(fs_info, size),
 				       GFP_NOFS);
 			if (!sums) {
@@ -608,16 +636,14 @@ int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
 			sums->bytenr = start;
 			sums->len = (int)size;
 
-			offset = (start - key.offset) >> fs_info->sectorsize_bits;
-			offset *= csum_size;
-			size >>= fs_info->sectorsize_bits;
+			offset = bytes_to_csum_size(fs_info, start - key.offset);
 
 			read_extent_buffer(path->nodes[0],
 					   sums->sums,
 					   ((unsigned long)item) + offset,
-					   csum_size * size);
+					   bytes_to_csum_size(fs_info, size));
 
-			start += fs_info->sectorsize * size;
+			start += size;
 			list_add_tail(&sums->list, &tmplist);
 		}
 		path->slots[0]++;
