btrfs: defrag: introduce helper to collect target file extents
Introduce a helper, defrag_collect_targets(), to collect all possible
targets to be defragged.

This function will not consider limits like max_sectors_to_defrag, thus
the caller is responsible for ensuring we don't exceed the limit.

This function will be the first stage of the later defrag rework.

Signed-off-by: Qu Wenruo <wqu@suse.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
Qu Wenruo authored and David Sterba committed Oct 26, 2021
1 parent 5767b50 commit eb793cf
Showing 1 changed file with 120 additions and 0 deletions: fs/btrfs/ioctl.c
@@ -1431,6 +1431,126 @@ static int cluster_pages_for_defrag(struct inode *inode,

}

struct defrag_target_range {
        struct list_head list;
        u64 start;
        u64 len;
};
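/*
 * Editorial note on the semantics (a sketch, not part of the original
 * patch): each entry describes one contiguous file range
 * [start, start + len).  defrag_collect_targets() below extends the
 * last entry instead of allocating a new one whenever the next target
 * starts exactly at last->start + last->len, so the list stays sorted
 * and non-overlapping.
 */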

/*
 * Collect all valid target extents.
 *
 * @start:         file offset to lookup
 * @len:           length to lookup
 * @extent_thresh: file extent size threshold, any extent size >= this value
 *                 will be ignored
 * @newer_than:    only defrag extents newer than this value
 * @do_compress:   whether the defrag is doing compression
 *                 if true, @extent_thresh will be ignored and all regular
 *                 file extents meeting @newer_than will be targets.
 * @target_list:   list of target file extents
 */
static int defrag_collect_targets(struct btrfs_inode *inode,
                                  u64 start, u64 len, u32 extent_thresh,
                                  u64 newer_than, bool do_compress,
                                  struct list_head *target_list)
{
        u64 cur = start;
        int ret = 0;

        while (cur < start + len) {
                struct extent_map *em;
                struct defrag_target_range *new;
                bool next_mergeable = true;
                u64 range_len;

                em = defrag_lookup_extent(&inode->vfs_inode, cur);
                if (!em)
                        break;

                /* Skip hole/inline/preallocated extents */
                if (em->block_start >= EXTENT_MAP_LAST_BYTE ||
                    test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
                        goto next;

                /* Skip older extent */
                if (em->generation < newer_than)
                        goto next;

                /*
                 * For do_compress case, we want to compress all valid file
                 * extents, thus no @extent_thresh or mergeable check.
                 */
                if (do_compress)
                        goto add;

                /* Skip too large extent */
                if (em->len >= extent_thresh)
                        goto next;

                next_mergeable = defrag_check_next_extent(&inode->vfs_inode, em);
                if (!next_mergeable) {
                        struct defrag_target_range *last;

                        /* Empty target list, no way to merge with last entry */
                        if (list_empty(target_list))
                                goto next;
                        last = list_entry(target_list->prev,
                                          struct defrag_target_range, list);
                        /* Not mergeable with last entry */
                        if (last->start + last->len != cur)
                                goto next;

                        /* Mergeable, fall through to add it to @target_list. */
                }

add:
                range_len = min(extent_map_end(em), start + len) - cur;
                /*
                 * This one is a good target, check if it can be merged into
                 * last range of the target list.
                 */
                if (!list_empty(target_list)) {
                        struct defrag_target_range *last;

                        last = list_entry(target_list->prev,
                                          struct defrag_target_range, list);
                        ASSERT(last->start + last->len <= cur);
                        if (last->start + last->len == cur) {
                                /* Mergeable, enlarge the last entry */
                                last->len += range_len;
                                goto next;
                        }
                        /* Fall through to allocate a new entry */
                }

                /* Allocate new defrag_target_range */
                new = kmalloc(sizeof(*new), GFP_NOFS);
                if (!new) {
                        free_extent_map(em);
                        ret = -ENOMEM;
                        break;
                }
                new->start = cur;
                new->len = range_len;
                list_add_tail(&new->list, target_list);

next:
                cur = extent_map_end(em);
                free_extent_map(em);
        }
        if (ret < 0) {
                struct defrag_target_range *entry;
                struct defrag_target_range *tmp;

                list_for_each_entry_safe(entry, tmp, target_list, list) {
                        list_del_init(&entry->list);
                        kfree(entry);
                }
        }
        return ret;
}
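
/*
 * Editorial usage sketch, not part of this commit: since
 * defrag_collect_targets() deliberately ignores max_sectors_to_defrag,
 * a caller has to enforce the budget itself, roughly along the lines of
 * the hypothetical defrag_one_cluster_sketch() below.  defrag_one_range()
 * is an assumed helper meaning "defrag this one contiguous range"; only
 * defrag_collect_targets() above is real.
 */
static int defrag_one_cluster_sketch(struct btrfs_inode *inode,
                                     u64 start, u32 len, u32 extent_thresh,
                                     u64 newer_than, bool do_compress,
                                     unsigned long max_sectors,
                                     unsigned long *sectors_defragged)
{
        const u32 sectorsize = inode->root->fs_info->sectorsize;
        struct defrag_target_range *entry;
        struct defrag_target_range *tmp;
        LIST_HEAD(target_list);
        int ret;

        ret = defrag_collect_targets(inode, start, len, extent_thresh,
                                     newer_than, do_compress, &target_list);
        if (ret < 0)
                return ret;

        list_for_each_entry(entry, &target_list, list) {
                u32 range_len = entry->len;

                /* The helper ignored the limit, so the caller caps it here */
                if (*sectors_defragged >= max_sectors)
                        break;
                range_len = min_t(u32, range_len,
                                  (max_sectors - *sectors_defragged) *
                                  sectorsize);

                /* Hypothetical helper: defrag one contiguous target range */
                ret = defrag_one_range(inode, entry->start, range_len);
                if (ret < 0)
                        break;
                *sectors_defragged += range_len / sectorsize;
        }

        /* Entries are always freed by the caller, as in the error path above */
        list_for_each_entry_safe(entry, tmp, &target_list, list) {
                list_del_init(&entry->list);
                kfree(entry);
        }
        return ret;
}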

/*
 * Entry point to file defragmentation.
 *
