Skip to content

Commit

Permalink
ext4: Abstract out logic to search average fragment list
Browse files Browse the repository at this point in the history
Make the logic of searching the average fragment list of a given order reusable
by abstracting it out to a different function. This will also avoid
code duplication in upcoming patches.

No functional changes.

Signed-off-by: Ojaswin Mujoo <ojaswin@linux.ibm.com>
Reviewed-by: Ritesh Harjani (IBM) <ritesh.list@gmail.com>
Reviewed-by: Jan Kara <jack@suse.cz>
Link: https://lore.kernel.org/r/028c11d95b17ce0285f45456709a0ca922df1b83.1685449706.git.ojaswin@linux.ibm.com
Signed-off-by: Theodore Ts'o <tytso@mit.edu>
  • Loading branch information
Ojaswin Mujoo authored and Theodore Ts'o committed Jun 2, 2023
1 parent 30c10fe commit 64ea18f
Showing 1 changed file with 33 additions and 18 deletions.
51 changes: 33 additions & 18 deletions fs/ext4/mballoc.c
Original file line number Diff line number Diff line change
Expand Up @@ -905,6 +905,37 @@ static void ext4_mb_choose_next_group_cr0(struct ext4_allocation_context *ac,
}
}

/*
 * Find a suitable group of given order from the average fragments list.
 *
 * Scans sbi->s_mb_avg_fragment_size[order] and returns the first group
 * that satisfies ext4_mb_good_group() for the current allocation
 * criteria (ac->ac_criteria), or NULL if the list is empty or no group
 * on it qualifies.  When mballoc stats are enabled, each group examined
 * bumps s_bal_cX_groups_considered for the active criteria.
 */
static struct ext4_group_info *
ext4_mb_find_good_group_avg_frag_lists(struct ext4_allocation_context *ac, int order)
{
struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
struct list_head *frag_list = &sbi->s_mb_avg_fragment_size[order];
rwlock_t *frag_list_lock = &sbi->s_mb_avg_fragment_size_locks[order];
struct ext4_group_info *grp = NULL, *iter;
enum criteria cr = ac->ac_criteria;

/* Lockless fast path: avoid taking the lock for an empty list. */
if (list_empty(frag_list))
return NULL;
read_lock(frag_list_lock);
/* Re-check under the lock; the list may have emptied concurrently. */
if (list_empty(frag_list)) {
read_unlock(frag_list_lock);
return NULL;
}
list_for_each_entry(iter, frag_list, bb_avg_fragment_size_node) {
if (sbi->s_mb_stats)
atomic64_inc(&sbi->s_bal_cX_groups_considered[cr]);
if (likely(ext4_mb_good_group(ac, iter->bb_group, cr))) {
grp = iter;
break;
}
}
read_unlock(frag_list_lock);
return grp;
}

/*
* Choose next group by traversing average fragment size list of suitable
* order. Updates *new_cr if cr level needs an update.
Expand All @@ -913,7 +944,7 @@ static void ext4_mb_choose_next_group_cr1(struct ext4_allocation_context *ac,
enum criteria *new_cr, ext4_group_t *group, ext4_group_t ngroups)
{
struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
struct ext4_group_info *grp = NULL, *iter;
struct ext4_group_info *grp = NULL;
int i;

if (unlikely(ac->ac_flags & EXT4_MB_CR1_OPTIMIZED)) {
Expand All @@ -923,23 +954,7 @@ static void ext4_mb_choose_next_group_cr1(struct ext4_allocation_context *ac,

for (i = mb_avg_fragment_size_order(ac->ac_sb, ac->ac_g_ex.fe_len);
i < MB_NUM_ORDERS(ac->ac_sb); i++) {
if (list_empty(&sbi->s_mb_avg_fragment_size[i]))
continue;
read_lock(&sbi->s_mb_avg_fragment_size_locks[i]);
if (list_empty(&sbi->s_mb_avg_fragment_size[i])) {
read_unlock(&sbi->s_mb_avg_fragment_size_locks[i]);
continue;
}
list_for_each_entry(iter, &sbi->s_mb_avg_fragment_size[i],
bb_avg_fragment_size_node) {
if (sbi->s_mb_stats)
atomic64_inc(&sbi->s_bal_cX_groups_considered[CR1]);
if (likely(ext4_mb_good_group(ac, iter->bb_group, CR1))) {
grp = iter;
break;
}
}
read_unlock(&sbi->s_mb_avg_fragment_size_locks[i]);
grp = ext4_mb_find_good_group_avg_frag_lists(ac, i);
if (grp)
break;
}
Expand Down

0 comments on commit 64ea18f

Please sign in to comment.