Commit 856d865c authored by Ojaswin Mujoo, committed by Theodore Ts'o

ext4: Abstract out logic to search average fragment list

Make the logic of searching the average fragment list of a given order
reusable by abstracting it out into a separate function. This also avoids
code duplication in upcoming patches.

No functional changes.

Signed-off-by: Ojaswin Mujoo <ojaswin@linux.ibm.com>
Reviewed-by: Ritesh Harjani (IBM) <ritesh.list@gmail.com>
Reviewed-by: Jan Kara <jack@suse.cz>
Link: https://lore.kernel.org/r/028c11d95b17ce0285f45456709a0ca922df1b83.1685449706.git.ojaswin@linux.ibm.com

Signed-off-by: Theodore Ts'o <tytso@mit.edu>
parent 4f3d1e45
fs/ext4/mballoc.c: +33 −18
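
The new helper added below peeks at the list with list_empty() before taking the read lock, then re-checks under the lock, since the list can be drained between the two checks; the unlocked peek keeps empty orders cheap. A minimal userspace sketch of that check-then-recheck pattern, using pthread rwlocks and hypothetical names (frag_list, find_good) in place of the ext4 structures:

#include <pthread.h>
#include <stdio.h>

struct node { struct node *next; int good; };

struct frag_list {
	pthread_rwlock_t lock;	/* plays the role of s_mb_avg_fragment_size_locks[order] */
	struct node *head;	/* plays the role of s_mb_avg_fragment_size[order] */
};

static struct node *find_good(struct frag_list *fl)
{
	struct node *found = NULL;

	if (fl->head == NULL)		/* unlocked fast path, like the first list_empty() */
		return NULL;
	pthread_rwlock_rdlock(&fl->lock);
	if (fl->head == NULL) {		/* re-check: another thread may have emptied it */
		pthread_rwlock_unlock(&fl->lock);
		return NULL;
	}
	for (struct node *it = fl->head; it; it = it->next) {
		if (it->good) {		/* stands in for ext4_mb_good_group() */
			found = it;
			break;
		}
	}
	pthread_rwlock_unlock(&fl->lock);
	return found;
}

int main(void)
{
	struct node n = { .next = NULL, .good = 1 };
	struct frag_list fl = { .head = &n };

	pthread_rwlock_init(&fl.lock, NULL);
	printf("found node: %s\n", find_good(&fl) ? "yes" : "no");
	pthread_rwlock_destroy(&fl.lock);
	return 0;
}

The unlocked peek is a benign race in the kernel (list_empty() on a list that is re-checked under the lock); a strict userspace port would read the head with an atomic load.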
@@ -904,6 +904,37 @@ static void ext4_mb_choose_next_group_cr0(struct ext4_allocation_context *ac,
	}
}

+/*
+ * Find a suitable group of given order from the average fragments list.
+ */
+static struct ext4_group_info *
+ext4_mb_find_good_group_avg_frag_lists(struct ext4_allocation_context *ac, int order)
+{
+	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
+	struct list_head *frag_list = &sbi->s_mb_avg_fragment_size[order];
+	rwlock_t *frag_list_lock = &sbi->s_mb_avg_fragment_size_locks[order];
+	struct ext4_group_info *grp = NULL, *iter;
+	enum criteria cr = ac->ac_criteria;
+
+	if (list_empty(frag_list))
+		return NULL;
+	read_lock(frag_list_lock);
+	if (list_empty(frag_list)) {
+		read_unlock(frag_list_lock);
+		return NULL;
+	}
+	list_for_each_entry(iter, frag_list, bb_avg_fragment_size_node) {
+		if (sbi->s_mb_stats)
+			atomic64_inc(&sbi->s_bal_cX_groups_considered[cr]);
+		if (likely(ext4_mb_good_group(ac, iter->bb_group, cr))) {
+			grp = iter;
+			break;
+		}
+	}
+	read_unlock(frag_list_lock);
+	return grp;
+}
+
/*
 * Choose next group by traversing average fragment size list of suitable
 * order. Updates *new_cr if cr level needs an update.
@@ -912,7 +943,7 @@ static void ext4_mb_choose_next_group_cr1(struct ext4_allocation_context *ac,
		enum criteria *new_cr, ext4_group_t *group, ext4_group_t ngroups)
{
	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
-	struct ext4_group_info *grp = NULL, *iter;
+	struct ext4_group_info *grp = NULL;
	int i;

	if (unlikely(ac->ac_flags & EXT4_MB_CR1_OPTIMIZED)) {
@@ -922,23 +953,7 @@ static void ext4_mb_choose_next_group_cr1(struct ext4_allocation_context *ac,

	for (i = mb_avg_fragment_size_order(ac->ac_sb, ac->ac_g_ex.fe_len);
	     i < MB_NUM_ORDERS(ac->ac_sb); i++) {
-		if (list_empty(&sbi->s_mb_avg_fragment_size[i]))
-			continue;
-		read_lock(&sbi->s_mb_avg_fragment_size_locks[i]);
-		if (list_empty(&sbi->s_mb_avg_fragment_size[i])) {
-			read_unlock(&sbi->s_mb_avg_fragment_size_locks[i]);
-			continue;
-		}
-		list_for_each_entry(iter, &sbi->s_mb_avg_fragment_size[i],
-				    bb_avg_fragment_size_node) {
-			if (sbi->s_mb_stats)
-				atomic64_inc(&sbi->s_bal_cX_groups_considered[CR1]);
-			if (likely(ext4_mb_good_group(ac, iter->bb_group, CR1))) {
-				grp = iter;
-				break;
-			}
-		}
-		read_unlock(&sbi->s_mb_avg_fragment_size_locks[i]);
+		grp = ext4_mb_find_good_group_avg_frag_lists(ac, i);
		if (grp)
			break;
	}
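
Note that the helper takes the scan order as a parameter and picks up the criteria from ac->ac_criteria rather than hard-coding CR1, which is what makes it reusable by other allocation passes. A hypothetical second caller (not part of this patch; try_order is an invented name) could be as small as:

static bool try_order(struct ext4_allocation_context *ac, int order,
		      ext4_group_t *group)
{
	struct ext4_group_info *grp;

	/* reuse the shared list walk instead of open-coding it */
	grp = ext4_mb_find_good_group_avg_frag_lists(ac, order);
	if (!grp)
		return false;
	*group = grp->bb_group;	/* report the chosen block group */
	return true;
}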