Unverified Commit 0acecb73 authored by openeuler-ci-bot's avatar openeuler-ci-bot Committed by Gitee
Browse files

!4004 ext4: fix some ext4_lblk_t overflow issues

Merge Pull Request from: @ci-robot 
 
PR sync from: Baokun Li <libaokun1@huawei.com>
https://mailweb.openeuler.org/hyperkitty/list/kernel@openeuler.org/message/R4W2PGRCB46SGR7RPTMPVMNUF43LASHF/ 
Baokun Li (4):
  ext4: add two helper functions extent_logical_end() and
    pa_logical_end()
  ext4: fix BUG in ext4_mb_new_inode_pa() due to overflow
  ext4: avoid overlapping preallocations due to overflow
  ext4: prevent the normalized size from exceeding EXT_MAX_BLOCKS

Ojaswin Mujoo (1):
  ext4: Fix best extent lstart adjustment logic in
    ext4_mb_new_inode_pa()


-- 
2.31.1
 
https://gitee.com/openeuler/kernel/issues/I8PI1H 
 
Link: https://gitee.com/openeuler/kernel/pulls/4004

 

Reviewed-by: Jialin Zhang <zhangjialin11@huawei.com>
Signed-off-by: Jialin Zhang <zhangjialin11@huawei.com>
parents 9c2e42d5 2581aac8
Loading
Loading
Loading
Loading
+40 −30
Original line number Diff line number Diff line
@@ -3440,8 +3440,7 @@ ext4_mb_normalize_request(struct ext4_allocation_context *ac,
{
	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
	int bsbits, max;
	ext4_lblk_t end;
	loff_t size, start_off;
	loff_t size, start_off, end;
	loff_t orig_size __maybe_unused;
	ext4_lblk_t start;
	struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
@@ -3470,7 +3469,7 @@ ext4_mb_normalize_request(struct ext4_allocation_context *ac,

	/* first, let's learn actual file size
	 * given current request is allocated */
	size = ac->ac_o_ex.fe_logical + EXT4_C2B(sbi, ac->ac_o_ex.fe_len);
	size = extent_logical_end(sbi, &ac->ac_o_ex);
	size = size << bsbits;
	if (size < i_size_read(ac->ac_inode))
		size = i_size_read(ac->ac_inode);
@@ -3529,6 +3528,10 @@ ext4_mb_normalize_request(struct ext4_allocation_context *ac,
	start = max(start, rounddown(ac->ac_o_ex.fe_logical,
			(ext4_lblk_t)EXT4_BLOCKS_PER_GROUP(ac->ac_sb)));

	/* avoid unnecessary preallocation that may trigger assertions */
	if (start + size > EXT_MAX_BLOCKS)
		size = EXT_MAX_BLOCKS - start;

	/* don't cover already allocated blocks in selected range */
	if (ar->pleft && start <= ar->lleft) {
		size -= ar->lleft + 1 - start;
@@ -3549,7 +3552,7 @@ ext4_mb_normalize_request(struct ext4_allocation_context *ac,
	/* check we don't cross already preallocated blocks */
	rcu_read_lock();
	list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {
		ext4_lblk_t pa_end;
		loff_t pa_end;

		if (pa->pa_deleted)
			continue;
@@ -3559,8 +3562,7 @@ ext4_mb_normalize_request(struct ext4_allocation_context *ac,
			continue;
		}

		pa_end = pa->pa_lstart + EXT4_C2B(EXT4_SB(ac->ac_sb),
						  pa->pa_len);
		pa_end = pa_logical_end(EXT4_SB(ac->ac_sb), pa);

		/* PA must not overlap original request */
		BUG_ON(!(ac->ac_o_ex.fe_logical >= pa_end ||
@@ -3589,12 +3591,11 @@ ext4_mb_normalize_request(struct ext4_allocation_context *ac,
	/* XXX: extra loop to check we really don't overlap preallocations */
	rcu_read_lock();
	list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {
		ext4_lblk_t pa_end;
		loff_t pa_end;

		spin_lock(&pa->pa_lock);
		if (pa->pa_deleted == 0) {
			pa_end = pa->pa_lstart + EXT4_C2B(EXT4_SB(ac->ac_sb),
							  pa->pa_len);
			pa_end = pa_logical_end(EXT4_SB(ac->ac_sb), pa);
			BUG_ON(!(start >= pa_end || end <= pa->pa_lstart));
		}
		spin_unlock(&pa->pa_lock);
@@ -3737,6 +3738,7 @@ static void ext4_mb_use_inode_pa(struct ext4_allocation_context *ac,
	BUG_ON(start < pa->pa_pstart);
	BUG_ON(end > pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len));
	BUG_ON(pa->pa_free < len);
	BUG_ON(ac->ac_b_ex.fe_len <= 0);
	pa->pa_free -= len;

	mb_debug(ac->ac_sb, "use %llu/%d from inode pa %p\n", start, len, pa);
@@ -3820,8 +3822,7 @@ ext4_mb_use_preallocated(struct ext4_allocation_context *ac)
		/* all fields in this condition don't change,
		 * so we can skip locking for them */
		if (ac->ac_o_ex.fe_logical < pa->pa_lstart ||
		    ac->ac_o_ex.fe_logical >= (pa->pa_lstart +
					       EXT4_C2B(sbi, pa->pa_len)))
		    ac->ac_o_ex.fe_logical >= pa_logical_end(sbi, pa))
			continue;

		/* non-extent files can't have physical blocks past 2^32 */
@@ -4061,10 +4062,11 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
	pa = ac->ac_pa;

	if (ac->ac_b_ex.fe_len < ac->ac_g_ex.fe_len) {
		int winl;
		int wins;
		int win;
		int offs;
		struct ext4_free_extent ex = {
			.fe_logical = ac->ac_g_ex.fe_logical,
			.fe_len = ac->ac_g_ex.fe_len,
		};
		loff_t orig_goal_end = extent_logical_end(sbi, &ex);

		/* we can't allocate as much as normalizer wants.
		 * so, found space must get proper lstart
@@ -4072,26 +4074,34 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
		BUG_ON(ac->ac_g_ex.fe_logical > ac->ac_o_ex.fe_logical);
		BUG_ON(ac->ac_g_ex.fe_len < ac->ac_o_ex.fe_len);

		/* we're limited by original request in that
		 * logical block must be covered any way
		 * winl is window we can move our chunk within */
		winl = ac->ac_o_ex.fe_logical - ac->ac_g_ex.fe_logical;
		/*
		 * Use the below logic for adjusting best extent as it keeps
		 * fragmentation in check while ensuring logical range of best
		 * extent doesn't overflow out of goal extent:
		 *
		 * 1. Check if best ex can be kept at end of goal and still
		 *    cover original start
		 * 2. Else, check if best ex can be kept at start of goal and
		 *    still cover original start
		 * 3. Else, keep the best ex at start of original request.
		 */
		ex.fe_len = ac->ac_b_ex.fe_len;

		/* also, we should cover whole original request */
		wins = EXT4_C2B(sbi, ac->ac_b_ex.fe_len - ac->ac_o_ex.fe_len);
		ex.fe_logical = orig_goal_end - EXT4_C2B(sbi, ex.fe_len);
		if (ac->ac_o_ex.fe_logical >= ex.fe_logical)
			goto adjust_bex;

		/* the smallest one defines real window */
		win = min(winl, wins);
		ex.fe_logical = ac->ac_g_ex.fe_logical;
		if (ac->ac_o_ex.fe_logical < extent_logical_end(sbi, &ex))
			goto adjust_bex;

		offs = ac->ac_o_ex.fe_logical %
			EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
		if (offs && offs < win)
			win = offs;
		ex.fe_logical = ac->ac_o_ex.fe_logical;
adjust_bex:
		ac->ac_b_ex.fe_logical = ex.fe_logical;

		ac->ac_b_ex.fe_logical = ac->ac_o_ex.fe_logical -
			EXT4_NUM_B2C(sbi, win);
		BUG_ON(ac->ac_o_ex.fe_logical < ac->ac_b_ex.fe_logical);
		BUG_ON(ac->ac_o_ex.fe_len > ac->ac_b_ex.fe_len);
		BUG_ON(extent_logical_end(sbi, &ex) > orig_goal_end);
	}

	/* preallocation can change ac_b_ex, thus we store actually
@@ -4609,7 +4619,7 @@ static void ext4_mb_group_or_file(struct ext4_allocation_context *ac)
	if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
		return;

	size = ac->ac_o_ex.fe_logical + EXT4_C2B(sbi, ac->ac_o_ex.fe_len);
	size = extent_logical_end(sbi, &ac->ac_o_ex);
	isize = (i_size_read(ac->ac_inode) + ac->ac_sb->s_blocksize - 1)
		>> bsbits;

+14 −0
Original line number Diff line number Diff line
@@ -199,6 +199,20 @@ static inline ext4_fsblk_t ext4_grp_offs_to_block(struct super_block *sb,
		(fex->fe_start << EXT4_SB(sb)->s_cluster_bits);
}

static inline loff_t extent_logical_end(struct ext4_sb_info *sbi,
					struct ext4_free_extent *fex)
{
	/* Use loff_t to avoid end exceeding ext4_lblk_t max. */
	return (loff_t)fex->fe_logical + EXT4_C2B(sbi, fex->fe_len);
}

static inline loff_t pa_logical_end(struct ext4_sb_info *sbi,
				    struct ext4_prealloc_space *pa)
{
	/* Use loff_t to avoid end exceeding ext4_lblk_t max. */
	return (loff_t)pa->pa_lstart + EXT4_C2B(sbi, pa->pa_len);
}

typedef int (*ext4_mballoc_query_range_fn)(
	struct super_block		*sb,
	ext4_group_t			agno,