Commit 4a200a09 authored by Darrick J. Wong
Browse files

xfs: implement masked btree key comparisons for _has_records scans



For keyspace fullness scans, we want to be able to mask off the parts of
the key that we don't care about.  For most btree types we /do/ want the
full keyspace, but for checking that a given space usage also has a full
complement of rmapbt records (even if different/multiple owners) we need
this masking so that we only track sparseness of rm_startblock, not the
whole keyspace (which is extremely sparse).

Augment the ->diff_two_keys and ->keys_contiguous helpers to take a
third union xfs_btree_key argument, and wire up xfs_rmap_has_records to
pass this through.  This third "mask" argument should contain a nonzero
value in each structure field that should be used in the key comparisons
done during the scan.

Signed-off-by: Darrick J. Wong <djwong@kernel.org>
Reviewed-by: Dave Chinner <dchinner@redhat.com>
parent 6abc7aef
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -3764,7 +3764,7 @@ xfs_alloc_has_records(
	memset(&high, 0xFF, sizeof(high));
	high.a.ar_startblock = bno + len - 1;

	return xfs_btree_has_records(cur, &low, &high, outcome);
	return xfs_btree_has_records(cur, &low, &high, NULL, outcome);
}

/*
+14 −4
Original line number Diff line number Diff line
@@ -260,8 +260,11 @@ STATIC int64_t
xfs_bnobt_diff_two_keys(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_key	*k1,
	const union xfs_btree_key	*k2)
	const union xfs_btree_key	*k2,
	const union xfs_btree_key	*mask)
{
	ASSERT(!mask || mask->alloc.ar_startblock);

	return (int64_t)be32_to_cpu(k1->alloc.ar_startblock) -
			be32_to_cpu(k2->alloc.ar_startblock);
}
@@ -270,10 +273,14 @@ STATIC int64_t
xfs_cntbt_diff_two_keys(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_key	*k1,
	const union xfs_btree_key	*k2)
	const union xfs_btree_key	*k2,
	const union xfs_btree_key	*mask)
{
	int64_t				diff;

	ASSERT(!mask || (mask->alloc.ar_blockcount &&
			 mask->alloc.ar_startblock));

	diff =  be32_to_cpu(k1->alloc.ar_blockcount) -
		be32_to_cpu(k2->alloc.ar_blockcount);
	if (diff)
@@ -427,8 +434,11 @@ STATIC enum xbtree_key_contig
xfs_allocbt_keys_contiguous(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_key	*key1,
	const union xfs_btree_key	*key2)
	const union xfs_btree_key	*key2,
	const union xfs_btree_key	*mask)
{
	ASSERT(!mask || mask->alloc.ar_startblock);

	return xbtree_key_contig(be32_to_cpu(key1->alloc.ar_startblock),
				 be32_to_cpu(key2->alloc.ar_startblock));
}
+8 −2
Original line number Diff line number Diff line
@@ -382,11 +382,14 @@ STATIC int64_t
xfs_bmbt_diff_two_keys(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_key	*k1,
	const union xfs_btree_key	*k2)
	const union xfs_btree_key	*k2,
	const union xfs_btree_key	*mask)
{
	uint64_t			a = be64_to_cpu(k1->bmbt.br_startoff);
	uint64_t			b = be64_to_cpu(k2->bmbt.br_startoff);

	ASSERT(!mask || mask->bmbt.br_startoff);

	/*
	 * Note: This routine previously casted a and b to int64 and subtracted
	 * them to generate a result.  This lead to problems if b was the
@@ -504,8 +507,11 @@ STATIC enum xbtree_key_contig
xfs_bmbt_keys_contiguous(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_key	*key1,
	const union xfs_btree_key	*key2)
	const union xfs_btree_key	*key2,
	const union xfs_btree_key	*mask)
{
	ASSERT(!mask || mask->bmbt.br_startoff);

	return xbtree_key_contig(be64_to_cpu(key1->bmbt.br_startoff),
				 be64_to_cpu(key2->bmbt.br_startoff));
}
+20 −4
Original line number Diff line number Diff line
@@ -5030,6 +5030,9 @@ struct xfs_btree_has_records {
	union xfs_btree_key		start_key;
	union xfs_btree_key		end_key;

	/* Mask for key comparisons, if desired. */
	const union xfs_btree_key	*key_mask;

	/* Highest record key we've seen so far. */
	union xfs_btree_key		high_key;

@@ -5057,7 +5060,8 @@ xfs_btree_has_records_helper(
		 * then there is a hole at the start of the search range.
		 * Classify this as sparse and stop immediately.
		 */
		if (xfs_btree_keycmp_lt(cur, &info->start_key, &rec_key))
		if (xfs_btree_masked_keycmp_lt(cur, &info->start_key, &rec_key,
					info->key_mask))
			return -ECANCELED;
	} else {
		/*
@@ -5068,7 +5072,7 @@ xfs_btree_has_records_helper(
		 * signal corruption.
		 */
		key_contig = cur->bc_ops->keys_contiguous(cur, &info->high_key,
					&rec_key);
					&rec_key, info->key_mask);
		if (key_contig == XBTREE_KEY_OVERLAP &&
				!(cur->bc_flags & XFS_BTREE_OVERLAPPING))
			return -EFSCORRUPTED;
@@ -5081,7 +5085,8 @@ xfs_btree_has_records_helper(
	 * remember it for later.
	 */
	cur->bc_ops->init_high_key_from_rec(&rec_high_key, rec);
	if (xfs_btree_keycmp_gt(cur, &rec_high_key, &info->high_key))
	if (xfs_btree_masked_keycmp_gt(cur, &rec_high_key, &info->high_key,
				info->key_mask))
		info->high_key = rec_high_key; /* struct copy */

	return 0;
@@ -5092,16 +5097,26 @@ xfs_btree_has_records_helper(
 * map to any records; is fully mapped to records; or is partially mapped to
 * records.  This is the btree record equivalent to determining if a file is
 * sparse.
 *
 * For most btree types, the record scan should use all available btree key
 * fields to compare the keys encountered.  These callers should pass NULL for
 * @mask.  However, some callers (e.g.  scanning physical space in the rmapbt)
 * want to ignore some part of the btree record keyspace when performing the
 * comparison.  These callers should pass in a union xfs_btree_key object with
 * the fields that *should* be a part of the comparison set to any nonzero
 * value, and the rest zeroed.
 */
int
xfs_btree_has_records(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_irec	*low,
	const union xfs_btree_irec	*high,
	const union xfs_btree_key	*mask,
	enum xbtree_recpacking		*outcome)
{
	struct xfs_btree_has_records	info = {
		.outcome		= XBTREE_RECPACKING_EMPTY,
		.key_mask		= mask,
	};
	int				error;

@@ -5129,7 +5144,8 @@ xfs_btree_has_records(
	 * the end of the search range, classify this as full.  Otherwise,
	 * there is a hole at the end of the search range.
	 */
	if (xfs_btree_keycmp_ge(cur, &info.high_key, &info.end_key))
	if (xfs_btree_masked_keycmp_ge(cur, &info.high_key, &info.end_key,
				mask))
		info.outcome = XBTREE_RECPACKING_FULL;

out:
+44 −6
Original line number Diff line number Diff line
@@ -161,11 +161,14 @@ struct xfs_btree_ops {

	/*
	 * Difference between key2 and key1 -- positive if key1 > key2,
	 * negative if key1 < key2, and zero if equal.
	 * negative if key1 < key2, and zero if equal.  If the @mask parameter
	 * is non NULL, each key field to be used in the comparison must
	 * contain a nonzero value.
	 */
	int64_t (*diff_two_keys)(struct xfs_btree_cur *cur,
				 const union xfs_btree_key *key1,
				 const union xfs_btree_key *key2);
				 const union xfs_btree_key *key2,
				 const union xfs_btree_key *mask);

	const struct xfs_buf_ops	*buf_ops;

@@ -187,10 +190,13 @@ struct xfs_btree_ops {
	 * @key1 < K < @key2.  To determine if two btree records are
	 * immediately adjacent, @key1 should be the high key of the first
	 * record and @key2 should be the low key of the second record.
	 * If the @mask parameter is non NULL, each key field to be used in the
	 * comparison must contain a nonzero value.
	 */
	enum xbtree_key_contig (*keys_contiguous)(struct xfs_btree_cur *cur,
			       const union xfs_btree_key *key1,
			       const union xfs_btree_key *key2);
			       const union xfs_btree_key *key2,
			       const union xfs_btree_key *mask);
};

/*
@@ -581,6 +587,7 @@ typedef bool (*xfs_btree_key_gap_fn)(struct xfs_btree_cur *cur,
int xfs_btree_has_records(struct xfs_btree_cur *cur,
		const union xfs_btree_irec *low,
		const union xfs_btree_irec *high,
		const union xfs_btree_key *mask,
		enum xbtree_recpacking *outcome);

bool xfs_btree_has_more_records(struct xfs_btree_cur *cur);
@@ -593,7 +600,7 @@ xfs_btree_keycmp_lt(
	const union xfs_btree_key	*key1,
	const union xfs_btree_key	*key2)
{
	return cur->bc_ops->diff_two_keys(cur, key1, key2) < 0;
	return cur->bc_ops->diff_two_keys(cur, key1, key2, NULL) < 0;
}

static inline bool
@@ -602,7 +609,7 @@ xfs_btree_keycmp_gt(
	const union xfs_btree_key	*key1,
	const union xfs_btree_key	*key2)
{
	return cur->bc_ops->diff_two_keys(cur, key1, key2) > 0;
	return cur->bc_ops->diff_two_keys(cur, key1, key2, NULL) > 0;
}

static inline bool
@@ -611,7 +618,7 @@ xfs_btree_keycmp_eq(
	const union xfs_btree_key	*key1,
	const union xfs_btree_key	*key2)
{
	return cur->bc_ops->diff_two_keys(cur, key1, key2) == 0;
	return cur->bc_ops->diff_two_keys(cur, key1, key2, NULL) == 0;
}

static inline bool
@@ -641,6 +648,37 @@ xfs_btree_keycmp_ne(
	return !xfs_btree_keycmp_eq(cur, key1, key2);
}

/* Masked key comparison helpers */
static inline bool
xfs_btree_masked_keycmp_lt(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_key	*key1,
	const union xfs_btree_key	*key2,
	const union xfs_btree_key	*mask)
{
	int64_t				diff;

	/* Compare only the key fields selected by @mask; NULL means all. */
	diff = cur->bc_ops->diff_two_keys(cur, key1, key2, mask);
	return diff < 0;
}

static inline bool
xfs_btree_masked_keycmp_gt(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_key	*key1,
	const union xfs_btree_key	*key2,
	const union xfs_btree_key	*mask)
{
	int64_t				diff;

	/* A positive diff means key1 sorts after key2 under @mask. */
	diff = cur->bc_ops->diff_two_keys(cur, key1, key2, mask);
	return diff > 0;
}

static inline bool
xfs_btree_masked_keycmp_ge(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_key	*key1,
	const union xfs_btree_key	*key2,
	const union xfs_btree_key	*mask)
{
	return !xfs_btree_masked_keycmp_lt(cur, key1, key2, mask);
}

/* Does this cursor point to the last block in the given level? */
static inline bool
xfs_btree_islastblock(
Loading