Commit de67dc57 authored by Dave Chinner, committed by Dave Chinner

xfs: break up xfs_buf_find() into individual pieces



xfs_buf_find() is made up of three main parts: lookup, insert and
locking. The interactions with xfs_buf_get_map() require it to be
called twice - once for a pure lookup, and again on lookup failure
so the insert path can be run. We want to simplify this down a lot,
so split it into a fast path lookup, a slow path insert and a "lock
the found buffer" helper. This will then let us integrate these
operations more effectively into xfs_buf_get_map() in future
patches.
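
For reference, the double-call pattern being prepared for removal looks
roughly like this in xfs_buf_get_map(). This is an abridged sketch, not
verbatim source: allocation failure details, readahead handling and the
exact insert-race cleanup are elided, and _xfs_buf_alloc()/xfs_buf_free()
are the allocation/free helpers from the surrounding file.

	struct xfs_buf	*bp;
	struct xfs_buf	*new_bp;
	int		error;

	/* First call: pure lookup, nothing to insert. */
	error = xfs_buf_find(target, map, nmaps, flags, NULL, &bp);
	if (!error)
		goto found;
	if (error != -ENOENT)
		return error;

	/* Cache miss: allocate a buffer to insert ... */
	error = _xfs_buf_alloc(target, map, nmaps, flags, &new_bp);
	if (error)
		return error;

	/* ... then repeat the whole lookup so the insert path can run. */
	error = xfs_buf_find(target, map, nmaps, flags, new_bp, &bp);
	if (error)
		goto out_free_buf;
	if (bp != new_bp)
		xfs_buf_free(new_bp);	/* lost the insert race */
found:
	*bpp = bp;
	return 0;

out_free_buf:
	xfs_buf_free(new_bp);
	return error;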

Signed-off-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Darrick J. Wong <djwong@kernel.org>
parent 85c73bf7
+104 −54
@@ -503,6 +503,98 @@ xfs_buf_hash_destroy(
 	rhashtable_destroy(&pag->pag_buf_hash);
 }
 
+static int
+xfs_buf_map_verify(
+	struct xfs_buftarg	*btp,
+	struct xfs_buf_map	*map)
+{
+	xfs_daddr_t		eofs;
+
+	/* Check for IOs smaller than the sector size / not sector aligned */
+	ASSERT(!(BBTOB(map->bm_len) < btp->bt_meta_sectorsize));
+	ASSERT(!(BBTOB(map->bm_bn) & (xfs_off_t)btp->bt_meta_sectormask));
+
+	/*
+	 * Corrupted block numbers can get through to here, unfortunately, so we
+	 * have to check that the buffer falls within the filesystem bounds.
+	 */
+	eofs = XFS_FSB_TO_BB(btp->bt_mount, btp->bt_mount->m_sb.sb_dblocks);
+	if (map->bm_bn < 0 || map->bm_bn >= eofs) {
+		xfs_alert(btp->bt_mount,
+			  "%s: daddr 0x%llx out of range, EOFS 0x%llx",
+			  __func__, map->bm_bn, eofs);
+		WARN_ON(1);
+		return -EFSCORRUPTED;
+	}
+	return 0;
+}
+
+static int
+xfs_buf_find_lock(
+	struct xfs_buf          *bp,
+	xfs_buf_flags_t		flags)
+{
+	if (!xfs_buf_trylock(bp)) {
+		if (flags & XBF_TRYLOCK) {
+			XFS_STATS_INC(bp->b_mount, xb_busy_locked);
+			xfs_buf_rele(bp);
+			return -EAGAIN;
+		}
+		xfs_buf_lock(bp);
+		XFS_STATS_INC(bp->b_mount, xb_get_locked_waited);
+	}
+
+	/*
+	 * if the buffer is stale, clear all the external state associated with
+	 * it. We need to keep flags such as how we allocated the buffer memory
+	 * intact here.
+	 */
+	if (bp->b_flags & XBF_STALE) {
+		ASSERT((bp->b_flags & _XBF_DELWRI_Q) == 0);
+		bp->b_flags &= _XBF_KMEM | _XBF_PAGES;
+		bp->b_ops = NULL;
+	}
+	return 0;
+}
+
+static inline struct xfs_buf *
+xfs_buf_lookup(
+	struct xfs_perag	*pag,
+	struct xfs_buf_map	*map)
+{
+	struct xfs_buf          *bp;
+
+	bp = rhashtable_lookup(&pag->pag_buf_hash, map, xfs_buf_hash_params);
+	if (!bp)
+		return NULL;
+	atomic_inc(&bp->b_hold);
+	return bp;
+}
+
+/*
+ * Insert the new_bp into the hash table. This consumes the perag reference
+ * taken for the lookup.
+ */
+static int
+xfs_buf_find_insert(
+	struct xfs_buftarg	*btp,
+	struct xfs_perag	*pag,
+	struct xfs_buf		*new_bp)
+{
+	/* No match found */
+	if (!new_bp) {
+		xfs_perag_put(pag);
+		XFS_STATS_INC(btp->bt_mount, xb_miss_locked);
+		return -ENOENT;
+	}
+
+	/* the buffer keeps the perag reference until it is freed */
+	new_bp->b_pag = pag;
+	rhashtable_insert_fast(&pag->pag_buf_hash, &new_bp->b_rhash_head,
+			       xfs_buf_hash_params);
+	return 0;
+}
+
 /*
  * Look up a buffer in the buffer cache and return it referenced and locked
  * in @found_bp.
@@ -533,7 +625,7 @@ xfs_buf_find(
 	struct xfs_perag	*pag;
 	struct xfs_buf		*bp;
 	struct xfs_buf_map	cmap = { .bm_bn = map[0].bm_bn };
-	xfs_daddr_t		eofs;
+	int			error;
 	int			i;
 
 	*found_bp = NULL;
@@ -541,47 +633,22 @@ xfs_buf_find(
 	for (i = 0; i < nmaps; i++)
 		cmap.bm_len += map[i].bm_len;
 
-	/* Check for IOs smaller than the sector size / not sector aligned */
-	ASSERT(!(BBTOB(cmap.bm_len) < btp->bt_meta_sectorsize));
-	ASSERT(!(BBTOB(cmap.bm_bn) & (xfs_off_t)btp->bt_meta_sectormask));
-
-	/*
-	 * Corrupted block numbers can get through to here, unfortunately, so we
-	 * have to check that the buffer falls within the filesystem bounds.
-	 */
-	eofs = XFS_FSB_TO_BB(btp->bt_mount, btp->bt_mount->m_sb.sb_dblocks);
-	if (cmap.bm_bn < 0 || cmap.bm_bn >= eofs) {
-		xfs_alert(btp->bt_mount,
-			  "%s: daddr 0x%llx out of range, EOFS 0x%llx",
-			  __func__, cmap.bm_bn, eofs);
-		WARN_ON(1);
-		return -EFSCORRUPTED;
-	}
+	error = xfs_buf_map_verify(btp, &cmap);
+	if (error)
+		return error;
 
 	pag = xfs_perag_get(btp->bt_mount,
 			    xfs_daddr_to_agno(btp->bt_mount, cmap.bm_bn));
 
 	spin_lock(&pag->pag_buf_lock);
-	bp = rhashtable_lookup_fast(&pag->pag_buf_hash, &cmap,
-				    xfs_buf_hash_params);
-	if (bp) {
-		atomic_inc(&bp->b_hold);
+	bp = xfs_buf_lookup(pag, &cmap);
+	if (bp)
 		goto found;
-	}
 
-	/* No match found */
-	if (!new_bp) {
-		XFS_STATS_INC(btp->bt_mount, xb_miss_locked);
-		spin_unlock(&pag->pag_buf_lock);
-		xfs_perag_put(pag);
-		return -ENOENT;
-	}
-
-	/* the buffer keeps the perag reference until it is freed */
-	new_bp->b_pag = pag;
-	rhashtable_insert_fast(&pag->pag_buf_hash, &new_bp->b_rhash_head,
-			       xfs_buf_hash_params);
+	error = xfs_buf_find_insert(btp, pag, new_bp);
 	spin_unlock(&pag->pag_buf_lock);
+	if (error)
+		return error;
 	*found_bp = new_bp;
 	return 0;
 
@@ -589,26 +656,9 @@ xfs_buf_find(
 	spin_unlock(&pag->pag_buf_lock);
 	xfs_perag_put(pag);
 
-	if (!xfs_buf_trylock(bp)) {
-		if (flags & XBF_TRYLOCK) {
-			xfs_buf_rele(bp);
-			XFS_STATS_INC(btp->bt_mount, xb_busy_locked);
-			return -EAGAIN;
-		}
-		xfs_buf_lock(bp);
-		XFS_STATS_INC(btp->bt_mount, xb_get_locked_waited);
-	}
-
-	/*
-	 * if the buffer is stale, clear all the external state associated with
-	 * it. We need to keep flags such as how we allocated the buffer memory
-	 * intact here.
-	 */
-	if (bp->b_flags & XBF_STALE) {
-		ASSERT((bp->b_flags & _XBF_DELWRI_Q) == 0);
-		bp->b_flags &= _XBF_KMEM | _XBF_PAGES;
-		bp->b_ops = NULL;
-	}
+	error = xfs_buf_find_lock(bp, flags);
+	if (error)
+		return error;
 
 	trace_xfs_buf_find(bp, flags, _RET_IP_);
 	XFS_STATS_INC(btp->bt_mount, xb_get_locked);