Commit 9fa47bdc authored by Darrick J. Wong
Browse files

xfs: use separate btree cursor cache for each btree type



Now that we have the infrastructure to track the max possible height of
each btree type, we can create a separate slab cache for cursors of each
type of btree.  For smaller indices like the free space btrees, this
means that we can pack more cursors into a slab page, improving slab
utilization.

Signed-off-by: Darrick J. Wong <djwong@kernel.org>
Reviewed-by: Dave Chinner <dchinner@redhat.com>
parent 0ed5f735
Loading
Loading
Loading
Loading
+22 −1
Original line number Original line Diff line number Diff line
@@ -20,6 +20,7 @@
#include "xfs_trans.h"
#include "xfs_trans.h"
#include "xfs_ag.h"
#include "xfs_ag.h"


static kmem_zone_t	*xfs_allocbt_cur_cache;


STATIC struct xfs_btree_cur *
STATIC struct xfs_btree_cur *
xfs_allocbt_dup_cursor(
xfs_allocbt_dup_cursor(
@@ -477,7 +478,8 @@ xfs_allocbt_init_common(


	ASSERT(btnum == XFS_BTNUM_BNO || btnum == XFS_BTNUM_CNT);
	ASSERT(btnum == XFS_BTNUM_BNO || btnum == XFS_BTNUM_CNT);


	cur = xfs_btree_alloc_cursor(mp, tp, btnum, mp->m_alloc_maxlevels);
	cur = xfs_btree_alloc_cursor(mp, tp, btnum, mp->m_alloc_maxlevels,
			xfs_allocbt_cur_cache);
	cur->bc_ag.abt.active = false;
	cur->bc_ag.abt.active = false;


	if (btnum == XFS_BTNUM_CNT) {
	if (btnum == XFS_BTNUM_CNT) {
@@ -617,3 +619,22 @@ xfs_allocbt_calc_size(
{
{
	return xfs_btree_calc_size(mp->m_alloc_mnr, len);
	return xfs_btree_calc_size(mp->m_alloc_mnr, len);
}
}

/* Create the slab cache used by free-space (bnobt/cntbt) btree cursors. */
int __init
xfs_allocbt_init_cur_cache(void)
{
	unsigned int	size;

	/* Size cursors for the largest tree any filesystem could have. */
	size = xfs_btree_cur_sizeof(xfs_allocbt_maxlevels_ondisk());
	xfs_allocbt_cur_cache = kmem_cache_create("xfs_bnobt_cur", size,
			0, 0, NULL);
	return xfs_allocbt_cur_cache ? 0 : -ENOMEM;
}

/* Destroy the allocation btree cursor cache. */
void
xfs_allocbt_destroy_cur_cache(void)
{
	kmem_cache_destroy(xfs_allocbt_cur_cache);
	/* Clear the pointer so a stale cache cannot be reused. */
	xfs_allocbt_cur_cache = NULL;
}
+3 −0
Original line number Original line Diff line number Diff line
@@ -62,4 +62,7 @@ void xfs_allocbt_commit_staged_btree(struct xfs_btree_cur *cur,


unsigned int xfs_allocbt_maxlevels_ondisk(void);
unsigned int xfs_allocbt_maxlevels_ondisk(void);


int __init xfs_allocbt_init_cur_cache(void);
void xfs_allocbt_destroy_cur_cache(void);

#endif	/* __XFS_ALLOC_BTREE_H__ */
#endif	/* __XFS_ALLOC_BTREE_H__ */
+22 −1
Original line number Original line Diff line number Diff line
@@ -22,6 +22,8 @@
#include "xfs_trace.h"
#include "xfs_trace.h"
#include "xfs_rmap.h"
#include "xfs_rmap.h"


static kmem_zone_t	*xfs_bmbt_cur_cache;

/*
/*
 * Convert on-disk form of btree root to in-memory form.
 * Convert on-disk form of btree root to in-memory form.
 */
 */
@@ -553,7 +555,7 @@ xfs_bmbt_init_cursor(
	ASSERT(whichfork != XFS_COW_FORK);
	ASSERT(whichfork != XFS_COW_FORK);


	cur = xfs_btree_alloc_cursor(mp, tp, XFS_BTNUM_BMAP,
	cur = xfs_btree_alloc_cursor(mp, tp, XFS_BTNUM_BMAP,
			mp->m_bm_maxlevels[whichfork]);
			mp->m_bm_maxlevels[whichfork], xfs_bmbt_cur_cache);
	cur->bc_nlevels = be16_to_cpu(ifp->if_broot->bb_level) + 1;
	cur->bc_nlevels = be16_to_cpu(ifp->if_broot->bb_level) + 1;
	cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_bmbt_2);
	cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_bmbt_2);


@@ -675,3 +677,22 @@ xfs_bmbt_calc_size(
{
{
	return xfs_btree_calc_size(mp->m_bmap_dmnr, len);
	return xfs_btree_calc_size(mp->m_bmap_dmnr, len);
}
}

/* Create the slab cache used by block-mapping btree cursors. */
int __init
xfs_bmbt_init_cur_cache(void)
{
	unsigned int	size;

	/* Size cursors for the largest tree any filesystem could have. */
	size = xfs_btree_cur_sizeof(xfs_bmbt_maxlevels_ondisk());
	xfs_bmbt_cur_cache = kmem_cache_create("xfs_bmbt_cur", size,
			0, 0, NULL);
	return xfs_bmbt_cur_cache ? 0 : -ENOMEM;
}

/* Destroy the block-mapping btree cursor cache. */
void
xfs_bmbt_destroy_cur_cache(void)
{
	kmem_cache_destroy(xfs_bmbt_cur_cache);
	/* Clear the pointer so a stale cache cannot be reused. */
	xfs_bmbt_cur_cache = NULL;
}
+3 −0
Original line number Original line Diff line number Diff line
@@ -112,4 +112,7 @@ extern unsigned long long xfs_bmbt_calc_size(struct xfs_mount *mp,


unsigned int xfs_bmbt_maxlevels_ondisk(void);
unsigned int xfs_bmbt_maxlevels_ondisk(void);


int __init xfs_bmbt_init_cur_cache(void);
void xfs_bmbt_destroy_cur_cache(void);

#endif	/* __XFS_BMAP_BTREE_H__ */
#endif	/* __XFS_BMAP_BTREE_H__ */
+45 −6
Original line number Original line Diff line number Diff line
@@ -22,11 +22,11 @@
#include "xfs_log.h"
#include "xfs_log.h"
#include "xfs_btree_staging.h"
#include "xfs_btree_staging.h"
#include "xfs_ag.h"
#include "xfs_ag.h"

/*
 * Cursor allocation zone.
 */
kmem_zone_t	*xfs_btree_cur_zone;
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_bmap_btree.h"
#include "xfs_rmap_btree.h"
#include "xfs_refcount_btree.h"


/*
/*
 * Btree magic numbers.
 * Btree magic numbers.
@@ -379,7 +379,7 @@ xfs_btree_del_cursor(
		kmem_free(cur->bc_ops);
		kmem_free(cur->bc_ops);
	if (!(cur->bc_flags & XFS_BTREE_LONG_PTRS) && cur->bc_ag.pag)
	if (!(cur->bc_flags & XFS_BTREE_LONG_PTRS) && cur->bc_ag.pag)
		xfs_perag_put(cur->bc_ag.pag);
		xfs_perag_put(cur->bc_ag.pag);
	kmem_cache_free(xfs_btree_cur_zone, cur);
	kmem_cache_free(cur->bc_cache, cur);
}
}


/*
/*
@@ -4962,3 +4962,42 @@ xfs_btree_has_more_records(
	else
	else
		return block->bb_u.s.bb_rightsib != cpu_to_be32(NULLAGBLOCK);
		return block->bb_u.s.bb_rightsib != cpu_to_be32(NULLAGBLOCK);
}
}

/* Set up one slab cache per btree cursor type. */
int __init
xfs_btree_init_cur_caches(void)
{
	int		error;

	/* Nothing to unwind if the very first cache cannot be created. */
	error = xfs_allocbt_init_cur_cache();
	if (error)
		return error;

	if ((error = xfs_inobt_init_cur_cache()) ||
	    (error = xfs_bmbt_init_cur_cache()) ||
	    (error = xfs_rmapbt_init_cur_cache()) ||
	    (error = xfs_refcountbt_init_cur_cache()))
		goto err;

	return 0;
err:
	/* Tear down whatever was created before the failure. */
	xfs_btree_destroy_cur_caches();
	return error;
}

/*
 * Destroy all the btree cursor caches, if they've been allocated.  Each
 * per-type destructor frees its cache and nulls the file-scope pointer,
 * so this can also serve as the unwind path for a partially completed
 * cache setup.
 */
void
xfs_btree_destroy_cur_caches(void)
{
	xfs_allocbt_destroy_cur_cache();
	xfs_inobt_destroy_cur_cache();
	xfs_bmbt_destroy_cur_cache();
	xfs_rmapbt_destroy_cur_cache();
	xfs_refcountbt_destroy_cur_cache();
}
Loading