Commit ddb7afee authored by Maxim Mikityanskiy, committed by Jakub Kicinski

net/mlx5e: Optimize RQ page deallocation

mlx5e_free_rx_mpwqe loops over all pages of an MPWQE, calling
mlx5e_page_release for the ones that are not scheduled for XDP_TX or
XDP_REDIRECT, and mlx5e_page_release in turn checks whether it's an XSK
RQ or a regular one for each page/XSK frame. This check can be moved
outside the loop to reduce the number of branches.
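
This is the classic hoisting of a loop-invariant branch. As a minimal
standalone C sketch of the pattern (simplified stand-in types and
helpers, not the driver code):

  #include <stdbool.h>
  #include <stdio.h>

  /* Hypothetical stand-ins for the two per-page release paths. */
  static void xsk_frame_free(int i) { printf("XSK frame %d freed\n", i); }
  static void page_release(int i)   { printf("page %d released\n", i); }

  /* Before: the RQ-type check runs once per page. */
  static void free_pages_per_page_check(bool is_xsk, int pages_per_wqe)
  {
  	for (int i = 0; i < pages_per_wqe; i++) {
  		if (is_xsk)
  			xsk_frame_free(i);
  		else
  			page_release(i);
  	}
  }

  /* After: the check is hoisted and runs once per WQE. */
  static void free_pages_hoisted_check(bool is_xsk, int pages_per_wqe)
  {
  	if (is_xsk) {
  		for (int i = 0; i < pages_per_wqe; i++)
  			xsk_frame_free(i);
  	} else {
  		for (int i = 0; i < pages_per_wqe; i++)
  			page_release(i);
  	}
  }

  int main(void)
  {
  	free_pages_per_page_check(false, 4);
  	free_pages_hoisted_check(false, 4);
  	return 0;
  }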

mlx5e_free_rx_wqe loops over all fragments, calling mlx5e_page_release
for the ones that are the last in their page, and mlx5e_page_release
again checks whether it's an XSK RQ or a regular one for each fragment.
Using the fact that XSK doesn't support multiple fragments, this path
can be optimized for both the XSK and the regular case:

1. Make an early check for XSK and call its deallocator directly, saving
3 branches (loop condition, frag->last_in_page and selection of
deallocator).

2. Call the regular deallocator directly in the non-XSK case, saving a
branch for every fragment except the first; both points are illustrated
in the sketch after this list.
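
As a minimal standalone C sketch of the resulting control flow
(simplified stand-in types and helpers, not the driver code):

  #include <stdbool.h>
  #include <stdio.h>

  struct frag_info { bool last_in_page; };

  /* Hypothetical stand-ins for xsk_buff_free and the regular page
   * deallocator.
   */
  static void xsk_frame_free(void) { puts("XSK frame freed"); }
  static void page_release(int i)  { printf("page of frag %d released\n", i); }

  static void free_rx_wqe(bool is_xsk, struct frag_info *wi, int num_frags)
  {
  	/* Point 1: XSK never has more than one fragment, so take an
  	 * early exit and call its deallocator directly.
  	 */
  	if (is_xsk) {
  		xsk_frame_free();
  		return;
  	}

  	/* Point 2: the non-XSK path calls the regular deallocator
  	 * directly, with no per-fragment RQ-type branch.
  	 */
  	for (int i = 0; i < num_frags; i++, wi++)
  		if (wi->last_in_page)
  			page_release(i);
  }

  int main(void)
  {
  	struct frag_info frags[2] = { { false }, { true } };

  	free_rx_wqe(false, frags, 2);
  	free_rx_wqe(true, frags, 2);
  	return 0;
  }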

After the changes, mlx5e_page_release is removed, as there are no
callers left.

Signed-off-by: Maxim Mikityanskiy <maximmi@nvidia.com>
Reviewed-by: Tariq Toukan <tariqt@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent 96d37d86
+1 −1
@@ -253,7 +253,7 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq,
 		return NULL; /* page/packet was consumed by XDP */
 
 	/* XDP_PASS: copy the data from the UMEM to a new SKB. The frame reuse
-	 * will be handled by mlx5e_put_rx_frag.
+	 * will be handled by mlx5e_free_rx_wqe.
 	 * On SKB allocation failure, NULL is returned.
 	 */
 	return mlx5e_xsk_construct_skb(rq, xdp->data, xdp->data_end - xdp->data);
+23 −18
@@ -317,20 +317,6 @@ void mlx5e_page_release_dynamic(struct mlx5e_rq *rq, struct page *page, bool rec
 	}
 }
 
-static inline void mlx5e_page_release(struct mlx5e_rq *rq,
-				      union mlx5e_alloc_unit *au,
-				      bool recycle)
-{
-	if (rq->xsk_pool)
-		/* The `recycle` parameter is ignored, and the page is always
-		 * put into the Reuse Ring, because there is no way to return
-		 * the page to the userspace when the interface goes down.
-		 */
-		xsk_buff_free(au->xsk);
-	else
-		mlx5e_page_release_dynamic(rq, au->page, recycle);
-}
-
 static inline int mlx5e_get_rx_frag(struct mlx5e_rq *rq,
 				    struct mlx5e_wqe_frag_info *frag)
 {
@@ -352,7 +338,7 @@ static inline void mlx5e_put_rx_frag(struct mlx5e_rq *rq,
 				     bool recycle)
 {
 	if (frag->last_in_page)
-		mlx5e_page_release(rq, frag->au, recycle);
+		mlx5e_page_release_dynamic(rq, frag->au->page, recycle);
 }
 
 static inline struct mlx5e_wqe_frag_info *get_frag(struct mlx5e_rq *rq, u16 ix)
@@ -395,6 +381,15 @@ static inline void mlx5e_free_rx_wqe(struct mlx5e_rq *rq,
 {
 	int i;
 
+	if (rq->xsk_pool) {
+		/* The `recycle` parameter is ignored, and the page is always
+		 * put into the Reuse Ring, because there is no way to return
+		 * the page to the userspace when the interface goes down.
+		 */
+		xsk_buff_free(wi->au->xsk);
+		return;
+	}
+
 	for (i = 0; i < rq->wqe.info.num_frags; i++, wi++)
 		mlx5e_put_rx_frag(rq, wi, recycle);
 }
@@ -463,9 +458,19 @@ mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, bool recycle

 	no_xdp_xmit = bitmap_empty(wi->xdp_xmit_bitmap, rq->mpwqe.pages_per_wqe);
 
-	for (i = 0; i < rq->mpwqe.pages_per_wqe; i++)
-		if (no_xdp_xmit || !test_bit(i, wi->xdp_xmit_bitmap))
-			mlx5e_page_release(rq, &alloc_units[i], recycle);
+	if (rq->xsk_pool) {
+		/* The `recycle` parameter is ignored, and the page is always
+		 * put into the Reuse Ring, because there is no way to return
+		 * the page to the userspace when the interface goes down.
+		 */
+		for (i = 0; i < rq->mpwqe.pages_per_wqe; i++)
+			if (no_xdp_xmit || !test_bit(i, wi->xdp_xmit_bitmap))
+				xsk_buff_free(alloc_units[i].xsk);
+	} else {
+		for (i = 0; i < rq->mpwqe.pages_per_wqe; i++)
+			if (no_xdp_xmit || !test_bit(i, wi->xdp_xmit_bitmap))
+				mlx5e_page_release_dynamic(rq, alloc_units[i].page, recycle);
+	}
 }
 
 static void mlx5e_post_rx_mpwqe(struct mlx5e_rq *rq, u8 n)