Commit ddc87e7d authored by Maxim Mikityanskiy, committed by Saeed Mahameed

net/mlx5e: Store DMA address inside struct page



Use page_pool_set_dma_addr() to store the DMA address of a page inside
struct page, in order to avoid passing struct mlx5e_dma_info to XDP
handlers. Previously, struct mlx5e_dma_info was used to pass both the
DMA address and the page, and it worked well for the single-fragment
case.
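
As a rough sketch of the map-time half of this idea (illustrative only, not
part of this commit: example_map_rx_page() and its parameters are made up,
while dma_map_page(), dma_mapping_error() and page_pool_set_dma_addr() are
the real kernel helpers from <linux/dma-mapping.h> and <net/page_pool.h>):

	/* Illustrative only: map an RX page for DMA and stash the
	 * address inside struct page, so that later code holding just
	 * the page can recover it with page_pool_get_dma_addr().
	 */
	static int example_map_rx_page(struct device *dev, struct page *page)
	{
		dma_addr_t addr;

		addr = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
		if (unlikely(dma_mapping_error(dev, addr)))
			return -ENOMEM;

		page_pool_set_dma_addr(page, addr); /* stored in page->dma_addr */
		return 0;
	}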

When XDP multi buffer is in use and a fragmented xdp_frame has to be
transmitted, the driver needs to know the DMA addresses of the fragments;
however, the array of fragments in struct skb_shared_info doesn't contain
them. In order to pass the DMA addresses, the driver puts them into
struct page itself, which is accessible from the array of fragments in
struct skb_shared_info. The existing XDP handlers are modified to remove
the dependency on struct mlx5e_dma_info.
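
On the transmit side, a fragment's address can then be recovered from
struct page alone; another illustrative sketch (example_walk_frags() is
made up, while xdp_get_shared_info_from_frame(), skb_frag_page(),
skb_frag_off(), skb_frag_size() and page_pool_get_dma_addr() are the real
helpers):

	/* Illustrative only: walk the fragments of a multi-buffer
	 * xdp_frame and recover each fragment's DMA address from its
	 * struct page, since struct skb_shared_info carries pages but
	 * no DMA addresses.
	 */
	static void example_walk_frags(struct xdp_frame *xdpf)
	{
		struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf);
		int i;

		for (i = 0; i < sinfo->nr_frags; i++) {
			skb_frag_t *frag = &sinfo->frags[i];
			dma_addr_t addr = page_pool_get_dma_addr(skb_frag_page(frag)) +
					  skb_frag_off(frag);

			/* [addr, addr + skb_frag_size(frag)) is the range a
			 * TX descriptor for this fragment would point at.
			 */
			pr_debug("frag %d: dma %pad len %u\n",
				 i, &addr, skb_frag_size(frag));
		}
	}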

Signed-off-by: Maxim Mikityanskiy <maximmi@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
parent ea5d49bd
+1 −1
@@ -515,7 +515,7 @@ struct mlx5e_xdp_info {
 		} frame;
 		struct {
 			struct mlx5e_rq *rq;
-			struct mlx5e_dma_info di;
+			struct page *page;
 		} page;
 	};
 };
+2 −4
@@ -44,10 +44,8 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget);
 int mlx5e_poll_ico_cq(struct mlx5e_cq *cq);
 
 /* RX */
-void mlx5e_page_dma_unmap(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info);
-void mlx5e_page_release_dynamic(struct mlx5e_rq *rq,
-				struct mlx5e_dma_info *dma_info,
-				bool recycle);
+void mlx5e_page_dma_unmap(struct mlx5e_rq *rq, struct page *page);
+void mlx5e_page_release_dynamic(struct mlx5e_rq *rq, struct page *page, bool recycle);
 INDIRECT_CALLABLE_DECLARE(bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq));
 INDIRECT_CALLABLE_DECLARE(bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq));
 int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget);
+7 −7
@@ -57,7 +57,7 @@ int mlx5e_xdp_max_mtu(struct mlx5e_params *params, struct mlx5e_xsk_param *xsk)
 
 static inline bool
 mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq,
-		    struct mlx5e_dma_info *di, struct xdp_buff *xdp)
+		    struct page *page, struct xdp_buff *xdp)
 {
 	struct mlx5e_xmit_data xdptxd;
 	struct mlx5e_xdp_info xdpi;
@@ -110,13 +110,13 @@ mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq,
 
 		xdpi.mode = MLX5E_XDP_XMIT_MODE_PAGE;
 
-		dma_addr = di->addr + (xdpf->data - (void *)xdpf);
+		dma_addr = page_pool_get_dma_addr(page) + (xdpf->data - (void *)xdpf);
 		dma_sync_single_for_device(sq->pdev, dma_addr, xdptxd.len,
 					   DMA_TO_DEVICE);
 
 		xdptxd.dma_addr = dma_addr;
 		xdpi.page.rq    = rq;
-		xdpi.page.di    = *di;
+		xdpi.page.page = page;
 	}
 
 	return INDIRECT_CALL_2(sq->xmit_xdp_frame, mlx5e_xmit_xdp_frame_mpwqe,
@@ -124,7 +124,7 @@ mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq,
 }
 
 /* returns true if packet was consumed by xdp */
-bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct mlx5e_dma_info *di,
+bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct page *page,
 		      struct bpf_prog *prog, struct xdp_buff *xdp)
 {
 	u32 act;
@@ -135,7 +135,7 @@ bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct mlx5e_dma_info *di,
 	case XDP_PASS:
 		return false;
 	case XDP_TX:
-		if (unlikely(!mlx5e_xmit_xdp_buff(rq->xdpsq, rq, di, xdp)))
+		if (unlikely(!mlx5e_xmit_xdp_buff(rq->xdpsq, rq, page, xdp)))
 			goto xdp_abort;
 		__set_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags); /* non-atomic */
 		return true;
@@ -147,7 +147,7 @@ bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct mlx5e_dma_info *di,
 		__set_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags);
 		__set_bit(MLX5E_RQ_FLAG_XDP_REDIRECT, rq->flags);
 		if (xdp->rxq->mem.type != MEM_TYPE_XSK_BUFF_POOL)
-			mlx5e_page_dma_unmap(rq, di);
+			mlx5e_page_dma_unmap(rq, page);
 		rq->stats->xdp_redirect++;
 		return true;
 	default:
@@ -384,7 +384,7 @@ static void mlx5e_free_xdpsq_desc(struct mlx5e_xdpsq *sq,
 			break;
 		case MLX5E_XDP_XMIT_MODE_PAGE:
 			/* XDP_TX from the regular RQ */
-			mlx5e_page_release_dynamic(xdpi.page.rq, &xdpi.page.di, recycle);
+			mlx5e_page_release_dynamic(xdpi.page.rq, xdpi.page.page, recycle);
 			break;
 		case MLX5E_XDP_XMIT_MODE_XSK:
 			/* AF_XDP send */
+1 −1
@@ -47,7 +47,7 @@
 
 struct mlx5e_xsk_param;
 int mlx5e_xdp_max_mtu(struct mlx5e_params *params, struct mlx5e_xsk_param *xsk);
-bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct mlx5e_dma_info *di,
+bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct page *page,
 		      struct bpf_prog *prog, struct xdp_buff *xdp);
 void mlx5e_xdp_mpwqe_complete(struct mlx5e_xdpsq *sq);
 bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq);
+1 −1
@@ -780,7 +780,7 @@ static void mlx5e_free_rq(struct mlx5e_rq *rq)
 		 * entered, and it's safe to call mlx5e_page_release_dynamic
 		 * directly.
 		 */
-		mlx5e_page_release_dynamic(rq, dma_info, false);
+		mlx5e_page_release_dynamic(rq, dma_info->page, false);
 	}
 
 	xdp_rxq_info_unreg(&rq->xdp_rxq);