Commit 4a5c5e25 authored by Dragos Tatulea, committed by Saeed Mahameed

net/mlx5e: RX, Enable dma map and sync from page_pool allocator

Remove driver DMA mapping and unmapping of pages. Let the
page_pool API do it.
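
For readers unfamiliar with the page_pool DMA support this relies on: when a pool is created with PP_FLAG_DMA_MAP, the allocator maps each page the first time it enters the pool and unmaps it only when the page finally leaves the pool; with PP_FLAG_DMA_SYNC_DEV it also syncs recycled pages for the device, up to max_len bytes. A minimal sketch of such a setup follows; it is illustrative only, not mlx5e code, and dma_dev, rxq_size and rx_pool_example() are placeholders.

	/* Sketch only (placeholders, not mlx5e code): create a pool that owns
	 * DMA mapping for an RX queue and hand it back to the caller.
	 */
	#include <net/page_pool.h>

	static int rx_pool_example(struct device *dma_dev, int rxq_size,
				   struct page_pool **out)
	{
		struct page_pool_params pp_params = { 0 };
		struct page_pool *pool;

		pp_params.order     = 0;			/* single pages */
		pp_params.flags     = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
		pp_params.pool_size = rxq_size;			/* placeholder: RX ring size */
		pp_params.nid       = NUMA_NO_NODE;
		pp_params.dev       = dma_dev;			/* device doing the DMA */
		pp_params.dma_dir   = DMA_FROM_DEVICE;
		pp_params.max_len   = PAGE_SIZE;		/* sync length on recycle */

		pool = page_pool_create(&pp_params);
		if (IS_ERR(pool))
			return PTR_ERR(pool);

		*out = pool;
		return 0;
	}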

Signed-off-by: Dragos Tatulea <dtatulea@nvidia.com>
Reviewed-by: Tariq Toukan <tariqt@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
parent 08c9b61b
+0 −1
@@ -65,7 +65,6 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget);
 int mlx5e_poll_ico_cq(struct mlx5e_cq *cq);
 
 /* RX */
-void mlx5e_page_dma_unmap(struct mlx5e_rq *rq, struct page *page);
 void mlx5e_page_release_dynamic(struct mlx5e_rq *rq, struct page *page, bool recycle);
 INDIRECT_CALLABLE_DECLARE(bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq));
 INDIRECT_CALLABLE_DECLARE(bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq));
+0 −2
@@ -209,8 +209,6 @@ bool mlx5e_xdp_handle(struct mlx5e_rq *rq,
 			goto xdp_abort;
 		__set_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags);
 		__set_bit(MLX5E_RQ_FLAG_XDP_REDIRECT, rq->flags);
-		if (xdp->rxq->mem.type != MEM_TYPE_XSK_BUFF_POOL)
-			mlx5e_page_dma_unmap(rq, virt_to_page(xdp->data));
 		rq->stats->xdp_redirect++;
 		return true;
 	default:
+4 −2
@@ -733,7 +733,6 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params,
 			  struct mlx5e_rq_param *rqp,
 			  int node, struct mlx5e_rq *rq)
 {
-	struct page_pool_params pp_params = { 0 };
 	struct mlx5_core_dev *mdev = rq->mdev;
 	void *rqc = rqp->rqc;
 	void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
@@ -829,12 +828,15 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params,
 		xsk_pool_set_rxq_info(rq->xsk_pool, &rq->xdp_rxq);
 	} else {
 		/* Create a page_pool and register it with rxq */
+		struct page_pool_params pp_params = { 0 };
+
 		pp_params.order     = 0;
-		pp_params.flags     = 0; /* No-internal DMA mapping in page_pool */
+		pp_params.flags     = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
 		pp_params.pool_size = pool_size;
 		pp_params.nid       = node;
 		pp_params.dev       = rq->pdev;
 		pp_params.dma_dir   = rq->buff.map_dir;
+		pp_params.max_len   = PAGE_SIZE;
 
 		/* page_pool can be used even when there is no rq->xdp_prog,
 		 * given page_pool does not handle DMA mapping there is no
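
With the pool parameters above, the per-packet RX flow no longer calls the DMA mapping API for map/unmap; only the CPU-direction sync before reading the payload stays in the driver. A rough sketch of that flow (illustrative only, not mlx5e code; rq_pool, dma_dev, hw_ring, frag_len and post_rx_descriptor() are placeholders):

	struct page *page;
	dma_addr_t addr;

	page = page_pool_dev_alloc_pages(rq_pool);
	if (unlikely(!page))
		return -ENOMEM;

	/* The pool already dma_map()ed the page; just look the address up. */
	addr = page_pool_get_dma_addr(page);
	post_rx_descriptor(hw_ring, addr, PAGE_SIZE);	/* placeholder helper */

	/* On completion, sync the received bytes for the CPU before use. */
	dma_sync_single_range_for_cpu(dma_dev, addr, 0, frag_len, DMA_FROM_DEVICE);

	/* Recycling syncs the page for the device inside the pool
	 * (PP_FLAG_DMA_SYNC_DEV), so no unmap/remap cycle is needed.
	 */
	page_pool_recycle_direct(rq_pool, page);
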
+0 −22
@@ -273,40 +273,18 @@ static inline u32 mlx5e_decompress_cqes_start(struct mlx5e_rq *rq,

 static inline int mlx5e_page_alloc_pool(struct mlx5e_rq *rq, struct page **pagep)
 {
-	dma_addr_t addr;
-
 	*pagep = page_pool_dev_alloc_pages(rq->page_pool);
 	if (unlikely(!*pagep))
 		return -ENOMEM;
 
-	/* Non-XSK always uses PAGE_SIZE. */
-	addr = dma_map_page(rq->pdev, *pagep, 0, PAGE_SIZE, rq->buff.map_dir);
-	if (unlikely(dma_mapping_error(rq->pdev, addr))) {
-		page_pool_recycle_direct(rq->page_pool, *pagep);
-		*pagep = NULL;
-		return -ENOMEM;
-	}
-	page_pool_set_dma_addr(*pagep, addr);
-
 	return 0;
 }
 
-void mlx5e_page_dma_unmap(struct mlx5e_rq *rq, struct page *page)
-{
-	dma_addr_t dma_addr = page_pool_get_dma_addr(page);
-
-	dma_unmap_page_attrs(rq->pdev, dma_addr, PAGE_SIZE, rq->buff.map_dir,
-			     DMA_ATTR_SKIP_CPU_SYNC);
-	page_pool_set_dma_addr(page, 0);
-}
-
 void mlx5e_page_release_dynamic(struct mlx5e_rq *rq, struct page *page, bool recycle)
 {
 	if (likely(recycle)) {
-		mlx5e_page_dma_unmap(rq, page);
 		page_pool_recycle_direct(rq->page_pool, page);
 	} else {
-		mlx5e_page_dma_unmap(rq, page);
 		page_pool_release_page(rq->page_pool, page);
 		put_page(page);
 	}