Commit 578fcfd2 authored by Somnath Kotur, committed by Jakub Kicinski

bnxt_en: Let the page pool manage the DMA mapping

Use the page pool's ability to maintain DMA mappings for us.
This avoids re-mapping of the recycled pages.
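
With PP_FLAG_DMA_MAP the page pool maps each page once when it enters the
pool and keeps that mapping alive while the page is recycled;
PP_FLAG_DMA_SYNC_DEV additionally has the pool sync recycled pages for the
device, leaving only the CPU-side sync to the driver. A minimal sketch of
such a pool setup (illustrative only; example_create_pool and its
pool_size value are hypothetical, not taken from the driver):

#include <net/page_pool.h>

/* Sketch: create a page pool that owns the DMA mapping of its pages. */
static struct page_pool *example_create_pool(struct device *dev,
					     enum dma_data_direction dir)
{
	struct page_pool_params pp = { 0 };

	pp.pool_size = 1024;	/* illustrative ring size */
	pp.nid = dev_to_node(dev);
	pp.dev = dev;
	pp.dma_dir = dir;	/* must match the RX DMA direction */
	pp.max_len = PAGE_SIZE;	/* bytes the pool syncs back for the device */
	pp.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;

	return page_pool_create(&pp);
}

Pages allocated from such a pool carry a pool-owned mapping that the
driver can read back with page_pool_get_dma_addr(), as the diff below does.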

Link: https://lore.kernel.org/netdev/20230728231829.235716-4-michael.chan@broadcom.com/
Signed-off-by: Somnath Kotur <somnath.kotur@broadcom.com>
Signed-off-by: Michael Chan <michael.chan@broadcom.com>
Link: https://lore.kernel.org/r/20230817231911.165035-3-michael.chan@broadcom.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent 86b05508
+10 −22
@@ -761,7 +761,6 @@ static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
 					 unsigned int *offset,
 					 gfp_t gfp)
 {
-	struct device *dev = &bp->pdev->dev;
 	struct page *page;
 
 	if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
@@ -774,12 +773,7 @@ static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
 	if (!page)
 		return NULL;
 
-	*mapping = dma_map_page_attrs(dev, page, *offset, BNXT_RX_PAGE_SIZE,
-				      bp->rx_dir, DMA_ATTR_WEAK_ORDERING);
-	if (dma_mapping_error(dev, *mapping)) {
-		page_pool_recycle_direct(rxr->page_pool, page);
-		return NULL;
-	}
+	*mapping = page_pool_get_dma_addr(page) + *offset;
 	return page;
 }
 
@@ -998,8 +992,8 @@ static struct sk_buff *bnxt_rx_multi_page_skb(struct bnxt *bp,
 		return NULL;
 	}
 	dma_addr -= bp->rx_dma_offset;
-	dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, BNXT_RX_PAGE_SIZE,
-			     bp->rx_dir, DMA_ATTR_WEAK_ORDERING);
+	dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, BNXT_RX_PAGE_SIZE,
+				bp->rx_dir);
 	skb = build_skb(data_ptr - bp->rx_offset, BNXT_RX_PAGE_SIZE);
 	if (!skb) {
 		page_pool_recycle_direct(rxr->page_pool, page);
@@ -1032,8 +1026,8 @@ static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
 		return NULL;
 	}
 	dma_addr -= bp->rx_dma_offset;
-	dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, BNXT_RX_PAGE_SIZE,
-			     bp->rx_dir, DMA_ATTR_WEAK_ORDERING);
+	dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, BNXT_RX_PAGE_SIZE,
+				bp->rx_dir);
 
 	if (unlikely(!payload))
 		payload = eth_get_headlen(bp->dev, data_ptr, len);
@@ -1149,9 +1143,8 @@ static u32 __bnxt_rx_agg_pages(struct bnxt *bp,
 			return 0;
 		}
 
-		dma_unmap_page_attrs(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE,
-				     bp->rx_dir,
-				     DMA_ATTR_WEAK_ORDERING);
+		dma_sync_single_for_cpu(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE,
+					bp->rx_dir);
 
 		total_frag_len += frag_len;
 		prod = NEXT_RX_AGG(prod);
@@ -2947,10 +2940,6 @@ static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr)
 
 		rx_buf->data = NULL;
 		if (BNXT_RX_PAGE_MODE(bp)) {
-			mapping -= bp->rx_dma_offset;
-			dma_unmap_page_attrs(&pdev->dev, mapping,
-					     BNXT_RX_PAGE_SIZE, bp->rx_dir,
-					     DMA_ATTR_WEAK_ORDERING);
 			page_pool_recycle_direct(rxr->page_pool, data);
 		} else {
 			dma_unmap_single_attrs(&pdev->dev, mapping,
@@ -2971,9 +2960,6 @@ static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr)
 		if (!page)
 			continue;
 
-		dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping,
-				     BNXT_RX_PAGE_SIZE, bp->rx_dir,
-				     DMA_ATTR_WEAK_ORDERING);
 		rx_agg_buf->page = NULL;
 		__clear_bit(i, rxr->rx_agg_bmap);
 
@@ -3205,7 +3191,9 @@ static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
 	pp.nid = dev_to_node(&bp->pdev->dev);
 	pp.napi = &rxr->bnapi->napi;
 	pp.dev = &bp->pdev->dev;
-	pp.dma_dir = DMA_BIDIRECTIONAL;
+	pp.dma_dir = bp->rx_dir;
+	pp.max_len = PAGE_SIZE;
+	pp.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
 	if (PAGE_SIZE > BNXT_RX_PAGE_SIZE)
 		pp.flags |= PP_FLAG_PAGE_FRAG;
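
After this conversion, the RX paths read the pool-owned mapping back with
page_pool_get_dma_addr() and only sync the buffer for the CPU instead of
unmapping it. A minimal sketch of that consumer pattern (illustrative only;
example_rx_complete is hypothetical, not the driver's code):

/* Sketch: consume a pool-mapped RX page without unmapping it. */
static void example_rx_complete(struct device *dev, struct page_pool *pool,
				struct page *page, unsigned int offset,
				unsigned int len, enum dma_data_direction dir)
{
	dma_addr_t dma = page_pool_get_dma_addr(page) + offset;

	/* Make the device-written bytes visible to the CPU. */
	dma_sync_single_for_cpu(dev, dma, len, dir);

	/* ... consume the data, then recycle; the mapping stays alive ... */
	page_pool_recycle_direct(pool, page);
}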