Commit 9a6aa350 authored by Andy Gospodarek, committed by David S. Miller

bnxt: add page_pool support for aggregation ring when using xdp



If we are using aggregation rings with XDP enabled, allocate page
buffers for the aggregation rings from the page_pool.

Signed-off-by: Andy Gospodarek <gospo@broadcom.com>
Signed-off-by: Michael Chan <michael.chan@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 32861236
Changes: drivers/net/ethernet/broadcom/bnxt/bnxt.c — 47 additions, 30 deletions
(Unified diff follows; note: the +/- change markers were lost in extraction, so some hunks show the removed and added lines adjacent to each other.)
@@ -739,7 +739,6 @@ static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
		page_pool_recycle_direct(rxr->page_pool, page);
		return NULL;
	}
	*mapping += bp->rx_dma_offset;
	return page;
}

@@ -781,6 +780,7 @@ int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
		if (!page)
			return -ENOMEM;

		mapping += bp->rx_dma_offset;
		rx_buf->data = page;
		rx_buf->data_ptr = page_address(page) + bp->rx_offset;
	} else {
@@ -841,6 +841,13 @@ static inline int bnxt_alloc_rx_page(struct bnxt *bp,
	u16 sw_prod = rxr->rx_sw_agg_prod;
	unsigned int offset = 0;

	if (BNXT_RX_PAGE_MODE(bp)) {
		page = __bnxt_alloc_rx_page(bp, &mapping, rxr, gfp);

		if (!page)
			return -ENOMEM;

	} else {
		if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
			page = rxr->rx_page;
			if (!page) {
@@ -869,6 +876,7 @@ static inline int bnxt_alloc_rx_page(struct bnxt *bp,
			__free_page(page);
			return -EIO;
		}
	}

	if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
		sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);
@@ -1105,7 +1113,7 @@ static u32 __bnxt_rx_agg_pages(struct bnxt *bp,
		}

		dma_unmap_page_attrs(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE,
				     DMA_FROM_DEVICE,
				     bp->rx_dir,
				     DMA_ATTR_WEAK_ORDERING);

		total_frag_len += frag_len;
@@ -2936,15 +2944,24 @@ static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr)
		if (!page)
			continue;

		if (BNXT_RX_PAGE_MODE(bp)) {
			dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping,
					     BNXT_RX_PAGE_SIZE, bp->rx_dir,
					     DMA_ATTR_WEAK_ORDERING);
			rx_agg_buf->page = NULL;
			__clear_bit(i, rxr->rx_agg_bmap);

			page_pool_recycle_direct(rxr->page_pool, page);
		} else {
			dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping,
					     BNXT_RX_PAGE_SIZE, DMA_FROM_DEVICE,
					     DMA_ATTR_WEAK_ORDERING);

			rx_agg_buf->page = NULL;
			__clear_bit(i, rxr->rx_agg_bmap);

			__free_page(page);
		}
	}

skip_rx_agg_free:
	if (rxr->rx_page) {