Commit 1dc4c557 authored by Andy Gospodarek, committed by David S. Miller

bnxt: adding bnxt_xdp_build_skb to build skb from multibuffer xdp_buff



Since we have an xdp_buff with frags, there needs to be a way to
convert that into a valid sk_buff in the event that XDP_PASS is
the resulting verdict.  This adds a new rx_skb_func for use when the
netdev has an MTU that prevents the packets from fitting in a
single page.
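
The general pattern, sketched below, is to build the linear skb from
the head page and then graft the xdp_buff's frags onto it through its
shared info.  This is an illustrative sketch only, not the bnxt code
(which follows in the diff); "buf_size" is a hypothetical parameter
standing in for a driver constant such as BNXT_PAGE_MODE_BUF_SIZE.

#include <linux/skbuff.h>
#include <net/xdp.h>

/* Illustrative sketch: convert a multi-buffer xdp_buff into an sk_buff
 * after XDP_PASS.  Assumes the head buffer was sized so skb_shared_info
 * fits in the tail of the page; "buf_size" is a hypothetical stand-in
 * for a driver constant such as BNXT_PAGE_MODE_BUF_SIZE.
 */
static struct sk_buff *xdp_frags_to_skb(struct xdp_buff *xdp,
					unsigned int buf_size)
{
	struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
	unsigned int headroom = xdp->data - xdp->data_hard_start;
	unsigned int len = xdp->data_end - xdp->data;
	struct sk_buff *skb;

	/* The head page becomes the linear part of the skb. */
	skb = build_skb(xdp->data_hard_start, buf_size + headroom);
	if (!skb)
		return NULL;
	skb_reserve(skb, headroom);
	__skb_put(skb, len);

	/* Hand the fragment pages tracked in the xdp_buff's shared
	 * info over to the skb in one step.
	 */
	if (xdp_buff_has_frags(xdp))
		xdp_update_skb_shared_info(skb, sinfo->nr_frags,
					   sinfo->xdp_frags_size,
					   PAGE_SIZE * sinfo->nr_frags,
					   xdp_buff_is_frag_pfmemalloc(xdp));
	return skb;
}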

This also makes sure that GRO/LRO stay disabled even when using
the aggregation ring for large buffers.
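
The enforcement point is the standard .ndo_fix_features hook; below is
a minimal, driver-agnostic sketch of that pattern (demo_priv and
tpa_enabled are hypothetical stand-ins for the bnxt private struct and
its BNXT_FLAG_TPA test).

#include <linux/netdevice.h>

/* Hypothetical private struct; tpa_enabled stands in for bnxt's
 * BNXT_FLAG_TPA test. */
struct demo_priv {
	bool tpa_enabled;
};

/* Illustrative .ndo_fix_features handler: with hardware aggregation
 * (TPA) unavailable, LRO and hardware GRO must stay masked off no
 * matter what userspace requests. */
static netdev_features_t demo_fix_features(struct net_device *dev,
					   netdev_features_t features)
{
	struct demo_priv *priv = netdev_priv(dev);

	if (!priv->tpa_enabled)
		features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);

	/* Hardware GRO is only meaningful when software GRO is on. */
	if (!(features & NETIF_F_GRO))
		features &= ~NETIF_F_GRO_HW;

	return features;
}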

v3: Use BNXT_PAGE_MODE_BUF_SIZE for build_skb

Signed-off-by: Andy Gospodarek <gospo@broadcom.com>
Signed-off-by: Michael Chan <michael.chan@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 9a6aa350
drivers/net/ethernet/broadcom/bnxt/bnxt.c  +58 −7
@@ -971,6 +971,39 @@ static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 idx,
 	rxr->rx_sw_agg_prod = sw_prod;
 }
 
+static struct sk_buff *bnxt_rx_multi_page_skb(struct bnxt *bp,
+					      struct bnxt_rx_ring_info *rxr,
+					      u16 cons, void *data, u8 *data_ptr,
+					      dma_addr_t dma_addr,
+					      unsigned int offset_and_len)
+{
+	unsigned int len = offset_and_len & 0xffff;
+	struct page *page = data;
+	u16 prod = rxr->rx_prod;
+	struct sk_buff *skb;
+	int err;
+
+	err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
+	if (unlikely(err)) {
+		bnxt_reuse_rx_data(rxr, cons, data);
+		return NULL;
+	}
+	dma_addr -= bp->rx_dma_offset;
+	dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir,
+			     DMA_ATTR_WEAK_ORDERING);
+	skb = build_skb(page_address(page), BNXT_PAGE_MODE_BUF_SIZE +
+					    bp->rx_dma_offset);
+	if (!skb) {
+		__free_page(page);
+		return NULL;
+	}
+	skb_mark_for_recycle(skb);
+	skb_reserve(skb, bp->rx_dma_offset);
+	__skb_put(skb, len);
+
+	return skb;
+}
+
 static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
 					struct bnxt_rx_ring_info *rxr,
 					u16 cons, void *data, u8 *data_ptr,
@@ -993,7 +1026,6 @@ static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
 	dma_addr -= bp->rx_dma_offset;
 	dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir,
 			     DMA_ATTR_WEAK_ORDERING);
-	page_pool_release_page(rxr->page_pool, page);
 
 	if (unlikely(!payload))
 		payload = eth_get_headlen(bp->dev, data_ptr, len);
@@ -1004,6 +1036,7 @@ static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
 		return NULL;
 	}
 
+	skb_mark_for_recycle(skb);
 	off = (void *)data_ptr - page_address(page);
 	skb_add_rx_frag(skb, 0, page, off, len, PAGE_SIZE);
 	memcpy(skb->data - NET_IP_ALIGN, data_ptr - NET_IP_ALIGN,
@@ -1949,6 +1982,14 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
 				rc = -ENOMEM;
 				goto next_rx;
 			}
+		} else {
+			skb = bnxt_xdp_build_skb(bp, skb, agg_bufs, rxr->page_pool, &xdp, rxcmp1);
+			if (!skb) {
+				/* we should be able to free the old skb here */
+				cpr->sw_stats.rx.rx_oom_discards += 1;
+				rc = -ENOMEM;
+				goto next_rx;
+			}
 		}
 	}
 
@@ -3964,14 +4005,21 @@ void bnxt_set_ring_params(struct bnxt *bp)
 int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
 {
 	if (page_mode) {
-		if (bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU)
-			return -EOPNOTSUPP;
-		bp->flags &= ~BNXT_FLAG_AGG_RINGS;
-		bp->flags |= BNXT_FLAG_NO_AGG_RINGS | BNXT_FLAG_RX_PAGE_MODE;
-		bp->dev->max_mtu =
-			min_t(u16, bp->max_mtu, BNXT_MAX_PAGE_MODE_MTU);
+		bp->flags &= ~BNXT_FLAG_AGG_RINGS;
+		bp->flags |= BNXT_FLAG_RX_PAGE_MODE;
+
+		if (bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU) {
+			bp->flags |= BNXT_FLAG_JUMBO;
+			bp->rx_skb_func = bnxt_rx_multi_page_skb;
+			bp->dev->max_mtu =
+				min_t(u16, bp->max_mtu, BNXT_MAX_MTU);
+		} else {
+			bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
+			bp->rx_skb_func = bnxt_rx_page_skb;
+			bp->dev->max_mtu =
+				min_t(u16, bp->max_mtu, BNXT_MAX_PAGE_MODE_MTU);
+		}
 		bp->rx_dir = DMA_BIDIRECTIONAL;
-		bp->rx_skb_func = bnxt_rx_page_skb;
 		/* Disable LRO or GRO_HW */
 		netdev_update_features(bp->dev);
 	} else {
@@ -11121,6 +11169,9 @@ static netdev_features_t bnxt_fix_features(struct net_device *dev,
 	if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
 		features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
 
+	if (!(bp->flags & BNXT_FLAG_TPA))
+		features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
+
 	if (!(features & NETIF_F_GRO))
 		features &= ~NETIF_F_GRO_HW;
 
drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c  +23 −0
@@ -361,3 +361,26 @@ int bnxt_xdp(struct net_device *dev, struct netdev_bpf *xdp)
 	}
 	return rc;
 }
+
+struct sk_buff *
+bnxt_xdp_build_skb(struct bnxt *bp, struct sk_buff *skb, u8 num_frags,
+		   struct page_pool *pool, struct xdp_buff *xdp,
+		   struct rx_cmp_ext *rxcmp1)
+{
+	struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
+
+	if (!skb)
+		return NULL;
+	skb_checksum_none_assert(skb);
+	if (RX_CMP_L4_CS_OK(rxcmp1)) {
+		if (bp->dev->features & NETIF_F_RXCSUM) {
+			skb->ip_summed = CHECKSUM_UNNECESSARY;
+			skb->csum_level = RX_CMP_ENCAP(rxcmp1);
+		}
+	}
+	xdp_update_skb_shared_info(skb, num_frags,
+				   sinfo->xdp_frags_size,
+				   PAGE_SIZE * sinfo->nr_frags,
+				   xdp_buff_is_frag_pfmemalloc(xdp));
+	return skb;
+}
drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h  +4 −0
@@ -28,4 +28,8 @@ bool bnxt_xdp_attached(struct bnxt *bp, struct bnxt_rx_ring_info *rxr);
 void bnxt_xdp_buff_init(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
 			u16 cons, u8 **data_ptr, unsigned int *len,
 			struct xdp_buff *xdp);
+struct sk_buff *bnxt_xdp_build_skb(struct bnxt *bp, struct sk_buff *skb,
+				   u8 num_frags, struct page_pool *pool,
+				   struct xdp_buff *xdp,
+				   struct rx_cmp_ext *rxcmp1);
 #endif