Commit 50ae0664 authored by Maciej Fijalkowski, committed by Daniel Borkmann

ice, xsk: Terminate Rx side of NAPI when XSK Rx queue gets full



When the XSK pool uses the need_wakeup feature, correlate an -ENOBUFS
return from xdp_do_redirect() with the XSK Rx queue being full. In that
case, terminate the Rx processing being done on the current HW Rx ring
and let user space consume descriptors from the XSK Rx queue so that
the driver has room to work with later on.

Introduce a new internal return code, ICE_XDP_EXIT, to indicate the
case described above.

Note that this affects neither the Tx processing bound to the same NAPI
context nor the other Rx rings.
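
To illustrate the user-space side this relies on (not part of the patch): an
AF_XDP application drains its Rx ring and, when the driver has armed
need_wakeup, kicks the kernel so Rx processing resumes. Below is a minimal
sketch using the xsk.h helpers from libxdp (formerly libbpf); the socket, Rx
ring and fill queue are assumed to be set up elsewhere, and the 64-descriptor
batch size and the drain_rx_ring() name are illustrative only.

/*
 * Illustrative user-space consumer for an AF_XDP socket. Buffer recycling is
 * shown for aligned UMEM mode; real applications typically track the original
 * chunk addresses themselves.
 */
#include <sys/socket.h>
#include <xdp/xsk.h>	/* <bpf/xsk.h> with older libbpf */

static void drain_rx_ring(struct xsk_socket *xsk, struct xsk_ring_cons *rx,
			  struct xsk_ring_prod *fq)
{
	__u32 idx_rx = 0, idx_fq = 0, rcvd, i;

	/* Every descriptor consumed here frees a slot in the XSK Rx queue,
	 * which is exactly the room the driver waits for after ICE_XDP_EXIT.
	 */
	rcvd = xsk_ring_cons__peek(rx, 64, &idx_rx);
	if (!rcvd) {
		/* Driver may have stopped and armed need_wakeup; kick it. */
		if (xsk_ring_prod__needs_wakeup(fq))
			recvfrom(xsk_socket__fd(xsk), NULL, 0, MSG_DONTWAIT,
				 NULL, NULL);
		return;
	}

	/* Recycle the consumed buffers back to the fill queue (a real
	 * application would retry if the reservation comes up short).
	 */
	if (xsk_ring_prod__reserve(fq, rcvd, &idx_fq) == rcvd) {
		for (i = 0; i < rcvd; i++)
			*xsk_ring_prod__fill_addr(fq, idx_fq++) =
				xsk_ring_cons__rx_desc(rx, idx_rx++)->addr;
		xsk_ring_prod__submit(fq, rcvd);
	}

	xsk_ring_cons__release(rx, rcvd);
}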

Signed-off-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Link: https://lore.kernel.org/bpf/20220413153015.453864-6-maciej.fijalkowski@intel.com
parent d090c885
drivers/net/ethernet/intel/ice/ice_txrx.h (+1 −0)
@@ -133,6 +133,7 @@ static inline int ice_skb_pad(void)
 #define ICE_XDP_CONSUMED	BIT(0)
 #define ICE_XDP_TX		BIT(1)
 #define ICE_XDP_REDIR		BIT(2)
+#define ICE_XDP_EXIT		BIT(3)
 
 #define ICE_RX_DMA_ATTR \
 	(DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)
drivers/net/ethernet/intel/ice/ice_xsk.c (+19 −10)
@@ -540,9 +540,13 @@ ice_run_xdp_zc(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,

 	if (likely(act == XDP_REDIRECT)) {
 		err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
-		if (err)
-			goto out_failure;
-		return ICE_XDP_REDIR;
+		if (!err)
+			return ICE_XDP_REDIR;
+		if (xsk_uses_need_wakeup(rx_ring->xsk_pool) && err == -ENOBUFS)
+			result = ICE_XDP_EXIT;
+		else
+			result = ICE_XDP_CONSUMED;
+		goto out_failure;
 	}
 
 	switch (act) {
@@ -553,15 +557,16 @@ ice_run_xdp_zc(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
 		if (result == ICE_XDP_CONSUMED)
 			goto out_failure;
 		break;
+	case XDP_DROP:
+		result = ICE_XDP_CONSUMED;
+		break;
 	default:
 		bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act);
 		fallthrough;
 	case XDP_ABORTED:
+		result = ICE_XDP_CONSUMED;
 out_failure:
 		trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
-		fallthrough;
-	case XDP_DROP:
-		result = ICE_XDP_CONSUMED;
 		break;
 	}

@@ -629,12 +634,16 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
 		xsk_buff_dma_sync_for_cpu(xdp, rx_ring->xsk_pool);
 
 		xdp_res = ice_run_xdp_zc(rx_ring, xdp, xdp_prog, xdp_ring);
-		if (likely(xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR)))
+		if (likely(xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR))) {
 			xdp_xmit |= xdp_res;
-		else if (xdp_res == ICE_XDP_CONSUMED)
+		} else if (xdp_res == ICE_XDP_EXIT) {
+			failure = true;
+			break;
+		} else if (xdp_res == ICE_XDP_CONSUMED) {
 			xsk_buff_free(xdp);
-		else
+		} else if (xdp_res == ICE_XDP_PASS) {
 			goto construct_skb;
+		}
 
 		total_rx_bytes += size;
 		total_rx_packets++;
@@ -669,7 +678,7 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
 		ice_receive_skb(rx_ring, skb, vlan_tag);
 	}
 
-	failure = !ice_alloc_rx_bufs_zc(rx_ring, ICE_DESC_UNUSED(rx_ring));
+	failure |= !ice_alloc_rx_bufs_zc(rx_ring, ICE_DESC_UNUSED(rx_ring));
 
 	ice_finalize_xdp_rx(xdp_ring, xdp_xmit);
 	ice_update_rx_ring_stats(rx_ring, total_rx_packets, total_rx_bytes);
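
For context (not shown in this hunk), the failure flag feeds the existing tail
of ice_clean_rx_irq_zc(): with need_wakeup in use, a full XSK Rx queue or an
allocation shortfall arms the Rx need_wakeup flag so user space knows to drain
its rings and kick the driver; without need_wakeup, the full budget is returned
so NAPI keeps polling. A rough paraphrase of that tail, for illustration only:

	/* Sketch of the existing end of ice_clean_rx_irq_zc(), paraphrased
	 * for context; the exact upstream code may differ slightly.
	 */
	if (xsk_uses_need_wakeup(rx_ring->xsk_pool)) {
		/* Tell user space to drain its rings and wake us again. */
		if (failure || rx_ring->next_to_clean == rx_ring->next_to_use)
			xsk_set_rx_need_wakeup(rx_ring->xsk_pool);
		else
			xsk_clear_rx_need_wakeup(rx_ring->xsk_pool);

		return (int)total_rx_packets;
	}

	/* Without need_wakeup, report the full budget so NAPI polls again. */
	return failure ? budget : (int)total_rx_packets;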