Commit c0dbbdf5 authored by Paolo Abeni
Browse files

Merge branch 'net-fec-fix-some-issues-of-ndo_xdp_xmit'

Wei Fang says:

====================
net: fec: fix some issues of ndo_xdp_xmit()

We encountered some issues when testing the ndo_xdp_xmit() interface
of the fec driver on i.MX8MP and i.MX93 platforms. These issues are
easy to reproduce, and the specific reproduction steps are as follows.

step1: The ethernet port of a board (board A) is connected to the EQOS
port of i.MX8MP/i.MX93, and the FEC port of i.MX8MP/i.MX93 is connected
to another ethernet port, such as a switch port.

step2: Board A uses the pktgen_sample03_burst_single_flow.sh to generate
and send packets to i.MX8MP/i.MX93. The command is shown below.
./pktgen_sample03_burst_single_flow.sh -i eth0 -d 192.168.6.8 -m \
56:bf:0d:68:b0:9e -s 1500

step3: i.MX8MP/i.MX93 use the xdp_redirect bpf program to redirect the
XDP frames from EQOS port to FEC port. The command is shown below.
./xdp_redirect eth1 eth0

After a few moments, the warning or error logs will be printed in the
console, for more details, please refer to the commit message of each
patch.
====================

Link: https://lore.kernel.org/r/20230706081012.2278063-1-wei.fang@nxp.com


Signed-off-by: Paolo Abeni <pabeni@redhat.com>
parents 9d0aba98 84a10947
Loading
Loading
Loading
Loading
+15 −2
Original line number Diff line number Diff line
@@ -355,7 +355,7 @@ struct bufdesc_ex {
#define RX_RING_SIZE		(FEC_ENET_RX_FRPPG * FEC_ENET_RX_PAGES)
#define FEC_ENET_TX_FRSIZE	2048
#define FEC_ENET_TX_FRPPG	(PAGE_SIZE / FEC_ENET_TX_FRSIZE)
#define TX_RING_SIZE		512	/* Must be power of two */
#define TX_RING_SIZE		1024	/* Must be power of two */
#define TX_RING_MOD_MASK	511	/*   for this to work */

#define BD_ENET_RX_INT		0x00800000
@@ -544,10 +544,23 @@ enum {
	XDP_STATS_TOTAL,
};

/* Type tag for entries in the TX ring buffer bookkeeping array:
 * distinguishes ordinary socket buffers from XDP frames submitted
 * via the ndo_xdp_xmit() path, so completion/cleanup code knows
 * whether to dev_kfree_skb*() or xdp_return_frame().
 */
enum fec_txbuf_type {
	FEC_TXBUF_T_SKB,	/* entry holds a struct sk_buff * */
	FEC_TXBUF_T_XDP_NDO,	/* entry holds a struct xdp_frame * (ndo_xdp_xmit) */
};

/* Per-descriptor TX bookkeeping: at most one of @skb or @xdp is valid
 * at a time, selected by @type (see enum fec_txbuf_type). Replaces the
 * old skb-only tx_skbuff[] tracking so XDP frames can be reclaimed
 * correctly on TX completion and ring teardown.
 */
struct fec_tx_buffer {
	union {
		struct sk_buff *skb;	/* valid when type == FEC_TXBUF_T_SKB */
		struct xdp_frame *xdp;	/* valid when type == FEC_TXBUF_T_XDP_NDO */
	};
	enum fec_txbuf_type type;	/* selects the active union member */
};

struct fec_enet_priv_tx_q {
	struct bufdesc_prop bd;
	unsigned char *tx_bounce[TX_RING_SIZE];
	struct  sk_buff *tx_skbuff[TX_RING_SIZE];
	struct fec_tx_buffer tx_buf[TX_RING_SIZE];

	unsigned short tx_stop_threshold;
	unsigned short tx_wake_threshold;
+112 −54
Original line number Diff line number Diff line
@@ -397,7 +397,7 @@ static void fec_dump(struct net_device *ndev)
			fec16_to_cpu(bdp->cbd_sc),
			fec32_to_cpu(bdp->cbd_bufaddr),
			fec16_to_cpu(bdp->cbd_datlen),
			txq->tx_skbuff[index]);
			txq->tx_buf[index].skb);
		bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
		index++;
	} while (bdp != txq->bd.base);
@@ -654,7 +654,7 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,

	index = fec_enet_get_bd_index(last_bdp, &txq->bd);
	/* Save skb pointer */
	txq->tx_skbuff[index] = skb;
	txq->tx_buf[index].skb = skb;

	/* Make sure the updates to rest of the descriptor are performed before
	 * transferring ownership.
@@ -672,9 +672,7 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,

	skb_tx_timestamp(skb);

	/* Make sure the update to bdp and tx_skbuff are performed before
	 * txq->bd.cur.
	 */
	/* Make sure the update to bdp is performed before txq->bd.cur. */
	wmb();
	txq->bd.cur = bdp;

@@ -862,7 +860,7 @@ static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq,
	}

	/* Save skb pointer */
	txq->tx_skbuff[index] = skb;
	txq->tx_buf[index].skb = skb;

	skb_tx_timestamp(skb);
	txq->bd.cur = bdp;
@@ -952,16 +950,33 @@ static void fec_enet_bd_init(struct net_device *dev)
		for (i = 0; i < txq->bd.ring_size; i++) {
			/* Initialize the BD for every fragment in the page. */
			bdp->cbd_sc = cpu_to_fec16(0);
			if (txq->tx_buf[i].type == FEC_TXBUF_T_SKB) {
				if (bdp->cbd_bufaddr &&
				    !IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr)))
					dma_unmap_single(&fep->pdev->dev,
							 fec32_to_cpu(bdp->cbd_bufaddr),
							 fec16_to_cpu(bdp->cbd_datlen),
							 DMA_TO_DEVICE);
			if (txq->tx_skbuff[i]) {
				dev_kfree_skb_any(txq->tx_skbuff[i]);
				txq->tx_skbuff[i] = NULL;
				if (txq->tx_buf[i].skb) {
					dev_kfree_skb_any(txq->tx_buf[i].skb);
					txq->tx_buf[i].skb = NULL;
				}
			} else {
				if (bdp->cbd_bufaddr)
					dma_unmap_single(&fep->pdev->dev,
							 fec32_to_cpu(bdp->cbd_bufaddr),
							 fec16_to_cpu(bdp->cbd_datlen),
							 DMA_TO_DEVICE);

				if (txq->tx_buf[i].xdp) {
					xdp_return_frame(txq->tx_buf[i].xdp);
					txq->tx_buf[i].xdp = NULL;
				}

				/* restore default tx buffer type: FEC_TXBUF_T_SKB */
				txq->tx_buf[i].type = FEC_TXBUF_T_SKB;
			}

			bdp->cbd_bufaddr = cpu_to_fec32(0);
			bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
		}
@@ -1360,6 +1375,7 @@ static void
fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
{
	struct	fec_enet_private *fep;
	struct xdp_frame *xdpf;
	struct bufdesc *bdp;
	unsigned short status;
	struct	sk_buff	*skb;
@@ -1387,16 +1403,31 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)

		index = fec_enet_get_bd_index(bdp, &txq->bd);

		skb = txq->tx_skbuff[index];
		txq->tx_skbuff[index] = NULL;
		if (!IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr)))
		if (txq->tx_buf[index].type == FEC_TXBUF_T_SKB) {
			skb = txq->tx_buf[index].skb;
			txq->tx_buf[index].skb = NULL;
			if (bdp->cbd_bufaddr &&
			    !IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr)))
				dma_unmap_single(&fep->pdev->dev,
						 fec32_to_cpu(bdp->cbd_bufaddr),
						 fec16_to_cpu(bdp->cbd_datlen),
						 DMA_TO_DEVICE);
			bdp->cbd_bufaddr = cpu_to_fec32(0);
			if (!skb)
			goto skb_done;
				goto tx_buf_done;
		} else {
			xdpf = txq->tx_buf[index].xdp;
			if (bdp->cbd_bufaddr)
				dma_unmap_single(&fep->pdev->dev,
						 fec32_to_cpu(bdp->cbd_bufaddr),
						 fec16_to_cpu(bdp->cbd_datlen),
						 DMA_TO_DEVICE);
			bdp->cbd_bufaddr = cpu_to_fec32(0);
			if (!xdpf) {
				txq->tx_buf[index].type = FEC_TXBUF_T_SKB;
				goto tx_buf_done;
			}
		}

		/* Check for errors. */
		if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
@@ -1415,16 +1446,26 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
				ndev->stats.tx_carrier_errors++;
		} else {
			ndev->stats.tx_packets++;

			if (txq->tx_buf[index].type == FEC_TXBUF_T_SKB)
				ndev->stats.tx_bytes += skb->len;
			else
				ndev->stats.tx_bytes += xdpf->len;
		}

		/* Deferred means some collisions occurred during transmit,
		 * but we eventually sent the packet OK.
		 */
		if (status & BD_ENET_TX_DEF)
			ndev->stats.collisions++;

		if (txq->tx_buf[index].type == FEC_TXBUF_T_SKB) {
			/* NOTE: SKBTX_IN_PROGRESS being set does not imply it's we who
			 * are to time stamp the packet, so we still need to check time
			 * stamping enabled flag.
			 */
			if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS &&
			     fep->hwts_tx_en) &&
		    fep->bufdesc_ex) {
				     fep->hwts_tx_en) && fep->bufdesc_ex) {
				struct skb_shared_hwtstamps shhwtstamps;
				struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;

@@ -1432,16 +1473,18 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
				skb_tstamp_tx(skb, &shhwtstamps);
			}

		/* Deferred means some collisions occurred during transmit,
		 * but we eventually sent the packet OK.
		 */
		if (status & BD_ENET_TX_DEF)
			ndev->stats.collisions++;

			/* Free the sk buffer associated with this last transmit */
			dev_kfree_skb_any(skb);
skb_done:
		/* Make sure the update to bdp and tx_skbuff are performed
		} else {
			xdp_return_frame(xdpf);

			txq->tx_buf[index].xdp = NULL;
			/* restore default tx buffer type: FEC_TXBUF_T_SKB */
			txq->tx_buf[index].type = FEC_TXBUF_T_SKB;
		}

tx_buf_done:
		/* Make sure the update to bdp and tx_buf are performed
		 * before dirty_tx
		 */
		wmb();
@@ -3249,9 +3292,19 @@ static void fec_enet_free_buffers(struct net_device *ndev)
		for (i = 0; i < txq->bd.ring_size; i++) {
			kfree(txq->tx_bounce[i]);
			txq->tx_bounce[i] = NULL;
			skb = txq->tx_skbuff[i];
			txq->tx_skbuff[i] = NULL;

			if (txq->tx_buf[i].type == FEC_TXBUF_T_SKB) {
				skb = txq->tx_buf[i].skb;
				txq->tx_buf[i].skb = NULL;
				dev_kfree_skb(skb);
			} else {
				if (txq->tx_buf[i].xdp) {
					xdp_return_frame(txq->tx_buf[i].xdp);
					txq->tx_buf[i].xdp = NULL;
				}

				txq->tx_buf[i].type = FEC_TXBUF_T_SKB;
			}
		}
	}
}
@@ -3296,8 +3349,7 @@ static int fec_enet_alloc_queue(struct net_device *ndev)
		fep->total_tx_ring_size += fep->tx_queue[i]->bd.ring_size;

		txq->tx_stop_threshold = FEC_MAX_SKB_DESCS;
		txq->tx_wake_threshold =
			(txq->bd.ring_size - txq->tx_stop_threshold) / 2;
		txq->tx_wake_threshold = FEC_MAX_SKB_DESCS + 2 * MAX_SKB_FRAGS;

		txq->tso_hdrs = dma_alloc_coherent(&fep->pdev->dev,
					txq->bd.ring_size * TSO_HEADER_SIZE,
@@ -3732,12 +3784,18 @@ static int fec_enet_bpf(struct net_device *dev, struct netdev_bpf *bpf)
		if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
			return -EOPNOTSUPP;

		if (!bpf->prog)
			xdp_features_clear_redirect_target(dev);

		if (is_run) {
			napi_disable(&fep->napi);
			netif_tx_disable(dev);
		}

		old_prog = xchg(&fep->xdp_prog, bpf->prog);
		if (old_prog)
			bpf_prog_put(old_prog);

		fec_restart(dev);

		if (is_run) {
@@ -3745,8 +3803,8 @@ static int fec_enet_bpf(struct net_device *dev, struct netdev_bpf *bpf)
			netif_tx_start_all_queues(dev);
		}

		if (old_prog)
			bpf_prog_put(old_prog);
		if (bpf->prog)
			xdp_features_set_redirect_target(dev, false);

		return 0;

@@ -3778,7 +3836,7 @@ static int fec_enet_txq_xmit_frame(struct fec_enet_private *fep,

	entries_free = fec_enet_get_free_txdesc_num(txq);
	if (entries_free < MAX_SKB_FRAGS + 1) {
		netdev_err(fep->netdev, "NOT enough BD for SG!\n");
		netdev_err_once(fep->netdev, "NOT enough BD for SG!\n");
		return -EBUSY;
	}

@@ -3811,7 +3869,8 @@ static int fec_enet_txq_xmit_frame(struct fec_enet_private *fep,
		ebdp->cbd_esc = cpu_to_fec32(estatus);
	}

	txq->tx_skbuff[index] = NULL;
	txq->tx_buf[index].type = FEC_TXBUF_T_XDP_NDO;
	txq->tx_buf[index].xdp = frame;

	/* Make sure the updates to rest of the descriptor are performed before
	 * transferring ownership.
@@ -4016,8 +4075,7 @@ static int fec_enet_init(struct net_device *ndev)

	if (!(fep->quirks & FEC_QUIRK_SWAP_FRAME))
		ndev->xdp_features = NETDEV_XDP_ACT_BASIC |
				     NETDEV_XDP_ACT_REDIRECT |
				     NETDEV_XDP_ACT_NDO_XMIT;
				     NETDEV_XDP_ACT_REDIRECT;

	fec_restart(ndev);