Commit 4033eaa6 authored by Paolo Abeni
Browse files

Merge branch 'net-use-read_once-write_once-for-ring-index-accesses'

Jakub Kicinski says:

====================
net: use READ_ONCE/WRITE_ONCE for ring index accesses

Small follow up to the lockless ring stop/start macros.
Update the doc and the drivers suggested by Eric:
https://lore.kernel.org/all/CANn89iJrBGSybMX1FqrhCEMWT3Nnz2=2+aStsbbwpWzKHjk51g@mail.gmail.com/

====================

Link: https://lore.kernel.org/r/20230412015038.674023-1-kuba@kernel.org


Signed-off-by: Paolo Abeni <pabeni@redhat.com>
parents 21cdc87f 9a714997
Loading
Loading
Loading
Loading
+27 −34
Original line number Diff line number Diff line
@@ -47,30 +47,43 @@ for a driver implementing scatter-gather this means:

.. code-block:: c

	static u32 drv_tx_avail(struct drv_ring *dr)
	{
		u32 used = READ_ONCE(dr->prod) - READ_ONCE(dr->cons);

		return dr->tx_ring_size - (used & dr->tx_ring_mask);
	}

	static netdev_tx_t drv_hard_start_xmit(struct sk_buff *skb,
					       struct net_device *dev)
	{
		struct drv *dp = netdev_priv(dev);
		struct netdev_queue *txq;
		struct drv_ring *dr;
		int idx;

		idx = skb_get_queue_mapping(skb);
		dr = dp->tx_rings[idx];
		txq = netdev_get_tx_queue(dev, idx);

		lock_tx(dp);
		//...
		/* This is a hard error log it. */
		if (TX_BUFFS_AVAIL(dp) <= (skb_shinfo(skb)->nr_frags + 1)) {
		/* This should be a very rare race - log it. */
		if (drv_tx_avail(dr) <= skb_shinfo(skb)->nr_frags + 1) {
			netif_stop_queue(dev);
			unlock_tx(dp);
			printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
			       dev->name);
			netdev_warn(dev, "Tx Ring full when queue awake!\n");
			return NETDEV_TX_BUSY;
		}

		//... queue packet to card ...
		//... update tx consumer index ...

		if (TX_BUFFS_AVAIL(dp) <= (MAX_SKB_FRAGS + 1))
			netif_stop_queue(dev);
		netdev_tx_sent_queue(txq, skb->len);

		//... update tx producer index using WRITE_ONCE() ...

		if (!netif_txq_maybe_stop(txq, drv_tx_avail(dr),
					  MAX_SKB_FRAGS + 1, 2 * MAX_SKB_FRAGS))
			dr->stats.stopped++;

		//...
		unlock_tx(dp);
		//...
		return NETDEV_TX_OK;
	}
@@ -79,30 +92,10 @@ And then at the end of your TX reclamation event handling:

.. code-block:: c

	if (netif_queue_stopped(dp->dev) &&
	    TX_BUFFS_AVAIL(dp) > (MAX_SKB_FRAGS + 1))
		netif_wake_queue(dp->dev);

For a non-scatter-gather supporting card, the three tests simply become:

.. code-block:: c

		/* This is a hard error log it. */
		if (TX_BUFFS_AVAIL(dp) <= 0)

and:

.. code-block:: c

		if (TX_BUFFS_AVAIL(dp) == 0)

and:

.. code-block:: c
	//... update tx consumer index using WRITE_ONCE() ...

	if (netif_queue_stopped(dp->dev) &&
	    TX_BUFFS_AVAIL(dp) > 0)
		netif_wake_queue(dp->dev);
	netif_txq_completed_wake(txq, cmpl_pkts, cmpl_bytes,
				 drv_tx_avail(dr), 2 * MAX_SKB_FRAGS);

Lockless queue stop / wake helper macros
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+3 −3
Original line number Diff line number Diff line
@@ -472,7 +472,7 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
		prod = NEXT_TX(prod);
		tx_push->doorbell =
			cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | prod);
		txr->tx_prod = prod;
		WRITE_ONCE(txr->tx_prod, prod);

		tx_buf->is_push = 1;
		netdev_tx_sent_queue(txq, skb->len);
@@ -583,7 +583,7 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
	wmb();

	prod = NEXT_TX(prod);
	txr->tx_prod = prod;
	WRITE_ONCE(txr->tx_prod, prod);

	if (!netdev_xmit_more() || netif_xmit_stopped(txq))
		bnxt_txr_db_kick(bp, txr, prod);
@@ -688,7 +688,7 @@ static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
		dev_kfree_skb_any(skb);
	}

	txr->tx_cons = cons;
	WRITE_ONCE(txr->tx_cons, cons);

	__netif_txq_completed_wake(txq, nr_pkts, tx_bytes,
				   bnxt_tx_avail(bp, txr), bp->tx_wake_thresh,
+4 −5
Original line number Diff line number Diff line
@@ -2231,13 +2231,12 @@ struct bnxt {
#define SFF_MODULE_ID_QSFP28			0x11
#define BNXT_MAX_PHY_I2C_RESP_SIZE		64

static inline u32 bnxt_tx_avail(struct bnxt *bp, struct bnxt_tx_ring_info *txr)
static inline u32 bnxt_tx_avail(struct bnxt *bp,
				const struct bnxt_tx_ring_info *txr)
{
	/* Tell compiler to fetch tx indices from memory. */
	barrier();
	u32 used = READ_ONCE(txr->tx_prod) - READ_ONCE(txr->tx_cons);

	return bp->tx_ring_size -
		((txr->tx_prod - txr->tx_cons) & bp->tx_ring_mask);
	return bp->tx_ring_size - (used & bp->tx_ring_mask);
}

static inline void bnxt_writeq(struct bnxt *bp, u64 val,
+3 −3
Original line number Diff line number Diff line
@@ -64,7 +64,7 @@ struct bnxt_sw_tx_bd *bnxt_xmit_bd(struct bnxt *bp,
		int frag_len;

		prod = NEXT_TX(prod);
		txr->tx_prod = prod;
		WRITE_ONCE(txr->tx_prod, prod);

		/* first fill up the first buffer */
		frag_tx_buf = &txr->tx_buf_ring[prod];
@@ -94,7 +94,7 @@ struct bnxt_sw_tx_bd *bnxt_xmit_bd(struct bnxt *bp,
	/* Sync TX BD */
	wmb();
	prod = NEXT_TX(prod);
	txr->tx_prod = prod;
	WRITE_ONCE(txr->tx_prod, prod);

	return tx_buf;
}
@@ -161,7 +161,7 @@ void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
		}
		tx_cons = NEXT_TX(tx_cons);
	}
	txr->tx_cons = tx_cons;
	WRITE_ONCE(txr->tx_cons, tx_cons);
	if (rx_doorbell_needed) {
		tx_buf = &txr->tx_buf_ring[last_tx_cons];
		bnxt_db_write(bp, &rxr->rx_db, tx_buf->rx_prod);
+5 −3
Original line number Diff line number Diff line
@@ -228,7 +228,9 @@ void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv,

static inline bool mlx4_en_is_tx_ring_full(struct mlx4_en_tx_ring *ring)
{
	return ring->prod - ring->cons > ring->full_size;
	u32 used = READ_ONCE(ring->prod) - READ_ONCE(ring->cons);

	return used > ring->full_size;
}

static void mlx4_en_stamp_wqe(struct mlx4_en_priv *priv,
@@ -1083,7 +1085,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
			op_own |= cpu_to_be32(MLX4_WQE_CTRL_IIP);
	}

	ring->prod += nr_txbb;
	WRITE_ONCE(ring->prod, ring->prod + nr_txbb);

	/* If we used a bounce buffer then copy descriptor back into place */
	if (unlikely(bounce))
@@ -1214,7 +1216,7 @@ netdev_tx_t mlx4_en_xmit_frame(struct mlx4_en_rx_ring *rx_ring,

	rx_ring->xdp_tx++;

	ring->prod += MLX4_EN_XDP_TX_NRTXBB;
	WRITE_ONCE(ring->prod, ring->prod + MLX4_EN_XDP_TX_NRTXBB);

	/* Ensure new descriptor hits memory
	 * before setting ownership of this descriptor to HW