Commit 133466c3 authored by Jisheng Zhang, committed by Jakub Kicinski

net: stmmac: use per-queue 64 bit statistics where necessary



Currently, there are two major issues with stmmac driver statistics.

First of all, statistics in stmmac_extra_stats, stmmac_rxq_stats
and stmmac_txq_stats are 32 bit variables on 32 bit platforms. This
can cause some stats, for example rx_pkt_n and tx_pkt_n, to overflow
after several minutes of high traffic.
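As a rough worked example: at 10GbE line rate with minimum-size
frames (about 14.88 Mpps), a 32 bit packet counter wraps after
2^32 / 14.88e6 ≈ 289 seconds, i.e. in under five minutes.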

Secondly, if the HW supports multiple queues, there are frequent
cacheline ping pongs on some driver statistic vars, for example
normal_irq_n, tx_pkt_n and so on. What's more, the cacheline ping
pongs on normal_irq_n happen in the ISR, which makes the situation
worse.
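Keeping each queue's counters in that queue's own structure, updated
under a u64_stats_sync, removes the sharing; the update pattern used
throughout the diff below is:

	u64_stats_update_begin(&rx_q->rxq_stats.syncp);
	rx_q->rxq_stats.rx_normal_irq_n++;
	u64_stats_update_end(&rx_q->rxq_stats.syncp);

On 64 bit kernels u64_stats_update_begin/end compile to nothing; on
32 bit SMP they wrap the counters in a seqcount so readers cannot
observe a torn 64 bit value.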

To improve the driver, we convert those statistics to 64 bit, implement
ndo_get_stats64 and update the .get_ethtool_stats implementation
accordingly. We also use per-queue statistics where necessary to remove
the cacheline ping pongs as much as possible and make multiqueue
operations faster. Statistics which cannot overflow and are not
frequently updated are kept as is.
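On the reader side (the ndo_get_stats64 and ethtool paths, not in the
hunks shown here), each per-queue counter is sampled with the usual
u64_stats read loop. A minimal sketch of that pattern, assuming an
rx_q queue pointer and a struct rtnl_link_stats64 *stats destination:

	unsigned int start;
	u64 packets, bytes;

	do {
		start = u64_stats_fetch_begin(&rx_q->rxq_stats.syncp);
		packets = rx_q->rxq_stats.rx_packets;
		bytes = rx_q->rxq_stats.rx_bytes;
	} while (u64_stats_fetch_retry(&rx_q->rxq_stats.syncp, start));

	stats->rx_packets += packets;
	stats->rx_bytes += bytes;

The retry loop can only spin on 32 bit SMP, where a writer may be
mid-update when the reader samples the counters.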

Signed-off-by: Jisheng Zhang <jszhang@kernel.org>
Link: https://lore.kernel.org/r/20230717160630.1892-3-jszhang@kernel.org


Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent 2eb85b75
drivers/net/ethernet/stmicro/stmmac/common.h: +21 −18
@@ -59,13 +59,25 @@
 /* #define FRAME_FILTER_DEBUG */
 
 struct stmmac_txq_stats {
-	unsigned long tx_pkt_n;
-	unsigned long tx_normal_irq_n;
+	u64 tx_bytes;
+	u64 tx_packets;
+	u64 tx_pkt_n;
+	u64 tx_normal_irq_n;
+	u64 napi_poll;
+	u64 tx_clean;
+	u64 tx_set_ic_bit;
+	u64 tx_tso_frames;
+	u64 tx_tso_nfrags;
+	struct u64_stats_sync syncp;
 };
 
 struct stmmac_rxq_stats {
-	unsigned long rx_pkt_n;
-	unsigned long rx_normal_irq_n;
+	u64 rx_bytes;
+	u64 rx_packets;
+	u64 rx_pkt_n;
+	u64 rx_normal_irq_n;
+	u64 napi_poll;
+	struct u64_stats_sync syncp;
 };
 
 /* Extra statistic and debug information exposed by ethtool */
@@ -81,6 +93,7 @@ struct stmmac_extra_stats {
 	unsigned long tx_frame_flushed;
 	unsigned long tx_payload_error;
 	unsigned long tx_ip_header_error;
+	unsigned long tx_collision;
 	/* Receive errors */
 	unsigned long rx_desc;
 	unsigned long sa_filter_fail;
@@ -113,14 +126,6 @@ struct stmmac_extra_stats {
 	/* Tx/Rx IRQ Events */
 	unsigned long rx_early_irq;
 	unsigned long threshold;
-	unsigned long tx_pkt_n;
-	unsigned long rx_pkt_n;
-	unsigned long normal_irq_n;
-	unsigned long rx_normal_irq_n;
-	unsigned long napi_poll;
-	unsigned long tx_normal_irq_n;
-	unsigned long tx_clean;
-	unsigned long tx_set_ic_bit;
 	unsigned long irq_receive_pmt_irq_n;
 	/* MMC info */
 	unsigned long mmc_tx_irq_n;
@@ -190,18 +195,16 @@ struct stmmac_extra_stats {
 	unsigned long mtl_rx_fifo_ctrl_active;
 	unsigned long mac_rx_frame_ctrl_fifo;
 	unsigned long mac_gmii_rx_proto_engine;
-	/* TSO */
-	unsigned long tx_tso_frames;
-	unsigned long tx_tso_nfrags;
 	/* EST */
 	unsigned long mtl_est_cgce;
 	unsigned long mtl_est_hlbs;
 	unsigned long mtl_est_hlbf;
 	unsigned long mtl_est_btre;
 	unsigned long mtl_est_btrlm;
-	/* per queue statistics */
-	struct stmmac_txq_stats txq_stats[MTL_MAX_TX_QUEUES];
-	struct stmmac_rxq_stats rxq_stats[MTL_MAX_RX_QUEUES];
+	unsigned long rx_dropped;
+	unsigned long rx_errors;
+	unsigned long tx_dropped;
+	unsigned long tx_errors;
 };
 
 /* Safety Feature statistics exposed by ethtool */
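Note: each new syncp also has to be initialized with u64_stats_init()
when the queues are set up; that part of the change is outside the
hunks shown here.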
drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c: +9 −3
@@ -440,8 +440,10 @@ static int sun8i_dwmac_dma_interrupt(struct stmmac_priv *priv,
 				     struct stmmac_extra_stats *x, u32 chan,
 				     u32 dir)
 {
-	u32 v;
+	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
+	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
 	int ret = 0;
+	u32 v;
 
 	v = readl(ioaddr + EMAC_INT_STA);
 
@@ -452,7 +454,9 @@ static int sun8i_dwmac_dma_interrupt(struct stmmac_priv *priv,

 	if (v & EMAC_TX_INT) {
 		ret |= handle_tx;
-		x->tx_normal_irq_n++;
+		u64_stats_update_begin(&tx_q->txq_stats.syncp);
+		tx_q->txq_stats.tx_normal_irq_n++;
+		u64_stats_update_end(&tx_q->txq_stats.syncp);
 	}
 
 	if (v & EMAC_TX_DMA_STOP_INT)
@@ -474,7 +478,9 @@ static int sun8i_dwmac_dma_interrupt(struct stmmac_priv *priv,

 	if (v & EMAC_RX_INT) {
 		ret |= handle_rx;
-		x->rx_normal_irq_n++;
+		u64_stats_update_begin(&rx_q->rxq_stats.syncp);
+		rx_q->rxq_stats.rx_normal_irq_n++;
+		u64_stats_update_end(&rx_q->rxq_stats.syncp);
 	}
 
 	if (v & EMAC_RX_BUF_UA_INT)
drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c: +1 −6
@@ -82,29 +82,24 @@ static void dwmac100_dump_dma_regs(struct stmmac_priv *priv,
 }
 
 /* DMA controller has two counters to track the number of the missed frames. */
-static void dwmac100_dma_diagnostic_fr(struct net_device_stats *stats,
-				       struct stmmac_extra_stats *x,
+static void dwmac100_dma_diagnostic_fr(struct stmmac_extra_stats *x,
 				       void __iomem *ioaddr)
 {
 	u32 csr8 = readl(ioaddr + DMA_MISSED_FRAME_CTR);
 
 	if (unlikely(csr8)) {
 		if (csr8 & DMA_MISSED_FRAME_OVE) {
-			stats->rx_over_errors += 0x800;
 			x->rx_overflow_cntr += 0x800;
 		} else {
 			unsigned int ove_cntr;
 			ove_cntr = ((csr8 & DMA_MISSED_FRAME_OVE_CNTR) >> 17);
-			stats->rx_over_errors += ove_cntr;
 			x->rx_overflow_cntr += ove_cntr;
 		}
 
 		if (csr8 & DMA_MISSED_FRAME_OVE_M) {
-			stats->rx_missed_errors += 0xffff;
 			x->rx_missed_cntr += 0xffff;
 		} else {
 			unsigned int miss_f = (csr8 & DMA_MISSED_FRAME_M_CNTR);
-			stats->rx_missed_errors += miss_f;
 			x->rx_missed_cntr += miss_f;
 		}
 	}
drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c: +5 −11
@@ -13,8 +13,7 @@
#include "dwmac4.h"
#include "dwmac4_descs.h"

static int dwmac4_wrback_get_tx_status(struct net_device_stats *stats,
				       struct stmmac_extra_stats *x,
static int dwmac4_wrback_get_tx_status(struct stmmac_extra_stats *x,
				       struct dma_desc *p,
				       void __iomem *ioaddr)
{
@@ -40,15 +39,13 @@ static int dwmac4_wrback_get_tx_status(struct net_device_stats *stats,
 			x->tx_frame_flushed++;
 		if (unlikely(tdes3 & TDES3_LOSS_CARRIER)) {
 			x->tx_losscarrier++;
-			stats->tx_carrier_errors++;
 		}
 		if (unlikely(tdes3 & TDES3_NO_CARRIER)) {
 			x->tx_carrier++;
-			stats->tx_carrier_errors++;
 		}
 		if (unlikely((tdes3 & TDES3_LATE_COLLISION) ||
 			     (tdes3 & TDES3_EXCESSIVE_COLLISION)))
-			stats->collisions +=
+			x->tx_collision +=
 			    (tdes3 & TDES3_COLLISION_COUNT_MASK)
 			    >> TDES3_COLLISION_COUNT_SHIFT;

@@ -73,8 +70,7 @@ static int dwmac4_wrback_get_tx_status(struct net_device_stats *stats,
 	return ret;
 }
 
-static int dwmac4_wrback_get_rx_status(struct net_device_stats *stats,
-				       struct stmmac_extra_stats *x,
+static int dwmac4_wrback_get_rx_status(struct stmmac_extra_stats *x,
 				       struct dma_desc *p)
 {
 	unsigned int rdes1 = le32_to_cpu(p->des1);
@@ -93,7 +89,7 @@ static int dwmac4_wrback_get_rx_status(struct net_device_stats *stats,

 	if (unlikely(rdes3 & RDES3_ERROR_SUMMARY)) {
 		if (unlikely(rdes3 & RDES3_GIANT_PACKET))
-			stats->rx_length_errors++;
+			x->rx_length++;
 		if (unlikely(rdes3 & RDES3_OVERFLOW_ERROR))
 			x->rx_gmac_overflow++;

@@ -103,10 +99,8 @@ static int dwmac4_wrback_get_rx_status(struct net_device_stats *stats,
 		if (unlikely(rdes3 & RDES3_RECEIVE_ERROR))
 			x->rx_mii++;
 
-		if (unlikely(rdes3 & RDES3_CRC_ERROR)) {
+		if (unlikely(rdes3 & RDES3_CRC_ERROR))
 			x->rx_crc_errors++;
-			stats->rx_crc_errors++;
-		}
 
 		if (unlikely(rdes3 & RDES3_DRIBBLE_ERROR))
 			x->dribbling_bit++;
drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c: +9 −6
@@ -171,6 +171,8 @@ int dwmac4_dma_interrupt(struct stmmac_priv *priv, void __iomem *ioaddr,
 	const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs;
 	u32 intr_status = readl(ioaddr + DMA_CHAN_STATUS(dwmac4_addrs, chan));
 	u32 intr_en = readl(ioaddr + DMA_CHAN_INTR_ENA(dwmac4_addrs, chan));
+	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
+	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
 	int ret = 0;
 
 	if (dir == DMA_DIR_RX)
@@ -198,18 +200,19 @@ int dwmac4_dma_interrupt(struct stmmac_priv *priv, void __iomem *ioaddr,
 		}
 	}
 	/* TX/RX NORMAL interrupts */
-	if (likely(intr_status & DMA_CHAN_STATUS_NIS))
-		x->normal_irq_n++;
 	if (likely(intr_status & DMA_CHAN_STATUS_RI)) {
-		x->rx_normal_irq_n++;
-		x->rxq_stats[chan].rx_normal_irq_n++;
+		u64_stats_update_begin(&rx_q->rxq_stats.syncp);
+		rx_q->rxq_stats.rx_normal_irq_n++;
+		u64_stats_update_end(&rx_q->rxq_stats.syncp);
 		ret |= handle_rx;
 	}
 	if (likely(intr_status & DMA_CHAN_STATUS_TI)) {
-		x->tx_normal_irq_n++;
-		x->txq_stats[chan].tx_normal_irq_n++;
+		u64_stats_update_begin(&tx_q->txq_stats.syncp);
+		tx_q->txq_stats.tx_normal_irq_n++;
+		u64_stats_update_end(&tx_q->txq_stats.syncp);
 		ret |= handle_tx;
 	}
 
 	if (unlikely(intr_status & DMA_CHAN_STATUS_TBU))
 		ret |= handle_tx;
 	if (unlikely(intr_status & DMA_CHAN_STATUS_ERI))
(Remaining file diffs in this commit did not render on this page.)