Commit 0a8bd81f authored by Biao Huang's avatar Biao Huang Committed by David S. Miller
Browse files

net: ethernet: mtk-star-emac: separate tx/rx handling with two NAPIs



The current driver may lose tx interrupts under a bidirectional test with iperf3,
which leads to some unexpected issues.

This patch lets the rx/tx interrupts be enabled/disabled separately, and rx/tx
are handled in different NAPIs.

Signed-off-by: default avatarBiao Huang <biao.huang@mediatek.com>
Signed-off-by: default avatarYinghua Pan <ot_yinghua.pan@mediatek.com>
Signed-off-by: default avatarDavid S. Miller <davem@davemloft.net>
parent 0027340a
Loading
Loading
Loading
Loading
+199 −141
Original line number Diff line number Diff line
@@ -33,6 +33,7 @@
#define MTK_STAR_SKB_ALIGNMENT			16
#define MTK_STAR_HASHTABLE_MC_LIMIT		256
#define MTK_STAR_HASHTABLE_SIZE_MAX		512
#define MTK_STAR_DESC_NEEDED			(MAX_SKB_FRAGS + 4)

/* Normally we'd use NET_IP_ALIGN but on arm64 its value is 0 and it doesn't
 * work for this controller.
@@ -228,7 +229,8 @@ struct mtk_star_ring_desc_data {
	struct sk_buff *skb;
};

#define MTK_STAR_RING_NUM_DESCS			128
#define MTK_STAR_RING_NUM_DESCS			512
#define MTK_STAR_TX_THRESH			(MTK_STAR_RING_NUM_DESCS / 4)
#define MTK_STAR_NUM_TX_DESCS			MTK_STAR_RING_NUM_DESCS
#define MTK_STAR_NUM_RX_DESCS			MTK_STAR_RING_NUM_DESCS
#define MTK_STAR_NUM_DESCS_TOTAL		(MTK_STAR_RING_NUM_DESCS * 2)
@@ -263,7 +265,8 @@ struct mtk_star_priv {
	struct mtk_star_ring rx_ring;

	struct mii_bus *mii;
	struct napi_struct napi;
	struct napi_struct tx_napi;
	struct napi_struct rx_napi;

	struct device_node *phy_node;
	phy_interface_t phy_intf;
@@ -379,19 +382,16 @@ mtk_star_ring_push_head_tx(struct mtk_star_ring *ring,
	mtk_star_ring_push_head(ring, desc_data, flags);
}

static unsigned int mtk_star_ring_num_used_descs(struct mtk_star_ring *ring)
static unsigned int mtk_star_tx_ring_avail(struct mtk_star_ring *ring)
{
	return abs(ring->head - ring->tail);
}
	u32 avail;

static bool mtk_star_ring_full(struct mtk_star_ring *ring)
{
	return mtk_star_ring_num_used_descs(ring) == MTK_STAR_RING_NUM_DESCS;
}
	if (ring->tail > ring->head)
		avail = ring->tail - ring->head - 1;
	else
		avail = MTK_STAR_RING_NUM_DESCS - ring->head + ring->tail - 1;

static bool mtk_star_ring_descs_available(struct mtk_star_ring *ring)
{
	return mtk_star_ring_num_used_descs(ring) > 0;
	return avail;
}

static dma_addr_t mtk_star_dma_map_rx(struct mtk_star_priv *priv,
@@ -436,6 +436,36 @@ static void mtk_star_nic_disable_pd(struct mtk_star_priv *priv)
			  MTK_STAR_BIT_MAC_CFG_NIC_PD);
}

/* Unmask the RX and/or TX completion interrupts in the DMA interrupt
 * mask register (a cleared bit means the interrupt is enabled).
 */
static void mtk_star_enable_dma_irq(struct mtk_star_priv *priv,
				    bool rx, bool tx)
{
	u32 mask;

	regmap_read(priv->regs, MTK_STAR_REG_INT_MASK, &mask);

	if (rx)
		mask &= ~MTK_STAR_BIT_INT_STS_FNRC;
	if (tx)
		mask &= ~MTK_STAR_BIT_INT_STS_TNTC;

	regmap_write(priv->regs, MTK_STAR_REG_INT_MASK, mask);
}

/* Mask the RX and/or TX completion interrupts in the DMA interrupt
 * mask register (a set bit means the interrupt is disabled).
 */
static void mtk_star_disable_dma_irq(struct mtk_star_priv *priv,
				     bool rx, bool tx)
{
	u32 mask;

	regmap_read(priv->regs, MTK_STAR_REG_INT_MASK, &mask);

	if (rx)
		mask |= MTK_STAR_BIT_INT_STS_FNRC;
	if (tx)
		mask |= MTK_STAR_BIT_INT_STS_TNTC;

	regmap_write(priv->regs, MTK_STAR_REG_INT_MASK, mask);
}

/* Unmask the three interrupts we care about, mask all others. */
static void mtk_star_intr_enable(struct mtk_star_priv *priv)
{
@@ -451,20 +481,11 @@ static void mtk_star_intr_disable(struct mtk_star_priv *priv)
	regmap_write(priv->regs, MTK_STAR_REG_INT_MASK, ~0);
}

static unsigned int mtk_star_intr_read(struct mtk_star_priv *priv)
{
	unsigned int val;

	regmap_read(priv->regs, MTK_STAR_REG_INT_STS, &val);

	return val;
}

static unsigned int mtk_star_intr_ack_all(struct mtk_star_priv *priv)
{
	unsigned int val;

	val = mtk_star_intr_read(priv);
	regmap_read(priv->regs, MTK_STAR_REG_INT_STS, &val);
	regmap_write(priv->regs, MTK_STAR_REG_INT_STS, val);

	return val;
@@ -736,25 +757,44 @@ static void mtk_star_free_tx_skbs(struct mtk_star_priv *priv)
	mtk_star_ring_free_skbs(priv, ring, mtk_star_dma_unmap_tx);
}

/* All processing for TX and RX happens in the napi poll callback.
 *
 * FIXME: The interrupt handling should be more fine-grained with each
 * interrupt enabled/disabled independently when needed. Unfortunately this
 * turned out to impact the driver's stability and until we have something
 * working properly, we're disabling all interrupts during TX & RX processing
 * or when resetting the counter registers.
 */
/**
 * mtk_star_handle_irq - Interrupt Handler.
 * @irq: interrupt number.
 * @data: pointer to a network interface device structure.
 * Description: this is the driver interrupt service routine.
 * It mainly handles:
 *  1. tx complete interrupt for frame transmission.
 *  2. rx complete interrupt for frame reception.
 *  3. MAC Management Counter interrupt to avoid counter overflow.
 **/
static irqreturn_t mtk_star_handle_irq(int irq, void *data)
{
	struct mtk_star_priv *priv;
	struct net_device *ndev;
	struct net_device *ndev = data;
	struct mtk_star_priv *priv = netdev_priv(ndev);
	unsigned int intr_status = mtk_star_intr_ack_all(priv);
	bool rx, tx;

	ndev = data;
	priv = netdev_priv(ndev);
	rx = (intr_status & MTK_STAR_BIT_INT_STS_FNRC) &&
	     napi_schedule_prep(&priv->rx_napi);
	tx = (intr_status & MTK_STAR_BIT_INT_STS_TNTC) &&
	     napi_schedule_prep(&priv->tx_napi);

	if (netif_running(ndev)) {
		mtk_star_intr_disable(priv);
		napi_schedule(&priv->napi);
	if (rx || tx) {
		spin_lock(&priv->lock);
		/* mask Rx and TX Complete interrupt */
		mtk_star_disable_dma_irq(priv, rx, tx);
		spin_unlock(&priv->lock);

		if (rx)
			__napi_schedule(&priv->rx_napi);
		if (tx)
			__napi_schedule(&priv->tx_napi);
	}

	/* interrupt is triggered once any counters reach 0x8000000 */
	if (intr_status & MTK_STAR_REG_INT_STS_MIB_CNT_TH) {
		mtk_star_update_stats(priv);
		mtk_star_reset_counters(priv);
	}

	return IRQ_HANDLED;
@@ -970,7 +1010,8 @@ static int mtk_star_enable(struct net_device *ndev)
	if (ret)
		goto err_free_skbs;

	napi_enable(&priv->napi);
	napi_enable(&priv->tx_napi);
	napi_enable(&priv->rx_napi);

	mtk_star_intr_ack_all(priv);
	mtk_star_intr_enable(priv);
@@ -1003,7 +1044,8 @@ static void mtk_star_disable(struct net_device *ndev)
	struct mtk_star_priv *priv = netdev_priv(ndev);

	netif_stop_queue(ndev);
	napi_disable(&priv->napi);
	napi_disable(&priv->tx_napi);
	napi_disable(&priv->rx_napi);
	mtk_star_intr_disable(priv);
	mtk_star_dma_disable(priv);
	mtk_star_intr_ack_all(priv);
@@ -1035,13 +1077,45 @@ static int mtk_star_netdev_ioctl(struct net_device *ndev,
	return phy_mii_ioctl(ndev->phydev, req, cmd);
}

static int mtk_star_netdev_start_xmit(struct sk_buff *skb,
/* Slow path of mtk_star_maybe_stop_tx(): stop the queue, then re-check
 * ring occupancy to close the race with the TX completion path, which
 * may have freed descriptors after the caller's first check.
 */
static int __mtk_star_maybe_stop_tx(struct mtk_star_priv *priv, u16 size)
{
	netif_stop_queue(priv->ndev);

	/* Might race with mtk_star_tx_poll, check again */
	smp_mb();
	if (unlikely(mtk_star_tx_ring_avail(&priv->tx_ring) >= size)) {
		/* Enough room appeared after all - resume the queue. */
		netif_start_queue(priv->ndev);
		return 0;
	}

	return -EBUSY;
}

/* Stop the TX queue unless at least @size descriptors are still free.
 * Returns 0 when transmission may proceed, -EBUSY otherwise.
 */
static inline int mtk_star_maybe_stop_tx(struct mtk_star_priv *priv, u16 size)
{
	if (unlikely(mtk_star_tx_ring_avail(&priv->tx_ring) < size))
		return __mtk_star_maybe_stop_tx(priv, size);

	return 0;
}

static netdev_tx_t mtk_star_netdev_start_xmit(struct sk_buff *skb,
					      struct net_device *ndev)
{
	struct mtk_star_priv *priv = netdev_priv(ndev);
	struct mtk_star_ring *ring = &priv->tx_ring;
	struct device *dev = mtk_star_get_dev(priv);
	struct mtk_star_ring_desc_data desc_data;
	int nfrags = skb_shinfo(skb)->nr_frags;

	if (unlikely(mtk_star_tx_ring_avail(ring) < nfrags + 1)) {
		if (!netif_queue_stopped(ndev)) {
			netif_stop_queue(ndev);
			/* This is a hard error, log it. */
			pr_err_ratelimited("Tx ring full when queue awake\n");
		}
		return NETDEV_TX_BUSY;
	}

	desc_data.dma_addr = mtk_star_dma_map_tx(priv, skb);
	if (dma_mapping_error(dev, desc_data.dma_addr))
@@ -1049,17 +1123,11 @@ static int mtk_star_netdev_start_xmit(struct sk_buff *skb,

	desc_data.skb = skb;
	desc_data.len = skb->len;

	spin_lock_bh(&priv->lock);

	mtk_star_ring_push_head_tx(ring, &desc_data);

	netdev_sent_queue(ndev, skb->len);

	if (mtk_star_ring_full(ring))
		netif_stop_queue(ndev);

	spin_unlock_bh(&priv->lock);
	mtk_star_maybe_stop_tx(priv, MTK_STAR_DESC_NEEDED);

	mtk_star_dma_resume_tx(priv);

@@ -1091,33 +1159,42 @@ static int mtk_star_tx_complete_one(struct mtk_star_priv *priv)
	return ret;
}

static void mtk_star_tx_complete_all(struct mtk_star_priv *priv)
static int mtk_star_tx_poll(struct napi_struct *napi, int budget)
{
	struct mtk_star_priv *priv = container_of(napi, struct mtk_star_priv,
						  tx_napi);
	int ret = 0, pkts_compl = 0, bytes_compl = 0, count = 0;
	struct mtk_star_ring *ring = &priv->tx_ring;
	struct net_device *ndev = priv->ndev;
	int ret, pkts_compl, bytes_compl;
	bool wake = false;

	spin_lock(&priv->lock);

	for (pkts_compl = 0, bytes_compl = 0;;
	     pkts_compl++, bytes_compl += ret, wake = true) {
		if (!mtk_star_ring_descs_available(ring))
			break;
	unsigned int head = ring->head;
	unsigned int entry = ring->tail;

	while (entry != head && count < (MTK_STAR_RING_NUM_DESCS - 1)) {
		ret = mtk_star_tx_complete_one(priv);
		if (ret < 0)
			break;

		count++;
		pkts_compl++;
		bytes_compl += ret;
		entry = ring->tail;
	}

	netdev_completed_queue(ndev, pkts_compl, bytes_compl);

	if (wake && netif_queue_stopped(ndev))
	if (unlikely(netif_queue_stopped(ndev)) &&
	    (mtk_star_tx_ring_avail(ring) > MTK_STAR_TX_THRESH))
		netif_wake_queue(ndev);

	if (napi_complete(napi)) {
		spin_lock(&priv->lock);
		mtk_star_enable_dma_irq(priv, false, true);
		spin_unlock(&priv->lock);
	}

	return 0;
}

static void mtk_star_netdev_get_stats64(struct net_device *ndev,
					struct rtnl_link_stats64 *stats)
{
@@ -1195,7 +1272,7 @@ static const struct ethtool_ops mtk_star_ethtool_ops = {
	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
};

static int mtk_star_receive_packet(struct mtk_star_priv *priv)
static int mtk_star_rx(struct mtk_star_priv *priv, int budget)
{
	struct mtk_star_ring *ring = &priv->rx_ring;
	struct device *dev = mtk_star_get_dev(priv);
@@ -1203,11 +1280,10 @@ static int mtk_star_receive_packet(struct mtk_star_priv *priv)
	struct net_device *ndev = priv->ndev;
	struct sk_buff *curr_skb, *new_skb;
	dma_addr_t new_dma_addr;
	int ret;
	int ret, count = 0;

	spin_lock(&priv->lock);
	while (count < budget) {
		ret = mtk_star_ring_pop_tail(ring, &desc_data);
	spin_unlock(&priv->lock);
		if (ret)
			return -1;

@@ -1220,8 +1296,8 @@ static int mtk_star_receive_packet(struct mtk_star_priv *priv)
			goto push_new_skb;
		}

	/* Prepare new skb before receiving the current one. Reuse the current
	 * skb if we fail at any point.
		/* Prepare new skb before receiving the current one.
		 * Reuse the current skb if we fail at any point.
		 */
		new_skb = mtk_star_alloc_skb(ndev);
		if (!new_skb) {
@@ -1239,7 +1315,9 @@ static int mtk_star_receive_packet(struct mtk_star_priv *priv)
			goto push_new_skb;
		}

	/* We can't fail anymore at this point: it's safe to unmap the skb. */
		/* We can't fail anymore at this point:
		 * it's safe to unmap the skb.
		 */
		mtk_star_dma_unmap_rx(priv, &desc_data);

		skb_put(desc_data.skb, desc_data.len);
@@ -1252,58 +1330,35 @@ static int mtk_star_receive_packet(struct mtk_star_priv *priv)
		desc_data.dma_addr = new_dma_addr;

push_new_skb:

		count++;

		desc_data.len = skb_tailroom(new_skb);
		desc_data.skb = new_skb;

	spin_lock(&priv->lock);
		mtk_star_ring_push_head_rx(ring, &desc_data);
	spin_unlock(&priv->lock);

	return 0;
	}

static int mtk_star_process_rx(struct mtk_star_priv *priv, int budget)
{
	int received, ret;

	for (received = 0, ret = 0; received < budget && ret == 0; received++)
		ret = mtk_star_receive_packet(priv);

	mtk_star_dma_resume_rx(priv);

	return received;
	return count;
}

static int mtk_star_poll(struct napi_struct *napi, int budget)
static int mtk_star_rx_poll(struct napi_struct *napi, int budget)
{
	struct mtk_star_priv *priv;
	unsigned int status;
	int received = 0;

	priv = container_of(napi, struct mtk_star_priv, napi);

	status = mtk_star_intr_read(priv);
	mtk_star_intr_ack_all(priv);

	if (status & MTK_STAR_BIT_INT_STS_TNTC)
		/* Clean-up all TX descriptors. */
		mtk_star_tx_complete_all(priv);
	int work_done = 0;

	if (status & MTK_STAR_BIT_INT_STS_FNRC)
		/* Receive up to $budget packets. */
		received = mtk_star_process_rx(priv, budget);
	priv = container_of(napi, struct mtk_star_priv, rx_napi);

	if (unlikely(status & MTK_STAR_REG_INT_STS_MIB_CNT_TH)) {
		mtk_star_update_stats(priv);
		mtk_star_reset_counters(priv);
	work_done = mtk_star_rx(priv, budget);
	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		spin_lock(&priv->lock);
		mtk_star_enable_dma_irq(priv, true, false);
		spin_unlock(&priv->lock);
	}

	if (received < budget)
		napi_complete_done(napi, received);

	mtk_star_intr_enable(priv);

	return received;
	return work_done;
}

static void mtk_star_mdio_rwok_clear(struct mtk_star_priv *priv)
@@ -1602,7 +1657,10 @@ static int mtk_star_probe(struct platform_device *pdev)
	ndev->netdev_ops = &mtk_star_netdev_ops;
	ndev->ethtool_ops = &mtk_star_ethtool_ops;

	netif_napi_add(ndev, &priv->napi, mtk_star_poll, NAPI_POLL_WEIGHT);
	netif_napi_add(ndev, &priv->rx_napi, mtk_star_rx_poll,
		       NAPI_POLL_WEIGHT);
	netif_tx_napi_add(ndev, &priv->tx_napi, mtk_star_tx_poll,
			  NAPI_POLL_WEIGHT);

	return devm_register_netdev(dev, ndev);
}