Commit c61ce06f authored by Jakub Kicinski

Merge branch 'bcm63xx_enet-major-makeover-of-driver'

Sieng Piaw Liew says:

====================
bcm63xx_enet: major makeover of driver

This patch series aims to improve the bcm63xx_enet driver by integrating the
latest networking features, i.e. batched rx processing, BQL, build_skb,
etc.
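
To make the build_skb part concrete: the rx ring ends up holding raw
page-fragment buffers, and an skb head is only built around a fragment
once a frame actually arrives. A minimal sketch of that pattern (the
helper name and its arguments are illustrative, not actual driver code):

/* Wrap a received page fragment into an skb without copying. */
static struct sk_buff *rx_one_frame(struct bcm_enet_priv *priv,
				    void *buf, unsigned int len)
{
	struct sk_buff *skb;

	skb = build_skb(buf, priv->rx_frag_size);	/* no memcpy */
	if (unlikely(!skb)) {
		skb_free_frag(buf);		/* recycle the fragment */
		return NULL;
	}
	skb_reserve(skb, priv->rx_buf_offset);	/* skip DMA headroom */
	skb_put(skb, len);			/* frame length, minus FCS */
	return skb;
}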

The newer enetsw SoCs turn out to be capable of unaligned rx DMA, which
allows adding NET_IP_ALIGN padding; combined with these patches, this
improves packet processing performance by ~50% on BCM6328.
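
Concretely, only the enetsw probe path adds the extra two bytes of
headroom; NET_IP_ALIGN (usually 2) shifts the 14-byte Ethernet header so
the IP header lands on a 4-byte boundary, at the cost of handing the DMA
engine an unaligned buffer address. Both assignments appear in the diff
below:

priv->rx_buf_offset = NET_SKB_PAD;			/* older non-enetsw SoCs */
priv->rx_buf_offset = NET_SKB_PAD + NET_IP_ALIGN;	/* newer enetsw SoCs */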

Older non-enetsw SoCs still benefit, mainly from rx batching; a
performance improvement of ~30% is observed on BCM6333.
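
The batching itself is the usual list-based rx pattern: skbs are
collected during the NAPI poll and pushed up in one
netif_receive_skb_list() call, keeping the stack's instruction cache
warm. A minimal sketch, assuming a hypothetical rx_pull() helper:

LIST_HEAD(rx_list);

do {
	struct sk_buff *skb = rx_pull(priv);	/* hypothetical helper */

	if (!skb)
		break;
	list_add_tail(&skb->list, &rx_list);	/* defer stack entry */
} while (--budget > 0);

netif_receive_skb_list(&rx_list);	/* one pass through the stack */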

The BCM63xx SoCs are designed for routers. As such, having BQL is
beneficial as well as trivial to add.
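
For reference, BQL amounts to the three hooks visible in the diff:
bytes queued on transmit, bytes/packets retired on reclaim, and a reset
when the interface stops:

netdev_sent_queue(dev, skb->len);		/* start_xmit */
netdev_completed_queue(dev, released, bytes);	/* tx reclaim */
netdev_reset_queue(dev);			/* bcm_enet_stop()/bcm_enetsw_stop() */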

v3:
* Simplify xmit_more patch by not moving around the code needlessly.
* Fix indentation in xmit_more patch.
* Fix indentation in build_skb patch.
* Split the rx ring cleanup patch out of the build_skb patch and place it
  before the build_skb patch for easier review, as suggested by Florian
  Fainelli.

v2:
* Add the xmit_more support and rx loop improvement patches.
* Move BQL's netdev_reset_queue() call to the bcm_enet_stop()/bcm_enetsw_stop()
  functions, as suggested by Florian Fainelli.
* Improve the commit messages.
====================

Link: https://lore.kernel.org/r/20210106144208.1935-1-liew.s.piaw@gmail.com


Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents 2e423387 ae2259ee
+93 −93
@@ -220,7 +220,7 @@ static void bcm_enet_mdio_write_mii(struct net_device *dev, int mii_id,
/*
 * refill rx queue
 */
-static int bcm_enet_refill_rx(struct net_device *dev)
+static int bcm_enet_refill_rx(struct net_device *dev, bool napi_mode)
{
	struct bcm_enet_priv *priv;

@@ -228,26 +228,29 @@ static int bcm_enet_refill_rx(struct net_device *dev)

	while (priv->rx_desc_count < priv->rx_ring_size) {
		struct bcm_enet_desc *desc;
-		struct sk_buff *skb;
-		dma_addr_t p;
		int desc_idx;
		u32 len_stat;

		desc_idx = priv->rx_dirty_desc;
		desc = &priv->rx_desc_cpu[desc_idx];

-		if (!priv->rx_skb[desc_idx]) {
-			skb = netdev_alloc_skb(dev, priv->rx_skb_size);
-			if (!skb)
+		if (!priv->rx_buf[desc_idx]) {
+			void *buf;
+
+			if (likely(napi_mode))
+				buf = napi_alloc_frag(priv->rx_frag_size);
+			else
+				buf = netdev_alloc_frag(priv->rx_frag_size);
+			if (unlikely(!buf))
				break;
-			priv->rx_skb[desc_idx] = skb;
-			p = dma_map_single(&priv->pdev->dev, skb->data,
-					   priv->rx_skb_size,
+			priv->rx_buf[desc_idx] = buf;
+			desc->address = dma_map_single(&priv->pdev->dev,
+						       buf + priv->rx_buf_offset,
+						       priv->rx_buf_size,
						       DMA_FROM_DEVICE);
-			desc->address = p;
		}

-		len_stat = priv->rx_skb_size << DMADESC_LENGTH_SHIFT;
+		len_stat = priv->rx_buf_size << DMADESC_LENGTH_SHIFT;
		len_stat |= DMADESC_OWNER_MASK;
		if (priv->rx_dirty_desc == priv->rx_ring_size - 1) {
			len_stat |= (DMADESC_WRAP_MASK >> priv->dma_desc_shift);
@@ -287,7 +290,7 @@ static void bcm_enet_refill_rx_timer(struct timer_list *t)
	struct net_device *dev = priv->net_dev;

	spin_lock(&priv->rx_lock);
-	bcm_enet_refill_rx(dev);
+	bcm_enet_refill_rx(dev, false);
	spin_unlock(&priv->rx_lock);
}

@@ -297,10 +300,12 @@ static void bcm_enet_refill_rx_timer(struct timer_list *t)
static int bcm_enet_receive_queue(struct net_device *dev, int budget)
{
	struct bcm_enet_priv *priv;
+	struct list_head rx_list;
	struct device *kdev;
	int processed;

	priv = netdev_priv(dev);
+	INIT_LIST_HEAD(&rx_list);
	kdev = &priv->pdev->dev;
	processed = 0;

@@ -315,6 +320,7 @@ static int bcm_enet_receive_queue(struct net_device *dev, int budget)
		int desc_idx;
		u32 len_stat;
		unsigned int len;
+		void *buf;

		desc_idx = priv->rx_curr_desc;
		desc = &priv->rx_desc_cpu[desc_idx];
@@ -333,7 +339,6 @@ static int bcm_enet_receive_queue(struct net_device *dev, int budget)
		priv->rx_curr_desc++;
		if (priv->rx_curr_desc == priv->rx_ring_size)
			priv->rx_curr_desc = 0;
-		priv->rx_desc_count--;

		/* if the packet does not have start of packet _and_
		 * end of packet flag set, then just recycle it */
@@ -360,16 +365,14 @@ static int bcm_enet_receive_queue(struct net_device *dev, int budget)
		}

		/* valid packet */
-		skb = priv->rx_skb[desc_idx];
+		buf = priv->rx_buf[desc_idx];
		len = (len_stat & DMADESC_LENGTH_MASK) >> DMADESC_LENGTH_SHIFT;
		/* don't include FCS */
		len -= 4;

		if (len < copybreak) {
-			struct sk_buff *nskb;
-
-			nskb = napi_alloc_skb(&priv->napi, len);
-			if (!nskb) {
+			skb = napi_alloc_skb(&priv->napi, len);
+			if (unlikely(!skb)) {
				/* forget packet, just rearm desc */
				dev->stats.rx_dropped++;
				continue;
@@ -377,26 +380,36 @@ static int bcm_enet_receive_queue(struct net_device *dev, int budget)

			dma_sync_single_for_cpu(kdev, desc->address,
						len, DMA_FROM_DEVICE);
-			memcpy(nskb->data, skb->data, len);
+			memcpy(skb->data, buf + priv->rx_buf_offset, len);
			dma_sync_single_for_device(kdev, desc->address,
						   len, DMA_FROM_DEVICE);
-			skb = nskb;
		} else {
-			dma_unmap_single(&priv->pdev->dev, desc->address,
-					 priv->rx_skb_size, DMA_FROM_DEVICE);
-			priv->rx_skb[desc_idx] = NULL;
+			dma_unmap_single(kdev, desc->address,
+					 priv->rx_buf_size, DMA_FROM_DEVICE);
+			priv->rx_buf[desc_idx] = NULL;
+
+			skb = build_skb(buf, priv->rx_frag_size);
+			if (unlikely(!skb)) {
+				skb_free_frag(buf);
+				dev->stats.rx_dropped++;
+				continue;
+			}
+			skb_reserve(skb, priv->rx_buf_offset);
		}

		skb_put(skb, len);
		skb->protocol = eth_type_trans(skb, dev);
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += len;
-		netif_receive_skb(skb);
+		list_add_tail(&skb->list, &rx_list);

-	} while (processed < budget);
+	} while (--budget > 0);

+	netif_receive_skb_list(&rx_list);
+	priv->rx_desc_count -= processed;

	if (processed || !priv->rx_desc_count) {
-		bcm_enet_refill_rx(dev);
+		bcm_enet_refill_rx(dev, true);

		/* kick rx dma */
		enet_dmac_writel(priv, priv->dma_chan_en_mask,
@@ -413,9 +426,11 @@ static int bcm_enet_receive_queue(struct net_device *dev, int budget)
static int bcm_enet_tx_reclaim(struct net_device *dev, int force)
{
	struct bcm_enet_priv *priv;
+	unsigned int bytes;
	int released;

	priv = netdev_priv(dev);
+	bytes = 0;
	released = 0;

	while (priv->tx_desc_count < priv->tx_ring_size) {
@@ -452,10 +467,13 @@ static int bcm_enet_tx_reclaim(struct net_device *dev, int force)
		if (desc->len_stat & DMADESC_UNDER_MASK)
			dev->stats.tx_errors++;

+		bytes += skb->len;
		dev_kfree_skb(skb);
		released++;
	}

+	netdev_completed_queue(dev, released, bytes);
+
	if (netif_queue_stopped(dev) && released)
		netif_wake_queue(dev);

@@ -622,7 +640,10 @@ bcm_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
	desc->len_stat = len_stat;
	wmb();

+	netdev_sent_queue(dev, skb->len);
+
	/* kick tx dma */
-	enet_dmac_writel(priv, priv->dma_chan_en_mask,
-			 ENETDMAC_CHANCFG, priv->tx_chan);
+	if (!netdev_xmit_more() || !priv->tx_desc_count)
+		enet_dmac_writel(priv, priv->dma_chan_en_mask,
+				 ENETDMAC_CHANCFG, priv->tx_chan);

@@ -845,6 +866,24 @@ static void bcm_enet_adjust_link(struct net_device *dev)
		priv->pause_tx ? "tx" : "off");
}

+static void bcm_enet_free_rx_buf_ring(struct device *kdev, struct bcm_enet_priv *priv)
+{
+	int i;
+
+	for (i = 0; i < priv->rx_ring_size; i++) {
+		struct bcm_enet_desc *desc;
+
+		if (!priv->rx_buf[i])
+			continue;
+
+		desc = &priv->rx_desc_cpu[i];
+		dma_unmap_single(kdev, desc->address, priv->rx_buf_size,
+				 DMA_FROM_DEVICE);
+		skb_free_frag(priv->rx_buf[i]);
+	}
+	kfree(priv->rx_buf);
+}
+
/*
 * open callback, allocate dma rings & buffers and start rx operation
 */
@@ -954,10 +993,10 @@ static int bcm_enet_open(struct net_device *dev)
	priv->tx_curr_desc = 0;
	spin_lock_init(&priv->tx_lock);

-	/* init & fill rx ring with skbs */
-	priv->rx_skb = kcalloc(priv->rx_ring_size, sizeof(struct sk_buff *),
+	/* init & fill rx ring with buffers */
+	priv->rx_buf = kcalloc(priv->rx_ring_size, sizeof(void *),
			       GFP_KERNEL);
-	if (!priv->rx_skb) {
+	if (!priv->rx_buf) {
		ret = -ENOMEM;
		goto out_free_tx_skb;
	}
@@ -974,8 +1013,8 @@ static int bcm_enet_open(struct net_device *dev)
		enet_dmac_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0,
				ENETDMAC_BUFALLOC, priv->rx_chan);

-	if (bcm_enet_refill_rx(dev)) {
-		dev_err(kdev, "cannot allocate rx skb queue\n");
+	if (bcm_enet_refill_rx(dev, false)) {
+		dev_err(kdev, "cannot allocate rx buffer queue\n");
		ret = -ENOMEM;
		goto out;
	}
@@ -1069,18 +1108,7 @@ static int bcm_enet_open(struct net_device *dev)
	return 0;

out:
-	for (i = 0; i < priv->rx_ring_size; i++) {
-		struct bcm_enet_desc *desc;
-
-		if (!priv->rx_skb[i])
-			continue;
-
-		desc = &priv->rx_desc_cpu[i];
-		dma_unmap_single(kdev, desc->address, priv->rx_skb_size,
-				 DMA_FROM_DEVICE);
-		kfree_skb(priv->rx_skb[i]);
-	}
-	kfree(priv->rx_skb);
+	bcm_enet_free_rx_buf_ring(kdev, priv);

out_free_tx_skb:
	kfree(priv->tx_skb);
@@ -1159,12 +1187,12 @@ static int bcm_enet_stop(struct net_device *dev)
{
	struct bcm_enet_priv *priv;
	struct device *kdev;
-	int i;

	priv = netdev_priv(dev);
	kdev = &priv->pdev->dev;

	netif_stop_queue(dev);
+	netdev_reset_queue(dev);
	napi_disable(&priv->napi);
	if (priv->has_phy)
		phy_stop(dev->phydev);
@@ -1186,21 +1214,10 @@ static int bcm_enet_stop(struct net_device *dev)
	/* force reclaim of all tx buffers */
	bcm_enet_tx_reclaim(dev, 1);

-	/* free the rx skb ring */
-	for (i = 0; i < priv->rx_ring_size; i++) {
-		struct bcm_enet_desc *desc;
-
-		if (!priv->rx_skb[i])
-			continue;
-
-		desc = &priv->rx_desc_cpu[i];
-		dma_unmap_single(kdev, desc->address, priv->rx_skb_size,
-				 DMA_FROM_DEVICE);
-		kfree_skb(priv->rx_skb[i]);
-	}
+	/* free the rx buffer ring */
+	bcm_enet_free_rx_buf_ring(kdev, priv);

	/* free remaining allocated memory */
-	kfree(priv->rx_skb);
	kfree(priv->tx_skb);
	dma_free_coherent(kdev, priv->rx_desc_alloc_size,
			  priv->rx_desc_cpu, priv->rx_desc_dma);
@@ -1622,9 +1639,12 @@ static int bcm_enet_change_mtu(struct net_device *dev, int new_mtu)
	 * align rx buffer size to dma burst len, account FCS since
	 * it's appended
	 */
-	priv->rx_skb_size = ALIGN(actual_mtu + ETH_FCS_LEN,
+	priv->rx_buf_size = ALIGN(actual_mtu + ETH_FCS_LEN,
				  priv->dma_maxburst * 4);

+	priv->rx_frag_size = SKB_DATA_ALIGN(priv->rx_buf_offset + priv->rx_buf_size) +
+					    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+
	dev->mtu = new_mtu;
	return 0;
}
@@ -1709,6 +1729,7 @@ static int bcm_enet_probe(struct platform_device *pdev)

	priv->enet_is_sw = false;
	priv->dma_maxburst = BCMENET_DMA_MAXBURST;
+	priv->rx_buf_offset = NET_SKB_PAD;

	ret = bcm_enet_change_mtu(dev, dev->mtu);
	if (ret)
@@ -2126,7 +2147,7 @@ static int bcm_enetsw_open(struct net_device *dev)
	priv->tx_skb = kcalloc(priv->tx_ring_size, sizeof(struct sk_buff *),
			       GFP_KERNEL);
	if (!priv->tx_skb) {
		dev_err(kdev, "cannot allocate rx skb queue\n");
		dev_err(kdev, "cannot allocate tx skb queue\n");
		ret = -ENOMEM;
		goto out_free_tx_ring;
	}
@@ -2136,11 +2157,11 @@ static int bcm_enetsw_open(struct net_device *dev)
	priv->tx_curr_desc = 0;
	spin_lock_init(&priv->tx_lock);

-	/* init & fill rx ring with skbs */
-	priv->rx_skb = kcalloc(priv->rx_ring_size, sizeof(struct sk_buff *),
+	/* init & fill rx ring with buffers */
+	priv->rx_buf = kcalloc(priv->rx_ring_size, sizeof(void *),
			       GFP_KERNEL);
-	if (!priv->rx_skb) {
-		dev_err(kdev, "cannot allocate rx skb queue\n");
+	if (!priv->rx_buf) {
+		dev_err(kdev, "cannot allocate rx buffer queue\n");
		ret = -ENOMEM;
		goto out_free_tx_skb;
	}
@@ -2187,8 +2208,8 @@ static int bcm_enetsw_open(struct net_device *dev)
	enet_dma_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0,
			ENETDMA_BUFALLOC_REG(priv->rx_chan));

-	if (bcm_enet_refill_rx(dev)) {
-		dev_err(kdev, "cannot allocate rx skb queue\n");
+	if (bcm_enet_refill_rx(dev, false)) {
+		dev_err(kdev, "cannot allocate rx buffer queue\n");
		ret = -ENOMEM;
		goto out;
	}
@@ -2287,18 +2308,7 @@ static int bcm_enetsw_open(struct net_device *dev)
	return 0;

out:
-	for (i = 0; i < priv->rx_ring_size; i++) {
-		struct bcm_enet_desc *desc;
-
-		if (!priv->rx_skb[i])
-			continue;
-
-		desc = &priv->rx_desc_cpu[i];
-		dma_unmap_single(kdev, desc->address, priv->rx_skb_size,
-				 DMA_FROM_DEVICE);
-		kfree_skb(priv->rx_skb[i]);
-	}
-	kfree(priv->rx_skb);
+	bcm_enet_free_rx_buf_ring(kdev, priv);

out_free_tx_skb:
	kfree(priv->tx_skb);
@@ -2327,13 +2337,13 @@ static int bcm_enetsw_stop(struct net_device *dev)
{
	struct bcm_enet_priv *priv;
	struct device *kdev;
-	int i;

	priv = netdev_priv(dev);
	kdev = &priv->pdev->dev;

	del_timer_sync(&priv->swphy_poll);
	netif_stop_queue(dev);
+	netdev_reset_queue(dev);
	napi_disable(&priv->napi);
	del_timer_sync(&priv->rx_timeout);

@@ -2348,21 +2358,10 @@ static int bcm_enetsw_stop(struct net_device *dev)
	/* force reclaim of all tx buffers */
	bcm_enet_tx_reclaim(dev, 1);

-	/* free the rx skb ring */
-	for (i = 0; i < priv->rx_ring_size; i++) {
-		struct bcm_enet_desc *desc;
-
-		if (!priv->rx_skb[i])
-			continue;
-
-		desc = &priv->rx_desc_cpu[i];
-		dma_unmap_single(kdev, desc->address, priv->rx_skb_size,
-				 DMA_FROM_DEVICE);
-		kfree_skb(priv->rx_skb[i]);
-	}
+	/* free the rx buffer ring */
+	bcm_enet_free_rx_buf_ring(kdev, priv);

	/* free remaining allocated memory */
-	kfree(priv->rx_skb);
	kfree(priv->tx_skb);
	dma_free_coherent(kdev, priv->rx_desc_alloc_size,
			  priv->rx_desc_cpu, priv->rx_desc_dma);
@@ -2659,6 +2658,7 @@ static int bcm_enetsw_probe(struct platform_device *pdev)
	priv->rx_ring_size = BCMENET_DEF_RX_DESC;
	priv->tx_ring_size = BCMENET_DEF_TX_DESC;
	priv->dma_maxburst = BCMENETSW_DMA_MAXBURST;
+	priv->rx_buf_offset = NET_SKB_PAD + NET_IP_ALIGN;

	pd = dev_get_platdata(&pdev->dev);
	if (pd) {
+10 −4
@@ -230,11 +230,17 @@ struct bcm_enet_priv {
	/* next dirty rx descriptor to refill */
	int rx_dirty_desc;

-	/* size of allocated rx skbs */
-	unsigned int rx_skb_size;
+	/* size of allocated rx buffers */
+	unsigned int rx_buf_size;

-	/* list of skb given to hw for rx */
-	struct sk_buff **rx_skb;
+	/* allocated rx buffer offset */
+	unsigned int rx_buf_offset;
+
+	/* size of allocated rx frag */
+	unsigned int rx_frag_size;
+
+	/* list of buffer given to hw for rx */
+	void **rx_buf;

	/* used when rx skb allocation failed, so we defer rx queue
	 * refill */