Commit 223e4bb7 authored by Yunsheng Lin, committed by Yang Yingliang
Browse files

net: hns3: batch tx doorbell operation



mainline inclusion
from mainline-v5.10-rc1
commit f6061a05
category: feature
bugzilla: NA
CVE: NA

----------------------------

Use netdev_xmit_more() to defer the tx doorbell operation when
skbs are passed to the driver continuously. By doing this we
can improve the overall xmit performance by avoiding some doorbell
operations.

Also, the tx_err_cnt stat is not used, so rename it to tx_more
stat.

Signed-off-by: Yunsheng Lin <linyunsheng@huawei.com>
Signed-off-by: Huazhong Tan <tanhuazhong@huawei.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Yonglong Liu <liuyonglong@huawei.com>
Reviewed-by: li yongxin <liyongxin1@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
parent 547733a4
Loading
Loading
Loading
Loading
+3 −3
Original line number Diff line number Diff line
@@ -9,7 +9,7 @@ const struct ring_stats_name hns3_ring_stats_name[] = {
	{"seg_pkt_cnt", SEG_PKT_CNT},
	{"tx_pkts", TX_PKTS},
	{"tx_bytes", TX_BYTES},
	{"tx_err_cnt", TX_ERR_CNT},
	{"tx_more", TX_MORE},
	{"restart_queue", RESTART_QUEUE},
	{"tx_busy", TX_BUSY},
	{"rx_pkts", RX_PKTS},
@@ -58,8 +58,8 @@ static int hns3_get_stat_val(struct ring_stats *r_stats, char *val_name,
	case TX_BYTES:
		*val = &r_stats->tx_bytes;
		break;
	case TX_ERR_CNT:
		*val = &r_stats->tx_err_cnt;
	case TX_MORE:
		*val = &r_stats->tx_more;
		break;
	case RESTART_QUEUE:
		*val = &r_stats->restart_queue;
+1 −1
Original line number Diff line number Diff line
@@ -24,7 +24,7 @@ enum stats_name_type {
	SEG_PKT_CNT,
	TX_PKTS,
	TX_BYTES,
	TX_ERR_CNT,
	TX_MORE,
	RESTART_QUEUE,
	TX_BUSY,
	RX_PKTS,
+41 −10
Original line number Diff line number Diff line
@@ -1388,6 +1388,27 @@ static int hns3_fill_skb_to_desc(struct hns3_enet_ring *ring,
	return bd_num;
}

/* Ring (or defer) the TX doorbell for @ring.
 *
 * @num newly filled buffer descriptors are added to the ring's
 * pending_buf count. When @doorbell is false, the hardware
 * notification is deferred (the stack has indicated more skbs are
 * coming — see netdev_xmit_more()) and the tx_more stat is bumped.
 * When @doorbell is true, all accumulated pending buffers are
 * submitted to hardware in a single doorbell write.
 */
static void hns3_tx_doorbell(struct hns3_enet_ring *ring, int num,
			     bool doorbell)
{
	/* num may be 0 when the caller only wants to flush what is
	 * already pending (or just record a deferral).
	 */
	ring->pending_buf += num;

	if (!doorbell) {
		/* Doorbell deferred to a later xmit; count it under the
		 * per-ring u64_stats seqlock.
		 */
		u64_stats_update_begin(&ring->syncp);
		ring->stats.tx_more++;
		u64_stats_update_end(&ring->syncp);
		return;
	}

	/* Nothing accumulated — no doorbell write needed. */
	if (!ring->pending_buf)
		return;

	wmb(); /* Commit all data before submit */

	/* NOTE(review): hnae3_queue_xmit presumably writes pending_buf
	 * to the queue's doorbell register — project helper, confirm in
	 * hnae3 headers.
	 */
	hnae3_queue_xmit(ring->tqp, ring->pending_buf);
	ring->pending_buf = 0;
}

netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
@@ -1399,8 +1420,10 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
	int ret;

	/* Hardware can only handle short frames above 32 bytes */
	if (skb_put_padto(skb, HNS3_MIN_TX_LEN))
	if (skb_put_padto(skb, HNS3_MIN_TX_LEN)) {
		hns3_tx_doorbell(ring, 0, !netdev_xmit_more());
		return NETDEV_TX_OK;
	}

	/* Prefetch the data used later */
	prefetch(skb->data);
@@ -1411,6 +1434,7 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
			u64_stats_update_begin(&ring->syncp);
			ring->stats.tx_busy++;
			u64_stats_update_end(&ring->syncp);
			hns3_tx_doorbell(ring, 0, true);
			return NETDEV_TX_BUSY;
		} else if (ret == -ENOMEM) {
			u64_stats_update_begin(&ring->syncp);
@@ -1453,11 +1477,14 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)

	/* Complete translate all packets */
	dev_queue = netdev_get_tx_queue(netdev, ring->queue_index);
	if (!netdev_xmit_more()) {
		netdev_tx_sent_queue(dev_queue, skb->len);

	wmb(); /* Commit all data before submit */

	hnae3_queue_xmit(ring->tqp, bd_num);
		hns3_tx_doorbell(ring, bd_num, true);
	} else {
		dql_queued(&dev_queue->dql, skb->len);
		hns3_tx_doorbell(ring, bd_num,
				 netif_tx_queue_stopped(dev_queue));
	}

	return NETDEV_TX_OK;

@@ -1466,6 +1493,7 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)

out_err_tx_ok:
	dev_kfree_skb_any(skb);
	hns3_tx_doorbell(ring, 0, !netdev_xmit_more());
	return NETDEV_TX_OK;
}

@@ -1896,13 +1924,14 @@ bool hns3_get_tx_timeo_queue_info(struct net_device *ndev)
		    tx_ring->next_to_clean, napi->state);

	netdev_info(ndev,
		    "tx_pkts: %llu, tx_bytes: %llu, io_err_cnt: %llu, sw_err_cnt: %llu\n",
		    "tx_pkts: %llu, tx_bytes: %llu, io_err_cnt: %llu, sw_err_cnt: %llu, tx_pending: %d\n",
		    tx_ring->stats.tx_pkts, tx_ring->stats.tx_bytes,
		    tx_ring->stats.io_err_cnt, tx_ring->stats.sw_err_cnt);
		    tx_ring->stats.io_err_cnt, tx_ring->stats.sw_err_cnt,
		    tx_ring->pending_buf);

	netdev_info(ndev,
		    "seg_pkt_cnt: %llu, tx_err_cnt: %llu, restart_queue: %llu, tx_busy: %llu\n",
		    tx_ring->stats.seg_pkt_cnt, tx_ring->stats.tx_err_cnt,
		    "seg_pkt_cnt: %llu, tx_more: %llu, restart_queue: %llu, tx_busy: %llu\n",
		    tx_ring->stats.seg_pkt_cnt, tx_ring->stats.tx_more,
		    tx_ring->stats.restart_queue, tx_ring->stats.tx_busy);

	/* When mac received many pause frames continuous, it's unable to send
@@ -4285,6 +4314,8 @@ static void hns3_clear_tx_ring(struct hns3_enet_ring *ring)
		hns3_free_buffer_detach(ring, ring->next_to_clean);
		ring_ptr_move_fw(ring, next_to_clean);
	}

	ring->pending_buf = 0;
}

static int hns3_clear_rx_ring(struct hns3_enet_ring *ring)
+1 −1
Original line number Diff line number Diff line
@@ -377,7 +377,7 @@ struct ring_stats {
		struct {
			u64 tx_pkts;
			u64 tx_bytes;
			u64 tx_err_cnt;
			u64 tx_more;
			u64 restart_queue;
			u64 tx_busy;
			u64 tx_copy;
+1 −1
Original line number Diff line number Diff line
@@ -36,7 +36,7 @@ static const struct hns3_stats hns3_txq_stats[] = {
	HNS3_TQP_STAT("seg_pkt_cnt", seg_pkt_cnt),
	HNS3_TQP_STAT("packets", tx_pkts),
	HNS3_TQP_STAT("bytes", tx_bytes),
	HNS3_TQP_STAT("errors", tx_err_cnt),
	HNS3_TQP_STAT("more", tx_more),
	HNS3_TQP_STAT("wake", restart_queue),
	HNS3_TQP_STAT("busy", tx_busy),
	HNS3_TQP_STAT("copy", tx_copy),