Commit 0bc7f8d5 authored by David S. Miller
Browse files

Merge branch 'hns3-misc'



Huazhong Tan says:

====================
net: hns3: misc updates for -next

This series includes some updates for the HNS3 ethernet driver.

   flow director configuration").
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
parents a180be79 97b9e5c1
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -579,7 +579,7 @@ struct hnae3_ae_ops {
				      int vector_num,
				      struct hnae3_ring_chain_node *vr_chain);

	int (*reset_queue)(struct hnae3_handle *handle, u16 queue_id);
	int (*reset_queue)(struct hnae3_handle *handle);
	u32 (*get_fw_version)(struct hnae3_handle *handle);
	void (*get_mdix_mode)(struct hnae3_handle *handle,
			      u8 *tp_mdix_ctrl, u8 *tp_mdix);
+101 −52
Original line number Diff line number Diff line
@@ -694,7 +694,7 @@ void hns3_enable_vlan_filter(struct net_device *netdev, bool enable)
}

static int hns3_set_tso(struct sk_buff *skb, u32 *paylen_fdop_ol4cs,
			u16 *mss, u32 *type_cs_vlan_tso)
			u16 *mss, u32 *type_cs_vlan_tso, u32 *send_bytes)
{
	u32 l4_offset, hdr_len;
	union l3_hdr_info l3;
@@ -750,6 +750,8 @@ static int hns3_set_tso(struct sk_buff *skb, u32 *paylen_fdop_ol4cs,
				     (__force __wsum)htonl(l4_paylen));
	}

	*send_bytes = (skb_shinfo(skb)->gso_segs - 1) * hdr_len + skb->len;

	/* find the txbd field values */
	*paylen_fdop_ol4cs = skb->len - hdr_len;
	hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_TSO_B, 1);
@@ -1076,7 +1078,8 @@ static bool hns3_check_hw_tx_csum(struct sk_buff *skb)
}

static int hns3_fill_skb_desc(struct hns3_enet_ring *ring,
			      struct sk_buff *skb, struct hns3_desc *desc)
			      struct sk_buff *skb, struct hns3_desc *desc,
			      struct hns3_desc_cb *desc_cb)
{
	u32 ol_type_vlan_len_msec = 0;
	u32 paylen_ol4cs = skb->len;
@@ -1105,6 +1108,8 @@ static int hns3_fill_skb_desc(struct hns3_enet_ring *ring,
			       1);
	}

	desc_cb->send_bytes = skb->len;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		u8 ol4_proto, il4_proto;

@@ -1140,7 +1145,7 @@ static int hns3_fill_skb_desc(struct hns3_enet_ring *ring,
		}

		ret = hns3_set_tso(skb, &paylen_ol4cs, &mss_hw_csum,
				   &type_cs_vlan_tso);
				   &type_cs_vlan_tso, &desc_cb->send_bytes);
		if (unlikely(ret < 0)) {
			u64_stats_update_begin(&ring->syncp);
			ring->stats.tx_tso_err++;
@@ -1275,30 +1280,29 @@ static unsigned int hns3_skb_bd_num(struct sk_buff *skb, unsigned int *bd_size,
}

static unsigned int hns3_tx_bd_num(struct sk_buff *skb, unsigned int *bd_size,
				   u8 max_non_tso_bd_num)
				   u8 max_non_tso_bd_num, unsigned int bd_num,
				   unsigned int recursion_level)
{
#define HNS3_MAX_RECURSION_LEVEL	24

	struct sk_buff *frag_skb;
	unsigned int bd_num = 0;

	/* If the total len is within the max bd limit */
	if (likely(skb->len <= HNS3_MAX_BD_SIZE && !skb_has_frag_list(skb) &&
	if (likely(skb->len <= HNS3_MAX_BD_SIZE && !recursion_level &&
		   !skb_has_frag_list(skb) &&
		   skb_shinfo(skb)->nr_frags < max_non_tso_bd_num))
		return skb_shinfo(skb)->nr_frags + 1U;

	/* The below case will always be linearized, return
	 * HNS3_MAX_BD_NUM_TSO + 1U to make sure it is linearized.
	 */
	if (unlikely(skb->len > HNS3_MAX_TSO_SIZE ||
		     (!skb_is_gso(skb) && skb->len >
		      HNS3_MAX_NON_TSO_SIZE(max_non_tso_bd_num))))
		return HNS3_MAX_TSO_BD_NUM + 1U;
	if (unlikely(recursion_level >= HNS3_MAX_RECURSION_LEVEL))
		return UINT_MAX;

	bd_num = hns3_skb_bd_num(skb, bd_size, bd_num);
	if (!skb_has_frag_list(skb) || bd_num > HNS3_MAX_TSO_BD_NUM)
		return bd_num;

	skb_walk_frags(skb, frag_skb) {
		bd_num = hns3_skb_bd_num(frag_skb, bd_size, bd_num);
		bd_num = hns3_tx_bd_num(frag_skb, bd_size, max_non_tso_bd_num,
					bd_num, recursion_level + 1);
		if (bd_num > HNS3_MAX_TSO_BD_NUM)
			return bd_num;
	}
@@ -1358,6 +1362,43 @@ void hns3_shinfo_pack(struct skb_shared_info *shinfo, __u32 *size)
		size[i] = skb_frag_size(&shinfo->frags[i]);
}

/* Try to linearize an skb whose BD (buffer descriptor) estimate exceeds
 * the driver's limits.  Returns 0 on success, or -ENOMEM (after bumping
 * the matching per-ring error counter under the u64_stats sync) when
 * linearization cannot help or fails.
 */
static int hns3_skb_linearize(struct hns3_enet_ring *ring,
			      struct sk_buff *skb,
			      u8 max_non_tso_bd_num,
			      unsigned int bd_num)
{
	/* 'bd_num == UINT_MAX' means the skb' fraglist has a
	 * recursion level of over HNS3_MAX_RECURSION_LEVEL.
	 */
	if (bd_num == UINT_MAX) {
		u64_stats_update_begin(&ring->syncp);
		ring->stats.over_max_recursion++;
		u64_stats_update_end(&ring->syncp);
		return -ENOMEM;
	}

	/* The skb->len has exceeded the hw limitation, linearization
	 * will not help.
	 */
	if (skb->len > HNS3_MAX_TSO_SIZE ||
	    (!skb_is_gso(skb) && skb->len >
	     HNS3_MAX_NON_TSO_SIZE(max_non_tso_bd_num))) {
		u64_stats_update_begin(&ring->syncp);
		ring->stats.hw_limitation++;
		u64_stats_update_end(&ring->syncp);
		return -ENOMEM;
	}

	/* __skb_linearize() returns non-zero when it cannot allocate the
	 * contiguous buffer; count that as a software error.
	 */
	if (__skb_linearize(skb)) {
		u64_stats_update_begin(&ring->syncp);
		ring->stats.sw_err_cnt++;
		u64_stats_update_end(&ring->syncp);
		return -ENOMEM;
	}

	return 0;
}

static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring,
				  struct net_device *netdev,
				  struct sk_buff *skb)
@@ -1367,7 +1408,7 @@ static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring,
	unsigned int bd_size[HNS3_MAX_TSO_BD_NUM + 1U];
	unsigned int bd_num;

	bd_num = hns3_tx_bd_num(skb, bd_size, max_non_tso_bd_num);
	bd_num = hns3_tx_bd_num(skb, bd_size, max_non_tso_bd_num, 0, 0);
	if (unlikely(bd_num > max_non_tso_bd_num)) {
		if (bd_num <= HNS3_MAX_TSO_BD_NUM && skb_is_gso(skb) &&
		    !hns3_skb_need_linearized(skb, bd_size, bd_num,
@@ -1376,16 +1417,11 @@ static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring,
			goto out;
		}

		if (__skb_linearize(skb))
		if (hns3_skb_linearize(ring, skb, max_non_tso_bd_num,
				       bd_num))
			return -ENOMEM;

		bd_num = hns3_tx_bd_count(skb->len);
		if ((skb_is_gso(skb) && bd_num > HNS3_MAX_TSO_BD_NUM) ||
		    (!skb_is_gso(skb) &&
		     bd_num > max_non_tso_bd_num)) {
			trace_hns3_over_max_bd(skb);
			return -ENOMEM;
		}

		u64_stats_update_begin(&ring->syncp);
		ring->stats.tx_copy++;
@@ -1409,6 +1445,10 @@ static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring,
		return bd_num;
	}

	u64_stats_update_begin(&ring->syncp);
	ring->stats.tx_busy++;
	u64_stats_update_end(&ring->syncp);

	return -EBUSY;
}

@@ -1456,6 +1496,7 @@ static int hns3_fill_skb_to_desc(struct hns3_enet_ring *ring,
				 struct sk_buff *skb, enum hns_desc_type type)
{
	unsigned int size = skb_headlen(skb);
	struct sk_buff *frag_skb;
	int i, ret, bd_num = 0;

	if (size) {
@@ -1480,6 +1521,15 @@ static int hns3_fill_skb_to_desc(struct hns3_enet_ring *ring,
		bd_num += ret;
	}

	skb_walk_frags(skb, frag_skb) {
		ret = hns3_fill_skb_to_desc(ring, frag_skb,
					    DESC_TYPE_FRAGLIST_SKB);
		if (unlikely(ret < 0))
			return ret;

		bd_num += ret;
	}

	return bd_num;
}

@@ -1508,16 +1558,20 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hns3_enet_ring *ring = &priv->ring[skb->queue_mapping];
	struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
	struct netdev_queue *dev_queue;
	int pre_ntu, next_to_use_head;
	struct sk_buff *frag_skb;
	int bd_num = 0;
	bool doorbell;
	int ret;

	/* Hardware can only handle short frames above 32 bytes */
	if (skb_put_padto(skb, HNS3_MIN_TX_LEN)) {
		hns3_tx_doorbell(ring, 0, !netdev_xmit_more());

		u64_stats_update_begin(&ring->syncp);
		ring->stats.sw_err_cnt++;
		u64_stats_update_end(&ring->syncp);

		return NETDEV_TX_OK;
	}

@@ -1527,15 +1581,8 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
	ret = hns3_nic_maybe_stop_tx(ring, netdev, skb);
	if (unlikely(ret <= 0)) {
		if (ret == -EBUSY) {
			u64_stats_update_begin(&ring->syncp);
			ring->stats.tx_busy++;
			u64_stats_update_end(&ring->syncp);
			hns3_tx_doorbell(ring, 0, true);
			return NETDEV_TX_BUSY;
		} else if (ret == -ENOMEM) {
			u64_stats_update_begin(&ring->syncp);
			ring->stats.sw_err_cnt++;
			u64_stats_update_end(&ring->syncp);
		}

		hns3_rl_err(netdev, "xmit error: %d!\n", ret);
@@ -1544,25 +1591,19 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)

	next_to_use_head = ring->next_to_use;

	ret = hns3_fill_skb_desc(ring, skb, &ring->desc[ring->next_to_use]);
	ret = hns3_fill_skb_desc(ring, skb, &ring->desc[ring->next_to_use],
				 desc_cb);
	if (unlikely(ret < 0))
		goto fill_err;

	/* 'ret < 0' means filling error, 'ret == 0' means skb->len is
	 * zero, which is unlikely, and 'ret > 0' means how many tx desc
	 * need to be notified to the hw.
	 */
	ret = hns3_fill_skb_to_desc(ring, skb, DESC_TYPE_SKB);
	if (unlikely(ret < 0))
	if (unlikely(ret <= 0))
		goto fill_err;

	bd_num += ret;

	skb_walk_frags(skb, frag_skb) {
		ret = hns3_fill_skb_to_desc(ring, frag_skb,
					    DESC_TYPE_FRAGLIST_SKB);
		if (unlikely(ret < 0))
			goto fill_err;

		bd_num += ret;
	}

	pre_ntu = ring->next_to_use ? (ring->next_to_use - 1) :
					(ring->desc_num - 1);
	ring->desc[pre_ntu].tx.bdtp_fe_sc_vld_ra_ri |=
@@ -1571,9 +1612,9 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)

	/* Complete translate all packets */
	dev_queue = netdev_get_tx_queue(netdev, ring->queue_index);
	doorbell = __netdev_tx_sent_queue(dev_queue, skb->len,
	doorbell = __netdev_tx_sent_queue(dev_queue, desc_cb->send_bytes,
					  netdev_xmit_more());
	hns3_tx_doorbell(ring, bd_num, doorbell);
	hns3_tx_doorbell(ring, ret, doorbell);

	return NETDEV_TX_OK;

@@ -1745,11 +1786,15 @@ static void hns3_nic_get_stats64(struct net_device *netdev,
			tx_drop += ring->stats.tx_l4_proto_err;
			tx_drop += ring->stats.tx_l2l3l4_err;
			tx_drop += ring->stats.tx_tso_err;
			tx_drop += ring->stats.over_max_recursion;
			tx_drop += ring->stats.hw_limitation;
			tx_errors += ring->stats.sw_err_cnt;
			tx_errors += ring->stats.tx_vlan_err;
			tx_errors += ring->stats.tx_l4_proto_err;
			tx_errors += ring->stats.tx_l2l3l4_err;
			tx_errors += ring->stats.tx_tso_err;
			tx_errors += ring->stats.over_max_recursion;
			tx_errors += ring->stats.hw_limitation;
		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));

		/* fetch the rx stats */
@@ -2688,8 +2733,12 @@ static bool hns3_nic_reclaim_desc(struct hns3_enet_ring *ring,
			break;

		desc_cb = &ring->desc_cb[ntc];
		(*pkts) += (desc_cb->type == DESC_TYPE_SKB);
		(*bytes) += desc_cb->length;

		if (desc_cb->type == DESC_TYPE_SKB) {
			(*pkts)++;
			(*bytes) += desc_cb->send_bytes;
		}

		/* desc_cb will be cleaned, after hnae3_free_buffer_detach */
		hns3_free_buffer_detach(ring, ntc, budget);

@@ -4455,11 +4504,11 @@ int hns3_nic_reset_all_ring(struct hnae3_handle *h)
	int i, j;
	int ret;

	for (i = 0; i < h->kinfo.num_tqps; i++) {
		ret = h->ae_algo->ops->reset_queue(h, i);
	ret = h->ae_algo->ops->reset_queue(h);
	if (ret)
		return ret;

	for (i = 0; i < h->kinfo.num_tqps; i++) {
		hns3_init_ring_hw(&priv->ring[i]);

		/* We need to clear tx ring here because self test will
+8 −1
Original line number Diff line number Diff line
@@ -298,7 +298,12 @@ struct hns3_desc_cb {

	/* priv data for the desc, e.g. skb when use with ip stack */
	void *priv;
	u32 page_offset;

	union {
		u32 page_offset;	/* for rx */
		u32 send_bytes;		/* for tx */
	};

	u32 length;     /* length of the buffer */

	u16 reuse_flag;
@@ -376,6 +381,8 @@ struct ring_stats {
			u64 tx_l4_proto_err;
			u64 tx_l2l3l4_err;
			u64 tx_tso_err;
			u64 over_max_recursion;
			u64 hw_limitation;
		};
		struct {
			u64 rx_pkts;
+2 −0
Original line number Diff line number Diff line
@@ -44,6 +44,8 @@ static const struct hns3_stats hns3_txq_stats[] = {
	HNS3_TQP_STAT("l4_proto_err", tx_l4_proto_err),
	HNS3_TQP_STAT("l2l3l4_err", tx_l2l3l4_err),
	HNS3_TQP_STAT("tso_err", tx_tso_err),
	HNS3_TQP_STAT("over_max_recursion", over_max_recursion),
	HNS3_TQP_STAT("hw_limitation", hw_limitation),
};

#define HNS3_TXQ_STATS_COUNT ARRAY_SIZE(hns3_txq_stats)
+7 −1
Original line number Diff line number Diff line
@@ -948,10 +948,16 @@ struct hclge_reset_tqp_queue_cmd {

#define HCLGE_CFG_RESET_MAC_B		3
#define HCLGE_CFG_RESET_FUNC_B		7
#define HCLGE_CFG_RESET_RCB_B		1
struct hclge_reset_cmd {
	u8 mac_func_reset;
	u8 fun_reset_vfid;
	u8 rsv[22];
	u8 fun_reset_rcb;
	u8 rsv;
	__le16 fun_reset_rcb_vqid_start;
	__le16 fun_reset_rcb_vqid_num;
	u8 fun_reset_rcb_return_status;
	u8 rsv1[15];
};

#define HCLGE_PF_RESET_DONE_BIT		BIT(0)
Loading