Commit 23044d77 authored by David S. Miller's avatar David S. Miller
Browse files


Tony Nguyen says:

====================
40GbE Intel Wired LAN Driver Updates 2021-12-17

Brett Creeley says:

This patch series adds support in the iavf driver for communicating and
using VIRTCHNL_VF_OFFLOAD_VLAN_V2. The current VIRTCHNL_VF_OFFLOAD_VLAN
is very limited and covers all 802.1Q VLAN offloads and filtering with
no granularity.

The new VIRTCHNL_VF_OFFLOAD_VLAN_V2 adds more granularity, flexibility,
and support for 802.1ad offloads and filtering. This includes the VF
negotiating which VLAN offloads/filtering it is allowed, where VLAN tags
should be inserted into and/or stripped from descriptors, and the
supported VLAN protocols.
====================

Signed-off-by: default avatarDavid S. Miller <davem@davemloft.net>
parents 7e1c5d7b 92fc5085
Loading
Loading
Loading
Loading
+72 −33
Original line number Diff line number Diff line
@@ -55,7 +55,8 @@ enum iavf_vsi_state_t {
struct iavf_vsi {
	struct iavf_adapter *back;
	struct net_device *netdev;
	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
	unsigned long active_cvlans[BITS_TO_LONGS(VLAN_N_VID)];
	unsigned long active_svlans[BITS_TO_LONGS(VLAN_N_VID)];
	u16 seid;
	u16 id;
	DECLARE_BITMAP(state, __IAVF_VSI_STATE_SIZE__);
@@ -146,9 +147,15 @@ struct iavf_mac_filter {
	};
};

#define IAVF_VLAN(vid, tpid) ((struct iavf_vlan){ vid, tpid })
struct iavf_vlan {
	u16 vid;
	u16 tpid;
};

struct iavf_vlan_filter {
	struct list_head list;
	u16 vlan;
	struct iavf_vlan vlan;
	bool remove;		/* filter needs to be removed */
	bool add;		/* filter needs to be added */
};
@@ -181,6 +188,8 @@ enum iavf_state_t {
	__IAVF_REMOVE,		/* driver is being unloaded */
	__IAVF_INIT_VERSION_CHECK,	/* aq msg sent, awaiting reply */
	__IAVF_INIT_GET_RESOURCES,	/* aq msg sent, awaiting reply */
	__IAVF_INIT_GET_OFFLOAD_VLAN_V2_CAPS,
	__IAVF_INIT_CONFIG_ADAPTER,
	__IAVF_INIT_SW,		/* got resources, setting up structs */
	__IAVF_INIT_FAILED,	/* init failed, restarting procedure */
	__IAVF_RESETTING,		/* in reset */
@@ -278,38 +287,47 @@ struct iavf_adapter {
/* duplicates for common code */
#define IAVF_FLAG_DCB_ENABLED			0
	/* flags for admin queue service task */
	u32 aq_required;
#define IAVF_FLAG_AQ_ENABLE_QUEUES		BIT(0)
#define IAVF_FLAG_AQ_DISABLE_QUEUES		BIT(1)
#define IAVF_FLAG_AQ_ADD_MAC_FILTER		BIT(2)
#define IAVF_FLAG_AQ_ADD_VLAN_FILTER		BIT(3)
#define IAVF_FLAG_AQ_DEL_MAC_FILTER		BIT(4)
#define IAVF_FLAG_AQ_DEL_VLAN_FILTER		BIT(5)
#define IAVF_FLAG_AQ_CONFIGURE_QUEUES		BIT(6)
#define IAVF_FLAG_AQ_MAP_VECTORS		BIT(7)
#define IAVF_FLAG_AQ_HANDLE_RESET		BIT(8)
#define IAVF_FLAG_AQ_CONFIGURE_RSS		BIT(9) /* direct AQ config */
#define IAVF_FLAG_AQ_GET_CONFIG		BIT(10)
	u64 aq_required;
#define IAVF_FLAG_AQ_ENABLE_QUEUES		BIT_ULL(0)
#define IAVF_FLAG_AQ_DISABLE_QUEUES		BIT_ULL(1)
#define IAVF_FLAG_AQ_ADD_MAC_FILTER		BIT_ULL(2)
#define IAVF_FLAG_AQ_ADD_VLAN_FILTER		BIT_ULL(3)
#define IAVF_FLAG_AQ_DEL_MAC_FILTER		BIT_ULL(4)
#define IAVF_FLAG_AQ_DEL_VLAN_FILTER		BIT_ULL(5)
#define IAVF_FLAG_AQ_CONFIGURE_QUEUES		BIT_ULL(6)
#define IAVF_FLAG_AQ_MAP_VECTORS		BIT_ULL(7)
#define IAVF_FLAG_AQ_HANDLE_RESET		BIT_ULL(8)
#define IAVF_FLAG_AQ_CONFIGURE_RSS		BIT_ULL(9) /* direct AQ config */
#define IAVF_FLAG_AQ_GET_CONFIG			BIT_ULL(10)
/* Newer style, RSS done by the PF so we can ignore hardware vagaries. */
#define IAVF_FLAG_AQ_GET_HENA			BIT(11)
#define IAVF_FLAG_AQ_SET_HENA			BIT(12)
#define IAVF_FLAG_AQ_SET_RSS_KEY		BIT(13)
#define IAVF_FLAG_AQ_SET_RSS_LUT		BIT(14)
#define IAVF_FLAG_AQ_REQUEST_PROMISC		BIT(15)
#define IAVF_FLAG_AQ_RELEASE_PROMISC		BIT(16)
#define IAVF_FLAG_AQ_REQUEST_ALLMULTI		BIT(17)
#define IAVF_FLAG_AQ_RELEASE_ALLMULTI		BIT(18)
#define IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING	BIT(19)
#define IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING	BIT(20)
#define IAVF_FLAG_AQ_ENABLE_CHANNELS		BIT(21)
#define IAVF_FLAG_AQ_DISABLE_CHANNELS		BIT(22)
#define IAVF_FLAG_AQ_ADD_CLOUD_FILTER		BIT(23)
#define IAVF_FLAG_AQ_DEL_CLOUD_FILTER		BIT(24)
#define IAVF_FLAG_AQ_ADD_FDIR_FILTER		BIT(25)
#define IAVF_FLAG_AQ_DEL_FDIR_FILTER		BIT(26)
#define IAVF_FLAG_AQ_ADD_ADV_RSS_CFG		BIT(27)
#define IAVF_FLAG_AQ_DEL_ADV_RSS_CFG		BIT(28)
#define IAVF_FLAG_AQ_REQUEST_STATS		BIT(29)
#define IAVF_FLAG_AQ_GET_HENA			BIT_ULL(11)
#define IAVF_FLAG_AQ_SET_HENA			BIT_ULL(12)
#define IAVF_FLAG_AQ_SET_RSS_KEY		BIT_ULL(13)
#define IAVF_FLAG_AQ_SET_RSS_LUT		BIT_ULL(14)
#define IAVF_FLAG_AQ_REQUEST_PROMISC		BIT_ULL(15)
#define IAVF_FLAG_AQ_RELEASE_PROMISC		BIT_ULL(16)
#define IAVF_FLAG_AQ_REQUEST_ALLMULTI		BIT_ULL(17)
#define IAVF_FLAG_AQ_RELEASE_ALLMULTI		BIT_ULL(18)
#define IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING	BIT_ULL(19)
#define IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING	BIT_ULL(20)
#define IAVF_FLAG_AQ_ENABLE_CHANNELS		BIT_ULL(21)
#define IAVF_FLAG_AQ_DISABLE_CHANNELS		BIT_ULL(22)
#define IAVF_FLAG_AQ_ADD_CLOUD_FILTER		BIT_ULL(23)
#define IAVF_FLAG_AQ_DEL_CLOUD_FILTER		BIT_ULL(24)
#define IAVF_FLAG_AQ_ADD_FDIR_FILTER		BIT_ULL(25)
#define IAVF_FLAG_AQ_DEL_FDIR_FILTER		BIT_ULL(26)
#define IAVF_FLAG_AQ_ADD_ADV_RSS_CFG		BIT_ULL(27)
#define IAVF_FLAG_AQ_DEL_ADV_RSS_CFG		BIT_ULL(28)
#define IAVF_FLAG_AQ_REQUEST_STATS		BIT_ULL(29)
#define IAVF_FLAG_AQ_GET_OFFLOAD_VLAN_V2_CAPS	BIT_ULL(30)
#define IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_STRIPPING		BIT_ULL(31)
#define IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_STRIPPING	BIT_ULL(32)
#define IAVF_FLAG_AQ_ENABLE_STAG_VLAN_STRIPPING		BIT_ULL(33)
#define IAVF_FLAG_AQ_DISABLE_STAG_VLAN_STRIPPING	BIT_ULL(34)
#define IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_INSERTION		BIT_ULL(35)
#define IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_INSERTION	BIT_ULL(36)
#define IAVF_FLAG_AQ_ENABLE_STAG_VLAN_INSERTION		BIT_ULL(37)
#define IAVF_FLAG_AQ_DISABLE_STAG_VLAN_INSERTION	BIT_ULL(38)

	/* OS defined structs */
	struct net_device *netdev;
@@ -349,6 +367,14 @@ struct iavf_adapter {
			VIRTCHNL_VF_OFFLOAD_RSS_PF)))
#define VLAN_ALLOWED(_a) ((_a)->vf_res->vf_cap_flags & \
			  VIRTCHNL_VF_OFFLOAD_VLAN)
#define VLAN_V2_ALLOWED(_a) ((_a)->vf_res->vf_cap_flags & \
			     VIRTCHNL_VF_OFFLOAD_VLAN_V2)
#define VLAN_V2_FILTERING_ALLOWED(_a) \
	(VLAN_V2_ALLOWED((_a)) && \
	 ((_a)->vlan_v2_caps.filtering.filtering_support.outer || \
	  (_a)->vlan_v2_caps.filtering.filtering_support.inner))
#define VLAN_FILTERING_ALLOWED(_a) \
	(VLAN_ALLOWED((_a)) || VLAN_V2_FILTERING_ALLOWED((_a)))
#define ADV_LINK_SUPPORT(_a) ((_a)->vf_res->vf_cap_flags & \
			      VIRTCHNL_VF_CAP_ADV_LINK_SPEED)
#define FDIR_FLTR_SUPPORT(_a) ((_a)->vf_res->vf_cap_flags & \
@@ -360,6 +386,7 @@ struct iavf_adapter {
	struct virtchnl_version_info pf_version;
#define PF_IS_V11(_a) (((_a)->pf_version.major == 1) && \
		       ((_a)->pf_version.minor == 1))
	struct virtchnl_vlan_caps vlan_v2_caps;
	u16 msg_enable;
	struct iavf_eth_stats current_stats;
	struct iavf_vsi vsi;
@@ -448,6 +475,7 @@ static inline void iavf_change_state(struct iavf_adapter *adapter,
int iavf_up(struct iavf_adapter *adapter);
void iavf_down(struct iavf_adapter *adapter);
int iavf_process_config(struct iavf_adapter *adapter);
int iavf_parse_vf_resource_msg(struct iavf_adapter *adapter);
void iavf_schedule_reset(struct iavf_adapter *adapter);
void iavf_schedule_request_stats(struct iavf_adapter *adapter);
void iavf_reset(struct iavf_adapter *adapter);
@@ -466,6 +494,9 @@ int iavf_send_api_ver(struct iavf_adapter *adapter);
int iavf_verify_api_ver(struct iavf_adapter *adapter);
int iavf_send_vf_config_msg(struct iavf_adapter *adapter);
int iavf_get_vf_config(struct iavf_adapter *adapter);
int iavf_get_vf_vlan_v2_caps(struct iavf_adapter *adapter);
int iavf_send_vf_offload_vlan_v2_msg(struct iavf_adapter *adapter);
void iavf_set_queue_vlan_tag_loc(struct iavf_adapter *adapter);
void iavf_irq_enable(struct iavf_adapter *adapter, bool flush);
void iavf_configure_queues(struct iavf_adapter *adapter);
void iavf_deconfigure_queues(struct iavf_adapter *adapter);
@@ -501,6 +532,14 @@ void iavf_enable_channels(struct iavf_adapter *adapter);
void iavf_disable_channels(struct iavf_adapter *adapter);
void iavf_add_cloud_filter(struct iavf_adapter *adapter);
void iavf_del_cloud_filter(struct iavf_adapter *adapter);
void iavf_enable_vlan_stripping_v2(struct iavf_adapter *adapter, u16 tpid);
void iavf_disable_vlan_stripping_v2(struct iavf_adapter *adapter, u16 tpid);
void iavf_enable_vlan_insertion_v2(struct iavf_adapter *adapter, u16 tpid);
void iavf_disable_vlan_insertion_v2(struct iavf_adapter *adapter, u16 tpid);
void
iavf_set_vlan_offload_features(struct iavf_adapter *adapter,
			       netdev_features_t prev_features,
			       netdev_features_t features);
void iavf_add_fdir_filter(struct iavf_adapter *adapter);
void iavf_del_fdir_filter(struct iavf_adapter *adapter);
void iavf_add_adv_rss_cfg(struct iavf_adapter *adapter);
+673 −94

File changed.

Preview size limit exceeded, changes collapsed.

+33 −38
Original line number Diff line number Diff line
@@ -865,6 +865,9 @@ static void iavf_receive_skb(struct iavf_ring *rx_ring,
	if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
	    (vlan_tag & VLAN_VID_MASK))
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
	else if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_STAG_RX) &&
		 vlan_tag & VLAN_VID_MASK)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021AD), vlan_tag);

	napi_gro_receive(&q_vector->napi, skb);
}
@@ -1468,7 +1471,7 @@ static int iavf_clean_rx_irq(struct iavf_ring *rx_ring, int budget)
		struct iavf_rx_buffer *rx_buffer;
		union iavf_rx_desc *rx_desc;
		unsigned int size;
		u16 vlan_tag;
		u16 vlan_tag = 0;
		u8 rx_ptype;
		u64 qword;

@@ -1551,9 +1554,13 @@ static int iavf_clean_rx_irq(struct iavf_ring *rx_ring, int budget)
		/* populate checksum, VLAN, and protocol */
		iavf_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);


		vlan_tag = (qword & BIT(IAVF_RX_DESC_STATUS_L2TAG1P_SHIFT)) ?
			   le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) : 0;
		if (qword & BIT(IAVF_RX_DESC_STATUS_L2TAG1P_SHIFT) &&
		    rx_ring->flags & IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1)
			vlan_tag = le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1);
		if (rx_desc->wb.qword2.ext_status &
		    cpu_to_le16(BIT(IAVF_RX_DESC_EXT_STATUS_L2TAG2P_SHIFT)) &&
		    rx_ring->flags & IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2)
			vlan_tag = le16_to_cpu(rx_desc->wb.qword2.l2tag2_2);

		iavf_trace(clean_rx_irq_rx, rx_ring, rx_desc, skb);
		iavf_receive_skb(rx_ring, skb, vlan_tag);
@@ -1781,46 +1788,29 @@ int iavf_napi_poll(struct napi_struct *napi, int budget)
 * Returns an error code to indicate the frame should be dropped upon error,
 * otherwise returns 0 to indicate the flags have been set properly.
 **/
static inline int iavf_tx_prepare_vlan_flags(struct sk_buff *skb,
					     struct iavf_ring *tx_ring,
					     u32 *flags)
static void iavf_tx_prepare_vlan_flags(struct sk_buff *skb,
				       struct iavf_ring *tx_ring, u32 *flags)
{
	__be16 protocol = skb->protocol;
	u32  tx_flags = 0;

	if (protocol == htons(ETH_P_8021Q) &&
	    !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) {
		/* When HW VLAN acceleration is turned off by the user the
		 * stack sets the protocol to 8021q so that the driver
		 * can take any steps required to support the SW only
		 * VLAN handling.  In our case the driver doesn't need
		 * to take any further steps so just set the protocol
		 * to the encapsulated ethertype.

	/* stack will only request hardware VLAN insertion offload for protocols
	 * that the driver supports and has enabled
	 */
		skb->protocol = vlan_get_protocol(skb);
		goto out;
	}
	if (!skb_vlan_tag_present(skb))
		return;

	/* if we have a HW VLAN tag being added, default to the HW one */
	if (skb_vlan_tag_present(skb)) {
	tx_flags |= skb_vlan_tag_get(skb) << IAVF_TX_FLAGS_VLAN_SHIFT;
	if (tx_ring->flags & IAVF_TXR_FLAGS_VLAN_TAG_LOC_L2TAG2) {
		tx_flags |= IAVF_TX_FLAGS_HW_OUTER_SINGLE_VLAN;
	} else if (tx_ring->flags & IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1) {
		tx_flags |= IAVF_TX_FLAGS_HW_VLAN;
	/* else if it is a SW VLAN, check the next protocol and store the tag */
	} else if (protocol == htons(ETH_P_8021Q)) {
		struct vlan_hdr *vhdr, _vhdr;

		vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
		if (!vhdr)
			return -EINVAL;

		protocol = vhdr->h_vlan_encapsulated_proto;
		tx_flags |= ntohs(vhdr->h_vlan_TCI) << IAVF_TX_FLAGS_VLAN_SHIFT;
		tx_flags |= IAVF_TX_FLAGS_SW_VLAN;
	} else {
		dev_dbg(tx_ring->dev, "Unsupported Tx VLAN tag location requested\n");
		return;
	}

out:
	*flags = tx_flags;
	return 0;
}

/**
@@ -2440,8 +2430,13 @@ static netdev_tx_t iavf_xmit_frame_ring(struct sk_buff *skb,
	first->gso_segs = 1;

	/* prepare the xmit flags */
	if (iavf_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
		goto out_drop;
	iavf_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags);
	if (tx_flags & IAVF_TX_FLAGS_HW_OUTER_SINGLE_VLAN) {
		cd_type_cmd_tso_mss |= IAVF_TX_CTX_DESC_IL2TAG2 <<
			IAVF_TXD_CTX_QW1_CMD_SHIFT;
		cd_l2tag2 = (tx_flags & IAVF_TX_FLAGS_VLAN_MASK) >>
			IAVF_TX_FLAGS_VLAN_SHIFT;
	}

	/* obtain protocol of skb */
	protocol = vlan_get_protocol(skb);
+17 −13
Original line number Diff line number Diff line
@@ -252,6 +252,7 @@ static inline unsigned int iavf_txd_use_count(unsigned int size)
#define IAVF_TX_FLAGS_FSO			BIT(7)
#define IAVF_TX_FLAGS_FD_SB			BIT(9)
#define IAVF_TX_FLAGS_VXLAN_TUNNEL		BIT(10)
#define IAVF_TX_FLAGS_HW_OUTER_SINGLE_VLAN	BIT(11)
#define IAVF_TX_FLAGS_VLAN_MASK			0xffff0000
#define IAVF_TX_FLAGS_VLAN_PRIO_MASK		0xe0000000
#define IAVF_TX_FLAGS_VLAN_PRIO_SHIFT		29
@@ -362,6 +363,9 @@ struct iavf_ring {
	u16 flags;
#define IAVF_TXR_FLAGS_WB_ON_ITR		BIT(0)
#define IAVF_RXR_FLAGS_BUILD_SKB_ENABLED	BIT(1)
#define IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1	BIT(3)
#define IAVF_TXR_FLAGS_VLAN_TAG_LOC_L2TAG2	BIT(4)
#define IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2	BIT(5)

	/* stats structs */
	struct iavf_queue_stats	stats;
+465 −69
Original line number Diff line number Diff line
@@ -137,6 +137,7 @@ int iavf_send_vf_config_msg(struct iavf_adapter *adapter)
	       VIRTCHNL_VF_OFFLOAD_WB_ON_ITR |
	       VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 |
	       VIRTCHNL_VF_OFFLOAD_ENCAP |
	       VIRTCHNL_VF_OFFLOAD_VLAN_V2 |
	       VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM |
	       VIRTCHNL_VF_OFFLOAD_REQ_QUEUES |
	       VIRTCHNL_VF_OFFLOAD_ADQ |
@@ -155,6 +156,19 @@ int iavf_send_vf_config_msg(struct iavf_adapter *adapter)
					NULL, 0);
}

/**
 * iavf_send_vf_offload_vlan_v2_msg - request VLAN V2 capabilities from the PF
 * @adapter: adapter structure
 *
 * Send VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS to the PF. The aq_required bit is
 * cleared up front, before the capability check, so an unsupported request is
 * not left pending.
 *
 * Return: -EOPNOTSUPP if VIRTCHNL_VF_OFFLOAD_VLAN_V2 was not negotiated,
 * otherwise the result of sending the message to the PF.
 */
int iavf_send_vf_offload_vlan_v2_msg(struct iavf_adapter *adapter)
{
	/* clear unconditionally so the request cannot repeat forever when
	 * VLAN V2 is not supported
	 */
	adapter->aq_required &= ~IAVF_FLAG_AQ_GET_OFFLOAD_VLAN_V2_CAPS;

	if (!VLAN_V2_ALLOWED(adapter))
		return -EOPNOTSUPP;

	adapter->current_op = VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS;

	return iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS,
				NULL, 0);
}

/**
 * iavf_validate_num_queues
 * @adapter: adapter structure
@@ -235,6 +249,45 @@ int iavf_get_vf_config(struct iavf_adapter *adapter)
	return err;
}

/**
 * iavf_get_vf_vlan_v2_caps - receive the PF's reply with VLAN V2 capabilities
 * @adapter: adapter structure
 *
 * Poll the admin receive queue until the reply to
 * VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS arrives, then copy the capabilities
 * into adapter->vlan_v2_caps.
 *
 * Return: 0 on success, -ENOMEM on allocation failure, or a nonzero status
 * from the AQ/PF on error.
 */
int iavf_get_vf_vlan_v2_caps(struct iavf_adapter *adapter)
{
	struct iavf_hw *hw = &adapter->hw;
	struct iavf_arq_event_info event;
	enum virtchnl_ops op;
	enum iavf_status err;
	u16 len;

	len =  sizeof(struct virtchnl_vlan_caps);
	event.buf_len = len;
	event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
	if (!event.msg_buf) {
		err = -ENOMEM;
		goto out;
	}

	while (1) {
		/* When the AQ is empty, iavf_clean_arq_element will return
		 * nonzero and this loop will terminate.
		 */
		err = iavf_clean_arq_element(hw, &event, NULL);
		if (err)
			goto out_alloc;
		op = (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high);
		if (op == VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS)
			break;
	}

	/* cookie_low carries the PF's status for the request */
	err = (enum iavf_status)le32_to_cpu(event.desc.cookie_low);
	if (err)
		goto out_alloc;

	/* copy no more than the PF actually sent */
	memcpy(&adapter->vlan_v2_caps, event.msg_buf, min(event.msg_len, len));
out_alloc:
	kfree(event.msg_buf);
out:
	return err;
}

/**
 * iavf_configure_queues
 * @adapter: adapter structure
@@ -589,7 +642,6 @@ static void iavf_mac_add_reject(struct iavf_adapter *adapter)
 **/
void iavf_add_vlans(struct iavf_adapter *adapter)
{
	struct virtchnl_vlan_filter_list *vvfl;
	int len, i = 0, count = 0;
	struct iavf_vlan_filter *f;
	bool more = false;
@@ -607,22 +659,23 @@ void iavf_add_vlans(struct iavf_adapter *adapter)
		if (f->add)
			count++;
	}
	if (!count || !VLAN_ALLOWED(adapter)) {
	if (!count || !VLAN_FILTERING_ALLOWED(adapter)) {
		adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_VLAN_FILTER;
		spin_unlock_bh(&adapter->mac_vlan_list_lock);
		return;
	}

	if (VLAN_ALLOWED(adapter)) {
		struct virtchnl_vlan_filter_list *vvfl;

		adapter->current_op = VIRTCHNL_OP_ADD_VLAN;

	len = sizeof(struct virtchnl_vlan_filter_list) +
	      (count * sizeof(u16));
		len = sizeof(*vvfl) + (count * sizeof(u16));
		if (len > IAVF_MAX_AQ_BUF_SIZE) {
			dev_warn(&adapter->pdev->dev, "Too many add VLAN changes in one request\n");
		count = (IAVF_MAX_AQ_BUF_SIZE -
			 sizeof(struct virtchnl_vlan_filter_list)) /
			count = (IAVF_MAX_AQ_BUF_SIZE - sizeof(*vvfl)) /
				sizeof(u16);
		len = sizeof(struct virtchnl_vlan_filter_list) +
		      (count * sizeof(u16));
			len = sizeof(*vvfl) + (count * sizeof(u16));
			more = true;
		}
		vvfl = kzalloc(len, GFP_ATOMIC);
@@ -635,7 +688,7 @@ void iavf_add_vlans(struct iavf_adapter *adapter)
		vvfl->num_elements = count;
		list_for_each_entry(f, &adapter->vlan_filter_list, list) {
			if (f->add) {
			vvfl->vlan_id[i] = f->vlan;
				vvfl->vlan_id[i] = f->vlan.vid;
				i++;
				f->add = false;
				if (i == count)
@@ -649,6 +702,62 @@ void iavf_add_vlans(struct iavf_adapter *adapter)

		iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_VLAN, (u8 *)vvfl, len);
		kfree(vvfl);
	} else {
		struct virtchnl_vlan_filter_list_v2 *vvfl_v2;

		adapter->current_op = VIRTCHNL_OP_ADD_VLAN_V2;

		len = sizeof(*vvfl_v2) + ((count - 1) *
					  sizeof(struct virtchnl_vlan_filter));
		if (len > IAVF_MAX_AQ_BUF_SIZE) {
			dev_warn(&adapter->pdev->dev, "Too many add VLAN changes in one request\n");
			count = (IAVF_MAX_AQ_BUF_SIZE - sizeof(*vvfl_v2)) /
				sizeof(struct virtchnl_vlan_filter);
			len = sizeof(*vvfl_v2) +
				((count - 1) *
				 sizeof(struct virtchnl_vlan_filter));
			more = true;
		}

		vvfl_v2 = kzalloc(len, GFP_ATOMIC);
		if (!vvfl_v2) {
			spin_unlock_bh(&adapter->mac_vlan_list_lock);
			return;
		}

		vvfl_v2->vport_id = adapter->vsi_res->vsi_id;
		vvfl_v2->num_elements = count;
		list_for_each_entry(f, &adapter->vlan_filter_list, list) {
			if (f->add) {
				struct virtchnl_vlan_supported_caps *filtering_support =
					&adapter->vlan_v2_caps.filtering.filtering_support;
				struct virtchnl_vlan *vlan;

				/* give priority over outer if it's enabled */
				if (filtering_support->outer)
					vlan = &vvfl_v2->filters[i].outer;
				else
					vlan = &vvfl_v2->filters[i].inner;

				vlan->tci = f->vlan.vid;
				vlan->tpid = f->vlan.tpid;

				i++;
				f->add = false;
				if (i == count)
					break;
			}
		}

		if (!more)
			adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_VLAN_FILTER;

		spin_unlock_bh(&adapter->mac_vlan_list_lock);

		iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_VLAN_V2,
				 (u8 *)vvfl_v2, len);
		kfree(vvfl_v2);
	}
}

/**
@@ -659,7 +768,6 @@ void iavf_add_vlans(struct iavf_adapter *adapter)
 **/
void iavf_del_vlans(struct iavf_adapter *adapter)
{
	struct virtchnl_vlan_filter_list *vvfl;
	struct iavf_vlan_filter *f, *ftmp;
	int len, i = 0, count = 0;
	bool more = false;
@@ -680,29 +788,30 @@ void iavf_del_vlans(struct iavf_adapter *adapter)
		 * filters marked for removal to enable bailing out before
		 * sending a virtchnl message
		 */
		if (f->remove && !VLAN_ALLOWED(adapter)) {
		if (f->remove && !VLAN_FILTERING_ALLOWED(adapter)) {
			list_del(&f->list);
			kfree(f);
		} else if (f->remove) {
			count++;
		}
	}
	if (!count) {
	if (!count || !VLAN_FILTERING_ALLOWED(adapter)) {
		adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_VLAN_FILTER;
		spin_unlock_bh(&adapter->mac_vlan_list_lock);
		return;
	}

	if (VLAN_ALLOWED(adapter)) {
		struct virtchnl_vlan_filter_list *vvfl;

		adapter->current_op = VIRTCHNL_OP_DEL_VLAN;

	len = sizeof(struct virtchnl_vlan_filter_list) +
	      (count * sizeof(u16));
		len = sizeof(*vvfl) + (count * sizeof(u16));
		if (len > IAVF_MAX_AQ_BUF_SIZE) {
			dev_warn(&adapter->pdev->dev, "Too many delete VLAN changes in one request\n");
		count = (IAVF_MAX_AQ_BUF_SIZE -
			 sizeof(struct virtchnl_vlan_filter_list)) /
			count = (IAVF_MAX_AQ_BUF_SIZE - sizeof(*vvfl)) /
				sizeof(u16);
		len = sizeof(struct virtchnl_vlan_filter_list) +
		      (count * sizeof(u16));
			len = sizeof(*vvfl) + (count * sizeof(u16));
			more = true;
		}
		vvfl = kzalloc(len, GFP_ATOMIC);
@@ -715,7 +824,7 @@ void iavf_del_vlans(struct iavf_adapter *adapter)
		vvfl->num_elements = count;
		list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) {
			if (f->remove) {
			vvfl->vlan_id[i] = f->vlan;
				vvfl->vlan_id[i] = f->vlan.vid;
				i++;
				list_del(&f->list);
				kfree(f);
@@ -723,6 +832,7 @@ void iavf_del_vlans(struct iavf_adapter *adapter)
					break;
			}
		}

		if (!more)
			adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_VLAN_FILTER;

@@ -730,6 +840,64 @@ void iavf_del_vlans(struct iavf_adapter *adapter)

		iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_VLAN, (u8 *)vvfl, len);
		kfree(vvfl);
	} else {
		struct virtchnl_vlan_filter_list_v2 *vvfl_v2;

		adapter->current_op = VIRTCHNL_OP_DEL_VLAN_V2;

		len = sizeof(*vvfl_v2) +
			((count - 1) * sizeof(struct virtchnl_vlan_filter));
		if (len > IAVF_MAX_AQ_BUF_SIZE) {
			dev_warn(&adapter->pdev->dev, "Too many add VLAN changes in one request\n");
			count = (IAVF_MAX_AQ_BUF_SIZE -
				 sizeof(*vvfl_v2)) /
				sizeof(struct virtchnl_vlan_filter);
			len = sizeof(*vvfl_v2) +
				((count - 1) *
				 sizeof(struct virtchnl_vlan_filter));
			more = true;
		}

		vvfl_v2 = kzalloc(len, GFP_ATOMIC);
		if (!vvfl_v2) {
			spin_unlock_bh(&adapter->mac_vlan_list_lock);
			return;
		}

		vvfl_v2->vport_id = adapter->vsi_res->vsi_id;
		vvfl_v2->num_elements = count;
		list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) {
			if (f->remove) {
				struct virtchnl_vlan_supported_caps *filtering_support =
					&adapter->vlan_v2_caps.filtering.filtering_support;
				struct virtchnl_vlan *vlan;

				/* give priority over outer if it's enabled */
				if (filtering_support->outer)
					vlan = &vvfl_v2->filters[i].outer;
				else
					vlan = &vvfl_v2->filters[i].inner;

				vlan->tci = f->vlan.vid;
				vlan->tpid = f->vlan.tpid;

				list_del(&f->list);
				kfree(f);
				i++;
				if (i == count)
					break;
			}
		}

		if (!more)
			adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_VLAN_FILTER;

		spin_unlock_bh(&adapter->mac_vlan_list_lock);

		iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_VLAN_V2,
				 (u8 *)vvfl_v2, len);
		kfree(vvfl_v2);
	}
}

/**
@@ -956,6 +1124,204 @@ void iavf_disable_vlan_stripping(struct iavf_adapter *adapter)
	iavf_send_pf_msg(adapter, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING, NULL, 0);
}

/**
 * iavf_tpid_to_vc_ethertype - transform from VLAN TPID to virtchnl ethertype
 * @tpid: VLAN TPID (i.e. 0x8100, 0x88a8, etc.)
 *
 * Return: the matching VIRTCHNL_VLAN_ETHERTYPE_* flag, or 0 for an
 * unrecognized TPID.
 */
static u32 iavf_tpid_to_vc_ethertype(u16 tpid)
{
	if (tpid == ETH_P_8021Q)
		return VIRTCHNL_VLAN_ETHERTYPE_8100;
	if (tpid == ETH_P_8021AD)
		return VIRTCHNL_VLAN_ETHERTYPE_88A8;

	/* no virtchnl ethertype bit maps to this TPID */
	return 0;
}

/**
 * iavf_set_vc_offload_ethertype - set virtchnl ethertype for offload message
 * @adapter: adapter structure
 * @msg: message structure used for updating offloads over virtchnl to update
 * @tpid: VLAN TPID (i.e. 0x8100, 0x88a8, etc.)
 * @offload_op: opcode used to determine which support structure to check
 *
 * Return: 0 on success, -EINVAL if @offload_op is not a VLAN V2
 * stripping/insertion opcode or if @tpid is neither supported nor toggleable
 * in the negotiated capabilities.
 */
static int
iavf_set_vc_offload_ethertype(struct iavf_adapter *adapter,
			      struct virtchnl_vlan_setting *msg, u16 tpid,
			      enum virtchnl_ops offload_op)
{
	struct virtchnl_vlan_supported_caps *offload_support;
	u16 vc_ethertype = iavf_tpid_to_vc_ethertype(tpid);

	/* reference the correct offload support structure */
	switch (offload_op) {
	case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2:
	case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2:
		offload_support =
			&adapter->vlan_v2_caps.offloads.stripping_support;
		break;
	case VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2:
	case VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2:
		offload_support =
			&adapter->vlan_v2_caps.offloads.insertion_support;
		break;
	default:
		dev_err(&adapter->pdev->dev, "Invalid opcode %d for setting virtchnl ethertype to enable/disable VLAN offloads\n",
			offload_op);
		return -EINVAL;
	}

	/* make sure ethertype is supported; outer takes precedence over
	 * inner, and the ethertype must also be toggleable
	 */
	if (offload_support->outer & vc_ethertype &&
	    offload_support->outer & VIRTCHNL_VLAN_TOGGLE) {
		msg->outer_ethertype_setting = vc_ethertype;
	} else if (offload_support->inner & vc_ethertype &&
		   offload_support->inner & VIRTCHNL_VLAN_TOGGLE) {
		msg->inner_ethertype_setting = vc_ethertype;
	} else {
		dev_dbg(&adapter->pdev->dev, "opcode %d unsupported for VLAN TPID 0x%04x\n",
			offload_op, tpid);
		return -EINVAL;
	}

	return 0;
}

/**
 * iavf_clear_offload_v2_aq_required - clear AQ required bit for offload request
 * @adapter: adapter structure
 * @tpid: VLAN TPID
 * @offload_op: opcode used to determine which AQ required bit to clear
 *
 * Map the (@offload_op, @tpid) pair onto the matching aq_required flag and
 * clear it. A TPID other than 802.1Q/802.1AD clears nothing; an unknown
 * opcode is logged and nothing is cleared.
 */
static void
iavf_clear_offload_v2_aq_required(struct iavf_adapter *adapter, u16 tpid,
				  enum virtchnl_ops offload_op)
{
	bool ctag = tpid == ETH_P_8021Q;
	bool stag = tpid == ETH_P_8021AD;
	u64 flag = 0;

	switch (offload_op) {
	case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2:
		if (ctag)
			flag = IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_STRIPPING;
		else if (stag)
			flag = IAVF_FLAG_AQ_ENABLE_STAG_VLAN_STRIPPING;
		break;
	case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2:
		if (ctag)
			flag = IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_STRIPPING;
		else if (stag)
			flag = IAVF_FLAG_AQ_DISABLE_STAG_VLAN_STRIPPING;
		break;
	case VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2:
		if (ctag)
			flag = IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_INSERTION;
		else if (stag)
			flag = IAVF_FLAG_AQ_ENABLE_STAG_VLAN_INSERTION;
		break;
	case VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2:
		if (ctag)
			flag = IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_INSERTION;
		else if (stag)
			flag = IAVF_FLAG_AQ_DISABLE_STAG_VLAN_INSERTION;
		break;
	default:
		dev_err(&adapter->pdev->dev, "Unsupported opcode %d specified for clearing aq_required bits for VIRTCHNL_VF_OFFLOAD_VLAN_V2 offload request\n",
			offload_op);
		return;
	}

	/* flag == 0 when the TPID did not match, which clears nothing */
	adapter->aq_required &= ~flag;
}

/**
 * iavf_send_vlan_offload_v2 - send offload enable/disable over virtchnl
 * @adapter: adapter structure
 * @tpid: VLAN TPID used for the command (i.e. 0x8100 or 0x88a8)
 * @offload_op: offload_op used to make the request over virtchnl
 *
 * Build a virtchnl_vlan_setting message for @offload_op/@tpid and send it to
 * the PF. The matching aq_required bit is always cleared before sending so an
 * unsupported request cannot repeat endlessly; requests with an unsupported
 * ethertype/opcode are dropped without being sent.
 */
static void
iavf_send_vlan_offload_v2(struct iavf_adapter *adapter, u16 tpid,
			  enum virtchnl_ops offload_op)
{
	struct virtchnl_vlan_setting *msg;
	int len = sizeof(*msg);

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot send %d, command %d pending\n",
			offload_op, adapter->current_op);
		return;
	}

	/* allocate before claiming current_op: bailing out after setting
	 * current_op would leave it stuck at @offload_op and block all
	 * further virtchnl commands. On failure current_op and the
	 * aq_required bit are untouched, so the request stays pending and
	 * can be retried later.
	 */
	msg = kzalloc(len, GFP_KERNEL);
	if (!msg)
		return;

	adapter->current_op = offload_op;

	msg->vport_id = adapter->vsi_res->vsi_id;

	/* always clear to prevent unsupported and endless requests */
	iavf_clear_offload_v2_aq_required(adapter, tpid, offload_op);

	/* only send valid offload requests */
	if (!iavf_set_vc_offload_ethertype(adapter, msg, tpid, offload_op))
		iavf_send_pf_msg(adapter, offload_op, (u8 *)msg, len);
	else
		adapter->current_op = VIRTCHNL_OP_UNKNOWN;

	kfree(msg);
}

/**
 * iavf_enable_vlan_stripping_v2 - enable VLAN stripping
 * @adapter: adapter structure
 * @tpid: VLAN TPID used to enable VLAN stripping
 *
 * Thin wrapper that sends the enable-stripping request to the PF via
 * iavf_send_vlan_offload_v2().
 */
void iavf_enable_vlan_stripping_v2(struct iavf_adapter *adapter, u16 tpid)
{
	iavf_send_vlan_offload_v2(adapter, tpid,
				  VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2);
}

/**
 * iavf_disable_vlan_stripping_v2 - disable VLAN stripping
 * @adapter: adapter structure
 * @tpid: VLAN TPID used to disable VLAN stripping
 *
 * Thin wrapper that sends the disable-stripping request to the PF via
 * iavf_send_vlan_offload_v2().
 */
void iavf_disable_vlan_stripping_v2(struct iavf_adapter *adapter, u16 tpid)
{
	iavf_send_vlan_offload_v2(adapter, tpid,
				  VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2);
}

/**
 * iavf_enable_vlan_insertion_v2 - enable VLAN insertion
 * @adapter: adapter structure
 * @tpid: VLAN TPID used to enable VLAN insertion
 *
 * Thin wrapper that sends the enable-insertion request to the PF via
 * iavf_send_vlan_offload_v2().
 */
void iavf_enable_vlan_insertion_v2(struct iavf_adapter *adapter, u16 tpid)
{
	iavf_send_vlan_offload_v2(adapter, tpid,
				  VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2);
}

/**
 * iavf_disable_vlan_insertion_v2 - disable VLAN insertion
 * @adapter: adapter structure
 * @tpid: VLAN TPID used to disable VLAN insertion
 *
 * Thin wrapper that sends the disable-insertion request to the PF via
 * iavf_send_vlan_offload_v2().
 */
void iavf_disable_vlan_insertion_v2(struct iavf_adapter *adapter, u16 tpid)
{
	iavf_send_vlan_offload_v2(adapter, tpid,
				  VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2);
}

#define IAVF_MAX_SPEED_STRLEN	13

/**
@@ -1759,6 +2125,26 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
		}

		spin_unlock_bh(&adapter->mac_vlan_list_lock);

		iavf_parse_vf_resource_msg(adapter);

		/* negotiated VIRTCHNL_VF_OFFLOAD_VLAN_V2, so wait for the
		 * response to VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS to finish
		 * configuration
		 */
		if (VLAN_V2_ALLOWED(adapter))
			break;
		/* fallthrough and finish config if VIRTCHNL_VF_OFFLOAD_VLAN_V2
		 * wasn't successfully negotiated with the PF
		 */
		}
		fallthrough;
	case VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS: {
		if (v_opcode == VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS)
			memcpy(&adapter->vlan_v2_caps, msg,
			       min_t(u16, msglen,
				     sizeof(adapter->vlan_v2_caps)));

		iavf_process_config(adapter);

		/* unlock crit_lock before acquiring rtnl_lock as other
@@ -1766,13 +2152,23 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
		 * crit_lock
		 */
		mutex_unlock(&adapter->crit_lock);
		/* VLAN capabilities can change during VFR, so make sure to
		 * update the netdev features with the new capabilities
		 */
		rtnl_lock();
		netdev_update_features(adapter->netdev);
		netdev_update_features(netdev);
		rtnl_unlock();
		if (iavf_lock_timeout(&adapter->crit_lock, 10000))
			dev_warn(&adapter->pdev->dev, "failed to acquire crit_lock in %s\n",
				 __FUNCTION__);

		/* Request VLAN offload settings */
		if (VLAN_V2_ALLOWED(adapter))
			iavf_set_vlan_offload_features(adapter, 0,
						       netdev->features);

		iavf_set_queue_vlan_tag_loc(adapter);

		}
		break;
	case VIRTCHNL_OP_ENABLE_QUEUES:
Loading