Commit e4a3d6a6 authored by Jakub Kicinski's avatar Jakub Kicinski
Browse files
Tony Nguyen says:

====================
100GbE Intel Wired LAN Driver Updates 2022-01-06

Victor adds restoring of advanced rules after reset.

Wojciech improves usage of switchdev control VSI by utilizing the
device's advanced rules for forwarding.

Christophe Jaillet removes some unneeded calls to zero bitmaps, changes
some bitmap operations that don't need to be atomic, and converts a
kfree() to a more appropriate bitmap_free().

* '100GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/next-queue:
  ice: Use bitmap_free() to free bitmap
  ice: Optimize a few bitmap operations
  ice: Slightly simply ice_find_free_recp_res_idx
  ice: improve switchdev's slow-path
  ice: replay advanced rules after reset
====================

Link: https://lore.kernel.org/r/20220106183013.3777622-1-anthony.l.nguyen@intel.com


Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents 8947c390 0dbc4162
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -4603,7 +4603,7 @@ static int ice_replay_pre_init(struct ice_hw *hw)
	 * will allow adding rules entries back to filt_rules list,
	 * which is operational list.
	 */
	for (i = 0; i < ICE_SW_LKUP_LAST; i++)
	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++)
		list_replace_init(&sw->recp_list[i].filt_rules,
				  &sw->recp_list[i].filt_replay_rules);
	ice_sched_replay_agg_vsi_preinit(hw);
+95 −74
Original line number Diff line number Diff line
@@ -9,6 +9,100 @@
#include "ice_devlink.h"
#include "ice_tc_lib.h"

/**
 * ice_eswitch_add_vf_mac_rule - add adv rule with VF's MAC
 * @pf: pointer to PF struct
 * @vf: pointer to VF struct
 * @mac: VF's MAC address
 *
 * This function adds advanced rule that forwards packets with
 * VF's MAC address (src MAC) to the corresponding switchdev ctrl VSI queue.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int
ice_eswitch_add_vf_mac_rule(struct ice_pf *pf, struct ice_vf *vf, const u8 *mac)
{
	struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;
	struct ice_adv_rule_info rule_info = { 0 };
	struct ice_adv_lkup_elem *list;
	struct ice_hw *hw = &pf->hw;
	const u16 lkups_cnt = 1;
	int err;

	list = kcalloc(lkups_cnt, sizeof(*list), GFP_ATOMIC);
	if (!list)
		return -ENOMEM;

	/* Match on the VF's MAC as source address with a full 48-bit mask */
	list[0].type = ICE_MAC_OFOS;
	ether_addr_copy(list[0].h_u.eth_hdr.src_addr, mac);
	eth_broadcast_addr(list[0].m_u.eth_hdr.src_addr);

	rule_info.sw_act.flag |= ICE_FLTR_TX;
	rule_info.sw_act.vsi_handle = ctrl_vsi->idx;
	rule_info.sw_act.fltr_act = ICE_FWD_TO_Q;
	rule_info.rx = false;
	/* forward to the ctrl VSI Rx queue that is mapped to this VF */
	rule_info.sw_act.fwd_id.q_id = hw->func_caps.common_cap.rxq_first_id +
				       ctrl_vsi->rxq_map[vf->vf_id];
	rule_info.flags_info.act |= ICE_SINGLE_ACT_LB_ENABLE;
	rule_info.flags_info.act_valid = true;

	err = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info,
			       vf->repr->mac_rule);
	if (err)
		/* fix: kernel log messages must end with a newline */
		dev_err(ice_pf_to_dev(pf), "Unable to add VF mac rule in switchdev mode for VF %d\n",
			vf->vf_id);
	else
		vf->repr->rule_added = true;

	kfree(list);
	return err;
}

/**
 * ice_eswitch_replay_vf_mac_rule - replay adv rule with VF's MAC
 * @vf: pointer to vF struct
 *
 * This function replays VF's MAC rule after reset.
 */
void ice_eswitch_replay_vf_mac_rule(struct ice_vf *vf)
{
	int err;

	if (!ice_is_switchdev_running(vf->pf))
		return;

	if (is_valid_ether_addr(vf->hw_lan_addr.addr)) {
		err = ice_eswitch_add_vf_mac_rule(vf->pf, vf,
						  vf->hw_lan_addr.addr);
		if (err) {
			dev_err(ice_pf_to_dev(vf->pf), "Failed to add MAC %pM for VF %d\n, error %d\n",
				vf->hw_lan_addr.addr, vf->vf_id, err);
			return;
		}
		vf->num_mac++;

		ether_addr_copy(vf->dev_lan_addr.addr, vf->hw_lan_addr.addr);
	}
}

/**
 * ice_eswitch_del_vf_mac_rule - delete adv rule with VF's MAC
 * @vf: pointer to the VF struct
 *
 * Delete the advanced rule that was used to forward packets with the VF's MAC
 * address (src MAC) to the corresponding switchdev ctrl VSI queue.
 */
void ice_eswitch_del_vf_mac_rule(struct ice_vf *vf)
{
	/* nothing to remove unless switchdev is up and the rule was added */
	if (!ice_is_switchdev_running(vf->pf) || !vf->repr->rule_added)
		return;

	ice_rem_adv_rule_by_id(&vf->pf->hw, vf->repr->mac_rule);
	vf->repr->rule_added = false;
}

/**
 * ice_eswitch_setup_env - configure switchdev HW filters
 * @pf: pointer to PF struct
@@ -21,7 +115,6 @@ static int ice_eswitch_setup_env(struct ice_pf *pf)
	struct ice_vsi *uplink_vsi = pf->switchdev.uplink_vsi;
	struct net_device *uplink_netdev = uplink_vsi->netdev;
	struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;
	struct ice_port_info *pi = pf->hw.port_info;
	bool rule_added = false;

	ice_vsi_manage_vlan_stripping(ctrl_vsi, false);
@@ -42,29 +135,17 @@ static int ice_eswitch_setup_env(struct ice_pf *pf)
		rule_added = true;
	}

	if (ice_cfg_dflt_vsi(pi->hw, ctrl_vsi->idx, true, ICE_FLTR_TX))
		goto err_def_tx;

	if (ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_set_allow_override))
		goto err_override_uplink;

	if (ice_vsi_update_security(ctrl_vsi, ice_vsi_ctx_set_allow_override))
		goto err_override_control;

	if (ice_fltr_update_flags_dflt_rule(ctrl_vsi, pi->dflt_tx_vsi_rule_id,
					    ICE_FLTR_TX,
					    ICE_SINGLE_ACT_LB_ENABLE))
		goto err_update_action;

	return 0;

err_update_action:
	ice_vsi_update_security(ctrl_vsi, ice_vsi_ctx_clear_allow_override);
err_override_control:
	ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_clear_allow_override);
err_override_uplink:
	ice_cfg_dflt_vsi(pi->hw, ctrl_vsi->idx, false, ICE_FLTR_TX);
err_def_tx:
	if (rule_added)
		ice_clear_dflt_vsi(uplink_vsi->vsw);
err_def_rx:
@@ -167,21 +248,11 @@ static int ice_eswitch_setup_reprs(struct ice_pf *pf)
		netif_keep_dst(vf->repr->netdev);
	}

	kfree(ctrl_vsi->target_netdevs);

	ctrl_vsi->target_netdevs = kcalloc(max_vsi_num + 1,
					   sizeof(*ctrl_vsi->target_netdevs),
					   GFP_KERNEL);
	if (!ctrl_vsi->target_netdevs)
		goto err;

	ice_for_each_vf(pf, i) {
		struct ice_repr *repr = pf->vf[i].repr;
		struct ice_vsi *vsi = repr->src_vsi;
		struct metadata_dst *dst;

		ctrl_vsi->target_netdevs[vsi->vsi_num] = repr->netdev;

		dst = repr->dst;
		dst->u.port_info.port_id = vsi->vsi_num;
		dst->u.port_info.lower_dev = repr->netdev;
@@ -214,7 +285,6 @@ ice_eswitch_release_reprs(struct ice_pf *pf, struct ice_vsi *ctrl_vsi)
{
	int i;

	kfree(ctrl_vsi->target_netdevs);
	ice_for_each_vf(pf, i) {
		struct ice_vsi *vsi = pf->vf[i].repr->src_vsi;
		struct ice_vf *vf = &pf->vf[i];
@@ -320,7 +390,6 @@ static void ice_eswitch_release_env(struct ice_pf *pf)

	ice_vsi_update_security(ctrl_vsi, ice_vsi_ctx_clear_allow_override);
	ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_clear_allow_override);
	ice_cfg_dflt_vsi(&pf->hw, ctrl_vsi->idx, false, ICE_FLTR_TX);
	ice_clear_dflt_vsi(uplink_vsi->vsw);
	ice_fltr_add_mac_and_broadcast(uplink_vsi,
				       uplink_vsi->port_info->mac.perm_addr,
@@ -374,24 +443,6 @@ static void ice_eswitch_napi_disable(struct ice_pf *pf)
		napi_disable(&pf->vf[i].repr->q_vector->napi);
}

/**
 * ice_eswitch_set_rxdid - configure rxdid on all Rx queues from VSI
 * @vsi: VSI to setup rxdid on
 * @rxdid: flex descriptor id
 */
static void ice_eswitch_set_rxdid(struct ice_vsi *vsi, u32 rxdid)
{
	struct ice_hw *hw = &vsi->back->hw;
	int q;

	/* program the flex descriptor profile on every Rx queue of the VSI */
	ice_for_each_rxq(vsi, q) {
		u16 pf_q = vsi->rxq_map[vsi->rx_rings[q]->q_index];

		ice_write_qrxflxp_cntxt(hw, pf_q, rxdid, 0x3, true);
	}
}

/**
 * ice_eswitch_enable_switchdev - configure eswitch in switchdev mode
 * @pf: pointer to PF structure
@@ -425,8 +476,6 @@ static int ice_eswitch_enable_switchdev(struct ice_pf *pf)

	ice_eswitch_napi_enable(pf);

	ice_eswitch_set_rxdid(ctrl_vsi, ICE_RXDID_FLEX_NIC_2);

	return 0;

err_setup_reprs:
@@ -448,6 +497,7 @@ static void ice_eswitch_disable_switchdev(struct ice_pf *pf)

	ice_eswitch_napi_disable(pf);
	ice_eswitch_release_env(pf);
	ice_rem_adv_rule_for_vsi(&pf->hw, ctrl_vsi->idx);
	ice_eswitch_release_reprs(pf, ctrl_vsi);
	ice_vsi_release(ctrl_vsi);
	ice_repr_rem_from_all_vfs(pf);
@@ -496,34 +546,6 @@ ice_eswitch_mode_set(struct devlink *devlink, u16 mode,
	return 0;
}

/**
 * ice_eswitch_get_target_netdev - return port representor netdev
 * @rx_ring: pointer to Rx ring
 * @rx_desc: pointer to Rx descriptor
 *
 * When working in switchdev mode context (when control VSI is used), this
 * function returns netdev of appropriate port representor. For non-switchdev
 * context, regular netdev associated with Rx ring is returned.
 */
struct net_device *
ice_eswitch_get_target_netdev(struct ice_rx_ring *rx_ring,
			      union ice_32b_rx_flex_desc *rx_desc)
{
	struct ice_vsi *vsi = rx_ring->vsi;
	struct ice_32b_rx_flex_desc_nic_2 *nic_2_desc;
	u16 src_vsi_id;

	/* ordinary Rx path - not on the switchdev control VSI */
	if (vsi != vsi->back->switchdev.control_vsi)
		return rx_ring->netdev;

	/* FLEX_NIC_2 descriptors carry the packet's source VSI number */
	nic_2_desc = (struct ice_32b_rx_flex_desc_nic_2 *)rx_desc;
	src_vsi_id = le16_to_cpu(nic_2_desc->src_vsi);

	return vsi->target_netdevs[src_vsi_id];
}

/**
 * ice_eswitch_mode_get - get current eswitch mode
 * @devlink: pointer to devlink structure
@@ -648,7 +670,6 @@ int ice_eswitch_rebuild(struct ice_pf *pf)
		return status;

	ice_eswitch_napi_enable(pf);
	ice_eswitch_set_rxdid(ctrl_vsi, ICE_RXDID_FLEX_NIC_2);
	ice_eswitch_start_all_tx_queues(pf);

	return 0;
+14 −11
Original line number Diff line number Diff line
@@ -20,10 +20,11 @@ bool ice_is_eswitch_mode_switchdev(struct ice_pf *pf);
void ice_eswitch_update_repr(struct ice_vsi *vsi);

void ice_eswitch_stop_all_tx_queues(struct ice_pf *pf);

struct net_device *
ice_eswitch_get_target_netdev(struct ice_rx_ring *rx_ring,
			      union ice_32b_rx_flex_desc *rx_desc);
int
ice_eswitch_add_vf_mac_rule(struct ice_pf *pf, struct ice_vf *vf,
			    const u8 *mac);
void ice_eswitch_replay_vf_mac_rule(struct ice_vf *vf);
void ice_eswitch_del_vf_mac_rule(struct ice_vf *vf);

void ice_eswitch_set_target_vsi(struct sk_buff *skb,
				struct ice_tx_offload_params *off);
@@ -33,6 +34,15 @@ ice_eswitch_port_start_xmit(struct sk_buff *skb, struct net_device *netdev);
static inline void ice_eswitch_release(struct ice_pf *pf) { }

static inline void ice_eswitch_stop_all_tx_queues(struct ice_pf *pf) { }
static inline void ice_eswitch_replay_vf_mac_rule(struct ice_vf *vf) { }
static inline void ice_eswitch_del_vf_mac_rule(struct ice_vf *vf) { }

static inline int
ice_eswitch_add_vf_mac_rule(struct ice_pf *pf, struct ice_vf *vf,
			    const u8 *mac)
{
	return -EOPNOTSUPP;
}

static inline void
ice_eswitch_set_target_vsi(struct sk_buff *skb,
@@ -67,13 +77,6 @@ static inline bool ice_is_eswitch_mode_switchdev(struct ice_pf *pf)
	return false;
}

static inline struct net_device *
ice_eswitch_get_target_netdev(struct ice_rx_ring *rx_ring,
			      union ice_32b_rx_flex_desc *rx_desc)
{
	return rx_ring->netdev;
}

static inline netdev_tx_t
ice_eswitch_port_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
+3 −3
Original line number Diff line number Diff line
@@ -4440,7 +4440,7 @@ ice_update_fd_swap(struct ice_hw *hw, u16 prof_id, struct ice_fv_word *es)
		for (j = 0; j < ICE_FD_SRC_DST_PAIR_COUNT; j++)
			if (es[i].prot_id == ice_fd_pairs[j].prot_id &&
			    es[i].off == ice_fd_pairs[j].off) {
				set_bit(j, pair_list);
				__set_bit(j, pair_list);
				pair_start[j] = i;
			}
	}
@@ -4710,7 +4710,7 @@ ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
			if (test_bit(ptg, ptgs_used))
				continue;

			set_bit(ptg, ptgs_used);
			__set_bit(ptg, ptgs_used);
			/* Check to see there are any attributes for
			 * this PTYPE, and add them if found.
			 */
@@ -5339,7 +5339,7 @@ ice_adj_prof_priorities(struct ice_hw *hw, enum ice_block blk, u16 vsig,
			}

			/* keep track of used ptgs */
			set_bit(t->tcam[i].ptg, ptgs_used);
			__set_bit(t->tcam[i].ptg, ptgs_used);
		}
	}

+0 −80
Original line number Diff line number Diff line
@@ -445,83 +445,3 @@ int ice_fltr_remove_eth(struct ice_vsi *vsi, u16 ethertype, u16 flag,
	return ice_fltr_prepare_eth(vsi, ethertype, flag, action,
				    ice_fltr_remove_eth_list);
}

/**
 * ice_fltr_update_rule_flags - update lan_en/lb_en flags
 * @hw: pointer to hw
 * @rule_id: id of rule being updated
 * @recipe_id: recipe id of rule
 * @act: current action field
 * @type: Rx or Tx
 * @src: source VSI
 * @new_flags: combinations of lb_en and lan_en
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int
ice_fltr_update_rule_flags(struct ice_hw *hw, u16 rule_id, u16 recipe_id,
			   u32 act, u16 type, u16 src, u32 new_flags)
{
	struct ice_aqc_sw_rules_elem *rule;
	int status;

	rule = kzalloc(ICE_SW_RULE_RX_TX_NO_HDR_SIZE, GFP_KERNEL);
	if (!rule)
		return -ENOMEM;

	/* clear the old lb_en/lan_en bits, then set the requested ones */
	act &= ~(ICE_SINGLE_ACT_LB_ENABLE | ICE_SINGLE_ACT_LAN_ENABLE);
	act |= new_flags &
	       (ICE_SINGLE_ACT_LB_ENABLE | ICE_SINGLE_ACT_LAN_ENABLE);

	rule->pdata.lkup_tx_rx.recipe_id = cpu_to_le16(recipe_id);
	rule->pdata.lkup_tx_rx.index = cpu_to_le16(rule_id);
	rule->pdata.lkup_tx_rx.act = cpu_to_le32(act);

	if (type & ICE_FLTR_RX) {
		rule->type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX);
		rule->pdata.lkup_tx_rx.src =
			cpu_to_le16(hw->port_info->lport);
	} else {
		rule->type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_TX);
		rule->pdata.lkup_tx_rx.src = cpu_to_le16(src);
	}

	status = ice_aq_sw_rules(hw, rule, ICE_SW_RULE_RX_TX_NO_HDR_SIZE, 1,
				 ice_aqc_opc_update_sw_rules, NULL);

	kfree(rule);
	return status;
}

/**
 * ice_fltr_build_action - build action for rule
 * @vsi_id: id of VSI which is used to build action
 */
static u32 ice_fltr_build_action(u16 vsi_id)
{
	/* encode the VSI id into its action field, then mark the rule as
	 * VSI-forwarding and valid
	 */
	u32 vsi_field = (vsi_id << ICE_SINGLE_ACT_VSI_ID_S) &
			ICE_SINGLE_ACT_VSI_ID_M;

	return vsi_field | ICE_SINGLE_ACT_VSI_FORWARDING |
	       ICE_SINGLE_ACT_VALID_BIT;
}

/**
 * ice_fltr_update_flags_dflt_rule - update flags on default rule
 * @vsi: pointer to VSI
 * @rule_id: id of rule
 * @direction: Tx or Rx
 * @new_flags: flags to update
 *
 * Function updates flags on default rule with ICE_SW_LKUP_DFLT.
 *
 * Flags should be a combination of ICE_SINGLE_ACT_LB_ENABLE and
 * ICE_SINGLE_ACT_LAN_ENABLE.
 */
int
ice_fltr_update_flags_dflt_rule(struct ice_vsi *vsi, u16 rule_id, u8 direction,
				u32 new_flags)
{
	return ice_fltr_update_rule_flags(&vsi->back->hw, rule_id,
					  ICE_SW_LKUP_DFLT,
					  ice_fltr_build_action(vsi->vsi_num),
					  direction, vsi->vsi_num, new_flags);
}
Loading