Commit c4c2c7db authored by Jacob Keller's avatar Jacob Keller Committed by Tony Nguyen
Browse files

ice: convert ice_for_each_vf to include VF entry iterator



The ice_for_each_vf macro is intended to be used to loop over all VFs.
The current implementation relies on an iterator that is the index into
the VF array in the PF structure. This forces all users to perform a
look up themselves.

This abstraction forces a lot of duplicate work on callers and leaks the
interface implementation to the caller. Replace this with an
implementation that includes the VF pointer as the primary iterator. This
version simplifies callers which just want to iterate over every VF, as
they no longer need to perform their own lookup.

The "i" iterator value is replaced with a new unsigned int "bkt"
parameter, as this will match the necessary interface for replacing
the VF array with a hash table. For now, the bkt is the VF ID, but in
the future it will simply be the hash bucket index. Document that it
should not be treated as a VF ID.

This change aims to simplify switching from the array to a hash table. I
considered alternative implementations such as an xarray but decided
that the hash table was the simplest and most suitable implementation. I
also looked at methods to hide the bkt iterator entirely, but I couldn't
come up with a feasible solution that worked for hash table iterators.

Signed-off-by: default avatarJacob Keller <jacob.e.keller@intel.com>
Tested-by: default avatarKonrad Jankowski <konrad0.jankowski@intel.com>
Signed-off-by: default avatarTony Nguyen <anthony.l.nguyen@intel.com>
parent 19281e86
Loading
Loading
Loading
Loading
+32 −31
Original line number Diff line number Diff line
@@ -210,11 +210,11 @@ static void ice_eswitch_remap_rings_to_vectors(struct ice_pf *pf)
static void
ice_eswitch_release_reprs(struct ice_pf *pf, struct ice_vsi *ctrl_vsi)
{
	int i;
	struct ice_vf *vf;
	unsigned int bkt;

	ice_for_each_vf(pf, i) {
		struct ice_vsi *vsi = pf->vf[i].repr->src_vsi;
		struct ice_vf *vf = &pf->vf[i];
	ice_for_each_vf(pf, bkt, vf) {
		struct ice_vsi *vsi = vf->repr->src_vsi;

		/* Skip VFs that aren't configured */
		if (!vf->repr->dst)
@@ -238,11 +238,11 @@ static int ice_eswitch_setup_reprs(struct ice_pf *pf)
{
	struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;
	int max_vsi_num = 0;
	int i;
	struct ice_vf *vf;
	unsigned int bkt;

	ice_for_each_vf(pf, i) {
		struct ice_vsi *vsi = pf->vf[i].repr->src_vsi;
		struct ice_vf *vf = &pf->vf[i];
	ice_for_each_vf(pf, bkt, vf) {
		struct ice_vsi *vsi = vf->repr->src_vsi;

		ice_remove_vsi_fltr(&pf->hw, vsi->idx);
		vf->repr->dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX,
@@ -282,8 +282,8 @@ static int ice_eswitch_setup_reprs(struct ice_pf *pf)
		netif_keep_dst(vf->repr->netdev);
	}

	ice_for_each_vf(pf, i) {
		struct ice_repr *repr = pf->vf[i].repr;
	ice_for_each_vf(pf, bkt, vf) {
		struct ice_repr *repr = vf->repr;
		struct ice_vsi *vsi = repr->src_vsi;
		struct metadata_dst *dst;

@@ -417,10 +417,11 @@ ice_eswitch_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
 */
static void ice_eswitch_napi_del(struct ice_pf *pf)
{
	int i;
	struct ice_vf *vf;
	unsigned int bkt;

	ice_for_each_vf(pf, i)
		netif_napi_del(&pf->vf[i].repr->q_vector->napi);
	ice_for_each_vf(pf, bkt, vf)
		netif_napi_del(&vf->repr->q_vector->napi);
}

/**
@@ -429,10 +430,11 @@ static void ice_eswitch_napi_del(struct ice_pf *pf)
 */
static void ice_eswitch_napi_enable(struct ice_pf *pf)
{
	int i;
	struct ice_vf *vf;
	unsigned int bkt;

	ice_for_each_vf(pf, i)
		napi_enable(&pf->vf[i].repr->q_vector->napi);
	ice_for_each_vf(pf, bkt, vf)
		napi_enable(&vf->repr->q_vector->napi);
}

/**
@@ -441,10 +443,11 @@ static void ice_eswitch_napi_enable(struct ice_pf *pf)
 */
static void ice_eswitch_napi_disable(struct ice_pf *pf)
{
	int i;
	struct ice_vf *vf;
	unsigned int bkt;

	ice_for_each_vf(pf, i)
		napi_disable(&pf->vf[i].repr->q_vector->napi);
	ice_for_each_vf(pf, bkt, vf)
		napi_disable(&vf->repr->q_vector->napi);
}

/**
@@ -613,16 +616,15 @@ int ice_eswitch_configure(struct ice_pf *pf)
 */
static void ice_eswitch_start_all_tx_queues(struct ice_pf *pf)
{
	struct ice_repr *repr;
	int i;
	struct ice_vf *vf;
	unsigned int bkt;

	if (test_bit(ICE_DOWN, pf->state))
		return;

	ice_for_each_vf(pf, i) {
		repr = pf->vf[i].repr;
		if (repr)
			ice_repr_start_tx_queues(repr);
	ice_for_each_vf(pf, bkt, vf) {
		if (vf->repr)
			ice_repr_start_tx_queues(vf->repr);
	}
}

@@ -632,16 +634,15 @@ static void ice_eswitch_start_all_tx_queues(struct ice_pf *pf)
 */
void ice_eswitch_stop_all_tx_queues(struct ice_pf *pf)
{
	struct ice_repr *repr;
	int i;
	struct ice_vf *vf;
	unsigned int bkt;

	if (test_bit(ICE_DOWN, pf->state))
		return;

	ice_for_each_vf(pf, i) {
		repr = pf->vf[i].repr;
		if (repr)
			ice_repr_stop_tx_queues(repr);
	ice_for_each_vf(pf, bkt, vf) {
		if (vf->repr)
			ice_repr_stop_tx_queues(vf->repr);
	}
}

+3 −4
Original line number Diff line number Diff line
@@ -316,11 +316,10 @@ ice_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
 */
static bool ice_active_vfs(struct ice_pf *pf)
{
	unsigned int i;

	ice_for_each_vf(pf, i) {
		struct ice_vf *vf = &pf->vf[i];
	struct ice_vf *vf;
	unsigned int bkt;

	ice_for_each_vf(pf, bkt, vf) {
		if (test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
			return true;
	}
+10 −11
Original line number Diff line number Diff line
@@ -433,13 +433,14 @@ static irqreturn_t ice_eswitch_msix_clean_rings(int __always_unused irq, void *d
{
	struct ice_q_vector *q_vector = (struct ice_q_vector *)data;
	struct ice_pf *pf = q_vector->vsi->back;
	int i;
	struct ice_vf *vf;
	unsigned int bkt;

	if (!q_vector->tx.tx_ring && !q_vector->rx.rx_ring)
		return IRQ_HANDLED;

	ice_for_each_vf(pf, i)
		napi_schedule(&pf->vf[i].repr->q_vector->napi);
	ice_for_each_vf(pf, bkt, vf)
		napi_schedule(&vf->repr->q_vector->napi);

	return IRQ_HANDLED;
}
@@ -1342,11 +1343,10 @@ ice_get_res(struct ice_pf *pf, struct ice_res_tracker *res, u16 needed, u16 id)
 */
static int ice_get_vf_ctrl_res(struct ice_pf *pf, struct ice_vsi *vsi)
{
	int i;

	ice_for_each_vf(pf, i) {
		struct ice_vf *vf = &pf->vf[i];
	struct ice_vf *vf;
	unsigned int bkt;

	ice_for_each_vf(pf, bkt, vf) {
		if (vf != vsi->vf && vf->ctrl_vsi_idx != ICE_NO_VSI)
			return pf->vsi[vf->ctrl_vsi_idx]->base_vector;
	}
@@ -2891,11 +2891,10 @@ void ice_napi_del(struct ice_vsi *vsi)
 */
static void ice_free_vf_ctrl_res(struct ice_pf *pf,  struct ice_vsi *vsi)
{
	int i;

	ice_for_each_vf(pf, i) {
		struct ice_vf *vf = &pf->vf[i];
	struct ice_vf *vf;
	unsigned int bkt;

	ice_for_each_vf(pf, bkt, vf) {
		if (vf != vsi->vf && vf->ctrl_vsi_idx != ICE_NO_VSI)
			return;
	}
+22 −22
Original line number Diff line number Diff line
@@ -505,7 +505,8 @@ ice_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
{
	struct ice_hw *hw = &pf->hw;
	struct ice_vsi *vsi;
	unsigned int i;
	struct ice_vf *vf;
	unsigned int bkt;

	dev_dbg(ice_pf_to_dev(pf), "reset_type=%d\n", reset_type);

@@ -520,8 +521,8 @@ ice_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
		ice_vc_notify_reset(pf);

	/* Disable VFs until reset is completed */
	ice_for_each_vf(pf, i)
		ice_set_vf_state_qs_dis(&pf->vf[i]);
	ice_for_each_vf(pf, bkt, vf)
		ice_set_vf_state_qs_dis(vf);

	if (ice_is_eswitch_mode_switchdev(pf)) {
		if (reset_type != ICE_RESET_PFR)
@@ -1666,7 +1667,8 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	unsigned int i;
	struct ice_vf *vf;
	unsigned int bkt;
	u32 reg;

	if (!test_and_clear_bit(ICE_MDD_EVENT_PENDING, pf->state)) {
@@ -1754,47 +1756,45 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
	/* Check to see if one of the VFs caused an MDD event, and then
	 * increment counters and set print pending
	 */
	ice_for_each_vf(pf, i) {
		struct ice_vf *vf = &pf->vf[i];

		reg = rd32(hw, VP_MDET_TX_PQM(i));
	ice_for_each_vf(pf, bkt, vf) {
		reg = rd32(hw, VP_MDET_TX_PQM(vf->vf_id));
		if (reg & VP_MDET_TX_PQM_VALID_M) {
			wr32(hw, VP_MDET_TX_PQM(i), 0xFFFF);
			wr32(hw, VP_MDET_TX_PQM(vf->vf_id), 0xFFFF);
			vf->mdd_tx_events.count++;
			set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
			if (netif_msg_tx_err(pf))
				dev_info(dev, "Malicious Driver Detection event TX_PQM detected on VF %d\n",
					 i);
					 vf->vf_id);
		}

		reg = rd32(hw, VP_MDET_TX_TCLAN(i));
		reg = rd32(hw, VP_MDET_TX_TCLAN(vf->vf_id));
		if (reg & VP_MDET_TX_TCLAN_VALID_M) {
			wr32(hw, VP_MDET_TX_TCLAN(i), 0xFFFF);
			wr32(hw, VP_MDET_TX_TCLAN(vf->vf_id), 0xFFFF);
			vf->mdd_tx_events.count++;
			set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
			if (netif_msg_tx_err(pf))
				dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on VF %d\n",
					 i);
					 vf->vf_id);
		}

		reg = rd32(hw, VP_MDET_TX_TDPU(i));
		reg = rd32(hw, VP_MDET_TX_TDPU(vf->vf_id));
		if (reg & VP_MDET_TX_TDPU_VALID_M) {
			wr32(hw, VP_MDET_TX_TDPU(i), 0xFFFF);
			wr32(hw, VP_MDET_TX_TDPU(vf->vf_id), 0xFFFF);
			vf->mdd_tx_events.count++;
			set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
			if (netif_msg_tx_err(pf))
				dev_info(dev, "Malicious Driver Detection event TX_TDPU detected on VF %d\n",
					 i);
					 vf->vf_id);
		}

		reg = rd32(hw, VP_MDET_RX(i));
		reg = rd32(hw, VP_MDET_RX(vf->vf_id));
		if (reg & VP_MDET_RX_VALID_M) {
			wr32(hw, VP_MDET_RX(i), 0xFFFF);
			wr32(hw, VP_MDET_RX(vf->vf_id), 0xFFFF);
			vf->mdd_rx_events.count++;
			set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
			if (netif_msg_rx_err(pf))
				dev_info(dev, "Malicious Driver Detection event RX detected on VF %d\n",
					 i);
					 vf->vf_id);

			/* Since the queue is disabled on VF Rx MDD events, the
			 * PF can be configured to reset the VF through ethtool
@@ -1805,9 +1805,9 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
				 * reset, so print the event prior to reset.
				 */
				ice_print_vf_rx_mdd_event(vf);
				mutex_lock(&pf->vf[i].cfg_lock);
				ice_reset_vf(&pf->vf[i], false);
				mutex_unlock(&pf->vf[i].cfg_lock);
				mutex_lock(&vf->cfg_lock);
				ice_reset_vf(vf, false);
				mutex_unlock(&vf->cfg_lock);
			}
		}
	}
+6 −9
Original line number Diff line number Diff line
@@ -338,14 +338,12 @@ static void ice_repr_rem(struct ice_vf *vf)
 */
void ice_repr_rem_from_all_vfs(struct ice_pf *pf)
{
	int i;

	ice_for_each_vf(pf, i) {
		struct ice_vf *vf = &pf->vf[i];
	struct ice_vf *vf;
	unsigned int bkt;

	ice_for_each_vf(pf, bkt, vf)
		ice_repr_rem(vf);
}
}

/**
 * ice_repr_add_for_all_vfs - add port representor for all VFs
@@ -353,12 +351,11 @@ void ice_repr_rem_from_all_vfs(struct ice_pf *pf)
 */
int ice_repr_add_for_all_vfs(struct ice_pf *pf)
{
	struct ice_vf *vf;
	unsigned int bkt;
	int err;
	int i;

	ice_for_each_vf(pf, i) {
		struct ice_vf *vf = &pf->vf[i];

	ice_for_each_vf(pf, bkt, vf) {
		err = ice_repr_add(vf);
		if (err)
			goto err;
Loading