Commit 29796143 authored by David S. Miller
Browse files


Tony Nguyen says:

====================
Intel Wired LAN Driver Updates 2022-09-06 (ice)

This series contains updates to ice driver only.

Tony reduces device MSI-X request/usage when entire request can't be fulfilled.

Michal adds check for reset when waiting for PTP offsets.

Paul refactors firmware version checks to use a common helper.

Christophe Jaillet changes a couple of local memory allocations to not
use the devm variant.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
parents 53fc01a0 04cbaa6c
Loading
Loading
Loading
Loading
+29 −28
Original line number Diff line number Diff line
@@ -5286,26 +5286,41 @@ ice_aq_get_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx,
}

/**
 * ice_is_fw_api_min_ver
 * @hw: pointer to the hardware structure
 * @maj: major version
 * @min: minor version
 * @patch: patch version
 *
 * Checks if the firmware API is at least the given version. Returns true
 * when the major version is newer, or when the major version matches and
 * the minor/patch pair is greater than or equal to the requested one.
 */
static bool ice_is_fw_api_min_ver(struct ice_hw *hw, u8 maj, u8 min, u8 patch)
{
	if (hw->api_maj_ver == maj) {
		if (hw->api_min_ver > min)
			return true;
		if (hw->api_min_ver == min && hw->api_patch >= patch)
			return true;
	} else if (hw->api_maj_ver > maj) {
		/* newer major version always satisfies the minimum */
		return true;
	}

	return false;
}

/**
 * ice_fw_supports_link_override - report link override capability
 * @hw: pointer to the hardware structure
 *
 * Return: true when the running firmware API meets the minimum version
 * required for link override, false otherwise.
 */
bool ice_fw_supports_link_override(struct ice_hw *hw)
{
	bool supported;

	supported = ice_is_fw_api_min_ver(hw, ICE_FW_API_LINK_OVERRIDE_MAJ,
					  ICE_FW_API_LINK_OVERRIDE_MIN,
					  ICE_FW_API_LINK_OVERRIDE_PATCH);

	return supported;
}

/**
 * ice_get_link_default_override
 * @ldo: pointer to the link default override struct
@@ -5436,16 +5451,9 @@ bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw)
	if (hw->mac_type != ICE_MAC_E810)
		return false;

	if (hw->api_maj_ver == ICE_FW_API_LLDP_FLTR_MAJ) {
		if (hw->api_min_ver > ICE_FW_API_LLDP_FLTR_MIN)
			return true;
		if (hw->api_min_ver == ICE_FW_API_LLDP_FLTR_MIN &&
		    hw->api_patch >= ICE_FW_API_LLDP_FLTR_PATCH)
			return true;
	} else if (hw->api_maj_ver > ICE_FW_API_LLDP_FLTR_MAJ) {
		return true;
	}
	return false;
	return ice_is_fw_api_min_ver(hw, ICE_FW_API_LLDP_FLTR_MAJ,
				     ICE_FW_API_LLDP_FLTR_MIN,
				     ICE_FW_API_LLDP_FLTR_PATCH);
}

/**
@@ -5482,14 +5490,7 @@ ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add)
 */
bool ice_fw_supports_report_dflt_cfg(struct ice_hw *hw)
{
	/* delegate the three-part version compare to the common helper */
	return ice_is_fw_api_min_ver(hw, ICE_FW_API_REPORT_DFLT_CFG_MAJ,
				     ICE_FW_API_REPORT_DFLT_CFG_MIN,
				     ICE_FW_API_REPORT_DFLT_CFG_PATCH);
}
+100 −85
Original line number Diff line number Diff line
@@ -3921,88 +3921,135 @@ static int ice_init_pf(struct ice_pf *pf)
	return 0;
}

/**
 * ice_reduce_msix_usage - Reduce usage of MSI-X vectors
 * @pf: board private structure
 * @v_remain: number of remaining MSI-X vectors to be distributed
 *
 * Reduce the usage of MSI-X vectors when entire request cannot be fulfilled.
 * Sets pf->num_lan_msix and pf->num_rdma_msix from the remaining vector
 * count, disabling RDMA entirely when even its minimum cannot be met.
 */
static void ice_reduce_msix_usage(struct ice_pf *pf, int v_remain)
{
	int rdma_need;

	/* Without RDMA, LAN simply takes everything that is left */
	if (!ice_is_rdma_ena(pf)) {
		pf->num_lan_msix = v_remain;
		return;
	}

	/* RDMA needs at least 1 interrupt in addition to AEQ MSIX */
	rdma_need = ICE_RDMA_NUM_AEQ_MSIX + 1;

	if (v_remain < ICE_MIN_LAN_TXRX_MSIX + ICE_MIN_RDMA_MSIX) {
		/* Both minimums cannot be satisfied - drop RDMA support */
		dev_warn(ice_pf_to_dev(pf), "Not enough MSI-X vectors to support RDMA.\n");
		clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);

		pf->num_rdma_msix = 0;
		pf->num_lan_msix = ICE_MIN_LAN_TXRX_MSIX;
		return;
	}

	if (v_remain < ICE_MIN_LAN_TXRX_MSIX + rdma_need ||
	    v_remain - rdma_need < rdma_need) {
		/* Support minimum RDMA and give remaining vectors to LAN MSIX */
		pf->num_rdma_msix = ICE_MIN_RDMA_MSIX;
		pf->num_lan_msix = v_remain - ICE_MIN_RDMA_MSIX;
		return;
	}

	/* Split remaining MSIX with RDMA after accounting for AEQ MSIX */
	pf->num_rdma_msix = (v_remain - ICE_RDMA_NUM_AEQ_MSIX) / 2 +
			    ICE_RDMA_NUM_AEQ_MSIX;
	pf->num_lan_msix = v_remain - pf->num_rdma_msix;
}

/**
 * ice_ena_msix_range - Request a range of MSIX vectors from the OS
 * @pf: board private structure
 *
 * compute the number of MSIX vectors required (v_budget) and request from
 * the OS. Return the number of vectors reserved or negative on failure
 * Compute the number of MSIX vectors wanted and request from the OS. Adjust
 * device usage if there are not enough vectors. Return the number of vectors
 * reserved or negative on failure.
 */
static int ice_ena_msix_range(struct ice_pf *pf)
{
	int num_cpus, v_left, v_actual, v_other, v_budget = 0;
	int num_cpus, hw_num_msix, v_other, v_wanted, v_actual;
	struct device *dev = ice_pf_to_dev(pf);
	int needed, err, i;
	int err, i;

	v_left = pf->hw.func_caps.common_cap.num_msix_vectors;
	hw_num_msix = pf->hw.func_caps.common_cap.num_msix_vectors;
	num_cpus = num_online_cpus();

	/* reserve for LAN miscellaneous handler */
	needed = ICE_MIN_LAN_OICR_MSIX;
	if (v_left < needed)
		goto no_hw_vecs_left_err;
	v_budget += needed;
	v_left -= needed;
	/* LAN miscellaneous handler */
	v_other = ICE_MIN_LAN_OICR_MSIX;

	/* reserve for flow director */
	if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) {
		needed = ICE_FDIR_MSIX;
		if (v_left < needed)
			goto no_hw_vecs_left_err;
		v_budget += needed;
		v_left -= needed;
	}

	/* reserve for switchdev */
	needed = ICE_ESWITCH_MSIX;
	if (v_left < needed)
		goto no_hw_vecs_left_err;
	v_budget += needed;
	v_left -= needed;

	/* total used for non-traffic vectors */
	v_other = v_budget;

	/* reserve vectors for LAN traffic */
	needed = num_cpus;
	if (v_left < needed)
		goto no_hw_vecs_left_err;
	pf->num_lan_msix = needed;
	v_budget += needed;
	v_left -= needed;

	/* reserve vectors for RDMA auxiliary driver */
	/* Flow Director */
	if (test_bit(ICE_FLAG_FD_ENA, pf->flags))
		v_other += ICE_FDIR_MSIX;

	/* switchdev */
	v_other += ICE_ESWITCH_MSIX;

	v_wanted = v_other;

	/* LAN traffic */
	pf->num_lan_msix = num_cpus;
	v_wanted += pf->num_lan_msix;

	/* RDMA auxiliary driver */
	if (ice_is_rdma_ena(pf)) {
		needed = num_cpus + ICE_RDMA_NUM_AEQ_MSIX;
		if (v_left < needed)
			goto no_hw_vecs_left_err;
		pf->num_rdma_msix = needed;
		v_budget += needed;
		v_left -= needed;
		pf->num_rdma_msix = num_cpus + ICE_RDMA_NUM_AEQ_MSIX;
		v_wanted += pf->num_rdma_msix;
	}

	if (v_wanted > hw_num_msix) {
		int v_remain;

		dev_warn(dev, "not enough device MSI-X vectors. wanted = %d, available = %d\n",
			 v_wanted, hw_num_msix);

		if (hw_num_msix < ICE_MIN_MSIX) {
			err = -ERANGE;
			goto exit_err;
		}

		v_remain = hw_num_msix - v_other;
		if (v_remain < ICE_MIN_LAN_TXRX_MSIX) {
			v_other = ICE_MIN_MSIX - ICE_MIN_LAN_TXRX_MSIX;
			v_remain = ICE_MIN_LAN_TXRX_MSIX;
		}

		ice_reduce_msix_usage(pf, v_remain);
		v_wanted = pf->num_lan_msix + pf->num_rdma_msix + v_other;

		dev_notice(dev, "Reducing request to %d MSI-X vectors for LAN traffic.\n",
			   pf->num_lan_msix);
		if (ice_is_rdma_ena(pf))
			dev_notice(dev, "Reducing request to %d MSI-X vectors for RDMA.\n",
				   pf->num_rdma_msix);
	}

	pf->msix_entries = devm_kcalloc(dev, v_budget,
	pf->msix_entries = devm_kcalloc(dev, v_wanted,
					sizeof(*pf->msix_entries), GFP_KERNEL);
	if (!pf->msix_entries) {
		err = -ENOMEM;
		goto exit_err;
	}

	for (i = 0; i < v_budget; i++)
	for (i = 0; i < v_wanted; i++)
		pf->msix_entries[i].entry = i;

	/* actually reserve the vectors */
	v_actual = pci_enable_msix_range(pf->pdev, pf->msix_entries,
					 ICE_MIN_MSIX, v_budget);
					 ICE_MIN_MSIX, v_wanted);
	if (v_actual < 0) {
		dev_err(dev, "unable to reserve MSI-X vectors\n");
		err = v_actual;
		goto msix_err;
	}

	if (v_actual < v_budget) {
	if (v_actual < v_wanted) {
		dev_warn(dev, "not enough OS MSI-X vectors. requested = %d, obtained = %d\n",
			 v_budget, v_actual);
			 v_wanted, v_actual);

		if (v_actual < ICE_MIN_MSIX) {
			/* error if we can't get minimum vectors */
@@ -4011,38 +4058,11 @@ static int ice_ena_msix_range(struct ice_pf *pf)
			goto msix_err;
		} else {
			int v_remain = v_actual - v_other;
			int v_rdma = 0, v_min_rdma = 0;

			if (ice_is_rdma_ena(pf)) {
				/* Need at least 1 interrupt in addition to
				 * AEQ MSIX
				 */
				v_rdma = ICE_RDMA_NUM_AEQ_MSIX + 1;
				v_min_rdma = ICE_MIN_RDMA_MSIX;
			}

			if (v_actual == ICE_MIN_MSIX ||
			    v_remain < ICE_MIN_LAN_TXRX_MSIX + v_min_rdma) {
				dev_warn(dev, "Not enough MSI-X vectors to support RDMA.\n");
				clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
			if (v_remain < ICE_MIN_LAN_TXRX_MSIX)
				v_remain = ICE_MIN_LAN_TXRX_MSIX;

				pf->num_rdma_msix = 0;
				pf->num_lan_msix = ICE_MIN_LAN_TXRX_MSIX;
			} else if ((v_remain < ICE_MIN_LAN_TXRX_MSIX + v_rdma) ||
				   (v_remain - v_rdma < v_rdma)) {
				/* Support minimum RDMA and give remaining
				 * vectors to LAN MSIX
				 */
				pf->num_rdma_msix = v_min_rdma;
				pf->num_lan_msix = v_remain - v_min_rdma;
			} else {
				/* Split remaining MSIX with RDMA after
				 * accounting for AEQ MSIX
				 */
				pf->num_rdma_msix = (v_remain - ICE_RDMA_NUM_AEQ_MSIX) / 2 +
						    ICE_RDMA_NUM_AEQ_MSIX;
				pf->num_lan_msix = v_remain - pf->num_rdma_msix;
			}
			ice_reduce_msix_usage(pf, v_remain);

			dev_notice(dev, "Enabled %d MSI-X vectors for LAN traffic.\n",
				   pf->num_lan_msix);
@@ -4057,12 +4077,7 @@ static int ice_ena_msix_range(struct ice_pf *pf)

msix_err:
	devm_kfree(dev, pf->msix_entries);
	goto exit_err;

no_hw_vecs_left_err:
	dev_err(dev, "not enough device MSI-X vectors. requested = %d, available = %d\n",
		needed, v_left);
	err = -ERANGE;
exit_err:
	pf->num_rdma_msix = 0;
	pf->num_lan_msix = 0;
+3 −0
Original line number Diff line number Diff line
@@ -1242,6 +1242,9 @@ static void ice_ptp_wait_for_offset_valid(struct kthread_work *work)
	hw = &pf->hw;
	dev = ice_pf_to_dev(pf);

	if (ice_is_reset_in_progress(pf->state))
		return;

	if (ice_ptp_check_offset_valid(port)) {
		/* Offsets not ready yet, try again later */
		kthread_queue_delayed_work(pf->ptp.kworker,
+2 −2
Original line number Diff line number Diff line
@@ -1212,7 +1212,7 @@ int ice_sched_init_port(struct ice_port_info *pi)
	hw = pi->hw;

	/* Query the Default Topology from FW */
	buf = devm_kzalloc(ice_hw_to_dev(hw), ICE_AQ_MAX_BUF_LEN, GFP_KERNEL);
	buf = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

@@ -1290,7 +1290,7 @@ int ice_sched_init_port(struct ice_port_info *pi)
		pi->root = NULL;
	}

	devm_kfree(ice_hw_to_dev(hw), buf);
	kfree(buf);
	return status;
}

+2 −4
Original line number Diff line number Diff line
@@ -2274,9 +2274,7 @@ int ice_get_initial_sw_cfg(struct ice_hw *hw)
	int status;
	u16 i;

	rbuf = devm_kzalloc(ice_hw_to_dev(hw), ICE_SW_CFG_MAX_BUF_LEN,
			    GFP_KERNEL);

	rbuf = kzalloc(ICE_SW_CFG_MAX_BUF_LEN, GFP_KERNEL);
	if (!rbuf)
		return -ENOMEM;

@@ -2324,7 +2322,7 @@ int ice_get_initial_sw_cfg(struct ice_hw *hw)
		}
	} while (req_desc && !status);

	devm_kfree(ice_hw_to_dev(hw), rbuf);
	kfree(rbuf);
	return status;
}