Commit cfd40b82 authored by Jakub Kicinski
Browse files
Tony Nguyen says:

====================
Intel Wired LAN Driver Updates 2023-06-22 (ice)

This series contains updates to ice driver only.

Jake adds a slight initial wait on control queue send to reduce the wait
time for responses that complete within normal times.

Maciej allows for hot-swapping XDP programs.

Przemek removes unnecessary checks when enabling SR-IOV and freeing
allocated memory.

Christophe Jaillet converts a managed memory allocation to a regular one.

* '100GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/next-queue:
  ice: use ice_down_up() where applicable
  ice: Remove managed memory usage in ice_get_fw_log_cfg()
  ice: remove null checks before devm_kfree() calls
  ice: clean up freeing SR-IOV VFs
  ice: allow hot-swapping XDP programs
  ice: reduce initial wait for control queue messages
====================

Link: https://lore.kernel.org/r/20230622183601.2406499-1-anthony.l.nguyen@intel.com


Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents 2fe11c9d b7a03457
Loading
Loading
Loading
Loading
+4 −6
Original line number Diff line number Diff line
@@ -814,7 +814,6 @@ static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
				devm_kfree(ice_hw_to_dev(hw), lst_itr);
			}
		}
		if (recps[i].root_buf)
		devm_kfree(ice_hw_to_dev(hw), recps[i].root_buf);
	}
	ice_rm_all_sw_replay_rule_info(hw);
@@ -834,7 +833,7 @@ static int ice_get_fw_log_cfg(struct ice_hw *hw)
	u16 size;

	size = sizeof(*config) * ICE_AQC_FW_LOG_ID_MAX;
	config = devm_kzalloc(ice_hw_to_dev(hw), size, GFP_KERNEL);
	config = kzalloc(size, GFP_KERNEL);
	if (!config)
		return -ENOMEM;

@@ -857,7 +856,7 @@ static int ice_get_fw_log_cfg(struct ice_hw *hw)
		}
	}

	devm_kfree(ice_hw_to_dev(hw), config);
	kfree(config);

	return status;
}
@@ -1011,7 +1010,6 @@ static int ice_cfg_fw_log(struct ice_hw *hw, bool enable)
	}

out:
	if (data)
	devm_kfree(ice_hw_to_dev(hw), data);

	return status;
+8 −4
Original line number Diff line number Diff line
@@ -339,7 +339,6 @@ do { \
		}							\
	}								\
	/* free the buffer info list */					\
	if ((qi)->ring.cmd_buf)						\
	devm_kfree(ice_hw_to_dev(hw), (qi)->ring.cmd_buf);		\
	/* free DMA head */						\
	devm_kfree(ice_hw_to_dev(hw), (qi)->ring.dma_head);		\
@@ -1056,14 +1055,19 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
	if (cq->sq.next_to_use == cq->sq.count)
		cq->sq.next_to_use = 0;
	wr32(hw, cq->sq.tail, cq->sq.next_to_use);
	ice_flush(hw);

	/* Wait a short time before initial ice_sq_done() check, to allow
	 * hardware time for completion.
	 */
	udelay(5);

	timeout = jiffies + ICE_CTL_Q_SQ_CMD_TIMEOUT;
	do {
		if (ice_sq_done(hw, cq))
			break;

		usleep_range(ICE_CTL_Q_SQ_CMD_USEC,
			     ICE_CTL_Q_SQ_CMD_USEC * 3 / 2);
		usleep_range(100, 150);
	} while (time_before(jiffies, timeout));

	/* if ready, copy the desc back to temp */
+0 −1
Original line number Diff line number Diff line
@@ -35,7 +35,6 @@ enum ice_ctl_q {

/* Control Queue timeout settings - max delay 1s */
#define ICE_CTL_Q_SQ_CMD_TIMEOUT	HZ    /* Wait max 1s */
#define ICE_CTL_Q_SQ_CMD_USEC		100   /* Check every 100usec */
#define ICE_CTL_Q_ADMIN_INIT_TIMEOUT	10    /* Count 10 times */
#define ICE_CTL_Q_ADMIN_INIT_MSEC	100   /* Check every 100msec */

+3 −20
Original line number Diff line number Diff line
@@ -1303,23 +1303,6 @@ ice_flow_find_prof_id(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
	return NULL;
}

/**
 * ice_dealloc_flow_entry - Deallocate flow entry memory
 * @hw: pointer to the HW struct
 * @entry: flow entry to be removed
 */
static void
ice_dealloc_flow_entry(struct ice_hw *hw, struct ice_flow_entry *entry)
{
	if (!entry)
		return;

	if (entry->entry)
		devm_kfree(ice_hw_to_dev(hw), entry->entry);

	devm_kfree(ice_hw_to_dev(hw), entry);
}

/**
 * ice_flow_rem_entry_sync - Remove a flow entry
 * @hw: pointer to the HW struct
@@ -1335,7 +1318,8 @@ ice_flow_rem_entry_sync(struct ice_hw *hw, enum ice_block __always_unused blk,

	list_del(&entry->l_entry);

	ice_dealloc_flow_entry(hw, entry);
	devm_kfree(ice_hw_to_dev(hw), entry->entry);
	devm_kfree(ice_hw_to_dev(hw), entry);

	return 0;
}
@@ -1662,7 +1646,6 @@ ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id,

out:
	if (status && e) {
		if (e->entry)
		devm_kfree(ice_hw_to_dev(hw), e->entry);
		devm_kfree(ice_hw_to_dev(hw), e);
	}
+14 −28
Original line number Diff line number Diff line
@@ -321,32 +321,20 @@ static void ice_vsi_free_arrays(struct ice_vsi *vsi)

	dev = ice_pf_to_dev(pf);

	if (vsi->af_xdp_zc_qps) {
	bitmap_free(vsi->af_xdp_zc_qps);
	vsi->af_xdp_zc_qps = NULL;
	}
	/* free the ring and vector containers */
	if (vsi->q_vectors) {
	devm_kfree(dev, vsi->q_vectors);
	vsi->q_vectors = NULL;
	}
	if (vsi->tx_rings) {
	devm_kfree(dev, vsi->tx_rings);
	vsi->tx_rings = NULL;
	}
	if (vsi->rx_rings) {
	devm_kfree(dev, vsi->rx_rings);
	vsi->rx_rings = NULL;
	}
	if (vsi->txq_map) {
	devm_kfree(dev, vsi->txq_map);
	vsi->txq_map = NULL;
	}
	if (vsi->rxq_map) {
	devm_kfree(dev, vsi->rxq_map);
	vsi->rxq_map = NULL;
}
}

/**
 * ice_vsi_free_stats - Free the ring statistics structures
@@ -902,9 +890,7 @@ static void ice_rss_clean(struct ice_vsi *vsi)

	dev = ice_pf_to_dev(pf);

	if (vsi->rss_hkey_user)
	devm_kfree(dev, vsi->rss_hkey_user);
	if (vsi->rss_lut_user)
	devm_kfree(dev, vsi->rss_lut_user);

	ice_vsi_clean_rss_flow_fld(vsi);
Loading