Commit fe40a830 authored by Prabhakar Kushwaha's avatar Prabhakar Kushwaha Committed by David S. Miller
Browse files

qed: Update qed_hsi.h for fw 8.59.1.0



The qed_hsi.h header has been updated to support the new FW version
8.59.1.0 with the following changes:
 - Updates FW HSI (Hardware Software interface) structures.
 - Addition/update in function declaration and defines as per HSI.
 - Add generic infrastructure for FW error reporting as part of
   common event queue handling.
 - Move malicious VF error reporting to FW error reporting
   infrastructure.
 - Move consolidation queue initialization from FW context to ramrod
   message.

qed_hsi.h header file changes lead to change in many files to ensure
compilation.

This patch also fixes the existing checkpatch warnings and a few
important checks.

Signed-off-by: default avatarAriel Elior <aelior@marvell.com>
Signed-off-by: default avatarShai Malin <smalin@marvell.com>
Signed-off-by: default avatarOmkar Kulkarni <okulkarni@marvell.com>
Signed-off-by: default avatarPrabhakar Kushwaha <pkushwaha@marvell.com>
Signed-off-by: default avatarDavid S. Miller <davem@davemloft.net>
parent f2a74107
Loading
Loading
Loading
Loading
+85 −27
Original line number Diff line number Diff line
@@ -1397,12 +1397,13 @@ void qed_resc_free(struct qed_dev *cdev)
			qed_rdma_info_free(p_hwfn);
		}

		qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_COMMON);
		qed_iov_free(p_hwfn);
		qed_l2_free(p_hwfn);
		qed_dmae_info_free(p_hwfn);
		qed_dcbx_info_free(p_hwfn);
		qed_dbg_user_data_free(p_hwfn);
		qed_fw_overlay_mem_free(p_hwfn, p_hwfn->fw_overlay_mem);
		qed_fw_overlay_mem_free(p_hwfn, &p_hwfn->fw_overlay_mem);

		/* Destroy doorbell recovery mechanism */
		qed_db_recovery_teardown(p_hwfn);
@@ -1629,9 +1630,9 @@ static void qed_init_qm_advance_vport(struct qed_hwfn *p_hwfn)
 */

/* flags for pq init */
#define PQ_INIT_SHARE_VPORT     (1 << 0)
#define PQ_INIT_PF_RL           (1 << 1)
#define PQ_INIT_VF_RL           (1 << 2)
#define PQ_INIT_SHARE_VPORT     BIT(0)
#define PQ_INIT_PF_RL           BIT(1)
#define PQ_INIT_VF_RL           BIT(2)

/* defines for pq init */
#define PQ_INIT_DEFAULT_WRR_GROUP       1
@@ -2376,6 +2377,49 @@ int qed_resc_alloc(struct qed_dev *cdev)
	return rc;
}

static int qed_fw_err_handler(struct qed_hwfn *p_hwfn,
			      u8 opcode,
			      u16 echo,
			      union event_ring_data *data, u8 fw_return_code)
{
	/* Handle a COMMON_EVENT_FW_ERROR EQ element.
	 *
	 * The only error currently consumed here is a function-scoped
	 * COMMON_ERR_CODE_ERROR whose entity id lies beyond the PF range,
	 * i.e. a malicious/faulty VF report - forward it to the SR-IOV
	 * layer. Any other combination is unexpected and is logged and
	 * rejected with -EINVAL.
	 */
	bool vf_malicious = fw_return_code == COMMON_ERR_CODE_ERROR &&
			    data->err_data.recovery_scope == ERR_SCOPE_FUNC &&
			    le16_to_cpu(data->err_data.entity_id) >= MAX_NUM_PFS;

	if (vf_malicious) {
		qed_sriov_vfpf_malicious(p_hwfn, &data->err_data);
		return 0;
	}

	DP_ERR(p_hwfn,
	       "Skipping unexpected eqe 0x%02x, FW return code 0x%x, echo 0x%x\n",
	       opcode, fw_return_code, echo);
	return -EINVAL;
}

/* Async-completion callback for PROTOCOLID_COMMON event-queue elements.
 *
 * Dispatches common EQ events to the appropriate handler:
 *  - VF->PF channel and VF FLR events go to the SR-IOV layer.
 *  - FW error events go to the generic FW error handler.
 * Returns 0 on success or a negative errno; unknown opcodes are logged
 * and rejected with -EINVAL.
 *
 * Note: @echo arrives little-endian from the FW and must be converted
 * with le16_to_cpu() before being printed or passed as a CPU value.
 */
static int qed_common_eqe_event(struct qed_hwfn *p_hwfn,
				u8 opcode,
				__le16 echo,
				union event_ring_data *data,
				u8 fw_return_code)
{
	switch (opcode) {
	case COMMON_EVENT_VF_PF_CHANNEL:
	case COMMON_EVENT_VF_FLR:
		/* SR-IOV handler consumes the LE value as-is */
		return qed_sriov_eqe_event(p_hwfn, opcode, echo, data,
					   fw_return_code);
	case COMMON_EVENT_FW_ERROR:
		return qed_fw_err_handler(p_hwfn, opcode,
					  le16_to_cpu(echo), data,
					  fw_return_code);
	default:
		/* Convert before logging - printing the raw __le16 would
		 * show a byte-swapped value on big-endian hosts and trips
		 * sparse's endianness checking.
		 */
		DP_INFO(p_hwfn->cdev, "Unknown eqe event 0x%02x, echo 0x%x\n",
			opcode, le16_to_cpu(echo));
		return -EINVAL;
	}
}

void qed_resc_setup(struct qed_dev *cdev)
{
	int i;
@@ -2404,6 +2448,8 @@ void qed_resc_setup(struct qed_dev *cdev)

		qed_l2_setup(p_hwfn);
		qed_iov_setup(p_hwfn);
		qed_spq_register_async_cb(p_hwfn, PROTOCOLID_COMMON,
					  qed_common_eqe_event);
#ifdef CONFIG_QED_LL2
		if (p_hwfn->using_ll2)
			qed_ll2_setup(p_hwfn);
@@ -2593,7 +2639,7 @@ static void qed_init_cache_line_size(struct qed_hwfn *p_hwfn,
			cache_line_size);
	}

	if (L1_CACHE_BYTES > wr_mbs)
	if (wr_mbs < L1_CACHE_BYTES)
		DP_INFO(p_hwfn,
			"The cache line size for padding is suboptimal for performance [OS cache line size 0x%x, wr mbs 0x%x]\n",
			L1_CACHE_BYTES, wr_mbs);
@@ -2609,13 +2655,21 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
			      struct qed_ptt *p_ptt, int hw_mode)
{
	struct qed_qm_info *qm_info = &p_hwfn->qm_info;
	struct qed_qm_common_rt_init_params params;
	struct qed_qm_common_rt_init_params *params;
	struct qed_dev *cdev = p_hwfn->cdev;
	u8 vf_id, max_num_vfs;
	u16 num_pfs, pf_id;
	u32 concrete_fid;
	int rc = 0;

	params = kzalloc(sizeof(*params), GFP_KERNEL);
	if (!params) {
		DP_NOTICE(p_hwfn->cdev,
			  "Failed to allocate common init params\n");

		return -ENOMEM;
	}

	qed_init_cau_rt_data(cdev);

	/* Program GTT windows */
@@ -2628,16 +2682,15 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
			qm_info->pf_wfq_en = true;
	}

	memset(&params, 0, sizeof(params));
	params.max_ports_per_engine = p_hwfn->cdev->num_ports_in_engine;
	params.max_phys_tcs_per_port = qm_info->max_phys_tcs_per_port;
	params.pf_rl_en = qm_info->pf_rl_en;
	params.pf_wfq_en = qm_info->pf_wfq_en;
	params.global_rl_en = qm_info->vport_rl_en;
	params.vport_wfq_en = qm_info->vport_wfq_en;
	params.port_params = qm_info->qm_port_params;
	params->max_ports_per_engine = p_hwfn->cdev->num_ports_in_engine;
	params->max_phys_tcs_per_port = qm_info->max_phys_tcs_per_port;
	params->pf_rl_en = qm_info->pf_rl_en;
	params->pf_wfq_en = qm_info->pf_wfq_en;
	params->global_rl_en = qm_info->vport_rl_en;
	params->vport_wfq_en = qm_info->vport_wfq_en;
	params->port_params = qm_info->qm_port_params;

	qed_qm_common_rt_init(p_hwfn, &params);
	qed_qm_common_rt_init(p_hwfn, params);

	qed_cxt_hw_init_common(p_hwfn);

@@ -2645,7 +2698,7 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn,

	rc = qed_init_run(p_hwfn, p_ptt, PHASE_ENGINE, ANY_PHASE_ID, hw_mode);
	if (rc)
		return rc;
		goto out;

	qed_wr(p_hwfn, p_ptt, PSWRQ2_REG_L2P_VALIDATE_VFID, 0);
	qed_wr(p_hwfn, p_ptt, PGLUE_B_REG_USE_CLIENTID_IN_TAG, 1);
@@ -2673,6 +2726,9 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
	/* pretend to original PF */
	qed_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id);

out:
	kfree(params);

	return rc;
}

@@ -3671,12 +3727,14 @@ u32 qed_get_hsi_def_val(struct qed_dev *cdev, enum qed_hsi_def_type type)

	return qed_hsi_def_val[type][chip_id];
}

static int
qed_hw_set_soft_resc_size(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 resc_max_val, mcp_resp;
	u8 res_id;
	int rc;

	for (res_id = 0; res_id < QED_MAX_RESC; res_id++) {
		switch (res_id) {
		case QED_LL2_RAM_QUEUE:
@@ -3922,7 +3980,7 @@ static int qed_hw_get_resc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
	 * resources allocation queries should be atomic. Since several PFs can
	 * run in parallel - a resource lock is needed.
	 * If either the resource lock or resource set value commands are not
	 * supported - skip the the max values setting, release the lock if
	 * supported - skip the max values setting, release the lock if
	 * needed, and proceed to the queries. Other failures, including a
	 * failure to acquire the lock, will cause this function to fail.
	 */
+1376 −180

File changed.

Preview size limit exceeded, changes collapsed.

+8 −6
Original line number Diff line number Diff line
@@ -920,7 +920,8 @@ int qed_init_vport_wfq(struct qed_hwfn *p_hwfn,
}

int qed_init_global_rl(struct qed_hwfn *p_hwfn,
		       struct qed_ptt *p_ptt, u16 rl_id, u32 rate_limit)
		       struct qed_ptt *p_ptt, u16 rl_id, u32 rate_limit,
		       enum init_qm_rl_type vport_rl_type)
{
	u32 inc_val;

@@ -1645,7 +1646,7 @@ struct phys_mem_desc *qed_fw_overlay_mem_alloc(struct qed_hwfn *p_hwfn,

	/* If memory allocation has failed, free all allocated memory */
	if (buf_offset < buf_size) {
		qed_fw_overlay_mem_free(p_hwfn, allocated_mem);
		qed_fw_overlay_mem_free(p_hwfn, &allocated_mem);
		return NULL;
	}

@@ -1679,16 +1680,16 @@ void qed_fw_overlay_init_ram(struct qed_hwfn *p_hwfn,
}

void qed_fw_overlay_mem_free(struct qed_hwfn *p_hwfn,
			     struct phys_mem_desc *fw_overlay_mem)
			     struct phys_mem_desc **fw_overlay_mem)
{
	u8 storm_id;

	if (!fw_overlay_mem)
	if (!fw_overlay_mem || !(*fw_overlay_mem))
		return;

	for (storm_id = 0; storm_id < NUM_STORMS; storm_id++) {
		struct phys_mem_desc *storm_mem_desc =
		    (struct phys_mem_desc *)fw_overlay_mem + storm_id;
		    (struct phys_mem_desc *)*fw_overlay_mem + storm_id;

		/* Free Storm's physical memory */
		if (storm_mem_desc->virt_addr)
@@ -1699,5 +1700,6 @@ void qed_fw_overlay_mem_free(struct qed_hwfn *p_hwfn,
	}

	/* Free allocated virtual memory */
	kfree(fw_overlay_mem);
	kfree(*fw_overlay_mem);
	*fw_overlay_mem = NULL;
}
+2 −4
Original line number Diff line number Diff line
@@ -38,7 +38,6 @@
#include "qed_sp.h"
#include "qed_sriov.h"


#define QED_MAX_SGES_NUM 16
#define CRC32_POLY 0x1edc6f41

@@ -1112,7 +1111,6 @@ qed_eth_pf_tx_queue_start(struct qed_hwfn *p_hwfn,
{
	int rc;


	rc = qed_eth_txq_start_ramrod(p_hwfn, p_cid,
				      pbl_addr, pbl_size,
				      qed_get_cm_pq_idx_mcos(p_hwfn, tc));
@@ -2011,7 +2009,7 @@ qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn,
				struct qed_spq_comp_cb *p_cb,
				struct qed_ntuple_filter_params *p_params)
{
	struct rx_update_gft_filter_data *p_ramrod = NULL;
	struct rx_update_gft_filter_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	u16 abs_rx_q_id = 0;
@@ -2032,7 +2030,7 @@ qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn,
	}

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ETH_RAMROD_GFT_UPDATE_FILTER,
				 ETH_RAMROD_RX_UPDATE_GFT_FILTER,
				 PROTOCOLID_ETH, &init_data);
	if (rc)
		return rc;
+0 −1
Original line number Diff line number Diff line
@@ -146,7 +146,6 @@ struct qed_sp_vport_start_params {
int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
			   struct qed_sp_vport_start_params *p_params);


struct qed_filter_accept_flags {
	u8	update_rx_mode_config;
	u8	update_tx_mode_config;
Loading