Commit 5ab90341 authored by Alexander Lobakin, committed by David S. Miller
Browse files

net: qed: sanitize BE/LE data processing



The current code assumes in many places that both the host and the device
operate in Little Endian. While this is true for the x86 platform, it does
not mean we should not care about byte ordering.

This commit addresses all parts of the code that were pointed out by the
sparse checker. All operations on restricted (__be*/__le*) types are now
protected with explicit from/to-CPU conversions, even if they are no-ops
on common setups.

I'm sure there are more such places, but finding them requires a deeper
code investigation, which is a subject for future work.

Signed-off-by: Alexander Lobakin <alobakin@marvell.com>
Signed-off-by: Igor Russkikh <irusskikh@marvell.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent a0f3266f
Loading
Loading
Loading
Loading
+7 −4
Original line number Diff line number Diff line
@@ -73,8 +73,8 @@ union type1_task_context {
};

struct src_ent {
	u8				opaque[56];
	u64				next;
	__u8				opaque[56];
	__be64				next;
};

#define CDUT_SEG_ALIGNMET		3 /* in 4k chunks */
@@ -2177,6 +2177,7 @@ qed_cxt_dynamic_ilt_alloc(struct qed_hwfn *p_hwfn,
	dma_addr_t p_phys;
	u64 ilt_hw_entry;
	void *p_virt;
	u32 flags1;
	int rc = 0;

	switch (elem_type) {
@@ -2255,8 +2256,10 @@ qed_cxt_dynamic_ilt_alloc(struct qed_hwfn *p_hwfn,
			elem = (union type1_task_context *)elem_start;
			tdif_context = &elem->roce_ctx.tdif_context;

			SET_FIELD(tdif_context->flags1,
				  TDIF_TASK_CONTEXT_REF_TAG_MASK, 0xf);
			flags1 = le32_to_cpu(tdif_context->flags1);
			SET_FIELD(flags1, TDIF_TASK_CONTEXT_REF_TAG_MASK, 0xf);
			tdif_context->flags1 = cpu_to_le32(flags1);

			elem_start += TYPE1_TASK_CXT_SIZE(p_hwfn);
		}
	}
+13 −14
Original line number Diff line number Diff line
@@ -547,7 +547,8 @@ qed_dcbx_get_ets_data(struct qed_hwfn *p_hwfn,
		      struct dcbx_ets_feature *p_ets,
		      struct qed_dcbx_params *p_params)
{
	u32 bw_map[2], tsa_map[2], pri_map;
	__be32 bw_map[2], tsa_map[2];
	u32 pri_map;
	int i;

	p_params->ets_willing = QED_MFW_GET_FIELD(p_ets->flags,
@@ -573,11 +574,10 @@ qed_dcbx_get_ets_data(struct qed_hwfn *p_hwfn,
	/* 8 bit tsa and bw data corresponding to each of the 8 TC's are
	 * encoded in a type u32 array of size 2.
	 */
	bw_map[0] = be32_to_cpu(p_ets->tc_bw_tbl[0]);
	bw_map[1] = be32_to_cpu(p_ets->tc_bw_tbl[1]);
	tsa_map[0] = be32_to_cpu(p_ets->tc_tsa_tbl[0]);
	tsa_map[1] = be32_to_cpu(p_ets->tc_tsa_tbl[1]);
	cpu_to_be32_array(bw_map, p_ets->tc_bw_tbl, 2);
	cpu_to_be32_array(tsa_map, p_ets->tc_tsa_tbl, 2);
	pri_map = p_ets->pri_tc_tbl[0];

	for (i = 0; i < QED_MAX_PFC_PRIORITIES; i++) {
		p_params->ets_tc_bw_tbl[i] = ((u8 *)bw_map)[i];
		p_params->ets_tc_tsa_tbl[i] = ((u8 *)tsa_map)[i];
@@ -1054,7 +1054,7 @@ qed_dcbx_set_ets_data(struct qed_hwfn *p_hwfn,
		      struct dcbx_ets_feature *p_ets,
		      struct qed_dcbx_params *p_params)
{
	u8 *bw_map, *tsa_map;
	__be32 bw_map[2], tsa_map[2];
	u32 val;
	int i;

@@ -1076,22 +1076,21 @@ qed_dcbx_set_ets_data(struct qed_hwfn *p_hwfn,
	p_ets->flags &= ~DCBX_ETS_MAX_TCS_MASK;
	p_ets->flags |= (u32)p_params->max_ets_tc << DCBX_ETS_MAX_TCS_SHIFT;

	bw_map = (u8 *)&p_ets->tc_bw_tbl[0];
	tsa_map = (u8 *)&p_ets->tc_tsa_tbl[0];
	p_ets->pri_tc_tbl[0] = 0;

	for (i = 0; i < QED_MAX_PFC_PRIORITIES; i++) {
		bw_map[i] = p_params->ets_tc_bw_tbl[i];
		tsa_map[i] = p_params->ets_tc_tsa_tbl[i];
		((u8 *)bw_map)[i] = p_params->ets_tc_bw_tbl[i];
		((u8 *)tsa_map)[i] = p_params->ets_tc_tsa_tbl[i];

		/* Copy the priority value to the corresponding 4 bits in the
		 * traffic class table.
		 */
		val = (((u32)p_params->ets_pri_tc_tbl[i]) << ((7 - i) * 4));
		p_ets->pri_tc_tbl[0] |= val;
	}
	for (i = 0; i < 2; i++) {
		p_ets->tc_bw_tbl[i] = cpu_to_be32(p_ets->tc_bw_tbl[i]);
		p_ets->tc_tsa_tbl[i] = cpu_to_be32(p_ets->tc_tsa_tbl[i]);
	}

	be32_to_cpu_array(p_ets->tc_bw_tbl, bw_map, 2);
	be32_to_cpu_array(p_ets->tc_tsa_tbl, tsa_map, 2);
}

static void
+29 −20
Original line number Diff line number Diff line
@@ -972,7 +972,7 @@ static void qed_read_storm_fw_info(struct qed_hwfn *p_hwfn,
{
	struct storm_defs *storm = &s_storm_defs[storm_id];
	struct fw_info_location fw_info_location;
	u32 addr, i, *dest;
	u32 addr, i, size, *dest;

	memset(&fw_info_location, 0, sizeof(fw_info_location));
	memset(fw_info, 0, sizeof(*fw_info));
@@ -985,20 +985,29 @@ static void qed_read_storm_fw_info(struct qed_hwfn *p_hwfn,
	    sizeof(fw_info_location);

	dest = (u32 *)&fw_info_location;
	size = BYTES_TO_DWORDS(sizeof(fw_info_location));

	for (i = 0; i < BYTES_TO_DWORDS(sizeof(fw_info_location));
	     i++, addr += BYTES_IN_DWORD)
	for (i = 0; i < size; i++, addr += BYTES_IN_DWORD)
		dest[i] = qed_rd(p_hwfn, p_ptt, addr);

	/* qed_rq() fetches data in CPU byteorder. Swap it back to
	 * the device's to get right structure layout.
	 */
	cpu_to_le32_array(dest, size);

	/* Read FW version info from Storm RAM */
	if (fw_info_location.size > 0 && fw_info_location.size <=
	    sizeof(*fw_info)) {
		addr = fw_info_location.grc_addr;
	size = le32_to_cpu(fw_info_location.size);
	if (!size || size > sizeof(*fw_info))
		return;

	addr = le32_to_cpu(fw_info_location.grc_addr);
	dest = (u32 *)fw_info;
		for (i = 0; i < BYTES_TO_DWORDS(fw_info_location.size);
		     i++, addr += BYTES_IN_DWORD)
	size = BYTES_TO_DWORDS(size);

	for (i = 0; i < size; i++, addr += BYTES_IN_DWORD)
		dest[i] = qed_rd(p_hwfn, p_ptt, addr);
	}

	cpu_to_le32_array(dest, size);
}

/* Dumps the specified string to the specified buffer.
@@ -1123,7 +1132,7 @@ static u32 qed_dump_fw_ver_param(struct qed_hwfn *p_hwfn,
	offset += qed_dump_str_param(dump_buf + offset,
				     dump, "fw-image", fw_img_str);
	offset += qed_dump_num_param(dump_buf + offset, dump, "fw-timestamp",
				     fw_info.ver.timestamp);
				     le32_to_cpu(fw_info.ver.timestamp));

	return offset;
}
@@ -4440,9 +4449,11 @@ static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn,
			continue;
		}

		addr = le16_to_cpu(asserts->section_ram_line_offset);
		fw_asserts_section_addr = storm->sem_fast_mem_addr +
					  SEM_FAST_REG_INT_RAM +
			RAM_LINES_TO_BYTES(asserts->section_ram_line_offset);
					  RAM_LINES_TO_BYTES(addr);

		next_list_idx_addr = fw_asserts_section_addr +
			DWORDS_TO_BYTES(asserts->list_next_index_dword_offset);
		next_list_idx = qed_rd(p_hwfn, p_ptt, next_list_idx_addr);
@@ -7650,8 +7661,7 @@ static int qed_dbg_nvm_image(struct qed_dev *cdev, void *buffer,
{
	struct qed_hwfn *p_hwfn =
		&cdev->hwfns[cdev->engine_for_debug];
	u32 len_rounded, i;
	__be32 val;
	u32 len_rounded;
	int rc;

	*num_dumped_bytes = 0;
@@ -7670,10 +7680,9 @@ static int qed_dbg_nvm_image(struct qed_dev *cdev, void *buffer,

	/* QED_NVM_IMAGE_NVM_META image is not swapped like other images */
	if (image_id != QED_NVM_IMAGE_NVM_META)
		for (i = 0; i < len_rounded; i += 4) {
			val = cpu_to_be32(*(u32 *)(buffer + i));
			*(u32 *)(buffer + i) = val;
		}
		cpu_to_be32_array((__force __be32 *)buffer,
				  (const u32 *)buffer,
				  len_rounded / sizeof(u32));

	*num_dumped_bytes = len_rounded;

+28 −26
Original line number Diff line number Diff line
@@ -95,7 +95,7 @@ qed_sp_fcoe_func_start(struct qed_hwfn *p_hwfn,
	struct qed_cxt_info cxt_info;
	u32 dummy_cid;
	int rc = 0;
	u16 tmp;
	__le16 tmp;
	u8 i;

	/* Get SPQ entry */
@@ -162,17 +162,13 @@ qed_sp_fcoe_func_start(struct qed_hwfn *p_hwfn,
	tmp = cpu_to_le16(fcoe_pf_params->cmdq_num_entries);
	p_data->q_params.cmdq_num_entries = tmp;

	tmp = fcoe_pf_params->num_cqs;
	p_data->q_params.num_queues = (u8)tmp;
	p_data->q_params.num_queues = fcoe_pf_params->num_cqs;

	tmp = (u16)p_hwfn->hw_info.resc_start[QED_CMDQS_CQS];
	p_data->q_params.queue_relative_offset = (u8)tmp;
	tmp = (__force __le16)p_hwfn->hw_info.resc_start[QED_CMDQS_CQS];
	p_data->q_params.queue_relative_offset = (__force u8)tmp;

	for (i = 0; i < fcoe_pf_params->num_cqs; i++) {
		u16 igu_sb_id;

		igu_sb_id = qed_get_igu_sb_id(p_hwfn, i);
		tmp = cpu_to_le16(igu_sb_id);
		tmp = cpu_to_le16(qed_get_igu_sb_id(p_hwfn, i));
		p_data->q_params.cq_cmdq_sb_num_arr[i] = tmp;
	}

@@ -185,21 +181,21 @@ qed_sp_fcoe_func_start(struct qed_hwfn *p_hwfn,
		       fcoe_pf_params->bdq_pbl_base_addr[BDQ_ID_RQ]);
	p_data->q_params.bdq_pbl_num_entries[BDQ_ID_RQ] =
	    fcoe_pf_params->bdq_pbl_num_entries[BDQ_ID_RQ];
	tmp = fcoe_pf_params->bdq_xoff_threshold[BDQ_ID_RQ];
	p_data->q_params.bdq_xoff_threshold[BDQ_ID_RQ] = cpu_to_le16(tmp);
	tmp = fcoe_pf_params->bdq_xon_threshold[BDQ_ID_RQ];
	p_data->q_params.bdq_xon_threshold[BDQ_ID_RQ] = cpu_to_le16(tmp);
	tmp = cpu_to_le16(fcoe_pf_params->bdq_xoff_threshold[BDQ_ID_RQ]);
	p_data->q_params.bdq_xoff_threshold[BDQ_ID_RQ] = tmp;
	tmp = cpu_to_le16(fcoe_pf_params->bdq_xon_threshold[BDQ_ID_RQ]);
	p_data->q_params.bdq_xon_threshold[BDQ_ID_RQ] = tmp;

	DMA_REGPAIR_LE(p_data->q_params.bdq_pbl_base_address[BDQ_ID_IMM_DATA],
		       fcoe_pf_params->bdq_pbl_base_addr[BDQ_ID_IMM_DATA]);
	p_data->q_params.bdq_pbl_num_entries[BDQ_ID_IMM_DATA] =
	    fcoe_pf_params->bdq_pbl_num_entries[BDQ_ID_IMM_DATA];
	tmp = fcoe_pf_params->bdq_xoff_threshold[BDQ_ID_IMM_DATA];
	p_data->q_params.bdq_xoff_threshold[BDQ_ID_IMM_DATA] = cpu_to_le16(tmp);
	tmp = fcoe_pf_params->bdq_xon_threshold[BDQ_ID_IMM_DATA];
	p_data->q_params.bdq_xon_threshold[BDQ_ID_IMM_DATA] = cpu_to_le16(tmp);
	tmp = fcoe_pf_params->rq_buffer_size;
	p_data->q_params.rq_buffer_size = cpu_to_le16(tmp);
	tmp = cpu_to_le16(fcoe_pf_params->bdq_xoff_threshold[BDQ_ID_IMM_DATA]);
	p_data->q_params.bdq_xoff_threshold[BDQ_ID_IMM_DATA] = tmp;
	tmp = cpu_to_le16(fcoe_pf_params->bdq_xon_threshold[BDQ_ID_IMM_DATA]);
	p_data->q_params.bdq_xon_threshold[BDQ_ID_IMM_DATA] = tmp;
	tmp = cpu_to_le16(fcoe_pf_params->rq_buffer_size);
	p_data->q_params.rq_buffer_size = tmp;

	if (fcoe_pf_params->is_target) {
		SET_FIELD(p_data->q_params.q_validity,
@@ -233,7 +229,8 @@ qed_sp_fcoe_conn_offload(struct qed_hwfn *p_hwfn,
	struct fcoe_conn_offload_ramrod_data *p_data;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	u16 physical_q0, tmp;
	u16 physical_q0;
	__le16 tmp;
	int rc;

	/* Get SPQ entry */
@@ -254,7 +251,7 @@ qed_sp_fcoe_conn_offload(struct qed_hwfn *p_hwfn,

	/* Transmission PQ is the first of the PF */
	physical_q0 = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
	p_conn->physical_q0 = cpu_to_le16(physical_q0);
	p_conn->physical_q0 = physical_q0;
	p_data->physical_q0 = cpu_to_le16(physical_q0);

	p_data->conn_id = cpu_to_le16(p_conn->conn_id);
@@ -553,8 +550,8 @@ int qed_fcoe_alloc(struct qed_hwfn *p_hwfn)
void qed_fcoe_setup(struct qed_hwfn *p_hwfn)
{
	struct e4_fcoe_task_context *p_task_ctx = NULL;
	u32 i, lc;
	int rc;
	u32 i;

	spin_lock_init(&p_hwfn->p_fcoe_info->lock);
	for (i = 0; i < p_hwfn->pf_params.fcoe_pf_params.num_tasks; i++) {
@@ -565,10 +562,15 @@ void qed_fcoe_setup(struct qed_hwfn *p_hwfn)
			continue;

		memset(p_task_ctx, 0, sizeof(struct e4_fcoe_task_context));
		SET_FIELD(p_task_ctx->timer_context.logical_client_0,
			  TIMERS_CONTEXT_VALIDLC0, 1);
		SET_FIELD(p_task_ctx->timer_context.logical_client_1,
			  TIMERS_CONTEXT_VALIDLC1, 1);

		lc = 0;
		SET_FIELD(lc, TIMERS_CONTEXT_VALIDLC0, 1);
		p_task_ctx->timer_context.logical_client_0 = cpu_to_le32(lc);

		lc = 0;
		SET_FIELD(lc, TIMERS_CONTEXT_VALIDLC1, 1);
		p_task_ctx->timer_context.logical_client_1 = cpu_to_le32(lc);

		SET_FIELD(p_task_ctx->tstorm_ag_context.flags0,
			  E4_TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE, 1);
	}
+24 −24
Original line number Diff line number Diff line
@@ -2793,7 +2793,7 @@ struct fw_overlay_buf_hdr {

/* init array header: raw */
struct init_array_raw_hdr {
	u32						data;
	__le32						data;
#define INIT_ARRAY_RAW_HDR_TYPE_MASK			0xF
#define INIT_ARRAY_RAW_HDR_TYPE_SHIFT			0
#define INIT_ARRAY_RAW_HDR_PARAMS_MASK			0xFFFFFFF
@@ -2802,7 +2802,7 @@ struct init_array_raw_hdr {

/* init array header: standard */
struct init_array_standard_hdr {
	u32						data;
	__le32						data;
#define INIT_ARRAY_STANDARD_HDR_TYPE_MASK		0xF
#define INIT_ARRAY_STANDARD_HDR_TYPE_SHIFT		0
#define INIT_ARRAY_STANDARD_HDR_SIZE_MASK		0xFFFFFFF
@@ -2811,7 +2811,7 @@ struct init_array_standard_hdr {

/* init array header: zipped */
struct init_array_zipped_hdr {
	u32						data;
	__le32						data;
#define INIT_ARRAY_ZIPPED_HDR_TYPE_MASK			0xF
#define INIT_ARRAY_ZIPPED_HDR_TYPE_SHIFT		0
#define INIT_ARRAY_ZIPPED_HDR_ZIPPED_SIZE_MASK		0xFFFFFFF
@@ -2820,7 +2820,7 @@ struct init_array_zipped_hdr {

/* init array header: pattern */
struct init_array_pattern_hdr {
	u32						data;
	__le32						data;
#define INIT_ARRAY_PATTERN_HDR_TYPE_MASK		0xF
#define INIT_ARRAY_PATTERN_HDR_TYPE_SHIFT		0
#define INIT_ARRAY_PATTERN_HDR_PATTERN_SIZE_MASK	0xF
@@ -2847,48 +2847,48 @@ enum init_array_types {

/* init operation: callback */
struct init_callback_op {
	u32						op_data;
	__le32						op_data;
#define INIT_CALLBACK_OP_OP_MASK			0xF
#define INIT_CALLBACK_OP_OP_SHIFT			0
#define INIT_CALLBACK_OP_RESERVED_MASK			0xFFFFFFF
#define INIT_CALLBACK_OP_RESERVED_SHIFT			4
	u16						callback_id;
	u16						block_id;
	__le16						callback_id;
	__le16						block_id;
};

/* init operation: delay */
struct init_delay_op {
	u32						op_data;
	__le32						op_data;
#define INIT_DELAY_OP_OP_MASK				0xF
#define INIT_DELAY_OP_OP_SHIFT				0
#define INIT_DELAY_OP_RESERVED_MASK			0xFFFFFFF
#define INIT_DELAY_OP_RESERVED_SHIFT			4
	u32						delay;
	__le32						delay;
};

/* init operation: if_mode */
struct init_if_mode_op {
	u32						op_data;
	__le32						op_data;
#define INIT_IF_MODE_OP_OP_MASK				0xF
#define INIT_IF_MODE_OP_OP_SHIFT			0
#define INIT_IF_MODE_OP_RESERVED1_MASK			0xFFF
#define INIT_IF_MODE_OP_RESERVED1_SHIFT			4
#define INIT_IF_MODE_OP_CMD_OFFSET_MASK			0xFFFF
#define INIT_IF_MODE_OP_CMD_OFFSET_SHIFT		16
	u16						reserved2;
	u16						modes_buf_offset;
	__le16						reserved2;
	__le16						modes_buf_offset;
};

/* init operation: if_phase */
struct init_if_phase_op {
	u32						op_data;
	__le32						op_data;
#define INIT_IF_PHASE_OP_OP_MASK			0xF
#define INIT_IF_PHASE_OP_OP_SHIFT			0
#define INIT_IF_PHASE_OP_RESERVED1_MASK			0xFFF
#define INIT_IF_PHASE_OP_RESERVED1_SHIFT		4
#define INIT_IF_PHASE_OP_CMD_OFFSET_MASK		0xFFFF
#define INIT_IF_PHASE_OP_CMD_OFFSET_SHIFT		16
	u32						phase_data;
	__le32						phase_data;
#define INIT_IF_PHASE_OP_PHASE_MASK			0xFF
#define INIT_IF_PHASE_OP_PHASE_SHIFT			0
#define INIT_IF_PHASE_OP_RESERVED2_MASK			0xFF
@@ -2907,31 +2907,31 @@ enum init_mode_ops {

/* init operation: raw */
struct init_raw_op {
	u32						op_data;
	__le32						op_data;
#define INIT_RAW_OP_OP_MASK				0xF
#define INIT_RAW_OP_OP_SHIFT				0
#define INIT_RAW_OP_PARAM1_MASK				0xFFFFFFF
#define INIT_RAW_OP_PARAM1_SHIFT			4
	u32						param2;
	__le32						param2;
};

/* init array params */
struct init_op_array_params {
	u16						size;
	u16						offset;
	__le16						size;
	__le16						offset;
};

/* Write init operation arguments */
union init_write_args {
	u32						inline_val;
	u32						zeros_count;
	u32						array_offset;
	__le32						inline_val;
	__le32						zeros_count;
	__le32						array_offset;
	struct init_op_array_params			runtime;
};

/* init operation: write */
struct init_write_op {
	u32						data;
	__le32						data;
#define INIT_WRITE_OP_OP_MASK				0xF
#define INIT_WRITE_OP_OP_SHIFT				0
#define INIT_WRITE_OP_SOURCE_MASK			0x7
@@ -2947,7 +2947,7 @@ struct init_write_op {

/* init operation: read */
struct init_read_op {
	u32						op_data;
	__le32						op_data;
#define INIT_READ_OP_OP_MASK				0xF
#define INIT_READ_OP_OP_SHIFT				0
#define INIT_READ_OP_POLL_TYPE_MASK			0xF
@@ -2956,7 +2956,7 @@ struct init_read_op {
#define INIT_READ_OP_RESERVED_SHIFT			8
#define INIT_READ_OP_ADDRESS_MASK			0x7FFFFF
#define INIT_READ_OP_ADDRESS_SHIFT			9
	u32						expected_val;
	__le32						expected_val;
};

/* Init operations union */
Loading