Commit b199dddb authored by Qi Zhang, committed by Tony Nguyen

ice: Support non word aligned input set field



To support FDIR input sets with protocol fields such as DSCP, TTL
and PROT, which are not word aligned, we need to enable field
vector masking.
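
For example, the IPv4 TTL (header byte 8) and protocol (header byte 9)
land in the same 16-bit extracted field vector word, so matching on TTL
alone requires a per-word mask that hides the protocol byte. A minimal
caller-side sketch of the new ice_add_prof() masks parameter (the word
index and mask byte order are hypothetical, not taken from this patch):

	u16 masks[ICE_MAX_FV_WORDS];
	int i;

	/* default: compare every extracted word in full */
	for (i = 0; i < ICE_MAX_FV_WORDS; i++)
		masks[i] = 0xFFFF;
	/* hypothetical: the word carrying TTL|PROT, keep only the TTL byte */
	masks[ttl_word_idx] = 0xFF00;

	status = ice_add_prof(hw, ICE_BLK_FD, id, ptypes, es, masks);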

Signed-off-by: Dan Nowlin <dan.nowlin@intel.com>
Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
Tested-by: Chen Bo <BoX.C.Chen@intel.com>
Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
parent 390bd141
drivers/net/ethernet/intel/ice/ice_fdir.c  +55 −7
@@ -470,6 +470,39 @@ static void ice_pkt_insert_ipv6_addr(u8 *pkt, int offset, __be32 *addr)
		       sizeof(*addr));
}

/**
 * ice_pkt_insert_u8 - insert a u8 value into a memory buffer.
 * @pkt: packet buffer
 * @offset: offset into buffer
 * @data: 8 bit value to convert and insert into pkt at offset
 */
static void ice_pkt_insert_u8(u8 *pkt, int offset, u8 data)
{
	memcpy(pkt + offset, &data, sizeof(data));
}

/**
 * ice_pkt_insert_u8_tc - insert a u8 value into a memory buffer for IPv6 TC.
 * @pkt: packet buffer
 * @offset: offset into buffer
 * @data: 8 bit value to convert and insert into pkt at offset
 *
 * This function is designed for inserting the Traffic Class (TC) for
 * IPv6, since the TC field is not byte aligned. The value is split into
 * two nibbles, each nibble is merged with the adjacent bits already in
 * pkt, and the two resulting bytes are written back one at a time.
static void ice_pkt_insert_u8_tc(u8 *pkt, int offset, u8 data)
{
	u8 high, low;

	high = (data >> 4) + (*(pkt + offset) & 0xF0);
	memcpy(pkt + offset, &high, sizeof(high));

	low = (*(pkt + offset + 1) & 0x0F) + ((data & 0x0F) << 4);
	memcpy(pkt + offset + 1, &low, sizeof(low));
}
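
/* Worked example (editorial illustration, values arbitrary): with
 * data = 0x2E, pkt[offset] = 0x60 (IPv6 version nibble) and
 * pkt[offset + 1] = 0x0A (upper flow-label nibble 0xA):
 *   high = (0x2E >> 4) + (0x60 & 0xF0) = 0x62
 *   low  = (0x0A & 0x0F) + ((0x2E & 0x0F) << 4) = 0xEA
 * which leaves version = 6 and the flow label intact while TC becomes 0x2E.
 */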

/**
 * ice_pkt_insert_u16 - insert a be16 value into a memory buffer
 * @pkt: packet buffer
@@ -530,11 +563,9 @@ ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input,
		case IPPROTO_SCTP:
			flow = ICE_FLTR_PTYPE_NONF_IPV4_SCTP;
			break;
-		case IPPROTO_IP:
+		default:
 			flow = ICE_FLTR_PTYPE_NONF_IPV4_OTHER;
 			break;
-		default:
-			return ICE_ERR_PARAM;
		}
	} else if (input->flow_type == ICE_FLTR_PTYPE_NONF_IPV6_OTHER) {
		switch (input->ip.v6.proto) {
@@ -547,11 +578,9 @@ ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input,
		case IPPROTO_SCTP:
			flow = ICE_FLTR_PTYPE_NONF_IPV6_SCTP;
			break;
-		case IPPROTO_IP:
+		default:
 			flow = ICE_FLTR_PTYPE_NONF_IPV6_OTHER;
 			break;
-		default:
-			return ICE_ERR_PARAM;
		}
	} else {
		flow = input->flow_type;
@@ -590,6 +619,8 @@ ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input,
				   input->ip.v4.dst_ip);
		ice_pkt_insert_u16(loc, ICE_IPV4_TCP_SRC_PORT_OFFSET,
				   input->ip.v4.dst_port);
		ice_pkt_insert_u8(loc, ICE_IPV4_TOS_OFFSET, input->ip.v4.tos);
		ice_pkt_insert_u8(loc, ICE_IPV4_TTL_OFFSET, input->ip.v4.ttl);
		ice_pkt_insert_mac_addr(loc, input->ext_data.dst_mac);
		if (frag)
			loc[20] = ICE_FDIR_IPV4_PKT_FLAG_DF;
@@ -603,6 +634,8 @@ ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input,
				   input->ip.v4.dst_ip);
		ice_pkt_insert_u16(loc, ICE_IPV4_UDP_SRC_PORT_OFFSET,
				   input->ip.v4.dst_port);
		ice_pkt_insert_u8(loc, ICE_IPV4_TOS_OFFSET, input->ip.v4.tos);
		ice_pkt_insert_u8(loc, ICE_IPV4_TTL_OFFSET, input->ip.v4.ttl);
		ice_pkt_insert_mac_addr(loc, input->ext_data.dst_mac);
		ice_pkt_insert_mac_addr(loc + ETH_ALEN,
					input->ext_data.src_mac);
@@ -616,6 +649,8 @@ ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input,
				   input->ip.v4.dst_ip);
		ice_pkt_insert_u16(loc, ICE_IPV4_SCTP_SRC_PORT_OFFSET,
				   input->ip.v4.dst_port);
		ice_pkt_insert_u8(loc, ICE_IPV4_TOS_OFFSET, input->ip.v4.tos);
		ice_pkt_insert_u8(loc, ICE_IPV4_TTL_OFFSET, input->ip.v4.ttl);
		ice_pkt_insert_mac_addr(loc, input->ext_data.dst_mac);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
@@ -623,7 +658,10 @@ ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input,
				   input->ip.v4.src_ip);
		ice_pkt_insert_u32(loc, ICE_IPV4_SRC_ADDR_OFFSET,
				   input->ip.v4.dst_ip);
-		ice_pkt_insert_u16(loc, ICE_IPV4_PROTO_OFFSET, 0);
+		ice_pkt_insert_u8(loc, ICE_IPV4_TOS_OFFSET, input->ip.v4.tos);
+		ice_pkt_insert_u8(loc, ICE_IPV4_TTL_OFFSET, input->ip.v4.ttl);
+		ice_pkt_insert_u8(loc, ICE_IPV4_PROTO_OFFSET,
+				  input->ip.v4.proto);
		ice_pkt_insert_mac_addr(loc, input->ext_data.dst_mac);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
@@ -635,6 +673,8 @@ ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input,
				   input->ip.v6.src_port);
		ice_pkt_insert_u16(loc, ICE_IPV6_TCP_SRC_PORT_OFFSET,
				   input->ip.v6.dst_port);
		ice_pkt_insert_u8_tc(loc, ICE_IPV6_TC_OFFSET, input->ip.v6.tc);
		ice_pkt_insert_u8(loc, ICE_IPV6_HLIM_OFFSET, input->ip.v6.hlim);
		ice_pkt_insert_mac_addr(loc, input->ext_data.dst_mac);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
@@ -646,6 +686,8 @@ ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input,
				   input->ip.v6.src_port);
		ice_pkt_insert_u16(loc, ICE_IPV6_UDP_SRC_PORT_OFFSET,
				   input->ip.v6.dst_port);
		ice_pkt_insert_u8_tc(loc, ICE_IPV6_TC_OFFSET, input->ip.v6.tc);
		ice_pkt_insert_u8(loc, ICE_IPV6_HLIM_OFFSET, input->ip.v6.hlim);
		ice_pkt_insert_mac_addr(loc, input->ext_data.dst_mac);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
@@ -657,6 +699,8 @@ ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input,
				   input->ip.v6.src_port);
		ice_pkt_insert_u16(loc, ICE_IPV6_SCTP_SRC_PORT_OFFSET,
				   input->ip.v6.dst_port);
		ice_pkt_insert_u8_tc(loc, ICE_IPV6_TC_OFFSET, input->ip.v6.tc);
		ice_pkt_insert_u8(loc, ICE_IPV6_HLIM_OFFSET, input->ip.v6.hlim);
		ice_pkt_insert_mac_addr(loc, input->ext_data.dst_mac);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
@@ -664,6 +708,10 @@ ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input,
					 input->ip.v6.src_ip);
		ice_pkt_insert_ipv6_addr(loc, ICE_IPV6_SRC_ADDR_OFFSET,
					 input->ip.v6.dst_ip);
		ice_pkt_insert_u8_tc(loc, ICE_IPV6_TC_OFFSET, input->ip.v6.tc);
		ice_pkt_insert_u8(loc, ICE_IPV6_HLIM_OFFSET, input->ip.v6.hlim);
		ice_pkt_insert_u8(loc, ICE_IPV6_PROTO_OFFSET,
				  input->ip.v6.proto);
		ice_pkt_insert_mac_addr(loc, input->ext_data.dst_mac);
		break;
	default:
drivers/net/ethernet/intel/ice/ice_fdir.h  +8 −0
@@ -25,6 +25,12 @@
#define ICE_IPV6_UDP_DST_PORT_OFFSET	56
#define ICE_IPV6_SCTP_SRC_PORT_OFFSET	54
#define ICE_IPV6_SCTP_DST_PORT_OFFSET	56
#define ICE_IPV4_TOS_OFFSET		15
#define ICE_IPV4_TTL_OFFSET		22
#define ICE_IPV6_TC_OFFSET		14
#define ICE_IPV6_HLIM_OFFSET		21
#define ICE_IPV6_PROTO_OFFSET		20
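
/* Editorial note: these offsets count from the start of the training
 * packet, so they include the 14-byte Ethernet header, e.g. the IPv4
 * TTL sits at 14 + 8 = 22 and the IPv6 hop limit at 14 + 7 = 21.
 */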

/* IP v4 has 2 flag bits that enable fragment processing: DF and MF. DF
 * requests that the packet not be fragmented. MF indicates that a packet has
 * been fragmented.
@@ -86,6 +92,7 @@ struct ice_fdir_v4 {
	u8 tos;
	u8 ip_ver;
	u8 proto;
	u8 ttl;
};

#define ICE_IPV6_ADDR_LEN_AS_U32		4
@@ -99,6 +106,7 @@ struct ice_fdir_v6 {
	__be32 sec_parm_idx; /* security parameter index */
	u8 tc;
	u8 proto;
	u8 hlim;
};

struct ice_fdir_extra {
drivers/net/ethernet/intel/ice/ice_flex_pipe.c  +414 −8
@@ -2361,18 +2361,82 @@ ice_vsig_add_mv_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
}

/**
- * ice_find_prof_id - find profile ID for a given field vector
+ * ice_prof_has_mask_idx - determine if profile index masking is identical
 * @hw: pointer to the hardware structure
 * @blk: HW block
 * @prof: profile to check
 * @idx: profile index to check
 * @mask: mask to match
 */
static bool
ice_prof_has_mask_idx(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 idx,
		      u16 mask)
{
	bool expect_no_mask = false;
	bool found = false;
	bool match = false;
	u16 i;

	/* If mask is 0x0000 or 0xffff, then there is no masking */
	if (mask == 0 || mask == 0xffff)
		expect_no_mask = true;

	/* Scan the enabled masks on this profile, for the specified idx */
	for (i = hw->blk[blk].masks.first; i < hw->blk[blk].masks.first +
	     hw->blk[blk].masks.count; i++)
		if (hw->blk[blk].es.mask_ena[prof] & BIT(i))
			if (hw->blk[blk].masks.masks[i].in_use &&
			    hw->blk[blk].masks.masks[i].idx == idx) {
				found = true;
				if (hw->blk[blk].masks.masks[i].mask == mask)
					match = true;
				break;
			}

	if (expect_no_mask) {
		if (found)
			return false;
	} else {
		if (!match)
			return false;
	}

	return true;
}
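
/* Editorial truth table for the check above (not part of the commit):
 *   mask argument     profile mask entry at idx     result
 *   0x0000 / 0xffff   none                          true
 *   0x0000 / 0xffff   any                           false
 *   other             same mask at same idx         true
 *   other             none, or a different mask     false
 */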

/**
 * ice_prof_has_mask - determine if profile masking is identical
 * @hw: pointer to the hardware structure
 * @blk: HW block
 * @prof: profile to check
 * @masks: masks to match
 */
static bool
ice_prof_has_mask(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 *masks)
{
	u16 i;

	/* es->mask_ena[prof] will have the mask */
	for (i = 0; i < hw->blk[blk].es.fvw; i++)
		if (!ice_prof_has_mask_idx(hw, blk, prof, i, masks[i]))
			return false;

	return true;
}

/**
 * ice_find_prof_id_with_mask - find profile ID for a given field vector
 * @hw: pointer to the hardware structure
 * @blk: HW block
 * @fv: field vector to search for
 * @masks: masks for FV
 * @prof_id: receives the profile ID
 */
static enum ice_status
-ice_find_prof_id(struct ice_hw *hw, enum ice_block blk,
-		 struct ice_fv_word *fv, u8 *prof_id)
+ice_find_prof_id_with_mask(struct ice_hw *hw, enum ice_block blk,
+			   struct ice_fv_word *fv, u16 *masks, u8 *prof_id)
{
	struct ice_es *es = &hw->blk[blk].es;
-	u16 off;
	u8 i;

	/* For FD, we don't want to re-use an existing profile with the same
@@ -2382,11 +2446,15 @@ ice_find_prof_id(struct ice_hw *hw, enum ice_block blk,
		return ICE_ERR_DOES_NOT_EXIST;

	for (i = 0; i < (u8)es->count; i++) {
-		off = i * es->fvw;
+		u16 off = i * es->fvw;

		if (memcmp(&es->t[off], fv, es->fvw * sizeof(*fv)))
			continue;

		/* check if masks settings are the same for this profile */
		if (masks && !ice_prof_has_mask(hw, blk, i, masks))
			continue;

		*prof_id = i;
		return 0;
	}
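
	/* Editorial note: a consequence of the masks check above is that
	 * two requests with identical extraction sequences but different
	 * masks (say full-word 0xFFFF vs. low-byte 0x00FF on one word) no
	 * longer share a profile ID.
	 */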
@@ -2536,6 +2604,330 @@ ice_prof_inc_ref(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
	return 0;
}

/**
 * ice_write_prof_mask_reg - write profile mask register
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @mask_idx: mask index
 * @idx: index of the FV which will use the mask
 * @mask: the 16-bit mask
 */
static void
ice_write_prof_mask_reg(struct ice_hw *hw, enum ice_block blk, u16 mask_idx,
			u16 idx, u16 mask)
{
	u32 offset;
	u32 val;

	switch (blk) {
	case ICE_BLK_RSS:
		offset = GLQF_HMASK(mask_idx);
		val = (idx << GLQF_HMASK_MSK_INDEX_S) & GLQF_HMASK_MSK_INDEX_M;
		val |= (mask << GLQF_HMASK_MASK_S) & GLQF_HMASK_MASK_M;
		break;
	case ICE_BLK_FD:
		offset = GLQF_FDMASK(mask_idx);
		val = (idx << GLQF_FDMASK_MSK_INDEX_S) & GLQF_FDMASK_MSK_INDEX_M;
		val |= (mask << GLQF_FDMASK_MASK_S) & GLQF_FDMASK_MASK_M;
		break;
	default:
		ice_debug(hw, ICE_DBG_PKG, "No profile masks for block %d\n",
			  blk);
		return;
	}

	wr32(hw, offset, val);
	ice_debug(hw, ICE_DBG_PKG, "write mask, blk %d (%d): %x = %x\n",
		  blk, idx, offset, val);
}

/**
 * ice_write_prof_mask_enable_res - write profile mask enable register
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @prof_id: profile ID
 * @enable_mask: enable mask
 */
static void
ice_write_prof_mask_enable_res(struct ice_hw *hw, enum ice_block blk,
			       u16 prof_id, u32 enable_mask)
{
	u32 offset;

	switch (blk) {
	case ICE_BLK_RSS:
		offset = GLQF_HMASK_SEL(prof_id);
		break;
	case ICE_BLK_FD:
		offset = GLQF_FDMASK_SEL(prof_id);
		break;
	default:
		ice_debug(hw, ICE_DBG_PKG, "No profile masks for block %d\n",
			  blk);
		return;
	}

	wr32(hw, offset, enable_mask);
	ice_debug(hw, ICE_DBG_PKG, "write mask enable, blk %d (%d): %x = %x\n",
		  blk, prof_id, offset, enable_mask);
}

/**
 * ice_init_prof_masks - initialize prof masks
 * @hw: pointer to the HW struct
 * @blk: hardware block
 */
static void ice_init_prof_masks(struct ice_hw *hw, enum ice_block blk)
{
	u16 per_pf;
	u16 i;

	mutex_init(&hw->blk[blk].masks.lock);

	per_pf = ICE_PROF_MASK_COUNT / hw->dev_caps.num_funcs;

	hw->blk[blk].masks.count = per_pf;
	hw->blk[blk].masks.first = hw->pf_id * per_pf;

	memset(hw->blk[blk].masks.masks, 0, sizeof(hw->blk[blk].masks.masks));

	for (i = hw->blk[blk].masks.first;
	     i < hw->blk[blk].masks.first + hw->blk[blk].masks.count; i++)
		ice_write_prof_mask_reg(hw, blk, i, 0, 0);
}
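
/* Editorial example: with ICE_PROF_MASK_COUNT = 32 and num_funcs = 8
 * (hypothetical), per_pf = 4, so PF 2 owns mask indices 8..11 and
 * clears each of its mask registers here at init.
 */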

/**
 * ice_init_all_prof_masks - initialize all prof masks
 * @hw: pointer to the HW struct
 */
static void ice_init_all_prof_masks(struct ice_hw *hw)
{
	ice_init_prof_masks(hw, ICE_BLK_RSS);
	ice_init_prof_masks(hw, ICE_BLK_FD);
}

/**
 * ice_alloc_prof_mask - allocate profile mask
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @idx: index of FV which will use the mask
 * @mask: the 16-bit mask
 * @mask_idx: variable to receive the mask index
 */
static enum ice_status
ice_alloc_prof_mask(struct ice_hw *hw, enum ice_block blk, u16 idx, u16 mask,
		    u16 *mask_idx)
{
	bool found_unused = false, found_copy = false;
	enum ice_status status = ICE_ERR_MAX_LIMIT;
	u16 unused_idx = 0, copy_idx = 0;
	u16 i;

	if (blk != ICE_BLK_RSS && blk != ICE_BLK_FD)
		return ICE_ERR_PARAM;

	mutex_lock(&hw->blk[blk].masks.lock);

	for (i = hw->blk[blk].masks.first;
	     i < hw->blk[blk].masks.first + hw->blk[blk].masks.count; i++)
		if (hw->blk[blk].masks.masks[i].in_use) {
			/* if mask is in use and it exactly duplicates the
			 * desired mask and index, then it can be reused
			 */
			if (hw->blk[blk].masks.masks[i].mask == mask &&
			    hw->blk[blk].masks.masks[i].idx == idx) {
				found_copy = true;
				copy_idx = i;
				break;
			}
		} else {
			/* save off unused index, but keep searching in case
			 * there is an exact match later on
			 */
			if (!found_unused) {
				found_unused = true;
				unused_idx = i;
			}
		}

	if (found_copy)
		i = copy_idx;
	else if (found_unused)
		i = unused_idx;
	else
		goto err_ice_alloc_prof_mask;

	/* update mask for a new entry */
	if (found_unused) {
		hw->blk[blk].masks.masks[i].in_use = true;
		hw->blk[blk].masks.masks[i].mask = mask;
		hw->blk[blk].masks.masks[i].idx = idx;
		hw->blk[blk].masks.masks[i].ref = 0;
		ice_write_prof_mask_reg(hw, blk, i, idx, mask);
	}

	hw->blk[blk].masks.masks[i].ref++;
	*mask_idx = i;
	status = 0;

err_ice_alloc_prof_mask:
	mutex_unlock(&hw->blk[blk].masks.lock);

	return status;
}
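
/* Editorial example of the reuse path: if an earlier profile already
 * allocated mask 0x00FF for FV word 5, a later request for the same
 * (idx, mask) pair takes the found_copy branch, increments .ref and
 * returns the existing mask_idx instead of consuming a new entry.
 */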

/**
 * ice_free_prof_mask - free profile mask
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @mask_idx: index of mask
 */
static enum ice_status
ice_free_prof_mask(struct ice_hw *hw, enum ice_block blk, u16 mask_idx)
{
	if (blk != ICE_BLK_RSS && blk != ICE_BLK_FD)
		return ICE_ERR_PARAM;

	if (!(mask_idx >= hw->blk[blk].masks.first &&
	      mask_idx < hw->blk[blk].masks.first + hw->blk[blk].masks.count))
		return ICE_ERR_DOES_NOT_EXIST;

	mutex_lock(&hw->blk[blk].masks.lock);

	if (!hw->blk[blk].masks.masks[mask_idx].in_use)
		goto exit_ice_free_prof_mask;

	if (hw->blk[blk].masks.masks[mask_idx].ref > 1) {
		hw->blk[blk].masks.masks[mask_idx].ref--;
		goto exit_ice_free_prof_mask;
	}

	/* remove mask */
	hw->blk[blk].masks.masks[mask_idx].in_use = false;
	hw->blk[blk].masks.masks[mask_idx].mask = 0;
	hw->blk[blk].masks.masks[mask_idx].idx = 0;

	/* update mask as unused entry */
	ice_debug(hw, ICE_DBG_PKG, "Free mask, blk %d, mask %d\n", blk,
		  mask_idx);
	ice_write_prof_mask_reg(hw, blk, mask_idx, 0, 0);

exit_ice_free_prof_mask:
	mutex_unlock(&hw->blk[blk].masks.lock);

	return 0;
}

/**
 * ice_free_prof_masks - free all profile masks for a profile
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @prof_id: profile ID
 */
static enum ice_status
ice_free_prof_masks(struct ice_hw *hw, enum ice_block blk, u16 prof_id)
{
	u32 mask_bm;
	u16 i;

	if (blk != ICE_BLK_RSS && blk != ICE_BLK_FD)
		return ICE_ERR_PARAM;

	mask_bm = hw->blk[blk].es.mask_ena[prof_id];
	for (i = 0; i < BITS_PER_BYTE * sizeof(mask_bm); i++)
		if (mask_bm & BIT(i))
			ice_free_prof_mask(hw, blk, i);

	return 0;
}

/**
 * ice_shutdown_prof_masks - releases lock for masking
 * @hw: pointer to the HW struct
 * @blk: hardware block
 *
 * This should be called before unloading the driver
 */
static void ice_shutdown_prof_masks(struct ice_hw *hw, enum ice_block blk)
{
	u16 i;

	mutex_lock(&hw->blk[blk].masks.lock);

	for (i = hw->blk[blk].masks.first;
	     i < hw->blk[blk].masks.first + hw->blk[blk].masks.count; i++) {
		ice_write_prof_mask_reg(hw, blk, i, 0, 0);

		hw->blk[blk].masks.masks[i].in_use = false;
		hw->blk[blk].masks.masks[i].idx = 0;
		hw->blk[blk].masks.masks[i].mask = 0;
	}

	mutex_unlock(&hw->blk[blk].masks.lock);
	mutex_destroy(&hw->blk[blk].masks.lock);
}

/**
 * ice_shutdown_all_prof_masks - releases all locks for masking
 * @hw: pointer to the HW struct
 *
 * This should be called before unloading the driver
 */
static void ice_shutdown_all_prof_masks(struct ice_hw *hw)
{
	ice_shutdown_prof_masks(hw, ICE_BLK_RSS);
	ice_shutdown_prof_masks(hw, ICE_BLK_FD);
}

/**
 * ice_update_prof_masking - set registers according to masking
 * @hw: pointer to the HW struct
 * @blk: hardware block
 * @prof_id: profile ID
 * @masks: masks
 */
static enum ice_status
ice_update_prof_masking(struct ice_hw *hw, enum ice_block blk, u16 prof_id,
			u16 *masks)
{
	bool err = false;
	u32 ena_mask = 0;
	u16 idx;
	u16 i;

	/* Only support FD and RSS masking, otherwise nothing to be done */
	if (blk != ICE_BLK_RSS && blk != ICE_BLK_FD)
		return 0;

	for (i = 0; i < hw->blk[blk].es.fvw; i++)
		if (masks[i] && masks[i] != 0xFFFF) {
			if (!ice_alloc_prof_mask(hw, blk, i, masks[i], &idx)) {
				ena_mask |= BIT(idx);
			} else {
				/* not enough bitmaps */
				err = true;
				break;
			}
		}

	if (err) {
		/* free any bitmaps we have allocated */
		for (i = 0; i < BITS_PER_BYTE * sizeof(ena_mask); i++)
			if (ena_mask & BIT(i))
				ice_free_prof_mask(hw, blk, i);

		return ICE_ERR_OUT_OF_RANGE;
	}

	/* enable the masks for this profile */
	ice_write_prof_mask_enable_res(hw, blk, prof_id, ena_mask);

	/* store enabled masks with profile so that they can be freed later */
	hw->blk[blk].es.mask_ena[prof_id] = ena_mask;

	return 0;
}
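
/* Editorial sketch: if masks[] is all 0xFFFF except masks[5] = 0x00FF,
 * a single hardware mask entry is allocated for word 5, its bit is set
 * in ena_mask, and that bitmap is written through
 * ice_write_prof_mask_enable_res() to GLQF_FDMASK_SEL()/GLQF_HMASK_SEL().
 */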

/**
 * ice_write_es - write an extraction sequence to hardware
 * @hw: pointer to the HW struct
@@ -2575,6 +2967,7 @@ ice_prof_dec_ref(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
	if (hw->blk[blk].es.ref_count[prof_id] > 0) {
		if (!--hw->blk[blk].es.ref_count[prof_id]) {
			ice_write_es(hw, blk, prof_id, NULL);
			ice_free_prof_masks(hw, blk, prof_id);
			return ice_free_prof_id(hw, blk, prof_id);
		}
	}
@@ -2937,6 +3330,7 @@ void ice_free_hw_tbls(struct ice_hw *hw)
		devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.t);
		devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.ref_count);
		devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.written);
		devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.mask_ena);
	}

	list_for_each_entry_safe(r, rt, &hw->rss_list_head, l_entry) {
@@ -2944,6 +3338,7 @@ void ice_free_hw_tbls(struct ice_hw *hw)
		devm_kfree(ice_hw_to_dev(hw), r);
	}
	mutex_destroy(&hw->rss_locks);
	ice_shutdown_all_prof_masks(hw);
	memset(hw->blk, 0, sizeof(hw->blk));
}

@@ -2997,6 +3392,7 @@ void ice_clear_hw_tbls(struct ice_hw *hw)
		memset(es->t, 0, es->count * sizeof(*es->t) * es->fvw);
		memset(es->ref_count, 0, es->count * sizeof(*es->ref_count));
		memset(es->written, 0, es->count * sizeof(*es->written));
		memset(es->mask_ena, 0, es->count * sizeof(*es->mask_ena));
	}
}

@@ -3010,6 +3406,7 @@ enum ice_status ice_init_hw_tbls(struct ice_hw *hw)

	mutex_init(&hw->rss_locks);
	INIT_LIST_HEAD(&hw->rss_list_head);
	ice_init_all_prof_masks(hw);
	for (i = 0; i < ICE_BLK_COUNT; i++) {
		struct ice_prof_redir *prof_redir = &hw->blk[i].prof_redir;
		struct ice_prof_tcam *prof = &hw->blk[i].prof;
@@ -3112,6 +3509,11 @@ enum ice_status ice_init_hw_tbls(struct ice_hw *hw)
					   sizeof(*es->written), GFP_KERNEL);
		if (!es->written)
			goto err;

		es->mask_ena = devm_kcalloc(ice_hw_to_dev(hw), es->count,
					    sizeof(*es->mask_ena), GFP_KERNEL);
		if (!es->mask_ena)
			goto err;
	}
	return 0;

@@ -3718,15 +4120,16 @@ ice_update_fd_swap(struct ice_hw *hw, u16 prof_id, struct ice_fv_word *es)
 * @id: profile tracking ID
 * @ptypes: array of bitmaps indicating ptypes (ICE_FLOW_PTYPE_MAX bits)
 * @es: extraction sequence (length of array is determined by the block)
 * @masks: mask for extraction sequence
 *
- * This function registers a profile, which matches a set of PTGs with a
+ * This function registers a profile, which matches a set of PTYPES with a
 * particular extraction sequence. While the hardware profile is allocated
 * it will not be written until the first call to ice_add_flow that specifies
 * the ID value used here.
 */
enum ice_status
ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
-	     struct ice_fv_word *es)
+	     struct ice_fv_word *es, u16 *masks)
{
	u32 bytes = DIV_ROUND_UP(ICE_FLOW_PTYPE_MAX, BITS_PER_BYTE);
	DECLARE_BITMAP(ptgs_used, ICE_XLT1_CNT);
@@ -3740,7 +4143,7 @@ ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
	mutex_lock(&hw->blk[blk].es.prof_map_lock);

	/* search for existing profile */
-	status = ice_find_prof_id(hw, blk, es, &prof_id);
+	status = ice_find_prof_id_with_mask(hw, blk, es, masks, &prof_id);
	if (status) {
		/* allocate profile ID */
		status = ice_alloc_prof_id(hw, blk, &prof_id);
@@ -3758,6 +4161,9 @@ ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
			if (status)
				goto err_ice_add_prof;
		}
		status = ice_update_prof_masking(hw, blk, prof_id, masks);
		if (status)
			goto err_ice_add_prof;

		/* and write new es */
		ice_write_es(hw, blk, prof_id, es);
drivers/net/ethernet/intel/ice/ice_flex_pipe.h  +1 −1
@@ -27,7 +27,7 @@ int ice_udp_tunnel_unset_port(struct net_device *netdev, unsigned int table,

enum ice_status
ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
-	     struct ice_fv_word *es);
+	     struct ice_fv_word *es, u16 *masks);
enum ice_status
ice_add_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl);
enum ice_status
drivers/net/ethernet/intel/ice/ice_flex_type.h  +17 −0
@@ -335,6 +335,7 @@ struct ice_es {
	u16 count;
	u16 fvw;
	u16 *ref_count;
	u32 *mask_ena;
	struct list_head prof_map;
	struct ice_fv_word *t;
	struct mutex prof_map_lock;	/* protect access to profiles list */
@@ -478,6 +479,21 @@ struct ice_prof_redir {
	u16 count;
};

struct ice_mask {
	u16 mask;	/* 16-bit mask */
	u16 idx;	/* index */
	u16 ref;	/* reference count */
	u8 in_use;	/* non-zero if used */
};

struct ice_masks {
	struct mutex lock; /* lock to protect this structure */
	u16 first;	/* first mask owned by the PF */
	u16 count;	/* number of masks owned by the PF */
#define ICE_PROF_MASK_COUNT 32
	struct ice_mask masks[ICE_PROF_MASK_COUNT];
};

/* Tables per block */
struct ice_blk_info {
	struct ice_xlt1 xlt1;
@@ -485,6 +501,7 @@ struct ice_blk_info {
	struct ice_prof_tcam prof;
	struct ice_prof_redir prof_redir;
	struct ice_es es;
	struct ice_masks masks;
	u8 overwrite; /* set to true to allow overwrite of table entries */
	u8 is_list_init;
};