Commit 05bb74c2 authored by Oz Shlomo, committed by Saeed Mahameed
Browse files

net/mlx5e: CT, optimize pre_ct table lookup



The pre_ct table realizes in hardware the act_ct cache logic, bypassing
the CT table if the ct state was already set by a previous ct lookup.
As such, the pre_ct table will always miss for chain 0 filters.

Optimize the pre_ct table lookup for rules installed on chain 0.

Signed-off-by: Oz Shlomo <ozsh@nvidia.com>
Reviewed-by: Roi Dayan <roid@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
parent 34136153
Loading
Loading
Loading
Loading
+56 −33
Original line number Diff line number Diff line
@@ -1782,12 +1782,18 @@ mlx5_tc_ct_del_ft_cb(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_ft *ft)
 *		 | set fte_id
 *		 | set tunnel_id
 *		 | do decap
 *      v
 * +---------------------+
 * + pre_ct/pre_ct_nat   +  if matches     +-------------------------+
 * + zone+nat match      +---------------->+ post_act (see below)    +
 * +---------------------+  set zone       +-------------------------+
 *      | set zone
 *		 |
 * +-------------+
 * | Chain 0	 |
 * | optimization|
 * |		 v
 * |	+---------------------+
 * |	+ pre_ct/pre_ct_nat   +  if matches     +----------------------+
 * |	+ zone+nat match      +---------------->+ post_act (see below) +
 * |	+---------------------+  set zone       +----------------------+
 * |		 |
 * +-------------+ set zone
 *		 |
 *		 v
 *	+--------------------+
 *	+ CT (nat or no nat) +
@@ -1803,6 +1809,7 @@ mlx5_tc_ct_del_ft_cb(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_ft *ft)
 *	+ post_act     + original filter actions
 *	+ fte_id match +------------------------>
 *	+--------------+
 *
 */
static struct mlx5_flow_handle *
__mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *ct_priv,
@@ -1818,6 +1825,7 @@ __mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *ct_priv,
	struct mlx5_ct_flow *ct_flow;
	int chain_mapping = 0, err;
	struct mlx5_ct_ft *ft;
	u16 zone;

	ct_flow = kzalloc(sizeof(*ct_flow), GFP_KERNEL);
	if (!ct_flow) {
@@ -1884,6 +1892,25 @@ __mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *ct_priv,
		}
	}

	/* Change original rule point to ct table
	 * Chain 0 sets the zone and jumps to ct table
	 * Other chains jump to pre_ct table to align with act_ct cached logic
	 */
	pre_ct_attr->dest_chain = 0;
	if (!attr->chain) {
		zone = ft->zone & MLX5_CT_ZONE_MASK;
		err = mlx5e_tc_match_to_reg_set(priv->mdev, pre_mod_acts, ct_priv->ns_type,
						ZONE_TO_REG, zone);
		if (err) {
			ct_dbg("Failed to set zone register mapping");
			goto err_mapping;
		}

		pre_ct_attr->dest_ft = nat ? ct_priv->ct_nat : ct_priv->ct;
	} else {
		pre_ct_attr->dest_ft = nat ? ft->pre_ct_nat.ft : ft->pre_ct.ft;
	}

	mod_hdr = mlx5_modify_header_alloc(priv->mdev, ct_priv->ns_type,
					   pre_mod_acts->num_actions,
					   pre_mod_acts->actions);
@@ -1893,10 +1920,6 @@ __mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *ct_priv,
		goto err_mapping;
	}
	pre_ct_attr->modify_hdr = mod_hdr;

	/* Change original rule point to ct table */
	pre_ct_attr->dest_chain = 0;
	pre_ct_attr->dest_ft = nat ? ft->pre_ct_nat.ft : ft->pre_ct.ft;
	ct_flow->pre_ct_rule = mlx5_tc_rule_insert(priv, orig_spec,
						   pre_ct_attr);
	if (IS_ERR(ct_flow->pre_ct_rule)) {