Commit 6873465c authored by Jakub Kicinski's avatar Jakub Kicinski
Browse files

Merge branch 'nfp-flower-add-support-for-multi-zone-conntrack'

Louis Peens says:

====================
nfp: flower: add support for multi-zone conntrack

This series add changes to support offload of connection tracking across
multiple zones. Previously the driver only supported offloading of a
single goto_chain, spanning a single zone. This was implemented by
merging a pre_ct rule, post_ct rule and the nft rule. This series
provides updates to let the original post_ct rule act as the new pre_ct
rule for a next set of merges if it contains another goto and
conntrack action. In pseudo-tc rule format this adds support for:

    ingress chain 0 proto ip flower
        action ct zone 1 pipe action goto 1

    ingress chain 1 proto ip flower ct_state +trk+new ct_zone 1
        action ct_clear pipe action ct zone 2 pipe action goto 2
    ingress chain 1 proto ip flower ct_state +trk+est ct_zone 1
        action ct_clear pipe action ct zone 2 pipe action goto 2

    ingress chain 2 proto ip flower ct_state +trk+new ct_zone 2
        action mirred egress redirect dev ...
    ingress chain 2 proto ip flower ct_state +trk+est ct_zone 2
        action mirred egress redirect dev ...

This can continue for up to a maximum of 4 zone recirculations.

The first few patches are some smaller preparation patches while the
last one introduces the functionality.
====================

Link: https://lore.kernel.org/r/20230314063610.10544-1-louis.peens@corigine.com


Signed-off-by: default avatarJakub Kicinski <kuba@kernel.org>
parents fabdc100 a87ceb3d
Loading
Loading
Loading
Loading
+200 −60
Original line number Diff line number Diff line
@@ -55,9 +55,21 @@ static void *get_hashentry(struct rhashtable *ht, void *key,

bool is_pre_ct_flow(struct flow_cls_offload *flow)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
	struct flow_dissector *dissector = rule->match.dissector;
	struct flow_action_entry *act;
	struct flow_match_ct ct;
	int i;

	if (dissector->used_keys & BIT(FLOW_DISSECTOR_KEY_CT)) {
		flow_rule_match_ct(rule, &ct);
		if (ct.key->ct_state)
			return false;
	}

	if (flow->common.chain_index)
		return false;

	flow_action_for_each(i, act, &flow->rule->action) {
		if (act->id == FLOW_ACTION_CT) {
			/* The pre_ct rule only has the ct or ct nat action, cannot
@@ -82,6 +94,11 @@ bool is_post_ct_flow(struct flow_cls_offload *flow)
	struct flow_match_ct ct;
	int i;

	if (dissector->used_keys & BIT(FLOW_DISSECTOR_KEY_CT)) {
		flow_rule_match_ct(rule, &ct);
		if (ct.key->ct_state & TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED)
			return true;
	} else {
		/* A post ct entry cannot contain any ct action except ct_clear. */
		flow_action_for_each(i, act, &flow->rule->action) {
			if (act->id == FLOW_ACTION_CT) {
@@ -94,12 +111,6 @@ bool is_post_ct_flow(struct flow_cls_offload *flow)
				return false;
			}
		}

	if (dissector->used_keys & BIT(FLOW_DISSECTOR_KEY_CT)) {
		flow_rule_match_ct(rule, &ct);
		if (ct.key->ct_state & TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED)
			return true;
	} else {
		/* When doing NAT with ct, the post ct entry ignores the ct status
		 * and will match the nat fields (sip/dip) instead. In this situation,
		 * the flow chain index is not zero and contains a ct clear action.
@@ -511,6 +522,21 @@ static int nfp_ct_check_vlan_merge(struct flow_action_entry *a_in,
	return 0;
}

/* Extra check for multiple ct-zones merge
 * currently supports the nft entries merge check across different zones
 */
static int nfp_ct_merge_extra_check(struct nfp_fl_ct_flow_entry *nft_entry,
				    struct nfp_fl_ct_tc_merge *tc_m_entry)
{
	struct nfp_fl_nft_tc_merge *prev_nft_m_entry;
	struct nfp_fl_ct_flow_entry *pre_ct_entry;

	/* Compare the new nft entry against the nft parent of the most
	 * recent previous zone merge recorded on the pre_ct parent.
	 * NOTE: indexing num_prev_m_entries - 1 relies on the caller only
	 * invoking this when num_prev_m_entries > 0.
	 */
	pre_ct_entry = tc_m_entry->pre_ct_parent;
	prev_nft_m_entry = pre_ct_entry->prev_m_entries[pre_ct_entry->num_prev_m_entries - 1];

	return nfp_ct_merge_check(prev_nft_m_entry->nft_parent, nft_entry);
}

static int nfp_ct_merge_act_check(struct nfp_fl_ct_flow_entry *pre_ct_entry,
				  struct nfp_fl_ct_flow_entry *post_ct_entry,
				  struct nfp_fl_ct_flow_entry *nft_entry)
@@ -682,34 +708,34 @@ static void nfp_fl_get_csum_flag(struct flow_action_entry *a_in, u8 ip_proto, u3
static int nfp_fl_merge_actions_offload(struct flow_rule **rules,
					struct nfp_flower_priv *priv,
					struct net_device *netdev,
					struct nfp_fl_payload *flow_pay)
					struct nfp_fl_payload *flow_pay,
					int num_rules)
{
	enum flow_action_hw_stats tmp_stats = FLOW_ACTION_HW_STATS_DONT_CARE;
	struct flow_action_entry *a_in;
	int i, j, num_actions, id;
	int i, j, id, num_actions = 0;
	struct flow_rule *a_rule;
	int err = 0, offset = 0;

	num_actions = rules[CT_TYPE_PRE_CT]->action.num_entries +
		      rules[CT_TYPE_NFT]->action.num_entries +
		      rules[CT_TYPE_POST_CT]->action.num_entries;
	for (i = 0; i < num_rules; i++)
		num_actions += rules[i]->action.num_entries;

	/* Add extra actions to make sure there is enough room to add a
	 * checksum action when doing NAT.
	 */
	a_rule = flow_rule_alloc(num_actions + 1);
	a_rule = flow_rule_alloc(num_actions + (num_rules / 2));
	if (!a_rule)
		return -ENOMEM;

	/* Actions need a BASIC dissector. */
	a_rule->match = rules[CT_TYPE_PRE_CT]->match;
	/* The post_ct entry has at least one action. */
	if (rules[CT_TYPE_POST_CT]->action.num_entries != 0) {
		tmp_stats = rules[CT_TYPE_POST_CT]->action.entries[0].hw_stats;
	}
	if (rules[num_rules - 1]->action.num_entries != 0)
		tmp_stats = rules[num_rules - 1]->action.entries[0].hw_stats;

	/* Actions need a BASIC dissector. */
	a_rule->match = rules[0]->match;

	/* Copy actions */
	for (j = 0; j < _CT_TYPE_MAX; j++) {
	for (j = 0; j < num_rules; j++) {
		u32 csum_updated = 0;
		u8 ip_proto = 0;

@@ -747,8 +773,9 @@ static int nfp_fl_merge_actions_offload(struct flow_rule **rules,
				/* nft entry is generated by tc ct, which mangle action do not care
				 * the stats, inherit the post entry stats to meet the
				 * flow_action_hw_stats_check.
				 * nft entry flow rules are at odd array index.
				 */
				if (j == CT_TYPE_NFT) {
				if (j & 0x01) {
					if (a_in->hw_stats == FLOW_ACTION_HW_STATS_DONT_CARE)
						a_in->hw_stats = tmp_stats;
					nfp_fl_get_csum_flag(a_in, ip_proto, &csum_updated);
@@ -784,32 +811,40 @@ static int nfp_fl_ct_add_offload(struct nfp_fl_nft_tc_merge *m_entry)
{
	enum nfp_flower_tun_type tun_type = NFP_FL_TUNNEL_NONE;
	struct nfp_fl_ct_zone_entry *zt = m_entry->zt;
	struct flow_rule *rules[NFP_MAX_ENTRY_RULES];
	struct nfp_fl_ct_flow_entry *pre_ct_entry;
	struct nfp_fl_key_ls key_layer, tmp_layer;
	struct nfp_flower_priv *priv = zt->priv;
	u16 key_map[_FLOW_PAY_LAYERS_MAX];
	struct nfp_fl_payload *flow_pay;

	struct flow_rule *rules[_CT_TYPE_MAX];
	u8 *key, *msk, *kdata, *mdata;
	struct nfp_port *port = NULL;
	int num_rules, err, i, j = 0;
	struct net_device *netdev;
	bool qinq_sup;
	u32 port_id;
	u16 offset;
	int i, err;

	netdev = m_entry->netdev;
	qinq_sup = !!(priv->flower_ext_feats & NFP_FL_FEATS_VLAN_QINQ);

	rules[CT_TYPE_PRE_CT] = m_entry->tc_m_parent->pre_ct_parent->rule;
	rules[CT_TYPE_NFT] = m_entry->nft_parent->rule;
	rules[CT_TYPE_POST_CT] = m_entry->tc_m_parent->post_ct_parent->rule;
	pre_ct_entry = m_entry->tc_m_parent->pre_ct_parent;
	num_rules = pre_ct_entry->num_prev_m_entries * 2 + _CT_TYPE_MAX;

	for (i = 0; i < pre_ct_entry->num_prev_m_entries; i++) {
		rules[j++] = pre_ct_entry->prev_m_entries[i]->tc_m_parent->pre_ct_parent->rule;
		rules[j++] = pre_ct_entry->prev_m_entries[i]->nft_parent->rule;
	}

	rules[j++] = m_entry->tc_m_parent->pre_ct_parent->rule;
	rules[j++] = m_entry->nft_parent->rule;
	rules[j++] = m_entry->tc_m_parent->post_ct_parent->rule;

	memset(&key_layer, 0, sizeof(struct nfp_fl_key_ls));
	memset(&key_map, 0, sizeof(key_map));

	/* Calculate the resultant key layer and size for offload */
	for (i = 0; i < _CT_TYPE_MAX; i++) {
	for (i = 0; i < num_rules; i++) {
		err = nfp_flower_calculate_key_layers(priv->app,
						      m_entry->netdev,
						      &tmp_layer, rules[i],
@@ -875,7 +910,7 @@ static int nfp_fl_ct_add_offload(struct nfp_fl_nft_tc_merge *m_entry)
	 * that the layer is not present.
	 */
	if (!qinq_sup) {
		for (i = 0; i < _CT_TYPE_MAX; i++) {
		for (i = 0; i < num_rules; i++) {
			offset = key_map[FLOW_PAY_META_TCI];
			key = kdata + offset;
			msk = mdata + offset;
@@ -889,7 +924,7 @@ static int nfp_fl_ct_add_offload(struct nfp_fl_nft_tc_merge *m_entry)
		offset = key_map[FLOW_PAY_MAC_MPLS];
		key = kdata + offset;
		msk = mdata + offset;
		for (i = 0; i < _CT_TYPE_MAX; i++) {
		for (i = 0; i < num_rules; i++) {
			nfp_flower_compile_mac((struct nfp_flower_mac_mpls *)key,
					       (struct nfp_flower_mac_mpls *)msk,
					       rules[i]);
@@ -905,7 +940,7 @@ static int nfp_fl_ct_add_offload(struct nfp_fl_nft_tc_merge *m_entry)
		offset = key_map[FLOW_PAY_IPV4];
		key = kdata + offset;
		msk = mdata + offset;
		for (i = 0; i < _CT_TYPE_MAX; i++) {
		for (i = 0; i < num_rules; i++) {
			nfp_flower_compile_ipv4((struct nfp_flower_ipv4 *)key,
						(struct nfp_flower_ipv4 *)msk,
						rules[i]);
@@ -916,7 +951,7 @@ static int nfp_fl_ct_add_offload(struct nfp_fl_nft_tc_merge *m_entry)
		offset = key_map[FLOW_PAY_IPV6];
		key = kdata + offset;
		msk = mdata + offset;
		for (i = 0; i < _CT_TYPE_MAX; i++) {
		for (i = 0; i < num_rules; i++) {
			nfp_flower_compile_ipv6((struct nfp_flower_ipv6 *)key,
						(struct nfp_flower_ipv6 *)msk,
						rules[i]);
@@ -927,7 +962,7 @@ static int nfp_fl_ct_add_offload(struct nfp_fl_nft_tc_merge *m_entry)
		offset = key_map[FLOW_PAY_L4];
		key = kdata + offset;
		msk = mdata + offset;
		for (i = 0; i < _CT_TYPE_MAX; i++) {
		for (i = 0; i < num_rules; i++) {
			nfp_flower_compile_tport((struct nfp_flower_tp_ports *)key,
						 (struct nfp_flower_tp_ports *)msk,
						 rules[i]);
@@ -938,7 +973,7 @@ static int nfp_fl_ct_add_offload(struct nfp_fl_nft_tc_merge *m_entry)
		offset = key_map[FLOW_PAY_QINQ];
		key = kdata + offset;
		msk = mdata + offset;
		for (i = 0; i < _CT_TYPE_MAX; i++) {
		for (i = 0; i < num_rules; i++) {
			nfp_flower_compile_vlan((struct nfp_flower_vlan *)key,
						(struct nfp_flower_vlan *)msk,
						rules[i]);
@@ -954,7 +989,7 @@ static int nfp_fl_ct_add_offload(struct nfp_fl_nft_tc_merge *m_entry)
			struct nfp_ipv6_addr_entry *entry;
			struct in6_addr *dst;

			for (i = 0; i < _CT_TYPE_MAX; i++) {
			for (i = 0; i < num_rules; i++) {
				nfp_flower_compile_ipv6_gre_tun((void *)key,
								(void *)msk, rules[i]);
			}
@@ -971,7 +1006,7 @@ static int nfp_fl_ct_add_offload(struct nfp_fl_nft_tc_merge *m_entry)
		} else {
			__be32 dst;

			for (i = 0; i < _CT_TYPE_MAX; i++) {
			for (i = 0; i < num_rules; i++) {
				nfp_flower_compile_ipv4_gre_tun((void *)key,
								(void *)msk, rules[i]);
			}
@@ -995,7 +1030,7 @@ static int nfp_fl_ct_add_offload(struct nfp_fl_nft_tc_merge *m_entry)
			struct nfp_ipv6_addr_entry *entry;
			struct in6_addr *dst;

			for (i = 0; i < _CT_TYPE_MAX; i++) {
			for (i = 0; i < num_rules; i++) {
				nfp_flower_compile_ipv6_udp_tun((void *)key,
								(void *)msk, rules[i]);
			}
@@ -1012,7 +1047,7 @@ static int nfp_fl_ct_add_offload(struct nfp_fl_nft_tc_merge *m_entry)
		} else {
			__be32 dst;

			for (i = 0; i < _CT_TYPE_MAX; i++) {
			for (i = 0; i < num_rules; i++) {
				nfp_flower_compile_ipv4_udp_tun((void *)key,
								(void *)msk, rules[i]);
			}
@@ -1029,13 +1064,13 @@ static int nfp_fl_ct_add_offload(struct nfp_fl_nft_tc_merge *m_entry)
			offset = key_map[FLOW_PAY_GENEVE_OPT];
			key = kdata + offset;
			msk = mdata + offset;
			for (i = 0; i < _CT_TYPE_MAX; i++)
			for (i = 0; i < num_rules; i++)
				nfp_flower_compile_geneve_opt(key, msk, rules[i]);
		}
	}

	/* Merge actions into flow_pay */
	err = nfp_fl_merge_actions_offload(rules, priv, netdev, flow_pay);
	err = nfp_fl_merge_actions_offload(rules, priv, netdev, flow_pay, num_rules);
	if (err)
		goto ct_offload_err;

@@ -1168,6 +1203,12 @@ static int nfp_ct_do_nft_merge(struct nfp_fl_ct_zone_entry *zt,
	if (err)
		return err;

	if (pre_ct_entry->num_prev_m_entries > 0) {
		err = nfp_ct_merge_extra_check(nft_entry, tc_m_entry);
		if (err)
			return err;
	}

	/* Combine tc_merge and nft cookies for this cookie. */
	new_cookie[0] = tc_m_entry->cookie[0];
	new_cookie[1] = tc_m_entry->cookie[1];
@@ -1198,11 +1239,6 @@ static int nfp_ct_do_nft_merge(struct nfp_fl_ct_zone_entry *zt,
	list_add(&nft_m_entry->tc_merge_list, &tc_m_entry->children);
	list_add(&nft_m_entry->nft_flow_list, &nft_entry->children);

	/* Generate offload structure and send to nfp */
	err = nfp_fl_ct_add_offload(nft_m_entry);
	if (err)
		goto err_nft_ct_offload;

	err = rhashtable_insert_fast(&zt->nft_merge_tb, &nft_m_entry->hash_node,
				     nfp_nft_ct_merge_params);
	if (err)
@@ -1210,12 +1246,20 @@ static int nfp_ct_do_nft_merge(struct nfp_fl_ct_zone_entry *zt,

	zt->nft_merge_count++;

	if (post_ct_entry->goto_chain_index > 0)
		return nfp_fl_create_new_pre_ct(nft_m_entry);

	/* Generate offload structure and send to nfp */
	err = nfp_fl_ct_add_offload(nft_m_entry);
	if (err)
		goto err_nft_ct_offload;

	return err;

err_nft_ct_merge_insert:
err_nft_ct_offload:
	nfp_fl_ct_del_offload(zt->priv->app, nft_m_entry->tc_flower_cookie,
			      nft_m_entry->netdev);
err_nft_ct_offload:
err_nft_ct_merge_insert:
	list_del(&nft_m_entry->tc_merge_list);
	list_del(&nft_m_entry->nft_flow_list);
	kfree(nft_m_entry);
@@ -1243,7 +1287,7 @@ static int nfp_ct_do_tc_merge(struct nfp_fl_ct_zone_entry *zt,
	/* Checks that the chain_index of the filter matches the
	 * chain_index of the GOTO action.
	 */
	if (post_ct_entry->chain_index != pre_ct_entry->chain_index)
	if (post_ct_entry->chain_index != pre_ct_entry->goto_chain_index)
		return -EINVAL;

	err = nfp_ct_merge_check(pre_ct_entry, post_ct_entry);
@@ -1461,7 +1505,7 @@ nfp_fl_ct_flow_entry *nfp_fl_ct_add_flow(struct nfp_fl_ct_zone_entry *zt,

	entry->zt = zt;
	entry->netdev = netdev;
	entry->cookie = flow->cookie;
	entry->cookie = flow->cookie > 0 ? flow->cookie : (unsigned long)entry;
	entry->chain_index = flow->common.chain_index;
	entry->tun_offset = NFP_FL_CT_NO_TUN;

@@ -1501,6 +1545,9 @@ nfp_fl_ct_flow_entry *nfp_fl_ct_add_flow(struct nfp_fl_ct_zone_entry *zt,

	INIT_LIST_HEAD(&entry->children);

	if (flow->cookie == 0)
		return entry;

	/* Now add a ct map entry to flower-priv */
	map = get_hashentry(&zt->priv->ct_map_table, &flow->cookie,
			    nfp_ct_map_params, sizeof(*map));
@@ -1559,6 +1606,14 @@ static void cleanup_nft_merge_entry(struct nfp_fl_nft_tc_merge *m_entry)
	list_del(&m_entry->tc_merge_list);
	list_del(&m_entry->nft_flow_list);

	if (m_entry->next_pre_ct_entry) {
		struct nfp_fl_ct_map_entry pre_ct_map_ent;

		pre_ct_map_ent.ct_entry = m_entry->next_pre_ct_entry;
		pre_ct_map_ent.cookie = 0;
		nfp_fl_ct_del_flow(&pre_ct_map_ent);
	}

	kfree(m_entry);
}

@@ -1656,6 +1711,22 @@ void nfp_fl_ct_clean_flow_entry(struct nfp_fl_ct_flow_entry *entry)
	kfree(entry);
}

static struct flow_action_entry *get_flow_act_ct(struct flow_rule *rule)
{
	struct flow_action_entry *act;
	int i;

	/* A rule may carry several ct actions; skip any CT clear action
	 * and hand back the first remaining ct action, if any.
	 */
	flow_action_for_each(i, act, &rule->action) {
		if (act->id != FLOW_ACTION_CT)
			continue;
		if (act->ct.action == TCA_CT_ACT_CLEAR)
			continue;
		return act;
	}

	return NULL;
}

static struct flow_action_entry *get_flow_act(struct flow_rule *rule,
					      enum flow_action_id act_id)
{
@@ -1713,14 +1784,15 @@ nfp_ct_merge_nft_with_tc(struct nfp_fl_ct_flow_entry *nft_entry,
int nfp_fl_ct_handle_pre_ct(struct nfp_flower_priv *priv,
			    struct net_device *netdev,
			    struct flow_cls_offload *flow,
			    struct netlink_ext_ack *extack)
			    struct netlink_ext_ack *extack,
			    struct nfp_fl_nft_tc_merge *m_entry)
{
	struct flow_action_entry *ct_act, *ct_goto;
	struct nfp_fl_ct_flow_entry *ct_entry;
	struct nfp_fl_ct_zone_entry *zt;
	int err;

	ct_act = get_flow_act(flow->rule, FLOW_ACTION_CT);
	ct_act = get_flow_act_ct(flow->rule);
	if (!ct_act) {
		NL_SET_ERR_MSG_MOD(extack,
				   "unsupported offload: Conntrack action empty in conntrack offload");
@@ -1756,7 +1828,22 @@ int nfp_fl_ct_handle_pre_ct(struct nfp_flower_priv *priv,
	if (IS_ERR(ct_entry))
		return PTR_ERR(ct_entry);
	ct_entry->type = CT_TYPE_PRE_CT;
	ct_entry->chain_index = ct_goto->chain_index;
	ct_entry->chain_index = flow->common.chain_index;
	ct_entry->goto_chain_index = ct_goto->chain_index;

	if (m_entry) {
		struct nfp_fl_ct_flow_entry *pre_ct_entry;
		int i;

		pre_ct_entry = m_entry->tc_m_parent->pre_ct_parent;
		for (i = 0; i < pre_ct_entry->num_prev_m_entries; i++)
			ct_entry->prev_m_entries[i] = pre_ct_entry->prev_m_entries[i];
		ct_entry->prev_m_entries[i++] = m_entry;
		ct_entry->num_prev_m_entries = i;

		m_entry->next_pre_ct_entry = ct_entry;
	}

	list_add(&ct_entry->list_node, &zt->pre_ct_list);
	zt->pre_ct_count++;

@@ -1779,6 +1866,7 @@ int nfp_fl_ct_handle_post_ct(struct nfp_flower_priv *priv,
	struct nfp_fl_ct_zone_entry *zt;
	bool wildcarded = false;
	struct flow_match_ct ct;
	struct flow_action_entry *ct_goto;

	flow_rule_match_ct(rule, &ct);
	if (!ct.mask->ct_zone) {
@@ -1803,6 +1891,8 @@ int nfp_fl_ct_handle_post_ct(struct nfp_flower_priv *priv,

	ct_entry->type = CT_TYPE_POST_CT;
	ct_entry->chain_index = flow->common.chain_index;
	ct_goto = get_flow_act(flow->rule, FLOW_ACTION_GOTO);
	ct_entry->goto_chain_index = ct_goto ? ct_goto->chain_index : 0;
	list_add(&ct_entry->list_node, &zt->post_ct_list);
	zt->post_ct_count++;

@@ -1831,6 +1921,28 @@ int nfp_fl_ct_handle_post_ct(struct nfp_flower_priv *priv,
	return 0;
}

int nfp_fl_create_new_pre_ct(struct nfp_fl_nft_tc_merge *m_entry)
{
	struct nfp_fl_ct_flow_entry *pre_ct_entry, *post_ct_entry;
	struct flow_cls_offload new_pre_ct_flow;
	int err;

	pre_ct_entry = m_entry->tc_m_parent->pre_ct_parent;
	if (pre_ct_entry->num_prev_m_entries >= NFP_MAX_RECIRC_CT_ZONES - 1)
		return -1;

	post_ct_entry = m_entry->tc_m_parent->post_ct_parent;
	memset(&new_pre_ct_flow, 0, sizeof(struct flow_cls_offload));
	new_pre_ct_flow.rule = post_ct_entry->rule;
	new_pre_ct_flow.common.chain_index = post_ct_entry->chain_index;

	err = nfp_fl_ct_handle_pre_ct(pre_ct_entry->zt->priv,
				      pre_ct_entry->netdev,
				      &new_pre_ct_flow, NULL,
				      m_entry);
	return err;
}

static void
nfp_fl_ct_sub_stats(struct nfp_fl_nft_tc_merge *nft_merge,
		    enum ct_entry_type type, u64 *m_pkts,
@@ -1876,6 +1988,32 @@ nfp_fl_ct_sub_stats(struct nfp_fl_nft_tc_merge *nft_merge,
				  0, priv->stats[ctx_id].used,
				  FLOW_ACTION_HW_STATS_DELAYED);
	}

	/* Update previous pre_ct/post_ct/nft flow stats */
	if (nft_merge->tc_m_parent->pre_ct_parent->num_prev_m_entries > 0) {
		struct nfp_fl_nft_tc_merge *tmp_nft_merge;
		int i;

		for (i = 0; i < nft_merge->tc_m_parent->pre_ct_parent->num_prev_m_entries; i++) {
			tmp_nft_merge = nft_merge->tc_m_parent->pre_ct_parent->prev_m_entries[i];
			flow_stats_update(&tmp_nft_merge->tc_m_parent->pre_ct_parent->stats,
					  priv->stats[ctx_id].bytes,
					  priv->stats[ctx_id].pkts,
					  0, priv->stats[ctx_id].used,
					  FLOW_ACTION_HW_STATS_DELAYED);
			flow_stats_update(&tmp_nft_merge->tc_m_parent->post_ct_parent->stats,
					  priv->stats[ctx_id].bytes,
					  priv->stats[ctx_id].pkts,
					  0, priv->stats[ctx_id].used,
					  FLOW_ACTION_HW_STATS_DELAYED);
			flow_stats_update(&tmp_nft_merge->nft_parent->stats,
					  priv->stats[ctx_id].bytes,
					  priv->stats[ctx_id].pkts,
					  0, priv->stats[ctx_id].used,
					  FLOW_ACTION_HW_STATS_DELAYED);
		}
	}

	/* Reset stats from the nfp */
	priv->stats[ctx_id].pkts = 0;
	priv->stats[ctx_id].bytes = 0;
@@ -2080,9 +2218,11 @@ int nfp_fl_ct_del_flow(struct nfp_fl_ct_map_entry *ct_map_ent)
	switch (ct_entry->type) {
	case CT_TYPE_PRE_CT:
		zt->pre_ct_count--;
		if (ct_map_ent->cookie > 0)
			rhashtable_remove_fast(m_table, &ct_map_ent->hash_node,
					       nfp_ct_map_params);
		nfp_fl_ct_clean_flow_entry(ct_entry);
		if (ct_map_ent->cookie > 0)
			kfree(ct_map_ent);

		if (!zt->pre_ct_count) {
+29 −3
Original line number Diff line number Diff line
@@ -86,6 +86,9 @@ enum ct_entry_type {
	_CT_TYPE_MAX,
};

#define NFP_MAX_RECIRC_CT_ZONES 4
#define NFP_MAX_ENTRY_RULES  (NFP_MAX_RECIRC_CT_ZONES * 2 + 1)

enum nfp_nfp_layer_name {
	FLOW_PAY_META_TCI =    0,
	FLOW_PAY_INPORT,
@@ -112,27 +115,33 @@ enum nfp_nfp_layer_name {
 * @cookie:	Flow cookie, same as original TC flow, used as key
 * @list_node:	Used by the list
 * @chain_index:	Chain index of the original flow
 * @goto_chain_index:	goto chain index of the flow
 * @netdev:	netdev structure.
 * @type:	Type of pre-entry from enum ct_entry_type
 * @zt:		Reference to the zone table this belongs to
 * @children:	List of tc_merge flows this flow forms part of
 * @rule:	Reference to the original TC flow rule
 * @stats:	Used to cache stats for updating
 * @prev_m_entries:	Array of all previous nft_tc_merge entries
 * @num_prev_m_entries:	The number of all previous nft_tc_merge entries
 * @tun_offset: Used to indicate tunnel action offset in action list
 * @flags:	Used to indicate flow flag like NAT which used by merge.
 * @type:	Type of ct-entry from enum ct_entry_type
 */
struct nfp_fl_ct_flow_entry {
	unsigned long cookie;
	struct list_head list_node;
	u32 chain_index;
	enum ct_entry_type type;
	u32 goto_chain_index;
	struct net_device *netdev;
	struct nfp_fl_ct_zone_entry *zt;
	struct list_head children;
	struct flow_rule *rule;
	struct flow_stats stats;
	struct nfp_fl_nft_tc_merge *prev_m_entries[NFP_MAX_RECIRC_CT_ZONES - 1];
	u8 num_prev_m_entries;
	u8 tun_offset;		// Set to NFP_FL_CT_NO_TUN if no tun
	u8 flags;
	u8 type;
};

/**
@@ -169,6 +178,7 @@ struct nfp_fl_ct_tc_merge {
 * @nft_parent:	The nft_entry parent
 * @tc_flower_cookie:	The cookie of the flow offloaded to the nfp
 * @flow_pay:	Reference to the offloaded flow struct
 * @next_pre_ct_entry:	Reference to the next ct zone pre ct entry
 */
struct nfp_fl_nft_tc_merge {
	struct net_device *netdev;
@@ -181,6 +191,7 @@ struct nfp_fl_nft_tc_merge {
	struct nfp_fl_ct_flow_entry *nft_parent;
	unsigned long tc_flower_cookie;
	struct nfp_fl_payload *flow_pay;
	struct nfp_fl_ct_flow_entry *next_pre_ct_entry;
};

/**
@@ -204,6 +215,7 @@ bool is_post_ct_flow(struct flow_cls_offload *flow);
 * @netdev:	netdev structure.
 * @flow:	TC flower classifier offload structure.
 * @extack:	Extack pointer for errors
 * @m_entry:previous nfp_fl_nft_tc_merge entry
 *
 * Adds a new entry to the relevant zone table and tries to
 * merge with other +trk+est entries and offload if possible.
@@ -213,7 +225,8 @@ bool is_post_ct_flow(struct flow_cls_offload *flow);
int nfp_fl_ct_handle_pre_ct(struct nfp_flower_priv *priv,
			    struct net_device *netdev,
			    struct flow_cls_offload *flow,
			    struct netlink_ext_ack *extack);
			    struct netlink_ext_ack *extack,
			    struct nfp_fl_nft_tc_merge *m_entry);
/**
 * nfp_fl_ct_handle_post_ct() - Handles +trk+est conntrack rules
 * @priv:	Pointer to app priv
@@ -231,6 +244,19 @@ int nfp_fl_ct_handle_post_ct(struct nfp_flower_priv *priv,
			     struct flow_cls_offload *flow,
			     struct netlink_ext_ack *extack);

/**
 * nfp_fl_create_new_pre_ct() - create next ct_zone -trk conntrack rules
 * @m_entry:previous nfp_fl_nft_tc_merge entry
 *
 * Create a new pre_ct entry from previous nfp_fl_nft_tc_merge entry
 * to the next relevant zone table. Try to merge with other +trk+est
 * entries and offload if possible. The created new pre_ct entry is
 * linked to the previous nfp_fl_nft_tc_merge entry.
 *
 * Return: negative value on error, 0 if configured successfully.
 */
int nfp_fl_create_new_pre_ct(struct nfp_fl_nft_tc_merge *m_entry);

/**
 * nfp_fl_ct_clean_flow_entry() - Free a nfp_fl_ct_flow_entry
 * @entry:	Flow entry to cleanup
+1 −1
Original line number Diff line number Diff line
@@ -1344,7 +1344,7 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
		port = nfp_port_from_netdev(netdev);

	if (is_pre_ct_flow(flow))
		return nfp_fl_ct_handle_pre_ct(priv, netdev, flow, extack);
		return nfp_fl_ct_handle_pre_ct(priv, netdev, flow, extack, NULL);

	if (is_post_ct_flow(flow))
		return nfp_fl_ct_handle_post_ct(priv, netdev, flow, extack);