Commit 679500e3 authored by Jakub Kicinski

Merge branch 'mlx5-updates-2021-01-07'

Saeed Mahameed says:

====================
mlx5 updates 2021-01-07

Misc updates series for mlx5 driver:

1) From Eli and Jianbo, E-Switch cleanups and use of a new
   FW capability for MPLS over UDP

2) From Paul Blakey, support for mirroring together with connection
tracking, implemented by splitting rules into pre- and post-connection-tracking
rules that perform the mirroring (see the schematic sketch after this message).

3) From Roi Dayan, cleanups for connection tracking

4) From Tariq, cleanups and improvements to IPsec
====================
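
The pre/post connection-tracking split in item 2 can be pictured as follows. This is a
schematic, userspace-only sketch with hypothetical types (struct rule, enum action); it is
not the mlx5 TC offload code, only an illustration of splitting one rule at the CT action.

/* ct_split_example.c - schematic illustration of splitting a rule that both
 * mirrors and goes through connection tracking (CT) into a pre-CT rule
 * (mirror + jump to CT) and a post-CT rule (final forward). Hypothetical names. */
#include <stdio.h>

enum action { ACT_MIRROR, ACT_CT, ACT_FWD };

struct rule {
	const char *name;
	enum action acts[4];
	int nacts;
};

/* Split 'orig' at the CT action: everything up to and including CT goes into the
 * pre-CT rule (mirror, then jump to the CT tables); the rest becomes the post-CT rule. */
static void split_at_ct(const struct rule *orig, struct rule *pre, struct rule *post)
{
	int i, at = orig->nacts;

	for (i = 0; i < orig->nacts; i++)
		if (orig->acts[i] == ACT_CT)
			at = i;

	pre->name = "pre_ct";
	pre->nacts = 0;
	for (i = 0; i <= at && i < orig->nacts; i++)
		pre->acts[pre->nacts++] = orig->acts[i];	/* mirror, then goto CT */

	post->name = "post_ct";
	post->nacts = 0;
	for (i = at + 1; i < orig->nacts; i++)
		post->acts[post->nacts++] = orig->acts[i];	/* remaining forward */
}

int main(void)
{
	struct rule orig = { "orig", { ACT_MIRROR, ACT_CT, ACT_FWD }, 3 };
	struct rule pre, post;

	split_at_ct(&orig, &pre, &post);
	printf("%s: %d actions, %s: %d actions\n", pre.name, pre.nacts, post.name, post.nacts);
	return 0;
}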

Link: https://lore.kernel.org/r/20210112070534.136841-1-saeed@kernel.org


Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents f50e2f9f 224169d2
+2 −5
@@ -705,9 +705,7 @@ mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv,
 	attr->flags |= MLX5_ESW_ATTR_FLAG_NO_IN_PORT;
 
 	mlx5_tc_ct_set_tuple_match(netdev_priv(ct_priv->netdev), spec, flow_rule);
-	mlx5e_tc_match_to_reg_match(spec, ZONE_TO_REG,
-				    entry->tuple.zone & MLX5_CT_ZONE_MASK,
-				    MLX5_CT_ZONE_MASK);
+	mlx5e_tc_match_to_reg_match(spec, ZONE_TO_REG, entry->tuple.zone, MLX5_CT_ZONE_MASK);
 
 	zone_rule->rule = mlx5_tc_rule_insert(priv, spec, attr);
 	if (IS_ERR(zone_rule->rule)) {
@@ -1241,9 +1239,8 @@ static int tc_ct_pre_ct_add_rules(struct mlx5_ct_ft *ct_ft,
 	pre_ct->flow_rule = rule;
 
 	/* add miss rule */
-	memset(spec, 0, sizeof(*spec));
 	dest.ft = nat ? ct_priv->ct_nat : ct_priv->ct;
-	rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
+	rule = mlx5_add_flow_rules(ft, NULL, &flow_act, &dest, 1);
 	if (IS_ERR(rule)) {
 		err = PTR_ERR(rule);
 		ct_dbg("Failed to add pre ct miss rule zone %d", zone);
+2 −2
@@ -81,8 +81,8 @@ static int parse_tunnel(struct mlx5e_priv *priv,
 	if (!enc_keyid.mask->keyid)
 		return 0;
 
-	if (!(MLX5_CAP_GEN(priv->mdev, flex_parser_protocols) &
-	      MLX5_FLEX_PROTO_CW_MPLS_UDP))
+	if (!MLX5_CAP_ETH(priv->mdev, tunnel_stateless_mpls_over_udp) &&
+	    !(MLX5_CAP_GEN(priv->mdev, flex_parser_protocols) & MLX5_FLEX_PROTO_CW_MPLS_UDP))
 		return -EOPNOTSUPP;
 
 	flow_rule_match_mpls(rule, &match);
+2 −2
@@ -144,9 +144,9 @@ static inline bool mlx5e_accel_tx_is_ipsec_flow(struct mlx5e_accel_tx_state *sta
 {
 #ifdef CONFIG_MLX5_EN_IPSEC
 	return mlx5e_ipsec_is_tx_flow(&state->ipsec);
-#endif
-
+#else
 	return false;
+#endif
 }
 
 static inline unsigned int mlx5e_accel_tx_ids_len(struct mlx5e_txqsq *sq,
+0 −14
@@ -497,20 +497,6 @@ void mlx5e_ipsec_offload_handle_rx_skb(struct net_device *netdev,
 	}
 }
 
-bool mlx5e_ipsec_feature_check(struct sk_buff *skb, struct net_device *netdev,
-			       netdev_features_t features)
-{
-	struct sec_path *sp = skb_sec_path(skb);
-	struct xfrm_state *x;
-
-	if (sp && sp->len) {
-		x = sp->xvec[0];
-		if (x && x->xso.offload_handle)
-			return true;
-	}
-	return false;
-}
-
 void mlx5e_ipsec_build_inverse_table(void)
 {
 	u16 mss_inv;
+27 −2
@@ -57,8 +57,6 @@ struct sk_buff *mlx5e_ipsec_handle_rx_skb(struct net_device *netdev,
 					  struct sk_buff *skb, u32 *cqe_bcnt);
 
 void mlx5e_ipsec_inverse_table_init(void);
-bool mlx5e_ipsec_feature_check(struct sk_buff *skb, struct net_device *netdev,
-			       netdev_features_t features);
 void mlx5e_ipsec_set_iv_esn(struct sk_buff *skb, struct xfrm_state *x,
 			    struct xfrm_offload *xo);
 void mlx5e_ipsec_set_iv(struct sk_buff *skb, struct xfrm_state *x,
@@ -87,8 +85,28 @@ static inline bool mlx5e_ipsec_is_tx_flow(struct mlx5e_accel_tx_ipsec_state *ips
 	return ipsec_st->x;
 }
 
+static inline bool mlx5e_ipsec_eseg_meta(struct mlx5_wqe_eth_seg *eseg)
+{
+	return eseg->flow_table_metadata & cpu_to_be32(MLX5_ETH_WQE_FT_META_IPSEC);
+}
+
 void mlx5e_ipsec_tx_build_eseg(struct mlx5e_priv *priv, struct sk_buff *skb,
 			       struct mlx5_wqe_eth_seg *eseg);
+
+static inline bool mlx5e_ipsec_feature_check(struct sk_buff *skb, struct net_device *netdev,
+					     netdev_features_t features)
+{
+	struct sec_path *sp = skb_sec_path(skb);
+
+	if (sp && sp->len) {
+		struct xfrm_state *x = sp->xvec[0];
+
+		if (x && x->xso.offload_handle)
+			return true;
+	}
+	return false;
+}
+
 #else
 static inline
 void mlx5e_ipsec_offload_handle_rx_skb(struct net_device *netdev,
@@ -96,7 +114,14 @@ void mlx5e_ipsec_offload_handle_rx_skb(struct net_device *netdev,
 				       struct mlx5_cqe64 *cqe)
 {}
 
+static inline bool mlx5e_ipsec_eseg_meta(struct mlx5_wqe_eth_seg *eseg)
+{
+	return false;
+}
+
 static inline bool mlx5_ipsec_is_rx_flow(struct mlx5_cqe64 *cqe) { return false; }
+static inline bool mlx5e_ipsec_feature_check(struct sk_buff *skb, struct net_device *netdev,
+					     netdev_features_t features) { return false; }
 #endif /* CONFIG_MLX5_EN_IPSEC */
 
 #endif /* __MLX5E_IPSEC_RXTX_H__ */
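
The header change above moves mlx5e_ipsec_feature_check() into the header as a static
inline and, like the new mlx5e_ipsec_eseg_meta(), pairs it with a stub in the #else
branch so TX fast-path callers compile without #ifdefs whether or not
CONFIG_MLX5_EN_IPSEC is set. Below is a minimal, self-contained sketch of that
config-gated inline/stub pattern; CONFIG_MY_FEATURE, my_feature_check and struct
my_state are hypothetical names, not the mlx5 code.

/* config_stub_example.c - illustrative only.
 * Build with -DCONFIG_MY_FEATURE for the real helper, without it for the stub. */
#include <stdbool.h>
#include <stdio.h>

struct my_state { int offload_handle; };

#ifdef CONFIG_MY_FEATURE
/* Real helper: inlined into the fast path when the feature is compiled in. */
static inline bool my_feature_check(const struct my_state *st)
{
	return st && st->offload_handle != 0;
}
#else
/* Stub: call sites stay free of #ifdefs when the feature is compiled out. */
static inline bool my_feature_check(const struct my_state *st)
{
	(void)st;
	return false;
}
#endif

int main(void)
{
	struct my_state st = { .offload_handle = 1 };

	printf("offload eligible: %d\n", my_feature_check(&st));
	return 0;
}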