Commit 220efcf9 authored by Jakub Kicinski
Browse files

Merge tag 'mlx5-fixes-2021-01-07' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5 fixes 2021-01-07

* tag 'mlx5-fixes-2021-01-07' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux:
  net/mlx5e: Fix memleak in mlx5e_create_l2_table_groups
  net/mlx5e: Fix two double free cases
  net/mlx5: Release devlink object if adev fails
  net/mlx5e: ethtool, Fix restriction of autoneg with 56G
  net/mlx5e: In skb build skip setting mark in switchdev mode
  net/mlx5: E-Switch, fix changing vf VLANID
  net/mlx5e: Fix SWP offsets when vlan inserted by driver
  net/mlx5e: CT: Use per flow counter when CT flow accounting is enabled
  net/mlx5: Use port_num 1 instead of 0 when delete a RoCE address
  net/mlx5e: Add missing capability check for uplink follow
  net/mlx5: Check if lag is supported before creating one
====================

Link: https://lore.kernel.org/r/20210107202845.470205-1-saeed@kernel.org


Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents 3545454c 5b0bb12c
Loading
Loading
Loading
Loading
+5 −0
Original line number Diff line number Diff line
@@ -626,6 +626,11 @@ bool mlx5e_rep_tc_update_skb(struct mlx5_cqe64 *cqe,
	if (!reg_c0)
		return true;

	/* If reg_c0 is not equal to the default flow tag then skb->mark
	 * is not supported and must be reset back to 0.
	 */
	skb->mark = 0;

	priv = netdev_priv(skb->dev);
	esw = priv->mdev->priv.eswitch;

+49 −28
Original line number Diff line number Diff line
@@ -118,16 +118,17 @@ struct mlx5_ct_tuple {
	u16 zone;
};

struct mlx5_ct_shared_counter {
struct mlx5_ct_counter {
	struct mlx5_fc *counter;
	refcount_t refcount;
	bool is_shared;
};

struct mlx5_ct_entry {
	struct rhash_head node;
	struct rhash_head tuple_node;
	struct rhash_head tuple_nat_node;
	struct mlx5_ct_shared_counter *shared_counter;
	struct mlx5_ct_counter *counter;
	unsigned long cookie;
	unsigned long restore_cookie;
	struct mlx5_ct_tuple tuple;
@@ -394,13 +395,14 @@ mlx5_tc_ct_set_tuple_match(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec,
}

static void
mlx5_tc_ct_shared_counter_put(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_entry *entry)
mlx5_tc_ct_counter_put(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_entry *entry)
{
	if (!refcount_dec_and_test(&entry->shared_counter->refcount))
	if (entry->counter->is_shared &&
	    !refcount_dec_and_test(&entry->counter->refcount))
		return;

	mlx5_fc_destroy(ct_priv->dev, entry->shared_counter->counter);
	kfree(entry->shared_counter);
	mlx5_fc_destroy(ct_priv->dev, entry->counter->counter);
	kfree(entry->counter);
}

static void
@@ -699,7 +701,7 @@ mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv,
	attr->dest_ft = ct_priv->post_ct;
	attr->ft = nat ? ct_priv->ct_nat : ct_priv->ct;
	attr->outer_match_level = MLX5_MATCH_L4;
	attr->counter = entry->shared_counter->counter;
	attr->counter = entry->counter->counter;
	attr->flags |= MLX5_ESW_ATTR_FLAG_NO_IN_PORT;

	mlx5_tc_ct_set_tuple_match(netdev_priv(ct_priv->netdev), spec, flow_rule);
@@ -732,13 +734,34 @@ mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv,
	return err;
}

static struct mlx5_ct_shared_counter *
static struct mlx5_ct_counter *
mlx5_tc_ct_counter_create(struct mlx5_tc_ct_priv *ct_priv)
{
	struct mlx5_ct_counter *counter;
	int ret;

	counter = kzalloc(sizeof(*counter), GFP_KERNEL);
	if (!counter)
		return ERR_PTR(-ENOMEM);

	counter->is_shared = false;
	counter->counter = mlx5_fc_create(ct_priv->dev, true);
	if (IS_ERR(counter->counter)) {
		ct_dbg("Failed to create counter for ct entry");
		ret = PTR_ERR(counter->counter);
		kfree(counter);
		return ERR_PTR(ret);
	}

	return counter;
}

static struct mlx5_ct_counter *
mlx5_tc_ct_shared_counter_get(struct mlx5_tc_ct_priv *ct_priv,
			      struct mlx5_ct_entry *entry)
{
	struct mlx5_ct_tuple rev_tuple = entry->tuple;
	struct mlx5_ct_shared_counter *shared_counter;
	struct mlx5_core_dev *dev = ct_priv->dev;
	struct mlx5_ct_counter *shared_counter;
	struct mlx5_ct_entry *rev_entry;
	__be16 tmp_port;
	int ret;
@@ -767,25 +790,20 @@ mlx5_tc_ct_shared_counter_get(struct mlx5_tc_ct_priv *ct_priv,
	rev_entry = rhashtable_lookup_fast(&ct_priv->ct_tuples_ht, &rev_tuple,
					   tuples_ht_params);
	if (rev_entry) {
		if (refcount_inc_not_zero(&rev_entry->shared_counter->refcount)) {
		if (refcount_inc_not_zero(&rev_entry->counter->refcount)) {
			mutex_unlock(&ct_priv->shared_counter_lock);
			return rev_entry->shared_counter;
			return rev_entry->counter;
		}
	}
	mutex_unlock(&ct_priv->shared_counter_lock);

	shared_counter = kzalloc(sizeof(*shared_counter), GFP_KERNEL);
	if (!shared_counter)
		return ERR_PTR(-ENOMEM);

	shared_counter->counter = mlx5_fc_create(dev, true);
	if (IS_ERR(shared_counter->counter)) {
		ct_dbg("Failed to create counter for ct entry");
		ret = PTR_ERR(shared_counter->counter);
		kfree(shared_counter);
	shared_counter = mlx5_tc_ct_counter_create(ct_priv);
	if (IS_ERR(shared_counter)) {
		ret = PTR_ERR(shared_counter);
		return ERR_PTR(ret);
	}

	shared_counter->is_shared = true;
	refcount_set(&shared_counter->refcount, 1);
	return shared_counter;
}
@@ -798,10 +816,13 @@ mlx5_tc_ct_entry_add_rules(struct mlx5_tc_ct_priv *ct_priv,
{
	int err;

	entry->shared_counter = mlx5_tc_ct_shared_counter_get(ct_priv, entry);
	if (IS_ERR(entry->shared_counter)) {
		err = PTR_ERR(entry->shared_counter);
		ct_dbg("Failed to create counter for ct entry");
	if (nf_ct_acct_enabled(dev_net(ct_priv->netdev)))
		entry->counter = mlx5_tc_ct_counter_create(ct_priv);
	else
		entry->counter = mlx5_tc_ct_shared_counter_get(ct_priv, entry);

	if (IS_ERR(entry->counter)) {
		err = PTR_ERR(entry->counter);
		return err;
	}

@@ -820,7 +841,7 @@ mlx5_tc_ct_entry_add_rules(struct mlx5_tc_ct_priv *ct_priv,
err_nat:
	mlx5_tc_ct_entry_del_rule(ct_priv, entry, false);
err_orig:
	mlx5_tc_ct_shared_counter_put(ct_priv, entry);
	mlx5_tc_ct_counter_put(ct_priv, entry);
	return err;
}

@@ -918,7 +939,7 @@ mlx5_tc_ct_del_ft_entry(struct mlx5_tc_ct_priv *ct_priv,
	rhashtable_remove_fast(&ct_priv->ct_tuples_ht, &entry->tuple_node,
			       tuples_ht_params);
	mutex_unlock(&ct_priv->shared_counter_lock);
	mlx5_tc_ct_shared_counter_put(ct_priv, entry);
	mlx5_tc_ct_counter_put(ct_priv, entry);

}

@@ -956,7 +977,7 @@ mlx5_tc_ct_block_flow_offload_stats(struct mlx5_ct_ft *ft,
	if (!entry)
		return -ENOENT;

	mlx5_fc_query_cached(entry->shared_counter->counter, &bytes, &packets, &lastuse);
	mlx5_fc_query_cached(entry->counter->counter, &bytes, &packets, &lastuse);
	flow_stats_update(&f->stats, bytes, packets, 0, lastuse,
			  FLOW_ACTION_HW_STATS_DELAYED);

+9 −0
Original line number Diff line number Diff line
@@ -371,6 +371,15 @@ struct mlx5e_swp_spec {
	u8 tun_l4_proto;
};

static inline void mlx5e_eseg_swp_offsets_add_vlan(struct mlx5_wqe_eth_seg *eseg)
{
	/* SWP offsets are in 2-bytes words */
	eseg->swp_outer_l3_offset += VLAN_HLEN / 2;
	eseg->swp_outer_l4_offset += VLAN_HLEN / 2;
	eseg->swp_inner_l3_offset += VLAN_HLEN / 2;
	eseg->swp_inner_l4_offset += VLAN_HLEN / 2;
}

static inline void
mlx5e_set_eseg_swp(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg,
		   struct mlx5e_swp_spec *swp_spec)
+5 −3
Original line number Diff line number Diff line
@@ -51,7 +51,7 @@ static inline bool mlx5_geneve_tx_allowed(struct mlx5_core_dev *mdev)
}

static inline void
mlx5e_tx_tunnel_accel(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg)
mlx5e_tx_tunnel_accel(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg, u16 ihs)
{
	struct mlx5e_swp_spec swp_spec = {};
	unsigned int offset = 0;
@@ -85,6 +85,8 @@ mlx5e_tx_tunnel_accel(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg)
	}

	mlx5e_set_eseg_swp(skb, eseg, &swp_spec);
	if (skb_vlan_tag_present(skb) &&  ihs)
		mlx5e_eseg_swp_offsets_add_vlan(eseg);
}

#else
@@ -163,7 +165,7 @@ static inline unsigned int mlx5e_accel_tx_ids_len(struct mlx5e_txqsq *sq,

static inline bool mlx5e_accel_tx_eseg(struct mlx5e_priv *priv,
				       struct sk_buff *skb,
				       struct mlx5_wqe_eth_seg *eseg)
				       struct mlx5_wqe_eth_seg *eseg, u16 ihs)
{
#ifdef CONFIG_MLX5_EN_IPSEC
	if (xfrm_offload(skb))
@@ -172,7 +174,7 @@ static inline bool mlx5e_accel_tx_eseg(struct mlx5e_priv *priv,

#if IS_ENABLED(CONFIG_GENEVE)
	if (skb->encapsulation)
		mlx5e_tx_tunnel_accel(skb, eseg);
		mlx5e_tx_tunnel_accel(skb, eseg, ihs);
#endif

	return true;
+18 −6
Original line number Diff line number Diff line
@@ -1010,6 +1010,22 @@ static int mlx5e_get_link_ksettings(struct net_device *netdev,
	return mlx5e_ethtool_get_link_ksettings(priv, link_ksettings);
}

static int mlx5e_speed_validate(struct net_device *netdev, bool ext,
				const unsigned long link_modes, u8 autoneg)
{
	/* Extended link-mode has no speed limitations. */
	if (ext)
		return 0;

	if ((link_modes & MLX5E_PROT_MASK(MLX5E_56GBASE_R4)) &&
	    autoneg != AUTONEG_ENABLE) {
		netdev_err(netdev, "%s: 56G link speed requires autoneg enabled\n",
			   __func__);
		return -EINVAL;
	}
	return 0;
}

static u32 mlx5e_ethtool2ptys_adver_link(const unsigned long *link_modes)
{
	u32 i, ptys_modes = 0;
@@ -1103,13 +1119,9 @@ int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
	link_modes = autoneg == AUTONEG_ENABLE ? ethtool2ptys_adver_func(adver) :
		mlx5e_port_speed2linkmodes(mdev, speed, !ext);

	if ((link_modes & MLX5E_PROT_MASK(MLX5E_56GBASE_R4)) &&
	    autoneg != AUTONEG_ENABLE) {
		netdev_err(priv->netdev, "%s: 56G link speed requires autoneg enabled\n",
			   __func__);
		err = -EINVAL;
	err = mlx5e_speed_validate(priv->netdev, ext, link_modes, autoneg);
	if (err)
		goto out;
	}

	link_modes = link_modes & eproto.cap;
	if (!link_modes) {
Loading