Commit 765d1216 authored by David S. Miller
Browse files

Merge tag 'mlx5-fixes-2022-05-17' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux



Saeed Mahameed says:

====================
mlx5 fixes 2022-05-17

This series provides bug fixes to mlx5 driver.
Please pull and let me know if there is any problem.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
parents 23dd4581 16d42d31
Loading
Loading
Loading
Loading
+33 −25
Original line number Diff line number Diff line
@@ -23,7 +23,7 @@ struct mlx5_ct_fs_smfs_matcher {
};

struct mlx5_ct_fs_smfs_matchers {
	struct mlx5_ct_fs_smfs_matcher smfs_matchers[4];
	struct mlx5_ct_fs_smfs_matcher smfs_matchers[6];
	struct list_head used;
};

@@ -44,7 +44,8 @@ struct mlx5_ct_fs_smfs_rule {
};

static inline void
mlx5_ct_fs_smfs_fill_mask(struct mlx5_ct_fs *fs, struct mlx5_flow_spec *spec, bool ipv4, bool tcp)
mlx5_ct_fs_smfs_fill_mask(struct mlx5_ct_fs *fs, struct mlx5_flow_spec *spec, bool ipv4, bool tcp,
			  bool gre)
{
	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, outer_headers);

@@ -77,7 +78,7 @@ mlx5_ct_fs_smfs_fill_mask(struct mlx5_ct_fs *fs, struct mlx5_flow_spec *spec, bo
		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, tcp_dport);
		MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_flags,
			 ntohs(MLX5_CT_TCP_FLAGS_MASK));
	} else {
	} else if (!gre) {
		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, udp_sport);
		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, udp_dport);
	}
@@ -87,7 +88,7 @@ mlx5_ct_fs_smfs_fill_mask(struct mlx5_ct_fs *fs, struct mlx5_flow_spec *spec, bo

static struct mlx5dr_matcher *
mlx5_ct_fs_smfs_matcher_create(struct mlx5_ct_fs *fs, struct mlx5dr_table *tbl, bool ipv4,
			       bool tcp, u32 priority)
			       bool tcp, bool gre, u32 priority)
{
	struct mlx5dr_matcher *dr_matcher;
	struct mlx5_flow_spec *spec;
@@ -96,7 +97,7 @@ mlx5_ct_fs_smfs_matcher_create(struct mlx5_ct_fs *fs, struct mlx5dr_table *tbl,
	if (!spec)
		return ERR_PTR(-ENOMEM);

	mlx5_ct_fs_smfs_fill_mask(fs, spec, ipv4, tcp);
	mlx5_ct_fs_smfs_fill_mask(fs, spec, ipv4, tcp, gre);
	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2 | MLX5_MATCH_OUTER_HEADERS;

	dr_matcher = mlx5_smfs_matcher_create(tbl, priority, spec);
@@ -108,7 +109,7 @@ mlx5_ct_fs_smfs_matcher_create(struct mlx5_ct_fs *fs, struct mlx5dr_table *tbl,
}

static struct mlx5_ct_fs_smfs_matcher *
mlx5_ct_fs_smfs_matcher_get(struct mlx5_ct_fs *fs, bool nat, bool ipv4, bool tcp)
mlx5_ct_fs_smfs_matcher_get(struct mlx5_ct_fs *fs, bool nat, bool ipv4, bool tcp, bool gre)
{
	struct mlx5_ct_fs_smfs *fs_smfs = mlx5_ct_fs_priv(fs);
	struct mlx5_ct_fs_smfs_matcher *m, *smfs_matcher;
@@ -119,7 +120,7 @@ mlx5_ct_fs_smfs_matcher_get(struct mlx5_ct_fs *fs, bool nat, bool ipv4, bool tcp
	int prio;

	matchers = nat ? &fs_smfs->matchers_nat : &fs_smfs->matchers;
	smfs_matcher = &matchers->smfs_matchers[ipv4 * 2 + tcp];
	smfs_matcher = &matchers->smfs_matchers[ipv4 * 3 + tcp * 2 + gre];

	if (refcount_inc_not_zero(&smfs_matcher->ref))
		return smfs_matcher;
@@ -145,11 +146,11 @@ mlx5_ct_fs_smfs_matcher_get(struct mlx5_ct_fs *fs, bool nat, bool ipv4, bool tcp
	}

	tbl = nat ? fs_smfs->ct_nat_tbl : fs_smfs->ct_tbl;
	dr_matcher = mlx5_ct_fs_smfs_matcher_create(fs, tbl, ipv4, tcp, prio);
	dr_matcher = mlx5_ct_fs_smfs_matcher_create(fs, tbl, ipv4, tcp, gre, prio);
	if (IS_ERR(dr_matcher)) {
		netdev_warn(fs->netdev,
			    "ct_fs_smfs: failed to create matcher (nat %d, ipv4 %d, tcp %d), err: %ld\n",
			    nat, ipv4, tcp, PTR_ERR(dr_matcher));
			    "ct_fs_smfs: failed to create matcher (nat %d, ipv4 %d, tcp %d, gre %d), err: %ld\n",
			    nat, ipv4, tcp, gre, PTR_ERR(dr_matcher));

		smfs_matcher = ERR_CAST(dr_matcher);
		goto out_unlock;
@@ -222,16 +223,17 @@ mlx5_ct_fs_smfs_destroy(struct mlx5_ct_fs *fs)
static inline bool
mlx5_tc_ct_valid_used_dissector_keys(const u32 used_keys)
{
#define DISSECTOR_BIT(name) BIT(FLOW_DISSECTOR_KEY_ ## name)
	const u32 basic_keys = DISSECTOR_BIT(BASIC) | DISSECTOR_BIT(CONTROL) |
			       DISSECTOR_BIT(PORTS) | DISSECTOR_BIT(META);
	const u32 ipv4_tcp = basic_keys | DISSECTOR_BIT(IPV4_ADDRS) | DISSECTOR_BIT(TCP);
	const u32 ipv4_udp = basic_keys | DISSECTOR_BIT(IPV4_ADDRS);
	const u32 ipv6_tcp = basic_keys | DISSECTOR_BIT(IPV6_ADDRS) | DISSECTOR_BIT(TCP);
	const u32 ipv6_udp = basic_keys | DISSECTOR_BIT(IPV6_ADDRS);
#define DISS_BIT(name) BIT(FLOW_DISSECTOR_KEY_ ## name)
	const u32 basic_keys = DISS_BIT(BASIC) | DISS_BIT(CONTROL) | DISS_BIT(META);
	const u32 ipv4_tcp = basic_keys | DISS_BIT(IPV4_ADDRS) | DISS_BIT(PORTS) | DISS_BIT(TCP);
	const u32 ipv6_tcp = basic_keys | DISS_BIT(IPV6_ADDRS) | DISS_BIT(PORTS) | DISS_BIT(TCP);
	const u32 ipv4_udp = basic_keys | DISS_BIT(IPV4_ADDRS) | DISS_BIT(PORTS);
	const u32 ipv6_udp = basic_keys | DISS_BIT(IPV6_ADDRS) | DISS_BIT(PORTS);
	const u32 ipv4_gre = basic_keys | DISS_BIT(IPV4_ADDRS);
	const u32 ipv6_gre = basic_keys | DISS_BIT(IPV6_ADDRS);

	return (used_keys == ipv4_tcp || used_keys == ipv4_udp || used_keys == ipv6_tcp ||
		used_keys == ipv6_udp);
		used_keys == ipv6_udp || used_keys == ipv4_gre || used_keys == ipv6_gre);
}

static bool
@@ -254,20 +256,24 @@ mlx5_ct_fs_smfs_ct_validate_flow_rule(struct mlx5_ct_fs *fs, struct flow_rule *f
	flow_rule_match_control(flow_rule, &control);
	flow_rule_match_ipv4_addrs(flow_rule, &ipv4_addrs);
	flow_rule_match_ipv6_addrs(flow_rule, &ipv6_addrs);
	if (basic.key->ip_proto != IPPROTO_GRE)
		flow_rule_match_ports(flow_rule, &ports);
	if (basic.key->ip_proto == IPPROTO_TCP)
		flow_rule_match_tcp(flow_rule, &tcp);

	if (basic.mask->n_proto != htons(0xFFFF) ||
	    (basic.key->n_proto != htons(ETH_P_IP) && basic.key->n_proto != htons(ETH_P_IPV6)) ||
	    basic.mask->ip_proto != 0xFF ||
	    (basic.key->ip_proto != IPPROTO_UDP && basic.key->ip_proto != IPPROTO_TCP)) {
	    (basic.key->ip_proto != IPPROTO_UDP && basic.key->ip_proto != IPPROTO_TCP &&
	     basic.key->ip_proto != IPPROTO_GRE)) {
		ct_dbg("rule uses unexpected basic match (n_proto 0x%04x/0x%04x, ip_proto 0x%02x/0x%02x)",
		       ntohs(basic.key->n_proto), ntohs(basic.mask->n_proto),
		       basic.key->ip_proto, basic.mask->ip_proto);
		return false;
	}

	if (ports.mask->src != htons(0xFFFF) || ports.mask->dst != htons(0xFFFF)) {
	if (basic.key->ip_proto != IPPROTO_GRE &&
	    (ports.mask->src != htons(0xFFFF) || ports.mask->dst != htons(0xFFFF))) {
		ct_dbg("rule uses ports match (src 0x%04x, dst 0x%04x)",
		       ports.mask->src, ports.mask->dst);
		return false;
@@ -291,7 +297,7 @@ mlx5_ct_fs_smfs_ct_rule_add(struct mlx5_ct_fs *fs, struct mlx5_flow_spec *spec,
	struct mlx5dr_action *actions[5];
	struct mlx5dr_rule *rule;
	int num_actions = 0, err;
	bool nat, tcp, ipv4;
	bool nat, tcp, ipv4, gre;

	if (!mlx5_ct_fs_smfs_ct_validate_flow_rule(fs, flow_rule))
		return ERR_PTR(-EOPNOTSUPP);
@@ -314,15 +320,17 @@ mlx5_ct_fs_smfs_ct_rule_add(struct mlx5_ct_fs *fs, struct mlx5_flow_spec *spec,
	ipv4 = mlx5e_tc_get_ip_version(spec, true) == 4;
	tcp = MLX5_GET(fte_match_param, spec->match_value,
		       outer_headers.ip_protocol) == IPPROTO_TCP;
	gre = MLX5_GET(fte_match_param, spec->match_value,
		       outer_headers.ip_protocol) == IPPROTO_GRE;

	smfs_matcher = mlx5_ct_fs_smfs_matcher_get(fs, nat, ipv4, tcp);
	smfs_matcher = mlx5_ct_fs_smfs_matcher_get(fs, nat, ipv4, tcp, gre);
	if (IS_ERR(smfs_matcher)) {
		err = PTR_ERR(smfs_matcher);
		goto err_matcher;
	}

	rule = mlx5_smfs_rule_create(smfs_matcher->dr_matcher, spec, num_actions, actions,
				     MLX5_FLOW_CONTEXT_FLOW_SOURCE_ANY_VPORT);
				     spec->flow_context.flow_source);
	if (!rule) {
		err = -EINVAL;
		goto err_create;
+10 −3
Original line number Diff line number Diff line
@@ -14,19 +14,26 @@ static int mlx5e_trap_napi_poll(struct napi_struct *napi, int budget)
	bool busy = false;
	int work_done = 0;

	rcu_read_lock();

	ch_stats->poll++;

	work_done = mlx5e_poll_rx_cq(&rq->cq, budget);
	busy |= work_done == budget;
	busy |= rq->post_wqes(rq);

	if (busy)
		return budget;
	if (busy) {
		work_done = budget;
		goto out;
	}

	if (unlikely(!napi_complete_done(napi, work_done)))
		return work_done;
		goto out;

	mlx5e_cq_arm(&rq->cq);

out:
	rcu_read_unlock();
	return work_done;
}

+23 −4
Original line number Diff line number Diff line
@@ -3864,6 +3864,10 @@ static netdev_features_t mlx5e_fix_uplink_rep_features(struct net_device *netdev
	if (netdev->features & NETIF_F_NTUPLE)
		netdev_warn(netdev, "Disabling ntuple, not supported in switchdev mode\n");

	features &= ~NETIF_F_GRO_HW;
	if (netdev->features & NETIF_F_GRO_HW)
		netdev_warn(netdev, "Disabling HW_GRO, not supported in switchdev mode\n");

	return features;
}

@@ -3896,6 +3900,25 @@ static netdev_features_t mlx5e_fix_features(struct net_device *netdev,
		}
	}

	if (params->xdp_prog) {
		if (features & NETIF_F_LRO) {
			netdev_warn(netdev, "LRO is incompatible with XDP\n");
			features &= ~NETIF_F_LRO;
		}
		if (features & NETIF_F_GRO_HW) {
			netdev_warn(netdev, "HW GRO is incompatible with XDP\n");
			features &= ~NETIF_F_GRO_HW;
		}
	}

	if (priv->xsk.refcnt) {
		if (features & NETIF_F_GRO_HW) {
			netdev_warn(netdev, "HW GRO is incompatible with AF_XDP (%u XSKs are active)\n",
				    priv->xsk.refcnt);
			features &= ~NETIF_F_GRO_HW;
		}
	}

	if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS)) {
		features &= ~NETIF_F_RXHASH;
		if (netdev->features & NETIF_F_RXHASH)
@@ -4850,10 +4873,6 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
	netdev->hw_features      |= NETIF_F_HW_VLAN_CTAG_FILTER;
	netdev->hw_features      |= NETIF_F_HW_VLAN_STAG_TX;

	if (!!MLX5_CAP_GEN(mdev, shampo) &&
	    mlx5e_check_fragmented_striding_rq_cap(mdev))
		netdev->hw_features    |= NETIF_F_GRO_HW;

	if (mlx5e_tunnel_any_tx_proto_supported(mdev)) {
		netdev->hw_enc_features |= NETIF_F_HW_CSUM;
		netdev->hw_enc_features |= NETIF_F_TSO;
+75 −56
Original line number Diff line number Diff line
@@ -2663,28 +2663,6 @@ static void cleanup_root_ns(struct mlx5_flow_root_namespace *root_ns)
	clean_tree(&root_ns->ns.node);
}

void mlx5_cleanup_fs(struct mlx5_core_dev *dev)
{
	struct mlx5_flow_steering *steering = dev->priv.steering;

	cleanup_root_ns(steering->root_ns);
	cleanup_root_ns(steering->fdb_root_ns);
	steering->fdb_root_ns = NULL;
	kfree(steering->fdb_sub_ns);
	steering->fdb_sub_ns = NULL;
	cleanup_root_ns(steering->port_sel_root_ns);
	cleanup_root_ns(steering->sniffer_rx_root_ns);
	cleanup_root_ns(steering->sniffer_tx_root_ns);
	cleanup_root_ns(steering->rdma_rx_root_ns);
	cleanup_root_ns(steering->rdma_tx_root_ns);
	cleanup_root_ns(steering->egress_root_ns);
	mlx5_cleanup_fc_stats(dev);
	kmem_cache_destroy(steering->ftes_cache);
	kmem_cache_destroy(steering->fgs_cache);
	mlx5_ft_pool_destroy(dev);
	kfree(steering);
}

static int init_sniffer_tx_root_ns(struct mlx5_flow_steering *steering)
{
	struct fs_prio *prio;
@@ -3086,42 +3064,27 @@ static int init_egress_root_ns(struct mlx5_flow_steering *steering)
	return err;
}

int mlx5_init_fs(struct mlx5_core_dev *dev)
void mlx5_fs_core_cleanup(struct mlx5_core_dev *dev)
{
	struct mlx5_flow_steering *steering;
	int err = 0;

	err = mlx5_init_fc_stats(dev);
	if (err)
		return err;

	err = mlx5_ft_pool_init(dev);
	if (err)
		return err;
	struct mlx5_flow_steering *steering = dev->priv.steering;

	steering = kzalloc(sizeof(*steering), GFP_KERNEL);
	if (!steering) {
		err = -ENOMEM;
		goto err;
	cleanup_root_ns(steering->root_ns);
	cleanup_root_ns(steering->fdb_root_ns);
	steering->fdb_root_ns = NULL;
	kfree(steering->fdb_sub_ns);
	steering->fdb_sub_ns = NULL;
	cleanup_root_ns(steering->port_sel_root_ns);
	cleanup_root_ns(steering->sniffer_rx_root_ns);
	cleanup_root_ns(steering->sniffer_tx_root_ns);
	cleanup_root_ns(steering->rdma_rx_root_ns);
	cleanup_root_ns(steering->rdma_tx_root_ns);
	cleanup_root_ns(steering->egress_root_ns);
}

	steering->dev = dev;
	dev->priv.steering = steering;

	if (mlx5_fs_dr_is_supported(dev))
		steering->mode = MLX5_FLOW_STEERING_MODE_SMFS;
	else
		steering->mode = MLX5_FLOW_STEERING_MODE_DMFS;

	steering->fgs_cache = kmem_cache_create("mlx5_fs_fgs",
						sizeof(struct mlx5_flow_group), 0,
						0, NULL);
	steering->ftes_cache = kmem_cache_create("mlx5_fs_ftes", sizeof(struct fs_fte), 0,
						 0, NULL);
	if (!steering->ftes_cache || !steering->fgs_cache) {
		err = -ENOMEM;
		goto err;
	}
int mlx5_fs_core_init(struct mlx5_core_dev *dev)
{
	struct mlx5_flow_steering *steering = dev->priv.steering;
	int err = 0;

	if ((((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH) &&
	      (MLX5_CAP_GEN(dev, nic_flow_table))) ||
@@ -3180,8 +3143,64 @@ int mlx5_init_fs(struct mlx5_core_dev *dev)
	}

	return 0;

err:
	mlx5_fs_core_cleanup(dev);
	return err;
}

void mlx5_fs_core_free(struct mlx5_core_dev *dev)
{
	struct mlx5_flow_steering *steering = dev->priv.steering;

	kmem_cache_destroy(steering->ftes_cache);
	kmem_cache_destroy(steering->fgs_cache);
	kfree(steering);
	mlx5_ft_pool_destroy(dev);
	mlx5_cleanup_fc_stats(dev);
}

int mlx5_fs_core_alloc(struct mlx5_core_dev *dev)
{
	struct mlx5_flow_steering *steering;
	int err = 0;

	err = mlx5_init_fc_stats(dev);
	if (err)
		return err;

	err = mlx5_ft_pool_init(dev);
	if (err)
		goto err;

	steering = kzalloc(sizeof(*steering), GFP_KERNEL);
	if (!steering) {
		err = -ENOMEM;
		goto err;
	}

	steering->dev = dev;
	dev->priv.steering = steering;

	if (mlx5_fs_dr_is_supported(dev))
		steering->mode = MLX5_FLOW_STEERING_MODE_SMFS;
	else
		steering->mode = MLX5_FLOW_STEERING_MODE_DMFS;

	steering->fgs_cache = kmem_cache_create("mlx5_fs_fgs",
						sizeof(struct mlx5_flow_group), 0,
						0, NULL);
	steering->ftes_cache = kmem_cache_create("mlx5_fs_ftes", sizeof(struct fs_fte), 0,
						 0, NULL);
	if (!steering->ftes_cache || !steering->fgs_cache) {
		err = -ENOMEM;
		goto err;
	}

	return 0;

err:
	mlx5_cleanup_fs(dev);
	mlx5_fs_core_free(dev);
	return err;
}

+4 −2
Original line number Diff line number Diff line
@@ -298,8 +298,10 @@ int mlx5_flow_namespace_set_peer(struct mlx5_flow_root_namespace *ns,
int mlx5_flow_namespace_set_mode(struct mlx5_flow_namespace *ns,
				 enum mlx5_flow_steering_mode mode);

int mlx5_init_fs(struct mlx5_core_dev *dev);
void mlx5_cleanup_fs(struct mlx5_core_dev *dev);
int mlx5_fs_core_alloc(struct mlx5_core_dev *dev);
void mlx5_fs_core_free(struct mlx5_core_dev *dev);
int mlx5_fs_core_init(struct mlx5_core_dev *dev);
void mlx5_fs_core_cleanup(struct mlx5_core_dev *dev);

int mlx5_fs_egress_acls_init(struct mlx5_core_dev *dev, int total_vports);
void mlx5_fs_egress_acls_cleanup(struct mlx5_core_dev *dev);
Loading