Commit b9077ef4 authored by Jakub Kicinski
Browse files

Merge tag 'mlx5-fixes-2023-08-07' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5 fixes 2023-08-07

This series provides bug fixes to mlx5 driver.

* tag 'mlx5-fixes-2023-08-07' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux:
  net/mlx5e: Add capability check for vnic counters
  net/mlx5: Reload auxiliary devices in pci error handlers
  net/mlx5: Skip clock update work when device is in error state
  net/mlx5: LAG, Check correct bucket when modifying LAG
  net/mlx5e: Unoffload post act rule when handling FIB events
  net/mlx5: Fix devlink controller number for ECVF
  net/mlx5: Allow 0 for total host VFs
  net/mlx5: Return correct EC_VF function ID
  net/mlx5: DR, Fix wrong allocation of modify hdr pattern
  net/mlx5e: TC, Fix internal port memory leak
  net/mlx5e: Take RTNL lock when needed before calling xdp_set_features()
====================

Link: https://lore.kernel.org/r/20230807212607.50883-1-saeed@kernel.org


Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents 81f3768d 548ee049
Loading
Loading
Loading
Loading
+67 −49
Original line number Diff line number Diff line
@@ -2,6 +2,7 @@
/* Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. */

#include "reporter_vnic.h"
#include "en_stats.h"
#include "devlink.h"

#define VNIC_ENV_GET64(vnic_env_stats, c) \
@@ -36,48 +37,64 @@ int mlx5_reporter_vnic_diagnose_counters(struct mlx5_core_dev *dev,
	if (err)
		return err;

	err = devlink_fmsg_u64_pair_put(fmsg, "total_error_queues",
					VNIC_ENV_GET64(&vnic, total_error_queues));
	if (MLX5_CAP_GEN(dev, vnic_env_queue_counters)) {
		err = devlink_fmsg_u32_pair_put(fmsg, "total_error_queues",
						VNIC_ENV_GET(&vnic, total_error_queues));
		if (err)
			return err;

	err = devlink_fmsg_u64_pair_put(fmsg, "send_queue_priority_update_flow",
					VNIC_ENV_GET64(&vnic, send_queue_priority_update_flow));
		err = devlink_fmsg_u32_pair_put(fmsg, "send_queue_priority_update_flow",
						VNIC_ENV_GET(&vnic,
							     send_queue_priority_update_flow));
		if (err)
			return err;
	}

	err = devlink_fmsg_u64_pair_put(fmsg, "comp_eq_overrun",
					VNIC_ENV_GET64(&vnic, comp_eq_overrun));
	if (MLX5_CAP_GEN(dev, eq_overrun_count)) {
		err = devlink_fmsg_u32_pair_put(fmsg, "comp_eq_overrun",
						VNIC_ENV_GET(&vnic, comp_eq_overrun));
		if (err)
			return err;

	err = devlink_fmsg_u64_pair_put(fmsg, "async_eq_overrun",
					VNIC_ENV_GET64(&vnic, async_eq_overrun));
		err = devlink_fmsg_u32_pair_put(fmsg, "async_eq_overrun",
						VNIC_ENV_GET(&vnic, async_eq_overrun));
		if (err)
			return err;
	}

	err = devlink_fmsg_u64_pair_put(fmsg, "cq_overrun",
					VNIC_ENV_GET64(&vnic, cq_overrun));
	if (MLX5_CAP_GEN(dev, vnic_env_cq_overrun)) {
		err = devlink_fmsg_u32_pair_put(fmsg, "cq_overrun",
						VNIC_ENV_GET(&vnic, cq_overrun));
		if (err)
			return err;
	}

	err = devlink_fmsg_u64_pair_put(fmsg, "invalid_command",
					VNIC_ENV_GET64(&vnic, invalid_command));
	if (MLX5_CAP_GEN(dev, invalid_command_count)) {
		err = devlink_fmsg_u32_pair_put(fmsg, "invalid_command",
						VNIC_ENV_GET(&vnic, invalid_command));
		if (err)
			return err;
	}

	err = devlink_fmsg_u64_pair_put(fmsg, "quota_exceeded_command",
					VNIC_ENV_GET64(&vnic, quota_exceeded_command));
	if (MLX5_CAP_GEN(dev, quota_exceeded_count)) {
		err = devlink_fmsg_u32_pair_put(fmsg, "quota_exceeded_command",
						VNIC_ENV_GET(&vnic, quota_exceeded_command));
		if (err)
			return err;
	}

	if (MLX5_CAP_GEN(dev, nic_receive_steering_discard)) {
		err = devlink_fmsg_u64_pair_put(fmsg, "nic_receive_steering_discard",
					VNIC_ENV_GET64(&vnic, nic_receive_steering_discard));
						VNIC_ENV_GET64(&vnic,
							       nic_receive_steering_discard));
		if (err)
			return err;
	}

	if (MLX5_CAP_GEN(dev, vnic_env_cnt_steering_fail)) {
		err = devlink_fmsg_u64_pair_put(fmsg, "generated_pkt_steering_fail",
					VNIC_ENV_GET64(&vnic, generated_pkt_steering_fail));
						VNIC_ENV_GET64(&vnic,
							       generated_pkt_steering_fail));
		if (err)
			return err;

@@ -85,6 +102,7 @@ int mlx5_reporter_vnic_diagnose_counters(struct mlx5_core_dev *dev,
						VNIC_ENV_GET64(&vnic, handled_pkt_steering_fail));
		if (err)
			return err;
	}

	err = devlink_fmsg_obj_nest_end(fmsg);
	if (err)
+4 −2
Original line number Diff line number Diff line
@@ -1461,10 +1461,12 @@ static void mlx5e_invalidate_encap(struct mlx5e_priv *priv,
		attr = mlx5e_tc_get_encap_attr(flow);
		esw_attr = attr->esw_attr;

		if (flow_flag_test(flow, SLOW))
		if (flow_flag_test(flow, SLOW)) {
			mlx5e_tc_unoffload_from_slow_path(esw, flow);
		else
		} else {
			mlx5e_tc_unoffload_fdb_rules(esw, flow, flow->attr);
			mlx5e_tc_unoffload_flow_post_acts(flow);
		}

		mlx5e_tc_detach_mod_hdr(priv, flow, attr);
		attr->modify_hdr = NULL;
+11 −0
Original line number Diff line number Diff line
@@ -5266,6 +5266,7 @@ void mlx5e_destroy_q_counters(struct mlx5e_priv *priv)
static int mlx5e_nic_init(struct mlx5_core_dev *mdev,
			  struct net_device *netdev)
{
	const bool take_rtnl = netdev->reg_state == NETREG_REGISTERED;
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5e_flow_steering *fs;
	int err;
@@ -5294,9 +5295,19 @@ static int mlx5e_nic_init(struct mlx5_core_dev *mdev,
		mlx5_core_err(mdev, "TLS initialization failed, %d\n", err);

	mlx5e_health_create_reporters(priv);

	/* If netdev is already registered (e.g. move from uplink to nic profile),
	 * RTNL lock must be held before triggering netdev notifiers.
	 */
	if (take_rtnl)
		rtnl_lock();

	/* update XDP supported features */
	mlx5e_set_xdp_feature(netdev);

	if (take_rtnl)
		rtnl_unlock();

	return 0;
}

+13 −8
Original line number Diff line number Diff line
@@ -1943,9 +1943,7 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_flow_attr *attr = flow->attr;
	struct mlx5_esw_flow_attr *esw_attr;

	esw_attr = attr->esw_attr;
	mlx5e_put_flow_tunnel_id(flow);

	remove_unready_flow(flow);
@@ -1966,12 +1964,6 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,

	mlx5_tc_ct_match_del(get_ct_priv(priv), &flow->attr->ct_attr);

	if (esw_attr->int_port)
		mlx5e_tc_int_port_put(mlx5e_get_int_port_priv(priv), esw_attr->int_port);

	if (esw_attr->dest_int_port)
		mlx5e_tc_int_port_put(mlx5e_get_int_port_priv(priv), esw_attr->dest_int_port);

	if (flow_flag_test(flow, L3_TO_L2_DECAP))
		mlx5e_detach_decap(priv, flow);

@@ -4268,6 +4260,7 @@ static void
mlx5_free_flow_attr_actions(struct mlx5e_tc_flow *flow, struct mlx5_flow_attr *attr)
{
	struct mlx5_core_dev *counter_dev = get_flow_counter_dev(flow);
	struct mlx5_esw_flow_attr *esw_attr;

	if (!attr)
		return;
@@ -4285,6 +4278,18 @@ mlx5_free_flow_attr_actions(struct mlx5e_tc_flow *flow, struct mlx5_flow_attr *a
		mlx5e_tc_detach_mod_hdr(flow->priv, flow, attr);
	}

	if (mlx5e_is_eswitch_flow(flow)) {
		esw_attr = attr->esw_attr;

		if (esw_attr->int_port)
			mlx5e_tc_int_port_put(mlx5e_get_int_port_priv(flow->priv),
					      esw_attr->int_port);

		if (esw_attr->dest_int_port)
			mlx5e_tc_int_port_put(mlx5e_get_int_port_priv(flow->priv),
					      esw_attr->dest_int_port);
	}

	mlx5_tc_ct_delete_flow(get_ct_priv(flow->priv), attr);

	free_branch_attr(flow, attr->branch_true);
+1 −1
Original line number Diff line number Diff line
@@ -60,7 +60,7 @@ static struct devlink_port *mlx5_esw_dl_port_alloc(struct mlx5_eswitch *esw, u16
	}  else if (mlx5_core_is_ec_vf_vport(esw->dev, vport_num)) {
		memcpy(dl_port->attrs.switch_id.id, ppid.id, ppid.id_len);
		dl_port->attrs.switch_id.id_len = ppid.id_len;
		devlink_port_attrs_pci_vf_set(dl_port, controller_num, pfnum,
		devlink_port_attrs_pci_vf_set(dl_port, 0, pfnum,
					      vport_num - 1, false);
	}
	return dl_port;
Loading