Commit bf56a091 authored by Jakub Kicinski
Browse files

Merge tag 'mlx5-fixes-2022-06-08' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5 fixes 2022-06-08

This series provides bug fixes to mlx5 driver.

* tag 'mlx5-fixes-2022-06-08' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux:
  net/mlx5: fs, fail conflicting actions
  net/mlx5: Rearm the FW tracer after each tracer event
  net/mlx5: E-Switch, pair only capable devices
  net/mlx5e: CT: Fix cleanup of CT before cleanup of TC ct rules
  Revert "net/mlx5e: Allow relaxed ordering over VFs"
  MAINTAINERS: adjust MELLANOX ETHERNET INNOVA DRIVERS to TLS support removal
====================

Link: https://lore.kernel.org/r/20220608185855.19818-1-saeed@kernel.org


Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents a3bd2102 8fa5e7b2
Loading
Loading
Loading
Loading
+0 −1
Original line number Diff line number Diff line
@@ -12703,7 +12703,6 @@ L: netdev@vger.kernel.org
S:	Supported
W:	http://www.mellanox.com
Q:	https://patchwork.kernel.org/project/netdevbpf/list/
F:	drivers/net/ethernet/mellanox/mlx5/core/accel/*
F:	drivers/net/ethernet/mellanox/mlx5/core/en_accel/*
F:	drivers/net/ethernet/mellanox/mlx5/core/fpga/*
F:	include/linux/mlx5/mlx5_ifc_fpga.h
+0 −18
Original line number Diff line number Diff line
@@ -579,17 +579,6 @@ static void *pci_get_other_drvdata(struct device *this, struct device *other)
	return pci_get_drvdata(to_pci_dev(other));
}

/* Bus-iterator callback: resolve @dev to its mlx5_core_dev drvdata and,
 * if it is a bound mlx5 PCI device, defer the match decision to
 * _next_phys_dev(). Returns 0 (keep iterating) for non-mlx5 devices.
 * @data carries the mlx5_core_dev we are searching a peer for.
 */
static int next_phys_dev(struct device *dev, const void *data)
{
	struct mlx5_core_dev *this_dev = (struct mlx5_core_dev *)data;
	struct mlx5_core_dev *other;

	other = pci_get_other_drvdata(this_dev->device, dev);
	return other ? _next_phys_dev(other, data) : 0;
}

static int next_phys_dev_lag(struct device *dev, const void *data)
{
	struct mlx5_core_dev *mdev, *this = (struct mlx5_core_dev *)data;
@@ -623,13 +612,6 @@ static struct mlx5_core_dev *mlx5_get_next_dev(struct mlx5_core_dev *dev,
	return pci_get_drvdata(to_pci_dev(next));
}

/* Must be called with intf_mutex held */
struct mlx5_core_dev *mlx5_get_next_phys_dev(struct mlx5_core_dev *dev)
{
	struct mlx5_core_dev *next;

	lockdep_assert_held(&mlx5_intf_mutex);
	next = mlx5_get_next_dev(dev, &next_phys_dev);
	return next;
}

/* Must be called with intf_mutex held */
struct mlx5_core_dev *mlx5_get_next_phys_dev_lag(struct mlx5_core_dev *dev)
{
+5 −2
Original line number Diff line number Diff line
@@ -675,6 +675,9 @@ static void mlx5_fw_tracer_handle_traces(struct work_struct *work)
	if (!tracer->owner)
		return;

	if (unlikely(!tracer->str_db.loaded))
		goto arm;

	block_count = tracer->buff.size / TRACER_BLOCK_SIZE_BYTE;
	start_offset = tracer->buff.consumer_index * TRACER_BLOCK_SIZE_BYTE;

@@ -732,6 +735,7 @@ static void mlx5_fw_tracer_handle_traces(struct work_struct *work)
						      &tmp_trace_block[TRACES_PER_BLOCK - 1]);
	}

arm:
	mlx5_fw_tracer_arm(dev);
}

@@ -1136,7 +1140,6 @@ static int fw_tracer_event(struct notifier_block *nb, unsigned long action, void
		queue_work(tracer->work_queue, &tracer->ownership_change_work);
		break;
	case MLX5_TRACER_SUBTYPE_TRACES_AVAILABLE:
		if (likely(tracer->str_db.loaded))
		queue_work(tracer->work_queue, &tracer->handle_traces_work);
		break;
	default:
+2 −1
Original line number Diff line number Diff line
@@ -565,7 +565,8 @@ static void mlx5e_build_rx_cq_param(struct mlx5_core_dev *mdev,
static u8 rq_end_pad_mode(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
{
	bool lro_en = params->packet_merge.type == MLX5E_PACKET_MERGE_LRO;
	bool ro = MLX5_CAP_GEN(mdev, relaxed_ordering_write);
	bool ro = pcie_relaxed_ordering_enabled(mdev->pdev) &&
		MLX5_CAP_GEN(mdev, relaxed_ordering_write);

	return ro && lro_en ?
		MLX5_WQ_END_PAD_MODE_NONE : MLX5_WQ_END_PAD_MODE_ALIGN;
+3 −2
Original line number Diff line number Diff line
@@ -38,11 +38,12 @@

/* Program the relaxed-ordering bits of a memory-key context.
 *
 * Relaxed ordering is enabled on the mkey only when BOTH the device
 * capability (relaxed_ordering_read/write) and the PCI subsystem
 * (pcie_relaxed_ordering_enabled()) allow it; RO must stay off when the
 * root complex has it disabled.
 *
 * NOTE: the rendered diff had left the pre-fix unconditional MLX5_SET()
 * calls in place alongside the gated ones, setting each field twice;
 * only the PCI-gated pair is kept here.
 *
 * @mdev: mlx5 core device whose capabilities are consulted
 * @mkc:  mkey context buffer to modify in place
 */
void mlx5e_mkey_set_relaxed_ordering(struct mlx5_core_dev *mdev, void *mkc)
{
	bool ro_pci_enable = pcie_relaxed_ordering_enabled(mdev->pdev);
	bool ro_write = MLX5_CAP_GEN(mdev, relaxed_ordering_write);
	bool ro_read = MLX5_CAP_GEN(mdev, relaxed_ordering_read);

	MLX5_SET(mkc, mkc, relaxed_ordering_read, ro_pci_enable && ro_read);
	MLX5_SET(mkc, mkc, relaxed_ordering_write, ro_pci_enable && ro_write);
}

static int mlx5e_create_mkey(struct mlx5_core_dev *mdev, u32 pdn,
Loading