Commit 32e41702 authored by Jakub Kicinski
Saeed Mahameed says:

====================
mlx5-next-2020-12-02

Low level mlx5 updates required by both netdev and rdma trees.

* tag 'mlx5-next-2020-12-02' of git://git.kernel.org/pub/scm/linux/kernel/git/mellanox/linux:
  net/mlx5: Treat host PF vport as other (non eswitch manager) vport
  net/mlx5: Enable host PF HCA after eswitch is initialized
  net/mlx5: Rename peer_pf to host_pf
  net/mlx5: Make API mlx5_core_is_ecpf accept const pointer
  net/mlx5: Export steering related functions
  net/mlx5: Expose other function ifc bits
  net/mlx5: Expose IP-in-IP TX and RX capability bits
  net/mlx5: Update the hardware interface definition for vhca state
  net/mlx5: Update the list of the PCI supported devices
  net/mlx5: Avoid exposing driver internal command helpers
  net/mlx5: Add ts_cqe_to_dest_cqn related bits
  net/mlx5: Add misc4 to mlx5_ifc_fte_match_param_bits
  net/mlx5: Check dr mask size against mlx5_match_param size
  net/mlx5: Add sampler destination type
  net/mlx5: Add sample offload hardware bits and structures
====================

Link: https://lore.kernel.org/r/20201203011010.213440-1-saeedm@nvidia.com


Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents 6ec1dfb5 617b860c
+0 −3
@@ -2142,7 +2142,6 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
	kvfree(cmd->stats);
	return err;
}
EXPORT_SYMBOL(mlx5_cmd_init);

void mlx5_cmd_cleanup(struct mlx5_core_dev *dev)
{
@@ -2155,11 +2154,9 @@ void mlx5_cmd_cleanup(struct mlx5_core_dev *dev)
	dma_pool_destroy(cmd->pool);
	kvfree(cmd->stats);
}
EXPORT_SYMBOL(mlx5_cmd_cleanup);

void mlx5_cmd_set_state(struct mlx5_core_dev *dev,
			enum mlx5_cmdif_state cmdif_state)
{
	dev->cmd.state = cmdif_state;
}
EXPORT_SYMBOL(mlx5_cmd_set_state);
+3 −0
@@ -247,6 +247,9 @@ const char *parse_fs_dst(struct trace_seq *p,
	case MLX5_FLOW_DESTINATION_TYPE_TIR:
		trace_seq_printf(p, "tir=%u\n", dst->tir_num);
		break;
	case MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER:
		trace_seq_printf(p, "sampler_id=%u\n", dst->sampler_id);
		break;
	case MLX5_FLOW_DESTINATION_TYPE_COUNTER:
		trace_seq_printf(p, "counter_id=%u\n", counter_id);
		break;
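
The FLOW_SAMPLER case added above covers the new destination type from "net/mlx5: Add sampler destination type". Below is a hedged sketch of how a caller that already holds a sampler object ID might steer traffic to it; the helper name and the table/spec/act arguments are placeholders, and only the destination type and the sampler_id member come from this series.

/* Hedged sketch: point a steering rule at an existing flow sampler object.
 * Everything except MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER and the
 * sampler_id member is illustrative.
 */
static struct mlx5_flow_handle *
example_add_sample_rule(struct mlx5_flow_table *ft,
			struct mlx5_flow_spec *spec,
			struct mlx5_flow_act *flow_act,
			u32 sampler_obj_id)
{
	struct mlx5_flow_destination dest = {};

	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER;
	dest.sampler_id = sampler_obj_id;

	return mlx5_add_flow_rules(ft, spec, flow_act, &dest, 1);
}
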
+54 −22
@@ -8,37 +8,66 @@ bool mlx5_read_embedded_cpu(struct mlx5_core_dev *dev)
	return (ioread32be(&dev->iseg->initializing) >> MLX5_ECPU_BIT_NUM) & 1;
}

static int mlx5_peer_pf_init(struct mlx5_core_dev *dev)
static bool mlx5_ecpf_esw_admins_host_pf(const struct mlx5_core_dev *dev)
{
	/* In separate host mode, PF enables itself.
	 * When ECPF is eswitch manager, eswitch enables host PF after
	 * eswitch is setup.
	 */
	return mlx5_core_is_ecpf_esw_manager(dev);
}

int mlx5_cmd_host_pf_enable_hca(struct mlx5_core_dev *dev)
{
	u32 out[MLX5_ST_SZ_DW(enable_hca_out)] = {};
	u32 in[MLX5_ST_SZ_DW(enable_hca_in)]   = {};
	int err;

	MLX5_SET(enable_hca_in, in, opcode, MLX5_CMD_OP_ENABLE_HCA);
	err = mlx5_cmd_exec_in(dev, enable_hca, in);
	MLX5_SET(enable_hca_in, in, function_id, 0);
	MLX5_SET(enable_hca_in, in, embedded_cpu_function, 0);
	return mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
}

int mlx5_cmd_host_pf_disable_hca(struct mlx5_core_dev *dev)
{
	u32 out[MLX5_ST_SZ_DW(disable_hca_out)] = {};
	u32 in[MLX5_ST_SZ_DW(disable_hca_in)]   = {};

	MLX5_SET(disable_hca_in, in, opcode, MLX5_CMD_OP_DISABLE_HCA);
	MLX5_SET(disable_hca_in, in, function_id, 0);
	MLX5_SET(disable_hca_in, in, embedded_cpu_function, 0);
	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}

static int mlx5_host_pf_init(struct mlx5_core_dev *dev)
{
	int err;

	if (mlx5_ecpf_esw_admins_host_pf(dev))
		return 0;

	/* ECPF shall enable HCA for host PF in the same way a PF
	 * does this for its VFs when ECPF is not a eswitch manager.
	 */
	err = mlx5_cmd_host_pf_enable_hca(dev);
	if (err)
		mlx5_core_err(dev, "Failed to enable peer PF HCA err(%d)\n",
			      err);
		mlx5_core_err(dev, "Failed to enable external host PF HCA err(%d)\n", err);

	return err;
}

static void mlx5_peer_pf_cleanup(struct mlx5_core_dev *dev)
static void mlx5_host_pf_cleanup(struct mlx5_core_dev *dev)
{
	u32 in[MLX5_ST_SZ_DW(disable_hca_in)] = {};
	int err;

	MLX5_SET(disable_hca_in, in, opcode, MLX5_CMD_OP_DISABLE_HCA);
	err = mlx5_cmd_exec_in(dev, disable_hca, in);
	if (mlx5_ecpf_esw_admins_host_pf(dev))
		return;

	err = mlx5_cmd_host_pf_disable_hca(dev);
	if (err) {
		mlx5_core_err(dev, "Failed to disable peer PF HCA err(%d)\n",
			      err);
		mlx5_core_err(dev, "Failed to disable external host PF HCA err(%d)\n", err);
		return;
	}

	err = mlx5_wait_for_pages(dev, &dev->priv.peer_pf_pages);
	if (err)
		mlx5_core_warn(dev, "Timeout reclaiming peer PF pages err(%d)\n",
			       err);
}

int mlx5_ec_init(struct mlx5_core_dev *dev)
@@ -46,16 +75,19 @@ int mlx5_ec_init(struct mlx5_core_dev *dev)
	if (!mlx5_core_is_ecpf(dev))
		return 0;

	/* ECPF shall enable HCA for peer PF in the same way a PF
	 * does this for its VFs.
	 */
	return mlx5_peer_pf_init(dev);
	return mlx5_host_pf_init(dev);
}

void mlx5_ec_cleanup(struct mlx5_core_dev *dev)
{
	int err;

	if (!mlx5_core_is_ecpf(dev))
		return;

	mlx5_peer_pf_cleanup(dev);
	mlx5_host_pf_cleanup(dev);

	err = mlx5_wait_for_pages(dev, &dev->priv.host_pf_pages);
	if (err)
		mlx5_core_warn(dev, "Timeout reclaiming external host PF pages err(%d)\n", err);
}
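
With this split, mlx5_ec_init() enables the external host PF HCA only when the ECPF is not the eswitch manager; otherwise enabling is deferred until eswitch setup completes, using the newly exported mlx5_cmd_host_pf_enable_hca(). A hedged sketch of that eswitch-side step follows; the wrapper name is illustrative, and only the exported helper and mlx5_core_is_ecpf_esw_manager() are taken from the tree.

/* Hedged sketch, not the exact in-tree caller: once eswitch setup has
 * completed on the ECPF, enable the external host PF HCA. In separate
 * host mode the host PF enables itself, so there is nothing to do.
 */
static int example_esw_enable_host_pf(struct mlx5_eswitch *esw)
{
	int err;

	if (!mlx5_core_is_ecpf_esw_manager(esw->dev))
		return 0;

	err = mlx5_cmd_host_pf_enable_hca(esw->dev);
	if (err)
		esw_warn(esw->dev,
			 "Failed to enable external host PF HCA err(%d)\n", err);
	return err;
}
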
+3 −0
@@ -17,6 +17,9 @@ bool mlx5_read_embedded_cpu(struct mlx5_core_dev *dev);
int mlx5_ec_init(struct mlx5_core_dev *dev);
void mlx5_ec_cleanup(struct mlx5_core_dev *dev);

int mlx5_cmd_host_pf_enable_hca(struct mlx5_core_dev *dev);
int mlx5_cmd_host_pf_disable_hca(struct mlx5_core_dev *dev);

#else  /* CONFIG_MLX5_ESWITCH */

static inline bool
+4 −1
@@ -8,6 +8,7 @@
struct mlx5_flow_table *
esw_acl_table_create(struct mlx5_eswitch *esw, u16 vport_num, int ns, int size)
{
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *root_ns;
	struct mlx5_flow_table *acl;
@@ -33,7 +34,9 @@ esw_acl_table_create(struct mlx5_eswitch *esw, u16 vport_num, int ns, int size)
		return ERR_PTR(-EOPNOTSUPP);
	}

	acl = mlx5_create_vport_flow_table(root_ns, 0, size, 0, vport_num);
	ft_attr.max_fte = size;
	ft_attr.flags = MLX5_FLOW_TABLE_OTHER_VPORT;
	acl = mlx5_create_vport_flow_table(root_ns, &ft_attr, vport_num);
	if (IS_ERR(acl)) {
		err = PTR_ERR(acl);
		esw_warn(dev, "vport[%d] create %s ACL table, err(%d)\n", vport_num,
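
After this change, callers describe the ACL table through mlx5_flow_table_attr and flag it as belonging to another vport instead of passing priority, size and level as separate arguments. A minimal sketch of the new call pattern, with a placeholder table size:

/* Hedged sketch of the attr-based vport flow table creation shown above;
 * the max_fte value is a placeholder.
 */
static struct mlx5_flow_table *
example_create_other_vport_table(struct mlx5_flow_namespace *root_ns,
				 u16 vport_num)
{
	struct mlx5_flow_table_attr ft_attr = {};

	ft_attr.max_fte = 16;				/* placeholder table size */
	ft_attr.flags = MLX5_FLOW_TABLE_OTHER_VPORT;	/* table is for another vport */

	return mlx5_create_vport_flow_table(root_ns, &ft_attr, vport_num);
}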