Commit 9f6708a6 authored by Paolo Abeni
Browse files

Merge tag 'mlx5-updates-2023-08-22' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5-updates-2023-08-22

1) Patches #1..#13 From Jiri:

The goal of this patchset is to make the SF code cleaner.

Benefit from previously introduced devlink_port struct containerization
to avoid unnecessary lookups in devlink port ops.

Also, benefit from the devlink locking changes and avoid unnecessary
reference counting.

2) Patches #14,#15:

Add the ability to configure both UDP and TCP upper-protocol selectors in
the RX and TX directions.

* tag 'mlx5-updates-2023-08-22' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux:
  net/mlx5e: Support IPsec upper TCP protocol selector
  net/mlx5e: Support IPsec upper protocol selector field offload for RX
  net/mlx5: Store vport in struct mlx5_devlink_port and use it in port ops
  net/mlx5: Check vhca_resource_manager capability in each op and add extack msg
  net/mlx5: Relax mlx5_devlink_eswitch_get() return value checking
  net/mlx5: Return -EOPNOTSUPP in mlx5_devlink_port_fn_migratable_set() directly
  net/mlx5: Reduce number of vport lookups passing vport pointer instead of index
  net/mlx5: Embed struct devlink_port into driver structure
  net/mlx5: Don't register ops for non-PF/VF/SF port and avoid checks in ops
  net/mlx5: Remove no longer used mlx5_esw_offloads_sf_vport_enable/disable()
  net/mlx5: Introduce mlx5_eswitch_load/unload_sf_vport() and use it from SF code
  net/mlx5: Allow mlx5_esw_offloads_devlink_port_register() to register SFs
  net/mlx5: Push devlink port PF/VF init/cleanup calls out of devlink_port_register/unregister()
  net/mlx5: Push out SF devlink port init and cleanup code to separate helpers
  net/mlx5: Rework devlink port alloc/free into init/cleanup
====================

Link: https://lore.kernel.org/all/20230823051012.162483-1-saeed@kernel.org/


Signed-off-by: Paolo Abeni <pabeni@redhat.com>
parents 23c167af b8c697e1
Loading
Loading
Loading
Loading
+7 −6
Original line number Diff line number Diff line
@@ -440,9 +440,9 @@ static int mlx5e_xfrm_validate_state(struct mlx5_core_dev *mdev,
		return -EINVAL;
	}

	if (x->sel.proto != IPPROTO_IP &&
	    (x->sel.proto != IPPROTO_UDP || x->xso.dir != XFRM_DEV_OFFLOAD_OUT)) {
		NL_SET_ERR_MSG_MOD(extack, "Device does not support upper protocol other than UDP, and only Tx direction");
	if (x->sel.proto != IPPROTO_IP && x->sel.proto != IPPROTO_UDP &&
	    x->sel.proto != IPPROTO_TCP) {
		NL_SET_ERR_MSG_MOD(extack, "Device does not support upper protocol other than TCP/UDP");
		return -EINVAL;
	}

@@ -983,9 +983,10 @@ static int mlx5e_xfrm_validate_policy(struct mlx5_core_dev *mdev,
		return -EINVAL;
	}

	if (sel->proto != IPPROTO_IP &&
	    (sel->proto != IPPROTO_UDP || x->xdo.dir != XFRM_DEV_OFFLOAD_OUT)) {
		NL_SET_ERR_MSG_MOD(extack, "Device does not support upper protocol other than UDP, and only Tx direction");
	if (x->selector.proto != IPPROTO_IP &&
	    x->selector.proto != IPPROTO_UDP &&
	    x->selector.proto != IPPROTO_TCP) {
		NL_SET_ERR_MSG_MOD(extack, "Device does not support upper protocol other than TCP/UDP");
		return -EINVAL;
	}

+33 −12
Original line number Diff line number Diff line
@@ -936,23 +936,42 @@ static void setup_fte_reg_c4(struct mlx5_flow_spec *spec, u32 reqid)

static void setup_fte_upper_proto_match(struct mlx5_flow_spec *spec, struct upspec *upspec)
{
	if (upspec->proto != IPPROTO_UDP)
	switch (upspec->proto) {
	case IPPROTO_UDP:
		if (upspec->dport) {
			MLX5_SET(fte_match_set_lyr_2_4, spec->match_criteria,
				 udp_dport, upspec->dport_mask);
			MLX5_SET(fte_match_set_lyr_2_4, spec->match_value,
				 udp_dport, upspec->dport);
		}
		if (upspec->sport) {
			MLX5_SET(fte_match_set_lyr_2_4, spec->match_criteria,
				 udp_sport, upspec->sport_mask);
			MLX5_SET(fte_match_set_lyr_2_4, spec->match_value,
				 udp_sport, upspec->sport);
		}
		break;
	case IPPROTO_TCP:
		if (upspec->dport) {
			MLX5_SET(fte_match_set_lyr_2_4, spec->match_criteria,
				 tcp_dport, upspec->dport_mask);
			MLX5_SET(fte_match_set_lyr_2_4, spec->match_value,
				 tcp_dport, upspec->dport);
		}
		if (upspec->sport) {
			MLX5_SET(fte_match_set_lyr_2_4, spec->match_criteria,
				 tcp_sport, upspec->sport_mask);
			MLX5_SET(fte_match_set_lyr_2_4, spec->match_value,
				 tcp_sport, upspec->sport);
		}
		break;
	default:
		return;
	}

	spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, spec->match_criteria, ip_protocol);
	MLX5_SET(fte_match_set_lyr_2_4, spec->match_value, ip_protocol, upspec->proto);
	if (upspec->dport) {
		MLX5_SET(fte_match_set_lyr_2_4, spec->match_criteria, udp_dport,
			 upspec->dport_mask);
		MLX5_SET(fte_match_set_lyr_2_4, spec->match_value, udp_dport, upspec->dport);
	}

	if (upspec->sport) {
		MLX5_SET(fte_match_set_lyr_2_4, spec->match_criteria, udp_sport,
			 upspec->sport_mask);
		MLX5_SET(fte_match_set_lyr_2_4, spec->match_value, udp_sport, upspec->sport);
	}
}

static enum mlx5_flow_namespace_type ipsec_fs_get_ns(struct mlx5e_ipsec *ipsec,
@@ -1243,6 +1262,7 @@ static int rx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
	setup_fte_spi(spec, attrs->spi);
	setup_fte_esp(spec);
	setup_fte_no_frags(spec);
	setup_fte_upper_proto_match(spec, &attrs->upspec);

	if (rx != ipsec->rx_esw)
		err = setup_modify_header(ipsec, attrs->type,
@@ -1519,6 +1539,7 @@ static int rx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry)
		setup_fte_addr6(spec, attrs->saddr.a6, attrs->daddr.a6);

	setup_fte_no_frags(spec);
	setup_fte_upper_proto_match(spec, &attrs->upspec);

	switch (attrs->action) {
	case XFRM_POLICY_ALLOW:
+80 −89
Original line number Diff line number Diff line
@@ -21,19 +21,16 @@ static bool mlx5_esw_devlink_port_supported(struct mlx5_eswitch *esw, u16 vport_
	       mlx5_core_is_ec_vf_vport(esw->dev, vport_num);
}

static struct devlink_port *mlx5_esw_dl_port_alloc(struct mlx5_eswitch *esw, u16 vport_num)
static void mlx5_esw_offloads_pf_vf_devlink_port_attrs_set(struct mlx5_eswitch *esw,
							   u16 vport_num,
							   struct devlink_port *dl_port)
{
	struct mlx5_core_dev *dev = esw->dev;
	struct netdev_phys_item_id ppid = {};
	struct devlink_port *dl_port;
	u32 controller_num = 0;
	bool external;
	u16 pfnum;

	dl_port = kzalloc(sizeof(*dl_port), GFP_KERNEL);
	if (!dl_port)
		return NULL;

	mlx5_esw_get_port_parent_id(dev, &ppid);
	pfnum = mlx5_get_dev_index(dev);
	external = mlx5_core_is_ecpf_esw_manager(dev);
@@ -55,12 +52,37 @@ static struct devlink_port *mlx5_esw_dl_port_alloc(struct mlx5_eswitch *esw, u16
		devlink_port_attrs_pci_vf_set(dl_port, 0, pfnum,
					      vport_num - 1, false);
	}
	return dl_port;
}

static void mlx5_esw_dl_port_free(struct devlink_port *dl_port)
int mlx5_esw_offloads_pf_vf_devlink_port_init(struct mlx5_eswitch *esw,
					      struct mlx5_vport *vport)
{
	kfree(dl_port);
	struct mlx5_devlink_port *dl_port;
	u16 vport_num = vport->vport;

	if (!mlx5_esw_devlink_port_supported(esw, vport_num))
		return 0;

	dl_port = kzalloc(sizeof(*dl_port), GFP_KERNEL);
	if (!dl_port)
		return -ENOMEM;

	mlx5_esw_offloads_pf_vf_devlink_port_attrs_set(esw, vport_num,
						       &dl_port->dl_port);

	vport->dl_port = dl_port;
	mlx5_devlink_port_init(dl_port, vport);
	return 0;
}

/* Free the mlx5_devlink_port allocated by
 * mlx5_esw_offloads_pf_vf_devlink_port_init() and clear the vport's
 * reference to it. No-op when no port was ever initialized for this
 * vport (init skips unsupported vports).
 */
void mlx5_esw_offloads_pf_vf_devlink_port_cleanup(struct mlx5_eswitch *esw,
						  struct mlx5_vport *vport)
{
	if (!vport->dl_port)
		return;

	kfree(vport->dl_port);
	vport->dl_port = NULL;
}

static const struct devlink_port_ops mlx5_esw_pf_vf_dl_port_ops = {
@@ -72,74 +94,37 @@ static const struct devlink_port_ops mlx5_esw_pf_vf_dl_port_ops = {
	.port_fn_migratable_set = mlx5_devlink_port_fn_migratable_set,
};

int mlx5_esw_offloads_devlink_port_register(struct mlx5_eswitch *esw, u16 vport_num)
static void mlx5_esw_offloads_sf_devlink_port_attrs_set(struct mlx5_eswitch *esw,
							struct devlink_port *dl_port,
							u32 controller, u32 sfnum)
{
	struct mlx5_core_dev *dev = esw->dev;
	struct devlink_port *dl_port;
	unsigned int dl_port_index;
	struct mlx5_vport *vport;
	struct devlink *devlink;
	int err;

	if (!mlx5_esw_devlink_port_supported(esw, vport_num))
		return 0;

	vport = mlx5_eswitch_get_vport(esw, vport_num);
	if (IS_ERR(vport))
		return PTR_ERR(vport);

	dl_port = mlx5_esw_dl_port_alloc(esw, vport_num);
	if (!dl_port)
		return -ENOMEM;
	struct netdev_phys_item_id ppid = {};
	u16 pfnum;

	devlink = priv_to_devlink(dev);
	dl_port_index = mlx5_esw_vport_to_devlink_port_index(dev, vport_num);
	err = devl_port_register_with_ops(devlink, dl_port, dl_port_index,
					  &mlx5_esw_pf_vf_dl_port_ops);
	if (err)
		goto reg_err;
	pfnum = mlx5_get_dev_index(dev);
	mlx5_esw_get_port_parent_id(dev, &ppid);
	memcpy(dl_port->attrs.switch_id.id, &ppid.id[0], ppid.id_len);
	dl_port->attrs.switch_id.id_len = ppid.id_len;
	devlink_port_attrs_pci_sf_set(dl_port, controller, pfnum, sfnum, !!controller);
}

	err = devl_rate_leaf_create(dl_port, vport, NULL);
	if (err)
		goto rate_err;
int mlx5_esw_offloads_sf_devlink_port_init(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
					   struct mlx5_devlink_port *dl_port,
					   u32 controller, u32 sfnum)
{
	mlx5_esw_offloads_sf_devlink_port_attrs_set(esw, &dl_port->dl_port, controller, sfnum);

	vport->dl_port = dl_port;
	mlx5_devlink_port_init(dl_port, vport);
	return 0;

rate_err:
	devl_port_unregister(dl_port);
reg_err:
	mlx5_esw_dl_port_free(dl_port);
	return err;
}

void mlx5_esw_offloads_devlink_port_unregister(struct mlx5_eswitch *esw, u16 vport_num)
void mlx5_esw_offloads_sf_devlink_port_cleanup(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
{
	struct mlx5_vport *vport;

	if (!mlx5_esw_devlink_port_supported(esw, vport_num))
		return;

	vport = mlx5_eswitch_get_vport(esw, vport_num);
	if (IS_ERR(vport))
		return;

	mlx5_esw_qos_vport_update_group(esw, vport, NULL, NULL);
	devl_rate_leaf_destroy(vport->dl_port);

	devl_port_unregister(vport->dl_port);
	mlx5_esw_dl_port_free(vport->dl_port);
	vport->dl_port = NULL;
}

/* Look up the devlink_port registered for @vport_num.
 * Returns an ERR_PTR when the vport number is invalid; otherwise returns
 * vport->dl_port (which may be NULL if no port was registered —
 * NOTE(review): callers appear to tolerate NULL here, confirm).
 */
struct devlink_port *mlx5_esw_offloads_devlink_port(struct mlx5_eswitch *esw, u16 vport_num)
{
	struct mlx5_vport *vport;

	vport = mlx5_eswitch_get_vport(esw, vport_num);
	return IS_ERR(vport) ? ERR_CAST(vport) : vport->dl_port;
}

static const struct devlink_port_ops mlx5_esw_dl_sf_port_ops = {
#ifdef CONFIG_MLX5_SF_MANAGER
	.port_del = mlx5_devlink_sf_port_del,
@@ -154,56 +139,62 @@ static const struct devlink_port_ops mlx5_esw_dl_sf_port_ops = {
#endif
};

int mlx5_esw_devlink_sf_port_register(struct mlx5_eswitch *esw, struct devlink_port *dl_port,
				      u16 vport_num, u32 controller, u32 sfnum)
int mlx5_esw_offloads_devlink_port_register(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
{
	struct mlx5_core_dev *dev = esw->dev;
	struct netdev_phys_item_id ppid = {};
	const struct devlink_port_ops *ops;
	struct mlx5_devlink_port *dl_port;
	u16 vport_num = vport->vport;
	unsigned int dl_port_index;
	struct mlx5_vport *vport;
	struct devlink *devlink;
	u16 pfnum;
	int err;

	vport = mlx5_eswitch_get_vport(esw, vport_num);
	if (IS_ERR(vport))
		return PTR_ERR(vport);
	dl_port = vport->dl_port;
	if (!dl_port)
		return 0;

	if (mlx5_esw_is_sf_vport(esw, vport_num))
		ops = &mlx5_esw_dl_sf_port_ops;
	else if (mlx5_eswitch_is_pf_vf_vport(esw, vport_num))
		ops = &mlx5_esw_pf_vf_dl_port_ops;
	else
		ops = NULL;

	pfnum = mlx5_get_dev_index(dev);
	mlx5_esw_get_port_parent_id(dev, &ppid);
	memcpy(dl_port->attrs.switch_id.id, &ppid.id[0], ppid.id_len);
	dl_port->attrs.switch_id.id_len = ppid.id_len;
	devlink_port_attrs_pci_sf_set(dl_port, controller, pfnum, sfnum, !!controller);
	devlink = priv_to_devlink(dev);
	dl_port_index = mlx5_esw_vport_to_devlink_port_index(dev, vport_num);
	err = devl_port_register_with_ops(devlink, dl_port, dl_port_index,
					  &mlx5_esw_dl_sf_port_ops);
	err = devl_port_register_with_ops(devlink, &dl_port->dl_port, dl_port_index, ops);
	if (err)
		return err;

	err = devl_rate_leaf_create(dl_port, vport, NULL);
	err = devl_rate_leaf_create(&dl_port->dl_port, vport, NULL);
	if (err)
		goto rate_err;

	vport->dl_port = dl_port;
	return 0;

rate_err:
	devl_port_unregister(dl_port);
	devl_port_unregister(&dl_port->dl_port);
	return err;
}

void mlx5_esw_devlink_sf_port_unregister(struct mlx5_eswitch *esw, u16 vport_num)
void mlx5_esw_offloads_devlink_port_unregister(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
{
	struct mlx5_vport *vport;
	struct mlx5_devlink_port *dl_port;

	vport = mlx5_eswitch_get_vport(esw, vport_num);
	if (IS_ERR(vport))
	if (!vport->dl_port)
		return;
	dl_port = vport->dl_port;

	mlx5_esw_qos_vport_update_group(esw, vport, NULL, NULL);
	devl_rate_leaf_destroy(vport->dl_port);
	devl_rate_leaf_destroy(&dl_port->dl_port);

	devl_port_unregister(vport->dl_port);
	vport->dl_port = NULL;
	devl_port_unregister(&dl_port->dl_port);
}

/* Look up the embedded devlink_port for @vport_num.
 * Returns an ERR_PTR when the vport number is invalid; otherwise returns
 * the devlink_port embedded in the vport's mlx5_devlink_port.
 * NOTE(review): dereferences vport->dl_port unconditionally — assumes a
 * port was initialized for every valid vport queried here; confirm callers.
 */
struct devlink_port *mlx5_esw_offloads_devlink_port(struct mlx5_eswitch *esw, u16 vport_num)
{
	struct mlx5_vport *vport;

	vport = mlx5_eswitch_get_vport(esw, vport_num);
	return IS_ERR(vport) ? ERR_CAST(vport) : &vport->dl_port->dl_port;
}
+122 −36
Original line number Diff line number Diff line
@@ -77,18 +77,31 @@ static int mlx5_eswitch_check(const struct mlx5_core_dev *dev)
	return 0;
}

struct mlx5_eswitch *mlx5_devlink_eswitch_get(struct devlink *devlink)
static struct mlx5_eswitch *__mlx5_devlink_eswitch_get(struct devlink *devlink, bool check)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	int err;

	if (check) {
		err = mlx5_eswitch_check(dev);
		if (err)
			return ERR_PTR(err);
	}

	return dev->priv.eswitch;
}

/* Get the eswitch for a devlink instance, validating eswitch support
 * first (check == true); returns ERR_PTR on unsupported devices, hence
 * __must_check.
 */
struct mlx5_eswitch *__must_check
mlx5_devlink_eswitch_get(struct devlink *devlink)
{
	return __mlx5_devlink_eswitch_get(devlink, true);
}

/* Get the eswitch without the mlx5_eswitch_check() validation — never
 * returns an ERR_PTR; for callers that have already established support.
 */
struct mlx5_eswitch *mlx5_devlink_eswitch_nocheck_get(struct devlink *devlink)
{
	return __mlx5_devlink_eswitch_get(devlink, false);
}

struct mlx5_vport *__must_check
mlx5_eswitch_get_vport(struct mlx5_eswitch *esw, u16 vport_num)
{
@@ -882,16 +895,12 @@ static void esw_vport_cleanup(struct mlx5_eswitch *esw, struct mlx5_vport *vport
	esw_vport_cleanup_acl(esw, vport);
}

int mlx5_esw_vport_enable(struct mlx5_eswitch *esw, u16 vport_num,
int mlx5_esw_vport_enable(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
			  enum mlx5_eswitch_vport_event enabled_events)
{
	struct mlx5_vport *vport;
	u16 vport_num = vport->vport;
	int ret;

	vport = mlx5_eswitch_get_vport(esw, vport_num);
	if (IS_ERR(vport))
		return PTR_ERR(vport);

	mutex_lock(&esw->state_lock);
	WARN_ON(vport->enabled);

@@ -912,7 +921,7 @@ int mlx5_esw_vport_enable(struct mlx5_eswitch *esw, u16 vport_num,
	    (!vport_num && mlx5_core_is_ecpf(esw->dev)))
		vport->info.trusted = true;

	if (!mlx5_esw_is_manager_vport(esw, vport->vport) &&
	if (!mlx5_esw_is_manager_vport(esw, vport_num) &&
	    MLX5_CAP_GEN(esw->dev, vhca_resource_manager)) {
		ret = mlx5_esw_vport_vhca_id_set(esw, vport_num);
		if (ret)
@@ -939,15 +948,12 @@ int mlx5_esw_vport_enable(struct mlx5_eswitch *esw, u16 vport_num,
	return ret;
}

void mlx5_esw_vport_disable(struct mlx5_eswitch *esw, u16 vport_num)
void mlx5_esw_vport_disable(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
{
	struct mlx5_vport *vport;

	vport = mlx5_eswitch_get_vport(esw, vport_num);
	if (IS_ERR(vport))
		return;
	u16 vport_num = vport->vport;

	mutex_lock(&esw->state_lock);

	if (!vport->enabled)
		goto done;

@@ -957,9 +963,9 @@ void mlx5_esw_vport_disable(struct mlx5_eswitch *esw, u16 vport_num)

	/* Disable events from this vport */
	if (MLX5_CAP_GEN(esw->dev, log_max_l2_table))
		arm_vport_context_events_cmd(esw->dev, vport->vport, 0);
		arm_vport_context_events_cmd(esw->dev, vport_num, 0);

	if (!mlx5_esw_is_manager_vport(esw, vport->vport) &&
	if (!mlx5_esw_is_manager_vport(esw, vport_num) &&
	    MLX5_CAP_GEN(esw->dev, vhca_resource_manager))
		mlx5_esw_vport_vhca_id_clear(esw, vport_num);

@@ -1068,30 +1074,104 @@ static void mlx5_eswitch_clear_ec_vf_vports_info(struct mlx5_eswitch *esw)
	}
}

static int mlx5_eswitch_load_vport(struct mlx5_eswitch *esw, u16 vport_num,
static int mlx5_eswitch_load_vport(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
				   enum mlx5_eswitch_vport_event enabled_events)
{
	int err;

	err = mlx5_esw_vport_enable(esw, vport_num, enabled_events);
	err = mlx5_esw_vport_enable(esw, vport, enabled_events);
	if (err)
		return err;

	err = mlx5_esw_offloads_load_rep(esw, vport_num);
	err = mlx5_esw_offloads_load_rep(esw, vport);
	if (err)
		goto err_rep;

	return err;

err_rep:
	mlx5_esw_vport_disable(esw, vport_num);
	mlx5_esw_vport_disable(esw, vport);
	return err;
}

static void mlx5_eswitch_unload_vport(struct mlx5_eswitch *esw, u16 vport_num)
static void mlx5_eswitch_unload_vport(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
{
	mlx5_esw_offloads_unload_rep(esw, vport_num);
	mlx5_esw_vport_disable(esw, vport_num);
	mlx5_esw_offloads_unload_rep(esw, vport);
	mlx5_esw_vport_disable(esw, vport);
}

/* Load a PF/VF vport: resolve the vport struct from its number, set up
 * its PF/VF representor, then enable the vport and load its rep.
 * On failure the representor init is unwound; returns 0 or a negative
 * errno.
 */
static int mlx5_eswitch_load_pf_vf_vport(struct mlx5_eswitch *esw, u16 vport_num,
					 enum mlx5_eswitch_vport_event enabled_events)
{
	struct mlx5_vport *vport;
	int err;

	vport = mlx5_eswitch_get_vport(esw, vport_num);
	if (IS_ERR(vport))
		return PTR_ERR(vport);

	/* Representor init must precede vport enable; see err_load unwind. */
	err = mlx5_esw_offloads_init_pf_vf_rep(esw, vport);
	if (err)
		return err;

	err = mlx5_eswitch_load_vport(esw, vport, enabled_events);
	if (err)
		goto err_load;
	return 0;

err_load:
	mlx5_esw_offloads_cleanup_pf_vf_rep(esw, vport);
	return err;
}

/* Reverse of mlx5_eswitch_load_pf_vf_vport(): unload the vport (rep +
 * disable) and then tear down its PF/VF representor. Silently returns
 * if the vport number is invalid.
 */
static void mlx5_eswitch_unload_pf_vf_vport(struct mlx5_eswitch *esw, u16 vport_num)
{
	struct mlx5_vport *vport;

	vport = mlx5_eswitch_get_vport(esw, vport_num);
	if (IS_ERR(vport))
		return;

	mlx5_eswitch_unload_vport(esw, vport);
	mlx5_esw_offloads_cleanup_pf_vf_rep(esw, vport);
}

/* Load a sub-function (SF) vport: resolve the vport from its number,
 * initialize the SF representor (passing through the caller-provided
 * devlink port, controller and SF number), then enable the vport and
 * load its rep. Unwinds the rep init on failure; returns 0 or a
 * negative errno.
 */
int mlx5_eswitch_load_sf_vport(struct mlx5_eswitch *esw, u16 vport_num,
			       enum mlx5_eswitch_vport_event enabled_events,
			       struct mlx5_devlink_port *dl_port, u32 controller, u32 sfnum)
{
	struct mlx5_vport *vport;
	int err;

	vport = mlx5_eswitch_get_vport(esw, vport_num);
	if (IS_ERR(vport))
		return PTR_ERR(vport);

	err = mlx5_esw_offloads_init_sf_rep(esw, vport, dl_port, controller, sfnum);
	if (err)
		return err;

	err = mlx5_eswitch_load_vport(esw, vport, enabled_events);
	if (err)
		goto err_load;

	return 0;

err_load:
	mlx5_esw_offloads_cleanup_sf_rep(esw, vport);
	return err;
}

/* Reverse of mlx5_eswitch_load_sf_vport(): unload the SF vport and tear
 * down its representor. Silently returns if the vport number is invalid.
 */
void mlx5_eswitch_unload_sf_vport(struct mlx5_eswitch *esw, u16 vport_num)
{
	struct mlx5_vport *vport;

	vport = mlx5_eswitch_get_vport(esw, vport_num);
	if (IS_ERR(vport))
		return;

	mlx5_eswitch_unload_vport(esw, vport);
	mlx5_esw_offloads_cleanup_sf_rep(esw, vport);
}

void mlx5_eswitch_unload_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs)
@@ -1102,7 +1182,7 @@ void mlx5_eswitch_unload_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs)
	mlx5_esw_for_each_vf_vport(esw, i, vport, num_vfs) {
		if (!vport->enabled)
			continue;
		mlx5_eswitch_unload_vport(esw, vport->vport);
		mlx5_eswitch_unload_pf_vf_vport(esw, vport->vport);
	}
}

@@ -1115,7 +1195,7 @@ static void mlx5_eswitch_unload_ec_vf_vports(struct mlx5_eswitch *esw,
	mlx5_esw_for_each_ec_vf_vport(esw, i, vport, num_ec_vfs) {
		if (!vport->enabled)
			continue;
		mlx5_eswitch_unload_vport(esw, vport->vport);
		mlx5_eswitch_unload_pf_vf_vport(esw, vport->vport);
	}
}

@@ -1127,7 +1207,7 @@ int mlx5_eswitch_load_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs,
	int err;

	mlx5_esw_for_each_vf_vport(esw, i, vport, num_vfs) {
		err = mlx5_eswitch_load_vport(esw, vport->vport, enabled_events);
		err = mlx5_eswitch_load_pf_vf_vport(esw, vport->vport, enabled_events);
		if (err)
			goto vf_err;
	}
@@ -1147,7 +1227,7 @@ static int mlx5_eswitch_load_ec_vf_vports(struct mlx5_eswitch *esw, u16 num_ec_v
	int err;

	mlx5_esw_for_each_ec_vf_vport(esw, i, vport, num_ec_vfs) {
		err = mlx5_eswitch_load_vport(esw, vport->vport, enabled_events);
		err = mlx5_eswitch_load_pf_vf_vport(esw, vport->vport, enabled_events);
		if (err)
			goto vf_err;
	}
@@ -1189,7 +1269,7 @@ mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw,
	int ret;

	/* Enable PF vport */
	ret = mlx5_eswitch_load_vport(esw, MLX5_VPORT_PF, enabled_events);
	ret = mlx5_eswitch_load_pf_vf_vport(esw, MLX5_VPORT_PF, enabled_events);
	if (ret)
		return ret;

@@ -1200,7 +1280,7 @@ mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw,

	/* Enable ECPF vport */
	if (mlx5_ecpf_vport_exists(esw->dev)) {
		ret = mlx5_eswitch_load_vport(esw, MLX5_VPORT_ECPF, enabled_events);
		ret = mlx5_eswitch_load_pf_vf_vport(esw, MLX5_VPORT_ECPF, enabled_events);
		if (ret)
			goto ecpf_err;
		if (mlx5_core_ec_sriov_enabled(esw->dev)) {
@@ -1223,11 +1303,11 @@ mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw,
		mlx5_eswitch_unload_ec_vf_vports(esw, esw->esw_funcs.num_ec_vfs);
ec_vf_err:
	if (mlx5_ecpf_vport_exists(esw->dev))
		mlx5_eswitch_unload_vport(esw, MLX5_VPORT_ECPF);
		mlx5_eswitch_unload_pf_vf_vport(esw, MLX5_VPORT_ECPF);
ecpf_err:
	host_pf_disable_hca(esw->dev);
pf_hca_err:
	mlx5_eswitch_unload_vport(esw, MLX5_VPORT_PF);
	mlx5_eswitch_unload_pf_vf_vport(esw, MLX5_VPORT_PF);
	return ret;
}

@@ -1241,11 +1321,11 @@ void mlx5_eswitch_disable_pf_vf_vports(struct mlx5_eswitch *esw)
	if (mlx5_ecpf_vport_exists(esw->dev)) {
		if (mlx5_core_ec_sriov_enabled(esw->dev))
			mlx5_eswitch_unload_ec_vf_vports(esw, esw->esw_funcs.num_vfs);
		mlx5_eswitch_unload_vport(esw, MLX5_VPORT_ECPF);
		mlx5_eswitch_unload_pf_vf_vport(esw, MLX5_VPORT_ECPF);
	}

	host_pf_disable_hca(esw->dev);
	mlx5_eswitch_unload_vport(esw, MLX5_VPORT_PF);
	mlx5_eswitch_unload_pf_vf_vport(esw, MLX5_VPORT_PF);
}

static void mlx5_eswitch_get_devlink_param(struct mlx5_eswitch *esw)
@@ -1918,6 +1998,12 @@ bool mlx5_eswitch_is_vf_vport(struct mlx5_eswitch *esw, u16 vport_num)
	return mlx5_esw_check_port_type(esw, vport_num, MLX5_ESW_VPT_VF);
}

/* True when @vport_num is the PF vport or any VF vport (i.e. not an SF
 * or other special vport).
 */
bool mlx5_eswitch_is_pf_vf_vport(struct mlx5_eswitch *esw, u16 vport_num)
{
	return vport_num == MLX5_VPORT_PF ||
		mlx5_eswitch_is_vf_vport(esw, vport_num);
}

bool mlx5_esw_is_sf_vport(struct mlx5_eswitch *esw, u16 vport_num)
{
	return mlx5_esw_check_port_type(esw, vport_num, MLX5_ESW_VPT_SF);
+59 −15
Original line number Diff line number Diff line
@@ -172,6 +172,29 @@ enum mlx5_eswitch_vport_event {
	MLX5_VPORT_PROMISC_CHANGE = BIT(3),
};

struct mlx5_vport;

/* Driver wrapper around devlink_port: embedding dl_port lets port ops
 * recover the owning mlx5_vport via container_of() instead of doing a
 * vport lookup by number.
 */
struct mlx5_devlink_port {
	struct devlink_port dl_port;	/* embedded core devlink port */
	struct mlx5_vport *vport;	/* back-pointer, set by mlx5_devlink_port_init() */
};

/* Record the owning vport on the wrapper so port-ops callbacks can
 * retrieve it later via mlx5_devlink_port_vport_get().
 */
static inline void mlx5_devlink_port_init(struct mlx5_devlink_port *dl_port,
					  struct mlx5_vport *vport)
{
	dl_port->vport = vport;
}

/* Map an embedded devlink_port back to its mlx5_devlink_port wrapper.
 * Valid only for ports embedded in struct mlx5_devlink_port.
 */
static inline struct mlx5_devlink_port *mlx5_devlink_port_get(struct devlink_port *dl_port)
{
	return container_of(dl_port, struct mlx5_devlink_port, dl_port);
}

/* Convenience accessor: the mlx5_vport that owns @dl_port, as stored by
 * mlx5_devlink_port_init().
 */
static inline struct mlx5_vport *mlx5_devlink_port_vport_get(struct devlink_port *dl_port)
{
	return mlx5_devlink_port_get(dl_port)->vport;
}

struct mlx5_vport {
	struct mlx5_core_dev    *dev;
	struct hlist_head       uc_list[MLX5_L2_ADDR_HASH_SIZE];
@@ -200,7 +223,7 @@ struct mlx5_vport {
	bool                    enabled;
	enum mlx5_eswitch_vport_event enabled_events;
	int index;
	struct devlink_port *dl_port;
	struct mlx5_devlink_port *dl_port;
};

struct mlx5_esw_indir_table;
@@ -675,11 +698,16 @@ void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw);
			  MLX5_CAP_GEN_2((esw->dev), ec_vf_vport_base) +\
			  (last) - 1)

struct mlx5_eswitch *mlx5_devlink_eswitch_get(struct devlink *devlink);
struct mlx5_eswitch *__must_check
mlx5_devlink_eswitch_get(struct devlink *devlink);

struct mlx5_eswitch *mlx5_devlink_eswitch_nocheck_get(struct devlink *devlink);

struct mlx5_vport *__must_check
mlx5_eswitch_get_vport(struct mlx5_eswitch *esw, u16 vport_num);

bool mlx5_eswitch_is_vf_vport(struct mlx5_eswitch *esw, u16 vport_num);
bool mlx5_eswitch_is_pf_vf_vport(struct mlx5_eswitch *esw, u16 vport_num);
bool mlx5_esw_is_sf_vport(struct mlx5_eswitch *esw, u16 vport_num);

int mlx5_esw_funcs_changed_handler(struct notifier_block *nb, unsigned long type, void *data);
@@ -689,9 +717,9 @@ mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw,
				 enum mlx5_eswitch_vport_event enabled_events);
void mlx5_eswitch_disable_pf_vf_vports(struct mlx5_eswitch *esw);

int mlx5_esw_vport_enable(struct mlx5_eswitch *esw, u16 vport_num,
int mlx5_esw_vport_enable(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
			  enum mlx5_eswitch_vport_event enabled_events);
void mlx5_esw_vport_disable(struct mlx5_eswitch *esw, u16 vport_num);
void mlx5_esw_vport_disable(struct mlx5_eswitch *esw, struct mlx5_vport *vport);

int
esw_vport_create_offloads_acl_tables(struct mlx5_eswitch *esw,
@@ -729,24 +757,40 @@ void mlx5_esw_set_spec_source_port(struct mlx5_eswitch *esw,
				   u16 vport,
				   struct mlx5_flow_spec *spec);

int mlx5_esw_offloads_load_rep(struct mlx5_eswitch *esw, u16 vport_num);
void mlx5_esw_offloads_unload_rep(struct mlx5_eswitch *esw, u16 vport_num);
int mlx5_esw_offloads_init_pf_vf_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
void mlx5_esw_offloads_cleanup_pf_vf_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport);

int mlx5_esw_offloads_init_sf_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
				  struct mlx5_devlink_port *dl_port,
				  u32 controller, u32 sfnum);
void mlx5_esw_offloads_cleanup_sf_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport);

int mlx5_esw_offloads_load_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
void mlx5_esw_offloads_unload_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport);

int mlx5_eswitch_load_sf_vport(struct mlx5_eswitch *esw, u16 vport_num,
			       enum mlx5_eswitch_vport_event enabled_events,
			       struct mlx5_devlink_port *dl_port, u32 controller, u32 sfnum);
void mlx5_eswitch_unload_sf_vport(struct mlx5_eswitch *esw, u16 vport_num);

int mlx5_eswitch_load_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs,
				enum mlx5_eswitch_vport_event enabled_events);
void mlx5_eswitch_unload_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs);

int mlx5_esw_offloads_devlink_port_register(struct mlx5_eswitch *esw, u16 vport_num);
void mlx5_esw_offloads_devlink_port_unregister(struct mlx5_eswitch *esw, u16 vport_num);
struct devlink_port *mlx5_esw_offloads_devlink_port(struct mlx5_eswitch *esw, u16 vport_num);
int mlx5_esw_offloads_pf_vf_devlink_port_init(struct mlx5_eswitch *esw,
					      struct mlx5_vport *vport);
void mlx5_esw_offloads_pf_vf_devlink_port_cleanup(struct mlx5_eswitch *esw,
						  struct mlx5_vport *vport);

int mlx5_esw_devlink_sf_port_register(struct mlx5_eswitch *esw, struct devlink_port *dl_port,
				      u16 vport_num, u32 controller, u32 sfnum);
void mlx5_esw_devlink_sf_port_unregister(struct mlx5_eswitch *esw, u16 vport_num);
int mlx5_esw_offloads_sf_devlink_port_init(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
					   struct mlx5_devlink_port *dl_port,
					   u32 controller, u32 sfnum);
void mlx5_esw_offloads_sf_devlink_port_cleanup(struct mlx5_eswitch *esw, struct mlx5_vport *vport);

int mlx5_esw_offloads_devlink_port_register(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
void mlx5_esw_offloads_devlink_port_unregister(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
struct devlink_port *mlx5_esw_offloads_devlink_port(struct mlx5_eswitch *esw, u16 vport_num);

int mlx5_esw_offloads_sf_vport_enable(struct mlx5_eswitch *esw, struct devlink_port *dl_port,
				      u16 vport_num, u32 controller, u32 sfnum);
void mlx5_esw_offloads_sf_vport_disable(struct mlx5_eswitch *esw, u16 vport_num);
int mlx5_esw_sf_max_hpf_functions(struct mlx5_core_dev *dev, u16 *max_sfs, u16 *sf_base_id);

int mlx5_esw_vport_vhca_id_set(struct mlx5_eswitch *esw, u16 vport_num);
Loading