Commit 014e4d48 authored by Shay Drory, committed by Saeed Mahameed
Browse files

net/mlx5: E-switch, generalize shared FDB creation



Shared FDB creation is hard coded for only two eswitches.
Generalize shared FDB creation so that any number of eswitches could
create shared FDB.

Signed-off-by: Shay Drory <shayd@nvidia.com>
Reviewed-by: Roi Dayan <roid@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
parent 5e0202eb
Loading
Loading
Loading
Loading
+12 −0
Original line number Diff line number Diff line
@@ -15,6 +15,18 @@ static void esw_acl_egress_ofld_fwd2vport_destroy(struct mlx5_vport *vport)
	vport->egress.offloads.fwd_rule = NULL;
}

/* Remove a single bounce rule from the vport's egress ACL.
 * The rule is looked up in the bounce_rules xarray by @rule_index;
 * a missing entry is not an error. On success the flow rule is
 * deleted from hardware and its xarray slot is released.
 */
void esw_acl_egress_ofld_bounce_rule_destroy(struct mlx5_vport *vport, int rule_index)
{
	struct mlx5_flow_handle *rule;

	rule = xa_load(&vport->egress.offloads.bounce_rules, rule_index);
	if (!rule)
		return;

	mlx5_del_flow_rules(rule);
	xa_erase(&vport->egress.offloads.bounce_rules, rule_index);
}

static void esw_acl_egress_ofld_bounce_rules_destroy(struct mlx5_vport *vport)
{
	struct mlx5_flow_handle *bounce_rule;
+1 −0
Original line number Diff line number Diff line
@@ -10,6 +10,7 @@
/* Eswitch acl egress external APIs */
int esw_acl_egress_ofld_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
void esw_acl_egress_ofld_cleanup(struct mlx5_vport *vport);
void esw_acl_egress_ofld_bounce_rule_destroy(struct mlx5_vport *vport, int rule_index);
int mlx5_esw_acl_egress_vport_bond(struct mlx5_eswitch *esw, u16 active_vport_num,
				   u16 passive_vport_num);
int mlx5_esw_acl_egress_vport_unbond(struct mlx5_eswitch *esw, u16 vport_num);
+6 −6
Original line number Diff line number Diff line
@@ -754,9 +754,9 @@ void esw_vport_change_handle_locked(struct mlx5_vport *vport);

bool mlx5_esw_offloads_controller_valid(const struct mlx5_eswitch *esw, u32 controller);

int mlx5_eswitch_offloads_config_single_fdb(struct mlx5_eswitch *master_esw,
					    struct mlx5_eswitch *slave_esw);
void mlx5_eswitch_offloads_destroy_single_fdb(struct mlx5_eswitch *master_esw,
int mlx5_eswitch_offloads_single_fdb_add_one(struct mlx5_eswitch *master_esw,
					     struct mlx5_eswitch *slave_esw, int max_slaves);
void mlx5_eswitch_offloads_single_fdb_del_one(struct mlx5_eswitch *master_esw,
					      struct mlx5_eswitch *slave_esw);
int mlx5_eswitch_reload_reps(struct mlx5_eswitch *esw);

@@ -808,14 +808,14 @@ mlx5_esw_vport_to_devlink_port_index(const struct mlx5_core_dev *dev,
}

/* Stub variant of mlx5_eswitch_offloads_single_fdb_add_one().
 * NOTE(review): presumably compiled when eswitch offloads support is
 * disabled (e.g. !CONFIG_MLX5_ESWITCH) — confirm against the enclosing
 * #ifdef, which is outside this hunk. Always reports success so callers
 * proceed without a shared FDB.
 */
static inline int
mlx5_eswitch_offloads_single_fdb_add_one(struct mlx5_eswitch *master_esw,
					 struct mlx5_eswitch *slave_esw, int max_slaves)
{
	return 0;
}

/* Stub variant of mlx5_eswitch_offloads_single_fdb_del_one(); nothing to
 * undo when shared-FDB setup is itself a no-op.
 */
static inline void
mlx5_eswitch_offloads_single_fdb_del_one(struct mlx5_eswitch *master_esw,
					 struct mlx5_eswitch *slave_esw) {}

static inline int
+18 −14
Original line number Diff line number Diff line
@@ -2557,11 +2557,11 @@ static int __esw_set_master_egress_rule(struct mlx5_core_dev *master,
}

static int esw_master_egress_create_resources(struct mlx5_flow_namespace *egress_ns,
					      struct mlx5_vport *vport)
					      struct mlx5_vport *vport, size_t count)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_table_attr ft_attr = {
		.max_fte = MLX5_MAX_PORTS, .prio = 0, .level = 0,
		.max_fte = count, .prio = 0, .level = 0,
		.flags = MLX5_FLOW_TABLE_OTHER_VPORT,
	};
	struct mlx5_flow_table *acl;
@@ -2595,7 +2595,7 @@ static int esw_master_egress_create_resources(struct mlx5_flow_namespace *egress
	MLX5_SET(create_flow_group_in, flow_group_in,
		 source_eswitch_owner_vhca_id_valid, 1);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, MLX5_MAX_PORTS);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, count);

	g = mlx5_create_flow_group(acl, flow_group_in);
	if (IS_ERR(g)) {
@@ -2626,7 +2626,7 @@ static void esw_master_egress_destroy_resources(struct mlx5_vport *vport)
}

static int esw_set_master_egress_rule(struct mlx5_core_dev *master,
				      struct mlx5_core_dev *slave)
				      struct mlx5_core_dev *slave, size_t count)
{
	struct mlx5_eswitch *esw = master->priv.eswitch;
	u16 slave_index = MLX5_CAP_GEN(slave, vhca_id);
@@ -2647,7 +2647,7 @@ static int esw_set_master_egress_rule(struct mlx5_core_dev *master,
	if (vport->egress.acl && vport->egress.type != VPORT_EGRESS_ACL_TYPE_SHARED_FDB)
		return 0;

	err = esw_master_egress_create_resources(egress_ns, vport);
	err = esw_master_egress_create_resources(egress_ns, vport, count);
	if (err)
		return err;

@@ -2665,19 +2665,24 @@ static int esw_set_master_egress_rule(struct mlx5_core_dev *master,
	return err;
}

static void esw_unset_master_egress_rule(struct mlx5_core_dev *dev)
static void esw_unset_master_egress_rule(struct mlx5_core_dev *dev,
					 struct mlx5_core_dev *slave_dev)
{
	struct mlx5_vport *vport;

	vport = mlx5_eswitch_get_vport(dev->priv.eswitch,
				       dev->priv.eswitch->manager_vport);

	esw_acl_egress_ofld_bounce_rule_destroy(vport, MLX5_CAP_GEN(slave_dev, vhca_id));

	if (xa_empty(&vport->egress.offloads.bounce_rules)) {
		esw_acl_egress_ofld_cleanup(vport);
		xa_destroy(&vport->egress.offloads.bounce_rules);
	}
}

int mlx5_eswitch_offloads_config_single_fdb(struct mlx5_eswitch *master_esw,
					    struct mlx5_eswitch *slave_esw)
int mlx5_eswitch_offloads_single_fdb_add_one(struct mlx5_eswitch *master_esw,
					     struct mlx5_eswitch *slave_esw, int max_slaves)
{
	int err;

@@ -2687,7 +2692,7 @@ int mlx5_eswitch_offloads_config_single_fdb(struct mlx5_eswitch *master_esw,
		return err;

	err = esw_set_master_egress_rule(master_esw->dev,
					 slave_esw->dev);
					 slave_esw->dev, max_slaves);
	if (err)
		goto err_acl;

@@ -2695,15 +2700,14 @@ int mlx5_eswitch_offloads_config_single_fdb(struct mlx5_eswitch *master_esw,

err_acl:
	esw_set_slave_root_fdb(NULL, slave_esw->dev);

	return err;
}

void mlx5_eswitch_offloads_destroy_single_fdb(struct mlx5_eswitch *master_esw,
void mlx5_eswitch_offloads_single_fdb_del_one(struct mlx5_eswitch *master_esw,
					      struct mlx5_eswitch *slave_esw)
{
	esw_unset_master_egress_rule(master_esw->dev);
	esw_set_slave_root_fdb(NULL, slave_esw->dev);
	esw_unset_master_egress_rule(master_esw->dev, slave_esw->dev);
}

#define ESW_OFFLOADS_DEVCOM_PAIR	(0)
+29 −6
Original line number Diff line number Diff line
@@ -550,6 +550,29 @@ char *mlx5_get_str_port_sel_mode(enum mlx5_lag_mode mode, unsigned long flags)
	}
}

/* Attach every non-master port's eswitch to the master (port 1) FDB so the
 * LAG operates with a single shared FDB.
 *
 * Returns 0 on success or the first attach error; on failure every slave
 * that was already attached is detached again.
 */
static int mlx5_lag_create_single_fdb(struct mlx5_lag *ldev)
{
	struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
	struct mlx5_eswitch *master_esw = dev0->priv.eswitch;
	int err;
	int i;

	for (i = MLX5_LAG_P1 + 1; i < ldev->ports; i++) {
		struct mlx5_eswitch *slave_esw = ldev->pf[i].dev->priv.eswitch;

		err = mlx5_eswitch_offloads_single_fdb_add_one(master_esw,
							       slave_esw, ldev->ports);
		if (err)
			goto err;
	}
	return 0;
err:
	/* add_one() unwinds its own partial work on failure (err_acl path),
	 * so slave @i was never attached — start the unwind at i - 1 and
	 * only detach slaves that were successfully added.
	 */
	for (i--; i > MLX5_LAG_P1; i--)
		mlx5_eswitch_offloads_single_fdb_del_one(master_esw,
							 ldev->pf[i].dev->priv.eswitch);
	return err;
}

static int mlx5_create_lag(struct mlx5_lag *ldev,
			   struct lag_tracker *tracker,
			   enum mlx5_lag_mode mode,
@@ -557,7 +580,6 @@ static int mlx5_create_lag(struct mlx5_lag *ldev,
{
	bool shared_fdb = test_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, &flags);
	struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
	struct mlx5_core_dev *dev1 = ldev->pf[MLX5_LAG_P2].dev;
	u32 in[MLX5_ST_SZ_DW(destroy_lag_in)] = {};
	int err;

@@ -575,8 +597,7 @@ static int mlx5_create_lag(struct mlx5_lag *ldev,
	}

	if (shared_fdb) {
		err = mlx5_eswitch_offloads_config_single_fdb(dev0->priv.eswitch,
							      dev1->priv.eswitch);
		err = mlx5_lag_create_single_fdb(ldev);
		if (err)
			mlx5_core_err(dev0, "Can't enable single FDB mode\n");
		else
@@ -647,19 +668,21 @@ int mlx5_activate_lag(struct mlx5_lag *ldev,
int mlx5_deactivate_lag(struct mlx5_lag *ldev)
{
	struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
	struct mlx5_core_dev *dev1 = ldev->pf[MLX5_LAG_P2].dev;
	struct mlx5_eswitch *master_esw = dev0->priv.eswitch;
	u32 in[MLX5_ST_SZ_DW(destroy_lag_in)] = {};
	bool roce_lag = __mlx5_lag_is_roce(ldev);
	unsigned long flags = ldev->mode_flags;
	int err;
	int i;

	ldev->mode = MLX5_LAG_MODE_NONE;
	ldev->mode_flags = 0;
	mlx5_lag_mp_reset(ldev);

	if (test_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, &flags)) {
		mlx5_eswitch_offloads_destroy_single_fdb(dev0->priv.eswitch,
							 dev1->priv.eswitch);
		for (i = MLX5_LAG_P1 + 1; i < ldev->ports; i++)
			mlx5_eswitch_offloads_single_fdb_del_one(master_esw,
								 ldev->pf[i].dev->priv.eswitch);
		clear_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, &flags);
	}