Commit 90ca127c authored by Saeed Mahameed

net/mlx5: Devcom, introduce devcom_for_each_peer_entry
Introduce generic APIs which will retrieve all peers.
These APIs replace mlx5_devcom_get/release_peer_data(), which could
retrieve only a single peer.
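
For reference, the calling convention of the new API family, as a minimal
sketch reconstructed from the call sites in this patch (the devcom
implementation itself is not part of this diff; peer_esw and i below are
the caller-supplied cursor variables):

	struct mlx5_eswitch *peer_esw;
	int i;

	/* returns false when peers cannot be iterated (assumed from usage here) */
	if (!mlx5_devcom_for_each_peer_begin(devcom, MLX5_DEVCOM_ESW_OFFLOADS))
		return -ENODEV;

	mlx5_devcom_for_each_peer_entry(devcom, MLX5_DEVCOM_ESW_OFFLOADS,
					peer_esw, i) {
		/* act on each peer eswitch */
	}

	mlx5_devcom_for_each_peer_end(devcom, MLX5_DEVCOM_ESW_OFFLOADS);

Under rcu_read_lock(), mlx5_devcom_for_each_peer_entry_rcu() is used
instead of the begin/end pair (see mlx5e_tc_query_route_vport() below).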

Signed-off-by: Mark Bloch <mbloch@nvidia.com>
Signed-off-by: Shay Drory <shayd@nvidia.com>
Reviewed-by: Vlad Buslov <vladbu@nvidia.com>
Reviewed-by: Roi Dayan <roid@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
parent 8611df72

drivers/net/ethernet/mellanox/mlx5/core/en_rep.c (+56 −38)
@@ -398,25 +398,64 @@ static void mlx5e_sqs2vport_stop(struct mlx5_eswitch *esw,
 	}
 }
 
+static int mlx5e_sqs2vport_add_peers_rules(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep,
+					   struct mlx5_devcom *devcom,
+					   struct mlx5e_rep_sq *rep_sq, int i)
+{
+	struct mlx5_eswitch *peer_esw = NULL;
+	struct mlx5_flow_handle *flow_rule;
+	int tmp;
+
+	mlx5_devcom_for_each_peer_entry(devcom, MLX5_DEVCOM_ESW_OFFLOADS,
+					peer_esw, tmp) {
+		int peer_rule_idx = mlx5_get_dev_index(peer_esw->dev);
+		struct mlx5e_rep_sq_peer *sq_peer;
+		int err;
+
+		sq_peer = kzalloc(sizeof(*sq_peer), GFP_KERNEL);
+		if (!sq_peer)
+			return -ENOMEM;
+
+		flow_rule = mlx5_eswitch_add_send_to_vport_rule(peer_esw, esw,
+								rep, rep_sq->sqn);
+		if (IS_ERR(flow_rule)) {
+			kfree(sq_peer);
+			return PTR_ERR(flow_rule);
+		}
+
+		sq_peer->rule = flow_rule;
+		sq_peer->peer = peer_esw;
+		err = xa_insert(&rep_sq->sq_peer, peer_rule_idx, sq_peer, GFP_KERNEL);
+		if (err) {
+			kfree(sq_peer);
+			mlx5_eswitch_del_send_to_vport_rule(flow_rule);
+			return err;
+		}
+	}
+
+	return 0;
+}
+
 static int mlx5e_sqs2vport_start(struct mlx5_eswitch *esw,
 				 struct mlx5_eswitch_rep *rep,
 				 u32 *sqns_array, int sqns_num)
 {
-	struct mlx5_eswitch *peer_esw = NULL;
 	struct mlx5_flow_handle *flow_rule;
-	struct mlx5e_rep_sq_peer *sq_peer;
 	struct mlx5e_rep_priv *rpriv;
 	struct mlx5e_rep_sq *rep_sq;
+	struct mlx5_devcom *devcom;
+	bool devcom_locked = false;
 	int err;
 	int i;
 
 	if (esw->mode != MLX5_ESWITCH_OFFLOADS)
 		return 0;
 
+	devcom = esw->dev->priv.devcom;
 	rpriv = mlx5e_rep_to_rep_priv(rep);
-	if (mlx5_devcom_comp_is_ready(esw->dev->priv.devcom, MLX5_DEVCOM_ESW_OFFLOADS))
-		peer_esw = mlx5_devcom_get_peer_data(esw->dev->priv.devcom,
-						     MLX5_DEVCOM_ESW_OFFLOADS);
+	if (mlx5_devcom_comp_is_ready(devcom, MLX5_DEVCOM_ESW_OFFLOADS) &&
+	    mlx5_devcom_for_each_peer_begin(devcom, MLX5_DEVCOM_ESW_OFFLOADS))
+		devcom_locked = true;
 
 	for (i = 0; i < sqns_num; i++) {
 		rep_sq = kzalloc(sizeof(*rep_sq), GFP_KERNEL);
@@ -424,7 +463,6 @@ static int mlx5e_sqs2vport_start(struct mlx5_eswitch *esw,
 			err = -ENOMEM;
 			goto out_err;
 		}
-		xa_init(&rep_sq->sq_peer);
 
 		/* Add re-inject rule to the PF/representor sqs */
 		flow_rule = mlx5_eswitch_add_send_to_vport_rule(esw, esw, rep,
@@ -437,50 +475,30 @@ static int mlx5e_sqs2vport_start(struct mlx5_eswitch *esw,
 		rep_sq->send_to_vport_rule = flow_rule;
 		rep_sq->sqn = sqns_array[i];
 
-		if (peer_esw) {
-			int peer_rule_idx = mlx5_get_dev_index(peer_esw->dev);
-
-			sq_peer = kzalloc(sizeof(*sq_peer), GFP_KERNEL);
-			if (!sq_peer) {
-				err = -ENOMEM;
-				goto out_sq_peer_err;
-			}
-
-			flow_rule = mlx5_eswitch_add_send_to_vport_rule(peer_esw, esw,
-									rep, sqns_array[i]);
-			if (IS_ERR(flow_rule)) {
-				err = PTR_ERR(flow_rule);
-				goto out_flow_rule_err;
+		xa_init(&rep_sq->sq_peer);
+		if (devcom_locked) {
+			err = mlx5e_sqs2vport_add_peers_rules(esw, rep, devcom, rep_sq, i);
+			if (err) {
+				mlx5_eswitch_del_send_to_vport_rule(rep_sq->send_to_vport_rule);
+				xa_destroy(&rep_sq->sq_peer);
+				kfree(rep_sq);
+				goto out_err;
 			}
-
-			sq_peer->rule = flow_rule;
-			sq_peer->peer = peer_esw;
-			err = xa_insert(&rep_sq->sq_peer, peer_rule_idx, sq_peer, GFP_KERNEL);
-			if (err)
-				goto out_xa_err;
 		}
 
 		list_add(&rep_sq->list, &rpriv->vport_sqs_list);
 	}
 
-	if (peer_esw)
-		mlx5_devcom_release_peer_data(esw->dev->priv.devcom, MLX5_DEVCOM_ESW_OFFLOADS);
+	if (devcom_locked)
+		mlx5_devcom_for_each_peer_end(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
 
 	return 0;
 
-out_xa_err:
-	mlx5_eswitch_del_send_to_vport_rule(flow_rule);
-out_flow_rule_err:
-	kfree(sq_peer);
-out_sq_peer_err:
-	mlx5_eswitch_del_send_to_vport_rule(rep_sq->send_to_vport_rule);
-	xa_destroy(&rep_sq->sq_peer);
-	kfree(rep_sq);
 out_err:
 	mlx5e_sqs2vport_stop(esw, rep);
 
-	if (peer_esw)
-		mlx5_devcom_release_peer_data(esw->dev->priv.devcom, MLX5_DEVCOM_ESW_OFFLOADS);
+	if (devcom_locked)
+		mlx5_devcom_for_each_peer_end(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
 
 	return err;
 }

drivers/net/ethernet/mellanox/mlx5/core/en_tc.c (+26 −18)
@@ -1670,6 +1670,7 @@ int mlx5e_tc_query_route_vport(struct net_device *out_dev, struct net_device *ro
 	struct mlx5_eswitch *esw;
 	u16 vhca_id;
 	int err;
+	int i;
 
 	out_priv = netdev_priv(out_dev);
 	esw = out_priv->mdev->priv.eswitch;
@@ -1686,8 +1687,13 @@ int mlx5e_tc_query_route_vport(struct net_device *out_dev, struct net_device *ro
 
 	rcu_read_lock();
 	devcom = out_priv->mdev->priv.devcom;
-	esw = mlx5_devcom_get_peer_data_rcu(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
-	err = esw ? mlx5_eswitch_vhca_id_to_vport(esw, vhca_id, vport) : -ENODEV;
+	err = -ENODEV;
+	mlx5_devcom_for_each_peer_entry_rcu(devcom, MLX5_DEVCOM_ESW_OFFLOADS,
+					    esw, i) {
+		err = mlx5_eswitch_vhca_id_to_vport(esw, vhca_id, vport);
+		if (!err)
+			break;
+	}
 	rcu_read_unlock();
 
 	return err;
@@ -2025,15 +2031,14 @@ static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
 {
 	if (mlx5e_is_eswitch_flow(flow)) {
 		struct mlx5_devcom *devcom = flow->priv->mdev->priv.devcom;
-		struct mlx5_eswitch *peer_esw;
 
-		peer_esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
-		if (!peer_esw) {
+		if (!mlx5_devcom_for_each_peer_begin(devcom, MLX5_DEVCOM_ESW_OFFLOADS)) {
 			mlx5e_tc_del_fdb_flow(priv, flow);
 			return;
 		}
 
 		mlx5e_tc_del_fdb_peers_flow(flow);
-		mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
+		mlx5_devcom_for_each_peer_end(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
 		mlx5e_tc_del_fdb_flow(priv, flow);
 	} else {
 		mlx5e_tc_del_nic_flow(priv, flow);
@@ -4472,6 +4477,7 @@ mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
 	struct mlx5_eswitch *peer_esw;
 	struct mlx5e_tc_flow *flow;
 	int err;
+	int i;
 
 	flow = __mlx5e_add_fdb_flow(priv, f, flow_flags, filter_dev, in_rep,
 				    in_mdev);
@@ -4483,23 +4489,27 @@ mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
 		return 0;
 	}
 
-	peer_esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
-	if (!peer_esw) {
+	if (!mlx5_devcom_for_each_peer_begin(devcom, MLX5_DEVCOM_ESW_OFFLOADS)) {
 		err = -ENODEV;
 		goto clean_flow;
 	}
 
+	mlx5_devcom_for_each_peer_entry(devcom,
+					MLX5_DEVCOM_ESW_OFFLOADS,
+					peer_esw, i) {
 		err = mlx5e_tc_add_fdb_peer_flow(f, flow, flow_flags, peer_esw);
 		if (err)
 			goto peer_clean;
-	mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
+	}
 
-	*__flow = flow;
+	mlx5_devcom_for_each_peer_end(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
 
+	*__flow = flow;
 	return 0;
 
 peer_clean:
-	mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
+	mlx5e_tc_del_fdb_peers_flow(flow);
+	mlx5_devcom_for_each_peer_end(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
 clean_flow:
 	mlx5e_tc_del_fdb_flow(priv, flow);
 	return err;
@@ -4719,7 +4729,6 @@ int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv,
 {
 	struct mlx5_devcom *devcom = priv->mdev->priv.devcom;
 	struct rhashtable *tc_ht = get_tc_ht(priv, flags);
-	struct mlx5_eswitch *peer_esw;
 	struct mlx5e_tc_flow *flow;
 	struct mlx5_fc *counter;
 	u64 lastuse = 0;
@@ -4754,8 +4763,7 @@ int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv,
 	/* Under multipath it's possible for one rule to be currently
 	 * un-offloaded while the other rule is offloaded.
 	 */
-	peer_esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
-	if (!peer_esw)
+	if (!mlx5_devcom_for_each_peer_begin(devcom, MLX5_DEVCOM_ESW_OFFLOADS))
 		goto out;
 
 	if (flow_flag_test(flow, DUP)) {
@@ -4786,7 +4794,7 @@ int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv,
 	}
 
 no_peer_counter:
-	mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
+	mlx5_devcom_for_each_peer_end(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
 out:
 	flow_stats_update(&f->stats, bytes, packets, 0, lastuse,
 			  FLOW_ACTION_HW_STATS_DELAYED);

drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c (+22 −8)
@@ -647,22 +647,35 @@ mlx5_esw_bridge_ingress_flow_create(u16 vport_num, const unsigned char *addr,
 }
 
 static struct mlx5_flow_handle *
-mlx5_esw_bridge_ingress_flow_peer_create(u16 vport_num, const unsigned char *addr,
+mlx5_esw_bridge_ingress_flow_peer_create(u16 vport_num, u16 esw_owner_vhca_id,
+					 const unsigned char *addr,
 					 struct mlx5_esw_bridge_vlan *vlan, u32 counter_id,
 					 struct mlx5_esw_bridge *bridge)
 {
 	struct mlx5_devcom *devcom = bridge->br_offloads->esw->dev->priv.devcom;
+	struct mlx5_eswitch *tmp, *peer_esw = NULL;
 	static struct mlx5_flow_handle *handle;
-	struct mlx5_eswitch *peer_esw;
+	int i;
 
-	peer_esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
-	if (!peer_esw)
+	if (!mlx5_devcom_for_each_peer_begin(devcom, MLX5_DEVCOM_ESW_OFFLOADS))
 		return ERR_PTR(-ENODEV);
 
+	mlx5_devcom_for_each_peer_entry(devcom,
+					MLX5_DEVCOM_ESW_OFFLOADS,
+					tmp, i) {
+		if (mlx5_esw_is_owner(tmp, vport_num, esw_owner_vhca_id)) {
+			peer_esw = tmp;
+			break;
+		}
+	}
+	if (!peer_esw) {
+		mlx5_devcom_for_each_peer_end(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
+		return ERR_PTR(-ENODEV);
+	}
+
 	handle = mlx5_esw_bridge_ingress_flow_with_esw_create(vport_num, addr, vlan, counter_id,
 							      bridge, peer_esw);
 
-	mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
+	mlx5_devcom_for_each_peer_end(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
 	return handle;
 }
 
@@ -1369,8 +1382,9 @@ mlx5_esw_bridge_fdb_entry_init(struct net_device *dev, u16 vport_num, u16 esw_ow
 	entry->ingress_counter = counter;
 
 	handle = peer ?
-		mlx5_esw_bridge_ingress_flow_peer_create(vport_num, addr, vlan,
-							 mlx5_fc_id(counter), bridge) :
+		mlx5_esw_bridge_ingress_flow_peer_create(vport_num, esw_owner_vhca_id,
+							 addr, vlan, mlx5_fc_id(counter),
+							 bridge) :
 		mlx5_esw_bridge_ingress_flow_create(vport_num, addr, vlan,
 						    mlx5_fc_id(counter), bridge);
 	if (IS_ERR(handle)) {

drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_mcast.c (+17 −4)
@@ -540,16 +540,29 @@ static struct mlx5_flow_handle *
 mlx5_esw_bridge_mcast_filter_flow_peer_create(struct mlx5_esw_bridge_port *port)
 {
 	struct mlx5_devcom *devcom = port->bridge->br_offloads->esw->dev->priv.devcom;
+	struct mlx5_eswitch *tmp, *peer_esw = NULL;
 	static struct mlx5_flow_handle *handle;
-	struct mlx5_eswitch *peer_esw;
+	int i;
 
-	peer_esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
-	if (!peer_esw)
+	if (!mlx5_devcom_for_each_peer_begin(devcom, MLX5_DEVCOM_ESW_OFFLOADS))
 		return ERR_PTR(-ENODEV);
 
+	mlx5_devcom_for_each_peer_entry(devcom,
+					MLX5_DEVCOM_ESW_OFFLOADS,
+					tmp, i) {
+		if (mlx5_esw_is_owner(tmp, port->vport_num, port->esw_owner_vhca_id)) {
+			peer_esw = tmp;
+			break;
+		}
+	}
+	if (!peer_esw) {
+		mlx5_devcom_for_each_peer_end(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
+		return ERR_PTR(-ENODEV);
+	}
+
 	handle = mlx5_esw_bridge_mcast_flow_with_esw_create(port, peer_esw);
 
-	mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
+	mlx5_devcom_for_each_peer_end(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
 	return handle;
 }

drivers/net/ethernet/mellanox/mlx5/core/eswitch.h (+7 −0)
@@ -585,6 +585,13 @@ mlx5_esw_is_manager_vport(const struct mlx5_eswitch *esw, u16 vport_num)
 	return esw->manager_vport == vport_num;
 }
 
+static inline bool mlx5_esw_is_owner(struct mlx5_eswitch *esw, u16 vport_num,
+				     u16 esw_owner_vhca_id)
+{
+	return esw_owner_vhca_id == MLX5_CAP_GEN(esw->dev, vhca_id) ||
+		(vport_num == MLX5_VPORT_UPLINK && mlx5_lag_is_master(esw->dev));
+}
+
 static inline u16 mlx5_eswitch_first_host_vport_num(struct mlx5_core_dev *dev)
 {
 	return mlx5_core_is_ecpf_esw_manager(dev) ?