Commit 88d162b4 authored by Roi Dayan, committed by Saeed Mahameed

net/mlx5: Devcom, Infrastructure changes



Update the devcom infrastructure to be more generic, without
depending on the max supported ports definition or on a device
GUID, and more encapsulated, so callers no longer need to pass
the registered devcom component id on every event call.

Signed-off-by: Eli Cohen <elic@nvidia.com>
Signed-off-by: Roi Dayan <roid@nvidia.com>
Reviewed-by: Shay Drory <shayd@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
parent 02ceda65
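To make the change concrete, here is a minimal before/after sketch of the caller pattern, distilled from the diffs below (the esw and peer_esw names stand in for whatever the call site uses):

	/* Before: callers fetch the device-wide devcom handle and pass the
	 * registered component id (MLX5_DEVCOM_ESW_OFFLOADS) on every call.
	 */
	struct mlx5_devcom *devcom = esw->dev->priv.devcom;
	struct mlx5_eswitch *peer_esw;
	int i;

	if (mlx5_devcom_for_each_peer_begin(devcom, MLX5_DEVCOM_ESW_OFFLOADS)) {
		mlx5_devcom_for_each_peer_entry(devcom, MLX5_DEVCOM_ESW_OFFLOADS,
						peer_esw, i) {
			/* ... act on peer_esw ... */
		}
		mlx5_devcom_for_each_peer_end(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
	}

	/* After: the eswitch caches its own component pointer (esw->devcom,
	 * added to struct mlx5_eswitch in the last hunk), so no component id
	 * is passed and iteration uses a struct mlx5_devcom_comp_dev cursor.
	 */
	struct mlx5_devcom_comp_dev *devcom = esw->devcom, *pos;
	struct mlx5_eswitch *peer_esw;

	if (mlx5_devcom_for_each_peer_begin(devcom)) {
		mlx5_devcom_for_each_peer_entry(devcom, peer_esw, pos) {
			/* ... act on peer_esw ... */
		}
		mlx5_devcom_for_each_peer_end(devcom);
	}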
+9 −12
@@ -399,15 +399,13 @@ static void mlx5e_sqs2vport_stop(struct mlx5_eswitch *esw,
}

static int mlx5e_sqs2vport_add_peers_rules(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep,
					   struct mlx5_devcom *devcom,
					   struct mlx5e_rep_sq *rep_sq, int i)
{
	struct mlx5_eswitch *peer_esw = NULL;
	struct mlx5_flow_handle *flow_rule;
	int tmp;
	struct mlx5_devcom_comp_dev *tmp;
	struct mlx5_eswitch *peer_esw;

	mlx5_devcom_for_each_peer_entry(devcom, MLX5_DEVCOM_ESW_OFFLOADS,
					peer_esw, tmp) {
	mlx5_devcom_for_each_peer_entry(esw->devcom, peer_esw, tmp) {
		u16 peer_rule_idx = MLX5_CAP_GEN(peer_esw->dev, vhca_id);
		struct mlx5e_rep_sq_peer *sq_peer;
		int err;
@@ -443,7 +441,6 @@ static int mlx5e_sqs2vport_start(struct mlx5_eswitch *esw,
	struct mlx5_flow_handle *flow_rule;
	struct mlx5e_rep_priv *rpriv;
	struct mlx5e_rep_sq *rep_sq;
	struct mlx5_devcom *devcom;
	bool devcom_locked = false;
	int err;
	int i;
@@ -451,10 +448,10 @@ static int mlx5e_sqs2vport_start(struct mlx5_eswitch *esw,
	if (esw->mode != MLX5_ESWITCH_OFFLOADS)
		return 0;

	devcom = esw->dev->priv.devcom;
	rpriv = mlx5e_rep_to_rep_priv(rep);
	if (mlx5_devcom_comp_is_ready(devcom, MLX5_DEVCOM_ESW_OFFLOADS) &&
	    mlx5_devcom_for_each_peer_begin(devcom, MLX5_DEVCOM_ESW_OFFLOADS))

	if (mlx5_devcom_comp_is_ready(esw->devcom) &&
	    mlx5_devcom_for_each_peer_begin(esw->devcom))
		devcom_locked = true;

	for (i = 0; i < sqns_num; i++) {
@@ -477,7 +474,7 @@ static int mlx5e_sqs2vport_start(struct mlx5_eswitch *esw,

		xa_init(&rep_sq->sq_peer);
		if (devcom_locked) {
			err = mlx5e_sqs2vport_add_peers_rules(esw, rep, devcom, rep_sq, i);
			err = mlx5e_sqs2vport_add_peers_rules(esw, rep, rep_sq, i);
			if (err) {
				mlx5_eswitch_del_send_to_vport_rule(rep_sq->send_to_vport_rule);
				xa_destroy(&rep_sq->sq_peer);
@@ -490,7 +487,7 @@ static int mlx5e_sqs2vport_start(struct mlx5_eswitch *esw,
	}

	if (devcom_locked)
		mlx5_devcom_for_each_peer_end(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
		mlx5_devcom_for_each_peer_end(esw->devcom);

	return 0;

@@ -498,7 +495,7 @@ static int mlx5e_sqs2vport_start(struct mlx5_eswitch *esw,
	mlx5e_sqs2vport_stop(esw, rep);

	if (devcom_locked)
		mlx5_devcom_for_each_peer_end(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
		mlx5_devcom_for_each_peer_end(esw->devcom);

	return err;
}
+15 −21
@@ -1668,11 +1668,10 @@ int mlx5e_tc_query_route_vport(struct net_device *out_dev, struct net_device *ro
{
	struct mlx5e_priv *out_priv, *route_priv;
	struct mlx5_core_dev *route_mdev;
	struct mlx5_devcom *devcom;
	struct mlx5_devcom_comp_dev *pos;
	struct mlx5_eswitch *esw;
	u16 vhca_id;
	int err;
	int i;

	out_priv = netdev_priv(out_dev);
	esw = out_priv->mdev->priv.eswitch;
@@ -1688,10 +1687,8 @@ int mlx5e_tc_query_route_vport(struct net_device *out_dev, struct net_device *ro
		return err;

	rcu_read_lock();
	devcom = out_priv->mdev->priv.devcom;
	err = -ENODEV;
	mlx5_devcom_for_each_peer_entry_rcu(devcom, MLX5_DEVCOM_ESW_OFFLOADS,
					    esw, i) {
	mlx5_devcom_for_each_peer_entry_rcu(esw->devcom, esw, pos) {
		err = mlx5_eswitch_vhca_id_to_vport(esw, vhca_id, vport);
		if (!err)
			break;
@@ -2031,15 +2028,15 @@ static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
			      struct mlx5e_tc_flow *flow)
{
	if (mlx5e_is_eswitch_flow(flow)) {
		struct mlx5_devcom *devcom = flow->priv->mdev->priv.devcom;
		struct mlx5_devcom_comp_dev *devcom = flow->priv->mdev->priv.eswitch->devcom;

		if (!mlx5_devcom_for_each_peer_begin(devcom, MLX5_DEVCOM_ESW_OFFLOADS)) {
		if (!mlx5_devcom_for_each_peer_begin(devcom)) {
			mlx5e_tc_del_fdb_flow(priv, flow);
			return;
		}

		mlx5e_tc_del_fdb_peers_flow(flow);
		mlx5_devcom_for_each_peer_end(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
		mlx5_devcom_for_each_peer_end(devcom);
		mlx5e_tc_del_fdb_flow(priv, flow);
	} else {
		mlx5e_tc_del_nic_flow(priv, flow);
@@ -4216,8 +4213,7 @@ static bool is_peer_flow_needed(struct mlx5e_tc_flow *flow)
		flow_flag_test(flow, INGRESS);
	bool act_is_encap = !!(attr->action &
			       MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT);
	bool esw_paired = mlx5_devcom_comp_is_ready(esw_attr->in_mdev->priv.devcom,
						    MLX5_DEVCOM_ESW_OFFLOADS);
	bool esw_paired = mlx5_devcom_comp_is_ready(esw_attr->in_mdev->priv.eswitch->devcom);

	if (!esw_paired)
		return false;
@@ -4471,14 +4467,13 @@ mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
		   struct net_device *filter_dev,
		   struct mlx5e_tc_flow **__flow)
{
	struct mlx5_devcom *devcom = priv->mdev->priv.devcom;
	struct mlx5_devcom_comp_dev *devcom = priv->mdev->priv.eswitch->devcom, *pos;
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *in_rep = rpriv->rep;
	struct mlx5_core_dev *in_mdev = priv->mdev;
	struct mlx5_eswitch *peer_esw;
	struct mlx5e_tc_flow *flow;
	int err;
	int i;

	flow = __mlx5e_add_fdb_flow(priv, f, flow_flags, filter_dev, in_rep,
				    in_mdev);
@@ -4490,27 +4485,25 @@ mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
		return 0;
	}

	if (!mlx5_devcom_for_each_peer_begin(devcom, MLX5_DEVCOM_ESW_OFFLOADS)) {
	if (!mlx5_devcom_for_each_peer_begin(devcom)) {
		err = -ENODEV;
		goto clean_flow;
	}

	mlx5_devcom_for_each_peer_entry(devcom,
					MLX5_DEVCOM_ESW_OFFLOADS,
					peer_esw, i) {
	mlx5_devcom_for_each_peer_entry(devcom, peer_esw, pos) {
		err = mlx5e_tc_add_fdb_peer_flow(f, flow, flow_flags, peer_esw);
		if (err)
			goto peer_clean;
	}

	mlx5_devcom_for_each_peer_end(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
	mlx5_devcom_for_each_peer_end(devcom);

	*__flow = flow;
	return 0;

peer_clean:
	mlx5e_tc_del_fdb_peers_flow(flow);
	mlx5_devcom_for_each_peer_end(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
	mlx5_devcom_for_each_peer_end(devcom);
clean_flow:
	mlx5e_tc_del_fdb_flow(priv, flow);
	return err;
@@ -4728,7 +4721,7 @@ int mlx5e_tc_fill_action_stats(struct mlx5e_priv *priv,
int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv,
		       struct flow_cls_offload *f, unsigned long flags)
{
	struct mlx5_devcom *devcom = priv->mdev->priv.devcom;
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct rhashtable *tc_ht = get_tc_ht(priv, flags);
	struct mlx5e_tc_flow *flow;
	struct mlx5_fc *counter;
@@ -4764,7 +4757,7 @@ int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv,
	/* Under multipath it's possible for one rule to be currently
	 * un-offloaded while the other rule is offloaded.
	 */
	if (!mlx5_devcom_for_each_peer_begin(devcom, MLX5_DEVCOM_ESW_OFFLOADS))
	if (esw && !mlx5_devcom_for_each_peer_begin(esw->devcom))
		goto out;

	if (flow_flag_test(flow, DUP)) {
@@ -4795,7 +4788,8 @@ int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv,
	}

no_peer_counter:
	mlx5_devcom_for_each_peer_end(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
	if (esw)
		mlx5_devcom_for_each_peer_end(esw->devcom);
out:
	flow_stats_update(&f->stats, bytes, packets, 0, lastuse,
			  FLOW_ACTION_HW_STATS_DELAYED);
+11 −11
@@ -652,30 +652,30 @@ mlx5_esw_bridge_ingress_flow_peer_create(u16 vport_num, u16 esw_owner_vhca_id,
					 struct mlx5_esw_bridge_vlan *vlan, u32 counter_id,
					 struct mlx5_esw_bridge *bridge)
{
	struct mlx5_devcom *devcom = bridge->br_offloads->esw->dev->priv.devcom;
	struct mlx5_devcom_comp_dev *devcom = bridge->br_offloads->esw->devcom, *pos;
	struct mlx5_eswitch *tmp, *peer_esw = NULL;
	static struct mlx5_flow_handle *handle;
	int i;

	if (!mlx5_devcom_for_each_peer_begin(devcom, MLX5_DEVCOM_ESW_OFFLOADS))
	if (!mlx5_devcom_for_each_peer_begin(devcom))
		return ERR_PTR(-ENODEV);

	mlx5_devcom_for_each_peer_entry(devcom,
					MLX5_DEVCOM_ESW_OFFLOADS,
					tmp, i) {
	mlx5_devcom_for_each_peer_entry(devcom, tmp, pos) {
		if (mlx5_esw_is_owner(tmp, vport_num, esw_owner_vhca_id)) {
			peer_esw = tmp;
			break;
		}
	}

	if (!peer_esw) {
		mlx5_devcom_for_each_peer_end(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
		return ERR_PTR(-ENODEV);
		handle = ERR_PTR(-ENODEV);
		goto out;
	}

	handle = mlx5_esw_bridge_ingress_flow_with_esw_create(vport_num, addr, vlan, counter_id,
							      bridge, peer_esw);
	mlx5_devcom_for_each_peer_end(devcom, MLX5_DEVCOM_ESW_OFFLOADS);

out:
	mlx5_devcom_for_each_peer_end(devcom);
	return handle;
}

@@ -1391,8 +1391,8 @@ mlx5_esw_bridge_fdb_entry_init(struct net_device *dev, u16 vport_num, u16 esw_ow
						    mlx5_fc_id(counter), bridge);
	if (IS_ERR(handle)) {
		err = PTR_ERR(handle);
		esw_warn(esw->dev, "Failed to create ingress flow(vport=%u,err=%d)\n",
			 vport_num, err);
		esw_warn(esw->dev, "Failed to create ingress flow(vport=%u,err=%d,peer=%d)\n",
			 vport_num, err, peer);
		goto err_ingress_flow_create;
	}
	entry->ingress_handle = handle;
+8 −9
@@ -539,30 +539,29 @@ mlx5_esw_bridge_mcast_filter_flow_create(struct mlx5_esw_bridge_port *port)
static struct mlx5_flow_handle *
mlx5_esw_bridge_mcast_filter_flow_peer_create(struct mlx5_esw_bridge_port *port)
{
	struct mlx5_devcom *devcom = port->bridge->br_offloads->esw->dev->priv.devcom;
	struct mlx5_devcom_comp_dev *devcom = port->bridge->br_offloads->esw->devcom, *pos;
	struct mlx5_eswitch *tmp, *peer_esw = NULL;
	static struct mlx5_flow_handle *handle;
	int i;

	if (!mlx5_devcom_for_each_peer_begin(devcom, MLX5_DEVCOM_ESW_OFFLOADS))
	if (!mlx5_devcom_for_each_peer_begin(devcom))
		return ERR_PTR(-ENODEV);

	mlx5_devcom_for_each_peer_entry(devcom,
					MLX5_DEVCOM_ESW_OFFLOADS,
					tmp, i) {
	mlx5_devcom_for_each_peer_entry(devcom, tmp, pos) {
		if (mlx5_esw_is_owner(tmp, port->vport_num, port->esw_owner_vhca_id)) {
			peer_esw = tmp;
			break;
		}
	}

	if (!peer_esw) {
		mlx5_devcom_for_each_peer_end(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
		return ERR_PTR(-ENODEV);
		handle = ERR_PTR(-ENODEV);
		goto out;
	}

	handle = mlx5_esw_bridge_mcast_flow_with_esw_create(port, peer_esw);

	mlx5_devcom_for_each_peer_end(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
out:
	mlx5_devcom_for_each_peer_end(devcom);
	return handle;
}

+3 −0
@@ -354,6 +354,7 @@ struct mlx5_eswitch {
	}  params;
	struct blocking_notifier_head n_head;
	struct xarray paired;
	struct mlx5_devcom_comp_dev *devcom;
};

void esw_offloads_disable(struct mlx5_eswitch *esw);
@@ -383,6 +384,7 @@ void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw);
void mlx5_eswitch_disable(struct mlx5_eswitch *esw);
void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw);
void mlx5_esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw);
bool mlx5_esw_offloads_devcom_is_ready(struct mlx5_eswitch *esw);
int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
			       u16 vport, const u8 *mac);
int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
@@ -818,6 +820,7 @@ static inline void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw, bool cle
static inline void mlx5_eswitch_disable(struct mlx5_eswitch *esw) {}
static inline void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw) {}
static inline void mlx5_esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw) {}
static inline bool mlx5_esw_offloads_devcom_is_ready(struct mlx5_eswitch *esw) { return false; }
static inline bool mlx5_eswitch_is_funcs_handler(struct mlx5_core_dev *dev) { return false; }
static inline
int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw, u16 vport, int link_state) { return 0; }