Commit 953bb24d authored by Mark Bloch, committed by Saeed Mahameed
Browse files

net/mlx5e: en_tc, Extend peer flows to a list



Currently, mlx5e_flow holds a pointer to a peer_flow, in case one
was created; i.e., there is an assumption that an mlx5e_flow can have
only one peer.
In order to support more than one peer, refactor mlx5e_flow to hold a
list of peer flows.

Signed-off-by: Mark Bloch <mbloch@nvidia.com>
Reviewed-by: Roi Dayan <roid@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
parent 3f06760c
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -94,13 +94,13 @@ struct mlx5e_tc_flow {
	 * destinations.
	 */
	struct encap_flow_item encaps[MLX5_MAX_FLOW_FWD_VPORTS];
	struct mlx5e_tc_flow *peer_flow;
	struct mlx5e_hairpin_entry *hpe; /* attached hairpin instance */
	struct list_head hairpin; /* flows sharing the same hairpin */
	struct list_head peer;    /* flows with peer flow */
	struct list_head unready; /* flows not ready to be offloaded (e.g
				   * due to missing route)
				   */
	struct list_head peer_flows; /* flows on peer */
	struct net_device *orig_dev; /* netdev adding flow first */
	int tmp_entry_index;
	struct list_head tmp_list; /* temporary flow list used by neigh update */
+27 −16
Original line number Diff line number Diff line
@@ -1989,6 +1989,8 @@ void mlx5e_put_flow_list(struct mlx5e_priv *priv, struct list_head *flow_list)
static void __mlx5e_tc_del_fdb_peer_flow(struct mlx5e_tc_flow *flow)
{
	struct mlx5_eswitch *esw = flow->priv->mdev->priv.eswitch;
	struct mlx5e_tc_flow *peer_flow;
	struct mlx5e_tc_flow *tmp;

	if (!flow_flag_test(flow, ESWITCH) ||
	    !flow_flag_test(flow, DUP))
@@ -2000,12 +2002,13 @@ static void __mlx5e_tc_del_fdb_peer_flow(struct mlx5e_tc_flow *flow)

	flow_flag_clear(flow, DUP);

	if (refcount_dec_and_test(&flow->peer_flow->refcnt)) {
		mlx5e_tc_del_fdb_flow(flow->peer_flow->priv, flow->peer_flow);
		kfree(flow->peer_flow);
	list_for_each_entry_safe(peer_flow, tmp, &flow->peer_flows, peer_flows) {
		if (refcount_dec_and_test(&peer_flow->refcnt)) {
			mlx5e_tc_del_fdb_flow(peer_flow->priv, peer_flow);
			list_del(&peer_flow->peer_flows);
			kfree(peer_flow);
		}
	}

	flow->peer_flow = NULL;
}

static void mlx5e_tc_del_fdb_peer_flow(struct mlx5e_tc_flow *flow)
@@ -4295,6 +4298,7 @@ mlx5e_alloc_flow(struct mlx5e_priv *priv, int attr_size,
	INIT_LIST_HEAD(&flow->hairpin);
	INIT_LIST_HEAD(&flow->l3_to_l2_reformat);
	INIT_LIST_HEAD(&flow->attrs);
	INIT_LIST_HEAD(&flow->peer_flows);
	refcount_set(&flow->refcnt, 1);
	init_completion(&flow->init_done);
	init_completion(&flow->del_hw_done);
@@ -4443,7 +4447,7 @@ static int mlx5e_tc_add_fdb_peer_flow(struct flow_cls_offload *f,
		goto out;
	}

	flow->peer_flow = peer_flow;
	list_add_tail(&peer_flow->peer_flows, &flow->peer_flows);
	flow_flag_set(flow, DUP);
	mutex_lock(&esw->offloads.peer_mutex);
	list_add_tail(&flow->peer, &esw->offloads.peer_flows);
@@ -4741,19 +4745,26 @@ int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv,
	if (!peer_esw)
		goto out;

	if (flow_flag_test(flow, DUP) &&
	    flow_flag_test(flow->peer_flow, OFFLOADED)) {
		u64 bytes2;
	if (flow_flag_test(flow, DUP)) {
		struct mlx5e_tc_flow *peer_flow;

		list_for_each_entry(peer_flow, &flow->peer_flows, peer_flows) {
			u64 packets2;
			u64 lastuse2;
			u64 bytes2;

			if (!flow_flag_test(peer_flow, OFFLOADED))
				continue;
			if (flow_flag_test(flow, USE_ACT_STATS)) {
				f->use_act_stats = true;
		} else {
			counter = mlx5e_tc_get_counter(flow->peer_flow);
				break;
			}

			counter = mlx5e_tc_get_counter(peer_flow);
			if (!counter)
				goto no_peer_counter;
			mlx5_fc_query_cached(counter, &bytes2, &packets2, &lastuse2);
			mlx5_fc_query_cached(counter, &bytes2, &packets2,
					     &lastuse2);

			bytes += bytes2;
			packets += packets2;