Commit ba46c96d authored by David S. Miller
Browse files

Merge tag 'mlx5-fixes-2023-05-22' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux



Saeed Mahameed says:

====================
mlx5-fixes-2023-05-22

This series provides bug fixes for the mlx5 driver.
Please pull and let me know if there is any problem.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
parents 04910d8c 1da438c0
Loading
Loading
Loading
Loading
+2 −1
Original line number Diff line number Diff line
@@ -1920,9 +1920,10 @@ static void mlx5_cmd_err_trace(struct mlx5_core_dev *dev, u16 opcode, u16 op_mod
static void cmd_status_log(struct mlx5_core_dev *dev, u16 opcode, u8 status,
			   u32 syndrome, int err)
{
	const char *namep = mlx5_command_str(opcode);
	struct mlx5_cmd_stats *stats;

	if (!err)
	if (!err || !(strcmp(namep, "unknown command opcode")))
		return;

	stats = &dev->cmd.stats[opcode];
+2 −0
Original line number Diff line number Diff line
@@ -175,6 +175,8 @@ static bool mlx5e_ptp_poll_ts_cq(struct mlx5e_cq *cq, int budget)
	/* ensure cq space is freed before enabling more cqes */
	wmb();

	mlx5e_txqsq_wake(&ptpsq->txqsq);

	return work_done == budget;
}

+3 −1
Original line number Diff line number Diff line
@@ -1369,11 +1369,13 @@ static void mlx5e_invalidate_encap(struct mlx5e_priv *priv,
	struct mlx5e_tc_flow *flow;

	list_for_each_entry(flow, encap_flows, tmp_list) {
		struct mlx5_flow_attr *attr = flow->attr;
		struct mlx5_esw_flow_attr *esw_attr;
		struct mlx5_flow_attr *attr;

		if (!mlx5e_is_offloaded_flow(flow))
			continue;

		attr = mlx5e_tc_get_encap_attr(flow);
		esw_attr = attr->esw_attr;

		if (flow_flag_test(flow, SLOW))
+2 −0
Original line number Diff line number Diff line
@@ -193,6 +193,8 @@ static inline u16 mlx5e_txqsq_get_next_pi(struct mlx5e_txqsq *sq, u16 size)
	return pi;
}

void mlx5e_txqsq_wake(struct mlx5e_txqsq *sq);

static inline u16 mlx5e_shampo_get_cqe_header_index(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
{
	return be16_to_cpu(cqe->shampo.header_entry_index) & (rq->mpwqe.shampo->hd_per_wq - 1);
+40 −17
Original line number Diff line number Diff line
@@ -1665,11 +1665,9 @@ bool mlx5e_tc_is_vf_tunnel(struct net_device *out_dev, struct net_device *route_
int mlx5e_tc_query_route_vport(struct net_device *out_dev, struct net_device *route_dev, u16 *vport)
{
	struct mlx5e_priv *out_priv, *route_priv;
	struct mlx5_devcom *devcom = NULL;
	struct mlx5_core_dev *route_mdev;
	struct mlx5_eswitch *esw;
	u16 vhca_id;
	int err;

	out_priv = netdev_priv(out_dev);
	esw = out_priv->mdev->priv.eswitch;
@@ -1678,6 +1676,9 @@ int mlx5e_tc_query_route_vport(struct net_device *out_dev, struct net_device *ro

	vhca_id = MLX5_CAP_GEN(route_mdev, vhca_id);
	if (mlx5_lag_is_active(out_priv->mdev)) {
		struct mlx5_devcom *devcom;
		int err;

		/* In lag case we may get devices from different eswitch instances.
		 * If we failed to get vport num, it means, mostly, that we on the wrong
		 * eswitch.
@@ -1686,18 +1687,18 @@ int mlx5e_tc_query_route_vport(struct net_device *out_dev, struct net_device *ro
		if (err != -ENOENT)
			return err;

		rcu_read_lock();
		devcom = out_priv->mdev->priv.devcom;
		esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
		if (!esw)
			return -ENODEV;
	}
		esw = mlx5_devcom_get_peer_data_rcu(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
		err = esw ? mlx5_eswitch_vhca_id_to_vport(esw, vhca_id, vport) : -ENODEV;
		rcu_read_unlock();

	err = mlx5_eswitch_vhca_id_to_vport(esw, vhca_id, vport);
	if (devcom)
		mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
		return err;
	}

	return mlx5_eswitch_vhca_id_to_vport(esw, vhca_id, vport);
}

static int
set_encap_dests(struct mlx5e_priv *priv,
		struct mlx5e_tc_flow *flow,
@@ -5301,6 +5302,8 @@ int mlx5e_tc_esw_init(struct mlx5_rep_uplink_priv *uplink_priv)
		goto err_action_counter;
	}

	mlx5_esw_offloads_devcom_init(esw);

	return 0;

err_action_counter:
@@ -5329,7 +5332,7 @@ void mlx5e_tc_esw_cleanup(struct mlx5_rep_uplink_priv *uplink_priv)
	priv = netdev_priv(rpriv->netdev);
	esw = priv->mdev->priv.eswitch;

	mlx5e_tc_clean_fdb_peer_flows(esw);
	mlx5_esw_offloads_devcom_cleanup(esw);

	mlx5e_tc_tun_cleanup(uplink_priv->encap);

@@ -5643,22 +5646,43 @@ bool mlx5e_tc_update_skb_nic(struct mlx5_cqe64 *cqe, struct sk_buff *skb)
				   0, NULL);
}

static struct mapping_ctx *
mlx5e_get_priv_obj_mapping(struct mlx5e_priv *priv)
{
	struct mlx5e_tc_table *tc;
	struct mlx5_eswitch *esw;
	struct mapping_ctx *ctx;

	if (is_mdev_switchdev_mode(priv->mdev)) {
		esw = priv->mdev->priv.eswitch;
		ctx = esw->offloads.reg_c0_obj_pool;
	} else {
		tc = mlx5e_fs_get_tc(priv->fs);
		ctx = tc->mapping;
	}

	return ctx;
}

int mlx5e_tc_action_miss_mapping_get(struct mlx5e_priv *priv, struct mlx5_flow_attr *attr,
				     u64 act_miss_cookie, u32 *act_miss_mapping)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_mapped_obj mapped_obj = {};
	struct mlx5_eswitch *esw;
	struct mapping_ctx *ctx;
	int err;

	ctx = esw->offloads.reg_c0_obj_pool;

	ctx = mlx5e_get_priv_obj_mapping(priv);
	mapped_obj.type = MLX5_MAPPED_OBJ_ACT_MISS;
	mapped_obj.act_miss_cookie = act_miss_cookie;
	err = mapping_add(ctx, &mapped_obj, act_miss_mapping);
	if (err)
		return err;

	if (!is_mdev_switchdev_mode(priv->mdev))
		return 0;

	esw = priv->mdev->priv.eswitch;
	attr->act_id_restore_rule = esw_add_restore_rule(esw, *act_miss_mapping);
	if (IS_ERR(attr->act_id_restore_rule))
		goto err_rule;
@@ -5673,10 +5697,9 @@ int mlx5e_tc_action_miss_mapping_get(struct mlx5e_priv *priv, struct mlx5_flow_a
void mlx5e_tc_action_miss_mapping_put(struct mlx5e_priv *priv, struct mlx5_flow_attr *attr,
				      u32 act_miss_mapping)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mapping_ctx *ctx;
	struct mapping_ctx *ctx = mlx5e_get_priv_obj_mapping(priv);

	ctx = esw->offloads.reg_c0_obj_pool;
	if (is_mdev_switchdev_mode(priv->mdev))
		mlx5_del_flow_rules(attr->act_id_restore_rule);
	mapping_remove(ctx, act_miss_mapping);
}
Loading