Commit fe5235ae authored by Jakub Kicinski

Merge tag 'mlx5-fixes-2022-07-06' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5 fixes 2022-07-06

This series provides bug fixes to mlx5 driver.

* tag 'mlx5-fixes-2022-07-06' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux:
  net/mlx5e: Ring the TX doorbell on DMA errors
  net/mlx5e: Fix capability check for updating vnic env counters
  net/mlx5e: CT: Use own workqueue instead of mlx5e priv
  net/mlx5: Lag, correct get the port select mode str
  net/mlx5e: Fix enabling sriov while tc nic rules are offloaded
  net/mlx5e: kTLS, Fix build time constant test in RX
  net/mlx5e: kTLS, Fix build time constant test in TX
  net/mlx5: Lag, decouple FDB selection and shared FDB
  net/mlx5: TC, allow offload from uplink to other PF's VF
====================

Link: https://lore.kernel.org/r/20220706231309.38579-1-saeed@kernel.org


Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents 0680e20a 5b759bf2
drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c  +12 −8
@@ -76,6 +76,7 @@ struct mlx5_tc_ct_priv {
 	struct mlx5_ct_fs *fs;
 	struct mlx5_ct_fs_ops *fs_ops;
 	spinlock_t ht_lock; /* protects ft entries */
+	struct workqueue_struct *wq;
 
 	struct mlx5_tc_ct_debugfs debugfs;
 };
@@ -941,14 +942,11 @@ static void mlx5_tc_ct_entry_del_work(struct work_struct *work)
 static void
 __mlx5_tc_ct_entry_put(struct mlx5_ct_entry *entry)
 {
-	struct mlx5e_priv *priv;
-
 	if (!refcount_dec_and_test(&entry->refcnt))
 		return;
 
-	priv = netdev_priv(entry->ct_priv->netdev);
 	INIT_WORK(&entry->work, mlx5_tc_ct_entry_del_work);
-	queue_work(priv->wq, &entry->work);
+	queue_work(entry->ct_priv->wq, &entry->work);
 }
 
 static struct mlx5_ct_counter *
@@ -1759,19 +1757,16 @@ mlx5_tc_ct_flush_ft_entry(void *ptr, void *arg)
 static void
 mlx5_tc_ct_del_ft_cb(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_ft *ft)
 {
-	struct mlx5e_priv *priv;
-
 	if (!refcount_dec_and_test(&ft->refcount))
 		return;
 
+	flush_workqueue(ct_priv->wq);
 	nf_flow_table_offload_del_cb(ft->nf_ft,
 				     mlx5_tc_ct_block_flow_offload, ft);
 	rhashtable_remove_fast(&ct_priv->zone_ht, &ft->node, zone_params);
 	rhashtable_free_and_destroy(&ft->ct_entries_ht,
 				    mlx5_tc_ct_flush_ft_entry,
 				    ct_priv);
-	priv = netdev_priv(ct_priv->netdev);
-	flush_workqueue(priv->wq);
 	mlx5_tc_ct_free_pre_ct_tables(ft);
 	mapping_remove(ct_priv->zone_mapping, ft->zone_restore_id);
 	kfree(ft);
@@ -2176,6 +2171,12 @@ mlx5_tc_ct_init(struct mlx5e_priv *priv, struct mlx5_fs_chains *chains,
 	if (rhashtable_init(&ct_priv->ct_tuples_nat_ht, &tuples_nat_ht_params))
 		goto err_ct_tuples_nat_ht;
 
+	ct_priv->wq = alloc_ordered_workqueue("mlx5e_ct_priv_wq", 0);
+	if (!ct_priv->wq) {
+		err = -ENOMEM;
+		goto err_wq;
+	}
+
 	err = mlx5_tc_ct_fs_init(ct_priv);
 	if (err)
 		goto err_init_fs;
@@ -2184,6 +2185,8 @@ mlx5_tc_ct_init(struct mlx5e_priv *priv, struct mlx5_fs_chains *chains,
 	return ct_priv;
 
 err_init_fs:
+	destroy_workqueue(ct_priv->wq);
+err_wq:
 	rhashtable_destroy(&ct_priv->ct_tuples_nat_ht);
 err_ct_tuples_nat_ht:
 	rhashtable_destroy(&ct_priv->ct_tuples_ht);
@@ -2213,6 +2216,7 @@ mlx5_tc_ct_clean(struct mlx5_tc_ct_priv *ct_priv)
 	if (!ct_priv)
 		return;
 
+	destroy_workqueue(ct_priv->wq);
 	mlx5_ct_tc_remove_dbgfs(ct_priv);
 	chains = ct_priv->chains;
 
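For readers less familiar with the workqueue calls used above, here is a minimal sketch of the lifecycle this patch moves into ct_priv: allocate a private ordered queue at init, queue deferred entry teardown on it, flush it before tearing down shared state, and destroy it at cleanup. All demo_* names are hypothetical stand-ins, not the driver's real types, and the snippet only builds as kernel code.

#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/errno.h>

struct demo_priv {
	struct workqueue_struct *wq;	/* private ordered queue, like ct_priv->wq */
};

struct demo_entry {
	struct work_struct work;
};

static void demo_entry_del_work(struct work_struct *work)
{
	struct demo_entry *entry = container_of(work, struct demo_entry, work);

	kfree(entry);			/* deferred teardown runs on the private queue */
}

static int demo_init(struct demo_priv *priv)
{
	/* Ordered queue: one work item at a time, in submission order. */
	priv->wq = alloc_ordered_workqueue("demo_priv_wq", 0);
	return priv->wq ? 0 : -ENOMEM;
}

static void demo_entry_put(struct demo_priv *priv, struct demo_entry *entry)
{
	INIT_WORK(&entry->work, demo_entry_del_work);
	queue_work(priv->wq, &entry->work);	/* no dependency on the mlx5e priv workqueue */
}

static void demo_clean(struct demo_priv *priv)
{
	flush_workqueue(priv->wq);	/* wait for pending deletions first */
	destroy_workqueue(priv->wq);
}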
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c  +1 −2
@@ -231,8 +231,7 @@ mlx5e_set_ktls_rx_priv_ctx(struct tls_context *tls_ctx,
 	struct mlx5e_ktls_offload_context_rx **ctx =
 		__tls_driver_ctx(tls_ctx, TLS_OFFLOAD_CTX_DIR_RX);
 
-	BUILD_BUG_ON(sizeof(struct mlx5e_ktls_offload_context_rx *) >
-		     TLS_OFFLOAD_CONTEXT_SIZE_RX);
+	BUILD_BUG_ON(sizeof(priv_rx) > TLS_DRIVER_STATE_SIZE_RX);
 
 	*ctx = priv_rx;
 }
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c  +1 −2
@@ -68,8 +68,7 @@ mlx5e_set_ktls_tx_priv_ctx(struct tls_context *tls_ctx,
 	struct mlx5e_ktls_offload_context_tx **ctx =
 		__tls_driver_ctx(tls_ctx, TLS_OFFLOAD_CTX_DIR_TX);
 
-	BUILD_BUG_ON(sizeof(struct mlx5e_ktls_offload_context_tx *) >
-		     TLS_OFFLOAD_CONTEXT_SIZE_TX);
+	BUILD_BUG_ON(sizeof(priv_tx) > TLS_DRIVER_STATE_SIZE_TX);
 
 	*ctx = priv_tx;
 }
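The two kTLS hunks above share one idea, so a single hedged sketch: the driver stores only a pointer in the TLS context's per-direction driver-state area, so the compile-time bound has to be the driver-state capacity (TLS_DRIVER_STATE_SIZE_*), not the size of the whole offload context (TLS_OFFLOAD_CONTEXT_SIZE_*), which is larger and so the old comparison could not realistically trip. The standalone C below uses hypothetical names and sizes to show the pattern, not the kernel's real layout.

#include <stddef.h>
#include <string.h>

#define DRIVER_STATE_SIZE 16			/* stand-in for TLS_DRIVER_STATE_SIZE_TX */

struct offload_ctx {
	unsigned long core_fields[8];		/* stand-in for the generic offload state */
	unsigned char driver_state[DRIVER_STATE_SIZE];	/* driver's private scratch area */
};

struct drv_priv { int dummy; };

static void set_priv_ctx(struct offload_ctx *ctx, struct drv_priv *priv)
{
	/* Bound what is actually stored (a pointer) by the area that stores it.
	 * Comparing against sizeof(struct offload_ctx), as the old test did,
	 * cannot fail because the full context is always larger than a pointer.
	 */
	_Static_assert(sizeof(priv) <= DRIVER_STATE_SIZE,
		       "driver pointer must fit in driver_state");
	memcpy(ctx->driver_state, &priv, sizeof(priv));
}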
drivers/net/ethernet/mellanox/mlx5/core/en_stats.c  +1 −1
@@ -688,7 +688,7 @@ static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(vnic_env)
 	u32 in[MLX5_ST_SZ_DW(query_vnic_env_in)] = {};
 	struct mlx5_core_dev *mdev = priv->mdev;
 
-	if (!MLX5_CAP_GEN(priv->mdev, nic_receive_steering_discard))
+	if (!mlx5e_stats_grp_vnic_env_num_stats(priv))
		return;
 
 	MLX5_SET(query_vnic_env_in, in, opcode, MLX5_CMD_OP_QUERY_VNIC_ENV);
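A brief, hedged sketch of the pattern behind this one-line change (helper and field names below are hypothetical): derive the "anything to update?" guard from the same helper that counts the exposed vnic env counters, instead of testing a single capability bit that only covers one of them.

#include <stdbool.h>

struct dev_caps {
	bool steering_discard;		/* stand-ins for the relevant HCA capabilities */
	bool internal_queue_oob;
};

/* Single source of truth: how many vnic env counters this device exposes. */
static int vnic_num_stats(const struct dev_caps *caps)
{
	return (caps->steering_discard ? 1 : 0) +
	       (caps->internal_queue_oob ? 1 : 0);
}

static void vnic_update_stats(const struct dev_caps *caps)
{
	/* Gate the firmware query on the same helper the fill path uses;
	 * checking only one capability would skip the query on devices that
	 * support the other counters.
	 */
	if (!vnic_num_stats(caps))
		return;

	/* ... query the device and parse the counters here ... */
}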
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c  +1 −1
@@ -3793,7 +3793,7 @@ static bool is_lag_dev(struct mlx5e_priv *priv,
 
 static bool is_multiport_eligible(struct mlx5e_priv *priv, struct net_device *out_dev)
 {
-	if (mlx5e_eswitch_uplink_rep(out_dev) &&
+	if (same_hw_reps(priv, out_dev) &&
 	    MLX5_CAP_PORT_SELECTION(priv->mdev, port_select_flow_table) &&
 	    MLX5_CAP_GEN(priv->mdev, create_lag_when_not_master_up))
 		return true;