Commit f42139ba authored by Maxim Mikityanskiy, committed by Saeed Mahameed
Browse files

net/mlx5e: Use spin_lock_bh for async_icosq_lock



async_icosq_lock may be taken from softirq and non-softirq contexts. It
requires protection with spin_lock_bh, otherwise a softirq may be
triggered in the middle of the critical section, and it may deadlock if
it tries to take the same lock. This patch fixes such a scenario by
using spin_lock_bh to disable softirqs on that CPU while inside the
critical section.

Fixes: 8d94b590 ("net/mlx5e: Turn XSK ICOSQ into a general asynchronous one")
Signed-off-by: Maxim Mikityanskiy <maximmi@mellanox.com>
Reviewed-by: Tariq Toukan <tariqt@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
parent 78c906e4
Loading
Loading
Loading
Loading
+2 −2
Original line number Diff line number Diff line
@@ -122,9 +122,9 @@ void mlx5e_activate_xsk(struct mlx5e_channel *c)
	set_bit(MLX5E_RQ_STATE_ENABLED, &c->xskrq.state);
	/* TX queue is created active. */

-	spin_lock(&c->async_icosq_lock);
+	spin_lock_bh(&c->async_icosq_lock);
	mlx5e_trigger_irq(&c->async_icosq);
-	spin_unlock(&c->async_icosq_lock);
+	spin_unlock_bh(&c->async_icosq_lock);
}

void mlx5e_deactivate_xsk(struct mlx5e_channel *c)
+2 −2
Original line number Diff line number Diff line
@@ -36,9 +36,9 @@ int mlx5e_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags)
		if (test_and_set_bit(MLX5E_SQ_STATE_PENDING_XSK_TX, &c->async_icosq.state))
			return 0;

-		spin_lock(&c->async_icosq_lock);
+		spin_lock_bh(&c->async_icosq_lock);
		mlx5e_trigger_irq(&c->async_icosq);
-		spin_unlock(&c->async_icosq_lock);
+		spin_unlock_bh(&c->async_icosq_lock);
	}

	return 0;
+7 −7
Original line number Diff line number Diff line
@@ -188,7 +188,7 @@ static int post_rx_param_wqes(struct mlx5e_channel *c,

	err = 0;
	sq = &c->async_icosq;
-	spin_lock(&c->async_icosq_lock);
+	spin_lock_bh(&c->async_icosq_lock);

	cseg = post_static_params(sq, priv_rx);
	if (IS_ERR(cseg))
@@ -199,7 +199,7 @@ static int post_rx_param_wqes(struct mlx5e_channel *c,

	mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, cseg);
unlock:
-	spin_unlock(&c->async_icosq_lock);
+	spin_unlock_bh(&c->async_icosq_lock);

	return err;

@@ -265,10 +265,10 @@ resync_post_get_progress_params(struct mlx5e_icosq *sq,

	BUILD_BUG_ON(MLX5E_KTLS_GET_PROGRESS_WQEBBS != 1);

-	spin_lock(&sq->channel->async_icosq_lock);
+	spin_lock_bh(&sq->channel->async_icosq_lock);

	if (unlikely(!mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, 1))) {
-		spin_unlock(&sq->channel->async_icosq_lock);
+		spin_unlock_bh(&sq->channel->async_icosq_lock);
		err = -ENOSPC;
		goto err_dma_unmap;
	}
@@ -299,7 +299,7 @@ resync_post_get_progress_params(struct mlx5e_icosq *sq,
	icosq_fill_wi(sq, pi, &wi);
	sq->pc++;
	mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, cseg);
-	spin_unlock(&sq->channel->async_icosq_lock);
+	spin_unlock_bh(&sq->channel->async_icosq_lock);

	return 0;

@@ -360,7 +360,7 @@ static int resync_handle_seq_match(struct mlx5e_ktls_offload_context_rx *priv_rx
	err = 0;

	sq = &c->async_icosq;
-	spin_lock(&c->async_icosq_lock);
+	spin_lock_bh(&c->async_icosq_lock);

	cseg = post_static_params(sq, priv_rx);
	if (IS_ERR(cseg)) {
@@ -372,7 +372,7 @@ static int resync_handle_seq_match(struct mlx5e_ktls_offload_context_rx *priv_rx
	mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, cseg);
	priv_rx->stats->tls_resync_res_ok++;
unlock:
-	spin_unlock(&c->async_icosq_lock);
+	spin_unlock_bh(&c->async_icosq_lock);

	return err;
}