Commit 312b8f79 authored by Mark Zhang, committed by Leon Romanovsky

RDMA/mlx: Calling qp event handler in workqueue context

Move the call of qp event handler from atomic to workqueue context,
so that the handler is able to block. This is needed by following
patches.

Signed-off-by: Mark Zhang <markzhang@nvidia.com>
Reviewed-by: Patrisious Haddad <phaddad@nvidia.com>
Link: https://lore.kernel.org/r/0cd17b8331e445f03942f4bb28d447f24ac5669d.1672821186.git.leonro@nvidia.com
Signed-off-by: Leon Romanovsky <leon@kernel.org>
parent 1ca49d26
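The patch applies a standard kernel deferral pattern: the event callback, which runs in atomic context, only captures the event into a heap-allocated work item and queues it; the actual dispatch to the ULP's event handler happens later in process context, where blocking is legal. A minimal standalone sketch of the pattern, with hypothetical demo_* names (not part of this patch):

#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct demo_event_work {
	struct work_struct work;
	int type;		/* event data captured at enqueue time */
};

static struct workqueue_struct *demo_wq;

static void demo_handle_event(struct work_struct *_work)
{
	struct demo_event_work *ew =
		container_of(_work, struct demo_event_work, work);

	/* Workqueue (process) context: sleeping is allowed here. */
	pr_info("handling event type %d\n", ew->type);
	kfree(ew);
}

/* Runs in atomic context (e.g. an EQ interrupt handler): must not sleep. */
static void demo_event(int type)
{
	struct demo_event_work *ew;

	ew = kzalloc(sizeof(*ew), GFP_ATOMIC);	/* no blocking allocation */
	if (!ew)
		return;		/* the event is dropped under memory pressure */

	ew->type = type;
	INIT_WORK(&ew->work, demo_handle_event);
	queue_work(demo_wq, &ew->work);
}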
drivers/infiniband/hw/mlx4/main.c +8 −0
@@ -3303,6 +3303,10 @@ static int __init mlx4_ib_init(void)
 	if (!wq)
 		return -ENOMEM;
 
+	err = mlx4_ib_qp_event_init();
+	if (err)
+		goto clean_qp_event;
+
 	err = mlx4_ib_cm_init();
 	if (err)
 		goto clean_wq;
@@ -3324,6 +3328,9 @@ static int __init mlx4_ib_init(void)
 	mlx4_ib_cm_destroy();
 
 clean_wq:
+	mlx4_ib_qp_event_cleanup();
+
+clean_qp_event:
 	destroy_workqueue(wq);
 	return err;
 }
@@ -3333,6 +3340,7 @@ static void __exit mlx4_ib_cleanup(void)
 	mlx4_unregister_interface(&mlx4_ib_interface);
 	mlx4_ib_mcg_destroy();
 	mlx4_ib_cm_destroy();
+	mlx4_ib_qp_event_cleanup();
 	destroy_workqueue(wq);
 }
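The init hunks above follow the kernel's usual goto-unwind idiom: each setup step that fails jumps to a label that tears down, in reverse order, only the steps that already succeeded. A compressed sketch of the shape (hypothetical setup_*/teardown_* names, not from this patch):

static int __init demo_init(void)
{
	int err;

	err = setup_a();
	if (err)
		return err;

	err = setup_b();	/* e.g. creating the qp event workqueue */
	if (err)
		goto undo_a;

	err = setup_c();
	if (err)
		goto undo_b;

	return 0;

undo_b:
	teardown_b();
undo_a:
	teardown_a();
	return err;
}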

drivers/infiniband/hw/mlx4/mlx4_ib.h +3 −0
@@ -940,4 +940,7 @@ int mlx4_ib_umem_calc_optimal_mtt_size(struct ib_umem *umem, u64 start_va,
 int mlx4_ib_cm_init(void);
 void mlx4_ib_cm_destroy(void);
 
+int mlx4_ib_qp_event_init(void);
+void mlx4_ib_qp_event_cleanup(void);
+
 #endif /* MLX4_IB_H */
drivers/infiniband/hw/mlx4/qp.c +85 −36
@@ -102,6 +102,14 @@ enum mlx4_ib_source_type {
 	MLX4_IB_RWQ_SRC	= 1,
 };
 
+struct mlx4_ib_qp_event_work {
+	struct work_struct work;
+	struct mlx4_qp *qp;
+	enum mlx4_event type;
+};
+
+static struct workqueue_struct *mlx4_ib_qp_event_wq;
+
 static int is_tunnel_qp(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
 {
 	if (!mlx4_is_master(dev->dev))
@@ -200,18 +208,17 @@ static void stamp_send_wqe(struct mlx4_ib_qp *qp, int n)
 	}
 }
 
-static void mlx4_ib_qp_event(struct mlx4_qp *qp, enum mlx4_event type)
+static void mlx4_ib_handle_qp_event(struct work_struct *_work)
 {
-	struct ib_event event;
-	struct ib_qp *ibqp = &to_mibqp(qp)->ibqp;
+	struct mlx4_ib_qp_event_work *qpe_work =
+		container_of(_work, struct mlx4_ib_qp_event_work, work);
+	struct ib_qp *ibqp = &to_mibqp(qpe_work->qp)->ibqp;
+	struct ib_event event = {};
 
-	if (type == MLX4_EVENT_TYPE_PATH_MIG)
-		to_mibqp(qp)->port = to_mibqp(qp)->alt_port;
-
-	if (ibqp->event_handler) {
 	event.device = ibqp->device;
 	event.element.qp = ibqp;
-		switch (type) {
+
+	switch (qpe_work->type) {
 	case MLX4_EVENT_TYPE_PATH_MIG:
 		event.event = IB_EVENT_PATH_MIG;
 		break;
@@ -237,13 +244,41 @@ static void mlx4_ib_qp_event(struct mlx4_qp *qp, enum mlx4_event type)
 		event.event = IB_EVENT_QP_ACCESS_ERR;
 		break;
 	default:
-		pr_warn("Unexpected event type %d "
-		       "on QP %06x\n", type, qp->qpn);
-		return;
+		pr_warn("Unexpected event type %d on QP %06x\n",
+			qpe_work->type, qpe_work->qp->qpn);
+		goto out;
 	}
 
 	ibqp->event_handler(&event, ibqp->qp_context);
-	}
 
+out:
+	mlx4_put_qp(qpe_work->qp);
+	kfree(qpe_work);
 }
 
+static void mlx4_ib_qp_event(struct mlx4_qp *qp, enum mlx4_event type)
+{
+	struct ib_qp *ibqp = &to_mibqp(qp)->ibqp;
+	struct mlx4_ib_qp_event_work *qpe_work;
+
+	if (type == MLX4_EVENT_TYPE_PATH_MIG)
+		to_mibqp(qp)->port = to_mibqp(qp)->alt_port;
+
+	if (!ibqp->event_handler)
+		goto out_no_handler;
+
+	qpe_work = kzalloc(sizeof(*qpe_work), GFP_ATOMIC);
+	if (!qpe_work)
+		goto out_no_handler;
+
+	qpe_work->qp = qp;
+	qpe_work->type = type;
+	INIT_WORK(&qpe_work->work, mlx4_ib_handle_qp_event);
+	queue_work(mlx4_ib_qp_event_wq, &qpe_work->work);
+	return;
+
+out_no_handler:
+	mlx4_put_qp(qp);
+}
+
 static void mlx4_ib_wq_event(struct mlx4_qp *qp, enum mlx4_event type)
@@ -4468,3 +4503,17 @@ void mlx4_ib_drain_rq(struct ib_qp *qp)
 
 	handle_drain_completion(cq, &rdrain, dev);
 }
+
+int mlx4_ib_qp_event_init(void)
+{
+	mlx4_ib_qp_event_wq = alloc_ordered_workqueue("mlx4_ib_qp_event_wq", 0);
+	if (!mlx4_ib_qp_event_wq)
+		return -ENOMEM;
+
+	return 0;
+}
+
+void mlx4_ib_qp_event_cleanup(void)
+{
+	destroy_workqueue(mlx4_ib_qp_event_wq);
+}
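Two details in the qp.c hunk above are easy to miss. The workqueue is created with alloc_ordered_workqueue(), so queued items execute strictly one at a time, in queueing order; moving the handler out of atomic context therefore does not introduce concurrent or reordered QP event delivery. And the QP reference is now dropped (mlx4_put_qp()) from the work handler only after the event handler has returned, while the two paths that never queue work (no handler registered, or the GFP_ATOMIC allocation failing) drop it immediately; every path consumes the reference the caller handed in, which is what keeps the QP alive across the asynchronous handoff.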
drivers/infiniband/hw/mlx5/main.c +7 −0
@@ -4403,6 +4403,10 @@ static int __init mlx5_ib_init(void)
 		return -ENOMEM;
 	}
 
+	ret = mlx5_ib_qp_event_init();
+	if (ret)
+		goto qp_event_err;
+
 	mlx5_ib_odp_init();
 	ret = mlx5r_rep_init();
 	if (ret)
@@ -4420,6 +4424,8 @@ static int __init mlx5_ib_init(void)
 mp_err:
 	mlx5r_rep_cleanup();
 rep_err:
+	mlx5_ib_qp_event_cleanup();
+qp_event_err:
 	destroy_workqueue(mlx5_ib_event_wq);
 	free_page((unsigned long)xlt_emergency_page);
 	return ret;
@@ -4431,6 +4437,7 @@ static void __exit mlx5_ib_cleanup(void)
 	auxiliary_driver_unregister(&mlx5r_mp_driver);
 	mlx5r_rep_cleanup();
 
+	mlx5_ib_qp_event_cleanup();
 	destroy_workqueue(mlx5_ib_event_wq);
 	free_page((unsigned long)xlt_emergency_page);
 }
drivers/infiniband/hw/mlx5/qp.c +84 −35
@@ -71,6 +71,14 @@ struct mlx5_modify_raw_qp_param {
 	u32 port;
 };
 
+struct mlx5_ib_qp_event_work {
+	struct work_struct work;
+	struct mlx5_core_qp *qp;
+	int type;
+};
+
+static struct workqueue_struct *mlx5_ib_qp_event_wq;
+
 static void get_cqs(enum ib_qp_type qp_type,
 		    struct ib_cq *ib_send_cq, struct ib_cq *ib_recv_cq,
 		    struct mlx5_ib_cq **send_cq, struct mlx5_ib_cq **recv_cq);
@@ -302,20 +310,16 @@ int mlx5_ib_read_wqe_srq(struct mlx5_ib_srq *srq, int wqe_index, void *buffer,
 	return mlx5_ib_read_user_wqe_srq(srq, wqe_index, buffer, buflen, bc);
 }
 
-static void mlx5_ib_qp_event(struct mlx5_core_qp *qp, int type)
+static void mlx5_ib_handle_qp_event(struct work_struct *_work)
 {
-	struct ib_qp *ibqp = &to_mibqp(qp)->ibqp;
-	struct ib_event event;
-
-	if (type == MLX5_EVENT_TYPE_PATH_MIG) {
-		/* This event is only valid for trans_qps */
-		to_mibqp(qp)->port = to_mibqp(qp)->trans_qp.alt_port;
-	}
+	struct mlx5_ib_qp_event_work *qpe_work =
+		container_of(_work, struct mlx5_ib_qp_event_work, work);
+	struct ib_qp *ibqp = &to_mibqp(qpe_work->qp)->ibqp;
+	struct ib_event event = {};
 
-	if (ibqp->event_handler) {
 	event.device = ibqp->device;
 	event.element.qp = ibqp;
-		switch (type) {
+	switch (qpe_work->type) {
 	case MLX5_EVENT_TYPE_PATH_MIG:
 		event.event = IB_EVENT_PATH_MIG;
 		break;
@@ -341,12 +345,43 @@ static void mlx5_ib_qp_event(struct mlx5_core_qp *qp, int type)
 		event.event = IB_EVENT_QP_ACCESS_ERR;
 		break;
 	default:
-		pr_warn("mlx5_ib: Unexpected event type %d on QP %06x\n", type, qp->qpn);
-		return;
+		pr_warn("mlx5_ib: Unexpected event type %d on QP %06x\n",
+			qpe_work->type, qpe_work->qp->qpn);
+		goto out;
 	}
 
 	ibqp->event_handler(&event, ibqp->qp_context);
-	}
 
+out:
+	mlx5_core_res_put(&qpe_work->qp->common);
+	kfree(qpe_work);
 }
 
+static void mlx5_ib_qp_event(struct mlx5_core_qp *qp, int type)
+{
+	struct ib_qp *ibqp = &to_mibqp(qp)->ibqp;
+	struct mlx5_ib_qp_event_work *qpe_work;
+
+	if (type == MLX5_EVENT_TYPE_PATH_MIG) {
+		/* This event is only valid for trans_qps */
+		to_mibqp(qp)->port = to_mibqp(qp)->trans_qp.alt_port;
+	}
+
+	if (!ibqp->event_handler)
+		goto out_no_handler;
+
+	qpe_work = kzalloc(sizeof(*qpe_work), GFP_ATOMIC);
+	if (!qpe_work)
+		goto out_no_handler;
+
+	qpe_work->qp = qp;
+	qpe_work->type = type;
+	INIT_WORK(&qpe_work->work, mlx5_ib_handle_qp_event);
+	queue_work(mlx5_ib_qp_event_wq, &qpe_work->work);
+	return;
+
+out_no_handler:
+	mlx5_core_res_put(&qp->common);
+}
+
 static int set_rq_size(struct mlx5_ib_dev *dev, struct ib_qp_cap *cap,
@@ -5720,3 +5755,17 @@ int mlx5_ib_qp_set_counter(struct ib_qp *qp, struct rdma_counter *counter)
 	mutex_unlock(&mqp->mutex);
 	return err;
 }
+
+int mlx5_ib_qp_event_init(void)
+{
+	mlx5_ib_qp_event_wq = alloc_ordered_workqueue("mlx5_ib_qp_event_wq", 0);
+	if (!mlx5_ib_qp_event_wq)
+		return -ENOMEM;
+
+	return 0;
+}
+
+void mlx5_ib_qp_event_cleanup(void)
+{
+	destroy_workqueue(mlx5_ib_qp_event_wq);
+}
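The mlx5 hunks mirror the mlx4 ones almost one for one, with mlx5_core_res_put(&qp->common) as the reference-release primitive instead of mlx4_put_qp(). The point of the whole exercise is that a ULP's QP event handler may now sleep. A toy handler showing what becomes legal (hypothetical demo_* names; the handler signature is that of the event_handler field of struct ib_qp):

#include <linux/mutex.h>
#include <linux/printk.h>
#include <rdma/ib_verbs.h>

static DEFINE_MUTEX(demo_lock);

static void demo_qp_event_handler(struct ib_event *event, void *qp_context)
{
	/*
	 * mutex_lock() may sleep. This is safe only because the driver
	 * now calls the handler from workqueue (process) context; on the
	 * old atomic path it would have been a "scheduling while atomic"
	 * bug.
	 */
	mutex_lock(&demo_lock);
	pr_info("QP event %d on qp %p\n", event->event, event->element.qp);
	mutex_unlock(&demo_lock);
}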