Unverified commit 7b1712f0, authored by openeuler-ci-bot, committed by Gitee
Browse files

!13896 RDMA/hns: Fix flush cqe error when racing with destroy qp

Merge Pull Request from: @huwentao0417 
 
|commitID|commit message|conflict|
|--|--|--|
|377a2097705b91|RDMA/hns: Fix flush cqe error when racing with destroy qp|Y|
 
 
Link: https://gitee.com/openeuler/kernel/pulls/13896

 

Reviewed-by: Chengchang Tang <tangchengchang@huawei.com>
Reviewed-by: Li Nan <linan122@huawei.com>
Signed-off-by: Li Nan <linan122@huawei.com>
parents 7d11d9cf 2b39c452
Loading
Loading
Loading
Loading
+2 −0
Original line number Diff line number Diff line
@@ -717,6 +717,7 @@ struct hns_roce_dev;

/*
 * Bit indices used with hr_qp->flush_flag (see test_bit()/set_bit()
 * callers in this change).
 *
 * HNS_ROCE_STOP_FLUSH_FLAG is set by the destroy-QP path under
 * hr_qp->flush_lock so that init_flush_work() refuses to queue new
 * flush work once destroy has begun.
 * HNS_ROCE_FLUSH_FLAG's use is not visible in this hunk — presumably
 * it marks that a flush is pending/in progress; confirm against the
 * full driver source.
 */
enum {
	HNS_ROCE_FLUSH_FLAG = 0,
	HNS_ROCE_STOP_FLUSH_FLAG = 1,
};

struct hns_roce_work {
@@ -777,6 +778,7 @@ struct hns_roce_qp {
	bool			delayed_destroy_flag;
	struct hns_roce_mtr_node *mtr_node;
	struct hns_roce_dip *dip;
	spinlock_t flush_lock;
};

struct hns_roce_ib_iboe {
+7 −0
Original line number Diff line number Diff line
@@ -6330,11 +6330,18 @@ int hns_roce_v2_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
	unsigned long flags;
	int ret;

	if (hr_qp->congest_type == HNS_ROCE_CONGEST_TYPE_DIP)
		put_dip_ctx_idx(hr_dev, hr_qp);

	/* Make sure flush_cqe() is completed */
	spin_lock_irqsave(&hr_qp->flush_lock, flags);
	set_bit(HNS_ROCE_STOP_FLUSH_FLAG, &hr_qp->flush_flag);
	spin_unlock_irqrestore(&hr_qp->flush_lock, flags);
	flush_work(&hr_qp->flush_work.work);

	ret = hns_roce_v2_destroy_qp_common(hr_dev, hr_qp, udata);
	if (ret)
		ibdev_err_ratelimited(&hr_dev->ib_dev,
+13 −2
Original line number Diff line number Diff line
@@ -71,11 +71,18 @@ static void flush_work_handle(struct work_struct *work)
/*
 * Queue hr_qp's flush work on the device's IRQ workqueue, unless the QP
 * is being destroyed.
 *
 * The destroy path sets HNS_ROCE_STOP_FLUSH_FLAG under hr_qp->flush_lock
 * before flush_work()-ing any pending work, so testing the bit under the
 * same lock here closes the flush-vs-destroy race this commit fixes: no
 * new work can be queued after destroy has committed to tearing down.
 */
void init_flush_work(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
	struct hns_roce_work *flush_work = &hr_qp->flush_work;
	unsigned long flags;

	spin_lock_irqsave(&hr_qp->flush_lock, flags);
	/* Exit directly after destroy_qp() */
	if (test_bit(HNS_ROCE_STOP_FLUSH_FLAG, &hr_qp->flush_flag)) {
		spin_unlock_irqrestore(&hr_qp->flush_lock, flags);
		return;
	}

	flush_work->hr_dev = hr_dev;
	INIT_WORK(&flush_work->work, flush_work_handle);
	/*
	 * Take a QP reference on behalf of the queued work; presumably
	 * dropped by flush_work_handle() — not visible in this hunk,
	 * verify against the full driver source.
	 */
	refcount_inc(&hr_qp->refcount);
	queue_work(hr_dev->irq_workq, &flush_work->work);
	spin_unlock_irqrestore(&hr_qp->flush_lock, flags);
}

void flush_cqe(struct hns_roce_dev *dev, struct hns_roce_qp *qp)
@@ -1348,6 +1355,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
				     struct ib_udata *udata,
				     struct hns_roce_qp *hr_qp)
{
	struct hns_roce_work *flush_work = &hr_qp->flush_work;
	struct hns_roce_ib_create_qp_resp resp = {};
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct hns_roce_ib_create_qp ucmd = {};
@@ -1356,9 +1364,12 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
	mutex_init(&hr_qp->mutex);
	spin_lock_init(&hr_qp->sq.lock);
	spin_lock_init(&hr_qp->rq.lock);
	spin_lock_init(&hr_qp->flush_lock);

	hr_qp->state = IB_QPS_RESET;
	hr_qp->flush_flag = 0;
	flush_work->hr_dev = hr_dev;
	INIT_WORK(&flush_work->work, flush_work_handle);

	ret = set_qp_param(hr_dev, hr_qp, init_attr, udata, &ucmd);
	if (ret) {