Commit 1620f09b authored by Wenpeng Liang, committed by Jason Gunthorpe

RDMA/hns: Bugfix for checking whether the srq is full when post wr

If a user posts WRs via a wr_list, the head pointer of the idx_queue is
not updated until all WQEs have been filled, so checking whether head
equals tail gives a wrong result. Fix this by moving the head and tail
pointers from the srq structure into the idx_queue structure; once the
idx_queue has been filled with WQE indexes, its head pointer is advanced.
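
As an illustration of the failure mode, here is a hedged sketch with
hypothetical helper names (srq_full_old/srq_full_new are not driver
functions; only the new logic matches the patch below). Because head is
advanced only once, after the whole list is processed, the old per-WR
fullness test never sees the WQEs consumed earlier in the same call:

	/* Hypothetical sketch, not driver code. */
	static int srq_full_old(u32 head, u32 tail)
	{
		/* Misses WQEs already taken by earlier WRs in this call. */
		return head == tail;
	}

	static int srq_full_new(u32 head, u32 tail, u32 wqe_cnt, int nreq)
	{
		u32 cur = head - tail;	/* entries currently in use */

		/* Account for the nreq WQEs this call has already taken. */
		return cur + nreq >= wqe_cnt - 1;
	}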

Fixes: c7bcb134 ("RDMA/hns: Add SRQ support for hip08 kernel mode")
Link: https://lore.kernel.org/r/1611997090-48820-3-git-send-email-liweihang@huawei.com
Signed-off-by: Wenpeng Liang <liangwenpeng@huawei.com>
Signed-off-by: Weihang Li <liweihang@huawei.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
parent 9dd05247
drivers/infiniband/hw/hns/hns_roce_device.h +2 −2
@@ -494,6 +494,8 @@ struct hns_roce_idx_que {
	struct hns_roce_mtr		mtr;
	int				entry_shift;
	unsigned long			*bitmap;
+	u32				head;
+	u32				tail;
};

struct hns_roce_srq {
@@ -513,8 +515,6 @@ struct hns_roce_srq {
	u64		       *wrid;
	struct hns_roce_idx_que idx_que;
	spinlock_t		lock;
-	u16			head;
-	u16			tail;
	struct mutex		mutex;
	void (*event)(struct hns_roce_srq *srq, enum hns_roce_event event);
};
drivers/infiniband/hw/hns/hns_roce_hw_v2.c +14 −5
@@ -849,11 +849,20 @@ static void hns_roce_free_srq_wqe(struct hns_roce_srq *srq, int wqe_index)
	spin_lock(&srq->lock);

	bitmap_clear(srq->idx_que.bitmap, wqe_index, 1);
-	srq->tail++;
+	srq->idx_que.tail++;

	spin_unlock(&srq->lock);
}

+int hns_roce_srqwq_overflow(struct hns_roce_srq *srq, int nreq)
+{
+	struct hns_roce_idx_que *idx_que = &srq->idx_que;
+	unsigned int cur;
+
+	cur = idx_que->head - idx_que->tail;
+	return cur + nreq >= srq->wqe_cnt - 1;
+}
+
static int find_empty_entry(struct hns_roce_idx_que *idx_que,
			    unsigned long size)
{
@@ -889,7 +898,7 @@ static int hns_roce_v2_post_srq_recv(struct ib_srq *ibsrq,

	spin_lock_irqsave(&srq->lock, flags);

-	ind = srq->head & (srq->wqe_cnt - 1);
+	ind = srq->idx_que.head & (srq->wqe_cnt - 1);
	max_sge = srq->max_gs - srq->rsv_sge;

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
@@ -902,7 +911,7 @@ static int hns_roce_v2_post_srq_recv(struct ib_srq *ibsrq,
			break;
		}

-		if (unlikely(srq->head == srq->tail)) {
+		if (unlikely(hns_roce_srqwq_overflow(srq, nreq))) {
			ret = -ENOMEM;
			*bad_wr = wr;
			break;
@@ -938,7 +947,7 @@ static int hns_roce_v2_post_srq_recv(struct ib_srq *ibsrq,
	}

	if (likely(nreq)) {
-		srq->head += nreq;
+		srq->idx_que.head += nreq;

		/*
		 * Make sure that descriptors are written before
@@ -950,7 +959,7 @@ static int hns_roce_v2_post_srq_recv(struct ib_srq *ibsrq,
			cpu_to_le32(HNS_ROCE_V2_SRQ_DB << V2_DB_BYTE_4_CMD_S |
				    (srq->srqn & V2_DB_BYTE_4_TAG_M));
		srq_db.parameter =
-			cpu_to_le32(srq->head & V2_DB_PARAMETER_IDX_M);
+			cpu_to_le32(srq->idx_que.head & V2_DB_PARAMETER_IDX_M);

		hns_roce_write64(hr_dev, (__le32 *)&srq_db, srq->db_reg_l);
	}
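
A note on the arithmetic: head and tail are free-running u32 counters
that are never masked, so hns_roce_srqwq_overflow() relies on unsigned
subtraction being modulo 2^32. A standalone sketch (not driver code)
showing that head - tail still yields the in-use count after head wraps:

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		uint32_t tail = UINT32_MAX - 4;	/* 5 increments from wrapping */
		uint32_t head = 3;		/* wrapped: tail + 8 mod 2^32 */

		/* Unsigned subtraction is modulo 2^32, so the count survives. */
		assert(head - tail == 8);
		return 0;
	}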
drivers/infiniband/hw/hns/hns_roce_srq.c +3 −2
@@ -245,6 +245,9 @@ static int alloc_srq_idx(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq,
		}
	}

+	idx_que->head = 0;
+	idx_que->tail = 0;
+
	return 0;
err_idx_mtr:
	hns_roce_mtr_destroy(hr_dev, &idx_que->mtr);
@@ -263,8 +266,6 @@ static void free_srq_idx(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)

static int alloc_srq_wrid(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
{
-	srq->head = 0;
-	srq->tail = srq->wqe_cnt - 1;
	srq->wrid = kvmalloc_array(srq->wqe_cnt, sizeof(u64), GFP_KERNEL);
	if (!srq->wrid)
		return -ENOMEM;
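
Both counters now start at zero, so head - tail directly counts the
entries in use and head == tail means empty; the old code instead primed
tail to wqe_cnt - 1 in a separate structure. A hypothetical walk-through
of the new test (assuming wqe_cnt = 8; standalone example, not driver
code):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t wqe_cnt = 8, head = 0, tail = 0; /* empty queue */
		int nreq;

		/* One slot stays reserved, so an empty queue of 8 accepts 7 WRs. */
		for (nreq = 0; nreq < 10; ++nreq)
			if (head - tail + nreq >= wqe_cnt - 1)
				break;

		printf("accepted %d WRs\n", nreq);	/* prints 7 */
		head += nreq;	/* head advanced once, after the whole list */
		return 0;
	}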