Commit 6da06c62 authored by Weihang Li, committed by Jason Gunthorpe
Browse files

Revert "RDMA/hns: Reserve one sge in order to avoid local length error"

This patch caused some issues on SEND operation, and it should be reverted
to make the drivers work correctly. There will be a better solution that
has been tested carefully to solve the original problem.

This reverts commit 711195e5.

Fixes: 711195e5 ("RDMA/hns: Reserve one sge in order to avoid local length error")
Link: https://lore.kernel.org/r/1597829984-20223-1-git-send-email-liweihang@huawei.com


Signed-off-by: Weihang Li <liweihang@huawei.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
parent b25e8e85
Loading
Loading
Loading
Loading
+0 −2
Original line number Diff line number Diff line
@@ -65,8 +65,6 @@
#define HNS_ROCE_CQE_WCMD_EMPTY_BIT		0x2
#define HNS_ROCE_MIN_CQE_CNT			16

#define HNS_ROCE_RESERVED_SGE			1

#define HNS_ROCE_MAX_IRQ_NUM			128

#define HNS_ROCE_SGE_IN_WQE			2
+4 −5
Original line number Diff line number Diff line
@@ -633,7 +633,7 @@ static int hns_roce_v2_post_recv(struct ib_qp *ibqp,

		wqe_idx = (hr_qp->rq.head + nreq) & (hr_qp->rq.wqe_cnt - 1);

		if (unlikely(wr->num_sge >= hr_qp->rq.max_gs)) {
		if (unlikely(wr->num_sge > hr_qp->rq.max_gs)) {
			ibdev_err(ibdev, "rq:num_sge=%d >= qp->sq.max_gs=%d\n",
				  wr->num_sge, hr_qp->rq.max_gs);
			ret = -EINVAL;
@@ -653,7 +653,6 @@ static int hns_roce_v2_post_recv(struct ib_qp *ibqp,
		if (wr->num_sge < hr_qp->rq.max_gs) {
			dseg->lkey = cpu_to_le32(HNS_ROCE_INVALID_LKEY);
			dseg->addr = 0;
			dseg->len = cpu_to_le32(HNS_ROCE_INVALID_SGE_LENGTH);
		}

		/* rq support inline data */
@@ -787,8 +786,8 @@ static int hns_roce_v2_post_srq_recv(struct ib_srq *ibsrq,
		}

		if (wr->num_sge < srq->max_gs) {
			dseg[i].len = cpu_to_le32(HNS_ROCE_INVALID_SGE_LENGTH);
			dseg[i].lkey = cpu_to_le32(HNS_ROCE_INVALID_LKEY);
			dseg[i].len = 0;
			dseg[i].lkey = cpu_to_le32(0x100);
			dseg[i].addr = 0;
		}

@@ -5070,7 +5069,7 @@ static int hns_roce_v2_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)

	attr->srq_limit = limit_wl;
	attr->max_wr = srq->wqe_cnt - 1;
	attr->max_sge = srq->max_gs - HNS_ROCE_RESERVED_SGE;
	attr->max_sge = srq->max_gs;

out:
	hns_roce_free_cmd_mailbox(hr_dev, mailbox);
+1 −3
Original line number Diff line number Diff line
@@ -92,9 +92,7 @@
#define HNS_ROCE_V2_CQC_TIMER_ENTRY_SZ		PAGE_SIZE
#define HNS_ROCE_V2_PAGE_SIZE_SUPPORTED		0xFFFFF000
#define HNS_ROCE_V2_MAX_INNER_MTPT_NUM		2
#define HNS_ROCE_INVALID_LKEY			0x0
#define HNS_ROCE_INVALID_SGE_LENGTH		0x80000000

#define HNS_ROCE_INVALID_LKEY			0x100
#define HNS_ROCE_CMQ_TX_TIMEOUT			30000
#define HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE	2
#define HNS_ROCE_V2_RSV_QPS			8
+2 −3
Original line number Diff line number Diff line
@@ -386,8 +386,7 @@ static int set_rq_size(struct hns_roce_dev *hr_dev, struct ib_qp_cap *cap,
		return -EINVAL;
	}

	hr_qp->rq.max_gs = roundup_pow_of_two(max(1U, cap->max_recv_sge) +
					      HNS_ROCE_RESERVED_SGE);
	hr_qp->rq.max_gs = roundup_pow_of_two(max(1U, cap->max_recv_sge));

	if (hr_dev->caps.max_rq_sg <= HNS_ROCE_SGE_IN_WQE)
		hr_qp->rq.wqe_shift = ilog2(hr_dev->caps.max_rq_desc_sz);
@@ -402,7 +401,7 @@ static int set_rq_size(struct hns_roce_dev *hr_dev, struct ib_qp_cap *cap,
		hr_qp->rq_inl_buf.wqe_cnt = 0;

	cap->max_recv_wr = cnt;
	cap->max_recv_sge = hr_qp->rq.max_gs - HNS_ROCE_RESERVED_SGE;
	cap->max_recv_sge = hr_qp->rq.max_gs;

	return 0;
}
+1 −1
Original line number Diff line number Diff line
@@ -297,7 +297,7 @@ int hns_roce_create_srq(struct ib_srq *ib_srq,
	spin_lock_init(&srq->lock);

	srq->wqe_cnt = roundup_pow_of_two(init_attr->attr.max_wr + 1);
	srq->max_gs = init_attr->attr.max_sge + HNS_ROCE_RESERVED_SGE;
	srq->max_gs = init_attr->attr.max_sge;

	if (udata) {
		ret = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd));