Commit f9d4c848 authored by Xi Wang, committed by Zheng Zengkai
Browse files

RDMA/hns: Optimize the base address table config for MTR



driver inclusion
category: bugfix
bugzilla: 50786

------------------------------

The base address table is allocated by the DMA allocator, and its size is
always aligned to PAGE_SIZE. If a fixed size is used to allocate the table,
the number of base address entries stored in the table will be smaller than
what the allocated memory space can actually hold.

Signed-off-by: Xi Wang <wangxi11@huawei.com>
Reviewed-by: Weihang Li <liweihang@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
parent 338fc539
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -156,7 +156,7 @@ static int alloc_cq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq,
	buf_attr.fixed_page = true;

	err = hns_roce_mtr_create(hr_dev, &hr_cq->mtr, &buf_attr,
				  hr_dev->caps.cqe_ba_pg_sz + HNS_HW_PAGE_SHIFT,
				  hr_dev->caps.cqe_ba_pg_sz + PAGE_SHIFT,
				  udata, addr);
	if (err)
		ibdev_err(ibdev, "Failed to alloc CQ mtr, err %d\n", err);
+2 −2
Original line number Diff line number Diff line
@@ -5859,7 +5859,7 @@ static int alloc_eq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)
	else
		eq->hop_num = hr_dev->caps.eqe_hop_num;

	buf_attr.page_shift = hr_dev->caps.eqe_buf_pg_sz + HNS_HW_PAGE_SHIFT;
	buf_attr.page_shift = hr_dev->caps.eqe_buf_pg_sz + PAGE_SHIFT;
	buf_attr.region[0].size = eq->entries * eq->eqe_size;
	buf_attr.region[0].hopnum = eq->hop_num;
	buf_attr.region_count = 1;
@@ -5867,7 +5867,7 @@ static int alloc_eq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)

	err = hns_roce_mtr_create(hr_dev, &eq->mtr, &buf_attr,
				  hr_dev->caps.eqe_ba_pg_sz +
				  HNS_HW_PAGE_SHIFT, NULL, 0);
				  PAGE_SHIFT, NULL, 0);
	if (err)
		dev_err(hr_dev->dev, "Failed to alloc EQE mtr, err %d\n", err);

+1 −1
Original line number Diff line number Diff line
@@ -130,7 +130,7 @@ static int alloc_mr_pbl(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr,
	buf_attr.mtt_only = is_fast;

	err = hns_roce_mtr_create(hr_dev, &mr->pbl_mtr, &buf_attr,
				  hr_dev->caps.pbl_ba_pg_sz + HNS_HW_PAGE_SHIFT,
				  hr_dev->caps.pbl_ba_pg_sz + PAGE_SHIFT,
				  udata, start);
	if (err)
		ibdev_err(ibdev, "failed to alloc pbl mtr, ret = %d.\n", err);
+1 −1
Original line number Diff line number Diff line
@@ -670,7 +670,7 @@ static int alloc_qp_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
		goto err_inline;
	}
	ret = hns_roce_mtr_create(hr_dev, &hr_qp->mtr, &buf_attr,
				  HNS_HW_PAGE_SHIFT + hr_dev->caps.mtt_ba_pg_sz,
				  PAGE_SHIFT + hr_dev->caps.mtt_ba_pg_sz,
				  udata, addr);
	if (ret) {
		ibdev_err(ibdev, "failed to create WQE mtr, ret = %d.\n", ret);
+1 −1
Original line number Diff line number Diff line
@@ -226,7 +226,7 @@ static int alloc_srq_idx(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq,
	buf_attr.fixed_page = true;

	err = hns_roce_mtr_create(hr_dev, &idx_que->mtr, &buf_attr,
				  hr_dev->caps.idx_ba_pg_sz + HNS_HW_PAGE_SHIFT,
				  hr_dev->caps.idx_ba_pg_sz + PAGE_SHIFT,
				  udata, addr);
	if (err) {
		ibdev_err(ibdev, "Failed to alloc SRQ idx mtr, err %d\n", err);