Unverified Commit 5d94761f authored by openeuler-ci-bot, committed by Gitee
Browse files

!15447 Some bug fix patches for RDMA/hns to olk-5.10

Merge Pull Request from: @ci-robot 
 
PR sync from: Junxian Huang <huangjunxian6@hisilicon.com>
https://mailweb.openeuler.org/archives/list/kernel@openeuler.org/message/OVFKP7CRYMLOLE37GSIDH32BUN36W6TU/ 
From: Xinghai Cen <cenxinghai@h-partners.com>

Some bug fix patches for RDMA/hns to olk-5.10: 

Junxian Huang (7):
  RDMA/hns: Fix DCA error path in alloc_wqe_buf()
  RDMA/hns: Fix unmatched condition in error path of alloc_user_qp_db()
  RDMA/hns: Fix invalid sq params not being blocked
  RDMA/hns: Fix a missing rollback in error path of
    hns_roce_create_qp_common()
  RDMA/hns: Fix missing xa_destroy()
  RDMA/hns: Fix wrong value of max_sge_rd
  RDMA/hns: Fix udca not unregistered when reset entry allocation failed

Xinghai Cen (1):
  RDMA/hns: Fix default congestion control algorithm not set
    for kernel QP

 
https://gitee.com/openeuler/kernel/issues/IBSE4C 
 
Link: https://gitee.com/openeuler/kernel/pulls/15447

 

Reviewed-by: Chengchang Tang <tangchengchang@huawei.com>
Reviewed-by: Li Nan <linan122@huawei.com>
Signed-off-by: Li Nan <linan122@huawei.com>
parents 3bd40dd7 4eedaaeb
Loading
Loading
Loading
Loading
+3 −1
Original line number Diff line number Diff line
@@ -169,8 +169,10 @@ void hns_roce_cleanup_bitmap(struct hns_roce_dev *hr_dev)
	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC)
		ida_destroy(&hr_dev->xrcd_ida.ida);

	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ)
	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ) {
		ida_destroy(&hr_dev->srq_table.srq_ida.ida);
		xa_destroy(&hr_dev->srq_table.xa);
	}
	hns_roce_cleanup_qp_table(hr_dev);
	hns_roce_cleanup_cq_table(hr_dev);
	ida_destroy(&hr_dev->mr_table.mtpt_ida.ida);
+1 −0
Original line number Diff line number Diff line
@@ -722,5 +722,6 @@ void hns_roce_cleanup_cq_table(struct hns_roce_dev *hr_dev)

	for (i = 0; i < HNS_ROCE_CQ_BANK_NUM; i++)
		ida_destroy(&hr_dev->cq_table.bank[i].ida);
	xa_destroy(&hr_dev->cq_table.array);
	mutex_destroy(&hr_dev->cq_table.bank_mutex);
}
+6 −6
Original line number Diff line number Diff line
@@ -279,7 +279,7 @@ static int hns_roce_query_device(struct ib_device *ib_dev,
				  IB_DEVICE_RC_RNR_NAK_GEN;
	props->max_send_sge = hr_dev->caps.max_sq_sg;
	props->max_recv_sge = hr_dev->caps.max_rq_sg;
	props->max_sge_rd = 1;
	props->max_sge_rd = hr_dev->caps.max_sq_sg;
	props->max_cq = hr_dev->caps.num_cqs;
	props->max_cqe = hr_dev->caps.max_cqes;
	props->max_mr = hr_dev->caps.num_mtpts;
@@ -633,10 +633,10 @@ static int hns_roce_alloc_ucontext(struct ib_ucontext *uctx,
	return 0;

error_fail_copy_to_udata:
	hns_roce_unregister_udca(hr_dev, context);
	hns_roce_dealloc_reset_entry(context);

error_fail_reset_entry:
	hns_roce_unregister_udca(hr_dev, context);
	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_CQ_RECORD_DB ||
	    hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_RECORD_DB)
		mutex_destroy(&context->page_mutex);
@@ -656,20 +656,20 @@ static void hns_roce_dealloc_ucontext(struct ib_ucontext *ibcontext)
	struct hns_roce_ucontext *context = to_hr_ucontext(ibcontext);
	struct hns_roce_dev *hr_dev = to_hr_dev(ibcontext->device);

	hns_roce_put_cq_bankid_for_uctx(context);
	hns_roce_unregister_uctx_debugfs(context);

	mutex_lock(&hr_dev->uctx_list_mutex);
	list_del(&context->list);
	mutex_unlock(&hr_dev->uctx_list_mutex);

	hns_roce_unregister_uctx_debugfs(context);

	hns_roce_dealloc_reset_entry(context);
	hns_roce_unregister_udca(hr_dev, context);
	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_CQ_RECORD_DB ||
	    hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_RECORD_DB)
		mutex_destroy(&context->page_mutex);

	hns_roce_put_cq_bankid_for_uctx(context);
	hns_roce_dealloc_uar_entry(context);
	hns_roce_dealloc_reset_entry(context);

	ida_free(&hr_dev->uar_ida.ida, (int)context->uar.logic_idx);
}
+14 −9
Original line number Diff line number Diff line
@@ -866,12 +866,13 @@ static int alloc_wqe_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
	if (IS_ERR(hr_qp->mtr)) {
		ret = PTR_ERR(hr_qp->mtr);
		ibdev_err(ibdev, "failed to create WQE mtr, ret = %d.\n", ret);
		if (dca_en)
			hns_roce_disable_dca(hr_dev, hr_qp, udata);
	} else if (dca_en) {
		ret = hns_roce_map_dca_safe_page(hr_dev, hr_qp);
	}

	if (ret && dca_en)
		hns_roce_disable_dca(hr_dev, hr_qp, udata);

	return ret;
}

@@ -1010,12 +1011,14 @@ static int alloc_user_qp_db(struct hns_roce_dev *hr_dev,
			    struct hns_roce_ib_create_qp *ucmd,
			    struct hns_roce_ib_create_qp_resp *resp)
{
	bool has_sdb = user_qp_has_sdb(hr_dev, init_attr, udata, resp, ucmd);
	struct hns_roce_ucontext *uctx = rdma_udata_to_drv_context(udata,
		struct hns_roce_ucontext, ibucontext);
	bool has_rdb = user_qp_has_rdb(hr_dev, init_attr, udata, resp);
	struct ib_device *ibdev = &hr_dev->ib_dev;
	int ret;

	if (user_qp_has_sdb(hr_dev, init_attr, udata, resp, ucmd)) {
	if (has_sdb) {
		ret = hns_roce_db_map_user(uctx, ucmd->sdb_addr, &hr_qp->sdb);
		if (ret) {
			ibdev_err(ibdev,
@@ -1026,7 +1029,7 @@ static int alloc_user_qp_db(struct hns_roce_dev *hr_dev,
		hr_qp->en_flags |= HNS_ROCE_QP_CAP_SQ_RECORD_DB;
	}

	if (user_qp_has_rdb(hr_dev, init_attr, udata, resp)) {
	if (has_rdb) {
		ret = hns_roce_db_map_user(uctx, ucmd->db_addr, &hr_qp->rdb);
		if (ret) {
			ibdev_err(ibdev,
@@ -1040,7 +1043,7 @@ static int alloc_user_qp_db(struct hns_roce_dev *hr_dev,
	return 0;

err_sdb:
	if (hr_qp->en_flags & HNS_ROCE_QP_CAP_SQ_RECORD_DB)
	if (has_sdb)
		hns_roce_db_unmap_user(uctx, &hr_qp->sdb, false);
err_out:
	return ret;
@@ -1323,17 +1326,17 @@ static int set_qp_param(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
						ibucontext);
		hr_qp->config = uctx->config;
		ret = set_user_sq_size(hr_dev, &init_attr->cap, hr_qp, ucmd);
		if (ret)
		if (ret) {
			ibdev_err(ibdev, "Failed to set user SQ size, ret = %d\n",
				  ret);
			return ret;
		}

		ret = set_uqp_create_flag_param(hr_dev, hr_qp, init_attr, ucmd);
		if (ret)
			return ret;

		ret = set_congest_param(hr_dev, hr_qp, ucmd);
		if (ret)
			return ret;
	} else {
		if (init_attr->create_flags &
		    IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) {
@@ -1348,6 +1351,7 @@ static int set_qp_param(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,

		if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
			hr_qp->config = HNS_ROCE_EXSGE_FLAGS;
		default_congest_type(hr_dev, hr_qp);
		ret = set_kernel_sq_size(hr_dev, &init_attr->cap, hr_qp);
		if (ret)
			ibdev_err(ibdev, "Failed to set kernel SQ size, ret = %d\n",
@@ -1431,7 +1435,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
				       min(udata->outlen, sizeof(resp)));
		if (ret) {
			ibdev_err(ibdev, "copy qp resp failed!\n");
			goto err_store;
			goto err_flow_ctrl;
		}
	}

@@ -1857,6 +1861,7 @@ void hns_roce_cleanup_qp_table(struct hns_roce_dev *hr_dev)
	for (i = 0; i < HNS_ROCE_QP_BANK_NUM; i++)
		ida_destroy(&hr_dev->qp_table.bank[i].ida);
	xa_destroy(&hr_dev->qp_table.dip_xa);
	xa_destroy(&hr_dev->qp_table_xa);
	mutex_destroy(&hr_dev->qp_table.bank_mutex);
	mutex_destroy(&hr_dev->qp_table.scc_mutex);
}