Unverified commit 184f5b47, authored by openeuler-ci-bot, committed by Gitee
Browse files

!878 Bugfixes for RDMA/hns

Merge Pull Request from: @stinft 
 
1.#I7A2PB
2.#I7A2PK
3.#I7A2SA
4.#I7A2V2
5.#I7A2VV 
 
Link: https://gitee.com/openeuler/kernel/pulls/878

 

Reviewed-by: Chengchang Tang <tangchengchang@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
parents 2b25e7e1 4fa42930
Loading
Loading
Loading
Loading
+0 −2
Original line number Diff line number Diff line
@@ -1085,9 +1085,7 @@ static void cleanup_dca_context(struct hns_roce_dev *hr_dev,
	unsigned long flags;
	bool is_user;

	spin_lock(&ctx->aging_lock);
	cancel_delayed_work_sync(&ctx->aging_dwork);
	spin_unlock(&ctx->aging_lock);

	is_user = (ctx != &hr_dev->dca_ctx);
	spin_lock_irqsave(&ctx->pool_lock, flags);
+2 −1
Original line number Diff line number Diff line
@@ -92,6 +92,8 @@
/* Configure to HW for PAGE_SIZE larger than 4KB */
#define PG_SHIFT_OFFSET				(PAGE_SHIFT - 12)

#define ATOMIC_WR_LEN				8

#define HNS_ROCE_IDX_QUE_ENTRY_SZ		4
#define SRQ_DB_REG				0x230

@@ -795,7 +797,6 @@ struct hns_roce_caps {
	u32		max_rq_sg;
	u32		max_extend_sg;
	u32		num_qps;
	u32		num_pi_qps;
	u32		reserved_qps;
	int		num_qpc_timer;
	u32		num_srqs;
+31 −84
Original line number Diff line number Diff line
@@ -167,15 +167,22 @@ static void set_frmr_seg(struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
	hr_reg_clear(fseg, FRMR_BLK_MODE);
}

static void set_atomic_seg(const struct ib_send_wr *wr,
static int set_atomic_seg(struct hns_roce_dev *hr_dev,
			  const struct ib_send_wr *wr,
			  struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
			   unsigned int valid_num_sge)
			  unsigned int valid_num_sge, u32 msg_len)
{
	struct hns_roce_v2_wqe_data_seg *dseg =
		(void *)rc_sq_wqe + sizeof(struct hns_roce_v2_rc_send_wqe);
	struct hns_roce_wqe_atomic_seg *aseg =
		(void *)dseg + sizeof(struct hns_roce_v2_wqe_data_seg);

	if (msg_len != ATOMIC_WR_LEN) {
		ibdev_err(&hr_dev->ib_dev, "invalid atomic wr len, len = %u.\n",
			  msg_len);
		return -EINVAL;
	}

	set_data_seg_v2(dseg, wr->sg_list);

	if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
@@ -188,6 +195,8 @@ static void set_atomic_seg(const struct ib_send_wr *wr,
	}

	hr_reg_write(rc_sq_wqe, RC_SEND_WQE_SGE_NUM, valid_num_sge);

	return 0;
}

static int fill_ext_sge_inl_data(struct hns_roce_qp *qp,
@@ -691,7 +700,8 @@ static inline int set_rc_wqe(struct hns_roce_qp *qp,

	if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
	    wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD)
		set_atomic_seg(wr, rc_sq_wqe, valid_num_sge);
		ret = set_atomic_seg(hr_dev, wr, rc_sq_wqe, valid_num_sge,
				     msg_len);
	else if (wr->opcode != IB_WR_REG_MR)
		ret = set_rwqe_data_seg(&qp->ibqp, wr, rc_sq_wqe,
					&curr_idx, valid_num_sge);
@@ -853,7 +863,8 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp,
		qp->sq.head += nreq;
		qp->next_sge = sge_idx;

		if (nreq == 1 && (qp->en_flags & HNS_ROCE_QP_CAP_DIRECT_WQE))
		if (nreq == 1 && !ret &&
		    (qp->en_flags & HNS_ROCE_QP_CAP_DIRECT_WQE))
			write_dwqe(hr_dev, qp, wqe);
		else
			update_sq_db(hr_dev, qp);
@@ -1915,29 +1926,6 @@ static int load_func_res_caps(struct hns_roce_dev *hr_dev, bool is_vf)
	return 0;
}

/*
 * Query the extended configuration (HNS_ROCE_OPC_EXT_CFG) from firmware and
 * derive this function's share of the QP resources.
 *
 * @hr_dev: hns RoCE device whose caps are being populated
 * @is_vf:  true when running as a virtual function
 *
 * Returns 0 on success or the error code from the command-queue send.
 */
static int load_ext_cfg_caps(struct hns_roce_dev *hr_dev, bool is_vf)
{
	struct hns_roce_cmq_desc desc;
	struct hns_roce_cmq_req *req = (struct hns_roce_cmq_req *)desc.data;
	struct hns_roce_caps *caps = &hr_dev->caps;
	u32 func_num, qp_num;
	int ret;

	/* NOTE(review): third arg true presumably selects query (read) mode —
	 * confirm against hns_roce_cmq_setup_basic_desc(). */
	hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_EXT_CFG, true);
	ret = hns_roce_cmq_send(hr_dev, &desc, 1);
	if (ret)
		return ret;

	/* A VF keeps its reported totals; a PF splits them evenly across all
	 * functions (at least 1 to avoid a divide-by-zero). */
	func_num = is_vf ? 1 : max_t(u32, 1, hr_dev->func_num);
	qp_num = hr_reg_read(req, EXT_CFG_QP_PI_NUM) / func_num;
	/* Round down to a whole number of QP banks so bank allocation stays
	 * balanced. */
	caps->num_pi_qps = round_down(qp_num, HNS_ROCE_QP_BANK_NUM);

	qp_num = hr_reg_read(req, EXT_CFG_QP_NUM) / func_num;
	caps->num_qps = round_down(qp_num, HNS_ROCE_QP_BANK_NUM);

	return 0;
}

static int load_pf_timer_res_caps(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_cmq_desc desc;
@@ -1958,50 +1946,37 @@ static int load_pf_timer_res_caps(struct hns_roce_dev *hr_dev)
	return 0;
}

static int query_func_resource_caps(struct hns_roce_dev *hr_dev, bool is_vf)
static int hns_roce_query_pf_resource(struct hns_roce_dev *hr_dev)
{
	struct device *dev = hr_dev->dev;
	int ret;

	ret = load_func_res_caps(hr_dev, is_vf);
	ret = load_func_res_caps(hr_dev, false);
	if (ret) {
		dev_err(dev, "failed to load res caps, ret = %d (%s).\n", ret,
			is_vf ? "vf" : "pf");
		dev_err(dev, "failed to load pf res caps, ret = %d.\n", ret);
		return ret;
	}

	if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) {
		ret = load_ext_cfg_caps(hr_dev, is_vf);
	ret = load_pf_timer_res_caps(hr_dev);
	if (ret)
			dev_err(dev, "failed to load ext cfg, ret = %d (%s).\n",
				ret, is_vf ? "vf" : "pf");
	}
		dev_err(dev, "failed to load pf timer resource, ret = %d.\n",
			ret);

	return ret;
}

static int hns_roce_query_pf_resource(struct hns_roce_dev *hr_dev)
static int hns_roce_query_vf_resource(struct hns_roce_dev *hr_dev)
{
	struct device *dev = hr_dev->dev;
	int ret;

	ret = query_func_resource_caps(hr_dev, false);
	ret = load_func_res_caps(hr_dev, true);
	if (ret)
		return ret;

	ret = load_pf_timer_res_caps(hr_dev);
	if (ret)
		dev_err(dev, "failed to load pf timer resource, ret = %d.\n",
			ret);
		dev_err(dev, "failed to load vf res caps, ret = %d.\n", ret);

	return ret;
}

static int hns_roce_query_vf_resource(struct hns_roce_dev *hr_dev)
{
	return query_func_resource_caps(hr_dev, true);
}

static int __hns_roce_set_vf_switch_param(struct hns_roce_dev *hr_dev,
					  u32 vf_id)
{
@@ -2084,24 +2059,6 @@ static int config_vf_hem_resource(struct hns_roce_dev *hr_dev, int vf_id)
	return hns_roce_cmq_send(hr_dev, desc, 2);
}

/*
 * Program the extended (HNS_ROCE_OPC_EXT_CFG) resource slice for one VF:
 * each VF receives caps->num_pi_qps / caps->num_qps entries starting at a
 * contiguous offset of vf_id times the per-VF count.
 *
 * @hr_dev: hns RoCE device issuing the command
 * @vf_id:  index of the virtual function being configured
 *
 * Returns the result of the command-queue send (0 on success).
 */
static int config_vf_ext_resource(struct hns_roce_dev *hr_dev, u32 vf_id)
{
	struct hns_roce_cmq_desc desc;
	struct hns_roce_cmq_req *req = (struct hns_roce_cmq_req *)desc.data;
	struct hns_roce_caps *caps = &hr_dev->caps;

	/* NOTE(review): third arg false presumably selects write (configure)
	 * mode — confirm against hns_roce_cmq_setup_basic_desc(). */
	hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_EXT_CFG, false);

	hr_reg_write(req, EXT_CFG_VF_ID, vf_id);

	/* Per-VF slice: base index = vf_id * per-VF count for both the
	 * PI-QP and regular QP ranges. */
	hr_reg_write(req, EXT_CFG_QP_PI_NUM, caps->num_pi_qps);
	hr_reg_write(req, EXT_CFG_QP_PI_IDX, vf_id * caps->num_pi_qps);
	hr_reg_write(req, EXT_CFG_QP_NUM, caps->num_qps);
	hr_reg_write(req, EXT_CFG_QP_IDX, vf_id * caps->num_qps);

	return hns_roce_cmq_send(hr_dev, &desc, 1);
}

static int hns_roce_alloc_vf_resource(struct hns_roce_dev *hr_dev)
{
	u32 func_num = max_t(u32, 1, hr_dev->func_num);
@@ -2116,16 +2073,6 @@ static int hns_roce_alloc_vf_resource(struct hns_roce_dev *hr_dev)
				vf_id, ret);
			return ret;
		}

		if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) {
			ret = config_vf_ext_resource(hr_dev, vf_id);
			if (ret) {
				dev_err(hr_dev->dev,
					"failed to config vf-%u ext res, ret = %d.\n",
					vf_id, ret);
				return ret;
			}
		}
	}

	return 0;
@@ -7328,20 +7275,20 @@ static int __hns_roce_hw_v2_init_instance(struct hnae3_handle *handle)

	if (hr_dev->is_vf && !check_vf_support(hr_dev->pci_dev)) {
		ret = -EOPNOTSUPP;
		goto error_failed_kzalloc;
		goto error_failed_roce_init;
	}

	ret = hns_roce_init(hr_dev);
	if (ret) {
		dev_err(hr_dev->dev, "RoCE Engine init failed!\n");
		goto error_failed_cfg;
		goto error_failed_roce_init;
	}

	if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) {
		ret = free_mr_init(hr_dev);
		if (ret) {
			dev_err(hr_dev->dev, "failed to init free mr!\n");
			goto error_failed_roce_init;
			goto error_failed_free_mr_init;
		}
	}

@@ -7349,10 +7296,10 @@ static int __hns_roce_hw_v2_init_instance(struct hnae3_handle *handle)

	return 0;

error_failed_roce_init:
error_failed_free_mr_init:
	hns_roce_exit(hr_dev, true);

error_failed_cfg:
error_failed_roce_init:
	kfree(hr_dev->priv);

error_failed_kzalloc:
+0 −10
Original line number Diff line number Diff line
@@ -226,7 +226,6 @@ enum hns_roce_opcode_type {
	HNS_ROCE_OPC_CFG_GMV_TBL			= 0x850f,
	HNS_ROCE_OPC_CFG_GMV_BT				= 0x8510,
	HNS_ROCE_OPC_SYNC_MB				= 0x8511,
	HNS_ROCE_OPC_EXT_CFG				= 0x8512,
	HNS_ROCE_QUERY_RAM_ECC				= 0x8513,
	HNS_SWITCH_PARAMETER_CFG			= 0x1033,
	HNS_ROCE_OPC_SET_BOND_INFO			= 0x8601,
@@ -969,15 +968,6 @@ struct hns_roce_func_clear {
#define HNS_ROCE_V2_READ_FUNC_CLEAR_FLAG_INTERVAL	40
#define HNS_ROCE_V2_READ_FUNC_CLEAR_FLAG_FAIL_WAIT	20

/* Fields of HNS_ROCE_OPC_EXT_CFG */
#define EXT_CFG_VF_ID CMQ_REQ_FIELD_LOC(31, 0)
#define EXT_CFG_QP_PI_IDX CMQ_REQ_FIELD_LOC(45, 32)
#define EXT_CFG_QP_PI_NUM CMQ_REQ_FIELD_LOC(63, 48)
#define EXT_CFG_QP_NUM CMQ_REQ_FIELD_LOC(87, 64)
#define EXT_CFG_QP_IDX CMQ_REQ_FIELD_LOC(119, 96)
#define EXT_CFG_LLM_IDX CMQ_REQ_FIELD_LOC(139, 128)
#define EXT_CFG_LLM_NUM CMQ_REQ_FIELD_LOC(156, 144)

#define CFG_LLM_A_BA_L CMQ_REQ_FIELD_LOC(31, 0)
#define CFG_LLM_A_BA_H CMQ_REQ_FIELD_LOC(63, 32)
#define CFG_LLM_A_DEPTH CMQ_REQ_FIELD_LOC(76, 64)
+1 −1
Original line number Diff line number Diff line
@@ -1057,7 +1057,7 @@ static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
		if (ret)
			return ret;
	}
	dma_set_max_seg_size(dev, UINT_MAX);
	dma_set_max_seg_size(dev, SZ_2G);

	if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_BOND) &&
	    (hr_dev->hw->bond_is_active(hr_dev)))