Commit 215568bf authored by Junxian Huang's avatar Junxian Huang Committed by Xinghai Cen
Browse files

RDMA/hns: Change mtr member to pointer in hns QP/CQ/MR/SRQ/EQ struct

driver inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/IBQK42



----------------------------------------------------------------------

Change the mtr member to a pointer in the hns QP/CQ/MR/SRQ/EQ structs to
decouple the mtr's life cycle from these structs. This is preparation for
the subsequent refactoring. No functional changes.

Signed-off-by: default avatarJunxian Huang <huangjunxian6@hisilicon.com>
Signed-off-by: default avatarwenglianfa <wenglianfa@huawei.com>
Signed-off-by: default avatarXinghai Cen <cenxinghai@h-partners.com>
parent 22f72139
Loading
Loading
Loading
Loading
+10 −9
Original line number Diff line number Diff line
@@ -190,7 +190,7 @@ static int alloc_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
	u64 mtts[MTT_MIN_COUNT] = {};
	int ret;

	ret = hns_roce_mtr_find(hr_dev, &hr_cq->mtr, 0, mtts, ARRAY_SIZE(mtts));
	ret = hns_roce_mtr_find(hr_dev, hr_cq->mtr, 0, mtts, ARRAY_SIZE(mtts));
	if (ret) {
		ibdev_err(ibdev, "failed to find CQ mtr, ret = %d.\n", ret);
		return ret;
@@ -211,7 +211,7 @@ static int alloc_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
	}

	ret = hns_roce_create_cqc(hr_dev, hr_cq, mtts,
				  hns_roce_get_mtr_ba(&hr_cq->mtr));
				  hns_roce_get_mtr_ba(hr_cq->mtr));
	if (ret)
		goto err_xa;

@@ -262,7 +262,7 @@ static int alloc_cq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq,
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct hns_roce_buf_attr buf_attr = {};
	int ret;
	int ret = 0;

	hr_cq->mtr_node = kvmalloc(sizeof(*hr_cq->mtr_node), GFP_KERNEL);
	if (!hr_cq->mtr_node)
@@ -273,10 +273,11 @@ static int alloc_cq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq,
	buf_attr.region[0].hopnum = hr_dev->caps.cqe_hop_num;
	buf_attr.region_count = 1;

	ret = hns_roce_mtr_create(hr_dev, &hr_cq->mtr, &buf_attr,
	hr_cq->mtr = hns_roce_mtr_create(hr_dev, &buf_attr,
					 hr_dev->caps.cqe_ba_pg_sz + PAGE_SHIFT,
					 udata, addr);
	if (ret) {
	if (IS_ERR(hr_cq->mtr)) {
		ret = PTR_ERR(hr_cq->mtr);
		ibdev_err(ibdev, "Failed to alloc CQ mtr, ret = %d\n", ret);
		kvfree(hr_cq->mtr_node);
		hr_cq->mtr_node = NULL;
@@ -288,9 +289,9 @@ static int alloc_cq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq,
static void free_cq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
{
	if (hr_cq->delayed_destroy_flag) {
		hns_roce_add_unfree_mtr(hr_cq->mtr_node, hr_dev, &hr_cq->mtr);
		hns_roce_add_unfree_mtr(hr_cq->mtr_node, hr_dev, hr_cq->mtr);
	} else {
		hns_roce_mtr_destroy(hr_dev, &hr_cq->mtr);
		hns_roce_mtr_destroy(hr_dev, hr_cq->mtr);
		kvfree(hr_cq->mtr_node);
		hr_cq->mtr_node = NULL;
	}
+3 −3
Original line number Diff line number Diff line
@@ -324,7 +324,7 @@ int hns_roce_map_dca_safe_page(struct hns_roce_dev *hr_dev,
	for (i = 0; i < page_count; i++)
		pages[i] = hr_dev->dca_safe_page;

	ret = hns_roce_mtr_map(hr_dev, &hr_qp->mtr, pages, page_count);
	ret = hns_roce_mtr_map(hr_dev, hr_qp->mtr, pages, page_count);
	if (ret)
		ibdev_err(ibdev, "failed to map safe page for DCA, ret = %d.\n",
			  ret);
@@ -338,7 +338,7 @@ static int config_dca_qpc(struct hns_roce_dev *hr_dev,
			  int page_count)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct hns_roce_mtr *mtr = &hr_qp->mtr;
	struct hns_roce_mtr *mtr = hr_qp->mtr;
	int ret;

	ret = hns_roce_mtr_map(hr_dev, mtr, pages, page_count);
@@ -698,7 +698,7 @@ static u32 alloc_buf_from_dca_mem(struct hns_roce_qp *hr_qp,
	buf_id = HNS_DCA_TO_BUF_ID(hr_qp->qpn, hr_qp->dca_cfg.attach_count);

	/* Assign pages from free pages */
	unit_pages = hr_qp->mtr.hem_cfg.is_direct ? buf_pages : 1;
	unit_pages = hr_qp->mtr->hem_cfg.is_direct ? buf_pages : 1;
	alloc_pages = assign_dca_pages(ctx, buf_id, buf_pages, unit_pages);
	if (buf_pages != alloc_pages) {
		if (alloc_pages > 0)
+11 −10
Original line number Diff line number Diff line
@@ -426,7 +426,7 @@ struct hns_roce_mr {
	int			enabled; /* MR's active status */
	int			type; /* MR's register type */
	u32			pbl_hop_num; /* multi-hop number */
	struct hns_roce_mtr	pbl_mtr;
	struct hns_roce_mtr	*pbl_mtr;
	u32			npages;
	dma_addr_t		*page_list;
	bool			delayed_destroy_flag;
@@ -540,7 +540,7 @@ struct hns_roce_notify_conf {

struct hns_roce_cq {
	struct ib_cq			ib_cq;
	struct hns_roce_mtr		mtr;
	struct hns_roce_mtr		*mtr;
	struct hns_roce_db		db;
	u32				flags;
	spinlock_t			lock;
@@ -565,7 +565,7 @@ struct hns_roce_cq {
};

struct hns_roce_idx_que {
	struct hns_roce_mtr		mtr;
	struct hns_roce_mtr		*mtr;
	u32				entry_shift;
	unsigned long			*bitmap;
	u32				head;
@@ -587,7 +587,7 @@ struct hns_roce_srq {
	refcount_t		refcount;
	struct completion	free;

	struct hns_roce_mtr	buf_mtr;
	struct hns_roce_mtr	*buf_mtr;

	u64		       *wrid;
	struct hns_roce_idx_que idx_que;
@@ -733,7 +733,7 @@ struct hns_roce_qp {
	enum ib_sig_type	sq_signal_bits;
	struct hns_roce_wq	sq;

	struct hns_roce_mtr	mtr;
	struct hns_roce_mtr	*mtr;
	struct hns_roce_dca_cfg	dca_cfg;

	u32			buff_size;
@@ -834,7 +834,7 @@ struct hns_roce_eq {
	int				coalesce;
	int				arm_st;
	int				hop_num;
	struct hns_roce_mtr		mtr;
	struct hns_roce_mtr		*mtr;
	u16				eq_max_cnt;
	u32				eq_period;
	int				shift;
@@ -1423,9 +1423,10 @@ static inline dma_addr_t hns_roce_get_mtr_ba(struct hns_roce_mtr *mtr)
}
int hns_roce_mtr_find(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
		      u32 offset, u64 *mtt_buf, int mtt_max);
int hns_roce_mtr_create(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
struct hns_roce_mtr *hns_roce_mtr_create(struct hns_roce_dev *hr_dev,
					 struct hns_roce_buf_attr *buf_attr,
			unsigned int page_shift, struct ib_udata *udata,
					 unsigned int ba_page_shift,
					 struct ib_udata *udata,
					 unsigned long user_addr);
void hns_roce_mtr_destroy(struct hns_roce_dev *hr_dev,
			  struct hns_roce_mtr *mtr);
+44 −43
Original line number Diff line number Diff line
@@ -152,7 +152,7 @@ static void set_frmr_seg(struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
	hr_reg_write_bool(fseg, FRMR_LW, wr->access & IB_ACCESS_LOCAL_WRITE);

	/* Data structure reuse may lead to confusion */
	pbl_ba = mr->pbl_mtr.hem_cfg.root_ba;
	pbl_ba = mr->pbl_mtr->hem_cfg.root_ba;
	rc_sq_wqe->msg_len = cpu_to_le32(lower_32_bits(pbl_ba));
	rc_sq_wqe->inv_key = cpu_to_le32(upper_32_bits(pbl_ba));

@@ -163,7 +163,7 @@ static void set_frmr_seg(struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,

	hr_reg_write(fseg, FRMR_PBL_SIZE, mr->npages);
	hr_reg_write(fseg, FRMR_PBL_BUF_PG_SZ,
		     to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.buf_pg_shift));
		     to_hr_hw_page_shift(mr->pbl_mtr->hem_cfg.buf_pg_shift));
	hr_reg_clear(fseg, FRMR_BLK_MODE);
}

@@ -981,12 +981,12 @@ static int hns_roce_v2_post_recv(struct ib_qp *ibqp,

/* Return the kernel-space address of the n-th WQE in the SRQ buffer.
 * Diff-extraction artifact fixed: the pre-patch line using the embedded
 * mtr (srq->buf_mtr.kmem) was left in alongside the post-patch pointer
 * form, making the second return unreachable; only the pointer form
 * (the commit's intent) is kept.
 */
static void *get_srq_wqe_buf(struct hns_roce_srq *srq, u32 n)
{
	return hns_roce_buf_offset(srq->buf_mtr->kmem, n << srq->wqe_shift);
}

/* Return the kernel-space address of the n-th entry in the SRQ index
 * queue buffer. Diff-extraction artifact fixed: the stale pre-patch
 * line (idx_que->mtr.kmem) shadowed the post-patch pointer form; only
 * the pointer-dereference version is kept, per the commit's conversion
 * of mtr members to pointers.
 */
static void *get_idx_buf(struct hns_roce_idx_que *idx_que, u32 n)
{
	return hns_roce_buf_offset(idx_que->mtr->kmem,
				   n << idx_que->entry_shift);
}

@@ -3648,7 +3648,7 @@ static int set_mtpt_pbl(struct hns_roce_dev *hr_dev,
	int ret;
	int i;

	ret = hns_roce_mtr_find(hr_dev, &mr->pbl_mtr, 0, pages,
	ret = hns_roce_mtr_find(hr_dev, mr->pbl_mtr, 0, pages,
				min_t(int, ARRAY_SIZE(pages), mr->npages));
	if (ret) {
		ibdev_err(ibdev, "failed to find PBL mtr, ret = %d.\n", ret);
@@ -3659,7 +3659,7 @@ static int set_mtpt_pbl(struct hns_roce_dev *hr_dev,
	for (i = 0; i < ARRAY_SIZE(pages); i++)
		pages[i] >>= MPT_PBL_BUF_ADDR_S;

	pbl_ba = hns_roce_get_mtr_ba(&mr->pbl_mtr);
	pbl_ba = hns_roce_get_mtr_ba(mr->pbl_mtr);

	mpt_entry->pbl_size = cpu_to_le32(mr->npages);
	mpt_entry->pbl_ba_l = cpu_to_le32(pbl_ba >> MPT_PBL_BA_ADDR_S);
@@ -3672,7 +3672,7 @@ static int set_mtpt_pbl(struct hns_roce_dev *hr_dev,
	mpt_entry->pa1_l = cpu_to_le32(lower_32_bits(pages[1]));
	hr_reg_write(mpt_entry, MPT_PA1_H, upper_32_bits(pages[1]));
	hr_reg_write(mpt_entry, MPT_PBL_BUF_PG_SZ,
		     to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.buf_pg_shift));
		     to_hr_hw_page_shift(mr->pbl_mtr->hem_cfg.buf_pg_shift));

	return 0;
}
@@ -3715,7 +3715,7 @@ static int hns_roce_v2_write_mtpt(struct hns_roce_dev *hr_dev,
		hr_reg_write(mpt_entry, MPT_PBL_HOP_NUM, mr->pbl_hop_num);

	hr_reg_write(mpt_entry, MPT_PBL_BA_PG_SZ,
		     to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.ba_pg_shift));
		     to_hr_hw_page_shift(mr->pbl_mtr->hem_cfg.ba_pg_shift));
	hr_reg_enable(mpt_entry, MPT_INNER_PA_VLD);

	return set_mtpt_pbl(hr_dev, mpt_entry, mr);
@@ -3759,7 +3759,7 @@ static int hns_roce_v2_rereg_write_mtpt(struct hns_roce_dev *hr_dev,

static int hns_roce_v2_frmr_write_mtpt(void *mb_buf, struct hns_roce_mr *mr)
{
	dma_addr_t pbl_ba = mr->pbl_mtr.hem_cfg.root_ba;
	dma_addr_t pbl_ba = mr->pbl_mtr->hem_cfg.root_ba;
	struct hns_roce_v2_mpt_entry *mpt_entry;

	mpt_entry = mb_buf;
@@ -3778,9 +3778,9 @@ static int hns_roce_v2_frmr_write_mtpt(void *mb_buf, struct hns_roce_mr *mr)

	hr_reg_write(mpt_entry, MPT_PBL_HOP_NUM, 1);
	hr_reg_write(mpt_entry, MPT_PBL_BA_PG_SZ,
		     to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.ba_pg_shift));
		     to_hr_hw_page_shift(mr->pbl_mtr->hem_cfg.ba_pg_shift));
	hr_reg_write(mpt_entry, MPT_PBL_BUF_PG_SZ,
		     to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.buf_pg_shift));
		     to_hr_hw_page_shift(mr->pbl_mtr->hem_cfg.buf_pg_shift));

	mpt_entry->pbl_size = cpu_to_le32(mr->npages);

@@ -3919,7 +3919,7 @@ static void hns_roce_v2_dereg_mr(struct hns_roce_dev *hr_dev)

/* Return the address of CQE n within the CQ buffer (hw v2 layout).
 * Diff-extraction artifact fixed: the pre-patch line using the embedded
 * mtr (hr_cq->mtr.kmem) made the pointer-form return unreachable; only
 * the post-patch pointer dereference is kept.
 */
static void *get_cqe_v2(struct hns_roce_cq *hr_cq, int n)
{
	return hns_roce_buf_offset(hr_cq->mtr->kmem, n * hr_cq->cqe_size);
}

static void *get_sw_cqe_v2(struct hns_roce_cq *hr_cq, unsigned int n)
@@ -4078,9 +4078,9 @@ static void hns_roce_v2_write_cqc(struct hns_roce_dev *hr_dev,
	hr_reg_write(cq_context, CQC_CQE_NEX_BLK_ADDR_H,
		     upper_32_bits(to_hr_hw_page_addr(mtts[1])));
	hr_reg_write(cq_context, CQC_CQE_BAR_PG_SZ,
		     to_hr_hw_page_shift(hr_cq->mtr.hem_cfg.ba_pg_shift));
		     to_hr_hw_page_shift(hr_cq->mtr->hem_cfg.ba_pg_shift));
	hr_reg_write(cq_context, CQC_CQE_BUF_PG_SZ,
		     to_hr_hw_page_shift(hr_cq->mtr.hem_cfg.buf_pg_shift));
		     to_hr_hw_page_shift(hr_cq->mtr->hem_cfg.buf_pg_shift));
	hr_reg_write(cq_context, CQC_CQE_BA_L, dma_handle >> CQC_CQE_BA_L_S);
	hr_reg_write(cq_context, CQC_CQE_BA_H, dma_handle >> CQC_CQE_BA_H_S);
	hr_reg_write_bool(cq_context, CQC_DB_RECORD_EN,
@@ -4957,7 +4957,7 @@ static int config_qp_rq_buf(struct hns_roce_dev *hr_dev,
	int ret;

	/* Search qp buf's mtts */
	ret = hns_roce_mtr_find(hr_dev, &hr_qp->mtr, hr_qp->rq.wqe_offset,
	ret = hns_roce_mtr_find(hr_dev, hr_qp->mtr, hr_qp->rq.wqe_offset,
				mtts, ARRAY_SIZE(mtts));
	if (hr_qp->rq.wqe_cnt && ret) {
		ibdev_err(&hr_dev->ib_dev,
@@ -4965,7 +4965,7 @@ static int config_qp_rq_buf(struct hns_roce_dev *hr_dev,
		return -EINVAL;
	}

	wqe_sge_ba = hns_roce_get_mtr_ba(&hr_qp->mtr);
	wqe_sge_ba = hns_roce_get_mtr_ba(hr_qp->mtr);

	context->wqe_sge_ba = cpu_to_le32(wqe_sge_ba >> 3);
	qpc_mask->wqe_sge_ba = 0;
@@ -4996,11 +4996,11 @@ static int config_qp_rq_buf(struct hns_roce_dev *hr_dev,
	hr_reg_clear(qpc_mask, QPC_RQ_HOP_NUM);

	hr_reg_write(context, QPC_WQE_SGE_BA_PG_SZ,
		     to_hr_hw_page_shift(hr_qp->mtr.hem_cfg.ba_pg_shift));
		     to_hr_hw_page_shift(hr_qp->mtr->hem_cfg.ba_pg_shift));
	hr_reg_clear(qpc_mask, QPC_WQE_SGE_BA_PG_SZ);

	hr_reg_write(context, QPC_WQE_SGE_BUF_PG_SZ,
		     to_hr_hw_page_shift(hr_qp->mtr.hem_cfg.buf_pg_shift));
		     to_hr_hw_page_shift(hr_qp->mtr->hem_cfg.buf_pg_shift));
	hr_reg_clear(qpc_mask, QPC_WQE_SGE_BUF_PG_SZ);

	context->rq_cur_blk_addr = cpu_to_le32(to_hr_hw_page_addr(mtts[0]));
@@ -5034,7 +5034,7 @@ static int config_qp_sq_buf(struct hns_roce_dev *hr_dev,
	int ret;

	/* search qp buf's mtts */
	ret = hns_roce_mtr_find(hr_dev, &hr_qp->mtr, hr_qp->sq.wqe_offset,
	ret = hns_roce_mtr_find(hr_dev, hr_qp->mtr, hr_qp->sq.wqe_offset,
				&sq_cur_blk, 1);
	if (ret) {
		ibdev_err(ibdev, "failed to find QP(0x%lx) SQ buf, ret = %d.\n",
@@ -5042,9 +5042,8 @@ static int config_qp_sq_buf(struct hns_roce_dev *hr_dev,
		return ret;
	}
	if (hr_qp->sge.sge_cnt > 0) {
		ret = hns_roce_mtr_find(hr_dev, &hr_qp->mtr,
					  hr_qp->sge.wqe_offset, &sge_cur_blk,
					  1);
		ret = hns_roce_mtr_find(hr_dev, hr_qp->mtr,
					hr_qp->sge.wqe_offset, &sge_cur_blk, 1);
		if (ret) {
			ibdev_err(ibdev, "failed to find QP(0x%lx) SGE buf, ret = %d.\n",
				  hr_qp->qpn, ret);
@@ -6416,7 +6415,7 @@ static int hns_roce_v2_write_srqc_index_queue(struct hns_roce_srq *srq,
	int ret;

	/* Get physical address of idx que buf */
	ret = hns_roce_mtr_find(hr_dev, &idx_que->mtr, 0, mtts_idx,
	ret = hns_roce_mtr_find(hr_dev, idx_que->mtr, 0, mtts_idx,
				ARRAY_SIZE(mtts_idx));
	if (ret) {
		ibdev_err(ibdev, "failed to find mtr for SRQ idx, ret = %d.\n",
@@ -6424,7 +6423,7 @@ static int hns_roce_v2_write_srqc_index_queue(struct hns_roce_srq *srq,
		return ret;
	}

	dma_handle_idx = hns_roce_get_mtr_ba(&idx_que->mtr);
	dma_handle_idx = hns_roce_get_mtr_ba(idx_que->mtr);

	hr_reg_write(ctx, SRQC_IDX_HOP_NUM,
		     to_hr_hem_hopnum(hr_dev->caps.idx_hop_num, srq->wqe_cnt));
@@ -6434,9 +6433,9 @@ static int hns_roce_v2_write_srqc_index_queue(struct hns_roce_srq *srq,
		     upper_32_bits(dma_handle_idx >> DMA_IDX_SHIFT));

	hr_reg_write(ctx, SRQC_IDX_BA_PG_SZ,
		     to_hr_hw_page_shift(idx_que->mtr.hem_cfg.ba_pg_shift));
		     to_hr_hw_page_shift(idx_que->mtr->hem_cfg.ba_pg_shift));
	hr_reg_write(ctx, SRQC_IDX_BUF_PG_SZ,
		     to_hr_hw_page_shift(idx_que->mtr.hem_cfg.buf_pg_shift));
		     to_hr_hw_page_shift(idx_que->mtr->hem_cfg.buf_pg_shift));

	hr_reg_write(ctx, SRQC_IDX_CUR_BLK_ADDR_L,
		     to_hr_hw_page_addr(mtts_idx[0]));
@@ -6463,7 +6462,7 @@ static int hns_roce_v2_write_srqc(struct hns_roce_srq *srq, void *mb_buf)
	memset(ctx, 0, sizeof(*ctx));

	/* Get the physical address of srq buf */
	ret = hns_roce_mtr_find(hr_dev, &srq->buf_mtr, 0, mtts_wqe,
	ret = hns_roce_mtr_find(hr_dev, srq->buf_mtr, 0, mtts_wqe,
				ARRAY_SIZE(mtts_wqe));
	if (ret) {
		ibdev_err(ibdev, "failed to find mtr for SRQ WQE, ret = %d.\n",
@@ -6471,7 +6470,7 @@ static int hns_roce_v2_write_srqc(struct hns_roce_srq *srq, void *mb_buf)
		return ret;
	}

	dma_handle_wqe = hns_roce_get_mtr_ba(&srq->buf_mtr);
	dma_handle_wqe = hns_roce_get_mtr_ba(srq->buf_mtr);

	hr_reg_write(ctx, SRQC_SRQ_ST, 1);
	hr_reg_write_bool(ctx, SRQC_SRQ_TYPE,
@@ -6493,9 +6492,9 @@ static int hns_roce_v2_write_srqc(struct hns_roce_srq *srq, void *mb_buf)
		     upper_32_bits(dma_handle_wqe >> DMA_WQE_SHIFT));

	hr_reg_write(ctx, SRQC_WQE_BA_PG_SZ,
		     to_hr_hw_page_shift(srq->buf_mtr.hem_cfg.ba_pg_shift));
		     to_hr_hw_page_shift(srq->buf_mtr->hem_cfg.ba_pg_shift));
	hr_reg_write(ctx, SRQC_WQE_BUF_PG_SZ,
		     to_hr_hw_page_shift(srq->buf_mtr.hem_cfg.buf_pg_shift));
		     to_hr_hw_page_shift(srq->buf_mtr->hem_cfg.buf_pg_shift));

	if (srq->cap_flags & HNS_ROCE_SRQ_CAP_RECORD_DB) {
		hr_reg_enable(ctx, SRQC_DB_RECORD_EN);
@@ -6848,7 +6847,7 @@ static struct hns_roce_aeqe *next_aeqe_sw_v2(struct hns_roce_eq *eq)
{
	struct hns_roce_aeqe *aeqe;

	aeqe = hns_roce_buf_offset(eq->mtr.kmem,
	aeqe = hns_roce_buf_offset(eq->mtr->kmem,
				   (eq->cons_index & (eq->entries - 1)) *
				   eq->eqe_size);

@@ -6915,7 +6914,7 @@ static struct hns_roce_ceqe *next_ceqe_sw_v2(struct hns_roce_eq *eq)
{
	struct hns_roce_ceqe *ceqe;

	ceqe = hns_roce_buf_offset(eq->mtr.kmem,
	ceqe = hns_roce_buf_offset(eq->mtr->kmem,
				   (eq->cons_index & (eq->entries - 1)) *
				   eq->eqe_size);

@@ -7155,7 +7154,7 @@ static void hns_roce_v2_int_mask_enable(struct hns_roce_dev *hr_dev,

/* Release the EQ's buffer by destroying its mtr.
 * Diff-extraction artifact fixed: both the pre-patch call
 * (hns_roce_mtr_destroy(hr_dev, &eq->mtr)) and the post-patch pointer
 * call were present, which — since hns_roce_mtr_destroy() now kvfree()s
 * the mtr — would be a double destroy/double-free. Only the single
 * pointer-form call is kept.
 */
static void free_eq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)
{
	hns_roce_mtr_destroy(hr_dev, eq->mtr);
}

static void hns_roce_v2_destroy_eqc(struct hns_roce_dev *hr_dev,
@@ -7202,14 +7201,14 @@ static int config_eqc(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq,
	init_eq_config(hr_dev, eq);

	/* if not multi-hop, eqe buffer only use one trunk */
	ret = hns_roce_mtr_find(hr_dev, &eq->mtr, 0, eqe_ba,
	ret = hns_roce_mtr_find(hr_dev, eq->mtr, 0, eqe_ba,
				ARRAY_SIZE(eqe_ba));
	if (ret) {
		dev_err(hr_dev->dev, "failed to find EQE mtr, ret = %d\n", ret);
		return ret;
	}

	bt_ba = hns_roce_get_mtr_ba(&eq->mtr);
	bt_ba = hns_roce_get_mtr_ba(eq->mtr);

	hr_reg_write(eqc, EQC_EQ_ST, HNS_ROCE_V2_EQ_STATE_VALID);
	hr_reg_write(eqc, EQC_EQE_HOP_NUM, eq->hop_num);
@@ -7219,9 +7218,9 @@ static int config_eqc(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq,
	hr_reg_write(eqc, EQC_EQN, eq->eqn);
	hr_reg_write(eqc, EQC_EQE_CNT, HNS_ROCE_EQ_INIT_EQE_CNT);
	hr_reg_write(eqc, EQC_EQE_BA_PG_SZ,
		     to_hr_hw_page_shift(eq->mtr.hem_cfg.ba_pg_shift));
		     to_hr_hw_page_shift(eq->mtr->hem_cfg.ba_pg_shift));
	hr_reg_write(eqc, EQC_EQE_BUF_PG_SZ,
		     to_hr_hw_page_shift(eq->mtr.hem_cfg.buf_pg_shift));
		     to_hr_hw_page_shift(eq->mtr->hem_cfg.buf_pg_shift));
	hr_reg_write(eqc, EQC_EQ_PROD_INDX, HNS_ROCE_EQ_INIT_PROD_IDX);
	hr_reg_write(eqc, EQC_EQ_MAX_CNT, eq->eq_max_cnt);

@@ -7254,7 +7253,7 @@ static int config_eqc(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq,
static int alloc_eq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)
{
	struct hns_roce_buf_attr buf_attr = {};
	int err;
	int err = 0;

	if (hr_dev->caps.eqe_hop_num == HNS_ROCE_HOP_NUM_0)
		eq->hop_num = 0;
@@ -7266,11 +7265,13 @@ static int alloc_eq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)
	buf_attr.region[0].hopnum = eq->hop_num;
	buf_attr.region_count = 1;

	err = hns_roce_mtr_create(hr_dev, &eq->mtr, &buf_attr,
				  hr_dev->caps.eqe_ba_pg_sz + PAGE_SHIFT, NULL,
				  0);
	if (err)
	eq->mtr = hns_roce_mtr_create(hr_dev, &buf_attr,
				      hr_dev->caps.eqe_ba_pg_sz + PAGE_SHIFT,
				      NULL, 0);
	if (IS_ERR(eq->mtr)) {
		err = PTR_ERR(eq->mtr);
		dev_err(hr_dev->dev, "Failed to alloc EQE mtr, err %d\n", err);
	}

	return err;
}
+39 −24
Original line number Diff line number Diff line
@@ -97,7 +97,7 @@ static int alloc_mr_pbl(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr,
	struct ib_device *ibdev = &hr_dev->ib_dev;
	bool is_fast = mr->type == MR_TYPE_FRMR;
	struct hns_roce_buf_attr buf_attr = {};
	int err;
	int err = 0;

	mr->mtr_node = kvmalloc(sizeof(*mr->mtr_node), GFP_KERNEL);
	if (!mr->mtr_node)
@@ -116,17 +116,18 @@ static int alloc_mr_pbl(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr,
	/* pagesize and hopnum is fixed for fast MR */
	buf_attr.adaptive = !is_fast;

	err = hns_roce_mtr_create(hr_dev, &mr->pbl_mtr, &buf_attr,
				  hr_dev->caps.pbl_ba_pg_sz + PAGE_SHIFT,
	mr->pbl_mtr = hns_roce_mtr_create(hr_dev, &buf_attr,
					  hr_dev->caps.pbl_ba_pg_sz + HNS_HW_PAGE_SHIFT,
					  udata, start);
	if (err) {
	if (IS_ERR(mr->pbl_mtr)) {
		err = PTR_ERR(mr->pbl_mtr);
		ibdev_err(ibdev, "failed to alloc pbl mtr, ret = %d.\n", err);
		kvfree(mr->mtr_node);
		mr->mtr_node = NULL;
		return err;
	}

	mr->npages = mr->pbl_mtr.hem_cfg.buf_pg_count;
	mr->npages = mr->pbl_mtr->hem_cfg.buf_pg_count;
	mr->pbl_hop_num = buf_attr.region[0].hopnum;

	return err;
@@ -135,9 +136,9 @@ static int alloc_mr_pbl(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr,
static void free_mr_pbl(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr)
{
	if (mr->delayed_destroy_flag && mr->type != MR_TYPE_DMA) {
		hns_roce_add_unfree_mtr(mr->mtr_node, hr_dev, &mr->pbl_mtr);
		hns_roce_add_unfree_mtr(mr->mtr_node, hr_dev, mr->pbl_mtr);
	} else {
		hns_roce_mtr_destroy(hr_dev, &mr->pbl_mtr);
		hns_roce_mtr_destroy(hr_dev, mr->pbl_mtr);
		kvfree(mr->mtr_node);
		mr->mtr_node = NULL;
	}
@@ -213,18 +214,22 @@ struct ib_mr *hns_roce_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
	struct hns_roce_mr *mr;
	int ret;
	int ret = -ENOMEM;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (mr == NULL)
		return  ERR_PTR(-ENOMEM);

	mr->pbl_mtr = kvzalloc(sizeof(*mr->pbl_mtr), GFP_KERNEL);
	if (!mr->pbl_mtr)
		goto err_mtr;

	mr->type = MR_TYPE_DMA;
	mr->pd = to_hr_pd(pd)->pdn;
	mr->access = acc;

	/* Allocate memory region key */
	hns_roce_hem_list_init(&mr->pbl_mtr.hem_list);
	hns_roce_hem_list_init(&mr->pbl_mtr->hem_list);
	ret = alloc_mr_key(hr_dev, mr);
	if (ret)
		goto err_free;
@@ -240,6 +245,8 @@ struct ib_mr *hns_roce_get_dma_mr(struct ib_pd *pd, int acc)
	free_mr_key(hr_dev, mr);

err_free:
	kvfree(mr->pbl_mtr);
err_mtr:
	kfree(mr);
	return ERR_PTR(ret);
}
@@ -437,7 +444,7 @@ static int hns_roce_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct hns_roce_mr *mr = to_hr_mr(ibmr);

	if (likely(mr->npages < mr->pbl_mtr.hem_cfg.buf_pg_count)) {
	if (likely(mr->npages < mr->pbl_mtr->hem_cfg.buf_pg_count)) {
		mr->page_list[mr->npages++] = addr;
		return 0;
	}
@@ -452,7 +459,7 @@ int hns_roce_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
	struct hns_roce_dev *hr_dev = to_hr_dev(ibmr->device);
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct hns_roce_mr *mr = to_hr_mr(ibmr);
	struct hns_roce_mtr *mtr = &mr->pbl_mtr;
	struct hns_roce_mtr *mtr = mr->pbl_mtr;
	int ret, sg_num = 0;

	if (!IS_ALIGNED(sg_offset, HNS_ROCE_FRMR_ALIGN_SIZE) ||
@@ -461,7 +468,7 @@ int hns_roce_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
		return sg_num;

	mr->npages = 0;
	mr->page_list = kvcalloc(mr->pbl_mtr.hem_cfg.buf_pg_count,
	mr->page_list = kvcalloc(mr->pbl_mtr->hem_cfg.buf_pg_count,
				 sizeof(dma_addr_t), GFP_KERNEL);
	if (!mr->page_list)
		return sg_num;
@@ -469,7 +476,7 @@ int hns_roce_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
	sg_num = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset_p, hns_roce_set_page);
	if (sg_num < 1) {
		ibdev_err(ibdev, "failed to store sg pages %u %u, cnt = %d.\n",
			  mr->npages, mr->pbl_mtr.hem_cfg.buf_pg_count, sg_num);
			  mr->npages, mr->pbl_mtr->hem_cfg.buf_pg_count, sg_num);
		goto err_page_list;
	}

@@ -482,7 +489,7 @@ int hns_roce_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
		ibdev_err(ibdev, "failed to map sg mtr, ret = %d.\n", ret);
		sg_num = 0;
	} else {
		mr->pbl_mtr.hem_cfg.buf_pg_shift = (u32)ilog2(ibmr->page_size);
		mr->pbl_mtr->hem_cfg.buf_pg_shift = (u32)ilog2(ibmr->page_size);
	}

err_page_list:
@@ -1189,26 +1196,31 @@ static void mtr_free_mtt(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr)
 * hns_roce_mtr_create - Create hns memory translate region.
 *
 * @hr_dev: RoCE device struct pointer
 * @mtr: memory translate region
 * @buf_attr: buffer attribute for creating mtr
 * @ba_page_shift: page shift for multi-hop base address table
 * @udata: user space context, if it's NULL, means kernel space
 * @user_addr: userspace virtual address to start at
 */
int hns_roce_mtr_create(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
struct hns_roce_mtr *hns_roce_mtr_create(struct hns_roce_dev *hr_dev,
					 struct hns_roce_buf_attr *buf_attr,
			unsigned int ba_page_shift, struct ib_udata *udata,
					 unsigned int ba_page_shift,
					 struct ib_udata *udata,
					 unsigned long user_addr)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct hns_roce_mtr *mtr;
	int ret;

	mtr = kvzalloc(sizeof(*mtr), GFP_KERNEL);
	if (!mtr)
		return ERR_PTR(-ENOMEM);

	if (!buf_attr->mtt_only) {
		ret = mtr_alloc_bufs(hr_dev, mtr, buf_attr, udata, user_addr);
		if (ret) {
			ibdev_err(ibdev,
				  "failed to alloc mtr bufs, ret = %d.\n", ret);
			return ret;
			goto err_out;
		}

		ret = get_best_page_shift(hr_dev, mtr, buf_attr);
@@ -1236,7 +1248,7 @@ int hns_roce_mtr_create(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
	if (buf_attr->mtt_only) {
		mtr->umem = NULL;
		mtr->kmem = NULL;
		return 0;
		return mtr;
	}

	/* Write buffer's dma address to MTT */
@@ -1245,13 +1257,15 @@ int hns_roce_mtr_create(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
		ibdev_err(ibdev, "failed to map mtr bufs, ret = %d.\n", ret);
		goto err_alloc_mtt;
	}
	return 0;
	return mtr;

err_alloc_mtt:
	mtr_free_mtt(hr_dev, mtr);
err_init_buf:
	mtr_free_bufs(hr_dev, mtr);
	return ret;
err_out:
	kvfree(mtr);
	return ERR_PTR(ret);
}

void hns_roce_mtr_destroy(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr)
@@ -1261,6 +1275,7 @@ void hns_roce_mtr_destroy(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr)

	/* free buffers */
	mtr_free_bufs(hr_dev, mtr);
	kvfree(mtr);
}

static void hns_roce_copy_mtr(struct hns_roce_mtr *new_mtr, struct hns_roce_mtr *old_mtr)
Loading