Commit 33a7922b authored by Junxian Huang's avatar Junxian Huang Committed by Xinghai Cen
Browse files

RDMA/hns: Move mtr_node into the mtr struct

driver inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/IBQK42



----------------------------------------------------------------------

Previously, the driver had to copy a new mtr and store it in mtr_node so
that it could be found when freeing delayed-destruction resources,
because the life cycle of the original mtr was over by the time the
QP/CQ/MR/SRQ structs were freed.

But since the life cycle of the mtr has been decoupled, the driver no
longer needs to copy it. Move mtr_node into the mtr struct so that the
mtr can be found without copying a new one.

Signed-off-by: default avatarJunxian Huang <huangjunxian6@hisilicon.com>
Signed-off-by: default avatarXinghai Cen <cenxinghai@h-partners.com>
parent 215568bf
Loading
Loading
Loading
Loading
+3 −12
Original line number Diff line number Diff line
@@ -264,10 +264,6 @@ static int alloc_cq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq,
	struct hns_roce_buf_attr buf_attr = {};
	int ret = 0;

	hr_cq->mtr_node = kvmalloc(sizeof(*hr_cq->mtr_node), GFP_KERNEL);
	if (!hr_cq->mtr_node)
		return -ENOMEM;

	buf_attr.page_shift = hr_dev->caps.cqe_buf_pg_sz + PAGE_SHIFT;
	buf_attr.region[0].size = hr_cq->cq_depth * hr_cq->cqe_size;
	buf_attr.region[0].hopnum = hr_dev->caps.cqe_hop_num;
@@ -279,8 +275,6 @@ static int alloc_cq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq,
	if (IS_ERR(hr_cq->mtr)) {
		ret = PTR_ERR(hr_cq->mtr);
		ibdev_err(ibdev, "Failed to alloc CQ mtr, ret = %d\n", ret);
		kvfree(hr_cq->mtr_node);
		hr_cq->mtr_node = NULL;
	}

	return ret;
@@ -288,13 +282,10 @@ static int alloc_cq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq,

static void free_cq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
{
	if (hr_cq->delayed_destroy_flag) {
		hns_roce_add_unfree_mtr(hr_cq->mtr_node, hr_dev, hr_cq->mtr);
	} else {
	if (hr_cq->delayed_destroy_flag)
		hns_roce_add_unfree_mtr(hr_dev, hr_cq->mtr);
	else
		hns_roce_mtr_destroy(hr_dev, hr_cq->mtr);
		kvfree(hr_cq->mtr_node);
		hr_cq->mtr_node = NULL;
	}
}

static int alloc_cq_db(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq,
+2 −12
Original line number Diff line number Diff line
@@ -391,6 +391,7 @@ struct hns_roce_mtr {
	struct ib_umem		*umem; /* user space buffer */
	struct hns_roce_buf	*kmem; /* kernel space buffer */
	struct hns_roce_hem_cfg  hem_cfg; /* config for hardware addressing */
	struct list_head	 node; /* list node for delay-destruction */
};

/* DCA config */
@@ -430,7 +431,6 @@ struct hns_roce_mr {
	u32			npages;
	dma_addr_t		*page_list;
	bool			delayed_destroy_flag;
	struct hns_roce_mtr_node *mtr_node;
};

struct hns_roce_mr_table {
@@ -561,7 +561,6 @@ struct hns_roce_cq {
	u8				poe_channel;
	bool				delayed_destroy_flag;
	struct hns_roce_notify_conf	write_notify;
	struct hns_roce_mtr_node *mtr_node;
};

struct hns_roce_idx_que {
@@ -570,7 +569,6 @@ struct hns_roce_idx_que {
	unsigned long			*bitmap;
	u32				head;
	u32				tail;
	struct hns_roce_mtr_node *mtr_node;
};

struct hns_roce_srq {
@@ -597,7 +595,6 @@ struct hns_roce_srq {
	struct hns_roce_db	rdb;
	u32			cap_flags;
	bool			delayed_destroy_flag;
	struct hns_roce_mtr_node *mtr_node;
};

struct hns_roce_uar_table {
@@ -771,7 +768,6 @@ struct hns_roce_qp {
	u8			tc_mode;
	u8			priority;
	bool			delayed_destroy_flag;
	struct hns_roce_mtr_node *mtr_node;
	spinlock_t flush_lock;
	struct hns_roce_dip *dip;
};
@@ -1146,11 +1142,6 @@ struct hns_roce_port {
	struct hns_roce_cnp_pri_param *cnp_pri_param;
};

struct hns_roce_mtr_node {
	struct hns_roce_mtr mtr;
	struct list_head list;
};

struct hns_roce_dev {
	struct ib_device	ib_dev;
	struct pci_dev		*pci_dev;
@@ -1549,8 +1540,7 @@ int hns_roce_fill_res_mr_entry_raw(struct sk_buff *msg, struct ib_mr *ib_mr);
void hns_roce_add_unfree_umem(struct hns_roce_user_db_page *user_page,
			      struct hns_roce_dev *hr_dev);
void hns_roce_free_unfree_umem(struct hns_roce_dev *hr_dev);
void hns_roce_add_unfree_mtr(struct hns_roce_mtr_node *pos,
			     struct hns_roce_dev *hr_dev,
void hns_roce_add_unfree_mtr(struct hns_roce_dev *hr_dev,
			     struct hns_roce_mtr *mtr);
void hns_roce_free_unfree_mtr(struct hns_roce_dev *hr_dev);
struct hns_user_mmap_entry *
+9 −45
Original line number Diff line number Diff line
@@ -99,10 +99,6 @@ static int alloc_mr_pbl(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr,
	struct hns_roce_buf_attr buf_attr = {};
	int err = 0;

	mr->mtr_node = kvmalloc(sizeof(*mr->mtr_node), GFP_KERNEL);
	if (!mr->mtr_node)
		return -ENOMEM;

	mr->pbl_hop_num = is_fast ? 1 : hr_dev->caps.pbl_hop_num;
	buf_attr.page_shift = is_fast ? PAGE_SHIFT :
			      hr_dev->caps.pbl_buf_pg_sz + PAGE_SHIFT;
@@ -122,8 +118,6 @@ static int alloc_mr_pbl(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr,
	if (IS_ERR(mr->pbl_mtr)) {
		err = PTR_ERR(mr->pbl_mtr);
		ibdev_err(ibdev, "failed to alloc pbl mtr, ret = %d.\n", err);
		kvfree(mr->mtr_node);
		mr->mtr_node = NULL;
		return err;
	}

@@ -135,13 +129,10 @@ static int alloc_mr_pbl(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr,

static void free_mr_pbl(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr)
{
	if (mr->delayed_destroy_flag && mr->type != MR_TYPE_DMA) {
		hns_roce_add_unfree_mtr(mr->mtr_node, hr_dev, mr->pbl_mtr);
	} else {
	if (mr->delayed_destroy_flag && mr->type != MR_TYPE_DMA)
		hns_roce_add_unfree_mtr(hr_dev, mr->pbl_mtr);
	else
		hns_roce_mtr_destroy(hr_dev, mr->pbl_mtr);
		kvfree(mr->mtr_node);
		mr->mtr_node = NULL;
	}
}

static void hns_roce_mr_free(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr)
@@ -1278,49 +1269,22 @@ void hns_roce_mtr_destroy(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr)
	kvfree(mtr);
}

static void hns_roce_copy_mtr(struct hns_roce_mtr *new_mtr, struct hns_roce_mtr *old_mtr)
{
	struct list_head *new_head, *old_head;
	int i, j;

	memcpy(new_mtr, old_mtr, sizeof(*old_mtr));

	for (i = 0; i < HNS_ROCE_MAX_BT_REGION; i++)
		for (j = 0; j < HNS_ROCE_MAX_BT_LEVEL; j++) {
			new_head = &new_mtr->hem_list.mid_bt[i][j];
			old_head = &old_mtr->hem_list.mid_bt[i][j];
			list_replace(old_head, new_head);
		}

	new_head = &new_mtr->hem_list.root_bt;
	old_head = &old_mtr->hem_list.root_bt;
	list_replace(old_head, new_head);

	new_head = &new_mtr->hem_list.btm_bt;
	old_head = &old_mtr->hem_list.btm_bt;
	list_replace(old_head, new_head);
}

void hns_roce_add_unfree_mtr(struct hns_roce_mtr_node *pos,
			     struct hns_roce_dev *hr_dev,
void hns_roce_add_unfree_mtr(struct hns_roce_dev *hr_dev,
			     struct hns_roce_mtr *mtr)
{
	hns_roce_copy_mtr(&pos->mtr, mtr);

	mutex_lock(&hr_dev->mtr_unfree_list_mutex);
	list_add_tail(&pos->list, &hr_dev->mtr_unfree_list);
	list_add_tail(&mtr->node, &hr_dev->mtr_unfree_list);
	mutex_unlock(&hr_dev->mtr_unfree_list_mutex);
}

void hns_roce_free_unfree_mtr(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_mtr_node *pos, *next;
	struct hns_roce_mtr *mtr, *next;

	mutex_lock(&hr_dev->mtr_unfree_list_mutex);
	list_for_each_entry_safe(pos, next, &hr_dev->mtr_unfree_list, list) {
		list_del(&pos->list);
		hns_roce_mtr_destroy(hr_dev, &pos->mtr);
		kvfree(pos);
	list_for_each_entry_safe(mtr, next, &hr_dev->mtr_unfree_list, node) {
		list_del(&mtr->node);
		hns_roce_mtr_destroy(hr_dev, mtr);
	}
	mutex_unlock(&hr_dev->mtr_unfree_list_mutex);
}
+3 −14
Original line number Diff line number Diff line
@@ -838,18 +838,12 @@ static int alloc_wqe_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
	struct ib_device *ibdev = &hr_dev->ib_dev;
	int ret = 0;

	hr_qp->mtr_node = kvmalloc(sizeof(*hr_qp->mtr_node), GFP_KERNEL);
	if (!hr_qp->mtr_node)
		return -ENOMEM;

	if (dca_en) {
		/* DCA must be enabled after the buffer attr is configured. */
		ret = hns_roce_enable_dca(hr_qp, udata);
		if (ret) {
			ibdev_err(ibdev, "failed to enable DCA, ret = %d.\n",
				  ret);
			kvfree(hr_qp->mtr_node);
			hr_qp->mtr_node = NULL;
			return ret;
		}

@@ -874,8 +868,6 @@ static int alloc_wqe_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
		ibdev_err(ibdev, "failed to create WQE mtr, ret = %d.\n", ret);
		if (dca_en)
			hns_roce_disable_dca(hr_dev, hr_qp, udata);
		kvfree(hr_qp->mtr_node);
		hr_qp->mtr_node = NULL;
	} else if (dca_en) {
		ret = hns_roce_map_dca_safe_page(hr_dev, hr_qp);
	}
@@ -886,13 +878,10 @@ static int alloc_wqe_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
static void free_wqe_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
			 struct ib_udata *udata)
{
	if (hr_qp->delayed_destroy_flag) {
		hns_roce_add_unfree_mtr(hr_qp->mtr_node, hr_dev, hr_qp->mtr);
	} else {
	if (hr_qp->delayed_destroy_flag)
		hns_roce_add_unfree_mtr(hr_dev, hr_qp->mtr);
	else
		hns_roce_mtr_destroy(hr_dev, hr_qp->mtr);
		kvfree(hr_qp->mtr_node);
		hr_qp->mtr_node = NULL;
	}

	if (hr_qp->en_flags & HNS_ROCE_QP_CAP_DYNAMIC_CTX_ATTACH)
		hns_roce_disable_dca(hr_dev, hr_qp, udata);
+7 −26
Original line number Diff line number Diff line
@@ -185,10 +185,6 @@ static int alloc_srq_idx(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq,
	struct hns_roce_buf_attr buf_attr = {};
	int ret = 0;

	idx_que->mtr_node = kvmalloc(sizeof(*idx_que->mtr_node), GFP_KERNEL);
	if (!idx_que->mtr_node)
		return -ENOMEM;

	srq->idx_que.entry_shift = ilog2(HNS_ROCE_IDX_QUE_ENTRY_SZ);

	buf_attr.page_shift = hr_dev->caps.idx_buf_pg_sz + PAGE_SHIFT;
@@ -203,7 +199,7 @@ static int alloc_srq_idx(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq,
	if (IS_ERR(idx_que->mtr)) {
		ret = PTR_ERR(idx_que->mtr);
		ibdev_err(ibdev, "Failed to alloc SRQ idx mtr, ret = %d.\n", ret);
		goto err_kvmalloc;
		return ret;
	}

	if (!udata) {
@@ -221,9 +217,6 @@ static int alloc_srq_idx(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq,
	return 0;
err_idx_mtr:
	hns_roce_mtr_destroy(hr_dev, idx_que->mtr);
err_kvmalloc:
	kvfree(idx_que->mtr_node);
	idx_que->mtr_node = NULL;

	return ret;
}
@@ -234,13 +227,10 @@ static void free_srq_idx(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)

	bitmap_free(idx_que->bitmap);
	idx_que->bitmap = NULL;
	if (srq->delayed_destroy_flag) {
		hns_roce_add_unfree_mtr(idx_que->mtr_node, hr_dev, idx_que->mtr);
	} else {
	if (srq->delayed_destroy_flag)
		hns_roce_add_unfree_mtr(hr_dev, idx_que->mtr);
	else
		hns_roce_mtr_destroy(hr_dev, idx_que->mtr);
		kvfree(idx_que->mtr_node);
		idx_que->mtr_node = NULL;
	}
}

static int alloc_srq_wqe_buf(struct hns_roce_dev *hr_dev,
@@ -251,10 +241,6 @@ static int alloc_srq_wqe_buf(struct hns_roce_dev *hr_dev,
	struct hns_roce_buf_attr buf_attr = {};
	int ret = 0;

	srq->mtr_node = kvmalloc(sizeof(*srq->mtr_node), GFP_KERNEL);
	if (!srq->mtr_node)
		return -ENOMEM;

	srq->wqe_shift = ilog2(roundup_pow_of_two(max(HNS_ROCE_SGE_SIZE,
						      HNS_ROCE_SGE_SIZE *
						      srq->max_gs)));
@@ -272,8 +258,6 @@ static int alloc_srq_wqe_buf(struct hns_roce_dev *hr_dev,
		ret = PTR_ERR(srq->buf_mtr);
		ibdev_err(ibdev,
			  "failed to alloc SRQ buf mtr, ret = %d.\n", ret);
		kvfree(srq->mtr_node);
		srq->mtr_node = NULL;
	}

	return ret;
@@ -282,13 +266,10 @@ static int alloc_srq_wqe_buf(struct hns_roce_dev *hr_dev,
static void free_srq_wqe_buf(struct hns_roce_dev *hr_dev,
			     struct hns_roce_srq *srq)
{
	if (srq->delayed_destroy_flag) {
		hns_roce_add_unfree_mtr(srq->mtr_node, hr_dev, srq->buf_mtr);
	} else {
	if (srq->delayed_destroy_flag)
		hns_roce_add_unfree_mtr(hr_dev, srq->buf_mtr);
	else
		hns_roce_mtr_destroy(hr_dev, srq->buf_mtr);
		kvfree(srq->mtr_node);
		srq->mtr_node = NULL;
	}
}

static int alloc_srq_wrid(struct hns_roce_srq *srq)