Unverified commit af590008, authored by openeuler-ci-bot and committed by Gitee

!15711 Some bug fix patches for RDMA/hns to olk-6.6

Merge Pull Request from: @ci-robot 
 
PR sync from: Junxian Huang <huangjunxian6@hisilicon.com>
https://mailweb.openeuler.org/archives/list/kernel@openeuler.org/message/NUKMS2IWBJ3BNEKA3EGMXPBNE37NNZ3R/ 
From: Xinghai Cen <cenxinghai@h-partners.com>

Some bug fix patches for RDMA/hns to olk-6.6

Chengchang Tang (1):
  RDMA/hns: Fix a meaningless loop in free_buffer_pages_proc()

Junxian Huang (7):
  RDMA/hns: Change mtr member to pointer in hns QP/CQ/MR/SRQ/EQ struct
  RDMA/hns: Move mtr_node into the mtr struct
  RDMA/hns: Fix delayed destruction of db not taking effect
  RDMA/hns: Fix delay-destruction mechanism not processing kernel db
  RDMA/hns: Fix mismatched kzalloc vs kvfree
  RDMA/hns: Fix DCA error path in alloc_wqe_buf()
  RDMA/hns: Reorder uctx deallocation

Yuyu Li (1):
  RDMA/hns: Fix remove debugfs after device has been unregistered

 
https://gitee.com/openeuler/kernel/issues/IBV8UW 
 
Link: https://gitee.com/openeuler/kernel/pulls/15711

 

Reviewed-by: Chengchang Tang <tangchengchang@huawei.com>
Signed-off-by: Zhang Peng <zhangpeng362@huawei.com>
parents ad5bd47c 9a8ae930
+1 −1
@@ -606,7 +606,7 @@ static struct hns_roce_die_info *alloc_die_info(int bus_num)
static void dealloc_die_info(struct hns_roce_die_info *die_info, u8 bus_num)
{
	xa_erase(&roce_bond_xa, bus_num);
-	kvfree(die_info);
+	kfree(die_info);
}

static int alloc_bond_id(struct hns_roce_bond_group *bond_grp)
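
Note on the hunk above: kernel allocations must be released by the routine that matches the allocator. kzalloc()/kmalloc() memory goes back through kfree(), while kvmalloc()/kvzalloc() (which may fall back to vmalloc()) must be released with kvfree(). Per the patch title "RDMA/hns: Fix mismatched kzalloc vs kvfree", die_info comes from kzalloc(), so kfree() is the matching call. A minimal, illustrative sketch of the pairing rule (the struct and function here are made up, not part of the driver):

#include <linux/slab.h>

struct demo_obj {
	int val;
};

/* Illustrative only: each allocator has exactly one matching free routine. */
static int demo_alloc_pairing(void)
{
	struct demo_obj *slab_obj, *kv_obj;

	slab_obj = kzalloc(sizeof(*slab_obj), GFP_KERNEL);	/* slab-backed */
	if (!slab_obj)
		return -ENOMEM;

	kv_obj = kvzalloc(sizeof(*kv_obj), GFP_KERNEL);		/* may fall back to vmalloc */
	if (!kv_obj) {
		kfree(slab_obj);
		return -ENOMEM;
	}

	kfree(slab_obj);	/* kzalloc/kmalloc   -> kfree  */
	kvfree(kv_obj);		/* kvzalloc/kvmalloc -> kvfree */
	return 0;
}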
+14 −21
@@ -190,7 +190,7 @@ static int alloc_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
	u64 mtts[MTT_MIN_COUNT] = {};
	int ret;

-	ret = hns_roce_mtr_find(hr_dev, &hr_cq->mtr, 0, mtts, ARRAY_SIZE(mtts));
+	ret = hns_roce_mtr_find(hr_dev, hr_cq->mtr, 0, mtts, ARRAY_SIZE(mtts));
	if (ret) {
		ibdev_err(ibdev, "failed to find CQ mtr, ret = %d.\n", ret);
		return ret;
@@ -211,7 +211,7 @@ static int alloc_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
	}

	ret = hns_roce_create_cqc(hr_dev, hr_cq, mtts,
-				  hns_roce_get_mtr_ba(&hr_cq->mtr));
+				  hns_roce_get_mtr_ba(hr_cq->mtr));
	if (ret)
		goto err_xa;

@@ -261,24 +261,19 @@ static int alloc_cq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq,
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct hns_roce_buf_attr buf_attr = {};
-	int ret;
-
-	hr_cq->mtr_node = kvmalloc(sizeof(*hr_cq->mtr_node), GFP_KERNEL);
-	if (!hr_cq->mtr_node)
-		return -ENOMEM;
+	int ret = 0;

	buf_attr.page_shift = hr_dev->caps.cqe_buf_pg_sz + PAGE_SHIFT;
	buf_attr.region[0].size = hr_cq->cq_depth * hr_cq->cqe_size;
	buf_attr.region[0].hopnum = hr_dev->caps.cqe_hop_num;
	buf_attr.region_count = 1;

-	ret = hns_roce_mtr_create(hr_dev, &hr_cq->mtr, &buf_attr,
+	hr_cq->mtr = hns_roce_mtr_create(hr_dev, &buf_attr,
					 hr_dev->caps.cqe_ba_pg_sz + PAGE_SHIFT,
					 udata, addr);
-	if (ret) {
+	if (IS_ERR(hr_cq->mtr)) {
+		ret = PTR_ERR(hr_cq->mtr);
		ibdev_err(ibdev, "failed to alloc CQ mtr, ret = %d.\n", ret);
-		kvfree(hr_cq->mtr_node);
-		hr_cq->mtr_node = NULL;
	}

	return ret;
@@ -286,13 +281,10 @@ static int alloc_cq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq,

static void free_cq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
{
-	if (hr_cq->delayed_destroy_flag) {
-		hns_roce_add_unfree_mtr(hr_cq->mtr_node, hr_dev, &hr_cq->mtr);
-	} else {
-		hns_roce_mtr_destroy(hr_dev, &hr_cq->mtr);
-		kvfree(hr_cq->mtr_node);
-		hr_cq->mtr_node = NULL;
-	}
+	if (hr_cq->delayed_destroy_flag)
+		hns_roce_add_unfree_mtr(hr_dev, hr_cq->mtr);
+	else
+		hns_roce_mtr_destroy(hr_dev, hr_cq->mtr);
}

static int alloc_cq_db(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq,
@@ -346,7 +338,8 @@ static void free_cq_db(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq,
		hns_roce_db_unmap_user(uctx, &hr_cq->db,
				       hr_cq->delayed_destroy_flag);
	} else {
-		hns_roce_free_db(hr_dev, &hr_cq->db);
+		hns_roce_free_db(hr_dev, &hr_cq->db,
+				 hr_cq->delayed_destroy_flag);
	}
}
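
Note on the alloc_cq_buf()/free_cq_buf() hunks above: with the mtr member changed to a pointer, hns_roce_mtr_create() appears to return either a valid mtr pointer or an errno encoded with ERR_PTR(), which the caller checks with IS_ERR() and decodes with PTR_ERR(). A generic sketch of that calling convention, using invented names rather than the driver's API:

#include <linux/err.h>
#include <linux/slab.h>

struct demo_ctx {
	int id;
};

/* Hypothetical constructor following the ERR_PTR() convention. */
static struct demo_ctx *demo_ctx_create(int id)
{
	struct demo_ctx *ctx;

	if (id < 0)
		return ERR_PTR(-EINVAL);	/* encode the error in the pointer */

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return ERR_PTR(-ENOMEM);

	ctx->id = id;
	return ctx;
}

static int demo_ctx_user(void)
{
	struct demo_ctx *ctx = demo_ctx_create(1);

	if (IS_ERR(ctx))
		return PTR_ERR(ctx);		/* decode the errno back out */

	kfree(ctx);
	return 0;
}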

+54 −27
@@ -12,6 +12,7 @@ int hns_roce_db_map_user(struct hns_roce_ucontext *context, unsigned long virt,
{
	unsigned long page_addr = virt & PAGE_MASK;
	struct hns_roce_user_db_page *page;
+	struct ib_umem *umem;
	unsigned int offset;
	int ret = 0;

@@ -29,32 +30,33 @@ int hns_roce_db_map_user(struct hns_roce_ucontext *context, unsigned long virt,

	refcount_set(&page->refcount, 1);
	page->user_virt = page_addr;
-	page->umem = ib_umem_get(context->ibucontext.device, page_addr,
-				 PAGE_SIZE, 0);
-	if (IS_ERR(page->umem)) {
-		ret = PTR_ERR(page->umem);
+	page->db_node = kvzalloc(sizeof(*page->db_node), GFP_KERNEL);
+	if (!page->db_node) {
+		ret = -ENOMEM;
		goto err_page;
	}
-	page->umem_node = kvmalloc(sizeof(*page->umem_node), GFP_KERNEL);
-	if (!page->umem_node) {
-		ret = -ENOMEM;
-		goto err_umem;
+
+	umem = ib_umem_get(context->ibucontext.device, page_addr, PAGE_SIZE, 0);
+	if (IS_ERR(umem)) {
+		ret = PTR_ERR(umem);
+		goto err_dbnode;
	}

+	page->db_node->umem = umem;
	list_add(&page->list, &context->page_list);

found:
	offset = virt - page_addr;
-	db->dma = sg_dma_address(page->umem->sgt_append.sgt.sgl) + offset;
-	db->virt_addr = sg_virt(page->umem->sgt_append.sgt.sgl) + offset;
+	db->dma = sg_dma_address(page->db_node->umem->sgt_append.sgt.sgl) + offset;
+	db->virt_addr = sg_virt(page->db_node->umem->sgt_append.sgt.sgl) + offset;
	db->u.user_page = page;
	refcount_inc(&page->refcount);

	mutex_unlock(&context->page_mutex);
	return 0;

-err_umem:
-	ib_umem_release(page->umem);
+err_dbnode:
+	kvfree(page->db_node);
err_page:
	kfree(page);
err_out:
@@ -68,17 +70,20 @@ void hns_roce_db_unmap_user(struct hns_roce_ucontext *context,
			    bool delayed_unmap_flag)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(context->ibucontext.device);
+	struct hns_roce_db_pg_node *db_node = db->u.user_page->db_node;

	mutex_lock(&context->page_mutex);

+	db_node->delayed_unmap_flag |= delayed_unmap_flag;
+
	refcount_dec(&db->u.user_page->refcount);
	if (refcount_dec_if_one(&db->u.user_page->refcount)) {
		list_del(&db->u.user_page->list);
-		if (delayed_unmap_flag) {
-			hns_roce_add_unfree_umem(db->u.user_page, hr_dev);
+		if (db_node->delayed_unmap_flag) {
+			hns_roce_add_unfree_db(db_node, hr_dev);
		} else {
-			ib_umem_release(db->u.user_page->umem);
-			kvfree(db->u.user_page->umem_node);
+			ib_umem_release(db_node->umem);
+			kvfree(db_node);
		}
		kfree(db->u.user_page);
	}
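
Note on the hns_roce_db_unmap_user() hunk above: the unchanged refcount lines implement a two-reference scheme that this patch keeps intact. The map path sets the page refcount to 1 on creation and takes one extra reference per mapping, so the unmap path drops the mapping reference with refcount_dec() and then uses refcount_dec_if_one() to detect that only the creation reference remains and release the page. A small self-contained sketch of that idiom with an invented object:

#include <linux/refcount.h>
#include <linux/slab.h>

struct demo_page {
	refcount_t refcount;
};

static struct demo_page *demo_page_create(void)
{
	struct demo_page *p = kzalloc(sizeof(*p), GFP_KERNEL);

	if (p)
		refcount_set(&p->refcount, 1);	/* creation reference */
	return p;
}

static void demo_page_map(struct demo_page *p)
{
	refcount_inc(&p->refcount);		/* one reference per mapping */
}

static void demo_page_unmap(struct demo_page *p)
{
	refcount_dec(&p->refcount);		/* drop this mapping's reference */
	if (refcount_dec_if_one(&p->refcount))	/* only the creation ref left? */
		kfree(p);			/* then release the page itself */
}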
@@ -90,6 +95,8 @@ static struct hns_roce_db_pgdir *hns_roce_alloc_db_pgdir(
					struct device *dma_device)
{
	struct hns_roce_db_pgdir *pgdir;
+	dma_addr_t db_dma;
+	u32 *page;

	pgdir = kzalloc(sizeof(*pgdir), GFP_KERNEL);
	if (!pgdir)
@@ -99,14 +106,24 @@ static struct hns_roce_db_pgdir *hns_roce_alloc_db_pgdir(
		    HNS_ROCE_DB_PER_PAGE / HNS_ROCE_DB_TYPE_COUNT);
	pgdir->bits[0] = pgdir->order0;
	pgdir->bits[1] = pgdir->order1;
-	pgdir->page = dma_alloc_coherent(dma_device, PAGE_SIZE,
-					 &pgdir->db_dma, GFP_KERNEL);
-	if (!pgdir->page) {
-		kfree(pgdir);
-		return NULL;
-	}
+	pgdir->db_node = kvzalloc(sizeof(*pgdir->db_node), GFP_KERNEL);
+	if (!pgdir->db_node)
+		goto err_node;
+
+	page = dma_alloc_coherent(dma_device, PAGE_SIZE, &db_dma, GFP_KERNEL);
+	if (!page)
+		goto err_dma;
+
+	pgdir->db_node->kdb.page = page;
+	pgdir->db_node->kdb.db_dma = db_dma;

	return pgdir;
+
+err_dma:
+	kvfree(pgdir->db_node);
+err_node:
+	kfree(pgdir);
+	return NULL;
}

static int hns_roce_alloc_db_from_pgdir(struct hns_roce_db_pgdir *pgdir,
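
Note on the reworked hns_roce_alloc_db_pgdir() above: the new version uses the usual goto-based unwind, where each failure jumps to a label that frees only what was allocated before it, in reverse order. A generic sketch of that structure (all names invented):

#include <linux/slab.h>

struct demo_res {
	void *a;
	void *b;
};

/* Each failure point unwinds exactly the allocations made before it. */
static struct demo_res *demo_res_create(void)
{
	struct demo_res *res;

	res = kzalloc(sizeof(*res), GFP_KERNEL);
	if (!res)
		return NULL;

	res->a = kzalloc(64, GFP_KERNEL);
	if (!res->a)
		goto err_res;

	res->b = kzalloc(64, GFP_KERNEL);
	if (!res->b)
		goto err_a;

	return res;

err_a:
	kfree(res->a);
err_res:
	kfree(res);
	return NULL;
}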
@@ -133,8 +150,8 @@ static int hns_roce_alloc_db_from_pgdir(struct hns_roce_db_pgdir *pgdir,

	db->u.pgdir	= pgdir;
	db->index	= i;
-	db->db_record	= pgdir->page + db->index;
-	db->dma		= pgdir->db_dma  + db->index * HNS_ROCE_DB_UNIT_SIZE;
+	db->db_record	= pgdir->db_node->kdb.page + db->index;
+	db->dma	= pgdir->db_node->kdb.db_dma + db->index * HNS_ROCE_DB_UNIT_SIZE;
	db->order	= order;

	return 0;
@@ -169,13 +186,17 @@ int hns_roce_alloc_db(struct hns_roce_dev *hr_dev, struct hns_roce_db *db,
	return ret;
}

-void hns_roce_free_db(struct hns_roce_dev *hr_dev, struct hns_roce_db *db)
+void hns_roce_free_db(struct hns_roce_dev *hr_dev, struct hns_roce_db *db,
+		      bool delayed_unmap_flag)
{
+	struct hns_roce_db_pg_node *db_node = db->u.pgdir->db_node;
	unsigned long o;
	unsigned long i;

	mutex_lock(&hr_dev->pgdir_mutex);

+	db_node->delayed_unmap_flag |= delayed_unmap_flag;
+
	o = db->order;
	i = db->index;

@@ -189,9 +210,15 @@ void hns_roce_free_db(struct hns_roce_dev *hr_dev, struct hns_roce_db *db)

	if (bitmap_full(db->u.pgdir->order1,
			HNS_ROCE_DB_PER_PAGE / HNS_ROCE_DB_TYPE_COUNT)) {
-		dma_free_coherent(hr_dev->dev, PAGE_SIZE, db->u.pgdir->page,
-				  db->u.pgdir->db_dma);
		list_del(&db->u.pgdir->list);
+		if (db_node->delayed_unmap_flag) {
+			hns_roce_add_unfree_db(db_node, hr_dev);
+		} else {
+			dma_free_coherent(hr_dev->dev, PAGE_SIZE,
+					  db_node->kdb.page,
+					  db_node->kdb.db_dma);
+			kvfree(db_node);
+		}
		kfree(db->u.pgdir);
	}

+4 −4
@@ -327,7 +327,7 @@ int hns_roce_map_dca_safe_page(struct hns_roce_dev *hr_dev,
	for (i = 0; i < page_count; i++)
		pages[i] = hr_dev->dca_safe_page;

-	ret = hns_roce_mtr_map(hr_dev, &hr_qp->mtr, pages, page_count);
+	ret = hns_roce_mtr_map(hr_dev, hr_qp->mtr, pages, page_count);
	if (ret)
		ibdev_err(ibdev, "failed to map safe page for DCA, ret = %d.\n",
			  ret);
@@ -341,7 +341,7 @@ static int config_dca_qpc(struct hns_roce_dev *hr_dev,
			  int page_count)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
-	struct hns_roce_mtr *mtr = &hr_qp->mtr;
+	struct hns_roce_mtr *mtr = hr_qp->mtr;
	int ret;

	ret = hns_roce_mtr_map(hr_dev, mtr, pages, page_count);
@@ -701,7 +701,7 @@ static u32 alloc_buf_from_dca_mem(struct hns_roce_qp *hr_qp,
	buf_id = HNS_DCA_TO_BUF_ID(hr_qp->qpn, hr_qp->dca_cfg.attach_count);

	/* Assign pages from free pages */
-	unit_pages = hr_qp->mtr.hem_cfg.is_direct ? buf_pages : 1;
+	unit_pages = hr_qp->mtr->hem_cfg.is_direct ? buf_pages : 1;
	alloc_pages = assign_dca_pages(ctx, buf_id, buf_pages, unit_pages);
	if (buf_pages != alloc_pages) {
		if (alloc_pages > 0)
@@ -899,7 +899,7 @@ static int free_buffer_pages_proc(struct dca_mem *mem, int index, void *param)
	}

	for (; changed && i < mem->page_count; i++)
-		if (dca_page_is_free(state))
+		if (dca_page_is_free(&mem->states[i]))
			free_pages++;

	if (changed && free_pages == mem->page_count)
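
Note on the free_buffer_pages_proc() hunk above: the old loop appears to have tested the same state pointer on every pass, so the free-page count never tracked the pages the loop was actually visiting; indexing &mem->states[i] makes the check follow the loop counter. A tiny illustrative sketch of the difference (types and names invented):

#include <linux/types.h>

struct demo_state {
	bool free;
};

/* Illustrative only: the predicate must be re-evaluated per element. */
static int demo_count_free(struct demo_state *states, int n)
{
	int free_pages = 0;
	int i;

	for (i = 0; i < n; i++)
		if (states[i].free)	/* correct: checks element i */
			free_pages++;

	/* Buggy variant: checking "states[0].free" inside the loop would test
	 * the first element on every pass instead of scanning the array. */
	return free_pages;
}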
+8 −2
@@ -486,9 +486,14 @@ void hns_roce_register_uctx_debugfs(struct hns_roce_dev *hr_dev,
				     hr_dev, uctx);
}

-void hns_roce_unregister_uctx_debugfs(struct hns_roce_ucontext *uctx)
+void hns_roce_unregister_uctx_debugfs(struct hns_roce_dev *hr_dev,
+					struct hns_roce_ucontext *uctx)
{
-	debugfs_remove_recursive(uctx->dca_dbgfs.root);
+	struct hns_dca_debugfs *dca_dbgfs = &hr_dev->dbgfs.dca_root;
+	char name[DCA_CTX_PID_LEN];
+
+	snprintf(name, sizeof(name), "%d", uctx->pid);
+	debugfs_lookup_and_remove(name, dca_dbgfs->root);
}

/* debugfs for device */
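
Note on the uctx debugfs hunk above: instead of caching a dentry in the ucontext and calling debugfs_remove_recursive() on it, the teardown path now rebuilds the per-PID directory name and removes it by lookup under the DCA debugfs root, so nothing has to be stored per context. The generic debugfs pair involved is debugfs_create_dir() on the register side (presumably how the per-PID directory is created) and debugfs_lookup_and_remove() on the unregister side; a minimal sketch with invented names:

#include <linux/debugfs.h>
#include <linux/kernel.h>

static struct dentry *demo_root;

/* Create a child directory named after an id under a known root... */
static void demo_register(int id)
{
	char name[16];

	snprintf(name, sizeof(name), "%d", id);
	debugfs_create_dir(name, demo_root);
}

/* ...and later remove it by name, without keeping the dentry around. */
static void demo_unregister(int id)
{
	char name[16];

	snprintf(name, sizeof(name), "%d", id);
	debugfs_lookup_and_remove(name, demo_root);
}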
@@ -508,6 +513,7 @@ void hns_roce_register_debugfs(struct hns_roce_dev *hr_dev)
void hns_roce_unregister_debugfs(struct hns_roce_dev *hr_dev)
{
	debugfs_remove_recursive(hr_dev->dbgfs.root);
+	memset(&hr_dev->dbgfs, 0, sizeof(hr_dev->dbgfs));
}

/* debugfs for hns module */