Unverified Commit 495d892e authored by openeuler-ci-bot, committed by Gitee

!15426 Some bug fix patches for OLK-5.10 hns RoCE

Merge Pull Request from: @cxh269 
 
PR sync from: Junxian Huang <huangjunxian6@hisilicon.com>
https://mailweb.openeuler.org/archives/list/kernel@openeuler.org/message/JFGBCBZRFRXAREKVSD5SEJ2YSVXVL2HU/
From: Xinghai Cen <cenxinghai@h-partners.com>

Some bug fix patches for OLK-5.10 hns RoCE:

Junxian Huang (5):
  RDMA/hns: Change mtr member to pointer in hns QP/CQ/MR/SRQ/EQ struct
  RDMA/hns: Move mtr_node into the mtr struct
  RDMA/hns: Fix delayed destruction of db not taking effect
  RDMA/hns: Fix delay-destruction mechanism not processing kernel db
  RDMA/hns: Fix soft lockup by adding cond_resched() to bt pages loop

Xinghai Cen (1):
  RDMA/hns: Fix unmatched kmalloc and kvfree

Yuyu Li (1):
  RDMA/hns: Fix ifnullfree static warning
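For context on the soft-lockup fix above: clearing a large run of bt (base address table) pages in kernel context can keep one CPU busy long enough to trip the soft-lockup watchdog, and the standard remedy is a voluntary preemption point inside the loop. A minimal sketch of that pattern, with hypothetical names (clear_bt_pages, npages) rather than the actual driver code:

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/string.h>

/* Hypothetical helper: zero a long run of bt pages without hogging the CPU. */
static void clear_bt_pages(void **bt_pages, int npages)
{
	int i;

	for (i = 0; i < npages; i++) {
		memset(bt_pages[i], 0, PAGE_SIZE);
		cond_resched(); /* voluntary preemption point per iteration */
	}
}

cond_resched() is essentially free when no reschedule is pending, so adding it to long loops costs little.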

#IBQK42:[openEuler-22.03-LTS-SP4] Some bug fix patches for openEuler-22.03-LTS-SP4 hns RoCE 
 
Link: https://gitee.com/openeuler/kernel/pulls/15426

 

Reviewed-by: Chengchang Tang <tangchengchang@huawei.com>
Reviewed-by: Li Nan <linan122@huawei.com>
Signed-off-by: Li Nan <linan122@huawei.com>
parents 20ecc2fe 8391587a
+14 −21
@@ -190,7 +190,7 @@ static int alloc_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
	u64 mtts[MTT_MIN_COUNT] = {};
	int ret;

-	ret = hns_roce_mtr_find(hr_dev, &hr_cq->mtr, 0, mtts, ARRAY_SIZE(mtts));
+	ret = hns_roce_mtr_find(hr_dev, hr_cq->mtr, 0, mtts, ARRAY_SIZE(mtts));
	if (ret) {
		ibdev_err(ibdev, "failed to find CQ mtr, ret = %d.\n", ret);
		return ret;
@@ -211,7 +211,7 @@ static int alloc_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
	}

	ret = hns_roce_create_cqc(hr_dev, hr_cq, mtts,
-				  hns_roce_get_mtr_ba(&hr_cq->mtr));
+				  hns_roce_get_mtr_ba(hr_cq->mtr));
	if (ret)
		goto err_xa;

@@ -262,24 +262,19 @@ static int alloc_cq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq,
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct hns_roce_buf_attr buf_attr = {};
-	int ret;
-
-	hr_cq->mtr_node = kvmalloc(sizeof(*hr_cq->mtr_node), GFP_KERNEL);
-	if (!hr_cq->mtr_node)
-		return -ENOMEM;
+	int ret = 0;

	buf_attr.page_shift = hr_dev->caps.cqe_buf_pg_sz + PAGE_SHIFT;
	buf_attr.region[0].size = hr_cq->cq_depth * hr_cq->cqe_size;
	buf_attr.region[0].hopnum = hr_dev->caps.cqe_hop_num;
	buf_attr.region_count = 1;

-	ret = hns_roce_mtr_create(hr_dev, &hr_cq->mtr, &buf_attr,
+	hr_cq->mtr = hns_roce_mtr_create(hr_dev, &buf_attr,
					 hr_dev->caps.cqe_ba_pg_sz + PAGE_SHIFT,
					 udata, addr);
-	if (ret) {
+	if (IS_ERR(hr_cq->mtr)) {
+		ret = PTR_ERR(hr_cq->mtr);
		ibdev_err(ibdev, "Failed to alloc CQ mtr, ret = %d\n", ret);
-		kvfree(hr_cq->mtr_node);
-		hr_cq->mtr_node = NULL;
	}

	return ret;
@@ -287,13 +282,10 @@ static int alloc_cq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq,

static void free_cq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
{
-	if (hr_cq->delayed_destroy_flag) {
-		hns_roce_add_unfree_mtr(hr_cq->mtr_node, hr_dev, &hr_cq->mtr);
-	} else {
-		hns_roce_mtr_destroy(hr_dev, &hr_cq->mtr);
-		kvfree(hr_cq->mtr_node);
-		hr_cq->mtr_node = NULL;
-	}
+	if (hr_cq->delayed_destroy_flag)
+		hns_roce_add_unfree_mtr(hr_dev, hr_cq->mtr);
+	else
+		hns_roce_mtr_destroy(hr_dev, hr_cq->mtr);
}

static int alloc_cq_db(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq,
@@ -347,7 +339,8 @@ static void free_cq_db(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq,
		hns_roce_db_unmap_user(uctx, &hr_cq->db,
				       hr_cq->delayed_destroy_flag);
	} else {
-		hns_roce_free_db(hr_dev, &hr_cq->db);
+		hns_roce_free_db(hr_dev, &hr_cq->db,
+				 hr_cq->delayed_destroy_flag);
	}
}
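A note on the shape of this rework: hns_roce_mtr_create() now allocates the mtr and returns it (or an encoded errno) instead of filling in an mtr embedded in the CQ, so callers switch from checking an int to IS_ERR()/PTR_ERR(). A generic sketch of that ERR_PTR convention, with hypothetical names (struct foo, foo_create):

#include <linux/err.h>
#include <linux/slab.h>

struct foo {
	int val;
};

/* Allocator returns a valid pointer or an ERR_PTR-encoded errno, never NULL. */
static struct foo *foo_create(void)
{
	struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

	if (!f)
		return ERR_PTR(-ENOMEM);
	return f;
}

static int foo_user(void)
{
	struct foo *f = foo_create();

	if (IS_ERR(f))
		return PTR_ERR(f); /* recover the errno for the caller */
	kfree(f);
	return 0;
}

Returning the allocation also removes the need for the old separately allocated mtr_node, which is why the kvmalloc()/kvfree() bookkeeping disappears from alloc_cq_buf().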

+57 −29
@@ -12,6 +12,7 @@ int hns_roce_db_map_user(struct hns_roce_ucontext *context, unsigned long virt,
{
	unsigned long page_addr = virt & PAGE_MASK;
	struct hns_roce_user_db_page *page;
+	struct ib_umem *umem;
	unsigned int offset;
	int ret = 0;

@@ -29,33 +30,34 @@ int hns_roce_db_map_user(struct hns_roce_ucontext *context, unsigned long virt,

	refcount_set(&page->refcount, 1);
	page->user_virt = page_addr;
-	page->umem = ib_umem_get(context->ibucontext.device, page_addr,
-				 PAGE_SIZE, 0);
-	if (IS_ERR(page->umem)) {
-		ret = PTR_ERR(page->umem);
+	page->db_node = kvzalloc(sizeof(*page->db_node), GFP_KERNEL);
+	if (!page->db_node) {
+		ret = -ENOMEM;
		goto err_page;
	}
-	page->umem_node = kvmalloc(sizeof(*page->umem_node), GFP_KERNEL);
-	if (!page->umem_node) {
-		ret = -ENOMEM;
-		goto err_umem;
+
+	umem = ib_umem_get(context->ibucontext.device, page_addr, PAGE_SIZE, 0);
+	if (IS_ERR(umem)) {
+		ret = PTR_ERR(umem);
+		goto err_dbnode;
	}

+	page->db_node->umem = umem;
	list_add(&page->list, &context->page_list);

found:
	offset = virt - page_addr;
-	db->dma = sg_dma_address(page->umem->sg_head.sgl) + offset;
-	db->virt_addr = sg_virt(page->umem->sg_head.sgl) + offset;
+	db->dma = sg_dma_address(page->db_node->umem->sg_head.sgl) + offset;
+	db->virt_addr = sg_virt(page->db_node->umem->sg_head.sgl) + offset;
	db->u.user_page = page;
	refcount_inc(&page->refcount);
	mutex_unlock(&context->page_mutex);
	return 0;

-err_umem:
-	ib_umem_release(page->umem);
+err_dbnode:
+	kvfree(page->db_node);
err_page:
-	kvfree(page);
+	kfree(page);
err_out:
	mutex_unlock(&context->page_mutex);

@@ -67,17 +69,20 @@ void hns_roce_db_unmap_user(struct hns_roce_ucontext *context,
			    bool delayed_unmap_flag)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(context->ibucontext.device);
+	struct hns_roce_db_pg_node *db_node = db->u.user_page->db_node;

	mutex_lock(&context->page_mutex);

+	db_node->delayed_unmap_flag |= delayed_unmap_flag;
+
	refcount_dec(&db->u.user_page->refcount);
	if (refcount_dec_if_one(&db->u.user_page->refcount)) {
		list_del(&db->u.user_page->list);
-		if (delayed_unmap_flag) {
-			hns_roce_add_unfree_umem(db->u.user_page, hr_dev);
+		if (db_node->delayed_unmap_flag) {
+			hns_roce_add_unfree_db(db_node, hr_dev);
		} else {
-			ib_umem_release(db->u.user_page->umem);
-			kvfree(db->u.user_page->umem_node);
+			ib_umem_release(db_node->umem);
+			kvfree(db_node);
		}
		kfree(db->u.user_page);
	}
@@ -89,6 +94,8 @@ static struct hns_roce_db_pgdir *hns_roce_alloc_db_pgdir(
					struct device *dma_device)
{
	struct hns_roce_db_pgdir *pgdir;
+	dma_addr_t db_dma;
+	u32 *page;

	pgdir = kzalloc(sizeof(*pgdir), GFP_KERNEL);
	if (!pgdir)
@@ -98,14 +105,25 @@ static struct hns_roce_db_pgdir *hns_roce_alloc_db_pgdir(
			HNS_ROCE_DB_PER_PAGE / HNS_ROCE_DB_TYPE_COUNT);
	pgdir->bits[0] = pgdir->order0;
	pgdir->bits[1] = pgdir->order1;
-	pgdir->page = dma_alloc_coherent(dma_device, PAGE_SIZE,
-					 &pgdir->db_dma, GFP_KERNEL);
-	if (!pgdir->page) {
+	pgdir->db_node = kvzalloc(sizeof(*pgdir->db_node), GFP_KERNEL);
+	if (!pgdir->db_node)
+		goto err_node;
+
+	page = dma_alloc_coherent(dma_device, PAGE_SIZE, &db_dma, GFP_KERNEL);
+	if (!page)
+		goto err_dma;
+
+	pgdir->db_node->kdb.page = page;
+	pgdir->db_node->kdb.db_dma = db_dma;
+
+	return pgdir;
+
+err_dma:
+	kvfree(pgdir->db_node);
+err_node:
	kfree(pgdir);
	return NULL;
-	}

-	return pgdir;
}

static int hns_roce_alloc_db_from_pgdir(struct hns_roce_db_pgdir *pgdir,
@@ -132,8 +150,8 @@ static int hns_roce_alloc_db_from_pgdir(struct hns_roce_db_pgdir *pgdir,

	db->u.pgdir	= pgdir;
	db->index	= i;
-	db->db_record	= pgdir->page + db->index;
-	db->dma		= pgdir->db_dma  + db->index * HNS_ROCE_DB_UNIT_SIZE;
+	db->db_record	= pgdir->db_node->kdb.page + db->index;
+	db->dma	= pgdir->db_node->kdb.db_dma + db->index * HNS_ROCE_DB_UNIT_SIZE;
	db->order	= order;

	return 0;
@@ -168,13 +186,17 @@ int hns_roce_alloc_db(struct hns_roce_dev *hr_dev, struct hns_roce_db *db,
	return ret;
}

-void hns_roce_free_db(struct hns_roce_dev *hr_dev, struct hns_roce_db *db)
+void hns_roce_free_db(struct hns_roce_dev *hr_dev, struct hns_roce_db *db,
+		      bool delayed_unmap_flag)
{
+	struct hns_roce_db_pg_node *db_node = db->u.pgdir->db_node;
	unsigned long o;
	unsigned long i;

	mutex_lock(&hr_dev->pgdir_mutex);

+	db_node->delayed_unmap_flag |= delayed_unmap_flag;
+
	o = db->order;
	i = db->index;

@@ -188,9 +210,15 @@ void hns_roce_free_db(struct hns_roce_dev *hr_dev, struct hns_roce_db *db)

	if (bitmap_full(db->u.pgdir->order1,
			HNS_ROCE_DB_PER_PAGE / HNS_ROCE_DB_TYPE_COUNT)) {
-		dma_free_coherent(hr_dev->dev, PAGE_SIZE, db->u.pgdir->page,
-				  db->u.pgdir->db_dma);
		list_del(&db->u.pgdir->list);
+		if (db_node->delayed_unmap_flag) {
+			hns_roce_add_unfree_db(db_node, hr_dev);
+		} else {
+			dma_free_coherent(hr_dev->dev, PAGE_SIZE,
+					  db_node->kdb.page,
+					  db_node->kdb.db_dma);
+			kvfree(db_node);
+		}
		kfree(db->u.pgdir);
	}
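The rework above funnels both user and kernel doorbell state through hns_roce_db_pg_node and, once delayed_unmap_flag is set, parks the node on a per-device unfree list instead of releasing memory the hardware may still reference. A minimal sketch of that park-and-drain pattern; every name here (pg_node, dev_ctx, add_unfree_db, free_unfree_db) is illustrative, not the driver's:

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>

struct pg_node {
	struct list_head list;
	bool delayed_unmap_flag;
	/* ... resource handles (umem or DMA page) ... */
};

struct dev_ctx {
	struct list_head db_unfree_list;   /* assumed initialized at probe */
	struct mutex db_unfree_list_mutex; /* protects db_unfree_list */
};

/* Park a node instead of freeing it while hardware may still use it. */
static void add_unfree_db(struct dev_ctx *dev, struct pg_node *node)
{
	mutex_lock(&dev->db_unfree_list_mutex);
	list_add_tail(&node->list, &dev->db_unfree_list);
	mutex_unlock(&dev->db_unfree_list_mutex);
}

/* Drain once it is safe, e.g. at device teardown; release the real
 * resources before freeing each node. */
static void free_unfree_db(struct dev_ctx *dev)
{
	struct pg_node *node, *tmp;

	mutex_lock(&dev->db_unfree_list_mutex);
	list_for_each_entry_safe(node, tmp, &dev->db_unfree_list, list) {
		list_del(&node->list);
		kvfree(node);
	}
	mutex_unlock(&dev->db_unfree_list_mutex);
}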

+3 −3
@@ -324,7 +324,7 @@ int hns_roce_map_dca_safe_page(struct hns_roce_dev *hr_dev,
	for (i = 0; i < page_count; i++)
		pages[i] = hr_dev->dca_safe_page;

-	ret = hns_roce_mtr_map(hr_dev, &hr_qp->mtr, pages, page_count);
+	ret = hns_roce_mtr_map(hr_dev, hr_qp->mtr, pages, page_count);
	if (ret)
		ibdev_err(ibdev, "failed to map safe page for DCA, ret = %d.\n",
			  ret);
@@ -338,7 +338,7 @@ static int config_dca_qpc(struct hns_roce_dev *hr_dev,
			  int page_count)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
-	struct hns_roce_mtr *mtr = &hr_qp->mtr;
+	struct hns_roce_mtr *mtr = hr_qp->mtr;
	int ret;

	ret = hns_roce_mtr_map(hr_dev, mtr, pages, page_count);
@@ -698,7 +698,7 @@ static u32 alloc_buf_from_dca_mem(struct hns_roce_qp *hr_qp,
	buf_id = HNS_DCA_TO_BUF_ID(hr_qp->qpn, hr_qp->dca_cfg.attach_count);

	/* Assign pages from free pages */
-	unit_pages = hr_qp->mtr.hem_cfg.is_direct ? buf_pages : 1;
+	unit_pages = hr_qp->mtr->hem_cfg.is_direct ? buf_pages : 1;
	alloc_pages = assign_dca_pages(ctx, buf_id, buf_pages, unit_pages);
	if (buf_pages != alloc_pages) {
		if (alloc_pages > 0)
+32 −37
@@ -391,6 +391,7 @@ struct hns_roce_mtr {
	struct ib_umem		*umem; /* user space buffer */
	struct hns_roce_buf	*kmem; /* kernel space buffer */
	struct hns_roce_hem_cfg  hem_cfg; /* config for hardware addressing */
+	struct list_head	 node; /* list node for delay-destruction */
};

/* DCA config */
@@ -426,11 +427,10 @@ struct hns_roce_mr {
	int			enabled; /* MR's active status */
	int			type; /* MR's register type */
	u32			pbl_hop_num; /* multi-hop number */
-	struct hns_roce_mtr	pbl_mtr;
+	struct hns_roce_mtr	*pbl_mtr;
	u32			npages;
	dma_addr_t		*page_list;
	bool			delayed_destroy_flag;
-	struct hns_roce_mtr_node *mtr_node;
};

struct hns_roce_mr_table {
@@ -488,26 +488,29 @@ struct hns_roce_buf {
	unsigned int			page_shift;
};

-struct hns_roce_db_pgdir {
+struct hns_roce_db_pg_node {
	struct list_head list;
-	DECLARE_BITMAP(order0, HNS_ROCE_DB_PER_PAGE);
-	DECLARE_BITMAP(order1, HNS_ROCE_DB_PER_PAGE / HNS_ROCE_DB_TYPE_COUNT);
-	unsigned long		*bits[HNS_ROCE_DB_TYPE_COUNT];
-	u32			*page;
-	dma_addr_t		db_dma;
+	struct ib_umem *umem;
+	struct {
+		u32 *page;
+		dma_addr_t db_dma;
+	} kdb;
+	bool delayed_unmap_flag;
};

-struct hns_roce_umem_node {
-	struct ib_umem *umem;
-	struct list_head list;
+struct hns_roce_db_pgdir {
+	struct list_head	list;
+	DECLARE_BITMAP(order0, HNS_ROCE_DB_PER_PAGE);
+	DECLARE_BITMAP(order1, HNS_ROCE_DB_PER_PAGE / HNS_ROCE_DB_TYPE_COUNT);
+	unsigned long		*bits[HNS_ROCE_DB_TYPE_COUNT];
+	struct hns_roce_db_pg_node *db_node;
};

struct hns_roce_user_db_page {
	struct list_head	list;
-	struct ib_umem		*umem;
	unsigned long		user_virt;
	refcount_t		refcount;
-	struct hns_roce_umem_node *umem_node;
+	struct hns_roce_db_pg_node *db_node;
};

struct hns_roce_db {
@@ -540,7 +543,7 @@ struct hns_roce_notify_conf {

struct hns_roce_cq {
	struct ib_cq			ib_cq;
-	struct hns_roce_mtr		mtr;
+	struct hns_roce_mtr		*mtr;
	struct hns_roce_db		db;
	u32				flags;
	spinlock_t			lock;
@@ -561,16 +564,14 @@ struct hns_roce_cq {
	u8				poe_channel;
	bool				delayed_destroy_flag;
	struct hns_roce_notify_conf	write_notify;
-	struct hns_roce_mtr_node *mtr_node;
};

struct hns_roce_idx_que {
-	struct hns_roce_mtr		mtr;
+	struct hns_roce_mtr		*mtr;
	u32				entry_shift;
	unsigned long			*bitmap;
	u32				head;
	u32				tail;
-	struct hns_roce_mtr_node *mtr_node;
};

struct hns_roce_srq {
@@ -587,7 +588,7 @@ struct hns_roce_srq {
	refcount_t		refcount;
	struct completion	free;

-	struct hns_roce_mtr	buf_mtr;
+	struct hns_roce_mtr	*buf_mtr;

	u64		       *wrid;
	struct hns_roce_idx_que idx_que;
@@ -597,7 +598,6 @@ struct hns_roce_srq {
	struct hns_roce_db	rdb;
	u32			cap_flags;
	bool			delayed_destroy_flag;
-	struct hns_roce_mtr_node *mtr_node;
};

struct hns_roce_uar_table {
@@ -733,7 +733,7 @@ struct hns_roce_qp {
	enum ib_sig_type	sq_signal_bits;
	struct hns_roce_wq	sq;

-	struct hns_roce_mtr	mtr;
+	struct hns_roce_mtr	*mtr;
	struct hns_roce_dca_cfg	dca_cfg;

	u32			buff_size;
@@ -771,7 +771,6 @@ struct hns_roce_qp {
	u8			tc_mode;
	u8			priority;
	bool			delayed_destroy_flag;
-	struct hns_roce_mtr_node *mtr_node;
	spinlock_t flush_lock;
	struct hns_roce_dip *dip;
};
@@ -834,7 +833,7 @@ struct hns_roce_eq {
	int				coalesce;
	int				arm_st;
	int				hop_num;
-	struct hns_roce_mtr		mtr;
+	struct hns_roce_mtr		*mtr;
	u16				eq_max_cnt;
	u32				eq_period;
	int				shift;
@@ -1146,11 +1145,6 @@ struct hns_roce_port {
	struct hns_roce_cnp_pri_param *cnp_pri_param;
};

-struct hns_roce_mtr_node {
-	struct hns_roce_mtr mtr;
-	struct list_head list;
-};
-
struct hns_roce_dev {
	struct ib_device	ib_dev;
	struct pci_dev		*pci_dev;
@@ -1233,8 +1227,8 @@ struct hns_roce_dev {
	size_t notify_num;
	struct list_head mtr_unfree_list; /* list of unfree mtr on this dev */
	struct mutex mtr_unfree_list_mutex; /* protect mtr_unfree_list */
-	struct list_head umem_unfree_list; /* list of unfree umem on this dev */
-	struct mutex umem_unfree_list_mutex; /* protect umem_unfree_list */
+	struct list_head db_unfree_list; /* list of unfree db on this dev */
+	struct mutex db_unfree_list_mutex; /* protect db_unfree_list */

	void *dca_safe_buf;
	dma_addr_t dca_safe_page;
@@ -1423,9 +1417,10 @@ static inline dma_addr_t hns_roce_get_mtr_ba(struct hns_roce_mtr *mtr)
}
int hns_roce_mtr_find(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
		      u32 offset, u64 *mtt_buf, int mtt_max);
-int hns_roce_mtr_create(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
+struct hns_roce_mtr *hns_roce_mtr_create(struct hns_roce_dev *hr_dev,
					 struct hns_roce_buf_attr *buf_attr,
-			unsigned int page_shift, struct ib_udata *udata,
+					 unsigned int ba_page_shift,
+					 struct ib_udata *udata,
					 unsigned long user_addr);
void hns_roce_mtr_destroy(struct hns_roce_dev *hr_dev,
			  struct hns_roce_mtr *mtr);
@@ -1527,7 +1522,8 @@ void hns_roce_db_unmap_user(struct hns_roce_ucontext *context,
			    bool delayed_unmap_flag);
int hns_roce_alloc_db(struct hns_roce_dev *hr_dev, struct hns_roce_db *db,
		      int order);
-void hns_roce_free_db(struct hns_roce_dev *hr_dev, struct hns_roce_db *db);
+void hns_roce_free_db(struct hns_roce_dev *hr_dev, struct hns_roce_db *db,
+		      bool delayed_unmap_flag);

void hns_roce_cq_completion(struct hns_roce_dev *hr_dev, u32 cqn);
void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type);
@@ -1545,11 +1541,10 @@ int hns_roce_fill_res_qp_entry(struct sk_buff *msg, struct ib_qp *ib_qp);
int hns_roce_fill_res_qp_entry_raw(struct sk_buff *msg, struct ib_qp *ib_qp);
int hns_roce_fill_res_mr_entry(struct sk_buff *msg, struct ib_mr *ib_mr);
int hns_roce_fill_res_mr_entry_raw(struct sk_buff *msg, struct ib_mr *ib_mr);
-void hns_roce_add_unfree_umem(struct hns_roce_user_db_page *user_page,
+void hns_roce_add_unfree_db(struct hns_roce_db_pg_node *db_node,
			    struct hns_roce_dev *hr_dev);
-void hns_roce_free_unfree_umem(struct hns_roce_dev *hr_dev);
-void hns_roce_add_unfree_mtr(struct hns_roce_mtr_node *pos,
-			     struct hns_roce_dev *hr_dev,
+void hns_roce_free_unfree_db(struct hns_roce_dev *hr_dev);
+void hns_roce_add_unfree_mtr(struct hns_roce_dev *hr_dev,
			     struct hns_roce_mtr *mtr);
void hns_roce_free_unfree_mtr(struct hns_roce_dev *hr_dev);
struct hns_user_mmap_entry *
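The structural theme of this header diff: every consumer of hns_roce_mtr now holds a pointer, the separately allocated hns_roce_mtr_node wrapper is gone, and the mtr carries its own list linkage (the new node member), so a live mtr can be moved onto mtr_unfree_list without any extra allocation; that wrapper allocation is where the mismatched alloc/free fixed by "RDMA/hns: Fix unmatched kmalloc and kvfree" crept in. A generic sketch of the embedded-linkage idiom (struct obj and both helpers are hypothetical):

#include <linux/list.h>
#include <linux/slab.h>

struct obj {
	int id;
	struct list_head node; /* embedded linkage: no wrapper allocation */
};

static LIST_HEAD(pending);

/* Parking is O(1), allocation-free, and cannot fail. */
static void park(struct obj *o)
{
	list_add_tail(&o->node, &pending);
}

static void drain(void)
{
	struct obj *o, *tmp;

	/* list_for_each_entry_safe() recovers each obj via container_of(). */
	list_for_each_entry_safe(o, tmp, &pending, node) {
		list_del(&o->node);
		kfree(o);
	}
}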
+1 −2
@@ -133,7 +133,6 @@ int rdma_unregister_notify_addr(struct ib_device *ib_dev)
	if (!is_hns_roce(ib_dev) || !is_write_notify_supported(hr_dev))
		return -EOPNOTSUPP;

-	if (hr_dev->notify_tbl)
	kvfree(hr_dev->notify_tbl);

	hr_dev->notify_tbl = NULL;
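This last hunk is the "ifnullfree" cleanup: kvfree(), like kfree(), is a no-op when passed NULL, so the guard is redundant, which is what the kernel's scripts/coccinelle/free/ifnullfree.cocci check flags. A tiny sketch of the resulting shape (struct notify_ctx and release_tbl are hypothetical):

#include <linux/slab.h>

struct notify_ctx {
	void *notify_tbl;
};

static void release_tbl(struct notify_ctx *c)
{
	/* No 'if (c->notify_tbl)' guard needed: kvfree(NULL) does nothing. */
	kvfree(c->notify_tbl);
	c->notify_tbl = NULL; /* prevent a dangling pointer or double free */
}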