Unverified Commit 8fc66242 authored by openeuler-ci-bot, committed by Gitee
Browse files

!14312 OLK-6.6: Some patches of RDMA/hns from Linux to olk-6.6 v2

Merge Pull Request from: @cxh269 
 
Chengchang Tang (3):
RDMA/hns: Fix accessing invalid dip_ctx during destroying QP
RDMA/hns: Fix warning storm caused by invalid input in IO path
RDMA/hns: Fix missing flush CQE for DWQE

Xinghai Cen (1):
Revert "RDMA/hns: Fix accessing invalid dip_ctx during destroying QP"

wenglianfa (1):
RDMA/hns: Fix mapping error of zero-hop WQE buffer


https://gitee.com/openeuler/kernel/issues/IBDW6Z 
 
Link: https://gitee.com/openeuler/kernel/pulls/14312

 

Reviewed-by: Chengchang Tang <tangchengchang@huawei.com>
Signed-off-by: Zhang Peng <zhangpeng362@huawei.com>
parents e3af77bb a35a3c16
Loading
Loading
Loading
Loading
+29 −14
Original line number Diff line number Diff line
@@ -932,6 +932,7 @@ struct hns_roce_hem_item {
	size_t count; /* max ba numbers */
	int start; /* start buf offset in this hem */
	int end; /* end buf offset in this hem */
	bool exist_bt;
};

/* All HEM items are linked in a tree structure */
@@ -960,6 +961,7 @@ hem_list_alloc_item(struct hns_roce_dev *hr_dev, int start, int end, int count,
		}
	}

	hem->exist_bt = exist_bt;
	hem->count = count;
	hem->start = start;
	hem->end = end;
@@ -970,22 +972,22 @@ hem_list_alloc_item(struct hns_roce_dev *hr_dev, int start, int end, int count,
}

static void hem_list_free_item(struct hns_roce_dev *hr_dev,
			       struct hns_roce_hem_item *hem, bool exist_bt)
			       struct hns_roce_hem_item *hem)
{
	if (exist_bt)
	if (hem->exist_bt)
		dma_free_coherent(hr_dev->dev, hem->count * BA_BYTE_LEN,
				  hem->addr, hem->dma_addr);
	kfree(hem);
}

static void hem_list_free_all(struct hns_roce_dev *hr_dev,
			      struct list_head *head, bool exist_bt)
			      struct list_head *head)
{
	struct hns_roce_hem_item *hem, *temp_hem;

	list_for_each_entry_safe(hem, temp_hem, head, list) {
		list_del(&hem->list);
		hem_list_free_item(hr_dev, hem, exist_bt);
		hem_list_free_item(hr_dev, hem);
	}
}

@@ -1085,6 +1087,10 @@ int hns_roce_hem_list_calc_root_ba(const struct hns_roce_buf_region *regions,

	for (i = 0; i < region_cnt; i++) {
		r = (struct hns_roce_buf_region *)&regions[i];
		/* when r->hopnum = 0, the region should not occupy root_ba. */
		if (!r->hopnum)
			continue;

		if (r->hopnum > 1) {
			step = hem_list_calc_ba_range(r->hopnum, 1, unit);
			if (step > 0)
@@ -1178,7 +1184,7 @@ static int hem_list_alloc_mid_bt(struct hns_roce_dev *hr_dev,

err_exit:
	for (level = 1; level < hopnum; level++)
		hem_list_free_all(hr_dev, &temp_list[level], true);
		hem_list_free_all(hr_dev, &temp_list[level]);

	return ret;
}
@@ -1219,16 +1225,26 @@ static int alloc_fake_root_bt(struct hns_roce_dev *hr_dev, void *cpu_base,
{
	struct hns_roce_hem_item *hem;

	/* This is on the has_mtt branch, if r->hopnum
	 * is 0, there is no root_ba to reuse for the
	 * region's fake hem, so a dma_alloc request is
	 * necessary here.
	 */
	hem = hem_list_alloc_item(hr_dev, r->offset, r->offset + r->count - 1,
				  r->count, false);
				  r->count, !r->hopnum);
	if (!hem)
		return -ENOMEM;

	/* The root_ba can be reused only when r->hopnum > 0. */
	if (r->hopnum)
		hem_list_assign_bt(hem, cpu_base, phy_base);
	list_add(&hem->list, branch_head);
	list_add(&hem->sibling, leaf_head);

	return r->count;
	/* If r->hopnum == 0, 0 is returned,
	 * so that the root_bt entry is not occupied.
	 */
	return r->hopnum ? r->count : 0;
}

static int setup_middle_bt(struct hns_roce_dev *hr_dev, void *cpu_base,
@@ -1272,7 +1288,7 @@ setup_root_hem(struct hns_roce_dev *hr_dev, struct hns_roce_hem_list *hem_list,
		return -ENOMEM;

	total = 0;
	for (i = 0; i < region_cnt && total < max_ba_num; i++) {
	for (i = 0; i < region_cnt && total <= max_ba_num; i++) {
		r = &regions[i];
		if (!r->count)
			continue;
@@ -1338,9 +1354,9 @@ static int hem_list_alloc_root_bt(struct hns_roce_dev *hr_dev,
			     region_cnt);
	if (ret) {
		for (i = 0; i < region_cnt; i++)
			hem_list_free_all(hr_dev, &head.branch[i], false);
			hem_list_free_all(hr_dev, &head.branch[i]);

		hem_list_free_all(hr_dev, &head.root, true);
		hem_list_free_all(hr_dev, &head.root);
	}

	return ret;
@@ -1403,10 +1419,9 @@ void hns_roce_hem_list_release(struct hns_roce_dev *hr_dev,

	for (i = 0; i < HNS_ROCE_MAX_BT_REGION; i++)
		for (j = 0; j < HNS_ROCE_MAX_BT_LEVEL; j++)
			hem_list_free_all(hr_dev, &hem_list->mid_bt[i][j],
					  j != 0);
			hem_list_free_all(hr_dev, &hem_list->mid_bt[i][j]);

	hem_list_free_all(hr_dev, &hem_list->root_bt, true);
	hem_list_free_all(hr_dev, &hem_list->root_bt);
	INIT_LIST_HEAD(&hem_list->btm_bt);
	hem_list->root_ba = 0;
}
+6 −2
Original line number Diff line number Diff line
@@ -558,7 +558,7 @@ static inline int set_ud_wqe(struct hns_roce_qp *qp,
	valid_num_sge = calc_wr_sge_num(wr, &msg_len);

	ret = set_ud_opcode(ud_sq_wqe, wr);
	if (WARN_ON(ret))
	if (WARN_ON_ONCE(ret))
		return ret;

	ud_sq_wqe->msg_len = cpu_to_le32(msg_len);
@@ -670,7 +670,7 @@ static inline int set_rc_wqe(struct hns_roce_qp *qp,
	rc_sq_wqe->msg_len = cpu_to_le32(msg_len);

	ret = set_rc_opcode(hr_dev, rc_sq_wqe, wr);
	if (WARN_ON(ret))
	if (WARN_ON_ONCE(ret))
		return ret;

	hr_reg_write(rc_sq_wqe, RC_SEND_WQE_SO,
@@ -771,6 +771,10 @@ static void write_dwqe(struct hns_roce_dev *hr_dev, struct hns_roce_qp *qp,
#define HNS_ROCE_SL_SHIFT 2
	struct hns_roce_v2_rc_send_wqe *rc_sq_wqe = wqe;

	if (unlikely(qp->state == IB_QPS_ERR)) {
		flush_cqe(hr_dev, qp);
		return;
	}
	/* All kinds of DirectWQE have the same header field layout */
	hr_reg_enable(rc_sq_wqe, RC_SEND_WQE_FLAG);
	hr_reg_write(rc_sq_wqe, RC_SEND_WQE_DB_SL_L, qp->sl);
+0 −5
Original line number Diff line number Diff line
@@ -828,11 +828,6 @@ int hns_roce_mtr_map(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
	for (i = 0, mapped_cnt = 0; i < mtr->hem_cfg.region_count &&
	     mapped_cnt < page_cnt; i++) {
		r = &mtr->hem_cfg.region[i];
		/* if hopnum is 0, no need to map pages in this region */
		if (!r->hopnum) {
			mapped_cnt += r->count;
			continue;
		}

		if (r->offset + r->count > page_cnt) {
			ret = -EINVAL;