Unverified Commit a3e2493c authored by openeuler-ci-bot's avatar openeuler-ci-bot Committed by Gitee
Browse files

!11187 Bugfix patches for hns RoCE

Merge Pull Request from: @ci-robot 
 
PR sync from: Chengchang Tang <tangchengchang@huawei.com>
https://mailweb.openeuler.org/hyperkitty/list/kernel@openeuler.org/message/DLO6REXNI5GT472EKIMUNTWNRDKFJQ7H/ 
From: Xinghai Cen <cenxinghai@h-partners.com>

Some bugfix patches for hns RoCE.

Chengchang Tang (4):
  RDMA/hns: Fix 1bit-ECC recovery address in non-4K OS
  RDMA/hns: Fix possible RAS when DCA is not attached
  RDMA/hns: Fix a meaningless loop in active_dca_pages_proc()
  RDMA/hns: Fix list_*_careful() not being used in pairs

Junxian Huang (1):
  RDMA/hns: Fix VF triggering PF reset in abnormal interrupt handler


--
2.33.0
 
https://gitee.com/openeuler/kernel/issues/IAMO4D 
 
Link: https://gitee.com/openeuler/kernel/pulls/11187

 

Reviewed-by: default avatarChengchang Tang <tangchengchang@huawei.com>
Signed-off-by: default avatarYang Yingliang <yangyingliang@huawei.com>
parents b32a887d 3278942d
Loading
Loading
Loading
Loading
+56 −4
Original line number Diff line number Diff line
@@ -306,6 +306,33 @@ hr_qp_to_dca_ctx(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
	return to_hr_dca_ctx(hr_dev, uctx);
}

/*
 * Map every page of the QP's MTR to the device-wide DCA safe page so the
 * HW always has a valid DMA target while no real DCA buffer is attached.
 *
 * Returns 0 on success or a negative errno on failure.
 */
int hns_roce_map_dca_safe_page(struct hns_roce_dev *hr_dev,
			       struct hns_roce_qp *hr_qp)
{
	unsigned int page_count = hr_qp->dca_cfg.npages;
	struct ib_device *ibdev = &hr_dev->ib_dev;
	dma_addr_t *pages;
	unsigned int i;
	int ret;

	/* kvcalloc() returns NULL on failure, never an ERR_PTR value. */
	pages = kvcalloc(page_count, sizeof(dma_addr_t), GFP_KERNEL);
	if (!pages) {
		ibdev_err(ibdev, "failed to alloc DCA safe page array.\n");
		return -ENOMEM;
	}

	/* Point every buffer page at the shared safe page. */
	for (i = 0; i < page_count; i++)
		pages[i] = hr_dev->dca_safe_page;

	ret = hns_roce_mtr_map(hr_dev, &hr_qp->mtr, pages, page_count);
	if (ret)
		ibdev_err(ibdev, "failed to map safe page for DCA, ret = %d.\n",
			  ret);

	kvfree(pages);
	return ret;
}

static int config_dca_qpc(struct hns_roce_dev *hr_dev,
			  struct hns_roce_qp *hr_qp, dma_addr_t *pages,
			  int page_count)
@@ -332,6 +359,29 @@ static int config_dca_qpc(struct hns_roce_dev *hr_dev,
	return 0;
}

/*
 * Rewrite the QP context so that its buffer addresses refer to the
 * device-wide DCA safe page, making it safe to free the detached DCA
 * buffer afterwards.  May sleep; must not be called from atomic context.
 *
 * Returns 0 on success or a negative errno on failure.
 */
static int config_dca_qpc_to_safe_page(struct hns_roce_dev *hr_dev,
				       struct hns_roce_qp *hr_qp)
{
	unsigned int page_count = hr_qp->dca_cfg.npages;
	dma_addr_t *pages;
	unsigned int i;
	int ret;

	might_sleep();

	/* kvcalloc() returns NULL on failure, never an ERR_PTR value. */
	pages = kvcalloc(page_count, sizeof(dma_addr_t), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	/* Point every buffer page at the shared safe page. */
	for (i = 0; i < page_count; i++)
		pages[i] = hr_dev->dca_safe_page;

	ret = config_dca_qpc(hr_dev, hr_qp, pages, page_count);

	kvfree(pages);
	return ret;
}

static int setup_dca_buf_to_hw(struct hns_roce_dev *hr_dev,
			       struct hns_roce_qp *hr_qp,
			       struct hns_roce_dca_ctx *ctx, u32 buf_id,
@@ -564,7 +614,7 @@ static int active_dca_pages_proc(struct dca_mem *mem, int index, void *param)
	}

	for (; changed && i < mem->page_count; i++)
		if (dca_page_is_free(state))
		if (dca_page_is_free(&mem->states[i]))
			free_pages++;

	/* Clean mem changed to dirty */
@@ -969,7 +1019,7 @@ static void process_aging_dca_mem(struct hns_roce_dev *hr_dev,
	list_for_each_entry_safe(cfg, tmp_cfg, &ctx->aging_new_list, aging_node)
		list_move(&cfg->aging_node, &ctx->aging_proc_list);

	while (!ctx->exit_aging && !list_empty(&ctx->aging_proc_list)) {
	while (!ctx->exit_aging && !list_empty_careful(&ctx->aging_proc_list)) {
		cfg = list_first_entry(&ctx->aging_proc_list,
				       struct hns_roce_dca_cfg, aging_node);
		list_del_init_careful(&cfg->aging_node);
@@ -977,8 +1027,10 @@ static void process_aging_dca_mem(struct hns_roce_dev *hr_dev,
		spin_unlock(&ctx->aging_lock);

		if (start_free_dca_buf(ctx, cfg->dcan)) {
			if (hr_dev->hw->chk_dca_buf_inactive(hr_dev, hr_qp))
			if (hr_dev->hw->chk_dca_buf_inactive(hr_dev, hr_qp)) {
				if (!config_dca_qpc_to_safe_page(hr_dev, hr_qp))
					free_buf_from_dca_mem(ctx, cfg);
			}

			stop_free_dca_buf(ctx, cfg->dcan);
		}
+2 −0
Original line number Diff line number Diff line
@@ -74,4 +74,6 @@ void hns_roce_modify_dca(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,

void hns_roce_enum_dca_pool(struct hns_roce_dca_ctx *dca_ctx, void *param,
			    hns_dca_enum_callback cb);
int hns_roce_map_dca_safe_page(struct hns_roce_dev *hr_dev,
			       struct hns_roce_qp *hr_qp);
#endif
+3 −0
Original line number Diff line number Diff line
@@ -1234,6 +1234,9 @@ struct hns_roce_dev {
	struct mutex mtr_unfree_list_mutex; /* protect mtr_unfree_list */
	struct list_head umem_unfree_list; /* list of unfree umem on this dev */
	struct mutex umem_unfree_list_mutex; /* protect umem_unfree_list */

	void *dca_safe_buf;
	dma_addr_t dca_safe_page;
};

static inline struct hns_roce_dev *to_hr_dev(struct ib_device *ib_dev)
+6 −3
Original line number Diff line number Diff line
@@ -6927,6 +6927,7 @@ static irqreturn_t abnormal_interrupt_basic(struct hns_roce_dev *hr_dev,
	struct pci_dev *pdev = hr_dev->pci_dev;
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
	const struct hnae3_ae_ops *ops = ae_dev->ops;
	enum hnae3_reset_type reset_type;
	irqreturn_t int_work = IRQ_NONE;
	u32 int_en;

@@ -6938,10 +6939,12 @@ static irqreturn_t abnormal_interrupt_basic(struct hns_roce_dev *hr_dev,
		roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG,
			   1 << HNS_ROCE_V2_VF_INT_ST_AEQ_OVERFLOW_S);

		reset_type = hr_dev->is_vf ?
			     HNAE3_VF_FUNC_RESET : HNAE3_FUNC_RESET;

		/* Set reset level for reset_event() */
		if (ops->set_default_reset_request)
			ops->set_default_reset_request(ae_dev,
						       HNAE3_FUNC_RESET);
			ops->set_default_reset_request(ae_dev, reset_type);
		if (ops->reset_event)
			ops->reset_event(pdev, NULL);

@@ -7011,7 +7014,7 @@ static u64 fmea_get_ram_res_addr(u32 res_type, __le64 *data)
	    res_type == ECC_RESOURCE_SCCC)
		return le64_to_cpu(*data);

	return le64_to_cpu(*data) << PAGE_SHIFT;
	return le64_to_cpu(*data) << HNS_HW_PAGE_SHIFT;
}

static int fmea_recover_others(struct hns_roce_dev *hr_dev, u32 res_type,
+12 −0
Original line number Diff line number Diff line
@@ -1489,6 +1489,17 @@ static void hns_roce_dealloc_dfx_cnt(struct hns_roce_dev *hr_dev)
	kvfree(hr_dev->dfx_cnt);
}

/* Release the per-device DCA safe page buffer and reset the cached
 * CPU/DMA addresses.  Safe to call when no buffer was ever allocated.
 */
static void hns_roce_free_dca_safe_buf(struct hns_roce_dev *hr_dev)
{
	void *buf = hr_dev->dca_safe_buf;

	if (!buf)
		return;

	dma_free_coherent(hr_dev->dev, PAGE_SIZE, buf, hr_dev->dca_safe_page);
	hr_dev->dca_safe_buf = NULL;
	hr_dev->dca_safe_page = 0;
}

int hns_roce_init(struct hns_roce_dev *hr_dev)
{
	struct device *dev = hr_dev->dev;
@@ -1599,6 +1610,7 @@ void hns_roce_exit(struct hns_roce_dev *hr_dev, bool bond_cleanup)
	hns_roce_unregister_device(hr_dev, bond_cleanup);
	hns_roce_unregister_debugfs(hr_dev);
	hns_roce_unregister_poe_ch(hr_dev);
	hns_roce_free_dca_safe_buf(hr_dev);

	if (hr_dev->hw->hw_exit)
		hr_dev->hw->hw_exit(hr_dev);
Loading