Unverified Commit 67e5d5d2 authored by openeuler-ci-bot, committed by Gitee

!13376 Some patches for RDMA/hns

Merge Pull Request from: @ci-robot 
 
PR sync from: Chengchang Tang <tangchengchang@huawei.com>
https://mailweb.openeuler.org/hyperkitty/list/kernel@openeuler.org/message/DFULCFUEWGWFQT2G5MJEP2G2PJLNQAXM/ 
From: Xinghai Cen <cenxinghai@h-partners.com>

Some patches for RDMA/hns

Chengchang Tang (6):
  RDMA/hns: Fix HW UAF when destroy context timeout
  RDMA/hns: Fix integer overflow in calc_loading_percent()
  RDMA/hns: Fix possible RAS when DCA is not attached
  RDMA/hns: Fix a meaningless loop in active_dca_pages_proc()
  RDMA/hns: Fix list_*_careful() not being used in pairs
  RDMA/hns: Use one CQ bank per context

Junxian Huang (5):
  RDMA/hns: Fix wrong output of sysfs scc param when configuration failed
  RDMA/hns: Fix concurrency between sysfs store and FW configuration of
    scc params
  RDMA/hns: Fix mixed use of u32 and __le32 in sysfs
  RDMA/hns: Fix dereference of noderef expression
  RDMA/hns: Fix "Should it be static?" warnings

wenglianfa (2):
  RDMA/hns: Fix the modification of max_send_sge
  RDMA/hns: Fix RoCEE hang when multiple QP banks use EXT_SGE


--
2.33.0
 
https://gitee.com/openeuler/kernel/issues/IB30V8

Link: https://gitee.com/openeuler/kernel/pulls/13376

Reviewed-by: Chengchang Tang <tangchengchang@huawei.com>
Signed-off-by: Zhang Peng <zhangpeng362@huawei.com>
parents d1d14cfb 209f4040
+7 −1
@@ -93,10 +93,16 @@ bool hns_roce_bond_is_active(struct hns_roce_dev *hr_dev)
 static inline bool is_active_slave(struct net_device *net_dev,
 				   struct hns_roce_bond_group *bond_grp)
 {
+	struct net_device *slave_dev;
+
 	if (!bond_grp || !bond_grp->bond || !bond_grp->bond->curr_active_slave)
 		return false;
 
-	return net_dev == bond_grp->bond->curr_active_slave->dev;
+	rcu_read_lock();
+	slave_dev = bond_option_active_slave_get_rcu(bond_grp->bond);
+	rcu_read_unlock();
+
+	return net_dev == slave_dev;
 }
 
 struct net_device *hns_roce_get_bond_netdev(struct hns_roce_dev *hr_dev)
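
The bonding fix above replaces a direct dereference of bond->curr_active_slave with the bond driver's RCU accessor, since the active-slave pointer is RCU-managed and may be updated concurrently. A minimal sketch of that read-side pattern (kernel-style C; the struct and field names here are hypothetical, not from the patch):

/* Sketch only: reading an RCU-protected pointer safely.
 * "slave_cfg" and "active" are illustrative names. */
#include <linux/rcupdate.h>
#include <linux/netdevice.h>

struct slave_cfg {
	struct net_device __rcu *active; /* written with rcu_assign_pointer() */
};

static bool is_current_active(struct slave_cfg *cfg, struct net_device *ndev)
{
	struct net_device *slave;

	rcu_read_lock();                       /* pin the read-side section */
	slave = rcu_dereference(cfg->active);  /* stable snapshot of the pointer */
	rcu_read_unlock();

	/* Only the pointer value is compared after unlock, never dereferenced,
	 * which is the same discipline the patch follows. */
	return ndev == slave;
}
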
+66 −7
@@ -37,6 +37,47 @@
 #include "hns_roce_hem.h"
 #include "hns_roce_common.h"
 
+void hns_roce_put_cq_bankid_for_uctx(struct hns_roce_ucontext *uctx)
+{
+	struct hns_roce_dev *hr_dev = to_hr_dev(uctx->ibucontext.device);
+	struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
+
+	if (hr_dev->pci_dev->revision < PCI_REVISION_ID_HIP09)
+		return;
+
+	mutex_lock(&cq_table->bank_mutex);
+	cq_table->ctx_num[uctx->cq_bank_id]--;
+	mutex_unlock(&cq_table->bank_mutex);
+}
+
+void hns_roce_get_cq_bankid_for_uctx(struct hns_roce_ucontext *uctx)
+{
+#define INVALID_LOAD_CQNUM 0xFFFFFFFF
+	struct hns_roce_dev *hr_dev = to_hr_dev(uctx->ibucontext.device);
+	struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
+	u32 least_load = INVALID_LOAD_CQNUM;
+	u8 bankid = 0;
+	u8 i;
+
+	if (hr_dev->pci_dev->revision < PCI_REVISION_ID_HIP09)
+		return;
+
+	mutex_lock(&cq_table->bank_mutex);
+	for (i = 0; i < HNS_ROCE_CQ_BANK_NUM; i++) {
+		if (!(cq_table->valid_cq_bank_mask & BIT(i)))
+			continue;
+
+		if (cq_table->ctx_num[i] < least_load) {
+			least_load = cq_table->ctx_num[i];
+			bankid = i;
+		}
+	}
+	cq_table->ctx_num[bankid]++;
+	mutex_unlock(&cq_table->bank_mutex);
+
+	uctx->cq_bank_id = bankid;
+}
+
 static u8 get_least_load_bankid_for_cq(struct hns_roce_bank *bank)
 {
 	u32 least_load = bank[0].inuse;
@@ -55,7 +96,21 @@ static u8 get_least_load_bankid_for_cq(struct hns_roce_bank *bank)
 	return bankid;
 }
 
-static int alloc_cqn(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
+static u8 select_cq_bankid(struct hns_roce_dev *hr_dev, struct hns_roce_bank *bank,
+			struct ib_udata *udata)
+{
+	struct hns_roce_ucontext *uctx = udata ?
+		rdma_udata_to_drv_context(udata, struct hns_roce_ucontext,
+					  ibucontext) : NULL;
+	/* only HIP08 is not applied now, and use bank 0 for kernel */
+	if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
+		return uctx ? uctx->cq_bank_id : 0;
+
+	return get_least_load_bankid_for_cq(bank);
+}
+
+static int alloc_cqn(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq,
+		     struct ib_udata *udata)
 {
 	struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
 	struct hns_roce_bank *bank;
@@ -63,7 +118,7 @@ static int alloc_cqn(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
 	int id;
 
 	mutex_lock(&cq_table->bank_mutex);
-	bankid = get_least_load_bankid_for_cq(cq_table->bank);
+	bankid = select_cq_bankid(hr_dev, cq_table->bank, udata);
 	bank = &cq_table->bank[bankid];
 
 	id = ida_alloc_range(&bank->ida, bank->min, bank->max, GFP_KERNEL);
@@ -178,12 +233,11 @@ static void free_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
 
 	ret = hns_roce_destroy_hw_ctx(hr_dev, HNS_ROCE_CMD_DESTROY_CQC,
 				      hr_cq->cqn);
-	if (ret)
+	if (ret) {
+		hr_cq->delayed_destroy_flag = true;
 		dev_err_ratelimited(dev, "DESTROY_CQ failed (%d) for CQN %06lx\n",
 				    ret, hr_cq->cqn);
-
-	if (ret == -EBUSY)
-		hr_cq->delayed_destroy_flag = true;
+	}
 
-	xa_erase(&cq_table->array, hr_cq->cqn);
+	xa_erase_irq(&cq_table->array, hr_cq->cqn);
@@ -417,7 +471,7 @@ int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr,
 		goto err_cq_buf;
 	}
 
-	ret = alloc_cqn(hr_dev, hr_cq);
+	ret = alloc_cqn(hr_dev, hr_cq, udata);
 	if (ret) {
 		ibdev_err(ibdev, "failed to alloc CQN, ret = %d.\n", ret);
 		goto err_cq_db;
@@ -550,6 +604,11 @@ void hns_roce_init_cq_table(struct hns_roce_dev *hr_dev)
 		cq_table->bank[i].max = hr_dev->caps.num_cqs /
 					HNS_ROCE_CQ_BANK_NUM - 1;
 	}
+
+	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_LIMIT_BANK)
+		cq_table->valid_cq_bank_mask = VALID_CQ_BANK_MASK_LIMIT;
+	else
+		cq_table->valid_cq_bank_mask = VALID_CQ_BANK_MASK_DEFAULT;
 }
 
 void hns_roce_cleanup_cq_table(struct hns_roce_dev *hr_dev)
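
The per-context bank assignment added above is a least-loaded pick over the banks enabled in valid_cq_bank_mask, with a mutex-guarded per-bank context counter. A standalone sketch of the same bookkeeping (plain C with pthreads; all names here are illustrative, not the driver's):

/* Sketch of least-loaded bank selection, as used for uctx->cq_bank_id. */
#include <pthread.h>
#include <stdint.h>

#define BANK_NUM 4

static pthread_mutex_t bank_mutex = PTHREAD_MUTEX_INITIALIZER;
static uint32_t ctx_num[BANK_NUM];   /* contexts currently bound to each bank */
static uint8_t valid_mask = 0xF;     /* which banks may be used at all */

static uint8_t get_bankid(void)
{
	uint32_t least = UINT32_MAX;
	uint8_t bankid = 0, i;

	pthread_mutex_lock(&bank_mutex);
	for (i = 0; i < BANK_NUM; i++) {
		if (!(valid_mask & (1U << i)))
			continue;               /* bank disabled by the mask */
		if (ctx_num[i] < least) {
			least = ctx_num[i];
			bankid = i;
		}
	}
	ctx_num[bankid]++;                      /* charge the chosen bank */
	pthread_mutex_unlock(&bank_mutex);
	return bankid;
}

static void put_bankid(uint8_t bankid)
{
	pthread_mutex_lock(&bank_mutex);
	ctx_num[bankid]--;                      /* release on context teardown */
	pthread_mutex_unlock(&bank_mutex);
}

Pinning all CQs of a context to one bank keeps its QPs' EXT_SGE traffic off the other banks, which is what the "Use one CQ bank per context" and RoCEE-hang patches in this series rely on.
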
+56 −4
@@ -309,6 +309,33 @@ hr_qp_to_dca_ctx(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
 	return to_hr_dca_ctx(hr_dev, uctx);
 }
 
+int hns_roce_map_dca_safe_page(struct hns_roce_dev *hr_dev,
+			       struct hns_roce_qp *hr_qp)
+{
+	unsigned int page_count = hr_qp->dca_cfg.npages;
+	struct ib_device *ibdev = &hr_dev->ib_dev;
+	dma_addr_t *pages;
+	unsigned int i;
+	int ret;
+
+	pages = kvcalloc(page_count, sizeof(dma_addr_t), GFP_KERNEL);
+	if (IS_ERR_OR_NULL(pages)) {
+		ibdev_err(ibdev, "failed to alloc DCA safe page array.\n");
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < page_count; i++)
+		pages[i] = hr_dev->dca_safe_page;
+
+	ret = hns_roce_mtr_map(hr_dev, &hr_qp->mtr, pages, page_count);
+	if (ret)
+		ibdev_err(ibdev, "failed to map safe page for DCA, ret = %d.\n",
+			  ret);
+
+	kvfree(pages);
+	return ret;
+}
+
 static int config_dca_qpc(struct hns_roce_dev *hr_dev,
 			  struct hns_roce_qp *hr_qp, dma_addr_t *pages,
 			  int page_count)
@@ -335,6 +362,29 @@ static int config_dca_qpc(struct hns_roce_dev *hr_dev,
 	return 0;
 }
 
+static int config_dca_qpc_to_safe_page(struct hns_roce_dev *hr_dev,
+				       struct hns_roce_qp *hr_qp)
+{
+	unsigned int page_count = hr_qp->dca_cfg.npages;
+	dma_addr_t *pages;
+	unsigned int i;
+	int ret;
+
+	might_sleep();
+
+	pages = kvcalloc(page_count, sizeof(dma_addr_t), GFP_KERNEL);
+	if (IS_ERR_OR_NULL(pages))
+		return -ENOMEM;
+
+	for (i = 0; i < page_count; i++)
+		pages[i] = hr_dev->dca_safe_page;
+
+	ret = config_dca_qpc(hr_dev, hr_qp, pages, page_count);
+
+	kvfree(pages);
+	return ret;
+}
+
 static int setup_dca_buf_to_hw(struct hns_roce_dev *hr_dev,
 			       struct hns_roce_qp *hr_qp,
 			       struct hns_roce_dca_ctx *ctx, u32 buf_id,
@@ -567,7 +617,7 @@ static int active_dca_pages_proc(struct dca_mem *mem, int index, void *param)
 	}
 
 	for (; changed && i < mem->page_count; i++)
-		if (dca_page_is_free(state))
+		if (dca_page_is_free(&mem->states[i]))
			free_pages++;
 
 	/* Clean mem changed to dirty */
@@ -972,7 +1022,7 @@ static void process_aging_dca_mem(struct hns_roce_dev *hr_dev,
 	list_for_each_entry_safe(cfg, tmp_cfg, &ctx->aging_new_list, aging_node)
 		list_move(&cfg->aging_node, &ctx->aging_proc_list);
 
-	while (!ctx->exit_aging && !list_empty(&ctx->aging_proc_list)) {
+	while (!ctx->exit_aging && !list_empty_careful(&ctx->aging_proc_list)) {
 		cfg = list_first_entry(&ctx->aging_proc_list,
 				       struct hns_roce_dca_cfg, aging_node);
 		list_del_init_careful(&cfg->aging_node);
@@ -980,8 +1030,10 @@ static void process_aging_dca_mem(struct hns_roce_dev *hr_dev,
 		spin_unlock(&ctx->aging_lock);
 
 		if (start_free_dca_buf(ctx, cfg->dcan)) {
-			if (hr_dev->hw->chk_dca_buf_inactive(hr_dev, hr_qp))
-				free_buf_from_dca_mem(ctx, cfg);
+			if (hr_dev->hw->chk_dca_buf_inactive(hr_dev, hr_qp)) {
+				if (!config_dca_qpc_to_safe_page(hr_dev, hr_qp))
+					free_buf_from_dca_mem(ctx, cfg);
+			}
 
 			stop_free_dca_buf(ctx, cfg->dcan);
 		}
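
Both DCA helpers added above apply the same defensive idea: before a DCA buffer is reclaimed, every page slot the hardware can still see is repointed at a single driver-owned "safe" page, so a late DMA access lands on harmless memory instead of a freed buffer (the RAS fix in this series). A minimal sketch of building such a table (plain C; safe_page and map_table are hypothetical stand-ins, not driver APIs):

/* Sketch: fill an address table with one fallback page before handing
 * it to "hardware". */
#include <stdint.h>
#include <stdlib.h>

typedef uint64_t dma_addr_t;

static int map_to_safe_page(dma_addr_t safe_page, unsigned int page_count,
			    int (*map_table)(dma_addr_t *, unsigned int))
{
	dma_addr_t *pages;
	unsigned int i;
	int ret;

	pages = calloc(page_count, sizeof(*pages));
	if (!pages)
		return -1;

	/* Every slot points at the same safe page: any late access lands there. */
	for (i = 0; i < page_count; i++)
		pages[i] = safe_page;

	ret = map_table(pages, page_count);  /* push the table to hardware */
	free(pages);
	return ret;
}
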
+2 −0
@@ -75,4 +75,6 @@ void hns_roce_modify_dca(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
 
 void hns_roce_enum_dca_pool(struct hns_roce_dca_ctx *dca_ctx, void *param,
 			    hns_dca_enum_callback cb);
+int hns_roce_map_dca_safe_page(struct hns_roce_dev *hr_dev,
+			       struct hns_roce_qp *hr_qp);
 #endif
+8 −2
@@ -187,8 +187,8 @@ static void dca_setup_pool_name(pid_t pid, bool is_kdca, char *name, int size)
 
 static u64 calc_loading_percent(size_t total, size_t free, u32 *out_rem)
 {
-	u32 all_pages, used_pages, free_pages, scale;
-	u64 percent = 0;
+	u64 used_pages, scale, all_pages, free_pages;
+	u64 percent = U64_MAX;
 	u32 rem = 0;
 
 	all_pages = total >> HNS_HW_PAGE_SHIFT;
@@ -214,6 +214,9 @@ static void dca_print_pool_stats(struct hns_roce_dca_ctx *ctx, pid_t pid,
 	u32 rem = 0;
 
 	percent = calc_loading_percent(ctx->total_size, ctx->free_size, &rem);
+	if (percent == U64_MAX)
+		return;
+
 	dca_setup_pool_name(pid, is_kdca, name, sizeof(name));
 	seq_printf(file, "%-10s %-16ld %-16ld %-16u %llu.%0*u\n", name,
 		   ctx->total_size / KB, ctx->free_size / KB, ctx->free_mems,
@@ -366,6 +369,9 @@ static void dca_stats_ctx_mem_in_seqfile(struct hns_roce_dca_ctx *ctx,
 
 	dca_ctx_stats_mem(ctx, &stats);
 	percent = calc_loading_percent(stats.total_size, stats.free_size, &rem);
+	if (percent == U64_MAX)
+		return;
+
 	seq_printf(file, DCA_STAT_NAME_FMT "%llu.%0*u\n", "Loading:", percent,
 		   LOADING_PERCENT_SHIFT, rem);
 	dca_ctx_print_mem_kb(file, "Total:", stats.total_size);
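
On the calc_loading_percent() change: with u32 counters, free_pages * scale wraps once the pool is large enough. For example, 16 GiB of free space is 4,194,304 pages of 4 KiB; multiplied by an illustrative fixed-point scale of 100,000 that is about 4.2e11, far above the 32-bit ceiling of 4,294,967,295, so the reported percentage silently corrupts. Widening to u64 avoids the wrap, and U64_MAX doubles as the "invalid" sentinel the callers now check. A small standalone demonstration (the page count and scale are illustrative, not the driver's values):

/* Demonstrates the u32 overflow fixed in calc_loading_percent(). */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t free_pages = 4194304;   /* 16 GiB of 4 KiB pages */
	uint32_t scale = 100000;         /* hypothetical fixed-point scale */

	uint32_t bad = free_pages * scale;            /* wraps modulo 2^32 */
	uint64_t good = (uint64_t)free_pages * scale; /* widened, as in the fix */

	printf("u32: %" PRIu32 "\nu64: %" PRIu64 "\n", bad, good);
	return 0;
}

Running it prints 2818572288 for the wrapped u32 product versus 419430400000 for the correct u64 value.
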