Unverified Commit 47f16eba authored by openeuler-ci-bot, committed by Gitee

!11036 v2 Some bugfixes for HNS RoCE

Merge Pull Request from: @ci-robot 
 
PR sync from: Chengchang Tang <tangchengchang@huawei.com>
https://mailweb.openeuler.org/hyperkitty/list/kernel@openeuler.org/message/C3CP6J4YTH2PWKCS7CUNNM6IS2PZYHDT/ 
Some bugfix patches for HNS RoCE:

Chengchang Tang (4):
  RDMA/hns: Use one CQ bank per context
  RDMA/hns: Fix HW UAF when destroy context timeout
  RDMA/hns: Fix spin_unlock_irqrestore() called with IRQs enabled
  RDMA/hns: Fix integer overflow in calc_loading_percent()

Feng Fang (1):
  RDMA/hns: Fix different dgids mapping to the same dip_idx

Junxian Huang (6):
  RDMA/hns: Fix a potential Sleep-in-Atomic-Context
  RDMA/hns: Fix soft lockup under heavy CEQE load
  RDMA/hns: Fix mixed use of u32 and __le32 in sysfs
  RDMA/hns: Fix wrong output of sysfs scc param when configuration failed
  RDMA/hns: Fix concurrency between sysfs store and FW configuration of
    scc params
  RDMA/hns: Fix creating sysfs before allocating resources

wenglianfa (3):
  RDMA/hns: Fix the overflow risk of hem_list_calc_ba_range()
  RDMA/hns: Fix long waiting cmd event when reset
  RDMA/hns: Fix sleeping in spin_lock critical section


--
2.33.0
 
https://gitee.com/openeuler/kernel/issues/IAL7SX 
 
Link: https://gitee.com/openeuler/kernel/pulls/11036

Reviewed-by: Chengchang Tang <tangchengchang@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
parents bdc23b8f 756c3e67
+57 −6
@@ -37,6 +37,43 @@
#include "hns_roce_hem.h"
#include "hns_roce_common.h"

void hns_roce_put_cq_bankid_for_uctx(struct hns_roce_ucontext *uctx)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(uctx->ibucontext.device);
	struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;

	if (hr_dev->pci_dev->revision < PCI_REVISION_ID_HIP10)
		return;

	mutex_lock(&cq_table->bank_mutex);
	cq_table->ctx_num[uctx->cq_bank_id]--;
	mutex_unlock(&cq_table->bank_mutex);
}

void hns_roce_get_cq_bankid_for_uctx(struct hns_roce_ucontext *uctx)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(uctx->ibucontext.device);
	struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
	u32 least_load = cq_table->ctx_num[0];
	u8 bankid = 0;
	u8 i;

	if (hr_dev->pci_dev->revision < PCI_REVISION_ID_HIP10)
		return;

	mutex_lock(&cq_table->bank_mutex);
	for (i = 1; i < HNS_ROCE_CQ_BANK_NUM; i++) {
		if (cq_table->ctx_num[i] < least_load) {
			least_load = cq_table->ctx_num[i];
			bankid = i;
		}
	}
	cq_table->ctx_num[bankid]++;
	mutex_unlock(&cq_table->bank_mutex);

	uctx->cq_bank_id = bankid;
}

static u8 get_least_load_bankid_for_cq(struct hns_roce_bank *bank)
{
	u32 least_load = bank[0].inuse;
@@ -55,7 +92,21 @@ static u8 get_least_load_bankid_for_cq(struct hns_roce_bank *bank)
	return bankid;
}

-static int alloc_cqn(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
+static u8 select_cq_bankid(struct hns_roce_dev *hr_dev, struct hns_roce_bank *bank,
+			struct ib_udata *udata)
+{
+	struct hns_roce_ucontext *uctx = udata ?
+		rdma_udata_to_drv_context(udata, struct hns_roce_ucontext,
+					  ibucontext) : NULL;
+	/* only apply for HIP10 now, and use bank 0 for kernel */
+	if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP10)
+		return uctx ? uctx->cq_bank_id : 0;
+
+	return get_least_load_bankid_for_cq(bank);
+}
+
+static int alloc_cqn(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq,
+		     struct ib_udata *udata)
{
	struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
	struct hns_roce_bank *bank;
@@ -63,7 +114,7 @@ static int alloc_cqn(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
	int id;

	mutex_lock(&cq_table->bank_mutex);
-	bankid = get_least_load_bankid_for_cq(cq_table->bank);
+	bankid = select_cq_bankid(hr_dev, cq_table->bank, udata);
	bank = &cq_table->bank[bankid];

	id = ida_alloc_range(&bank->ida, bank->min, bank->max, GFP_KERNEL);
@@ -178,12 +229,12 @@ static void free_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)

	ret = hns_roce_destroy_hw_ctx(hr_dev, HNS_ROCE_CMD_DESTROY_CQC,
				      hr_cq->cqn);
-	if (ret)
+	if (ret) {
+		hr_cq->delayed_destroy_flag = true;
		dev_err_ratelimited(dev,
				    "DESTROY_CQ failed (%d) for CQN %06lx\n",
				    ret, hr_cq->cqn);
-	if (ret == -EBUSY)
-		hr_cq->delayed_destroy_flag = true;
+	}

	xa_erase_irq(&cq_table->array, hr_cq->cqn);

@@ -523,7 +574,7 @@ int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr,
		goto err_cq_buf;
	}

-	ret = alloc_cqn(hr_dev, hr_cq);
+	ret = alloc_cqn(hr_dev, hr_cq, udata);
	if (ret) {
		ibdev_err(ibdev, "failed to alloc CQN, ret = %d.\n", ret);
		goto err_cq_db;
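
The first file's changes pin each user context to a CQ bank: hns_roce_get_cq_bankid_for_uctx() picks the bank with the fewest contexts at context creation, and select_cq_bankid() then routes every CQ of that context to the same bank (kernel CQs stay on bank 0 on HIP10 and newer). Below is a minimal userspace model of the balancing logic; the bank count of 4 and the context churn are assumptions for illustration only.

/*
 * Userspace model of the per-context CQ bank balancing above.
 * Names mirror the driver; values are made up for the demo.
 */
#include <stdio.h>
#include <stdint.h>

#define CQ_BANK_NUM 4

static uint32_t ctx_num[CQ_BANK_NUM]; /* contexts currently on each bank */

/* Pick the bank with the fewest contexts, like
 * hns_roce_get_cq_bankid_for_uctx() scanning cq_table->ctx_num[]. */
static uint8_t get_bankid(void)
{
	uint32_t least = ctx_num[0];
	uint8_t bankid = 0;

	for (uint8_t i = 1; i < CQ_BANK_NUM; i++) {
		if (ctx_num[i] < least) {
			least = ctx_num[i];
			bankid = i;
		}
	}
	ctx_num[bankid]++;
	return bankid;
}

static void put_bankid(uint8_t bankid)
{
	ctx_num[bankid]--; /* hns_roce_put_cq_bankid_for_uctx() */
}

int main(void)
{
	/* Eight contexts spread evenly over four banks: 0 1 2 3 0 1 2 3 */
	for (int i = 0; i < 8; i++)
		printf("ctx %d -> bank %u\n", i, (unsigned)get_bankid());
	put_bankid(2);
	printf("next ctx -> bank %u (bank 2 was just released)\n",
	       (unsigned)get_bankid());
	return 0;
}

Releasing a context decrements its bank's counter, so the next context lands on the emptiest bank rather than following a strict round-robin.
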
+8 −2
@@ -243,8 +243,8 @@ static void dca_setup_pool_name(pid_t pid, bool is_kdca, char *name, int size)

static u64 calc_loading_percent(size_t total, size_t free, u32 *out_rem)
{
-	u32 all_pages, used_pages, free_pages, scale;
-	u64 percent = 0;
+	u64 used_pages, scale, all_pages, free_pages;
+	u64 percent = U64_MAX;
	u32 rem = 0;

	all_pages = total >> HNS_HW_PAGE_SHIFT;
@@ -270,6 +270,9 @@ static void dca_print_pool_stats(struct hns_roce_dca_ctx *ctx, pid_t pid,
	u32 rem = 0;

	percent = calc_loading_percent(ctx->total_size, ctx->free_size, &rem);
	if (percent == U64_MAX)
		return;

	dca_setup_pool_name(pid, is_kdca, name, sizeof(name));
	seq_printf(file, "%-10s %-16ld %-16ld %-16u %llu.%0*u\n", name,
		   ctx->total_size / KB, ctx->free_size / KB, ctx->free_mems,
@@ -422,6 +425,9 @@ static void dca_stats_ctx_mem_in_seqfile(struct hns_roce_dca_ctx *ctx,

	dca_ctx_stats_mem(ctx, &stats);
	percent = calc_loading_percent(stats.total_size, stats.free_size, &rem);
	if (percent == U64_MAX)
		return;

	seq_printf(file, DCA_STAT_NAME_FMT "%llu.%0*u\n", "Loading:", percent,
		   LOADING_PERCENT_SHIFT, rem);
	dca_ctx_print_mem_kb(file, "Total:", stats.total_size);
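
The calc_loading_percent() fix widens the page counters to u64 and repurposes U64_MAX as an "invalid" sentinel that both seq_file callers now check before printing. The driver's exact percent formula is not shown in this hunk; the sketch below only demonstrates the class of wrap that u32 intermediates allow, using an assumed two-decimal percent calculation and a made-up 1 TiB pool.

/*
 * Why calc_loading_percent() needed 64-bit math: with u32 page counts,
 * an intermediate like used_pages * 100 * 100 wraps for large DCA
 * pools. HNS_HW_PAGE_SHIFT = 12 matches 4K hardware pages; the pool
 * size is an example, and the formula is an assumption for the demo.
 */
#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

#define HNS_HW_PAGE_SHIFT 12
#define PERCENT_SCALE 100

int main(void)
{
	uint64_t total = 1ULL << 40;   /* 1 TiB pool */
	uint64_t free_sz = 1ULL << 30; /* 1 GiB free */
	uint32_t all32 = (uint32_t)(total >> HNS_HW_PAGE_SHIFT);
	uint32_t used32 = all32 - (uint32_t)(free_sz >> HNS_HW_PAGE_SHIFT);
	uint64_t all64 = total >> HNS_HW_PAGE_SHIFT;
	uint64_t used64 = all64 - (free_sz >> HNS_HW_PAGE_SHIFT);

	/* second *100 adds two decimal places; the u32 product wraps */
	uint32_t bad = used32 * PERCENT_SCALE * 100 / all32;
	uint64_t good = used64 * PERCENT_SCALE * 100 / all64;

	printf("u32 math: %u (wrapped)\n", bad);
	printf("u64 math: %" PRIu64 " (i.e. 99.90%%)\n", good);
	return 0;
}

U64_MAX works as the error sentinel precisely because 0 is a legal load value; a percentage can never reach U64_MAX, so the callers' early-return check is unambiguous.
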
+13 −6
@@ -293,6 +293,7 @@ struct hns_roce_ucontext {
	u32			config;
	struct hns_roce_dca_ctx	dca_ctx;
	struct hns_dca_ctx_debugfs dca_dbgfs;
	u8 cq_bank_id;
};

struct hns_roce_pd {
@@ -606,9 +607,8 @@ struct hns_roce_bank {
};

struct hns_roce_idx_table {
-	u32 *spare_idx;
-	u32 head;
-	u32 tail;
+	unsigned long *qpn_bitmap;
+	unsigned long *dip_idx_bitmap;
};

struct hns_roce_qp_table {
@@ -627,6 +627,7 @@ struct hns_roce_cq_table {
	struct hns_roce_hem_table	table;
	struct hns_roce_bank bank[HNS_ROCE_CQ_BANK_NUM];
	struct mutex			bank_mutex;
	u32 ctx_num[HNS_ROCE_CQ_BANK_NUM];
};

struct hns_roce_srq_table {
@@ -768,6 +769,7 @@ struct hns_roce_qp {
	u8			priority;
	bool			delayed_destroy_flag;
	struct hns_roce_mtr_node *mtr_node;
	struct hns_roce_dip *dip;
};

struct hns_roce_ib_iboe {
@@ -834,6 +836,7 @@ struct hns_roce_eq {
	int				shift;
	int				event_type;
	int				sub_type;
	struct tasklet_struct		tasklet;
};

struct hns_roce_eq_table {
@@ -1121,12 +1124,14 @@ struct hns_roce_cnp_pri_param {
#define HNS_ROCE_SCC_PARAM_SIZE 4
struct hns_roce_scc_param {
	__le32 param[HNS_ROCE_SCC_PARAM_SIZE];
-	u32 lifespan;
+	__le32 lifespan;
	unsigned long timestamp;
	enum hns_roce_scc_algo algo_type;
	struct delayed_work scc_cfg_dwork;
	struct hns_roce_dev *hr_dev;
	u8 port_num;
+	__le32 latest_param[HNS_ROCE_SCC_PARAM_SIZE];
+	struct mutex scc_mutex; /* protect @param and @latest_param */
};

struct hns_roce_port {
@@ -1226,9 +1231,9 @@ struct hns_roce_dev {
	struct rdma_notify_mem *notify_tbl;
	size_t notify_num;
	struct list_head mtr_unfree_list; /* list of unfree mtr on this dev */
-	spinlock_t mtr_unfree_list_lock; /* protect mtr_unfree_list */
+	struct mutex mtr_unfree_list_mutex; /* protect mtr_unfree_list */
	struct list_head umem_unfree_list; /* list of unfree umem on this dev */
-	spinlock_t umem_unfree_list_lock; /* protect umem_unfree_list */
+	struct mutex umem_unfree_list_mutex; /* protect umem_unfree_list */
};

static inline struct hns_roce_dev *to_hr_dev(struct ib_device *ib_dev)
@@ -1553,4 +1558,6 @@ int hns_roce_register_poe_channel(struct hns_roce_dev *hr_dev, u8 channel,
				  u64 poe_addr);
int hns_roce_unregister_poe_channel(struct hns_roce_dev *hr_dev, u8 channel);
bool hns_roce_is_srq_exist(struct hns_roce_dev *hr_dev, u32 srqn);
void hns_roce_put_cq_bankid_for_uctx(struct hns_roce_ucontext *uctx);
void hns_roce_get_cq_bankid_for_uctx(struct hns_roce_ucontext *uctx);
#endif /* _HNS_ROCE_DEVICE_H */
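
Three of the header changes are visible here: lifespan becomes __le32 because the whole scc_param block is copied verbatim into a little-endian command descriptor; scc_mutex serializes sysfs stores against firmware readback of @param/@latest_param; and the mtr/umem unfree lists move from spinlocks to mutexes so their free paths may sleep. A userspace model of the endianness point, with stand-ins for the kernel's cpu_to_le32()/le32_to_cpu():

/*
 * Model of the u32/__le32 mixup fixed by making 'lifespan' __le32.
 * Every field memcpy'd into the descriptor must already be
 * little-endian; storing a CPU-order u32 there silently breaks on
 * big-endian hosts. The helpers below approximate the kernel's.
 */
#include <stdio.h>
#include <stdint.h>

static uint32_t cpu_to_le32(uint32_t x)
{
#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	return __builtin_bswap32(x);
#else
	return x;
#endif
}

static uint32_t le32_to_cpu(uint32_t x) { return cpu_to_le32(x); }

struct scc_param {         /* mirrors hns_roce_scc_param's wire fields */
	uint32_t param[4]; /* __le32 in the driver */
	uint32_t lifespan; /* now also __le32, so memcpy to HW is safe */
};

int main(void)
{
	struct scc_param p = {0};

	p.lifespan = cpu_to_le32(1000); /* sysfs store path */
	printf("lifespan readback: %u\n", le32_to_cpu(p.lifespan));
	return 0;
}

On little-endian hosts the conversions compile to nothing, which is why mixed u32/__le32 code appears to work until it runs on a big-endian machine or is flagged by sparse.
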
+6 −6
@@ -1100,9 +1100,9 @@ static bool hem_list_is_bottom_bt(int hopnum, int bt_level)
 * @bt_level: base address table level
 * @unit: ba entries per bt page
 */
-static u32 hem_list_calc_ba_range(int hopnum, int bt_level, int unit)
+static u64 hem_list_calc_ba_range(int hopnum, int bt_level, int unit)
{
-	u32 step;
+	u64 step;
	int max;
	int i;

@@ -1138,7 +1138,7 @@ int hns_roce_hem_list_calc_root_ba(const struct hns_roce_buf_region *regions,
{
	struct hns_roce_buf_region *r;
	int total = 0;
-	int step;
+	u64 step;
	int i;

	for (i = 0; i < region_cnt; i++) {
@@ -1169,7 +1169,7 @@ static int hem_list_alloc_mid_bt(struct hns_roce_dev *hr_dev,
	int ret = 0;
	int max_ofs;
	int level;
-	u32 step;
+	u64 step;
	int end;

	if (hopnum <= 1)
@@ -1206,7 +1206,7 @@ static int hem_list_alloc_mid_bt(struct hns_roce_dev *hr_dev,
		}

		start_aligned = (distance / step) * step + r->offset;
-		end = min_t(int, start_aligned + step - 1, max_ofs);
+		end = min_t(u64, start_aligned + step - 1, max_ofs);
		cur = hem_list_alloc_item(hr_dev, start_aligned, end, unit,
					  true);
		if (!cur) {
@@ -1294,7 +1294,7 @@ static int setup_middle_bt(struct hns_roce_dev *hr_dev, void *cpu_base,
	struct hns_roce_hem_item *hem, *temp_hem;
	int total = 0;
	int offset;
-	int step;
+	u64 step;

	step = hem_list_calc_ba_range(r->hopnum, 1, unit);
	if (step < 1)
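
hem_list_calc_ba_range() returns how many base-address entries one table entry covers at a given level, which grows multiplicatively with each hop; u32 (and the int locals at the call sites) could not hold the product. The sketch below shows the failure mode; the unit value and hop count are plausible illustrations, not values taken from the driver.

/*
 * Sketch of the overflow widened away in hem_list_calc_ba_range():
 * the range per entry is roughly unit^(levels below it), and the
 * product outgrows u32. 'unit' here assumes 8-byte BAs in a 64KB
 * base-address-table page.
 */
#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

static uint64_t calc_ba_range_u64(int hopnum, int bt_level, uint64_t unit)
{
	uint64_t step = 1;

	for (int i = bt_level; i < hopnum; i++)
		step *= unit; /* each extra level multiplies the range */
	return step;
}

int main(void)
{
	uint64_t unit = 65536 / 8; /* 8K BAs per 64KB BT page */
	int hopnum = 3;

	uint64_t step64 = calc_ba_range_u64(hopnum, 0, unit);
	uint32_t step32 = (uint32_t)step64; /* what the old u32 kept */

	printf("u64: %" PRIu64 ", truncated u32: %u\n", step64, step32);
	return 0;
}

With 8192^3 = 2^39, the u32 truncation yields exactly 0; depending on the caller that either trips the "step < 1" bailout or corrupts the base-address-table layout math, so the widening has to reach every caller's local as well.
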
+139 −50
@@ -5308,21 +5308,24 @@ static int get_dip_ctx_idx(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
{
	const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
-	u32 *spare_idx = hr_dev->qp_table.idx_table.spare_idx;
-	u32 *head =  &hr_dev->qp_table.idx_table.head;
-	u32 *tail =  &hr_dev->qp_table.idx_table.tail;
+	unsigned long *dip_idx_bitmap = hr_dev->qp_table.idx_table.dip_idx_bitmap;
+	unsigned long *qpn_bitmap = hr_dev->qp_table.idx_table.qpn_bitmap;
+	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
	struct hns_roce_dip *hr_dip;
	unsigned long flags;
	int ret = 0;
+	u32 idx;

	spin_lock_irqsave(&hr_dev->dip_list_lock, flags);

-	spare_idx[*tail] = ibqp->qp_num;
-	*tail = (*tail == hr_dev->caps.num_qps - 1) ? 0 : (*tail + 1);
+	if (!test_bit(ibqp->qp_num, dip_idx_bitmap))
+		set_bit(ibqp->qp_num, qpn_bitmap);

	list_for_each_entry(hr_dip, &hr_dev->dip_list, node) {
		if (!memcmp(grh->dgid.raw, hr_dip->dgid, GID_LEN_V2)) {
			*dip_idx = hr_dip->dip_idx;
+			hr_dip->qp_cnt++;
+			hr_qp->dip = hr_dip;
			goto out;
		}
	}
@@ -5336,9 +5339,21 @@ static int get_dip_ctx_idx(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
		goto out;
	}

+	idx = find_first_bit(qpn_bitmap, hr_dev->caps.num_qps);
+	if (idx < hr_dev->caps.num_qps) {
+		*dip_idx = idx;
+		clear_bit(idx, qpn_bitmap);
+		set_bit(idx, dip_idx_bitmap);
+	} else {
+		ret = -ENOENT;
+		kfree(hr_dip);
+		goto out;
+	}

	memcpy(hr_dip->dgid, grh->dgid.raw, sizeof(grh->dgid.raw));
-	hr_dip->dip_idx = *dip_idx = spare_idx[*head];
-	*head = (*head == hr_dev->caps.num_qps - 1) ? 0 : (*head + 1);
+	hr_dip->dip_idx = *dip_idx;
+	hr_dip->qp_cnt++;
+	hr_qp->dip = hr_dip;
	list_add_tail(&hr_dip->node, &hr_dev->dip_list);

out:
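
The two hunks above replace the circular spare_idx queue with two bitmaps: qpn_bitmap holds indexes free to be handed out as dip_idx values, dip_idx_bitmap holds indexes currently bound to a DGID, and find_first_bit()/clear_bit()/set_bit() move an index between the two. The old ring could recycle an index still bound to another destination GID, which is the "different dgids mapping to the same dip_idx" bug. A single-word userspace model of the invariant (the driver sizes both bitmaps by caps.num_qps):

/*
 * Model of the bitmap-based dip_idx allocation. With two bitmaps an
 * index is visibly either free (qpn_bitmap) or bound (dip_idx_bitmap);
 * 64 indexes keep the demo in one word.
 */
#include <stdio.h>
#include <stdint.h>

static uint64_t qpn_bitmap;     /* set bit = index free to use as dip_idx */
static uint64_t dip_idx_bitmap; /* set bit = index bound to a DGID */

static int alloc_dip_idx(void)
{
	for (int i = 0; i < 64; i++) {
		if (qpn_bitmap & (1ULL << i)) {
			qpn_bitmap &= ~(1ULL << i);  /* clear_bit() */
			dip_idx_bitmap |= 1ULL << i; /* set_bit() */
			return i;
		}
	}
	return -1; /* -ENOENT in the driver */
}

static void free_dip_idx(int idx)
{
	dip_idx_bitmap &= ~(1ULL << idx);
	qpn_bitmap |= 1ULL << idx;
}

int main(void)
{
	qpn_bitmap = ~0ULL; /* every index starts free */

	int a = alloc_dip_idx(); /* 0 */
	int b = alloc_dip_idx(); /* 1: never equals 'a' while 'a' is bound */
	free_dip_idx(a);
	int c = alloc_dip_idx(); /* 0 again, but only after release */
	printf("a=%d b=%d c=%d\n", a, b, c);
	return 0;
}
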
@@ -6247,11 +6262,13 @@ int hns_roce_v2_destroy_qp_common(struct hns_roce_dev *hr_dev,
		/* Modify qp to reset before destroying qp */
		ret = hns_roce_v2_modify_qp(&hr_qp->ibqp, NULL, 0,
					    hr_qp->state, IB_QPS_RESET, udata);
-		if (ret)
+		if (ret) {
+			hr_qp->delayed_destroy_flag = true;
			ibdev_err_ratelimited(ibdev,
				  "failed to modify QP to RST, ret = %d.\n",
				  ret);
+		}
	}

	send_cq = hr_qp->ibqp.send_cq ? to_hr_cq(hr_qp->ibqp.send_cq) : NULL;
	recv_cq = hr_qp->ibqp.recv_cq ? to_hr_cq(hr_qp->ibqp.recv_cq) : NULL;
@@ -6278,21 +6295,47 @@ int hns_roce_v2_destroy_qp_common(struct hns_roce_dev *hr_dev,
	return ret;
}

static void put_dip_ctx_idx(struct hns_roce_dev *hr_dev,
							struct hns_roce_qp *hr_qp)
{
	unsigned long *dip_idx_bitmap = hr_dev->qp_table.idx_table.dip_idx_bitmap;
	unsigned long *qpn_bitmap = hr_dev->qp_table.idx_table.qpn_bitmap;
	struct hns_roce_dip *hr_dip = hr_qp->dip;
	unsigned long flags;

	spin_lock_irqsave(&hr_dev->dip_list_lock, flags);

	if (hr_dip) {
		hr_dip->qp_cnt--;
		if (!hr_dip->qp_cnt) {
			clear_bit(hr_dip->dip_idx, dip_idx_bitmap);
			set_bit(hr_dip->dip_idx, qpn_bitmap);

			list_del(&hr_dip->node);
		} else {
			hr_dip = NULL;
		}
	}

	spin_unlock_irqrestore(&hr_dev->dip_list_lock, flags);
	kfree(hr_dip);
}

int hns_roce_v2_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
	int ret;

	if (hr_qp->congest_type == HNS_ROCE_CONGEST_TYPE_DIP)
		put_dip_ctx_idx(hr_dev, hr_qp);

	ret = hns_roce_v2_destroy_qp_common(hr_dev, hr_qp, udata);
	if (ret)
		ibdev_err_ratelimited(&hr_dev->ib_dev,
			  "failed to destroy QP, QPN = 0x%06lx, ret = %d.\n",
			  hr_qp->qpn, ret);

	if (ret == -EBUSY)
		hr_qp->delayed_destroy_flag = true;

	hns_roce_qp_destroy(hr_dev, hr_qp, udata);

	return 0;
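
put_dip_ctx_idx() decides the fate of the hr_dip node entirely under dip_list_lock, then calls kfree() only after spin_unlock_irqrestore(), relying on kfree(NULL) being a no-op when the node is still referenced. The same decide-under-lock, free-after-unlock shape in a runnable pthreads model; the node and list types here are invented for the demo.

/*
 * Unlock-then-free pattern modeled with pthreads: decide under the
 * lock whether the node dies, but do the actual free after unlocking
 * so no allocator work happens inside the critical section.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct dip_node {
	int refcnt;
	struct dip_node *next;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct dip_node *dip_list;

static void put_dip(struct dip_node *node)
{
	struct dip_node *to_free = node;

	pthread_mutex_lock(&list_lock);
	if (--node->refcnt)
		to_free = NULL;        /* still referenced: keep it */
	else
		dip_list = node->next; /* unlink (head-only for brevity) */
	pthread_mutex_unlock(&list_lock);

	free(to_free);                 /* free(NULL) is a no-op */
}

int main(void)
{
	struct dip_node *n = calloc(1, sizeof(*n));

	n->refcnt = 2;
	dip_list = n;
	put_dip(n); /* refcnt 2 -> 1, nothing freed */
	put_dip(n); /* refcnt 1 -> 0, unlinked and freed after unlock */
	puts("done");
	return 0;
}

Keeping the free out of the critical section both shortens the IRQ-disabled window and avoids lock-context violations if the release path ever needs to sleep.
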
@@ -6855,33 +6898,11 @@ static struct hns_roce_ceqe *next_ceqe_sw_v2(struct hns_roce_eq *eq)
		!!(eq->cons_index & eq->entries)) ? ceqe : NULL;
}

-static irqreturn_t hns_roce_v2_ceq_int(struct hns_roce_dev *hr_dev,
-				       struct hns_roce_eq *eq)
+static irqreturn_t hns_roce_v2_ceq_int(struct hns_roce_eq *eq)
{
-	struct hns_roce_ceqe *ceqe = next_ceqe_sw_v2(eq);
-	irqreturn_t ceqe_found = IRQ_NONE;
-	u32 cqn;
+	tasklet_schedule(&eq->tasklet);

-	while (ceqe) {
-		/* Make sure we read CEQ entry after we have checked the
-		 * ownership bit
-		 */
-		dma_rmb();
-
-		cqn = hr_reg_read(ceqe, CEQE_CQN);
-
-		hns_roce_cq_completion(hr_dev, cqn);
-
-		++eq->cons_index;
-		ceqe_found = IRQ_HANDLED;
-		atomic64_inc(&hr_dev->dfx_cnt[HNS_ROCE_DFX_CEQE_CNT]);
-
-		ceqe = next_ceqe_sw_v2(eq);
-	}
-
-	update_eq_db(eq);
-
-	return IRQ_RETVAL(ceqe_found);
+	return IRQ_HANDLED;
}

static irqreturn_t hns_roce_v2_msix_interrupt_eq(int irq, void *eq_ptr)
@@ -6892,7 +6913,7 @@ static irqreturn_t hns_roce_v2_msix_interrupt_eq(int irq, void *eq_ptr)

	if (eq->type_flag == HNS_ROCE_CEQ)
		/* Completion event interrupt */
-		int_work = hns_roce_v2_ceq_int(hr_dev, eq);
+		int_work = hns_roce_v2_ceq_int(eq);
	else
		/* Asychronous event interrupt */
		int_work = hns_roce_v2_aeq_int(hr_dev, eq);
@@ -7264,6 +7285,34 @@ static int hns_roce_v2_create_eq(struct hns_roce_dev *hr_dev,
	return ret;
}

static void hns_roce_ceq_task(struct tasklet_struct *task)
{
	struct hns_roce_eq *eq = from_tasklet(eq, task, tasklet);
	struct hns_roce_ceqe *ceqe = next_ceqe_sw_v2(eq);
	struct hns_roce_dev *hr_dev = eq->hr_dev;
	int ceqe_num = 0;
	u32 cqn;

	while (ceqe && ceqe_num < hr_dev->caps.ceqe_depth) {
		/* Make sure we read CEQ entry after we have checked the
		 * ownership bit
		 */
		dma_rmb();

		cqn = hr_reg_read(ceqe, CEQE_CQN);

		hns_roce_cq_completion(hr_dev, cqn);

		++eq->cons_index;
		++ceqe_num;
		atomic64_inc(&hr_dev->dfx_cnt[HNS_ROCE_DFX_CEQE_CNT]);

		ceqe = next_ceqe_sw_v2(eq);
	}

	update_eq_db(eq);
}
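
hns_roce_ceq_task() is the bottom half of the soft-lockup fix: the hard IRQ handler above now only schedules the tasklet, and the drain loop runs in softirq context, bounded to ceqe_depth entries per pass so a CEQE flood cannot hold one CPU indefinitely. A minimal, self-contained module sketch of the same hardirq-to-tasklet split; nothing here is HNS-specific, and the scheduling call in init simply stands in for the interrupt handler.

/*
 * Module sketch of the hardirq -> tasklet split adopted above. The
 * hard IRQ handler only schedules the tasklet; the tasklet does the
 * longer drain, as hns_roce_v2_ceq_int()/hns_roce_ceq_task() now do.
 */
#include <linux/module.h>
#include <linux/interrupt.h>

static struct tasklet_struct demo_tasklet;

static void demo_task(struct tasklet_struct *t)
{
	/* bottom half: safe place for the bounded CEQE-style drain loop */
	pr_info("tasklet ran in softirq context\n");
}

static int __init demo_init(void)
{
	tasklet_setup(&demo_tasklet, demo_task);
	tasklet_schedule(&demo_tasklet); /* what the hard IRQ handler does */
	return 0;
}

static void __exit demo_exit(void)
{
	tasklet_kill(&demo_tasklet); /* mirror of __hns_roce_free_irq() */
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

tasklet_kill() on teardown, mirrored in the request/free IRQ changes below, guarantees the callback is neither queued nor running once the IRQ line is released.
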

static int __hns_roce_request_irq(struct hns_roce_dev *hr_dev, int irq_num,
				  int comp_num, int aeq_num, int other_num)
{
@@ -7295,21 +7344,24 @@ static int __hns_roce_request_irq(struct hns_roce_dev *hr_dev, int irq_num,
			 j - other_num - aeq_num);

	for (j = 0; j < irq_num; j++) {
-		if (j < other_num)
+		if (j < other_num) {
			ret = request_irq(hr_dev->irq[j],
					  hns_roce_v2_msix_interrupt_abn,
					  0, hr_dev->irq_names[j], hr_dev);
-		else if (j < (other_num + comp_num))
+		} else if (j < (other_num + comp_num)) {
+			tasklet_setup(&eq_table->eq[j - other_num].tasklet,
+				      hns_roce_ceq_task);
			ret = request_irq(eq_table->eq[j - other_num].irq,
					  hns_roce_v2_msix_interrupt_eq,
					  0, hr_dev->irq_names[j + aeq_num],
					  &eq_table->eq[j - other_num]);
-		else
+		} else {
			ret = request_irq(eq_table->eq[j - other_num].irq,
					  hns_roce_v2_msix_interrupt_eq,
					  0, hr_dev->irq_names[j - comp_num],
					  &eq_table->eq[j - other_num]);
+		}

		if (ret) {
			dev_err(hr_dev->dev, "Request irq error!\n");
			goto err_request_failed;
@@ -7319,12 +7371,16 @@ static int __hns_roce_request_irq(struct hns_roce_dev *hr_dev, int irq_num,
	return 0;

err_request_failed:
-	for (j -= 1; j >= 0; j--)
-		if (j < other_num)
+	for (j -= 1; j >= 0; j--) {
+		if (j < other_num) {
			free_irq(hr_dev->irq[j], hr_dev);
-		else
+			continue;
+		}
		free_irq(eq_table->eq[j - other_num].irq,
			 &eq_table->eq[j - other_num]);
+		if (j < other_num + comp_num)
+			tasklet_kill(&eq_table->eq[j - other_num].tasklet);
+	}

err_kzalloc_failed:
	for (i -= 1; i >= 0; i--)
@@ -7345,8 +7401,12 @@ static void __hns_roce_free_irq(struct hns_roce_dev *hr_dev)
	for (i = 0; i < hr_dev->caps.num_other_vectors; i++)
		free_irq(hr_dev->irq[i], hr_dev);

-	for (i = 0; i < eq_num; i++)
+	for (i = 0; i < eq_num; i++) {
		free_irq(hr_dev->eq_table.eq[i].irq, &hr_dev->eq_table.eq[i]);
+		if (i < hr_dev->caps.num_comp_vectors)
+			tasklet_kill(&hr_dev->eq_table.eq[i].tasklet);
+	}


	for (i = 0; i < irq_num; i++)
		kfree(hr_dev->irq_names[i]);
@@ -7537,16 +7597,24 @@ static int hns_roce_v2_config_scc_param(struct hns_roce_dev *hr_dev,
	hns_roce_cmq_setup_basic_desc(&desc, scc_opcode[algo], false);
	pdata = &hr_dev->port_data[port_num - 1];
	scc_param = &pdata->scc_param[algo];
+	mutex_lock(&scc_param->scc_mutex);
	memcpy(&desc.data, scc_param, sizeof(scc_param->param));

	ret = hns_roce_cmq_send(hr_dev, &desc, 1);
-	if (ret)
+	if (ret) {
		ibdev_err_ratelimited(&hr_dev->ib_dev,
				      "failed to configure scc param, opcode: 0x%x, ret = %d.\n",
			le16_to_cpu(desc.opcode), ret);
		return ret;
	}

+	memcpy(scc_param->latest_param, &desc.data,
+	       sizeof(scc_param->latest_param));
+	mutex_unlock(&scc_param->scc_mutex);

	return 0;
}

static int hns_roce_v2_query_scc_param(struct hns_roce_dev *hr_dev,
				       u8 port_num, enum hns_roce_scc_algo algo)
{
@@ -7580,7 +7648,11 @@ static int hns_roce_v2_query_scc_param(struct hns_roce_dev *hr_dev,

	pdata = &hr_dev->port_data[port_num - 1];
	scc_param = &pdata->scc_param[algo];
-	memcpy(scc_param, &desc.data, sizeof(scc_param->param));
+	mutex_lock(&scc_param->scc_mutex);
+	memcpy(scc_param->param, &desc.data, sizeof(scc_param->param));
+	memcpy(scc_param->latest_param, &desc.data,
+	       sizeof(scc_param->latest_param));
+	mutex_unlock(&scc_param->scc_mutex);

	return 0;
}
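
Both the configure and query paths now hold scc_param->scc_mutex across the copies into and out of the command descriptor, so a sysfs store cannot interleave with a firmware readback and leave @param and @latest_param describing different configurations. A userspace model of the locking shape; the array size 4 mirrors HNS_ROCE_SCC_PARAM_SIZE, and the firmware command is assumed to succeed.

/*
 * Model of the scc_mutex added around @param/@latest_param: snapshot
 * the staged params and record what was sent under one lock, so
 * readers never observe a half-updated pair.
 */
#include <pthread.h>
#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define SCC_PARAM_SIZE 4

static pthread_mutex_t scc_mutex = PTHREAD_MUTEX_INITIALIZER;
static uint32_t param[SCC_PARAM_SIZE];        /* staged by sysfs store */
static uint32_t latest_param[SCC_PARAM_SIZE]; /* last config sent to FW */

static int config_scc_param(void)
{
	uint32_t desc[SCC_PARAM_SIZE];

	pthread_mutex_lock(&scc_mutex);
	memcpy(desc, param, sizeof(desc)); /* snapshot under the lock */
	/* ... firmware command would go here; assume success ... */
	memcpy(latest_param, desc, sizeof(latest_param));
	pthread_mutex_unlock(&scc_mutex);
	return 0;
}

int main(void)
{
	param[0] = 42;
	config_scc_param();
	printf("latest_param[0] = %u\n", latest_param[0]);
	return 0;
}
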
@@ -8013,6 +8085,20 @@ static void hns_roce_v2_reset_notify_user(struct hns_roce_dev *hr_dev)
	mutex_unlock(&hr_dev->uctx_list_mutex);
}

static void hns_roce_v2_reset_notify_cmd(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_cmdq *hr_cmd = &hr_dev->cmd;
	int i;

	if (!hr_dev->cmd_mod)
		return;

	for (i = 0; i < hr_cmd->max_cmds; i++) {
		hr_cmd->context[i].result = -EBUSY;
		complete(&hr_cmd->context[i].done);
	}
}
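
During reset, any thread sleeping on a mailbox completion would otherwise wait out the full command timeout. hns_roce_v2_reset_notify_cmd() completes every outstanding context with -EBUSY up front, so waiters return promptly once reset begins. A runnable pthreads model of the same idea, with one waiter standing in for hr_cmd->context[]; compile with -pthread.

/*
 * Model of completing pending command contexts during reset: a waiter
 * blocks on its completion; the reset path marks it -EBUSY and
 * signals, so nobody waits out the timeout.
 */
#include <pthread.h>
#include <stdio.h>

struct cmd_context {
	pthread_mutex_t lock;
	pthread_cond_t done;
	int completed;
	int result;
};

static struct cmd_context ctx = {
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 0, 0
};

static void *wait_for_cmd(void *arg)
{
	pthread_mutex_lock(&ctx.lock);
	while (!ctx.completed)
		pthread_cond_wait(&ctx.done, &ctx.lock);
	printf("cmd finished with result %d\n", ctx.result);
	pthread_mutex_unlock(&ctx.lock);
	return NULL;
}

static void reset_notify_cmd(void)
{
	pthread_mutex_lock(&ctx.lock);
	ctx.result = -16; /* -EBUSY, as in hns_roce_v2_reset_notify_cmd() */
	ctx.completed = 1;
	pthread_cond_signal(&ctx.done);
	pthread_mutex_unlock(&ctx.lock);
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, wait_for_cmd, NULL);
	reset_notify_cmd(); /* device reset path */
	pthread_join(t, NULL);
	return 0;
}
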

static int hns_roce_hw_v2_reset_notify_down(struct hnae3_handle *handle)
{
	struct hns_roce_dev *hr_dev;
@@ -8036,6 +8122,9 @@ static int hns_roce_hw_v2_reset_notify_down(struct hnae3_handle *handle)

	hr_dev->state = HNS_ROCE_DEVICE_STATE_RST_DOWN;

	/* Complete the CMDQ event in advance during the reset. */
	hns_roce_v2_reset_notify_cmd(hr_dev);

	return 0;
}
