Commit a905628c authored by Weibo Zhao's avatar Weibo Zhao Committed by JiangShui
Browse files

hns3 udma: support of flush cqe

driver inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I85R2F


CVE: NA

----------------------------------------------------------

The driver sends sq_pi to the hardware so that the hardware
can report flush CQEs. The user-mode driver invokes user_ctl
to instruct the kernel-mode driver to execute flush_cqe.
The kernel-mode driver passes sq_pi to the hardware via the
modify-QP-to-error command (CMD).

Signed-off-by: default avatarWeibo Zhao <zhaoweibo3@huawei.com>
parent e44392a8
Loading
Loading
Loading
Loading
+10 −0
Original line number Diff line number Diff line
@@ -148,4 +148,14 @@ struct udma_create_ctx_resp {
	uint32_t max_jfs_sge;
};

/* Parameters copied from user space for a UDMA_USER_CTL_FLUSH_CQE request. */
struct flush_cqe_param {
	uint32_t qpn;			/* number of the QP whose CQEs should be flushed */
	uint32_t sq_producer_idx;	/* SQ producer index (sq_pi) to report to hardware */
};

/*
 * Opcodes accepted by udma_user_ctl(); used as indices into the
 * g_udma_user_ctl_opcodes dispatch table.
 */
enum udma_user_ctl_handlers {
	UDMA_USER_CTL_FLUSH_CQE,	/* user-space request to flush CQEs of a QP */
	UDMA_OPCODE_NUM,		/* number of opcodes; also the table bound */
};

#endif /* _UDMA_ABI_H */
+67 −0
Original line number Diff line number Diff line
@@ -317,6 +317,72 @@ static int udma_query_device_status(const struct ubcore_device *dev,
	return 0;
}

/*
 * udma_user_ctl_flush_cqe() - handle a UDMA_USER_CTL_FLUSH_CQE request.
 * @uctx: user context the request arrived on.
 * @in: request input; in->addr points to a user-space struct flush_cqe_param.
 * @out: request output (unused here).
 * @udrv_data: driver-private data (unused here).
 *
 * Copies the flush parameters from user space, looks up the QP by number
 * and asks hardware to generate flush CQEs for it via udma_flush_cqe().
 * The QP refcount is held across the mailbox command so the QP cannot be
 * freed underneath it.
 *
 * Return: 0 on success, -EFAULT on bad user buffer, -EINVAL for an unknown
 * QPN, or the error returned by udma_flush_cqe().
 */
int udma_user_ctl_flush_cqe(struct ubcore_ucontext *uctx, struct ubcore_user_ctl_in *in,
			    struct ubcore_user_ctl_out *out,
			    struct ubcore_udrv_priv *udrv_data)
{
	struct udma_dev *udma_device = to_udma_dev(uctx->ub_dev);
	struct flush_cqe_param fcp;
	struct udma_qp *udma_qp;
	uint32_t sq_pi;
	uint32_t qpn;
	int ret;

	/*
	 * copy_from_user() returns the number of bytes that could NOT be
	 * copied (an unsigned long), not an error code — do not log it as
	 * "ret"; any non-zero value simply means -EFAULT.
	 */
	if (copy_from_user(&fcp, (void *)in->addr,
			   sizeof(struct flush_cqe_param)) != 0) {
		dev_err(udma_device->dev,
			"copy_from_user failed in flush_cqe.\n");
		return -EFAULT;
	}
	sq_pi = fcp.sq_producer_idx;
	qpn = fcp.qpn;

	/* Look up the QP and pin it before dropping the table lock. */
	xa_lock(&udma_device->qp_table.xa);
	udma_qp = (struct udma_qp *)xa_load(&udma_device->qp_table.xa, qpn);
	if (!udma_qp) {
		dev_err(udma_device->dev, "get qp(0x%x) error.\n", qpn);
		xa_unlock(&udma_device->qp_table.xa);
		return -EINVAL;
	}
	refcount_inc(&udma_qp->refcount);
	xa_unlock(&udma_device->qp_table.xa);

	ret = udma_flush_cqe(udma_device, udma_qp, sq_pi);

	/* Drop our reference; wake the destroyer if we held the last one. */
	if (refcount_dec_and_test(&udma_qp->refcount))
		complete(&udma_qp->free);

	return ret;
}

/* Common signature for all user_ctl opcode handlers. */
typedef int (*udma_user_ctl_opcode)(struct ubcore_ucontext *uctx,
				    struct ubcore_user_ctl_in *in,
				    struct ubcore_user_ctl_out *out,
				    struct ubcore_udrv_priv *udrv_data);

/*
 * Dispatch table indexed by enum udma_user_ctl_handlers; unset slots are
 * NULL and rejected by udma_user_ctl().
 */
static udma_user_ctl_opcode g_udma_user_ctl_opcodes[] = {
	[UDMA_USER_CTL_FLUSH_CQE] = udma_user_ctl_flush_cqe,
};

int udma_user_ctl(struct ubcore_user_ctl *k_user_ctl)
{
	struct ubcore_udrv_priv udrv_data = k_user_ctl->udrv_data;
	struct ubcore_user_ctl_out out = k_user_ctl->out;
	struct ubcore_ucontext *uctx = k_user_ctl->uctx;
	struct ubcore_user_ctl_in in = k_user_ctl->in;
	struct udma_dev *udma_device;

	udma_device = to_udma_dev(uctx->ub_dev);
	if (in.opcode >= UDMA_OPCODE_NUM ||
	    !g_udma_user_ctl_opcodes[in.opcode]) {
		dev_err(udma_device->dev, "bad user_ctl opcode: 0x%x.\n",
			(int)in.opcode);
		return -EINVAL;
	}
	return g_udma_user_ctl_opcodes[in.opcode](uctx, &in, &out, &udrv_data);
}

static struct ubcore_ops g_udma_dev_ops = {
	.owner = THIS_MODULE,
	.abi_version = 1,
@@ -347,6 +413,7 @@ static struct ubcore_ops g_udma_dev_ops = {
	.create_tp = udma_create_tp,
	.modify_tp = udma_modify_tp,
	.destroy_tp = udma_destroy_tp,
	.user_ctl = udma_user_ctl,
};

static void udma_cleanup_uar_table(struct udma_dev *dev)
+51 −0
Original line number Diff line number Diff line
@@ -1628,6 +1628,45 @@ void udma_cleanup_qp_table(struct udma_dev *dev)
	kfree(dev->qp_table.idx_table.spare_idx);
}

/*
 * udma_flush_cqe() - move a QP to the error state and program its SQ
 * producer index into the QP context so hardware can report flush CQEs.
 * @udma_dev: device owning the QP.
 * @udma_qp: QP to flush; its state is set to QPS_ERR here.
 * @sq_pi: SQ producer index to write into the QP context.
 *
 * Builds a modify-QPC mailbox command: the first half of the mailbox buffer
 * is the new context, the second half is the mask (0xff = "do not modify";
 * cleared bits select the fields being updated).
 *
 * Return: 0 on success, or a negative error from mailbox alloc / command.
 */
int udma_flush_cqe(struct udma_dev *udma_dev, struct udma_qp *udma_qp,
		   uint32_t sq_pi)
{
	struct udma_qp_context *qp_context;
	struct udma_cmd_mailbox *mailbox;
	struct udma_qp_context *qpc_mask;
	struct udma_cmq_desc desc;
	struct udma_mbox *mb;
	int ret;

	udma_qp->state = QPS_ERR;

	mailbox = udma_alloc_cmd_mailbox(udma_dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	/* Layout: [0] = new context values, [1] = modification mask. */
	qp_context = (struct udma_qp_context *)mailbox->buf;
	qpc_mask = (struct udma_qp_context *)mailbox->buf + 1;
	/* All-ones mask = modify nothing; fields to change are cleared below.
	 * NOTE(review): qp_context itself is not zeroed here — presumably
	 * udma_alloc_cmd_mailbox() returns a zeroed buffer; confirm.
	 */
	memset(qpc_mask, 0xff, sizeof(struct udma_qp_context));

	/* Select + set the QP state field. */
	udma_reg_write(qp_context, QPC_QP_ST, udma_qp->state);
	udma_reg_clear(qpc_mask, QPC_QP_ST);

	/* Select + set the SQ producer index so hardware knows how far to flush. */
	udma_reg_write(qp_context, QPC_SQ_PRODUCER_IDX, sq_pi);
	udma_reg_clear(qpc_mask, QPC_SQ_PRODUCER_IDX);

	mb = (struct udma_mbox *)desc.data;
	udma_cmq_setup_basic_desc(&desc, UDMA_OPC_POST_MB, false);
	mbox_desc_init(mb, mailbox->dma, 0, udma_qp->qpn, UDMA_CMD_MODIFY_QPC);

	ret = udma_cmd_mbox(udma_dev, &desc, UDMA_CMD_TIMEOUT_MSECS, 0);
	if (ret)
		dev_err(udma_dev->dev, "flush cqe qp(0x%llx) cmd error(%d).\n",
			udma_qp->qpn, ret);

	udma_free_cmd_mailbox(udma_dev, mailbox);

	return ret;
}

void udma_qp_event(struct udma_dev *udma_dev, uint32_t qpn, int event_type)
{
	struct device *dev = udma_dev->dev;
@@ -1644,6 +1683,18 @@ void udma_qp_event(struct udma_dev *udma_dev, uint32_t qpn, int event_type)
		return;
	}

	if (event_type == UDMA_EVENT_TYPE_JFR_LAST_WQE_REACH ||
	    event_type == UDMA_EVENT_TYPE_WQ_CATAS_ERROR ||
	    event_type == UDMA_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR ||
	    event_type == UDMA_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR) {
		qp->state = QPS_ERR;

		if (qp->sdb.virt_addr)
			qp->sq.head = *(int *)(qp->sdb.virt_addr);

		udma_flush_cqe(udma_dev, qp, qp->sq.head);
	}

	if (qp->event)
		qp->event(qp, (enum udma_event)event_type);

+2 −0
Original line number Diff line number Diff line
@@ -284,6 +284,8 @@ void init_jetty_x_qpn_bitmap(struct udma_dev *dev,
			     uint32_t jetty_x_shift, uint32_t prefix,
			     uint32_t jid);
void clean_jetty_x_qpn_bitmap(struct udma_qpn_bitmap *qpn_map);
int udma_flush_cqe(struct udma_dev *udma_dev, struct udma_qp *udma_qp,
		   uint32_t sq_pi);
void udma_qp_event(struct udma_dev *udma_dev, uint32_t qpn, int event_type);
void copy_send_jfc(struct udma_qp *from_qp, struct udma_qp *to_qp);