Commit c1037783 authored by Longfang Liu's avatar Longfang Liu Committed by JiangShui
Browse files

crypto: hisilicon/qm - fix EQ/AEQ interrupt issue

mainline inclusion
from mainline-v6.7-rc1
commit 5acab6eb592387191c1bb745ba9b815e1e076db5
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I8FI6W
CVE: NA

Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=5acab6eb592387191c1bb745ba9b815e1e076db5



----------------------------------------------------------------------

During a HiSilicon accelerator live-migration operation, the migration
driver triggers an EQ/AEQ doorbell at the end of the migration in order
to prevent EQ/AEQ interrupts from being lost.

However, this doorbell may cause the same EQ/AEQ event to be signalled
twice. To ensure that the EQ/AEQ interrupt handlers still behave
correctly in that case, update them so that they tolerate such repeated
interrupt events.

Fixes: b0eed085 ("hisi_acc_vfio_pci: Add support for VFIO live migration")
Signed-off-by: default avatarLongfang Liu <liulongfang@huawei.com>
Signed-off-by: default avatarHerbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: default avatarJiangShui Yang <yangjiangshui@h-partners.com>
parent 56d3645e
Loading
Loading
Loading
Loading
+40 −65
Original line number Diff line number Diff line
@@ -1030,47 +1030,15 @@ static void qm_poll_req_cb(struct hisi_qp *qp)
	qm_db(qm, qp->qp_id, QM_DOORBELL_CMD_CQ, qp->qp_status.cq_head, 1);
}

/*
 * Drain completed event-queue entries (EQEs) for one poll cycle.
 *
 * Walks the EQ ring starting at qm->status.eq_head, recording the
 * completion-queue number (CQN) of each valid EQE into
 * poll_data->qp_finish_id[]. An EQE is valid while its phase bit
 * matches qm->status.eqc_phase; the phase flips each time the head
 * wraps past the end of the ring (eq_depth - 1).
 *
 * At most (eq_depth >> 1) - 1 entries are consumed per call so a
 * single invocation cannot monopolise the queue.
 *
 * Before returning, rings the EQ doorbell with the updated head to
 * re-enable/acknowledge the EQ interrupt.
 *
 * Returns the number of EQEs consumed (qp ids stored in
 * poll_data->qp_finish_id[0..ret-1]).
 */
static int qm_get_complete_eqe_num(struct hisi_qm_poll_data *poll_data)
{
	struct hisi_qm *qm = poll_data->qm;
	struct qm_eqe *eqe = qm->eqe + qm->status.eq_head;
	u16 eq_depth = qm->eq_depth;
	int eqe_num = 0;
	u16 cqn;

	while (QM_EQE_PHASE(eqe) == qm->status.eqc_phase) {
		/* Low bits of dw0 hold the completion queue number. */
		cqn = le32_to_cpu(eqe->dw0) & QM_EQE_CQN_MASK;
		poll_data->qp_finish_id[eqe_num] = cqn;
		eqe_num++;

		if (qm->status.eq_head == eq_depth - 1) {
			/* Ring wrapped: flip the expected phase bit. */
			qm->status.eqc_phase = !qm->status.eqc_phase;
			eqe = qm->eqe;
			qm->status.eq_head = 0;
		} else {
			eqe++;
			qm->status.eq_head++;
		}

		/* Budget: consume at most half the ring (minus one) per call. */
		if (eqe_num == (eq_depth >> 1) - 1)
			break;
	}

	/* Acknowledge consumed entries and re-arm the EQ interrupt. */
	qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0);

	return eqe_num;
}

static void qm_work_process(struct work_struct *work)
{
	struct hisi_qm_poll_data *poll_data =
		container_of(work, struct hisi_qm_poll_data, work);
	struct hisi_qm *qm = poll_data->qm;
	u16 eqe_num = poll_data->eqe_num;
	struct hisi_qp *qp;
	int eqe_num, i;
	int i;

	/* Get qp id of completed tasks and re-enable the interrupt. */
	eqe_num = qm_get_complete_eqe_num(poll_data);
	for (i = eqe_num - 1; i >= 0; i--) {
		qp = &qm->qp_array[poll_data->qp_finish_id[i]];
		if (unlikely(atomic_read(&qp->qp_status.flags) == QP_STOP))
@@ -1086,39 +1054,55 @@ static void qm_work_process(struct work_struct *work)
	}
}

static bool do_qm_eq_irq(struct hisi_qm *qm)
static void qm_get_complete_eqe_num(struct hisi_qm *qm)
{
	struct qm_eqe *eqe = qm->eqe + qm->status.eq_head;
	struct hisi_qm_poll_data *poll_data;
	u16 cqn;
	struct hisi_qm_poll_data *poll_data = NULL;
	u16 eq_depth = qm->eq_depth;
	u16 cqn, eqe_num = 0;

	if (!readl(qm->io_base + QM_VF_EQ_INT_SOURCE))
		return false;
	if (QM_EQE_PHASE(eqe) != qm->status.eqc_phase) {
		atomic64_inc(&qm->debug.dfx.err_irq_cnt);
		qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0);
		return;
	}

	if (QM_EQE_PHASE(eqe) == qm->status.eqc_phase) {
	cqn = le32_to_cpu(eqe->dw0) & QM_EQE_CQN_MASK;
	if (unlikely(cqn >= qm->qp_num))
		return;
	poll_data = &qm->poll_data[cqn];
		queue_work(qm->wq, &poll_data->work);

		return true;
	while (QM_EQE_PHASE(eqe) == qm->status.eqc_phase) {
		cqn = le32_to_cpu(eqe->dw0) & QM_EQE_CQN_MASK;
		poll_data->qp_finish_id[eqe_num] = cqn;
		eqe_num++;

		if (qm->status.eq_head == eq_depth - 1) {
			qm->status.eqc_phase = !qm->status.eqc_phase;
			eqe = qm->eqe;
			qm->status.eq_head = 0;
		} else {
			eqe++;
			qm->status.eq_head++;
		}

	return false;
		if (eqe_num == (eq_depth >> 1) - 1)
			break;
	}

	poll_data->eqe_num = eqe_num;
	queue_work(qm->wq, &poll_data->work);
	qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0);
}

static irqreturn_t qm_eq_irq(int irq, void *data)
{
	struct hisi_qm *qm = data;
	bool ret;

	ret = do_qm_eq_irq(qm);
	if (ret)
		return IRQ_HANDLED;
	/* Get qp id of completed tasks and re-enable the interrupt */
	qm_get_complete_eqe_num(qm);

	atomic64_inc(&qm->debug.dfx.err_irq_cnt);
	qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0);

	return IRQ_NONE;
	return IRQ_HANDLED;
}

static irqreturn_t qm_mb_cmd_irq(int irq, void *data)
@@ -1200,6 +1184,8 @@ static irqreturn_t qm_aeq_thread(int irq, void *data)
	u16 aeq_depth = qm->aeq_depth;
	u32 type, qp_id;

	atomic64_inc(&qm->debug.dfx.aeq_irq_cnt);

	while (QM_AEQE_PHASE(aeqe) == qm->status.aeqc_phase) {
		type = (le32_to_cpu(aeqe->dw0) >> QM_AEQE_TYPE_SHIFT) &
			QM_AEQE_TYPE_MASK;
@@ -1238,17 +1224,6 @@ static irqreturn_t qm_aeq_thread(int irq, void *data)
	return IRQ_HANDLED;
}

/*
 * Hard-irq half of the AEQ (abnormal event queue) threaded interrupt.
 *
 * Counts the interrupt, then checks QM_VF_AEQ_INT_SOURCE to verify the
 * AEQ actually raised this interrupt (the line may be shared). If not,
 * returns IRQ_NONE; otherwise defers the real processing to the
 * threaded handler (qm_aeq_thread) via IRQ_WAKE_THREAD.
 */
static irqreturn_t qm_aeq_irq(int irq, void *data)
{
	struct hisi_qm *qm = data;

	atomic64_inc(&qm->debug.dfx.aeq_irq_cnt);
	/* Not our interrupt source — let other handlers on the line run. */
	if (!readl(qm->io_base + QM_VF_AEQ_INT_SOURCE))
		return IRQ_NONE;

	return IRQ_WAKE_THREAD;
}

static void qm_init_qp_status(struct hisi_qp *qp)
{
	struct hisi_qp_status *qp_status = &qp->qp_status;
@@ -5218,8 +5193,8 @@ static int qm_register_aeq_irq(struct hisi_qm *qm)
		return 0;

	irq_vector = val & QM_IRQ_VECTOR_MASK;
	ret = request_threaded_irq(pci_irq_vector(pdev, irq_vector), qm_aeq_irq,
						   qm_aeq_thread, 0, qm->dev_name, qm);
	ret = request_threaded_irq(pci_irq_vector(pdev, irq_vector), NULL,
						   qm_aeq_thread, IRQF_ONESHOT, qm->dev_name, qm);
	if (ret)
		dev_err(&pdev->dev, "failed to request eq irq, ret = %d", ret);

+1 −0
Original line number Diff line number Diff line
@@ -305,6 +305,7 @@ struct hisi_qm_poll_data {
	struct hisi_qm *qm;
	struct work_struct work;
	u16 *qp_finish_id;
	u16 eqe_num;
};

/**