Unverified Commit b10f5eaa authored by openeuler-ci-bot's avatar openeuler-ci-bot Committed by Gitee
Browse files

!1032 bugfix: fix lost interrupts after live migration

Merge Pull Request from: @xiao_jiang_shui 
 
Live migration can cause service interrupts to be lost or dropped. To keep
the service running correctly, the live-migration driver must resend the
affected interrupt.

The QM service driver is also updated so that the resent interrupt is
guaranteed to be received and handled normally.

issue: https://gitee.com/openeuler/kernel/issues/I7BTMW 
 
Link: https://gitee.com/openeuler/kernel/pulls/1032

 

Reviewed-by: default avatarYang Shen <shenyang39@huawei.com>
Signed-off-by: default avatarJialin Zhang <zhangjialin11@huawei.com>
parents eab20e19 a60b29d3
Loading
Loading
Loading
Loading
+41 −25
Original line number Diff line number Diff line
@@ -173,6 +173,27 @@ static int qm_config_get(struct hisi_qm *qm, u64 *base, u8 cmd, u16 queue)
	return 0;
}

/*
 * Ring a hardware doorbell for queue @qn.
 *
 * SQ/CQ doorbells are written into the per-queue doorbell region
 * (db_io_base + qn * db_interval); EQ/AEQ doorbells go through the
 * main MMIO region.  The 64-bit doorbell value packs the queue number,
 * command, random field (always 0 here), ring index and priority.
 */
static void qm_db(struct hisi_qm *qm, u16 qn, u8 cmd,
	u16 index, u8 priority)
{
	void __iomem *db_addr;
	u16 randata = 0;
	u64 db_val;

	if (cmd == QM_DOORBELL_CMD_SQ || cmd == QM_DOORBELL_CMD_CQ)
		db_addr = qm->db_io_base + (u64)qn * qm->db_interval +
			  QM_DOORBELL_SQ_CQ_BASE_V2;
	else
		db_addr = qm->io_base + QM_DOORBELL_EQ_AEQ_BASE_V2;

	db_val = (u64)qn;
	db_val |= (u64)cmd << QM_DB_CMD_SHIFT_V2;
	db_val |= (u64)randata << QM_DB_RAND_SHIFT_V2;
	db_val |= (u64)index << QM_DB_INDEX_SHIFT_V2;
	db_val |= (u64)priority << QM_DB_PRIORITY_SHIFT_V2;

	writeq(db_val, db_addr);
}

/*
 * Each state Reg is checked 100 times,
 * with a delay of 100 microseconds after each check
@@ -438,6 +459,19 @@ static int qm_rw_regs_write(struct hisi_qm *qm, struct acc_vf_data *vf_data)
	return 0;
}

/*
 * Re-ring the EQ and AEQ doorbells at the head positions captured in the
 * migration data, so that an event pending at save time is raised again
 * (avoids losing an interrupt across live migration).
 * NOTE(review): assumes the low 16 bits of qm_eqc_dw[0]/qm_aeqc_dw[0]
 * hold the saved ring head — confirm against the XQC layout.
 */
static void vf_qm_xeqc_save(struct hisi_qm *qm,
	struct acc_vf_migration *acc_vf_dev)
{
	struct acc_vf_data *vf_data = acc_vf_dev->vf_data;
	u16 head;

	/* Event queue doorbell at the saved head. */
	head = vf_data->qm_eqc_dw[0] & 0xFFFF;
	qm_db(qm, 0, QM_DOORBELL_CMD_EQ, head, 0);

	/* Asynchronous event queue doorbell at the saved head. */
	head = vf_data->qm_aeqc_dw[0] & 0xFFFF;
	qm_db(qm, 0, QM_DOORBELL_CMD_AEQ, head, 0);
}

/*
 * the vf QM have unbind from host, insmod in the VM
 * so, qm just have the addr from pci dev
@@ -461,12 +495,12 @@ static int vf_migration_data_store(struct hisi_qm *qm,
	 * every Reg is 32 bit, the dma address is 64 bit
	 * so, the dma address is store in the Reg2 and Reg1
	 */
	vf_data->eqe_dma = vf_data->qm_eqc_dw[2];
	vf_data->eqe_dma = vf_data->qm_eqc_dw[QM_XQC_ADDR_HIGH];
	vf_data->eqe_dma <<= QM_XQC_ADDR_OFFSET;
	vf_data->eqe_dma |= vf_data->qm_eqc_dw[1];
	vf_data->aeqe_dma = vf_data->qm_aeqc_dw[2];
	vf_data->eqe_dma |= vf_data->qm_eqc_dw[QM_XQC_ADDR_LOW];
	vf_data->aeqe_dma = vf_data->qm_aeqc_dw[QM_XQC_ADDR_HIGH];
	vf_data->aeqe_dma <<= QM_XQC_ADDR_OFFSET;
	vf_data->aeqe_dma |= vf_data->qm_aeqc_dw[1];
	vf_data->aeqe_dma |= vf_data->qm_aeqc_dw[QM_XQC_ADDR_LOW];

	/* Through SQC_BT/CQC_BT to get sqc and cqc address */
	ret = qm_config_get(qm, &vf_data->sqc_dma, QM_MB_CMD_SQC_BT, 0);
@@ -481,6 +515,9 @@ static int vf_migration_data_store(struct hisi_qm *qm,
		return -EINVAL;
	}

	/* Save eqc and aeqc interrupt information */
	vf_qm_xeqc_save(qm, acc_vf_dev);

	return 0;
}

@@ -493,27 +530,6 @@ static void qm_dev_cmd_init(struct hisi_qm *qm)
	writel(0x0, qm->io_base + QM_IFC_INT_MASK);
}

/*
 * Ring a hardware doorbell for queue @qn: SQ/CQ doorbells go through the
 * per-queue doorbell region, EQ/AEQ doorbells through the main MMIO region.
 * NOTE(review): duplicate of the qm_db() definition appearing earlier in
 * this diff — the patch relocates the function above its new caller and
 * this copy is the one being removed; keep only one in the merged file.
 */
static void qm_db(struct hisi_qm *qm, u16 qn, u8 cmd,
	u16 index, u8 priority)
{
	void __iomem *io_base = qm->io_base;
	u16 randata = 0;
	u64 doorbell;

	if (cmd == QM_DOORBELL_CMD_SQ || cmd == QM_DOORBELL_CMD_CQ)
		io_base = qm->db_io_base + (u64)qn * qm->db_interval +
			  QM_DOORBELL_SQ_CQ_BASE_V2;
	else
		io_base += QM_DOORBELL_EQ_AEQ_BASE_V2;

	/* Pack queue number, command, random field, index and priority. */
	doorbell = qn | ((u64)cmd << QM_DB_CMD_SHIFT_V2) |
		   ((u64)randata << QM_DB_RAND_SHIFT_V2) |
		   ((u64)index << QM_DB_INDEX_SHIFT_V2) |
		   ((u64)priority << QM_DB_PRIORITY_SHIFT_V2);

	writeq(doorbell, io_base);
}

static void vf_qm_fun_restart(struct hisi_qm *qm,
	struct acc_vf_migration *acc_vf_dev)
{
+2 −0
Original line number Diff line number Diff line
@@ -79,6 +79,8 @@
#define QM_REG_ADDR_OFFSET		0x0004

#define QM_XQC_ADDR_OFFSET		32U
#define QM_XQC_ADDR_LOW		0x1
#define QM_XQC_ADDR_HIGH	0x2
#define QM_VF_AEQ_INT_MASK		0x0004
#define QM_VF_EQ_INT_MASK		0x000c
#define QM_IFC_INT_SOURCE_V		0x0020
+42 −65
Original line number Diff line number Diff line
@@ -1028,47 +1028,15 @@ static void qm_poll_req_cb(struct hisi_qp *qp)
	qm_db(qm, qp->qp_id, QM_DOORBELL_CMD_CQ, qp->qp_status.cq_head, 1);
}

/*
 * Drain completed event-queue entries: record the CQ number of each valid
 * EQE into poll_data->qp_finish_id, advance the EQ head (flipping the
 * phase bit on wrap-around), then acknowledge the new head via the EQ
 * doorbell.  A single pass is capped at half the queue depth.
 *
 * Returns the number of entries collected.
 */
static int qm_get_complete_eqe_num(struct hisi_qm_poll_data *poll_data)
{
	struct hisi_qm *qm = poll_data->qm;
	struct qm_eqe *eqe = qm->eqe + qm->status.eq_head;
	u16 eq_depth = qm->eq_depth;
	int count = 0;

	/* An EQE is valid while its phase bit matches the software phase. */
	while (QM_EQE_PHASE(eqe) == qm->status.eqc_phase) {
		poll_data->qp_finish_id[count] =
			le32_to_cpu(eqe->dw0) & QM_EQE_CQN_MASK;
		count++;

		if (qm->status.eq_head == eq_depth - 1) {
			/* Wrapped: restart at the base and flip the phase. */
			qm->status.eqc_phase = !qm->status.eqc_phase;
			eqe = qm->eqe;
			qm->status.eq_head = 0;
		} else {
			eqe++;
			qm->status.eq_head++;
		}

		/* Bound the batch so one pass cannot monopolize the queue. */
		if (count == (eq_depth >> 1) - 1)
			break;
	}

	qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0);

	return count;
}

static void qm_work_process(struct work_struct *work)
{
	struct hisi_qm_poll_data *poll_data =
		container_of(work, struct hisi_qm_poll_data, work);
	struct hisi_qm *qm = poll_data->qm;
	u16 eqe_num = poll_data->eqe_num;
	struct hisi_qp *qp;
	int eqe_num, i;
	int i;

	/* Get qp id of completed tasks and re-enable the interrupt. */
	eqe_num = qm_get_complete_eqe_num(poll_data);
	for (i = eqe_num - 1; i >= 0; i--) {
		qp = &qm->qp_array[poll_data->qp_finish_id[i]];
		if (unlikely(atomic_read(&qp->qp_status.flags) == QP_STOP))
@@ -1084,39 +1052,57 @@ static void qm_work_process(struct work_struct *work)
	}
}

/*
 * NOTE(review): this span is a diff extraction that lost its +/- markers —
 * the removed do_qm_eq_irq() body and the added qm_get_complete_eqe_num()
 * body are interleaved below (two signatures, duplicate declarations,
 * mixed return statements).  Do not build from this text; restore the
 * clean new-side version from the original patch (Gitee PR !1032).
 * The intended new behavior: ack the doorbell and bail if no valid EQE;
 * otherwise drain valid EQEs into per-CQN poll_data, ack the head, and
 * queue the poll work.
 */
static bool do_qm_eq_irq(struct hisi_qm *qm)
static void qm_get_complete_eqe_num(struct hisi_qm *qm)
{
	struct qm_eqe *eqe = qm->eqe + qm->status.eq_head;
	struct hisi_qm_poll_data *poll_data;
	u16 cqn;
	struct hisi_qm_poll_data *poll_data = NULL;
	u16 eq_depth = qm->eq_depth;
	u16 cqn, eqe_num = 0;

	if (!readl(qm->io_base + QM_VF_EQ_INT_SOURCE))
		return false;
	/* New side: no valid EQE — still ring the EQ doorbell to re-enable. */
	if (QM_EQE_PHASE(eqe) != qm->status.eqc_phase) {
		qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0);
		return;
	}

	if (QM_EQE_PHASE(eqe) == qm->status.eqc_phase) {
	cqn = le32_to_cpu(eqe->dw0) & QM_EQE_CQN_MASK;
	if (cqn >= qm->qp_num)
		return;
	poll_data = &qm->poll_data[cqn];
		queue_work(qm->wq, &poll_data->work);

		return true;
	while (QM_EQE_PHASE(eqe) == qm->status.eqc_phase) {
		cqn = le32_to_cpu(eqe->dw0) & QM_EQE_CQN_MASK;
		poll_data->qp_finish_id[eqe_num] = cqn;
		eqe_num++;

		/* Wrap the head and flip the phase at the end of the ring. */
		if (qm->status.eq_head == eq_depth - 1) {
			qm->status.eqc_phase = !qm->status.eqc_phase;
			eqe = qm->eqe;
			qm->status.eq_head = 0;
		} else {
			eqe++;
			qm->status.eq_head++;
		}

	return false;
		if (eqe_num == (eq_depth >> 1) - 1)
			break;
	}

	qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0);

	/* New side: hand the collected count to the poll worker. */
	if (poll_data) {
		poll_data->eqe_num = eqe_num;
		queue_work(qm->wq, &poll_data->work);
	}
}

/*
 * EQ interrupt handler.
 * NOTE(review): removed and added diff lines are interleaved here without
 * +/- markers — the old do_qm_eq_irq()-based body is mixed with the new
 * body that calls qm_get_complete_eqe_num() directly and unconditionally
 * returns IRQ_HANDLED.  Reconcile against the original patch before use.
 */
static irqreturn_t qm_eq_irq(int irq, void *data)
{
	struct hisi_qm *qm = data;
	bool ret;

	ret = do_qm_eq_irq(qm);
	if (ret)
		return IRQ_HANDLED;
	/* Get qp id of completed tasks and re-enable the interrupt */
	qm_get_complete_eqe_num(qm);

	atomic64_inc(&qm->debug.dfx.err_irq_cnt);
	qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0);

	return IRQ_NONE;
	return IRQ_HANDLED;
}

static irqreturn_t qm_mb_cmd_irq(int irq, void *data)
@@ -1193,6 +1179,8 @@ static irqreturn_t qm_aeq_thread(int irq, void *data)
	u16 aeq_depth = qm->aeq_depth;
	u32 type, qp_id;

	atomic64_inc(&qm->debug.dfx.aeq_irq_cnt);

	while (QM_AEQE_PHASE(aeqe) == qm->status.aeqc_phase) {
		type = le32_to_cpu(aeqe->dw0) >> QM_AEQE_TYPE_SHIFT;
		qp_id = le32_to_cpu(aeqe->dw0) & QM_AEQE_CQN_MASK;
@@ -1230,17 +1218,6 @@ static irqreturn_t qm_aeq_thread(int irq, void *data)
	return IRQ_HANDLED;
}

static irqreturn_t qm_aeq_irq(int irq, void *data)
{
	struct hisi_qm *qm = data;

	atomic64_inc(&qm->debug.dfx.aeq_irq_cnt);
	if (!readl(qm->io_base + QM_VF_AEQ_INT_SOURCE))
		return IRQ_NONE;

	return IRQ_WAKE_THREAD;
}

static void qm_init_qp_status(struct hisi_qp *qp)
{
	struct hisi_qp_status *qp_status = &qp->qp_status;
@@ -5187,8 +5164,8 @@ static int qm_register_aeq_irq(struct hisi_qm *qm)
		return 0;

	irq_vector = val & QM_IRQ_VECTOR_MASK;
	ret = request_threaded_irq(pci_irq_vector(pdev, irq_vector), qm_aeq_irq,
						   qm_aeq_thread, 0, qm->dev_name, qm);
	ret = request_threaded_irq(pci_irq_vector(pdev, irq_vector), NULL,
						   qm_aeq_thread, IRQF_ONESHOT, qm->dev_name, qm);
	if (ret)
		dev_err(&pdev->dev, "failed to request eq irq, ret = %d", ret);

+1 −0
Original line number Diff line number Diff line
@@ -302,6 +302,7 @@ struct hisi_qm_poll_data {
	struct hisi_qm *qm;
	struct work_struct work;
	u16 *qp_finish_id;
	u16 eqe_num;
};

/**