Unverified Commit 2fc2c202 authored by openeuler-ci-bot, committed by Gitee

!392 OpenEuler-22.03-LTS Fixes Some Bugs in Accelerator Disk Storage Encryption and Decryption Scenarios

Merge Pull Request from: @xiao_jiang_shui 
 
The QM service interrupt handling is reworked: completions are now dispatched to per-queue-pair work items instead of a single per-device work item. The CRYPTO_ALG_ALLOCATES_MEMORY flag is removed from the SEC skcipher and AEAD algorithms so that storage encryption users can select them.
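Background on the flag: storage-path users of the kernel crypto API refuse drivers that may allocate memory in the data path, because allocation there can deadlock under memory-reclaim writeback. They do this by passing CRYPTO_ALG_ALLOCATES_MEMORY in the allocation mask. A minimal sketch of that selection step (illustrative only, not code from this patch; the helper name is hypothetical):

#include <linux/err.h>
#include <crypto/skcipher.h>

/*
 * Sketch only, not code from this patch: a storage-path caller such as
 * dm-crypt passes CRYPTO_ALG_ALLOCATES_MEMORY as the mask, so the API
 * never returns a driver whose .cra_flags set that bit. With the flag
 * removed below, the SEC skcipher/aead implementations become eligible.
 */
static int pick_storage_cipher(struct crypto_skcipher **tfm_out)
{
	struct crypto_skcipher *tfm;

	tfm = crypto_alloc_skcipher("xts(aes)", 0, CRYPTO_ALG_ALLOCATES_MEMORY);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);	/* only memory-allocating drivers matched */

	*tfm_out = tfm;
	return 0;
}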

Issue: https://gitee.com/openeuler/kernel/issues/I6DRLU
 
Link: https://gitee.com/openeuler/kernel/pulls/392

 

Reviewed-by: Zheng Zengkai <zhengzengkai@huawei.com>
Reviewed-by: Ling Mingqiang <lingmingqiang@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
parents b982dab6 f2905c16
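The QM-side change in the diff below follows the standard per-object work pattern: each queue pair owns a struct hisi_qm_poll_data with an embedded work_struct, the hard IRQ queues only the work item of the queue whose event queue entry completed, and the worker recovers its context with container_of. A minimal standalone sketch of the pattern, with hypothetical names (not the driver code):

#include <linux/printk.h>
#include <linux/workqueue.h>

/* Hypothetical per-queue context; mirrors struct hisi_qm_poll_data. */
struct queue_poll {
	int qid;
	struct work_struct work;	/* set up with INIT_WORK(&p->work, queue_poll_fn) */
};

static void queue_poll_fn(struct work_struct *work)
{
	/* Recover the embedding per-queue context from the work item. */
	struct queue_poll *poll = container_of(work, struct queue_poll, work);

	pr_debug("draining completions for queue %d\n", poll->qid);
}

/* IRQ path: kick only the work item of the queue that completed. */
static void kick_queue(struct workqueue_struct *wq, struct queue_poll *poll)
{
	queue_work(wq, &poll->work);
}

Because qm->wq is now always created by the QM core (in hisi_qm_init_work below), the schedule_work() fallback to the system workqueue and the conditional flush in qm_stop_qp_nolock disappear.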
drivers/crypto/hisilicon/qm.c  (+128 −71)
@@ -839,13 +839,6 @@ static void qm_pm_put_sync(struct hisi_qm *qm)
 	pm_runtime_put_autosuspend(dev);
 }
 
-static struct hisi_qp *qm_to_hisi_qp(struct hisi_qm *qm, struct qm_eqe *eqe)
-{
-	u16 cqn = le32_to_cpu(eqe->dw0) & QM_EQE_CQN_MASK;
-
-	return &qm->qp_array[cqn];
-}
-
 static void qm_cq_head_update(struct hisi_qp *qp)
 {
 	if (qp->qp_status.cq_head == QM_Q_DEPTH - 1) {
@@ -856,18 +849,10 @@ static void qm_cq_head_update(struct hisi_qp *qp)
 	}
 }
 
-static void qm_poll_qp(struct hisi_qp *qp, struct hisi_qm *qm)
+static void qm_poll_req_cb(struct hisi_qp *qp)
 {
-	if (unlikely(atomic_read(&qp->qp_status.flags) == QP_STOP))
-		return;
-
-	if (qp->event_cb) {
-		qp->event_cb(qp);
-		return;
-	}
-
-	if (qp->req_cb) {
-		struct qm_cqe *cqe = qp->cqe + qp->qp_status.cq_head;
+	struct qm_cqe *cqe = qp->cqe + qp->qp_status.cq_head;
+	struct hisi_qm *qm = qp->qm;
 
-		while (QM_CQE_PHASE(cqe) == qp->qp_status.cqc_phase) {
-			dma_rmb();
+	while (QM_CQE_PHASE(cqe) == qp->qp_status.cqc_phase) {
+		dma_rmb();
@@ -878,25 +863,25 @@ static void qm_poll_qp(struct hisi_qp *qp, struct hisi_qm *qm)
 		qm_db(qm, qp->qp_id, QM_DOORBELL_CMD_CQ,
 		      qp->qp_status.cq_head, 0);
 		atomic_dec(&qp->qp_status.used);
+
+		cond_resched();
 	}
 
 	/* set c_flag */
-		qm_db(qm, qp->qp_id, QM_DOORBELL_CMD_CQ,
-		      qp->qp_status.cq_head, 1);
-	}
+	qm_db(qm, qp->qp_id, QM_DOORBELL_CMD_CQ, qp->qp_status.cq_head, 1);
 }
 
-static void qm_work_process(struct work_struct *work)
+static int qm_get_complete_eqe_num(struct hisi_qm_poll_data *poll_data)
 {
-	struct hisi_qm *qm = container_of(work, struct hisi_qm, work);
+	struct hisi_qm *qm = poll_data->qm;
 	struct qm_eqe *eqe = qm->eqe + qm->status.eq_head;
-	struct hisi_qp *qp;
 	int eqe_num = 0;
+	u16 cqn;
 
 	while (QM_EQE_PHASE(eqe) == qm->status.eqc_phase) {
+		cqn = le32_to_cpu(eqe->dw0) & QM_EQE_CQN_MASK;
+		poll_data->qp_finish_id[eqe_num] = cqn;
 		eqe_num++;
-		qp = qm_to_hisi_qp(qm, eqe);
-		qm_poll_qp(qp, qm);
 
 		if (qm->status.eq_head == QM_EQ_DEPTH - 1) {
 			qm->status.eqc_phase = !qm->status.eqc_phase;
@@ -907,24 +892,53 @@ static void qm_work_process(struct work_struct *work)
 			qm->status.eq_head++;
 		}
 
-		if (eqe_num == QM_EQ_DEPTH / 2 - 1) {
-			eqe_num = 0;
-			qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0);
-		}
+		if (eqe_num == (QM_EQ_DEPTH >> 1) - 1)
+			break;
 	}
 
 	qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0);
+
+	return eqe_num;
+}
+
+static void qm_work_process(struct work_struct *work)
+{
+	struct hisi_qm_poll_data *poll_data =
+		container_of(work, struct hisi_qm_poll_data, work);
+	struct hisi_qm *qm = poll_data->qm;
+	struct hisi_qp *qp;
+	int eqe_num, i;
+
+	/* Get qp id of completed tasks and re-enable the interrupt. */
+	eqe_num = qm_get_complete_eqe_num(poll_data);
+	for (i = eqe_num - 1; i >= 0; i--) {
+		qp = &qm->qp_array[poll_data->qp_finish_id[i]];
+		if (unlikely(atomic_read(&qp->qp_status.flags) == QP_STOP))
+			continue;
+
+		if (qp->event_cb) {
+			qp->event_cb(qp);
+			continue;
+		}
+
+		if (likely(qp->req_cb))
+			qm_poll_req_cb(qp);
+	}
 }
 
-static irqreturn_t do_qm_irq(int irq, void *data)
+static irqreturn_t do_qm_irq(struct hisi_qm *qm)
 {
-	struct hisi_qm *qm = (struct hisi_qm *)data;
+	struct qm_eqe *eqe = qm->eqe + qm->status.eq_head;
+	struct hisi_qm_poll_data *poll_data;
+	u16 cqn;
 
-	/* the workqueue created by device driver of QM */
-	if (qm->wq)
-		queue_work(qm->wq, &qm->work);
-	else
-		schedule_work(&qm->work);
+	if (QM_EQE_PHASE(eqe) == qm->status.eqc_phase) {
+		cqn = le32_to_cpu(eqe->dw0) & QM_EQE_CQN_MASK;
+		poll_data = &qm->poll_data[cqn];
+		queue_work(qm->wq, &poll_data->work);
+	} else {
+		qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0);
+	}
 
 	return IRQ_HANDLED;
 }
@@ -933,11 +947,10 @@ static irqreturn_t qm_irq(int irq, void *data)
 {
 	struct hisi_qm *qm = data;
 
-	if (readl(qm->io_base + QM_VF_EQ_INT_SOURCE))
-		return do_qm_irq(irq, data);
+	if (likely(readl(qm->io_base + QM_VF_EQ_INT_SOURCE)))
+		return do_qm_irq(qm);
 
 	atomic64_inc(&qm->debug.dfx.err_irq_cnt);
-	dev_err(&qm->pdev->dev, "invalid int source\n");
+	qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0);
 
 	return IRQ_NONE;
@@ -2943,11 +2956,8 @@ static int qm_stop_qp_nolock(struct hisi_qp *qp)
 	if (ret)
 		dev_err(dev, "Failed to drain out data for stopping!\n");
 
-	if (qp->qm->wq)
-		flush_workqueue(qp->qm->wq);
-	else
-		flush_work(&qp->qm->work);
-
+	flush_workqueue(qp->qm->wq);
+
 	if (unlikely(qp->is_resetting && atomic_read(&qp->qp_status.used)))
 		qp_stop_fail_cb(qp);
 
@@ -3375,8 +3385,10 @@ static void hisi_qp_memory_uninit(struct hisi_qm *qm, int num)
 	for (i = num - 1; i >= 0; i--) {
 		qdma = &qm->qp_array[i].qdma;
 		dma_free_coherent(dev, qdma->size, qdma->va, qdma->dma);
+		kfree(qm->poll_data[i].qp_finish_id);
 	}
 
+	kfree(qm->poll_data);
 	kfree(qm->qp_array);
 }

@@ -3385,12 +3397,18 @@ static int hisi_qp_memory_init(struct hisi_qm *qm, size_t dma_size, int id)
 	struct device *dev = &qm->pdev->dev;
 	size_t off = qm->sqe_size * QM_Q_DEPTH;
 	struct hisi_qp *qp;
+	int ret = -ENOMEM;
+
+	qm->poll_data[id].qp_finish_id = kcalloc(qm->qp_num, sizeof(u16),
+						 GFP_KERNEL);
+	if (!qm->poll_data[id].qp_finish_id)
+		return -ENOMEM;
 
 	qp = &qm->qp_array[id];
 	qp->qdma.va = dma_alloc_coherent(dev, dma_size, &qp->qdma.dma,
 					 GFP_KERNEL);
 	if (!qp->qdma.va)
-		return -ENOMEM;
+		goto err_free_qp_finish_id;
 
 	qp->sqe = qp->qdma.va;
 	qp->sqe_dma = qp->qdma.dma;
@@ -3401,6 +3419,10 @@ static int hisi_qp_memory_init(struct hisi_qm *qm, size_t dma_size, int id)
 	qp->qp_id = id;
 
 	return 0;
+
+err_free_qp_finish_id:
+	kfree(qm->poll_data[id].qp_finish_id);
+	return ret;
 }
 
 static void hisi_qm_set_state(struct hisi_qm *qm, enum vf_state state)
@@ -3480,6 +3502,26 @@ static void hisi_qm_pci_uninit(struct hisi_qm *qm)
 	pci_disable_device(pdev);
 }
 
+static void hisi_qm_unint_work(struct hisi_qm *qm)
+{
+	destroy_workqueue(qm->wq);
+}
+
+static void hisi_qm_memory_uninit(struct hisi_qm *qm)
+{
+	struct device *dev = &qm->pdev->dev;
+
+	hisi_qp_memory_uninit(qm, qm->qp_num);
+	if (qm->qdma.va) {
+		hisi_qm_cache_wb(qm);
+		dma_free_coherent(dev, qm->qdma.size,
+				  qm->qdma.va, qm->qdma.dma);
+	}
+
+	idr_destroy(&qm->qp_idr);
+	kfree(qm->factor);
+}
+
 /**
  * hisi_qm_uninit() - Uninitialize qm.
  * @qm: The qm needed uninit.
@@ -3488,26 +3530,15 @@ static void hisi_qm_pci_uninit(struct hisi_qm *qm)
  */
 void hisi_qm_uninit(struct hisi_qm *qm)
 {
-	struct pci_dev *pdev = qm->pdev;
-	struct device *dev = &pdev->dev;
-
 	qm_cmd_uninit(qm);
-	kfree(qm->factor);
+	hisi_qm_unint_work(qm);
 	down_write(&qm->qps_lock);
 
 	if (!qm_avail_state(qm, QM_CLOSE)) {
 		up_write(&qm->qps_lock);
 		return;
 	}
 
-	hisi_qp_memory_uninit(qm, qm->qp_num);
-	idr_destroy(&qm->qp_idr);
-
-	if (qm->qdma.va) {
-		hisi_qm_cache_wb(qm);
-		dma_free_coherent(dev, qm->qdma.size,
-				  qm->qdma.va, qm->qdma.dma);
-	}
-
+	hisi_qm_memory_uninit(qm);
 	up_write(&qm->qps_lock);
 
 	hisi_qm_set_state(qm, VF_NOT_READY);
@@ -5796,14 +5827,28 @@ static int hisi_qm_pci_init(struct hisi_qm *qm)
 	return ret;
 }
 
-static void hisi_qm_init_work(struct hisi_qm *qm)
+static int hisi_qm_init_work(struct hisi_qm *qm)
 {
-	INIT_WORK(&qm->work, qm_work_process);
+	int i;
+
+	for (i = 0; i < qm->qp_num; i++)
+		INIT_WORK(&qm->poll_data[i].work, qm_work_process);
+
 	if (qm->fun_type == QM_HW_PF)
 		INIT_WORK(&qm->rst_work, hisi_qm_controller_reset);
 
 	if (qm->ver > QM_HW_V2)
 		INIT_WORK(&qm->cmd_process, qm_cmd_process);
+
+	qm->wq = alloc_workqueue("%s", WQ_HIGHPRI | WQ_MEM_RECLAIM |
+				 WQ_UNBOUND, num_online_cpus(),
+				 pci_name(qm->pdev));
+	if (!qm->wq) {
+		pci_err(qm->pdev, "failed to alloc workqueue!\n");
+		return -ENOMEM;
+	}
+
+	return 0;
 }
 
 static int hisi_qp_alloc_memory(struct hisi_qm *qm)
@@ -5816,11 +5861,18 @@ static int hisi_qp_alloc_memory(struct hisi_qm *qm)
 	if (!qm->qp_array)
 		return -ENOMEM;
 
+	qm->poll_data = kcalloc(qm->qp_num, sizeof(struct hisi_qm_poll_data), GFP_KERNEL);
+	if (!qm->poll_data) {
+		kfree(qm->qp_array);
+		return -ENOMEM;
+	}
+
 	/* one more page for device or qp statuses */
 	qp_dma_size = qm->sqe_size * QM_Q_DEPTH +
 		      sizeof(struct qm_cqe) * QM_Q_DEPTH;
 	qp_dma_size = PAGE_ALIGN(qp_dma_size) + PAGE_SIZE;
 	for (i = 0; i < qm->qp_num; i++) {
+		qm->poll_data[i].qm = qm;
 		ret = hisi_qp_memory_init(qm, qp_dma_size, i);
 		if (ret)
 			goto err_init_qp_mem;
@@ -5931,12 +5983,17 @@ int hisi_qm_init(struct hisi_qm *qm)
 	if (ret)
 		goto err_alloc_uacce;
 
-	hisi_qm_init_work(qm);
+	ret = hisi_qm_init_work(qm);
+	if (ret)
+		goto err_free_qm_memory;
+
 	qm_cmd_init(qm);
 	atomic_set(&qm->status.flags, QM_INIT);
 
 	return 0;
 
+err_free_qm_memory:
+	hisi_qm_memory_uninit(qm);
 err_alloc_uacce:
 	if (qm->use_sva) {
 		uacce_remove(qm->uacce);
drivers/crypto/hisilicon/qm.h  (+7 −0)
@@ -220,6 +220,12 @@ struct hisi_qm_list {
 	void (*unregister_from_crypto)(struct hisi_qm *qm);
 };
 
+struct hisi_qm_poll_data {
+	struct hisi_qm *qm;
+	struct work_struct work;
+	u16 *qp_finish_id;
+};
+
 struct hisi_qm {
 	enum qm_hw_ver ver;
 	enum qm_fun_type fun_type;
@@ -257,6 +263,7 @@ struct hisi_qm {
 	struct rw_semaphore qps_lock;
 	struct idr qp_idr;
 	struct hisi_qp *qp_array;
+	struct hisi_qm_poll_data *poll_data;
 
 	struct mutex mailbox_lock;
drivers/crypto/hisilicon/sec2/sec_crypto.c  (+0 −2)
@@ -2113,7 +2113,6 @@ static int sec_skcipher_decrypt(struct skcipher_request *sk_req)
 		.cra_driver_name = "hisi_sec_"sec_cra_name,\
 		.cra_priority = SEC_PRIORITY,\
 		.cra_flags = CRYPTO_ALG_ASYNC |\
-		 CRYPTO_ALG_ALLOCATES_MEMORY |\
 		 CRYPTO_ALG_NEED_FALLBACK,\
 		.cra_blocksize = blk_size,\
 		.cra_ctxsize = sizeof(struct sec_ctx),\
@@ -2366,7 +2365,6 @@ static int sec_aead_decrypt(struct aead_request *a_req)
 		.cra_driver_name = "hisi_sec_"sec_cra_name,\
 		.cra_priority = SEC_PRIORITY,\
 		.cra_flags = CRYPTO_ALG_ASYNC |\
-		 CRYPTO_ALG_ALLOCATES_MEMORY |\
 		 CRYPTO_ALG_NEED_FALLBACK,\
 		.cra_blocksize = blk_size,\
 		.cra_ctxsize = sizeof(struct sec_ctx),\
drivers/crypto/hisilicon/sec2/sec_main.c  (+1 −23)
@@ -868,8 +868,6 @@ static int sec_pf_probe_init(struct sec_dev *sec)
 
 static int sec_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
 {
-	int ret;
-
 	qm->pdev = pdev;
 	qm->ver = pdev->revision;
 	qm->algs = "cipher\ndigest\naead";
@@ -895,25 +893,7 @@ static int sec_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
 		qm->qp_num = SEC_QUEUE_NUM_V1 - SEC_PF_DEF_Q_NUM;
 	}
 
-	/*
-	 * WQ_HIGHPRI: SEC request must be low delayed,
-	 * so need a high priority workqueue.
-	 * WQ_UNBOUND: SEC task is likely with long
-	 * running CPU intensive workloads.
-	 */
-	qm->wq = alloc_workqueue("%s", WQ_HIGHPRI | WQ_MEM_RECLAIM |
-				 WQ_UNBOUND, num_online_cpus(),
-				 pci_name(qm->pdev));
-	if (!qm->wq) {
-		pci_err(qm->pdev, "fail to alloc workqueue\n");
-		return -ENOMEM;
-	}
-
-	ret = hisi_qm_init(qm);
-	if (ret)
-		destroy_workqueue(qm->wq);
-
-	return ret;
+	return hisi_qm_init(qm);
 }
 
 static void sec_qm_uninit(struct hisi_qm *qm)
@@ -944,8 +924,6 @@ static int sec_probe_init(struct sec_dev *sec)
 static void sec_probe_uninit(struct hisi_qm *qm)
 {
 	hisi_qm_dev_err_uninit(qm);
-
-	destroy_workqueue(qm->wq);
 }
 
 static void sec_iommu_used_check(struct sec_dev *sec)
drivers/crypto/hisilicon/zip/zip_main.c  (+1 −16)
@@ -826,8 +826,6 @@ static int hisi_zip_pf_probe_init(struct hisi_zip *hisi_zip)
 
 static int hisi_zip_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
 {
-	int ret;
-
 	qm->pdev = pdev;
 	qm->ver = pdev->revision;
 	if (pdev->revision >= QM_HW_V3)
@@ -857,25 +855,12 @@ static int hisi_zip_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
 		qm->qp_num = HZIP_QUEUE_NUM_V1 - HZIP_PF_DEF_Q_NUM;
 	}
 
-	qm->wq = alloc_workqueue("%s", WQ_HIGHPRI | WQ_MEM_RECLAIM |
-				 WQ_UNBOUND, num_online_cpus(),
-				 pci_name(qm->pdev));
-	if (!qm->wq) {
-		pci_err(qm->pdev, "fail to alloc workqueue\n");
-		return -ENOMEM;
-	}
-
-	ret = hisi_qm_init(qm);
-	if (ret)
-		destroy_workqueue(qm->wq);
-
-	return ret;
+	return hisi_qm_init(qm);
 }
 
 static void hisi_zip_qm_uninit(struct hisi_qm *qm)
 {
 	hisi_qm_uninit(qm);
-	destroy_workqueue(qm->wq);
 }
 
 static int hisi_zip_probe_init(struct hisi_zip *hisi_zip)