Commit a1666f44 authored by Weili Qian, committed by JiangShui
Browse files

crypto: hisilicon/qm - support no-sva feature

driver inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I773SD


CVE: NA

----------------------------------------------------------------------

Support the no-SVA feature: allow the uacce device to be used when the IOMMU does not provide shared virtual addressing.

Signed-off-by: Weili Qian <qianweili@huawei.com>
Signed-off-by: JiangShui Yang <yangjiangshui@h-partners.com>
parent 92e58150
Loading
Loading
Loading
Loading
+5 −8
Original line number Diff line number Diff line
@@ -11,7 +11,6 @@
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/topology.h>
#include <linux/uacce.h>
#include "hpre.h"

#define HPRE_QM_ABNML_INT_MASK		0x100004
@@ -366,7 +365,7 @@ static int hpre_set_qm_algs(struct hisi_qm *qm)
	u32 alg_msk;
	int i;

	if (!qm->use_sva)
	if (!qm->use_uacce)
		return 0;

	algs = devm_kzalloc(dev, HPRE_DEV_ALG_MAX_LEN * sizeof(char), GFP_KERNEL);
@@ -1398,13 +1397,11 @@ static int hpre_probe(struct pci_dev *pdev, const struct pci_device_id *id)
		goto err_with_qm_start;
	}

	if (qm->uacce) {
		ret = uacce_register(qm->uacce);
	ret = qm_register_uacce(qm);
	if (ret) {
			pci_err(pdev, "failed to register uacce (%d)!\n", ret);
		pci_err(pdev, "Failed to register uacce (%d)!\n", ret);
		goto err_with_alg_register;
	}
	}

	if (qm->fun_type == QM_HW_PF && vfs_num) {
		ret = hisi_qm_sriov_enable(pdev, vfs_num);
+182 −68
Original line number Diff line number Diff line
@@ -7,12 +7,12 @@
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/irqreturn.h>
#include <linux/log2.h>
#include <linux/pm_runtime.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/uacce.h>
#include <linux/uaccess.h>
#include <uapi/misc/uacce/hisi_qm.h>
#include <linux/hisi_acc_qm.h>
@@ -829,6 +829,33 @@ static void qm_cq_head_update(struct hisi_qp *qp)
	}
}

static void qm_poll_user_event_cb(struct hisi_qp *qp)
{
	struct qm_cqe *cqe = qp->cqe + qp->qp_status.cq_head;
	struct uacce_queue *q = qp->uacce_q;
	bool updated = 0;

	/*
	 * If multi thread poll one queue, each thread will produce
	 * one event, so we query one cqe and break out of the loop.
	 * If only one thread poll one queue, we need query all cqe
	 * to ensure that we poll a cleaned queue next time.
	 */
	while (QM_CQE_PHASE(cqe) == qp->qp_status.cqc_phase) {
		dma_rmb();
		qm_cq_head_update(qp);
		cqe = qp->cqe + qp->qp_status.cq_head;
		updated = 1;
		if (!wq_has_single_sleeper(&q->wait))
			break;
	}

	if (updated) {
		atomic_inc(&qp->qp_status.complete_task);
		qp->event_cb(qp);
	}
}

static void qm_poll_req_cb(struct hisi_qp *qp)
{
	struct qm_cqe *cqe = qp->cqe + qp->qp_status.cq_head;
@@ -896,7 +923,7 @@ static void qm_work_process(struct work_struct *work)
			continue;

		if (qp->event_cb) {
			qp->event_cb(qp);
			qm_poll_user_event_cb(qp);
			continue;
		}

@@ -2306,7 +2333,7 @@ static void hisi_qm_cache_wb(struct hisi_qm *qm)

/* Completion notifier for a uacce-backed qp: wake the user-space waiter. */
static void qm_qp_event_notifier(struct hisi_qp *qp)
{
	/*
	 * NOTE(review): both the old wake_up_interruptible() call and its
	 * uacce_wake_up() replacement appear here — this looks like a diff
	 * artifact; only one of the two should remain. Confirm against the
	 * applied patch.
	 */
	wake_up_interruptible(&qp->uacce_q->wait);
	uacce_wake_up(qp->uacce_q);
}

 /* This function returns free number of qp in qm. */
@@ -2447,18 +2474,8 @@ static void hisi_qm_uacce_stop_queue(struct uacce_queue *q)
/*
 * uacce is_q_updated hook: report whether new completions are pending on
 * this queue.
 *
 * NOTE(review): this hunk shows both the old cqe-polling loop (ending at
 * "return updated;") and its replacement based on the complete_task
 * counter — a diff artifact. Only the atomic_add_unless() path should be
 * live in the applied patch; confirm against the merged source.
 */
static int hisi_qm_is_q_updated(struct uacce_queue *q)
{
	struct hisi_qp *qp = q->priv;
	struct qm_cqe *cqe = qp->cqe + qp->qp_status.cq_head;
	int updated = 0;

	while (QM_CQE_PHASE(cqe) == qp->qp_status.cqc_phase) {
		/* make sure to read data from memory */
		dma_rmb();
		qm_cq_head_update(qp);
		cqe = qp->cqe + qp->qp_status.cq_head;
		updated = 1;
	}

	return updated;
	/* consume one recorded completion event; never drops below zero */
	return atomic_add_unless(&qp->qp_status.complete_task, -1, 0);
}

static void qm_set_sqctype(struct uacce_queue *q, u16 type)
@@ -2489,7 +2506,7 @@ static long hisi_qm_uacce_ioctl(struct uacce_queue *q, unsigned int cmd,
		qm_set_sqctype(q, qp_ctx.qc_type);
		qp_ctx.id = qp->qp_id;

		if (copy_to_user((void __user *)arg, &qp_ctx,
		if (copy_to_user((void __user *)(uintptr_t)arg, &qp_ctx,
				 sizeof(struct hisi_qp_ctx)))
			return -EFAULT;

@@ -2513,6 +2530,78 @@ static long hisi_qm_uacce_ioctl(struct uacce_queue *q, unsigned int cmd,
	return -EINVAL;
}

/* uacce get_dev_state hook: map the QM state flag to a uacce device state. */
static enum uacce_dev_state hisi_qm_get_state(struct uacce_device *uacce)
{
	struct hisi_qm *qm = uacce->priv;

	if (atomic_read(&qm->status.flags) == QM_STOP)
		return UACCE_DEV_ERR;

	return UACCE_DEV_NORMAL;
}
static void qm_uacce_api_ver_init(struct hisi_qm *qm)
{
	struct uacce_device *uacce = qm->uacce;

	if (uacce->flags & UACCE_DEV_IOMMU) {
		qm->use_sva = uacce->flags & UACCE_DEV_SVA ? true : false;

		if (qm->ver == QM_HW_V1)
			uacce->api_ver = HISI_QM_API_VER_BASE;
		else if (qm->ver == QM_HW_V2)
			uacce->api_ver = HISI_QM_API_VER2_BASE;
		else
			uacce->api_ver = HISI_QM_API_VER3_BASE;
	} else {
		qm->use_sva = false;

		if (qm->ver == QM_HW_V1)
			uacce->api_ver = HISI_QM_API_VER_BASE
					 UACCE_API_VER_NOIOMMU_SUBFIX;
		else if (qm->ver == QM_HW_V2)
			uacce->api_ver = HISI_QM_API_VER2_BASE
					 UACCE_API_VER_NOIOMMU_SUBFIX;
		else
			uacce->api_ver = HISI_QM_API_VER3_BASE
					 UACCE_API_VER_NOIOMMU_SUBFIX;
	}
}

/*
 * Fill in the common uacce device fields: api_ver, ownership, algorithm
 * string, and the MMIO/DUS queue-file page counts.
 */
static void qm_uacce_base_init(struct hisi_qm *qm)
{
	struct uacce_device *uacce = qm->uacce;
	struct pci_dev *pdev = qm->pdev;
	unsigned long mmio_page_nr, dus_page_nr;
	u16 sq_depth, cq_depth;

	qm_uacce_api_ver_init(qm);

	if (qm->ver == QM_HW_V1) {
		mmio_page_nr = QM_DOORBELL_PAGE_NR;
	} else if (qm->ver == QM_HW_V2 ||
		   !test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps)) {
		mmio_page_nr = QM_DOORBELL_PAGE_NR +
			       QM_DOORBELL_SQ_CQ_BASE_V2 / PAGE_SIZE;
	} else {
		mmio_page_nr = QM_QP_DB_INTERVAL / PAGE_SIZE;
	}

	uacce->is_vf = pdev->is_virtfn;
	uacce->priv = qm;
	uacce->algs = qm->algs;
	uacce->parent = &pdev->dev;

	qm_get_xqc_depth(qm, &sq_depth, &cq_depth, QM_QP_DEPTH_CAP);

	/* Add one more page for device or qp status */
	dus_page_nr = (PAGE_SIZE - 1 + qm->sqe_size * sq_depth +
		       sizeof(struct qm_cqe) * cq_depth + PAGE_SIZE) >>
		      PAGE_SHIFT;

	uacce->qf_pg_num[UACCE_QFRT_MMIO] = mmio_page_nr;
	uacce->qf_pg_num[UACCE_QFRT_DUS] = dus_page_nr;
}

/**
 * qm_hw_err_isolate() - Try to set the isolation status of the uacce device
 * according to user's configuration of error threshold.
@@ -2628,6 +2717,7 @@ static const struct uacce_ops uacce_qm_ops = {
	.stop_queue = hisi_qm_uacce_stop_queue,
	.mmap = hisi_qm_uacce_mmap,
	.ioctl = hisi_qm_uacce_ioctl,
	.get_dev_state = hisi_qm_get_state,
	.is_q_updated = hisi_qm_is_q_updated,
	.get_isolate_state = hisi_qm_get_isolate_state,
	.isolate_err_threshold_write = hisi_qm_isolate_threshold_write,
@@ -2638,7 +2728,7 @@ static void qm_remove_uacce(struct hisi_qm *qm)
{
	struct uacce_device *uacce = qm->uacce;

	if (qm->use_sva) {
	if (qm->use_uacce) {
		qm_hw_err_destroy(qm);
		uacce_remove(uacce);
		qm->uacce = NULL;
@@ -2648,61 +2738,42 @@ static void qm_remove_uacce(struct hisi_qm *qm)
static int qm_alloc_uacce(struct hisi_qm *qm)
{
	struct pci_dev *pdev = qm->pdev;
	struct uacce_interface interface;
	struct uacce_device *uacce;
	unsigned long mmio_page_nr;
	unsigned long dus_page_nr;
	u16 sq_depth, cq_depth;
	struct uacce_interface interface = {
		.flags = UACCE_DEV_SVA,
		.ops = &uacce_qm_ops,
	};
	int ret;

	ret = strscpy(interface.name, dev_driver_string(&pdev->dev),
		      sizeof(interface.name));
	if (ret < 0)
		return -ENAMETOOLONG;
	int name_len;

	uacce = uacce_alloc(&pdev->dev, &interface);
	if (IS_ERR(uacce))
		return PTR_ERR(uacce);
	if (!qm->use_uacce)
		return 0;

	if (uacce->flags & UACCE_DEV_SVA) {
		qm->use_sva = true;
	} else {
		/* only consider sva case */
		qm_remove_uacce(qm);
	name_len = strlen(pdev->driver->name);
	if (name_len >= UACCE_MAX_NAME_SIZE) {
		pci_err(pdev, "The driver name(%d) is longer than %d!\n",
			name_len, UACCE_MAX_NAME_SIZE);
		return -EINVAL;
	}

	uacce->is_vf = pdev->is_virtfn;
	uacce->priv = qm;

	if (qm->ver == QM_HW_V1)
		uacce->api_ver = HISI_QM_API_VER_BASE;
	else if (qm->ver == QM_HW_V2)
		uacce->api_ver = HISI_QM_API_VER2_BASE;
	else
		uacce->api_ver = HISI_QM_API_VER3_BASE;

	if (qm->ver == QM_HW_V1)
		mmio_page_nr = QM_DOORBELL_PAGE_NR;
	else if (!test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps))
		mmio_page_nr = QM_DOORBELL_PAGE_NR +
			QM_DOORBELL_SQ_CQ_BASE_V2 / PAGE_SIZE;
	else
		mmio_page_nr = qm->db_interval / PAGE_SIZE;
	strncpy(interface.name, pdev->driver->name, name_len);
	interface.name[name_len] = '\0';

	qm_get_xqc_depth(qm, &sq_depth, &cq_depth, QM_QP_DEPTH_CAP);
	interface.flags = qm->use_iommu ? UACCE_DEV_IOMMU : UACCE_DEV_NOIOMMU;
	if (qm->mode == UACCE_MODE_SVA) {
		if (!qm->use_iommu) {
			pci_err(pdev, "iommu not support sva!\n");
			return -EINVAL;
		}

	/* Add one more page for device or qp status */
	dus_page_nr = (PAGE_SIZE - 1 + qm->sqe_size * sq_depth +
		       sizeof(struct qm_cqe) * cq_depth  + PAGE_SIZE) >>
					 PAGE_SHIFT;
		interface.flags |= UACCE_DEV_SVA;
	}

	uacce->qf_pg_num[UACCE_QFRT_MMIO] = mmio_page_nr;
	uacce->qf_pg_num[UACCE_QFRT_DUS]  = dus_page_nr;
	interface.ops = &uacce_qm_ops;
	uacce = uacce_alloc(&pdev->dev, &interface);
	if (IS_ERR(uacce)) {
		pci_err(pdev, "fail to alloc uacce device\n!");
		return PTR_ERR(uacce);
	}
	qm->uacce = uacce;

	qm_uacce_base_init(qm);
	qm->uacce = uacce;
	INIT_LIST_HEAD(&qm->isolate_data.qm_hw_errs);
	mutex_init(&qm->isolate_data.isolate_lock);
@@ -2710,6 +2781,16 @@ static int qm_alloc_uacce(struct hisi_qm *qm)
	return 0;
}

/*
 * Register the previously allocated uacce device with the uacce framework.
 * A no-op (returns 0) when the QM is not running in a uacce mode.
 */
int qm_register_uacce(struct hisi_qm *qm)
{
	if (qm->use_uacce) {
		dev_info(&qm->pdev->dev, "qm register to uacce\n");
		return uacce_register(qm->uacce);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(qm_register_uacce);

/**
 * qm_frozen() - Try to froze QM to cut continuous queue request. If
 * there is user on the QM, return failure without doing anything.
@@ -2840,7 +2921,20 @@ static int hisi_qp_memory_init(struct hisi_qm *qm, size_t dma_size, int id,
	return ret;
}

static void hisi_qm_pre_init(struct hisi_qm *qm)
/*
 * Check whether the device sits behind an IOMMU with a paging domain,
 * i.e. whether its DMA addresses are translated.
 */
static inline bool is_iommu_used(struct device *dev)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);

	if (!domain)
		return false;

	dev_info(dev, "iommu domain type = %u\n", domain->type);

	return domain->type & __IOMMU_DOMAIN_PAGING;
}

static int hisi_qm_pre_init(struct hisi_qm *qm)
{
	struct pci_dev *pdev = qm->pdev;

@@ -2851,15 +2945,30 @@ static void hisi_qm_pre_init(struct hisi_qm *qm)
	else
		qm->ops = &qm_hw_ops_v3;

	switch (qm->mode) {
	case UACCE_MODE_NOUACCE:
		qm->use_uacce = false;
		break;
	case UACCE_MODE_SVA:
	case UACCE_MODE_NOIOMMU:
		qm->use_uacce = true;
		break;
	default:
		pci_err(pdev, "uacce mode error!\n");
		return -EINVAL;
	}

	pci_set_drvdata(pdev, qm);
	mutex_init(&qm->mailbox_lock);
	init_rwsem(&qm->qps_lock);
	qm->qp_in_used = 0;
	qm->misc_ctl = false;
	qm->use_iommu = is_iommu_used(&pdev->dev);
	if (test_bit(QM_SUPPORT_RPM, &qm->caps)) {
		if (!acpi_device_power_manageable(ACPI_COMPANION(&pdev->dev)))
			dev_info(&pdev->dev, "_PS0 and _PR0 are not defined");
	}
	return 0;
}

static void qm_cmd_uninit(struct hisi_qm *qm)
@@ -2960,6 +3069,7 @@ void hisi_qm_uninit(struct hisi_qm *qm)
	hisi_qm_set_state(qm, VF_NOT_READY);
	up_write(&qm->qps_lock);

	qm_remove_uacce(qm);
	qm_irqs_unregister(qm);
	hisi_qm_pci_uninit(qm);
	if (qm->use_sva) {
@@ -4197,7 +4307,7 @@ static int qm_controller_reset_prepare(struct hisi_qm *qm)
		return ret;
	}

	if (qm->use_sva) {
	if (qm->use_uacce) {
		ret = qm_hw_err_isolate(qm);
		if (ret)
			pci_err(pdev, "failed to isolate hw err!\n");
@@ -4688,6 +4798,8 @@ void hisi_qm_dev_shutdown(struct pci_dev *pdev)
	ret = hisi_qm_stop(qm, QM_NORMAL);
	if (ret)
		dev_err(&pdev->dev, "Fail to stop qm in shutdown!\n");

	qm_remove_uacce(qm);
}
EXPORT_SYMBOL_GPL(hisi_qm_dev_shutdown);

@@ -5406,7 +5518,9 @@ int hisi_qm_init(struct hisi_qm *qm)
	struct device *dev = &pdev->dev;
	int ret;

	hisi_qm_pre_init(qm);
	ret = hisi_qm_pre_init(qm);
	if (ret)
		return ret;

	ret = hisi_qm_pci_init(qm);
	if (ret)
@@ -5425,10 +5539,10 @@ int hisi_qm_init(struct hisi_qm *qm)
		}
	}

	if (qm->mode == UACCE_MODE_SVA) {
	ret = qm_alloc_uacce(qm);
		if (ret < 0)
			dev_warn(dev, "fail to alloc uacce (%d)\n", ret);
	if (ret < 0) {
		dev_err(dev, "failed to alloc uacce (%d)\n", ret);
		goto err_irq_register;
	}

	ret = hisi_qm_memory_init(qm);
+0 −1
Original line number Diff line number Diff line
@@ -189,7 +189,6 @@ struct sec_dev {
	struct hisi_qm qm;
	struct sec_debug debug;
	u32 ctx_q_num;
	bool iommu_used;
};

enum sec_cap_type {
+1 −1
Original line number Diff line number Diff line
@@ -599,7 +599,7 @@ static int sec_ctx_base_init(struct sec_ctx *ctx)
	ctx->dev = &sec->qm.pdev->dev;
	ctx->hlf_q_num = sec->ctx_q_num >> 1;

	ctx->pbuf_supported = ctx->sec->iommu_used;
	ctx->pbuf_supported = sec->qm.use_iommu;

	/* Half of queue depth is taken as fake requests limit in the queue. */
	ctx->fake_req_limit = ctx->qps[0]->sq_depth >> 1;
+1 −19
Original line number Diff line number Diff line
@@ -1082,7 +1082,7 @@ static int sec_set_qm_algs(struct hisi_qm *qm)
	u64 alg_mask;
	int i;

	if (!qm->use_sva)
	if (!qm->use_uacce)
		return 0;

	algs = devm_kzalloc(dev, SEC_DEV_ALG_MAX_LEN * sizeof(char), GFP_KERNEL);
@@ -1177,23 +1177,6 @@ static void sec_probe_uninit(struct hisi_qm *qm)
	hisi_qm_dev_err_uninit(qm);
}

/* Record in sec->iommu_used whether the device is behind a paging IOMMU. */
static void sec_iommu_used_check(struct sec_dev *sec)
{
	struct device *dev = &sec->qm.pdev->dev;
	struct iommu_domain *domain;

	/* Check if iommu is used */
	sec->iommu_used = false;
	domain = iommu_get_domain_for_dev(dev);
	if (!domain)
		return;

	if (domain->type & __IOMMU_DOMAIN_PAGING)
		sec->iommu_used = true;
	dev_info(dev, "SMMU Opened, the iommu type = %u\n",
		domain->type);
}

static int sec_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct sec_dev *sec;
@@ -1212,7 +1195,6 @@ static int sec_probe(struct pci_dev *pdev, const struct pci_device_id *id)
	}

	sec->ctx_q_num = ctx_q_num;
	sec_iommu_used_check(sec);

	ret = sec_probe_init(sec);
	if (ret) {
Loading