Unverified Commit d90cff02 authored by openeuler-ci-bot's avatar openeuler-ci-bot Committed by Gitee
Browse files

!3154 crypto: hisilicon - fix the process to obtain capability register value

Merge Pull Request from: @xiao_jiang_shui 
 
This series of patches modifies the process of obtaining the values of the capability registers.
The valid register values are pre-stored in advance.

issue: https://gitee.com/openeuler/kernel/issues/I8KHDV 
 
Link: https://gitee.com/openeuler/kernel/pulls/3154

 

Reviewed-by: default avatarYang Shen <shenyang39@huawei.com>
Signed-off-by: default avatarJialin Zhang <zhangjialin11@huawei.com>
parents 500c38d3 2c5d6e2a
Loading
Loading
Loading
Loading
+23 −2
Original line number Diff line number Diff line
@@ -57,6 +57,9 @@ struct hpre_ctx;
#define HPRE_DRV_ECDH_MASK_CAP		BIT(2)
#define HPRE_DRV_X25519_MASK_CAP	BIT(5)

static DEFINE_MUTEX(hpre_algs_lock);
static unsigned int hpre_available_devs;

typedef void (*hpre_cb)(struct hpre_ctx *ctx, void *sqe);

struct hpre_rsa_ctx {
@@ -2192,11 +2195,17 @@ static void hpre_unregister_x25519(struct hisi_qm *qm)

int hpre_algs_register(struct hisi_qm *qm)
{
	int ret;
	int ret = 0;

	mutex_lock(&hpre_algs_lock);
	if (hpre_available_devs) {
		hpre_available_devs++;
		goto unlock;
	}

	ret = hpre_register_rsa(qm);
	if (ret)
		return ret;
		goto unlock;

	ret = hpre_register_dh(qm);
	if (ret)
@@ -2210,6 +2219,9 @@ int hpre_algs_register(struct hisi_qm *qm)
	if (ret)
		goto unreg_ecdh;

	hpre_available_devs++;
	mutex_unlock(&hpre_algs_lock);

	return ret;

unreg_ecdh:
@@ -2218,13 +2230,22 @@ int hpre_algs_register(struct hisi_qm *qm)
	hpre_unregister_dh(qm);
unreg_rsa:
	hpre_unregister_rsa(qm);
unlock:
	mutex_unlock(&hpre_algs_lock);
	return ret;
}

/*
 * Decrement the count of available HPRE devices under hpre_algs_lock;
 * only when the last device goes away are the algorithms actually
 * unregistered from the crypto subsystem.
 */
void hpre_algs_unregister(struct hisi_qm *qm)
{
	mutex_lock(&hpre_algs_lock);
	/* Other devices remain registered: keep the algorithms alive. */
	if (--hpre_available_devs)
		goto unlock;

	/* Last device removed: tear down in reverse registration order. */
	hpre_unregister_x25519(qm);
	hpre_unregister_ecdh(qm);
	hpre_unregister_dh(qm);
	hpre_unregister_rsa(qm);

unlock:
	mutex_unlock(&hpre_algs_lock);
}
+59 −33
Original line number Diff line number Diff line
@@ -107,6 +107,7 @@
#define HPRE_VIA_MSI_DSM		1
#define HPRE_SQE_MASK_OFFSET		8
#define HPRE_SQE_MASK_LEN		24
#define HPRE_CTX_Q_NUM_DEF		1

#define HPRE_DFX_BASE		0x301000
#define HPRE_DFX_COMMON1		0x301400
@@ -225,14 +226,18 @@ static const struct hisi_qm_cap_info hpre_basic_info[] = {
	{HPRE_CORE10_ALG_BITMAP_CAP, 0x3170, 0, GENMASK(31, 0), 0x0, 0x10, 0x10}
};

enum hpre_cap_reg_record_idx {
enum hpre_pre_store_cap_idx {
	HPRE_CLUSTER_NUM_CAP_IDX = 0x0,
	HPRE_CORE_ENABLE_BITMAP_CAP_IDX,
	HPRE_DRV_ALG_BITMAP_CAP_IDX,
	HPRE_DEV_ALG_BITMAP_CAP_IDX,
};

static struct hisi_qm_cap_record hpre_cap_reg_record[] = {
	{HPRE_DRV_ALG_BITMAP_CAP,	0x27},
	{HPRE_DEV_ALG_BITMAP_CAP,	0x7F},
static const u32 hpre_pre_store_caps[] = {
	HPRE_CLUSTER_NUM_CAP,
	HPRE_CORE_ENABLE_BITMAP_CAP,
	HPRE_DRV_ALG_BITMAP_CAP,
	HPRE_DEV_ALG_BITMAP_CAP,
};

static const struct hpre_hw_error hpre_hw_errors[] = {
@@ -357,7 +362,7 @@ bool hpre_check_alg_support(struct hisi_qm *qm, u32 alg)
{
	u32 cap_val;

	cap_val = hpre_cap_reg_record[HPRE_DRV_ALG_BITMAP_CAP_IDX].cap_val;
	cap_val = qm->cap_tables.dev_cap_table[HPRE_DRV_ALG_BITMAP_CAP_IDX].cap_val;
	if (alg & cap_val)
		return true;

@@ -433,16 +438,6 @@ static u32 vfs_num;
module_param_cb(vfs_num, &vfs_num_ops, &vfs_num, 0444);
MODULE_PARM_DESC(vfs_num, "Number of VFs to enable(1-63), 0(default)");

static inline int hpre_cluster_num(struct hisi_qm *qm)
{
	return hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_CLUSTER_NUM_CAP, qm->cap_ver);
}

static inline int hpre_cluster_core_mask(struct hisi_qm *qm)
{
	return hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_CORE_ENABLE_BITMAP_CAP, qm->cap_ver);
}

struct hisi_qp *hpre_create_qp(u8 type)
{
	int node = cpu_to_node(smp_processor_id());
@@ -509,13 +504,15 @@ static int hpre_cfg_by_dsm(struct hisi_qm *qm)

static int hpre_set_cluster(struct hisi_qm *qm)
{
	u32 cluster_core_mask = hpre_cluster_core_mask(qm);
	u8 clusters_num = hpre_cluster_num(qm);
	struct device *dev = &qm->pdev->dev;
	unsigned long offset;
	u32 cluster_core_mask;
	u8 clusters_num;
	u32 val = 0;
	int ret, i;

	cluster_core_mask = qm->cap_tables.dev_cap_table[HPRE_CORE_ENABLE_BITMAP_CAP_IDX].cap_val;
	clusters_num = qm->cap_tables.dev_cap_table[HPRE_CLUSTER_NUM_CAP_IDX].cap_val;
	for (i = 0; i < clusters_num; i++) {
		offset = i * HPRE_CLSTR_ADDR_INTRVL;

@@ -710,11 +707,12 @@ static int hpre_set_user_domain_and_cache(struct hisi_qm *qm)

static void hpre_cnt_regs_clear(struct hisi_qm *qm)
{
	u8 clusters_num = hpre_cluster_num(qm);
	unsigned long offset;
	u8 clusters_num;
	int i;

	/* clear clusterX/cluster_ctrl */
	clusters_num = qm->cap_tables.dev_cap_table[HPRE_CLUSTER_NUM_CAP_IDX].cap_val;
	for (i = 0; i < clusters_num; i++) {
		offset = HPRE_CLSTR_BASE + i * HPRE_CLSTR_ADDR_INTRVL;
		writel(0x0, qm->io_base + offset + HPRE_CLUSTER_INQURY);
@@ -1001,13 +999,14 @@ static int hpre_pf_comm_regs_debugfs_init(struct hisi_qm *qm)

static int hpre_cluster_debugfs_init(struct hisi_qm *qm)
{
	u8 clusters_num = hpre_cluster_num(qm);
	struct device *dev = &qm->pdev->dev;
	char buf[HPRE_DBGFS_VAL_MAX_LEN];
	struct debugfs_regset32 *regset;
	struct dentry *tmp_d;
	u8 clusters_num;
	int i, ret;

	clusters_num = qm->cap_tables.dev_cap_table[HPRE_CLUSTER_NUM_CAP_IDX].cap_val;
	for (i = 0; i < clusters_num; i++) {
		ret = snprintf(buf, HPRE_DBGFS_VAL_MAX_LEN, "cluster%d", i);
		if (ret >= HPRE_DBGFS_VAL_MAX_LEN)
@@ -1112,15 +1111,32 @@ static void hpre_debugfs_exit(struct hisi_qm *qm)
	debugfs_remove_recursive(qm->debug.debug_root);
}

static void hpre_pre_store_cap_reg(struct hisi_qm *qm)
static int hpre_pre_store_cap_reg(struct hisi_qm *qm)
{
	int i, size;
	struct hisi_qm_cap_record *hpre_cap;
	struct device *dev = &qm->pdev->dev;
	size_t i, size;

	size = ARRAY_SIZE(hpre_pre_store_caps);
	hpre_cap = devm_kzalloc(dev, sizeof(*hpre_cap) * size, GFP_KERNEL);
	if (!hpre_cap)
		return -ENOMEM;

	size = ARRAY_SIZE(hpre_cap_reg_record);
	for (i = 0; i < size; i++) {
		hpre_cap_reg_record[i].cap_val = hisi_qm_get_hw_info(qm, hpre_basic_info,
						 hpre_cap_reg_record[i].type, qm->cap_ver);
		hpre_cap[i].type = hpre_pre_store_caps[i];
		hpre_cap[i].cap_val = hisi_qm_get_hw_info(qm, hpre_basic_info,
				      hpre_pre_store_caps[i], qm->cap_ver);
	}

	if (hpre_cap[HPRE_CLUSTER_NUM_CAP_IDX].cap_val > HPRE_CLUSTERS_NUM_MAX) {
		dev_err(dev, "Device cluster num %u is out of range for driver supports %d!\n",
			hpre_cap[HPRE_CLUSTER_NUM_CAP_IDX].cap_val, HPRE_CLUSTERS_NUM_MAX);
		return -EINVAL;
	}

	qm->cap_tables.dev_cap_table = hpre_cap;

	return 0;
}

static int hpre_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
@@ -1157,9 +1173,14 @@ static int hpre_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
	}

	/* Fetch and save the value of capability registers */
	hpre_pre_store_cap_reg(qm);
	ret = hpre_pre_store_cap_reg(qm);
	if (ret) {
		pci_err(pdev, "Failed to pre-store capability registers!\n");
		hisi_qm_uninit(qm);
		return ret;
	}

	alg_msk = hpre_cap_reg_record[HPRE_DEV_ALG_BITMAP_CAP_IDX].cap_val;
	alg_msk = qm->cap_tables.dev_cap_table[HPRE_DEV_ALG_BITMAP_CAP_IDX].cap_val;
	ret = hisi_qm_set_algs(qm, alg_msk, hpre_dev_algs, ARRAY_SIZE(hpre_dev_algs));
	if (ret) {
		pci_err(pdev, "Failed to set hpre algs!\n");
@@ -1173,11 +1194,12 @@ static int hpre_show_last_regs_init(struct hisi_qm *qm)
{
	int cluster_dfx_regs_num =  ARRAY_SIZE(hpre_cluster_dfx_regs);
	int com_dfx_regs_num = ARRAY_SIZE(hpre_com_dfx_regs);
	u8 clusters_num = hpre_cluster_num(qm);
	struct qm_debug *debug = &qm->debug;
	void __iomem *io_base;
	u8 clusters_num;
	int i, j, idx;

	clusters_num = qm->cap_tables.dev_cap_table[HPRE_CLUSTER_NUM_CAP_IDX].cap_val;
	debug->last_words = kcalloc(cluster_dfx_regs_num * clusters_num +
			com_dfx_regs_num, sizeof(unsigned int), GFP_KERNEL);
	if (!debug->last_words)
@@ -1214,10 +1236,10 @@ static void hpre_show_last_dfx_regs(struct hisi_qm *qm)
{
	int cluster_dfx_regs_num =  ARRAY_SIZE(hpre_cluster_dfx_regs);
	int com_dfx_regs_num = ARRAY_SIZE(hpre_com_dfx_regs);
	u8 clusters_num = hpre_cluster_num(qm);
	struct qm_debug *debug = &qm->debug;
	struct pci_dev *pdev = qm->pdev;
	void __iomem *io_base;
	u8 clusters_num;
	int i, j, idx;
	u32 val;

@@ -1232,6 +1254,7 @@ static void hpre_show_last_dfx_regs(struct hisi_qm *qm)
			  hpre_com_dfx_regs[i].name, debug->last_words[i], val);
	}

	clusters_num = qm->cap_tables.dev_cap_table[HPRE_CLUSTER_NUM_CAP_IDX].cap_val;
	for (i = 0; i < clusters_num; i++) {
		io_base = qm->io_base + hpre_cluster_offsets[i];
		for (j = 0; j <  cluster_dfx_regs_num; j++) {
@@ -1406,10 +1429,11 @@ static int hpre_probe(struct pci_dev *pdev, const struct pci_device_id *id)
	if (ret)
		dev_warn(&pdev->dev, "init debugfs fail!\n");

	ret = hisi_qm_alg_register(qm, &hpre_devices);
	hisi_qm_add_list(qm, &hpre_devices);
	ret = hisi_qm_alg_register(qm, &hpre_devices, HPRE_CTX_Q_NUM_DEF);
	if (ret < 0) {
		pci_err(pdev, "fail to register algs to crypto!\n");
		goto err_with_qm_start;
		goto err_qm_del_list;
	}

	ret = qm_register_uacce(qm);
@@ -1429,9 +1453,10 @@ static int hpre_probe(struct pci_dev *pdev, const struct pci_device_id *id)
	return 0;

err_with_alg_register:
	hisi_qm_alg_unregister(qm, &hpre_devices);
	hisi_qm_alg_unregister(qm, &hpre_devices, HPRE_CTX_Q_NUM_DEF);

err_with_qm_start:
err_qm_del_list:
	hisi_qm_del_list(qm, &hpre_devices);
	hpre_debugfs_exit(qm);
	hisi_qm_stop(qm, QM_NORMAL);

@@ -1451,7 +1476,8 @@ static void hpre_remove(struct pci_dev *pdev)

	hisi_qm_pm_uninit(qm);
	hisi_qm_wait_task_finish(qm, &hpre_devices);
	hisi_qm_alg_unregister(qm, &hpre_devices);
	hisi_qm_alg_unregister(qm, &hpre_devices, HPRE_CTX_Q_NUM_DEF);
	hisi_qm_del_list(qm, &hpre_devices);
	if (qm->fun_type == QM_HW_PF && qm->vfs_num)
		hisi_qm_sriov_disable(pdev, true);

+56 −56
Original line number Diff line number Diff line
@@ -302,11 +302,11 @@ enum qm_basic_type {
	QM_VF_IRQ_NUM_CAP,
};

enum qm_irq_type_caps_idx {
	QM_EQ_IRQ_TYPE_CAP_IDX,
enum qm_pre_store_cap_idx {
	QM_EQ_IRQ_TYPE_CAP_IDX = 0x0,
	QM_AEQ_IRQ_TYPE_CAP_IDX,
	QM_ABN_IRQ_TYPE_CAP_IDX,
	QM_PF2VF_IRQ_TYPE_CAP_IDX
	QM_PF2VF_IRQ_TYPE_CAP_IDX,
};

static const struct hisi_qm_cap_info qm_cap_info_comm[] = {
@@ -338,11 +338,11 @@ static const struct hisi_qm_cap_info qm_basic_info[] = {
	{QM_VF_IRQ_NUM_CAP,     0x311c,   0,  GENMASK(15, 0), 0x1,       0x2,       0x3},
};

static struct hisi_qm_cap_record qm_irq_type_caps[] = {
	{QM_EQ_IRQ_TYPE_CAP,    0x10000},
	{QM_AEQ_IRQ_TYPE_CAP,   0x10001},
	{QM_ABN_IRQ_TYPE_CAP,   0x10003},
	{QM_PF2VF_IRQ_TYPE_CAP, 0x10002},
static const u32 qm_pre_store_caps[] = {
	QM_EQ_IRQ_TYPE_CAP,
	QM_AEQ_IRQ_TYPE_CAP,
	QM_ABN_IRQ_TYPE_CAP,
	QM_PF2VF_IRQ_TYPE_CAP,
};

struct qm_mailbox {
@@ -4963,62 +4963,47 @@ static void qm_cmd_process(struct work_struct *cmd_process)
}

/**
 * hisi_qm_alg_register() - Register alg to crypto and add qm to qm_list.
 * hisi_qm_alg_register() - Register alg to crypto.
 * @qm: The qm needs add.
 * @qm_list: The qm list.
 * @guard: Guard of qp_num.
 *
 * This function adds qm to qm list, and will register algorithm to
 * crypto when the qm list is empty.
 * Register the algorithm to crypto when the function satisfies the guard.
 */
int hisi_qm_alg_register(struct hisi_qm *qm, struct hisi_qm_list *qm_list)
int hisi_qm_alg_register(struct hisi_qm *qm, struct hisi_qm_list *qm_list, int guard)
{
	struct device *dev = &qm->pdev->dev;
	int flag = 0;
	int ret = 0;

	mutex_lock(&qm_list->lock);
	if (list_empty(&qm_list->list))
		flag = 1;
	list_add_tail(&qm->list, &qm_list->list);
	mutex_unlock(&qm_list->lock);

	if (qm->ver <= QM_HW_V2 && qm->use_sva) {
		dev_info(dev, "HW V2 not both use uacce sva mode and hardware crypto algs.\n");
		return 0;
	}

	if (flag) {
		ret = qm_list->register_to_crypto(qm);
		if (ret) {
			mutex_lock(&qm_list->lock);
			list_del(&qm->list);
			mutex_unlock(&qm_list->lock);
		}
	if (qm->qp_num < guard) {
		dev_info(dev, "qp_num is less than task need.\n");
		return 0;
	}

	return ret;
	return qm_list->register_to_crypto(qm);
}
EXPORT_SYMBOL_GPL(hisi_qm_alg_register);

/**
 * hisi_qm_alg_unregister() - Unregister alg from crypto and delete qm from
 * qm list.
 * hisi_qm_alg_unregister() - Unregister alg from crypto.
 * @qm: The qm needs delete.
 * @qm_list: The qm list.
 * @guard: Guard of qp_num.
 *
 * This function deletes qm from qm list, and will unregister algorithm
 * from crypto when the qm list is empty.
 * Unregister the algorithm from crypto when the last function satisfies the guard.
 */
void hisi_qm_alg_unregister(struct hisi_qm *qm, struct hisi_qm_list *qm_list)
void hisi_qm_alg_unregister(struct hisi_qm *qm, struct hisi_qm_list *qm_list, int guard)
{
	mutex_lock(&qm_list->lock);
	list_del(&qm->list);
	mutex_unlock(&qm_list->lock);

	if (qm->ver <= QM_HW_V2 && qm->use_sva)
		return;

	if (list_empty(&qm_list->list))
	if (qm->qp_num < guard)
		return;

	qm_list->unregister_from_crypto(qm);
}
EXPORT_SYMBOL_GPL(hisi_qm_alg_unregister);
@@ -5031,7 +5016,7 @@ static void qm_unregister_abnormal_irq(struct hisi_qm *qm)
	if (qm->fun_type == QM_HW_VF)
		return;

	val = qm_irq_type_caps[QM_ABN_IRQ_TYPE_CAP_IDX].cap_val;
	val = qm->cap_tables.qm_cap_table[QM_ABN_IRQ_TYPE_CAP_IDX].cap_val;
	if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_ABN_IRQ_TYPE_MASK))
		return;

@@ -5048,7 +5033,7 @@ static int qm_register_abnormal_irq(struct hisi_qm *qm)
	if (qm->fun_type == QM_HW_VF)
		return 0;

	val = qm_irq_type_caps[QM_ABN_IRQ_TYPE_CAP_IDX].cap_val;
	val = qm->cap_tables.qm_cap_table[QM_ABN_IRQ_TYPE_CAP_IDX].cap_val;
	if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_ABN_IRQ_TYPE_MASK))
		return 0;

@@ -5065,7 +5050,7 @@ static void qm_unregister_mb_cmd_irq(struct hisi_qm *qm)
	struct pci_dev *pdev = qm->pdev;
	u32 irq_vector, val;

	val = qm_irq_type_caps[QM_PF2VF_IRQ_TYPE_CAP_IDX].cap_val;
	val = qm->cap_tables.qm_cap_table[QM_PF2VF_IRQ_TYPE_CAP_IDX].cap_val;
	if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK))
		return;

@@ -5079,7 +5064,7 @@ static int qm_register_mb_cmd_irq(struct hisi_qm *qm)
	u32 irq_vector, val;
	int ret;

	val = qm_irq_type_caps[QM_PF2VF_IRQ_TYPE_CAP_IDX].cap_val;
	val = qm->cap_tables.qm_cap_table[QM_PF2VF_IRQ_TYPE_CAP_IDX].cap_val;
	if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK))
		return 0;

@@ -5096,7 +5081,7 @@ static void qm_unregister_aeq_irq(struct hisi_qm *qm)
	struct pci_dev *pdev = qm->pdev;
	u32 irq_vector, val;

	val = qm_irq_type_caps[QM_AEQ_IRQ_TYPE_CAP_IDX].cap_val;
	val = qm->cap_tables.qm_cap_table[QM_AEQ_IRQ_TYPE_CAP_IDX].cap_val;
	if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK))
		return;

@@ -5110,7 +5095,7 @@ static int qm_register_aeq_irq(struct hisi_qm *qm)
	u32 irq_vector, val;
	int ret;

	val = qm_irq_type_caps[QM_AEQ_IRQ_TYPE_CAP_IDX].cap_val;
	val = qm->cap_tables.qm_cap_table[QM_AEQ_IRQ_TYPE_CAP_IDX].cap_val;
	if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK))
		return 0;

@@ -5128,7 +5113,7 @@ static void qm_unregister_eq_irq(struct hisi_qm *qm)
	struct pci_dev *pdev = qm->pdev;
	u32 irq_vector, val;

	val = qm_irq_type_caps[QM_EQ_IRQ_TYPE_CAP_IDX].cap_val;
	val = qm->cap_tables.qm_cap_table[QM_EQ_IRQ_TYPE_CAP_IDX].cap_val;
	if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK))
		return;

@@ -5142,7 +5127,7 @@ static int qm_register_eq_irq(struct hisi_qm *qm)
	u32 irq_vector, val;
	int ret;

	val = qm_irq_type_caps[QM_EQ_IRQ_TYPE_CAP_IDX].cap_val;
	val = qm->cap_tables.qm_cap_table[QM_EQ_IRQ_TYPE_CAP_IDX].cap_val;
	if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK))
		return 0;

@@ -5229,17 +5214,29 @@ static int qm_get_qp_num(struct hisi_qm *qm)
	return 0;
}

static void qm_pre_store_irq_type_caps(struct hisi_qm *qm)
static int qm_pre_store_irq_type_caps(struct hisi_qm *qm)
{
	int i, size;
	struct hisi_qm_cap_record *qm_cap;
	struct pci_dev *pdev = qm->pdev;
	size_t i, size;

	size = ARRAY_SIZE(qm_pre_store_caps);
	qm_cap = devm_kzalloc(&pdev->dev, sizeof(*qm_cap) * size, GFP_KERNEL);
	if (!qm_cap)
		return -ENOMEM;

	size = ARRAY_SIZE(qm_irq_type_caps);
	for (i = 0; i < size; i++)
		qm_irq_type_caps[i].cap_val = hisi_qm_get_hw_info(qm, qm_basic_info,
					      qm_irq_type_caps[i].type, qm->cap_ver);
	for (i = 0; i < size; i++) {
		qm_cap[i].type = qm_pre_store_caps[i];
		qm_cap[i].cap_val = hisi_qm_get_hw_info(qm, qm_basic_info,
							qm_pre_store_caps[i], qm->cap_ver);
	}

static void qm_get_hw_caps(struct hisi_qm *qm)
	qm->cap_tables.qm_cap_table = qm_cap;

	return 0;
}

static int qm_get_hw_caps(struct hisi_qm *qm)
{
	const struct hisi_qm_cap_info *cap_info = qm->fun_type == QM_HW_PF ?
						  qm_cap_info_pf : qm_cap_info_vf;
@@ -5272,7 +5269,7 @@ static void qm_get_hw_caps(struct hisi_qm *qm)
	}

	/* Fetch and save the value of irq type related capability registers */
	qm_pre_store_irq_type_caps(qm);
	return qm_pre_store_irq_type_caps(qm);
}

static int qm_get_pci_res(struct hisi_qm *qm)
@@ -5294,7 +5291,10 @@ static int qm_get_pci_res(struct hisi_qm *qm)
		goto err_request_mem_regions;
	}

	qm_get_hw_caps(qm);
	ret = qm_get_hw_caps(qm);
	if (ret)
		goto err_ioremap;

	if (test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps)) {
		qm->db_interval = QM_QP_DB_INTERVAL;
		qm->db_phys_base = pci_resource_start(pdev, PCI_BAR_4);
+1 −1
Original line number Diff line number Diff line
@@ -220,7 +220,7 @@ enum sec_cap_type {
};

enum sec_cap_reg_record_idx {
	SEC_DRV_ALG_BITMAP_LOW_IDX,
	SEC_DRV_ALG_BITMAP_LOW_IDX = 0x0,
	SEC_DRV_ALG_BITMAP_HIGH_IDX,
	SEC_DEV_ALG_BITMAP_LOW_IDX,
	SEC_DEV_ALG_BITMAP_HIGH_IDX,
+29 −3
Original line number Diff line number Diff line
@@ -104,6 +104,9 @@
#define IV_CTR_INIT		0x1
#define IV_BYTE_OFFSET		0x8

static DEFINE_MUTEX(sec_algs_lock);
static unsigned int sec_available_devs;

struct sec_skcipher {
	u64 alg_msk;
	struct skcipher_alg alg;
@@ -2536,18 +2539,34 @@ static int sec_register_aead(u64 alg_mask)
int sec_register_to_crypto(struct hisi_qm *qm)
{
	u64 alg_mask;
	int ret;
	int ret = 0;

	alg_mask = sec_get_alg_bitmap(qm, SEC_DRV_ALG_BITMAP_HIGH_IDX,
				      SEC_DRV_ALG_BITMAP_LOW_IDX);

	mutex_lock(&sec_algs_lock);
	if (sec_available_devs) {
		sec_available_devs++;
		goto unlock;
	}

	ret = sec_register_skcipher(alg_mask);
	if (ret)
		return ret;
		goto unlock;

	ret = sec_register_aead(alg_mask);
	if (ret)
		sec_unregister_skcipher(alg_mask, ARRAY_SIZE(sec_skciphers));
		goto unreg_skcipher;

	sec_available_devs++;
	mutex_unlock(&sec_algs_lock);

	return 0;

unreg_skcipher:
	sec_unregister_skcipher(alg_mask, ARRAY_SIZE(sec_skciphers));
unlock:
	mutex_unlock(&sec_algs_lock);
	return ret;
}

@@ -2558,6 +2577,13 @@ void sec_unregister_from_crypto(struct hisi_qm *qm)
	alg_mask = sec_get_alg_bitmap(qm, SEC_DRV_ALG_BITMAP_HIGH_IDX,
				      SEC_DRV_ALG_BITMAP_LOW_IDX);

	mutex_lock(&sec_algs_lock);
	if (--sec_available_devs)
		goto unlock;

	sec_unregister_aead(alg_mask, ARRAY_SIZE(sec_aeads));
	sec_unregister_skcipher(alg_mask, ARRAY_SIZE(sec_skciphers));

unlock:
	mutex_unlock(&sec_algs_lock);
}
Loading