Unverified Commit aed150e2 authored by openeuler-ci-bot, committed by Gitee
Browse files

!14152 crypto: hisilicon/sec2 - the aead algorithm of sec2 is fixed.

Merge Pull Request from: @ci-robot 
 
PR sync from: Weili Qian <qianweili@huawei.com>
https://mailweb.openeuler.org/hyperkitty/list/kernel@openeuler.org/message/MMZOKPOCXVTIHV22RJDLUQLMH4UNH676/ 
From: JiangShui Yang <yangjiangshui@h-partners.com>

Weili Qian (2):
  crypto: hisilicon/qm - disable error report before flr
  crypto: hisilicon/trng - support to obtain random numbers from soft
    algorithm

Wenkai Lin (2):
  crypto: hisilicon/sec2 - fix for aead icv error
  crypto: hisilicon/sec2 - fix for aead invalid authsize


-- 
2.43.0
 
https://gitee.com/openeuler/kernel/issues/IBATCK 
 
Link: https://gitee.com/openeuler/kernel/pulls/14152

 

Reviewed-by: Yang Shen <shenyang39@huawei.com>
Signed-off-by: Zhang Peng <zhangpeng362@huawei.com>
parents 32b5a32c e623afb4
Loading
Loading
Loading
Loading
+1 −0
Original line number Original line Diff line number Diff line
@@ -80,5 +80,6 @@ config CRYPTO_DEV_HISI_TRNG
	depends on ARM64 && ACPI
	depends on ARM64 && ACPI
	select HW_RANDOM
	select HW_RANDOM
	select CRYPTO_RNG
	select CRYPTO_RNG
	select CRYPTO_DRBG_CTR
	help
	help
	  Support for HiSilicon TRNG Driver.
	  Support for HiSilicon TRNG Driver.
+32 −17
Original line number Original line Diff line number Diff line
@@ -4670,22 +4670,30 @@ void hisi_qm_reset_prepare(struct pci_dev *pdev)
	u32 delay = 0;
	u32 delay = 0;
	int ret;
	int ret;


	hisi_qm_dev_err_uninit(pf_qm);
	while (true) {
		ret = qm_reset_prepare_ready(qm);
		if (ret) {
			pci_err(pdev, "FLR not ready!\n");
			return;
		}


		hisi_qm_dev_err_uninit(pf_qm);
		/*
		/*
	 * Check whether there is an ECC mbit error, If it occurs, need to
		 * Check whether there is an ECC mbit error,
	 * wait for soft reset to fix it.
		 * If it occurs, need to wait for soft reset
		 * to fix it.
		 */
		 */
	while (qm_check_dev_error(pf_qm)) {
		if (qm_check_dev_error(qm)) {
		msleep(++delay);
			qm_reset_bit_clear(qm);
		if (delay > QM_RESET_WAIT_TIMEOUT)
			if (delay > QM_RESET_WAIT_TIMEOUT) {
				pci_err(pdev, "the hardware error was not recovered!\n");
				return;
				return;
			}
			}


	ret = qm_reset_prepare_ready(qm);
			msleep(++delay);
	if (ret) {
		} else {
		pci_err(pdev, "FLR not ready!\n");
			break;
		return;
		}
	}
	}


	/* PF obtains the information of VF by querying the register. */
	/* PF obtains the information of VF by querying the register. */
@@ -4699,16 +4707,23 @@ void hisi_qm_reset_prepare(struct pci_dev *pdev)
	ret = hisi_qm_stop(qm, QM_DOWN);
	ret = hisi_qm_stop(qm, QM_DOWN);
	if (ret) {
	if (ret) {
		pci_err(pdev, "Failed to stop QM, ret = %d.\n", ret);
		pci_err(pdev, "Failed to stop QM, ret = %d.\n", ret);
		hisi_qm_set_hw_reset(qm, QM_RESET_STOP_TX_OFFSET);
		goto err_prepare;
		hisi_qm_set_hw_reset(qm, QM_RESET_STOP_RX_OFFSET);
		return;
	}
	}


	ret = qm_wait_vf_prepare_finish(qm);
	ret = qm_wait_vf_prepare_finish(qm);
	if (ret)
	if (ret)
		pci_err(pdev, "failed to stop by vfs in FLR!\n");
		pci_err(pdev, "failed to stop by vfs in FLR!\n");


	hisi_qm_cache_wb(qm);
	pci_info(pdev, "FLR resetting...\n");
	pci_info(pdev, "FLR resetting...\n");
	return;

err_prepare:
	pci_info(pdev, "FLR resetting prepare failed!\n");
	hisi_qm_set_hw_reset(qm, QM_RESET_STOP_TX_OFFSET);
	hisi_qm_set_hw_reset(qm, QM_RESET_STOP_RX_OFFSET);
	atomic_set(&qm->status.flags, QM_STOP);
	hisi_qm_cache_wb(qm);
}
}
EXPORT_SYMBOL_GPL(hisi_qm_reset_prepare);
EXPORT_SYMBOL_GPL(hisi_qm_reset_prepare);


+1 −2
Original line number Original line Diff line number Diff line
@@ -37,6 +37,7 @@ struct sec_aead_req {
	u8 *a_ivin;
	u8 *a_ivin;
	dma_addr_t a_ivin_dma;
	dma_addr_t a_ivin_dma;
	struct aead_request *aead_req;
	struct aead_request *aead_req;
	bool fallback;
};
};


/* SEC request of Crypto */
/* SEC request of Crypto */
@@ -89,9 +90,7 @@ struct sec_auth_ctx {
	dma_addr_t a_key_dma;
	dma_addr_t a_key_dma;
	u8 *a_key;
	u8 *a_key;
	u8 a_key_len;
	u8 a_key_len;
	u8 mac_len;
	u8 a_alg;
	u8 a_alg;
	bool fallback;
	struct crypto_shash *hash_tfm;
	struct crypto_shash *hash_tfm;
	struct crypto_aead *fallback_aead_tfm;
	struct crypto_aead *fallback_aead_tfm;
};
};
+75 −86
Original line number Original line Diff line number Diff line
@@ -947,15 +947,14 @@ static int sec_aead_mac_init(struct sec_aead_req *req)
	struct aead_request *aead_req = req->aead_req;
	struct aead_request *aead_req = req->aead_req;
	struct crypto_aead *tfm = crypto_aead_reqtfm(aead_req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(aead_req);
	size_t authsize = crypto_aead_authsize(tfm);
	size_t authsize = crypto_aead_authsize(tfm);
	u8 *mac_out = req->out_mac;
	struct scatterlist *sgl = aead_req->src;
	struct scatterlist *sgl = aead_req->src;
	u8 *mac_out = req->out_mac;
	size_t copy_size;
	size_t copy_size;
	off_t skip_size;
	off_t skip_size;


	/* Copy input mac */
	/* Copy input mac */
	skip_size = aead_req->assoclen + aead_req->cryptlen - authsize;
	skip_size = aead_req->assoclen + aead_req->cryptlen - authsize;
	copy_size = sg_pcopy_to_buffer(sgl, sg_nents(sgl), mac_out,
	copy_size = sg_pcopy_to_buffer(sgl, sg_nents(sgl), mac_out, authsize, skip_size);
				       authsize, skip_size);
	if (unlikely(copy_size != authsize))
	if (unlikely(copy_size != authsize))
		return -EINVAL;
		return -EINVAL;


@@ -1119,10 +1118,7 @@ static int sec_aead_setauthsize(struct crypto_aead *aead, unsigned int authsize)
	struct sec_ctx *ctx = crypto_tfm_ctx(tfm);
	struct sec_ctx *ctx = crypto_tfm_ctx(tfm);
	struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
	struct sec_auth_ctx *a_ctx = &ctx->a_ctx;


	if (unlikely(a_ctx->fallback_aead_tfm))
	return crypto_aead_setauthsize(a_ctx->fallback_aead_tfm, authsize);
	return crypto_aead_setauthsize(a_ctx->fallback_aead_tfm, authsize);

	return 0;
}
}


static int sec_aead_fallback_setkey(struct sec_auth_ctx *a_ctx,
static int sec_aead_fallback_setkey(struct sec_auth_ctx *a_ctx,
@@ -1138,7 +1134,6 @@ static int sec_aead_fallback_setkey(struct sec_auth_ctx *a_ctx,
static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key,
static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key,
			   const u32 keylen, const enum sec_hash_alg a_alg,
			   const u32 keylen, const enum sec_hash_alg a_alg,
			   const enum sec_calg c_alg,
			   const enum sec_calg c_alg,
			   const enum sec_mac_len mac_len,
			   const enum sec_cmode c_mode)
			   const enum sec_cmode c_mode)
{
{
	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
@@ -1150,7 +1145,6 @@ static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key,


	ctx->a_ctx.a_alg = a_alg;
	ctx->a_ctx.a_alg = a_alg;
	ctx->c_ctx.c_alg = c_alg;
	ctx->c_ctx.c_alg = c_alg;
	ctx->a_ctx.mac_len = mac_len;
	c_ctx->c_mode = c_mode;
	c_ctx->c_mode = c_mode;


	if (c_mode == SEC_CMODE_CCM || c_mode == SEC_CMODE_GCM) {
	if (c_mode == SEC_CMODE_CCM || c_mode == SEC_CMODE_GCM) {
@@ -1161,13 +1155,7 @@ static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key,
		}
		}
		memcpy(c_ctx->c_key, key, keylen);
		memcpy(c_ctx->c_key, key, keylen);


		if (unlikely(a_ctx->fallback_aead_tfm)) {
		return sec_aead_fallback_setkey(a_ctx, tfm, key, keylen);
			ret = sec_aead_fallback_setkey(a_ctx, tfm, key, keylen);
			if (ret)
				return ret;
		}

		return 0;
	}
	}


	ret = crypto_authenc_extractkeys(&keys, key, keylen);
	ret = crypto_authenc_extractkeys(&keys, key, keylen);
@@ -1186,10 +1174,15 @@ static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key,
		goto bad_key;
		goto bad_key;
	}
	}


	if ((ctx->a_ctx.mac_len & SEC_SQE_LEN_RATE_MASK)  ||
	if (ctx->a_ctx.a_key_len & SEC_SQE_LEN_RATE_MASK) {
	    (ctx->a_ctx.a_key_len & SEC_SQE_LEN_RATE_MASK)) {
		ret = -EINVAL;
		ret = -EINVAL;
		dev_err(dev, "MAC or AUTH key length error!\n");
		dev_err(dev, "AUTH key length error!\n");
		goto bad_key;
	}

	ret = sec_aead_fallback_setkey(a_ctx, tfm, key, keylen);
	if (ret) {
		dev_err(dev, "set sec fallback key err!\n");
		goto bad_key;
		goto bad_key;
	}
	}


@@ -1201,27 +1194,19 @@ static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key,
}
}




#define GEN_SEC_AEAD_SETKEY_FUNC(name, aalg, calg, maclen, cmode)	\
#define GEN_SEC_AEAD_SETKEY_FUNC(name, aalg, calg, cmode)				\
static int sec_setkey_##name(struct crypto_aead *tfm, const u8 *key,	\
static int sec_setkey_##name(struct crypto_aead *tfm, const u8 *key, u32 keylen)	\
	u32 keylen)							\
{											\
{											\
	return sec_aead_setkey(tfm, key, keylen, aalg, calg, maclen, cmode);\
	return sec_aead_setkey(tfm, key, keylen, aalg, calg, cmode);			\
}
}


GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha1, SEC_A_HMAC_SHA1,
GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha1, SEC_A_HMAC_SHA1, SEC_CALG_AES, SEC_CMODE_CBC)
			 SEC_CALG_AES, SEC_HMAC_SHA1_MAC, SEC_CMODE_CBC)
GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha256, SEC_A_HMAC_SHA256, SEC_CALG_AES, SEC_CMODE_CBC)
GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha256, SEC_A_HMAC_SHA256,
GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha512, SEC_A_HMAC_SHA512, SEC_CALG_AES, SEC_CMODE_CBC)
			 SEC_CALG_AES, SEC_HMAC_SHA256_MAC, SEC_CMODE_CBC)
GEN_SEC_AEAD_SETKEY_FUNC(aes_ccm, 0, SEC_CALG_AES, SEC_CMODE_CCM)
GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha512, SEC_A_HMAC_SHA512,
GEN_SEC_AEAD_SETKEY_FUNC(aes_gcm, 0, SEC_CALG_AES, SEC_CMODE_GCM)
			 SEC_CALG_AES, SEC_HMAC_SHA512_MAC, SEC_CMODE_CBC)
GEN_SEC_AEAD_SETKEY_FUNC(sm4_ccm, 0, SEC_CALG_SM4, SEC_CMODE_CCM)
GEN_SEC_AEAD_SETKEY_FUNC(aes_ccm, 0, SEC_CALG_AES,
GEN_SEC_AEAD_SETKEY_FUNC(sm4_gcm, 0, SEC_CALG_SM4, SEC_CMODE_GCM)
			 SEC_HMAC_CCM_MAC, SEC_CMODE_CCM)
GEN_SEC_AEAD_SETKEY_FUNC(aes_gcm, 0, SEC_CALG_AES,
			 SEC_HMAC_GCM_MAC, SEC_CMODE_GCM)
GEN_SEC_AEAD_SETKEY_FUNC(sm4_ccm, 0, SEC_CALG_SM4,
			 SEC_HMAC_CCM_MAC, SEC_CMODE_CCM)
GEN_SEC_AEAD_SETKEY_FUNC(sm4_gcm, 0, SEC_CALG_SM4,
			 SEC_HMAC_GCM_MAC, SEC_CMODE_GCM)


static int sec_aead_sgl_map(struct sec_ctx *ctx, struct sec_req *req)
static int sec_aead_sgl_map(struct sec_ctx *ctx, struct sec_req *req)
{
{
@@ -1443,9 +1428,10 @@ static void sec_skcipher_callback(struct sec_ctx *ctx, struct sec_req *req,
static void set_aead_auth_iv(struct sec_ctx *ctx, struct sec_req *req)
static void set_aead_auth_iv(struct sec_ctx *ctx, struct sec_req *req)
{
{
	struct aead_request *aead_req = req->aead_req.aead_req;
	struct aead_request *aead_req = req->aead_req.aead_req;
	struct sec_cipher_req *c_req = &req->c_req;
	struct crypto_aead *tfm = crypto_aead_reqtfm(aead_req);
	size_t authsize = crypto_aead_authsize(tfm);
	struct sec_aead_req *a_req = &req->aead_req;
	struct sec_aead_req *a_req = &req->aead_req;
	size_t authsize = ctx->a_ctx.mac_len;
	struct sec_cipher_req *c_req = &req->c_req;
	u32 data_size = aead_req->cryptlen;
	u32 data_size = aead_req->cryptlen;
	u8 flage = 0;
	u8 flage = 0;
	u8 cm, cl;
	u8 cm, cl;
@@ -1486,10 +1472,8 @@ static void set_aead_auth_iv(struct sec_ctx *ctx, struct sec_req *req)
static void sec_aead_set_iv(struct sec_ctx *ctx, struct sec_req *req)
static void sec_aead_set_iv(struct sec_ctx *ctx, struct sec_req *req)
{
{
	struct aead_request *aead_req = req->aead_req.aead_req;
	struct aead_request *aead_req = req->aead_req.aead_req;
	struct crypto_aead *tfm = crypto_aead_reqtfm(aead_req);
	size_t authsize = crypto_aead_authsize(tfm);
	struct sec_cipher_req *c_req = &req->c_req;
	struct sec_aead_req *a_req = &req->aead_req;
	struct sec_aead_req *a_req = &req->aead_req;
	struct sec_cipher_req *c_req = &req->c_req;


	memcpy(c_req->c_ivin, aead_req->iv, ctx->c_ctx.ivsize);
	memcpy(c_req->c_ivin, aead_req->iv, ctx->c_ctx.ivsize);


@@ -1497,15 +1481,11 @@ static void sec_aead_set_iv(struct sec_ctx *ctx, struct sec_req *req)
		/*
		/*
		 * CCM 16Byte Cipher_IV: {1B_Flage,13B_IV,2B_counter},
		 * CCM 16Byte Cipher_IV: {1B_Flage,13B_IV,2B_counter},
		 * the  counter must set to 0x01
		 * the  counter must set to 0x01
		 * CCM 16Byte Auth_IV: {1B_AFlage,13B_IV,2B_Ptext_length}
		 */
		 */
		ctx->a_ctx.mac_len = authsize;
		/* CCM 16Byte Auth_IV: {1B_AFlage,13B_IV,2B_Ptext_length} */
		set_aead_auth_iv(ctx, req);
		set_aead_auth_iv(ctx, req);
	}
	} else if (ctx->c_ctx.c_mode == SEC_CMODE_GCM) {

		/* GCM 12Byte Cipher_IV == Auth_IV */
		/* GCM 12Byte Cipher_IV == Auth_IV */
	if (ctx->c_ctx.c_mode == SEC_CMODE_GCM) {
		ctx->a_ctx.mac_len = authsize;
		memcpy(a_req->a_ivin, c_req->c_ivin, SEC_AIV_SIZE);
		memcpy(a_req->a_ivin, c_req->c_ivin, SEC_AIV_SIZE);
	}
	}
}
}
@@ -1515,9 +1495,11 @@ static void sec_auth_bd_fill_xcm(struct sec_auth_ctx *ctx, int dir,
{
{
	struct sec_aead_req *a_req = &req->aead_req;
	struct sec_aead_req *a_req = &req->aead_req;
	struct aead_request *aq = a_req->aead_req;
	struct aead_request *aq = a_req->aead_req;
	struct crypto_aead *tfm = crypto_aead_reqtfm(aq);
	size_t authsize = crypto_aead_authsize(tfm);


	/* C_ICV_Len is MAC size, 0x4 ~ 0x10 */
	/* C_ICV_Len is MAC size, 0x4 ~ 0x10 */
	sec_sqe->type2.icvw_kmode |= cpu_to_le16((u16)ctx->mac_len);
	sec_sqe->type2.icvw_kmode |= cpu_to_le16((u16)authsize);


	/* mode set to CCM/GCM, don't set {A_Alg, AKey_Len, MAC_Len} */
	/* mode set to CCM/GCM, don't set {A_Alg, AKey_Len, MAC_Len} */
	sec_sqe->type2.a_key_addr = sec_sqe->type2.c_key_addr;
	sec_sqe->type2.a_key_addr = sec_sqe->type2.c_key_addr;
@@ -1541,9 +1523,11 @@ static void sec_auth_bd_fill_xcm_v3(struct sec_auth_ctx *ctx, int dir,
{
{
	struct sec_aead_req *a_req = &req->aead_req;
	struct sec_aead_req *a_req = &req->aead_req;
	struct aead_request *aq = a_req->aead_req;
	struct aead_request *aq = a_req->aead_req;
	struct crypto_aead *tfm = crypto_aead_reqtfm(aq);
	size_t authsize = crypto_aead_authsize(tfm);


	/* C_ICV_Len is MAC size, 0x4 ~ 0x10 */
	/* C_ICV_Len is MAC size, 0x4 ~ 0x10 */
	sqe3->c_icv_key |= cpu_to_le16((u16)ctx->mac_len << SEC_MAC_OFFSET_V3);
	sqe3->c_icv_key |= cpu_to_le16((u16)authsize << SEC_MAC_OFFSET_V3);


	/* mode set to CCM/GCM, don't set {A_Alg, AKey_Len, MAC_Len} */
	/* mode set to CCM/GCM, don't set {A_Alg, AKey_Len, MAC_Len} */
	sqe3->a_key_addr = sqe3->c_key_addr;
	sqe3->a_key_addr = sqe3->c_key_addr;
@@ -1567,11 +1551,12 @@ static void sec_auth_bd_fill_ex(struct sec_auth_ctx *ctx, int dir,
	struct sec_aead_req *a_req = &req->aead_req;
	struct sec_aead_req *a_req = &req->aead_req;
	struct sec_cipher_req *c_req = &req->c_req;
	struct sec_cipher_req *c_req = &req->c_req;
	struct aead_request *aq = a_req->aead_req;
	struct aead_request *aq = a_req->aead_req;
	struct crypto_aead *tfm = crypto_aead_reqtfm(aq);
	size_t authsize = crypto_aead_authsize(tfm);


	sec_sqe->type2.a_key_addr = cpu_to_le64(ctx->a_key_dma);
	sec_sqe->type2.a_key_addr = cpu_to_le64(ctx->a_key_dma);


	sec_sqe->type2.mac_key_alg =
	sec_sqe->type2.mac_key_alg = cpu_to_le32(authsize / SEC_SQE_LEN_RATE);
			cpu_to_le32(ctx->mac_len / SEC_SQE_LEN_RATE);


	sec_sqe->type2.mac_key_alg |=
	sec_sqe->type2.mac_key_alg |=
			cpu_to_le32((u32)((ctx->a_key_len) /
			cpu_to_le32((u32)((ctx->a_key_len) /
@@ -1621,11 +1606,13 @@ static void sec_auth_bd_fill_ex_v3(struct sec_auth_ctx *ctx, int dir,
	struct sec_aead_req *a_req = &req->aead_req;
	struct sec_aead_req *a_req = &req->aead_req;
	struct sec_cipher_req *c_req = &req->c_req;
	struct sec_cipher_req *c_req = &req->c_req;
	struct aead_request *aq = a_req->aead_req;
	struct aead_request *aq = a_req->aead_req;
	struct crypto_aead *tfm = crypto_aead_reqtfm(aq);
	size_t authsize = crypto_aead_authsize(tfm);


	sqe3->a_key_addr = cpu_to_le64(ctx->a_key_dma);
	sqe3->a_key_addr = cpu_to_le64(ctx->a_key_dma);


	sqe3->auth_mac_key |=
	sqe3->auth_mac_key |=
			cpu_to_le32((u32)(ctx->mac_len /
			cpu_to_le32((u32)(authsize /
			SEC_SQE_LEN_RATE) << SEC_MAC_OFFSET_V3);
			SEC_SQE_LEN_RATE) << SEC_MAC_OFFSET_V3);


	sqe3->auth_mac_key |=
	sqe3->auth_mac_key |=
@@ -1676,9 +1663,9 @@ static void sec_aead_callback(struct sec_ctx *c, struct sec_req *req, int err)
{
{
	struct aead_request *a_req = req->aead_req.aead_req;
	struct aead_request *a_req = req->aead_req.aead_req;
	struct crypto_aead *tfm = crypto_aead_reqtfm(a_req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(a_req);
	size_t authsize = crypto_aead_authsize(tfm);
	struct sec_aead_req *aead_req = &req->aead_req;
	struct sec_aead_req *aead_req = &req->aead_req;
	struct sec_cipher_req *c_req = &req->c_req;
	struct sec_cipher_req *c_req = &req->c_req;
	size_t authsize = crypto_aead_authsize(tfm);
	size_t sz;
	size_t sz;


	if (!err && c->c_ctx.c_mode == SEC_CMODE_CBC && c_req->encrypt)
	if (!err && c->c_ctx.c_mode == SEC_CMODE_CBC && c_req->encrypt)
@@ -1688,10 +1675,8 @@ static void sec_aead_callback(struct sec_ctx *c, struct sec_req *req, int err)
	if (!err && c_req->encrypt) {
	if (!err && c_req->encrypt) {
		struct scatterlist *sgl = a_req->dst;
		struct scatterlist *sgl = a_req->dst;


		sz = sg_pcopy_from_buffer(sgl, sg_nents(sgl),
		sz = sg_pcopy_from_buffer(sgl, sg_nents(sgl), aead_req->out_mac,
					  aead_req->out_mac,
					  authsize, a_req->cryptlen + a_req->assoclen);
					  authsize, a_req->cryptlen +
					  a_req->assoclen);
		if (unlikely(sz != authsize)) {
		if (unlikely(sz != authsize)) {
			dev_err(c->dev, "copy out mac err!\n");
			dev_err(c->dev, "copy out mac err!\n");
			err = -EINVAL;
			err = -EINVAL;
@@ -1894,8 +1879,10 @@ static void sec_aead_exit(struct crypto_aead *tfm)


static int sec_aead_ctx_init(struct crypto_aead *tfm, const char *hash_name)
static int sec_aead_ctx_init(struct crypto_aead *tfm, const char *hash_name)
{
{
	struct aead_alg *alg = crypto_aead_alg(tfm);
	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
	struct sec_auth_ctx *auth_ctx = &ctx->a_ctx;
	struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
	const char *aead_name = alg->base.cra_name;
	int ret;
	int ret;


	ret = sec_aead_init(tfm);
	ret = sec_aead_init(tfm);
@@ -1904,11 +1891,20 @@ static int sec_aead_ctx_init(struct crypto_aead *tfm, const char *hash_name)
		return ret;
		return ret;
	}
	}


	auth_ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
	a_ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
	if (IS_ERR(auth_ctx->hash_tfm)) {
	if (IS_ERR(a_ctx->hash_tfm)) {
		dev_err(ctx->dev, "aead alloc shash error!\n");
		dev_err(ctx->dev, "aead alloc shash error!\n");
		sec_aead_exit(tfm);
		sec_aead_exit(tfm);
		return PTR_ERR(auth_ctx->hash_tfm);
		return PTR_ERR(a_ctx->hash_tfm);
	}

	a_ctx->fallback_aead_tfm = crypto_alloc_aead(aead_name, 0,
						     CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC);
	if (IS_ERR(a_ctx->fallback_aead_tfm)) {
		dev_err(ctx->dev, "aead driver alloc fallback tfm error!\n");
		crypto_free_shash(ctx->a_ctx.hash_tfm);
		sec_aead_exit(tfm);
		return PTR_ERR(a_ctx->fallback_aead_tfm);
	}
	}


	return 0;
	return 0;
@@ -1918,6 +1914,7 @@ static void sec_aead_ctx_exit(struct crypto_aead *tfm)
{
{
	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
	struct sec_ctx *ctx = crypto_aead_ctx(tfm);


	crypto_free_aead(ctx->a_ctx.fallback_aead_tfm);
	crypto_free_shash(ctx->a_ctx.hash_tfm);
	crypto_free_shash(ctx->a_ctx.hash_tfm);
	sec_aead_exit(tfm);
	sec_aead_exit(tfm);
}
}
@@ -1944,7 +1941,6 @@ static int sec_aead_xcm_ctx_init(struct crypto_aead *tfm)
		sec_aead_exit(tfm);
		sec_aead_exit(tfm);
		return PTR_ERR(a_ctx->fallback_aead_tfm);
		return PTR_ERR(a_ctx->fallback_aead_tfm);
	}
	}
	a_ctx->fallback = false;


	return 0;
	return 0;
}
}
@@ -2199,21 +2195,20 @@ static int sec_aead_spec_check(struct sec_ctx *ctx, struct sec_req *sreq)
{
{
	struct aead_request *req = sreq->aead_req.aead_req;
	struct aead_request *req = sreq->aead_req.aead_req;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	size_t authsize = crypto_aead_authsize(tfm);
	size_t sz = crypto_aead_authsize(tfm);
	u8 c_mode = ctx->c_ctx.c_mode;
	u8 c_mode = ctx->c_ctx.c_mode;
	struct device *dev = ctx->dev;
	struct device *dev = ctx->dev;
	int ret;
	int ret;


	if (unlikely(req->cryptlen + req->assoclen > MAX_INPUT_DATA_LEN ||
	/* Hardware does not handle cases where authsize is less than 4 bytes */
	    req->assoclen > SEC_MAX_AAD_LEN)) {
	if (unlikely(sz < MIN_MAC_LEN)) {
		dev_err(dev, "aead input spec error!\n");
		sreq->aead_req.fallback = true;
		return -EINVAL;
		return -EINVAL;
	}
	}


	if (unlikely((c_mode == SEC_CMODE_GCM && authsize < DES_BLOCK_SIZE) ||
	if (unlikely(req->cryptlen + req->assoclen > MAX_INPUT_DATA_LEN ||
	   (c_mode == SEC_CMODE_CCM && (authsize < MIN_MAC_LEN ||
	    req->assoclen > SEC_MAX_AAD_LEN)) {
		authsize & MAC_LEN_MASK)))) {
		dev_err(dev, "aead input spec error!\n");
		dev_err(dev, "aead input mac length error!\n");
		return -EINVAL;
		return -EINVAL;
	}
	}


@@ -2232,7 +2227,7 @@ static int sec_aead_spec_check(struct sec_ctx *ctx, struct sec_req *sreq)
	if (sreq->c_req.encrypt)
	if (sreq->c_req.encrypt)
		sreq->c_req.c_len = req->cryptlen;
		sreq->c_req.c_len = req->cryptlen;
	else
	else
		sreq->c_req.c_len = req->cryptlen - authsize;
		sreq->c_req.c_len = req->cryptlen - sz;
	if (c_mode == SEC_CMODE_CBC) {
	if (c_mode == SEC_CMODE_CBC) {
		if (unlikely(sreq->c_req.c_len & (AES_BLOCK_SIZE - 1))) {
		if (unlikely(sreq->c_req.c_len & (AES_BLOCK_SIZE - 1))) {
			dev_err(dev, "aead crypto length error!\n");
			dev_err(dev, "aead crypto length error!\n");
@@ -2259,7 +2254,7 @@ static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
	if (ctx->sec->qm.ver == QM_HW_V2) {
	if (ctx->sec->qm.ver == QM_HW_V2) {
		if (unlikely(!req->cryptlen || (!sreq->c_req.encrypt &&
		if (unlikely(!req->cryptlen || (!sreq->c_req.encrypt &&
			     req->cryptlen <= authsize))) {
			     req->cryptlen <= authsize))) {
			ctx->a_ctx.fallback = true;
			sreq->aead_req.fallback = true;
			return -EINVAL;
			return -EINVAL;
		}
		}
	}
	}
@@ -2287,16 +2282,9 @@ static int sec_aead_soft_crypto(struct sec_ctx *ctx,
				bool encrypt)
				bool encrypt)
{
{
	struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
	struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
	struct device *dev = ctx->dev;
	struct aead_request *subreq;
	struct aead_request *subreq;
	int ret;
	int ret;


	/* Kunpeng920 aead mode not support input 0 size */
	if (!a_ctx->fallback_aead_tfm) {
		dev_err(dev, "aead fallback tfm is NULL!\n");
		return -EINVAL;
	}

	subreq = aead_request_alloc(a_ctx->fallback_aead_tfm, GFP_KERNEL);
	subreq = aead_request_alloc(a_ctx->fallback_aead_tfm, GFP_KERNEL);
	if (!subreq)
	if (!subreq)
		return -ENOMEM;
		return -ENOMEM;
@@ -2323,6 +2311,7 @@ static int sec_aead_crypto(struct aead_request *a_req, bool encrypt)
	struct sec_req *req = aead_request_ctx(a_req);
	struct sec_req *req = aead_request_ctx(a_req);
	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
	int ret;
	int ret;
	req->aead_req.fallback = false;


	req->flag = a_req->base.flags;
	req->flag = a_req->base.flags;
	req->aead_req.aead_req = a_req;
	req->aead_req.aead_req = a_req;
@@ -2332,7 +2321,7 @@ static int sec_aead_crypto(struct aead_request *a_req, bool encrypt)


	ret = sec_aead_param_check(ctx, req);
	ret = sec_aead_param_check(ctx, req);
	if (unlikely(ret)) {
	if (unlikely(ret)) {
		if (ctx->a_ctx.fallback)
		if (req->aead_req.fallback)
			return sec_aead_soft_crypto(ctx, a_req, encrypt);
			return sec_aead_soft_crypto(ctx, a_req, encrypt);
		return -EINVAL;
		return -EINVAL;
	}
	}
+0 −11
Original line number Original line Diff line number Diff line
@@ -23,17 +23,6 @@ enum sec_hash_alg {
	SEC_A_HMAC_SHA512 = 0x15,
	SEC_A_HMAC_SHA512 = 0x15,
};
};


enum sec_mac_len {
	SEC_HMAC_CCM_MAC   = 16,
	SEC_HMAC_GCM_MAC   = 16,
	SEC_SM3_MAC        = 32,
	SEC_HMAC_SM3_MAC   = 32,
	SEC_HMAC_MD5_MAC   = 16,
	SEC_HMAC_SHA1_MAC   = 20,
	SEC_HMAC_SHA256_MAC = 32,
	SEC_HMAC_SHA512_MAC = 64,
};

enum sec_cmode {
enum sec_cmode {
	SEC_CMODE_ECB    = 0x0,
	SEC_CMODE_ECB    = 0x0,
	SEC_CMODE_CBC    = 0x1,
	SEC_CMODE_CBC    = 0x1,
Loading