Commit e6e6600c authored by Gilad Ben-Yossef, committed by Herbert Xu
Browse files

crypto: ccree - drop legacy ivgen support



ccree had a mechanism for IV generation which was not compatible
with the Linux seqiv or echainiv iv generator and was never used
in any of the upstream versions so drop all the code implementing it.

Signed-off-by: Gilad Ben-Yossef <gilad@benyossef.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent aca24d48
Loading
Loading
Loading
Loading
drivers/crypto/ccree/Makefile: +1 −1
Original line number Diff line number Diff line
@@ -2,7 +2,7 @@
# Copyright (C) 2012-2019 ARM Limited (or its affiliates).

obj-$(CONFIG_CRYPTO_DEV_CCREE) := ccree.o
ccree-y := cc_driver.o cc_buffer_mgr.o cc_request_mgr.o cc_cipher.o cc_hash.o cc_aead.o cc_ivgen.o cc_sram_mgr.o
ccree-y := cc_driver.o cc_buffer_mgr.o cc_request_mgr.o cc_cipher.o cc_hash.o cc_aead.o cc_sram_mgr.o
ccree-$(CONFIG_CRYPTO_FIPS) += cc_fips.o
ccree-$(CONFIG_DEBUG_FS) += cc_debugfs.o
ccree-$(CONFIG_PM) += cc_pm.o
drivers/crypto/ccree/cc_aead.c: +8 −68
Original line number Diff line number Diff line
@@ -239,29 +239,14 @@ static void cc_aead_complete(struct device *dev, void *cc_req, int err)
			cc_zero_sgl(areq->dst, areq_ctx->cryptlen);
			err = -EBADMSG;
		}
	} else { /*ENCRYPT*/
		if (areq_ctx->is_icv_fragmented) {
	/*ENCRYPT*/
	} else if (areq_ctx->is_icv_fragmented) {
		u32 skip = areq->cryptlen + areq_ctx->dst_offset;

			cc_copy_sg_portion(dev, areq_ctx->mac_buf,
					   areq_ctx->dst_sgl, skip,
					   (skip + ctx->authsize),
		cc_copy_sg_portion(dev, areq_ctx->mac_buf, areq_ctx->dst_sgl,
				   skip, (skip + ctx->authsize),
				   CC_SG_FROM_BUF);
	}

		/* If an IV was generated, copy it back to the user provided
		 * buffer.
		 */
		if (areq_ctx->backup_giv) {
			if (ctx->cipher_mode == DRV_CIPHER_CTR)
				memcpy(areq_ctx->backup_giv, areq_ctx->ctr_iv +
				       CTR_RFC3686_NONCE_SIZE,
				       CTR_RFC3686_IV_SIZE);
			else if (ctx->cipher_mode == DRV_CIPHER_CCM)
				memcpy(areq_ctx->backup_giv, areq_ctx->ctr_iv +
				       CCM_BLOCK_IV_OFFSET, CCM_BLOCK_IV_SIZE);
		}
	}
done:
	aead_request_complete(areq, err);
}
@@ -1975,9 +1960,8 @@ static int cc_proc_aead(struct aead_request *req,
		 */
		memcpy(areq_ctx->ctr_iv, ctx->ctr_nonce,
		       CTR_RFC3686_NONCE_SIZE);
		if (!areq_ctx->backup_giv) /*User none-generated IV*/
			memcpy(areq_ctx->ctr_iv + CTR_RFC3686_NONCE_SIZE,
			       req->iv, CTR_RFC3686_IV_SIZE);
		memcpy(areq_ctx->ctr_iv + CTR_RFC3686_NONCE_SIZE, req->iv,
		       CTR_RFC3686_IV_SIZE);
		/* Initialize counter portion of counter block */
		*(__be32 *)(areq_ctx->ctr_iv + CTR_RFC3686_NONCE_SIZE +
			    CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
@@ -2023,40 +2007,6 @@ static int cc_proc_aead(struct aead_request *req,
		goto exit;
	}

	/* do we need to generate IV? */
	if (areq_ctx->backup_giv) {
		/* set the DMA mapped IV address*/
		if (ctx->cipher_mode == DRV_CIPHER_CTR) {
			cc_req.ivgen_dma_addr[0] =
				areq_ctx->gen_ctx.iv_dma_addr +
				CTR_RFC3686_NONCE_SIZE;
			cc_req.ivgen_dma_addr_len = 1;
		} else if (ctx->cipher_mode == DRV_CIPHER_CCM) {
			/* In ccm, the IV needs to exist both inside B0 and
			 * inside the counter.It is also copied to iv_dma_addr
			 * for other reasons (like returning it to the user).
			 * So, using 3 (identical) IV outputs.
			 */
			cc_req.ivgen_dma_addr[0] =
				areq_ctx->gen_ctx.iv_dma_addr +
				CCM_BLOCK_IV_OFFSET;
			cc_req.ivgen_dma_addr[1] =
				sg_dma_address(&areq_ctx->ccm_adata_sg) +
				CCM_B0_OFFSET + CCM_BLOCK_IV_OFFSET;
			cc_req.ivgen_dma_addr[2] =
				sg_dma_address(&areq_ctx->ccm_adata_sg) +
				CCM_CTR_COUNT_0_OFFSET + CCM_BLOCK_IV_OFFSET;
			cc_req.ivgen_dma_addr_len = 3;
		} else {
			cc_req.ivgen_dma_addr[0] =
				areq_ctx->gen_ctx.iv_dma_addr;
			cc_req.ivgen_dma_addr_len = 1;
		}

		/* set the IV size (8/16 B long)*/
		cc_req.ivgen_size = crypto_aead_ivsize(tfm);
	}

	/* STAT_PHASE_2: Create sequence */

	/* Load MLLI tables to SRAM if necessary */
@@ -2107,7 +2057,6 @@ static int cc_aead_encrypt(struct aead_request *req)
	/* No generated IV required */
	areq_ctx->backup_iv = req->iv;
	areq_ctx->assoclen = req->assoclen;
	areq_ctx->backup_giv = NULL;
	areq_ctx->is_gcm4543 = false;

	areq_ctx->plaintext_authenticate_only = false;
@@ -2139,7 +2088,6 @@ static int cc_rfc4309_ccm_encrypt(struct aead_request *req)
	/* No generated IV required */
	areq_ctx->backup_iv = req->iv;
	areq_ctx->assoclen = req->assoclen;
	areq_ctx->backup_giv = NULL;
	areq_ctx->is_gcm4543 = true;

	cc_proc_rfc4309_ccm(req);
@@ -2161,7 +2109,6 @@ static int cc_aead_decrypt(struct aead_request *req)
	/* No generated IV required */
	areq_ctx->backup_iv = req->iv;
	areq_ctx->assoclen = req->assoclen;
	areq_ctx->backup_giv = NULL;
	areq_ctx->is_gcm4543 = false;

	areq_ctx->plaintext_authenticate_only = false;
@@ -2191,7 +2138,6 @@ static int cc_rfc4309_ccm_decrypt(struct aead_request *req)
	/* No generated IV required */
	areq_ctx->backup_iv = req->iv;
	areq_ctx->assoclen = req->assoclen;
	areq_ctx->backup_giv = NULL;

	areq_ctx->is_gcm4543 = true;
	cc_proc_rfc4309_ccm(req);
@@ -2311,8 +2257,6 @@ static int cc_rfc4106_gcm_encrypt(struct aead_request *req)
	/* No generated IV required */
	areq_ctx->backup_iv = req->iv;
	areq_ctx->assoclen = req->assoclen;
	areq_ctx->backup_giv = NULL;

	areq_ctx->plaintext_authenticate_only = false;

	cc_proc_rfc4_gcm(req);
@@ -2340,7 +2284,6 @@ static int cc_rfc4543_gcm_encrypt(struct aead_request *req)
	/* No generated IV required */
	areq_ctx->backup_iv = req->iv;
	areq_ctx->assoclen = req->assoclen;
	areq_ctx->backup_giv = NULL;

	cc_proc_rfc4_gcm(req);
	areq_ctx->is_gcm4543 = true;
@@ -2372,8 +2315,6 @@ static int cc_rfc4106_gcm_decrypt(struct aead_request *req)
	/* No generated IV required */
	areq_ctx->backup_iv = req->iv;
	areq_ctx->assoclen = req->assoclen;
	areq_ctx->backup_giv = NULL;

	areq_ctx->plaintext_authenticate_only = false;

	cc_proc_rfc4_gcm(req);
@@ -2401,7 +2342,6 @@ static int cc_rfc4543_gcm_decrypt(struct aead_request *req)
	/* No generated IV required */
	areq_ctx->backup_iv = req->iv;
	areq_ctx->assoclen = req->assoclen;
	areq_ctx->backup_giv = NULL;

	cc_proc_rfc4_gcm(req);
	areq_ctx->is_gcm4543 = true;
drivers/crypto/ccree/cc_aead.h: +1 −2
Original line number Diff line number Diff line
@@ -65,8 +65,7 @@ struct aead_req_ctx {
	unsigned int hw_iv_size ____cacheline_aligned;
	/* used to prevent cache coherence problem */
	u8 backup_mac[MAX_MAC_SIZE];
	u8 *backup_iv; /*store iv for generated IV flow*/
	u8 *backup_giv; /*store iv for rfc3686(ctr) flow*/
	u8 *backup_iv; /* store orig iv */
	u32 assoclen; /* internal assoclen */
	dma_addr_t mac_buf_dma_addr; /* internal ICV DMA buffer */
	/* buffer for internal ccm configurations */
drivers/crypto/ccree/cc_driver.c: +1 −11
Original line number Diff line number Diff line
@@ -22,7 +22,6 @@
#include "cc_cipher.h"
#include "cc_aead.h"
#include "cc_hash.h"
#include "cc_ivgen.h"
#include "cc_sram_mgr.h"
#include "cc_pm.h"
#include "cc_fips.h"
@@ -503,17 +502,11 @@ static int init_cc_resources(struct platform_device *plat_dev)
		goto post_buf_mgr_err;
	}

	rc = cc_ivgen_init(new_drvdata);
	if (rc) {
		dev_err(dev, "cc_ivgen_init failed\n");
		goto post_buf_mgr_err;
	}

	/* Allocate crypto algs */
	rc = cc_cipher_alloc(new_drvdata);
	if (rc) {
		dev_err(dev, "cc_cipher_alloc failed\n");
		goto post_ivgen_err;
		goto post_buf_mgr_err;
	}

	/* hash must be allocated before aead since hash exports APIs */
@@ -544,8 +537,6 @@ static int init_cc_resources(struct platform_device *plat_dev)
	cc_hash_free(new_drvdata);
post_cipher_err:
	cc_cipher_free(new_drvdata);
post_ivgen_err:
	cc_ivgen_fini(new_drvdata);
post_buf_mgr_err:
	 cc_buffer_mgr_fini(new_drvdata);
post_req_mgr_err:
@@ -577,7 +568,6 @@ static void cleanup_cc_resources(struct platform_device *plat_dev)
	cc_aead_free(drvdata);
	cc_hash_free(drvdata);
	cc_cipher_free(drvdata);
	cc_ivgen_fini(drvdata);
	cc_pm_fini(drvdata);
	cc_buffer_mgr_fini(drvdata);
	cc_req_mgr_fini(drvdata);
drivers/crypto/ccree/cc_driver.h: +0 −10
Original line number Diff line number Diff line
@@ -126,15 +126,6 @@ struct cc_cpp_req {
struct cc_crypto_req {
	void (*user_cb)(struct device *dev, void *req, int err);
	void *user_arg;
	dma_addr_t ivgen_dma_addr[CC_MAX_IVGEN_DMA_ADDRESSES];
	/* For the first 'ivgen_dma_addr_len' addresses of this array,
	 * generated IV would be placed in it by send_request().
	 * Same generated IV for all addresses!
	 */
	/* Amount of 'ivgen_dma_addr' elements to be filled. */
	unsigned int ivgen_dma_addr_len;
	/* The generated IV size required, 8/16 B allowed. */
	unsigned int ivgen_size;
	struct completion seq_compl; /* request completion */
	struct cc_cpp_req cpp;
};
@@ -158,7 +149,6 @@ struct cc_drvdata {
	void *aead_handle;
	void *request_mgr_handle;
	void *fips_handle;
	void *ivgen_handle;
	void *sram_mgr_handle;
	void *debugfs;
	struct clk *clk;
Loading