Commit 99c6b20e authored by Herbert Xu
Browse files

crypto: ccp - Set DMA alignment explicitly



This driver has been implicitly relying on kmalloc alignment
to be sufficient for DMA.  This may no longer be the case with
upcoming arm64 changes.

This patch changes it to explicitly request DMA alignment from
the Crypto API.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent 1c64a7e1
Loading
Loading
Loading
Loading
+11 −10
Original line number Diff line number Diff line
@@ -25,7 +25,7 @@ static int ccp_aes_cmac_complete(struct crypto_async_request *async_req,
{
	struct ahash_request *req = ahash_request_cast(async_req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req);
	struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx_dma(req);
	unsigned int digest_size = crypto_ahash_digestsize(tfm);

	if (ret)
@@ -56,8 +56,8 @@ static int ccp_do_cmac_update(struct ahash_request *req, unsigned int nbytes,
			      unsigned int final)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct ccp_ctx *ctx = crypto_ahash_ctx(tfm);
	struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req);
	struct ccp_ctx *ctx = crypto_ahash_ctx_dma(tfm);
	struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx_dma(req);
	struct scatterlist *sg, *cmac_key_sg = NULL;
	unsigned int block_size =
		crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
@@ -182,7 +182,7 @@ static int ccp_do_cmac_update(struct ahash_request *req, unsigned int nbytes,

static int ccp_aes_cmac_init(struct ahash_request *req)
{
	struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req);
	struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx_dma(req);

	memset(rctx, 0, sizeof(*rctx));

@@ -219,7 +219,7 @@ static int ccp_aes_cmac_digest(struct ahash_request *req)

static int ccp_aes_cmac_export(struct ahash_request *req, void *out)
{
	struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req);
	struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx_dma(req);
	struct ccp_aes_cmac_exp_ctx state;

	/* Don't let anything leak to 'out' */
@@ -238,7 +238,7 @@ static int ccp_aes_cmac_export(struct ahash_request *req, void *out)

static int ccp_aes_cmac_import(struct ahash_request *req, const void *in)
{
	struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req);
	struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx_dma(req);
	struct ccp_aes_cmac_exp_ctx state;

	/* 'in' may not be aligned so memcpy to local variable */
@@ -256,7 +256,7 @@ static int ccp_aes_cmac_import(struct ahash_request *req, const void *in)
static int ccp_aes_cmac_setkey(struct crypto_ahash *tfm, const u8 *key,
			       unsigned int key_len)
{
	struct ccp_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
	struct ccp_ctx *ctx = crypto_ahash_ctx_dma(tfm);
	struct ccp_crypto_ahash_alg *alg =
		ccp_crypto_ahash_alg(crypto_ahash_tfm(tfm));
	u64 k0_hi, k0_lo, k1_hi, k1_lo, k2_hi, k2_lo;
@@ -334,13 +334,14 @@ static int ccp_aes_cmac_setkey(struct crypto_ahash *tfm, const u8 *key,

static int ccp_aes_cmac_cra_init(struct crypto_tfm *tfm)
{
	struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);
	struct ccp_ctx *ctx = crypto_tfm_ctx_dma(tfm);
	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);

	ctx->complete = ccp_aes_cmac_complete;
	ctx->u.aes.key_len = 0;

	crypto_ahash_set_reqsize(ahash, sizeof(struct ccp_aes_cmac_req_ctx));
	crypto_ahash_set_reqsize_dma(ahash,
				     sizeof(struct ccp_aes_cmac_req_ctx));

	return 0;
}
@@ -382,7 +383,7 @@ int ccp_register_aes_cmac_algs(struct list_head *head)
			  CRYPTO_ALG_KERN_DRIVER_ONLY |
			  CRYPTO_ALG_NEED_FALLBACK;
	base->cra_blocksize = AES_BLOCK_SIZE;
	base->cra_ctxsize = sizeof(struct ccp_ctx);
	base->cra_ctxsize = sizeof(struct ccp_ctx) + crypto_dma_padding();
	base->cra_priority = CCP_CRA_PRIORITY;
	base->cra_init = ccp_aes_cmac_cra_init;
	base->cra_module = THIS_MODULE;
+6 −6
Original line number Diff line number Diff line
@@ -29,7 +29,7 @@ static int ccp_aes_gcm_complete(struct crypto_async_request *async_req, int ret)
static int ccp_aes_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
			      unsigned int key_len)
{
	struct ccp_ctx *ctx = crypto_aead_ctx(tfm);
	struct ccp_ctx *ctx = crypto_aead_ctx_dma(tfm);

	switch (key_len) {
	case AES_KEYSIZE_128:
@@ -76,8 +76,8 @@ static int ccp_aes_gcm_setauthsize(struct crypto_aead *tfm,
static int ccp_aes_gcm_crypt(struct aead_request *req, bool encrypt)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct ccp_ctx *ctx = crypto_aead_ctx(tfm);
	struct ccp_aes_req_ctx *rctx = aead_request_ctx(req);
	struct ccp_ctx *ctx = crypto_aead_ctx_dma(tfm);
	struct ccp_aes_req_ctx *rctx = aead_request_ctx_dma(req);
	struct scatterlist *iv_sg = NULL;
	unsigned int iv_len = 0;
	int i;
@@ -148,12 +148,12 @@ static int ccp_aes_gcm_decrypt(struct aead_request *req)

static int ccp_aes_gcm_cra_init(struct crypto_aead *tfm)
{
	struct ccp_ctx *ctx = crypto_aead_ctx(tfm);
	struct ccp_ctx *ctx = crypto_aead_ctx_dma(tfm);

	ctx->complete = ccp_aes_gcm_complete;
	ctx->u.aes.key_len = 0;

	crypto_aead_set_reqsize(tfm, sizeof(struct ccp_aes_req_ctx));
	crypto_aead_set_reqsize_dma(tfm, sizeof(struct ccp_aes_req_ctx));

	return 0;
}
@@ -176,7 +176,7 @@ static struct aead_alg ccp_aes_gcm_defaults = {
				  CRYPTO_ALG_KERN_DRIVER_ONLY |
				  CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize	= AES_BLOCK_SIZE,
		.cra_ctxsize	= sizeof(struct ccp_ctx),
		.cra_ctxsize	= sizeof(struct ccp_ctx) + CRYPTO_DMA_PADDING,
		.cra_priority	= CCP_CRA_PRIORITY,
		.cra_exit	= ccp_aes_gcm_cra_exit,
		.cra_module	= THIS_MODULE,
+11 −9
Original line number Diff line number Diff line
@@ -62,7 +62,7 @@ static struct ccp_unit_size_map xts_unit_sizes[] = {
static int ccp_aes_xts_complete(struct crypto_async_request *async_req, int ret)
{
	struct skcipher_request *req = skcipher_request_cast(async_req);
	struct ccp_aes_req_ctx *rctx = skcipher_request_ctx(req);
	struct ccp_aes_req_ctx *rctx = skcipher_request_ctx_dma(req);

	if (ret)
		return ret;
@@ -75,7 +75,7 @@ static int ccp_aes_xts_complete(struct crypto_async_request *async_req, int ret)
static int ccp_aes_xts_setkey(struct crypto_skcipher *tfm, const u8 *key,
			      unsigned int key_len)
{
	struct ccp_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct ccp_ctx *ctx = crypto_skcipher_ctx_dma(tfm);
	unsigned int ccpversion = ccp_version();
	int ret;

@@ -105,8 +105,8 @@ static int ccp_aes_xts_crypt(struct skcipher_request *req,
			     unsigned int encrypt)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct ccp_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct ccp_aes_req_ctx *rctx = skcipher_request_ctx(req);
	struct ccp_ctx *ctx = crypto_skcipher_ctx_dma(tfm);
	struct ccp_aes_req_ctx *rctx = skcipher_request_ctx_dma(req);
	unsigned int ccpversion = ccp_version();
	unsigned int fallback = 0;
	unsigned int unit;
@@ -196,7 +196,7 @@ static int ccp_aes_xts_decrypt(struct skcipher_request *req)

static int ccp_aes_xts_init_tfm(struct crypto_skcipher *tfm)
{
	struct ccp_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct ccp_ctx *ctx = crypto_skcipher_ctx_dma(tfm);
	struct crypto_skcipher *fallback_tfm;

	ctx->complete = ccp_aes_xts_complete;
@@ -210,7 +210,8 @@ static int ccp_aes_xts_init_tfm(struct crypto_skcipher *tfm)
	}
	ctx->u.aes.tfm_skcipher = fallback_tfm;

	crypto_skcipher_set_reqsize(tfm, sizeof(struct ccp_aes_req_ctx) +
	crypto_skcipher_set_reqsize_dma(tfm,
					sizeof(struct ccp_aes_req_ctx) +
					crypto_skcipher_reqsize(fallback_tfm));

	return 0;
@@ -218,7 +219,7 @@ static int ccp_aes_xts_init_tfm(struct crypto_skcipher *tfm)

static void ccp_aes_xts_exit_tfm(struct crypto_skcipher *tfm)
{
	struct ccp_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct ccp_ctx *ctx = crypto_skcipher_ctx_dma(tfm);

	crypto_free_skcipher(ctx->u.aes.tfm_skcipher);
}
@@ -246,7 +247,8 @@ static int ccp_register_aes_xts_alg(struct list_head *head,
				  CRYPTO_ALG_KERN_DRIVER_ONLY |
				  CRYPTO_ALG_NEED_FALLBACK;
	alg->base.cra_blocksize	= AES_BLOCK_SIZE;
	alg->base.cra_ctxsize	= sizeof(struct ccp_ctx);
	alg->base.cra_ctxsize	= sizeof(struct ccp_ctx) +
				  crypto_dma_padding();
	alg->base.cra_priority	= CCP_CRA_PRIORITY;
	alg->base.cra_module	= THIS_MODULE;

+15 −14
Original line number Diff line number Diff line
@@ -22,8 +22,9 @@
static int ccp_aes_complete(struct crypto_async_request *async_req, int ret)
{
	struct skcipher_request *req = skcipher_request_cast(async_req);
	struct ccp_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct ccp_aes_req_ctx *rctx = skcipher_request_ctx(req);
	struct ccp_ctx *ctx = crypto_skcipher_ctx_dma(
		crypto_skcipher_reqtfm(req));
	struct ccp_aes_req_ctx *rctx = skcipher_request_ctx_dma(req);

	if (ret)
		return ret;
@@ -38,7 +39,7 @@ static int ccp_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
			  unsigned int key_len)
{
	struct ccp_crypto_skcipher_alg *alg = ccp_crypto_skcipher_alg(tfm);
	struct ccp_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct ccp_ctx *ctx = crypto_skcipher_ctx_dma(tfm);

	switch (key_len) {
	case AES_KEYSIZE_128:
@@ -65,8 +66,8 @@ static int ccp_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
static int ccp_aes_crypt(struct skcipher_request *req, bool encrypt)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct ccp_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct ccp_aes_req_ctx *rctx = skcipher_request_ctx(req);
	struct ccp_ctx *ctx = crypto_skcipher_ctx_dma(tfm);
	struct ccp_aes_req_ctx *rctx = skcipher_request_ctx_dma(req);
	struct scatterlist *iv_sg = NULL;
	unsigned int iv_len = 0;

@@ -118,7 +119,7 @@ static int ccp_aes_decrypt(struct skcipher_request *req)

static int ccp_aes_init_tfm(struct crypto_skcipher *tfm)
{
	struct ccp_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct ccp_ctx *ctx = crypto_skcipher_ctx_dma(tfm);

	ctx->complete = ccp_aes_complete;
	ctx->u.aes.key_len = 0;
@@ -132,7 +133,7 @@ static int ccp_aes_rfc3686_complete(struct crypto_async_request *async_req,
				    int ret)
{
	struct skcipher_request *req = skcipher_request_cast(async_req);
	struct ccp_aes_req_ctx *rctx = skcipher_request_ctx(req);
	struct ccp_aes_req_ctx *rctx = skcipher_request_ctx_dma(req);

	/* Restore the original pointer */
	req->iv = rctx->rfc3686_info;
@@ -143,7 +144,7 @@ static int ccp_aes_rfc3686_complete(struct crypto_async_request *async_req,
static int ccp_aes_rfc3686_setkey(struct crypto_skcipher *tfm, const u8 *key,
				  unsigned int key_len)
{
	struct ccp_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct ccp_ctx *ctx = crypto_skcipher_ctx_dma(tfm);

	if (key_len < CTR_RFC3686_NONCE_SIZE)
		return -EINVAL;
@@ -157,8 +158,8 @@ static int ccp_aes_rfc3686_setkey(struct crypto_skcipher *tfm, const u8 *key,
static int ccp_aes_rfc3686_crypt(struct skcipher_request *req, bool encrypt)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct ccp_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct ccp_aes_req_ctx *rctx = skcipher_request_ctx(req);
	struct ccp_ctx *ctx = crypto_skcipher_ctx_dma(tfm);
	struct ccp_aes_req_ctx *rctx = skcipher_request_ctx_dma(req);
	u8 *iv;

	/* Initialize the CTR block */
@@ -190,12 +191,12 @@ static int ccp_aes_rfc3686_decrypt(struct skcipher_request *req)

static int ccp_aes_rfc3686_init_tfm(struct crypto_skcipher *tfm)
{
	struct ccp_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct ccp_ctx *ctx = crypto_skcipher_ctx_dma(tfm);

	ctx->complete = ccp_aes_rfc3686_complete;
	ctx->u.aes.key_len = 0;

	crypto_skcipher_set_reqsize(tfm, sizeof(struct ccp_aes_req_ctx));
	crypto_skcipher_set_reqsize_dma(tfm, sizeof(struct ccp_aes_req_ctx));

	return 0;
}
@@ -213,7 +214,7 @@ static const struct skcipher_alg ccp_aes_defaults = {
				  CRYPTO_ALG_KERN_DRIVER_ONLY |
				  CRYPTO_ALG_NEED_FALLBACK,
	.base.cra_blocksize	= AES_BLOCK_SIZE,
	.base.cra_ctxsize	= sizeof(struct ccp_ctx),
	.base.cra_ctxsize	= sizeof(struct ccp_ctx) + CRYPTO_DMA_PADDING,
	.base.cra_priority	= CCP_CRA_PRIORITY,
	.base.cra_module	= THIS_MODULE,
};
@@ -231,7 +232,7 @@ static const struct skcipher_alg ccp_aes_rfc3686_defaults = {
				  CRYPTO_ALG_KERN_DRIVER_ONLY |
				  CRYPTO_ALG_NEED_FALLBACK,
	.base.cra_blocksize	= CTR_RFC3686_BLOCK_SIZE,
	.base.cra_ctxsize	= sizeof(struct ccp_ctx),
	.base.cra_ctxsize	= sizeof(struct ccp_ctx) + CRYPTO_DMA_PADDING,
	.base.cra_priority	= CCP_CRA_PRIORITY,
	.base.cra_module	= THIS_MODULE,
};
+9 −8
Original line number Diff line number Diff line
@@ -21,8 +21,9 @@
static int ccp_des3_complete(struct crypto_async_request *async_req, int ret)
{
	struct skcipher_request *req = skcipher_request_cast(async_req);
	struct ccp_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct ccp_des3_req_ctx *rctx = skcipher_request_ctx(req);
	struct ccp_ctx *ctx = crypto_skcipher_ctx_dma(
		crypto_skcipher_reqtfm(req));
	struct ccp_des3_req_ctx *rctx = skcipher_request_ctx_dma(req);

	if (ret)
		return ret;
@@ -37,7 +38,7 @@ static int ccp_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
		unsigned int key_len)
{
	struct ccp_crypto_skcipher_alg *alg = ccp_crypto_skcipher_alg(tfm);
	struct ccp_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct ccp_ctx *ctx = crypto_skcipher_ctx_dma(tfm);
	int err;

	err = verify_skcipher_des3_key(tfm, key);
@@ -60,8 +61,8 @@ static int ccp_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
static int ccp_des3_crypt(struct skcipher_request *req, bool encrypt)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct ccp_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct ccp_des3_req_ctx *rctx = skcipher_request_ctx(req);
	struct ccp_ctx *ctx = crypto_skcipher_ctx_dma(tfm);
	struct ccp_des3_req_ctx *rctx = skcipher_request_ctx_dma(req);
	struct scatterlist *iv_sg = NULL;
	unsigned int iv_len = 0;

@@ -114,12 +115,12 @@ static int ccp_des3_decrypt(struct skcipher_request *req)

static int ccp_des3_init_tfm(struct crypto_skcipher *tfm)
{
	struct ccp_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct ccp_ctx *ctx = crypto_skcipher_ctx_dma(tfm);

	ctx->complete = ccp_des3_complete;
	ctx->u.des3.key_len = 0;

	crypto_skcipher_set_reqsize(tfm, sizeof(struct ccp_des3_req_ctx));
	crypto_skcipher_set_reqsize_dma(tfm, sizeof(struct ccp_des3_req_ctx));

	return 0;
}
@@ -137,7 +138,7 @@ static const struct skcipher_alg ccp_des3_defaults = {
				  CRYPTO_ALG_KERN_DRIVER_ONLY |
				  CRYPTO_ALG_NEED_FALLBACK,
	.base.cra_blocksize	= DES3_EDE_BLOCK_SIZE,
	.base.cra_ctxsize	= sizeof(struct ccp_ctx),
	.base.cra_ctxsize	= sizeof(struct ccp_ctx) + CRYPTO_DMA_PADDING,
	.base.cra_priority	= CCP_CRA_PRIORITY,
	.base.cra_module	= THIS_MODULE,
};
Loading