Commit 4367d760 authored by Jakub Kicinski
Browse files

Merge branch 'tls-expand-tls_cipher_size_desc-to-simplify-getsockopt-setsockopt'

Sabrina Dubroca says:

====================
tls: expand tls_cipher_size_desc to simplify getsockopt/setsockopt

Commit 2d2c5ea2 ("net/tls: Describe ciphers sizes by const
structs") introduced tls_cipher_size_desc to describe the size of the
fields of the per-cipher crypto_info structs, and commit ea7a9d88
("net/tls: Use cipher sizes structs") used it, but only in
tls_device.c and tls_device_fallback.c, and skipped converting similar
code in tls_main.c and tls_sw.c.

This series expands tls_cipher_size_desc (renamed to tls_cipher_desc
to better fit this expansion) to fully describe a cipher:
 - offset of the fields within the per-cipher crypto_info
 - size of the full struct (for copies to/from userspace)
 - offload flag
 - algorithm name used by SW crypto

With these additions, we can remove ~350 lines of
     switch (crypto_info->cipher_type) { ... }
from tls_set_device_offload, tls_sw_fallback_init,
do_tls_getsockopt_conf, do_tls_setsockopt_conf, tls_set_sw_offload
(mainly do_tls_getsockopt_conf and tls_set_sw_offload).

This series also adds the ARIA ciphers to the tls selftests, and some
more getsockopt/setsockopt tests to cover more of the code changed by
this series.
====================

Link: https://lore.kernel.org/r/cover.1692977948.git.sd@queasysnail.net


Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents 5447b080 f3e444e3
Loading
Loading
Loading
Loading
+0 −10
Original line number Diff line number Diff line
@@ -51,16 +51,6 @@

struct tls_rec;

/* Legacy per-cipher size table, removed by this series in favour of the
 * richer struct tls_cipher_desc. Each field is a length, in bytes, of
 * the corresponding field in the per-cipher tls12_crypto_info_* struct.
 */
struct tls_cipher_size_desc {
	unsigned int iv;	/* IV length */
	unsigned int key;	/* key length */
	unsigned int salt;	/* salt (implicit nonce) length */
	unsigned int tag;	/* AEAD authentication tag length */
	unsigned int rec_seq;	/* record sequence number length */
};

/* Indexed directly by crypto_info->cipher_type (see old callers). */
extern const struct tls_cipher_size_desc tls_cipher_size_desc[];

/* Maximum data size carried in a TLS record */
#define TLS_MAX_PAYLOAD_SIZE		((size_t)1 << 14)

+53 −0
Original line number Diff line number Diff line
@@ -51,6 +51,59 @@
#define TLS_DEC_STATS(net, field)				\
	SNMP_DEC_STATS((net)->mib.tls_statistics, field)

/* Full description of one TLS cipher: field sizes and offsets within the
 * per-cipher tls12_crypto_info_* struct, the algorithm name used for SW
 * crypto, and whether the cipher can be offloaded to a device.
 */
struct tls_cipher_desc {
	unsigned int nonce;	/* explicit per-record nonce bytes (0 for chacha20-poly1305, see CIPHER_DESC_NONCE0) */
	unsigned int iv;	/* IV length */
	unsigned int key;	/* key length */
	unsigned int salt;	/* salt (implicit nonce) length */
	unsigned int tag;	/* AEAD authentication tag length */
	unsigned int rec_seq;	/* record sequence number length */
	unsigned int iv_offset;	/* offsetof(iv) in the crypto_info struct */
	unsigned int key_offset;	/* offsetof(key) in the crypto_info struct */
	unsigned int salt_offset;	/* offsetof(salt) in the crypto_info struct */
	unsigned int rec_seq_offset;	/* offsetof(rec_seq) in the crypto_info struct */
	char *cipher_name;	/* algorithm name passed to crypto_alloc_aead() */
	bool offloadable;	/* device offload supported for this cipher */
	size_t crypto_info;	/* sizeof the full tls12_crypto_info_* struct, used for copies to/from userspace */
};

/* Bounds of the cipher_type values covered by tls_cipher_desc[]. */
#define TLS_CIPHER_MIN TLS_CIPHER_AES_GCM_128
#define TLS_CIPHER_MAX TLS_CIPHER_ARIA_GCM_256
extern const struct tls_cipher_desc tls_cipher_desc[TLS_CIPHER_MAX + 1 - TLS_CIPHER_MIN];

/* Look up the descriptor for @cipher_type.
 * Returns NULL when the value falls outside [TLS_CIPHER_MIN, TLS_CIPHER_MAX],
 * otherwise a pointer into tls_cipher_desc[]. Callers that need more than
 * existence also check the descriptor's fields (e.g. ->offloadable).
 */
static inline const struct tls_cipher_desc *get_cipher_desc(u16 cipher_type)
{
	if (cipher_type < TLS_CIPHER_MIN || cipher_type > TLS_CIPHER_MAX)
		return NULL;

	return &tls_cipher_desc[cipher_type - TLS_CIPHER_MIN];
}

/* Address of the iv field inside @crypto_info, located via the
 * per-cipher iv_offset stored in @cipher_desc.
 */
static inline char *crypto_info_iv(struct tls_crypto_info *crypto_info,
				   const struct tls_cipher_desc *cipher_desc)
{
	return (char *)crypto_info + cipher_desc->iv_offset;
}

/* Address of the key field inside @crypto_info, located via the
 * per-cipher key_offset stored in @cipher_desc.
 */
static inline char *crypto_info_key(struct tls_crypto_info *crypto_info,
				    const struct tls_cipher_desc *cipher_desc)
{
	return (char *)crypto_info + cipher_desc->key_offset;
}

/* Address of the salt field inside @crypto_info, located via the
 * per-cipher salt_offset stored in @cipher_desc.
 */
static inline char *crypto_info_salt(struct tls_crypto_info *crypto_info,
				     const struct tls_cipher_desc *cipher_desc)
{
	return (char *)crypto_info + cipher_desc->salt_offset;
}

/* Address of the rec_seq field inside @crypto_info, located via the
 * per-cipher rec_seq_offset stored in @cipher_desc.
 */
static inline char *crypto_info_rec_seq(struct tls_crypto_info *crypto_info,
					const struct tls_cipher_desc *cipher_desc)
{
	return (char *)crypto_info + cipher_desc->rec_seq_offset;
}


/* TLS records are maintained in 'struct tls_rec'. It stores the memory pages
 * allocated or mapped for each TLS record. After encryption, the records are
 * stores in a linked list.
+19 −33
Original line number Diff line number Diff line
@@ -884,7 +884,7 @@ static int
tls_device_reencrypt(struct sock *sk, struct tls_context *tls_ctx)
{
	struct tls_sw_context_rx *sw_ctx = tls_sw_ctx_rx(tls_ctx);
	const struct tls_cipher_size_desc *cipher_sz;
	const struct tls_cipher_desc *cipher_desc;
	int err, offset, copy, data_len, pos;
	struct sk_buff *skb, *skb_iter;
	struct scatterlist sg[1];
@@ -898,10 +898,10 @@ tls_device_reencrypt(struct sock *sk, struct tls_context *tls_ctx)
	default:
		return -EINVAL;
	}
	cipher_sz = &tls_cipher_size_desc[tls_ctx->crypto_recv.info.cipher_type];
	cipher_desc = get_cipher_desc(tls_ctx->crypto_recv.info.cipher_type);

	rxm = strp_msg(tls_strp_msg(sw_ctx));
	orig_buf = kmalloc(rxm->full_len + TLS_HEADER_SIZE + cipher_sz->iv,
	orig_buf = kmalloc(rxm->full_len + TLS_HEADER_SIZE + cipher_desc->iv,
			   sk->sk_allocation);
	if (!orig_buf)
		return -ENOMEM;
@@ -917,8 +917,8 @@ tls_device_reencrypt(struct sock *sk, struct tls_context *tls_ctx)

	sg_init_table(sg, 1);
	sg_set_buf(&sg[0], buf,
		   rxm->full_len + TLS_HEADER_SIZE + cipher_sz->iv);
	err = skb_copy_bits(skb, offset, buf, TLS_HEADER_SIZE + cipher_sz->iv);
		   rxm->full_len + TLS_HEADER_SIZE + cipher_desc->iv);
	err = skb_copy_bits(skb, offset, buf, TLS_HEADER_SIZE + cipher_desc->iv);
	if (err)
		goto free_buf;

@@ -929,7 +929,7 @@ tls_device_reencrypt(struct sock *sk, struct tls_context *tls_ctx)
	else
		err = 0;

	data_len = rxm->full_len - cipher_sz->tag;
	data_len = rxm->full_len - cipher_desc->tag;

	if (skb_pagelen(skb) > offset) {
		copy = min_t(int, skb_pagelen(skb) - offset, data_len);
@@ -1046,7 +1046,7 @@ int tls_set_device_offload(struct sock *sk, struct tls_context *ctx)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_prot_info *prot = &tls_ctx->prot_info;
	const struct tls_cipher_size_desc *cipher_sz;
	const struct tls_cipher_desc *cipher_desc;
	struct tls_record_info *start_marker_record;
	struct tls_offload_context_tx *offload_ctx;
	struct tls_crypto_info *crypto_info;
@@ -1079,46 +1079,32 @@ int tls_set_device_offload(struct sock *sk, struct tls_context *ctx)
		goto release_netdev;
	}

	switch (crypto_info->cipher_type) {
	case TLS_CIPHER_AES_GCM_128:
		iv = ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->iv;
		rec_seq =
		 ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->rec_seq;
		break;
	case TLS_CIPHER_AES_GCM_256:
		iv = ((struct tls12_crypto_info_aes_gcm_256 *)crypto_info)->iv;
		rec_seq =
		 ((struct tls12_crypto_info_aes_gcm_256 *)crypto_info)->rec_seq;
		break;
	default:
	cipher_desc = get_cipher_desc(crypto_info->cipher_type);
	if (!cipher_desc || !cipher_desc->offloadable) {
		rc = -EINVAL;
		goto release_netdev;
	}
	cipher_sz = &tls_cipher_size_desc[crypto_info->cipher_type];

	/* Sanity-check the rec_seq_size for stack allocations */
	if (cipher_sz->rec_seq > TLS_MAX_REC_SEQ_SIZE) {
		rc = -EINVAL;
		goto release_netdev;
	}
	iv = crypto_info_iv(crypto_info, cipher_desc);
	rec_seq = crypto_info_rec_seq(crypto_info, cipher_desc);

	prot->version = crypto_info->version;
	prot->cipher_type = crypto_info->cipher_type;
	prot->prepend_size = TLS_HEADER_SIZE + cipher_sz->iv;
	prot->tag_size = cipher_sz->tag;
	prot->prepend_size = TLS_HEADER_SIZE + cipher_desc->iv;
	prot->tag_size = cipher_desc->tag;
	prot->overhead_size = prot->prepend_size + prot->tag_size;
	prot->iv_size = cipher_sz->iv;
	prot->salt_size = cipher_sz->salt;
	ctx->tx.iv = kmalloc(cipher_sz->iv + cipher_sz->salt, GFP_KERNEL);
	prot->iv_size = cipher_desc->iv;
	prot->salt_size = cipher_desc->salt;
	ctx->tx.iv = kmalloc(cipher_desc->iv + cipher_desc->salt, GFP_KERNEL);
	if (!ctx->tx.iv) {
		rc = -ENOMEM;
		goto release_netdev;
	}

	memcpy(ctx->tx.iv + cipher_sz->salt, iv, cipher_sz->iv);
	memcpy(ctx->tx.iv + cipher_desc->salt, iv, cipher_desc->iv);

	prot->rec_seq_size = cipher_sz->rec_seq;
	ctx->tx.rec_seq = kmemdup(rec_seq, cipher_sz->rec_seq, GFP_KERNEL);
	prot->rec_seq_size = cipher_desc->rec_seq;
	ctx->tx.rec_seq = kmemdup(rec_seq, cipher_desc->rec_seq, GFP_KERNEL);
	if (!ctx->tx.rec_seq) {
		rc = -ENOMEM;
		goto free_iv;
+27 −35
Original line number Diff line number Diff line
@@ -55,7 +55,7 @@ static int tls_enc_record(struct aead_request *aead_req,
			  struct tls_prot_info *prot)
{
	unsigned char buf[TLS_HEADER_SIZE + MAX_IV_SIZE];
	const struct tls_cipher_size_desc *cipher_sz;
	const struct tls_cipher_desc *cipher_desc;
	struct scatterlist sg_in[3];
	struct scatterlist sg_out[3];
	unsigned int buf_size;
@@ -69,9 +69,9 @@ static int tls_enc_record(struct aead_request *aead_req,
	default:
		return -EINVAL;
	}
	cipher_sz = &tls_cipher_size_desc[prot->cipher_type];
	cipher_desc = get_cipher_desc(prot->cipher_type);

	buf_size = TLS_HEADER_SIZE + cipher_sz->iv;
	buf_size = TLS_HEADER_SIZE + cipher_desc->iv;
	len = min_t(int, *in_len, buf_size);

	scatterwalk_copychunks(buf, in, len, 0);
@@ -85,11 +85,11 @@ static int tls_enc_record(struct aead_request *aead_req,
	scatterwalk_pagedone(out, 1, 1);

	len = buf[4] | (buf[3] << 8);
	len -= cipher_sz->iv;
	len -= cipher_desc->iv;

	tls_make_aad(aad, len - cipher_sz->tag, (char *)&rcd_sn, buf[0], prot);
	tls_make_aad(aad, len - cipher_desc->tag, (char *)&rcd_sn, buf[0], prot);

	memcpy(iv + cipher_sz->salt, buf + TLS_HEADER_SIZE, cipher_sz->iv);
	memcpy(iv + cipher_desc->salt, buf + TLS_HEADER_SIZE, cipher_desc->iv);

	sg_init_table(sg_in, ARRAY_SIZE(sg_in));
	sg_init_table(sg_out, ARRAY_SIZE(sg_out));
@@ -100,7 +100,7 @@ static int tls_enc_record(struct aead_request *aead_req,

	*in_len -= len;
	if (*in_len < 0) {
		*in_len += cipher_sz->tag;
		*in_len += cipher_desc->tag;
		/* the input buffer doesn't contain the entire record.
		 * trim len accordingly. The resulting authentication tag
		 * will contain garbage, but we don't care, so we won't
@@ -121,7 +121,7 @@ static int tls_enc_record(struct aead_request *aead_req,
		scatterwalk_pagedone(out, 1, 1);
	}

	len -= cipher_sz->tag;
	len -= cipher_desc->tag;
	aead_request_set_crypt(aead_req, sg_in, sg_out, len, iv);

	rc = crypto_aead_encrypt(aead_req);
@@ -309,14 +309,14 @@ static void fill_sg_out(struct scatterlist sg_out[3], void *buf,
			int sync_size,
			void *dummy_buf)
{
	const struct tls_cipher_size_desc *cipher_sz =
		&tls_cipher_size_desc[tls_ctx->crypto_send.info.cipher_type];
	const struct tls_cipher_desc *cipher_desc =
		get_cipher_desc(tls_ctx->crypto_send.info.cipher_type);

	sg_set_buf(&sg_out[0], dummy_buf, sync_size);
	sg_set_buf(&sg_out[1], nskb->data + tcp_payload_offset, payload_len);
	/* Add room for authentication tag produced by crypto */
	dummy_buf += sync_size;
	sg_set_buf(&sg_out[2], dummy_buf, cipher_sz->tag);
	sg_set_buf(&sg_out[2], dummy_buf, cipher_desc->tag);
}

static struct sk_buff *tls_enc_skb(struct tls_context *tls_ctx,
@@ -328,7 +328,7 @@ static struct sk_buff *tls_enc_skb(struct tls_context *tls_ctx,
	struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx);
	int tcp_payload_offset = skb_tcp_all_headers(skb);
	int payload_len = skb->len - tcp_payload_offset;
	const struct tls_cipher_size_desc *cipher_sz;
	const struct tls_cipher_desc *cipher_desc;
	void *buf, *iv, *aad, *dummy_buf, *salt;
	struct aead_request *aead_req;
	struct sk_buff *nskb = NULL;
@@ -348,16 +348,16 @@ static struct sk_buff *tls_enc_skb(struct tls_context *tls_ctx,
	default:
		goto free_req;
	}
	cipher_sz = &tls_cipher_size_desc[tls_ctx->crypto_send.info.cipher_type];
	buf_len = cipher_sz->salt + cipher_sz->iv + TLS_AAD_SPACE_SIZE +
		  sync_size + cipher_sz->tag;
	cipher_desc = get_cipher_desc(tls_ctx->crypto_send.info.cipher_type);
	buf_len = cipher_desc->salt + cipher_desc->iv + TLS_AAD_SPACE_SIZE +
		  sync_size + cipher_desc->tag;
	buf = kmalloc(buf_len, GFP_ATOMIC);
	if (!buf)
		goto free_req;

	iv = buf;
	memcpy(iv, salt, cipher_sz->salt);
	aad = buf + cipher_sz->salt + cipher_sz->iv;
	memcpy(iv, salt, cipher_desc->salt);
	aad = buf + cipher_desc->salt + cipher_desc->iv;
	dummy_buf = aad + TLS_AAD_SPACE_SIZE;

	nskb = alloc_skb(skb_headroom(skb) + skb->len, GFP_ATOMIC);
@@ -471,12 +471,15 @@ int tls_sw_fallback_init(struct sock *sk,
			 struct tls_offload_context_tx *offload_ctx,
			 struct tls_crypto_info *crypto_info)
{
	const struct tls_cipher_size_desc *cipher_sz;
	const u8 *key;
	const struct tls_cipher_desc *cipher_desc;
	int rc;

	cipher_desc = get_cipher_desc(crypto_info->cipher_type);
	if (!cipher_desc || !cipher_desc->offloadable)
		return -EINVAL;

	offload_ctx->aead_send =
	    crypto_alloc_aead("gcm(aes)", 0, CRYPTO_ALG_ASYNC);
	    crypto_alloc_aead(cipher_desc->cipher_name, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(offload_ctx->aead_send)) {
		rc = PTR_ERR(offload_ctx->aead_send);
		pr_err_ratelimited("crypto_alloc_aead failed rc=%d\n", rc);
@@ -484,24 +487,13 @@ int tls_sw_fallback_init(struct sock *sk,
		goto err_out;
	}

	switch (crypto_info->cipher_type) {
	case TLS_CIPHER_AES_GCM_128:
		key = ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->key;
		break;
	case TLS_CIPHER_AES_GCM_256:
		key = ((struct tls12_crypto_info_aes_gcm_256 *)crypto_info)->key;
		break;
	default:
		rc = -EINVAL;
		goto free_aead;
	}
	cipher_sz = &tls_cipher_size_desc[crypto_info->cipher_type];

	rc = crypto_aead_setkey(offload_ctx->aead_send, key, cipher_sz->key);
	rc = crypto_aead_setkey(offload_ctx->aead_send,
				crypto_info_key(crypto_info, cipher_desc),
				cipher_desc->key);
	if (rc)
		goto free_aead;

	rc = crypto_aead_setauthsize(offload_ctx->aead_send, cipher_sz->tag);
	rc = crypto_aead_setauthsize(offload_ctx->aead_send, cipher_desc->tag);
	if (rc)
		goto free_aead;

+70 −202
Original line number Diff line number Diff line
@@ -58,23 +58,66 @@ enum {
	TLS_NUM_PROTS,
};

#define CIPHER_SIZE_DESC(cipher) [cipher] = { \
/* Compile-time sanity checks tying the cipher's size #defines to the
 * actual layout of its tls12_crypto_info_* struct @ci, and to the
 * stack-allocation limits MAX_IV_SIZE / TLS_MAX_REC_SEQ_SIZE.
 */
#define CHECK_CIPHER_DESC(cipher,ci)				\
	static_assert(cipher ## _IV_SIZE <= MAX_IV_SIZE);		\
	static_assert(cipher ## _REC_SEQ_SIZE <= TLS_MAX_REC_SEQ_SIZE);	\
	static_assert(cipher ## _TAG_SIZE == TLS_TAG_SIZE);		\
	static_assert(sizeof_field(struct ci, iv) == cipher ## _IV_SIZE);	\
	static_assert(sizeof_field(struct ci, key) == cipher ## _KEY_SIZE);	\
	static_assert(sizeof_field(struct ci, salt) == cipher ## _SALT_SIZE);	\
	static_assert(sizeof_field(struct ci, rec_seq) == cipher ## _REC_SEQ_SIZE);

/* Common initializers: field offsets within struct @ci, and its total
 * size (used as the copy length for get/setsockopt).
 */
#define __CIPHER_DESC(ci) \
	.iv_offset = offsetof(struct ci, iv), \
	.key_offset = offsetof(struct ci, key), \
	.salt_offset = offsetof(struct ci, salt), \
	.rec_seq_offset = offsetof(struct ci, rec_seq), \
	.crypto_info = sizeof(struct ci)

/* Standard descriptor entry: nonce equals the IV size. */
#define CIPHER_DESC(cipher,ci,algname,_offloadable) [cipher - TLS_CIPHER_MIN] = {	\
	.nonce = cipher ## _IV_SIZE, \
	.iv = cipher ## _IV_SIZE, \
	.key = cipher ## _KEY_SIZE, \
	.salt = cipher ## _SALT_SIZE, \
	.tag = cipher ## _TAG_SIZE, \
	.rec_seq = cipher ## _REC_SEQ_SIZE, \
	.cipher_name = algname,	\
	.offloadable = _offloadable, \
	__CIPHER_DESC(ci), \
}

const struct tls_cipher_size_desc tls_cipher_size_desc[] = {
	CIPHER_SIZE_DESC(TLS_CIPHER_AES_GCM_128),
	CIPHER_SIZE_DESC(TLS_CIPHER_AES_GCM_256),
	CIPHER_SIZE_DESC(TLS_CIPHER_AES_CCM_128),
	CIPHER_SIZE_DESC(TLS_CIPHER_CHACHA20_POLY1305),
	CIPHER_SIZE_DESC(TLS_CIPHER_SM4_GCM),
	CIPHER_SIZE_DESC(TLS_CIPHER_SM4_CCM),
/* Same as CIPHER_DESC, but for ciphers with no explicit per-record nonce
 * on the wire (nonce = 0); used below for chacha20-poly1305.
 */
#define CIPHER_DESC_NONCE0(cipher,ci,algname,_offloadable) [cipher - TLS_CIPHER_MIN] = { \
	.nonce = 0, \
	.iv = cipher ## _IV_SIZE, \
	.key = cipher ## _KEY_SIZE, \
	.salt = cipher ## _SALT_SIZE, \
	.tag = cipher ## _TAG_SIZE, \
	.rec_seq = cipher ## _REC_SEQ_SIZE, \
	.cipher_name = algname,	\
	.offloadable = _offloadable, \
	__CIPHER_DESC(ci), \
}

/* Cipher descriptor table, indexed by cipher_type - TLS_CIPHER_MIN.
 * Only the AES-GCM variants are marked offloadable here.
 */
const struct tls_cipher_desc tls_cipher_desc[TLS_CIPHER_MAX + 1 - TLS_CIPHER_MIN] = {
	CIPHER_DESC(TLS_CIPHER_AES_GCM_128, tls12_crypto_info_aes_gcm_128, "gcm(aes)", true),
	CIPHER_DESC(TLS_CIPHER_AES_GCM_256, tls12_crypto_info_aes_gcm_256, "gcm(aes)", true),
	CIPHER_DESC(TLS_CIPHER_AES_CCM_128, tls12_crypto_info_aes_ccm_128, "ccm(aes)", false),
	CIPHER_DESC_NONCE0(TLS_CIPHER_CHACHA20_POLY1305, tls12_crypto_info_chacha20_poly1305, "rfc7539(chacha20,poly1305)", false),
	CIPHER_DESC(TLS_CIPHER_SM4_GCM, tls12_crypto_info_sm4_gcm, "gcm(sm4)", false),
	CIPHER_DESC(TLS_CIPHER_SM4_CCM, tls12_crypto_info_sm4_ccm, "ccm(sm4)", false),
	CIPHER_DESC(TLS_CIPHER_ARIA_GCM_128, tls12_crypto_info_aria_gcm_128, "gcm(aria)", false),
	CIPHER_DESC(TLS_CIPHER_ARIA_GCM_256, tls12_crypto_info_aria_gcm_256, "gcm(aria)", false),
};

/* Build-time validation of every cipher's #defines against its
 * crypto_info struct layout (expands to static_asserts only).
 */
CHECK_CIPHER_DESC(TLS_CIPHER_AES_GCM_128, tls12_crypto_info_aes_gcm_128);
CHECK_CIPHER_DESC(TLS_CIPHER_AES_GCM_256, tls12_crypto_info_aes_gcm_256);
CHECK_CIPHER_DESC(TLS_CIPHER_AES_CCM_128, tls12_crypto_info_aes_ccm_128);
CHECK_CIPHER_DESC(TLS_CIPHER_CHACHA20_POLY1305, tls12_crypto_info_chacha20_poly1305);
CHECK_CIPHER_DESC(TLS_CIPHER_SM4_GCM, tls12_crypto_info_sm4_gcm);
CHECK_CIPHER_DESC(TLS_CIPHER_SM4_CCM, tls12_crypto_info_sm4_ccm);
CHECK_CIPHER_DESC(TLS_CIPHER_ARIA_GCM_128, tls12_crypto_info_aria_gcm_128);
CHECK_CIPHER_DESC(TLS_CIPHER_ARIA_GCM_256, tls12_crypto_info_aria_gcm_256);

static const struct proto *saved_tcpv6_prot;
static DEFINE_MUTEX(tcpv6_prot_mutex);
static const struct proto *saved_tcpv4_prot;
@@ -392,6 +435,7 @@ static int do_tls_getsockopt_conf(struct sock *sk, char __user *optval,
				  int __user *optlen, int tx)
{
	int rc = 0;
	const struct tls_cipher_desc *cipher_desc;
	struct tls_context *ctx = tls_get_ctx(sk);
	struct tls_crypto_info *crypto_info;
	struct cipher_context *cctx;
@@ -430,172 +474,19 @@ static int do_tls_getsockopt_conf(struct sock *sk, char __user *optval,
		goto out;
	}

	switch (crypto_info->cipher_type) {
	case TLS_CIPHER_AES_GCM_128: {
		struct tls12_crypto_info_aes_gcm_128 *
		  crypto_info_aes_gcm_128 =
		  container_of(crypto_info,
			       struct tls12_crypto_info_aes_gcm_128,
			       info);

		if (len != sizeof(*crypto_info_aes_gcm_128)) {
			rc = -EINVAL;
			goto out;
		}
		memcpy(crypto_info_aes_gcm_128->iv,
		       cctx->iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
		       TLS_CIPHER_AES_GCM_128_IV_SIZE);
		memcpy(crypto_info_aes_gcm_128->rec_seq, cctx->rec_seq,
		       TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE);
		if (copy_to_user(optval,
				 crypto_info_aes_gcm_128,
				 sizeof(*crypto_info_aes_gcm_128)))
			rc = -EFAULT;
		break;
	}
	case TLS_CIPHER_AES_GCM_256: {
		struct tls12_crypto_info_aes_gcm_256 *
		  crypto_info_aes_gcm_256 =
		  container_of(crypto_info,
			       struct tls12_crypto_info_aes_gcm_256,
			       info);

		if (len != sizeof(*crypto_info_aes_gcm_256)) {
			rc = -EINVAL;
			goto out;
		}
		memcpy(crypto_info_aes_gcm_256->iv,
		       cctx->iv + TLS_CIPHER_AES_GCM_256_SALT_SIZE,
		       TLS_CIPHER_AES_GCM_256_IV_SIZE);
		memcpy(crypto_info_aes_gcm_256->rec_seq, cctx->rec_seq,
		       TLS_CIPHER_AES_GCM_256_REC_SEQ_SIZE);
		if (copy_to_user(optval,
				 crypto_info_aes_gcm_256,
				 sizeof(*crypto_info_aes_gcm_256)))
			rc = -EFAULT;
		break;
	}
	case TLS_CIPHER_AES_CCM_128: {
		struct tls12_crypto_info_aes_ccm_128 *aes_ccm_128 =
			container_of(crypto_info,
				struct tls12_crypto_info_aes_ccm_128, info);

		if (len != sizeof(*aes_ccm_128)) {
			rc = -EINVAL;
			goto out;
		}
		memcpy(aes_ccm_128->iv,
		       cctx->iv + TLS_CIPHER_AES_CCM_128_SALT_SIZE,
		       TLS_CIPHER_AES_CCM_128_IV_SIZE);
		memcpy(aes_ccm_128->rec_seq, cctx->rec_seq,
		       TLS_CIPHER_AES_CCM_128_REC_SEQ_SIZE);
		if (copy_to_user(optval, aes_ccm_128, sizeof(*aes_ccm_128)))
			rc = -EFAULT;
		break;
	}
	case TLS_CIPHER_CHACHA20_POLY1305: {
		struct tls12_crypto_info_chacha20_poly1305 *chacha20_poly1305 =
			container_of(crypto_info,
				struct tls12_crypto_info_chacha20_poly1305,
				info);

		if (len != sizeof(*chacha20_poly1305)) {
			rc = -EINVAL;
			goto out;
		}
		memcpy(chacha20_poly1305->iv,
		       cctx->iv + TLS_CIPHER_CHACHA20_POLY1305_SALT_SIZE,
		       TLS_CIPHER_CHACHA20_POLY1305_IV_SIZE);
		memcpy(chacha20_poly1305->rec_seq, cctx->rec_seq,
		       TLS_CIPHER_CHACHA20_POLY1305_REC_SEQ_SIZE);
		if (copy_to_user(optval, chacha20_poly1305,
				sizeof(*chacha20_poly1305)))
			rc = -EFAULT;
		break;
	}
	case TLS_CIPHER_SM4_GCM: {
		struct tls12_crypto_info_sm4_gcm *sm4_gcm_info =
			container_of(crypto_info,
				struct tls12_crypto_info_sm4_gcm, info);

		if (len != sizeof(*sm4_gcm_info)) {
			rc = -EINVAL;
			goto out;
		}
		memcpy(sm4_gcm_info->iv,
		       cctx->iv + TLS_CIPHER_SM4_GCM_SALT_SIZE,
		       TLS_CIPHER_SM4_GCM_IV_SIZE);
		memcpy(sm4_gcm_info->rec_seq, cctx->rec_seq,
		       TLS_CIPHER_SM4_GCM_REC_SEQ_SIZE);
		if (copy_to_user(optval, sm4_gcm_info, sizeof(*sm4_gcm_info)))
			rc = -EFAULT;
		break;
	}
	case TLS_CIPHER_SM4_CCM: {
		struct tls12_crypto_info_sm4_ccm *sm4_ccm_info =
			container_of(crypto_info,
				struct tls12_crypto_info_sm4_ccm, info);

		if (len != sizeof(*sm4_ccm_info)) {
	cipher_desc = get_cipher_desc(crypto_info->cipher_type);
	if (!cipher_desc || len != cipher_desc->crypto_info) {
		rc = -EINVAL;
		goto out;
	}
		memcpy(sm4_ccm_info->iv,
		       cctx->iv + TLS_CIPHER_SM4_CCM_SALT_SIZE,
		       TLS_CIPHER_SM4_CCM_IV_SIZE);
		memcpy(sm4_ccm_info->rec_seq, cctx->rec_seq,
		       TLS_CIPHER_SM4_CCM_REC_SEQ_SIZE);
		if (copy_to_user(optval, sm4_ccm_info, sizeof(*sm4_ccm_info)))
			rc = -EFAULT;
		break;
	}
	case TLS_CIPHER_ARIA_GCM_128: {
		struct tls12_crypto_info_aria_gcm_128 *
		  crypto_info_aria_gcm_128 =
		  container_of(crypto_info,
			       struct tls12_crypto_info_aria_gcm_128,
			       info);

		if (len != sizeof(*crypto_info_aria_gcm_128)) {
			rc = -EINVAL;
			goto out;
		}
		memcpy(crypto_info_aria_gcm_128->iv,
		       cctx->iv + TLS_CIPHER_ARIA_GCM_128_SALT_SIZE,
		       TLS_CIPHER_ARIA_GCM_128_IV_SIZE);
		memcpy(crypto_info_aria_gcm_128->rec_seq, cctx->rec_seq,
		       TLS_CIPHER_ARIA_GCM_128_REC_SEQ_SIZE);
		if (copy_to_user(optval,
				 crypto_info_aria_gcm_128,
				 sizeof(*crypto_info_aria_gcm_128)))
			rc = -EFAULT;
		break;
	}
	case TLS_CIPHER_ARIA_GCM_256: {
		struct tls12_crypto_info_aria_gcm_256 *
		  crypto_info_aria_gcm_256 =
		  container_of(crypto_info,
			       struct tls12_crypto_info_aria_gcm_256,
			       info);
	memcpy(crypto_info_iv(crypto_info, cipher_desc),
	       cctx->iv + cipher_desc->salt, cipher_desc->iv);
	memcpy(crypto_info_rec_seq(crypto_info, cipher_desc),
	       cctx->rec_seq, cipher_desc->rec_seq);

		if (len != sizeof(*crypto_info_aria_gcm_256)) {
			rc = -EINVAL;
			goto out;
		}
		memcpy(crypto_info_aria_gcm_256->iv,
		       cctx->iv + TLS_CIPHER_ARIA_GCM_256_SALT_SIZE,
		       TLS_CIPHER_ARIA_GCM_256_IV_SIZE);
		memcpy(crypto_info_aria_gcm_256->rec_seq, cctx->rec_seq,
		       TLS_CIPHER_ARIA_GCM_256_REC_SEQ_SIZE);
		if (copy_to_user(optval,
				 crypto_info_aria_gcm_256,
				 sizeof(*crypto_info_aria_gcm_256)))
	if (copy_to_user(optval, crypto_info, cipher_desc->crypto_info))
		rc = -EFAULT;
		break;
	}
	default:
		rc = -EINVAL;
	}

out:
	return rc;
@@ -696,7 +587,7 @@ static int do_tls_setsockopt_conf(struct sock *sk, sockptr_t optval,
	struct tls_crypto_info *crypto_info;
	struct tls_crypto_info *alt_crypto_info;
	struct tls_context *ctx = tls_get_ctx(sk);
	size_t optsize;
	const struct tls_cipher_desc *cipher_desc;
	int rc = 0;
	int conf;

@@ -737,46 +628,23 @@ static int do_tls_setsockopt_conf(struct sock *sk, sockptr_t optval,
		}
	}

	switch (crypto_info->cipher_type) {
	case TLS_CIPHER_AES_GCM_128:
		optsize = sizeof(struct tls12_crypto_info_aes_gcm_128);
		break;
	case TLS_CIPHER_AES_GCM_256: {
		optsize = sizeof(struct tls12_crypto_info_aes_gcm_256);
		break;
	}
	case TLS_CIPHER_AES_CCM_128:
		optsize = sizeof(struct tls12_crypto_info_aes_ccm_128);
		break;
	case TLS_CIPHER_CHACHA20_POLY1305:
		optsize = sizeof(struct tls12_crypto_info_chacha20_poly1305);
		break;
	case TLS_CIPHER_SM4_GCM:
		optsize = sizeof(struct tls12_crypto_info_sm4_gcm);
		break;
	case TLS_CIPHER_SM4_CCM:
		optsize = sizeof(struct tls12_crypto_info_sm4_ccm);
		break;
	case TLS_CIPHER_ARIA_GCM_128:
		if (crypto_info->version != TLS_1_2_VERSION) {
	cipher_desc = get_cipher_desc(crypto_info->cipher_type);
	if (!cipher_desc) {
		rc = -EINVAL;
		goto err_crypto_info;
	}
		optsize = sizeof(struct tls12_crypto_info_aria_gcm_128);
		break;

	switch (crypto_info->cipher_type) {
	case TLS_CIPHER_ARIA_GCM_128:
	case TLS_CIPHER_ARIA_GCM_256:
		if (crypto_info->version != TLS_1_2_VERSION) {
			rc = -EINVAL;
			goto err_crypto_info;
		}
		optsize = sizeof(struct tls12_crypto_info_aria_gcm_256);
		break;
	default:
		rc = -EINVAL;
		goto err_crypto_info;
	}

	if (optlen != optsize) {
	if (optlen != cipher_desc->crypto_info) {
		rc = -EINVAL;
		goto err_crypto_info;
	}
Loading