Commit 91bb7b8b authored by Jakub Kicinski, committed by Ziyang Xuan
Browse files

net: tls: factor out tls_*crypt_async_wait()

mainline inclusion
from mainline-v6.8-rc5
commit c57ca512f3b68ddcd62bda9cc24a8f5584ab01b1
category: bugfix
bugzilla: https://gitee.com/src-openeuler/kernel/issues/I92REK
CVE: CVE-2024-26583

Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=c57ca512f3b68ddcd62bda9cc24a8f5584ab01b1



--------------------------------

Factor out waiting for async encrypt and decrypt to finish.
There are already multiple copies and a subsequent fix will
need more. No functional changes.

Note that crypto_wait_req() returns wait->err

Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Reviewed-by: Simon Horman <horms@kernel.org>
Reviewed-by: Sabrina Dubroca <sd@queasysnail.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
Conflicts:
	net/tls/tls_sw.c
Signed-off-by: Ziyang Xuan <william.xuanziyang@huawei.com>
parent 300a4e32
Loading
Loading
Loading
Loading
+49 −40
Original line number Diff line number Diff line
@@ -221,6 +221,20 @@ static void tls_decrypt_done(struct crypto_async_request *req, int err)
	spin_unlock_bh(&ctx->decrypt_compl_lock);
}

/* Wait for all previously submitted async decrypt requests to finish.
 *
 * The pending count is sampled under decrypt_compl_lock so the snapshot
 * is consistent with the completion signalling done by tls_decrypt_done()
 * (visible in the hunk above, which takes the same lock).  The completion
 * is re-armed before the count is read so a wakeup cannot be missed.
 *
 * Returns ctx->async_wait.err: 0 on success, or the error recorded by the
 * last failing async decrypt.  Note that crypto_wait_req() itself returns
 * wait->err, so reading it directly here is equivalent when we waited.
 */
static int tls_decrypt_async_wait(struct tls_sw_context_rx *ctx)
{
	int pending;

	spin_lock_bh(&ctx->decrypt_compl_lock);
	reinit_completion(&ctx->async_wait.completion);
	pending = atomic_read(&ctx->decrypt_pending);
	spin_unlock_bh(&ctx->decrypt_compl_lock);
	if (pending)
		crypto_wait_req(-EINPROGRESS, &ctx->async_wait);

	/* Read even when no wait happened: err may hold a previously
	 * recorded async failure. */
	return ctx->async_wait.err;
}

static int tls_do_decryption(struct sock *sk,
			     struct sk_buff *skb,
			     struct scatterlist *sgin,
@@ -491,6 +505,28 @@ static void tls_encrypt_done(struct crypto_async_request *req, int err)
		schedule_delayed_work(&ctx->tx_work.work, 1);
}

/* Wait for all in-flight async encrypt requests to finish.
 *
 * async_notify is raised under encrypt_compl_lock so that completing
 * encrypt callbacks (tls_encrypt_done(), above) observe it consistently
 * with the pending count sampled here.  If nothing is pending we only
 * re-arm the completion for the next round instead of sleeping.
 *
 * Returns ctx->async_wait.err: 0 on success, or the error recorded by a
 * failed async encrypt.  crypto_wait_req() returns wait->err as well, so
 * reading the field directly is equivalent when a wait occurred.
 */
static int tls_encrypt_async_wait(struct tls_sw_context_tx *ctx)
{
	int pending;

	spin_lock_bh(&ctx->encrypt_compl_lock);
	ctx->async_notify = true;

	pending = atomic_read(&ctx->encrypt_pending);
	spin_unlock_bh(&ctx->encrypt_compl_lock);
	if (pending)
		crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
	else
		reinit_completion(&ctx->async_wait.completion);

	/* There can be no concurrent accesses, since we have no
	 * pending encrypt operations
	 */
	WRITE_ONCE(ctx->async_notify, false);

	return ctx->async_wait.err;
}

static int tls_do_encryption(struct sock *sk,
			     struct tls_context *tls_ctx,
			     struct tls_sw_context_tx *ctx,
@@ -947,7 +983,6 @@ int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
	int num_zc = 0;
	int orig_size;
	int ret = 0;
	int pending;

	if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
			       MSG_CMSG_COMPAT))
@@ -1116,24 +1151,12 @@ int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
	if (!num_async) {
		goto send_end;
	} else if (num_zc) {
		/* Wait for pending encryptions to get completed */
		spin_lock_bh(&ctx->encrypt_compl_lock);
		ctx->async_notify = true;

		pending = atomic_read(&ctx->encrypt_pending);
		spin_unlock_bh(&ctx->encrypt_compl_lock);
		if (pending)
			crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
		else
			reinit_completion(&ctx->async_wait.completion);

		/* There can be no concurrent accesses, since we have no
		 * pending encrypt operations
		 */
		WRITE_ONCE(ctx->async_notify, false);
		int err;

		if (ctx->async_wait.err) {
			ret = ctx->async_wait.err;
		/* Wait for pending encryptions to get completed */
		err = tls_encrypt_async_wait(ctx);
		if (err) {
			ret = err;
			copied = 0;
		}
	}
@@ -1774,7 +1797,6 @@ int tls_sw_recvmsg(struct sock *sk,
	bool is_peek = flags & MSG_PEEK;
	bool bpf_strp_enabled;
	int num_async = 0;
	int pending;

	flags |= nonblock;

@@ -1952,12 +1974,7 @@ int tls_sw_recvmsg(struct sock *sk,
recv_end:
	if (num_async) {
		/* Wait for all previously submitted records to be decrypted */
		spin_lock_bh(&ctx->decrypt_compl_lock);
		reinit_completion(&ctx->async_wait.completion);
		pending = atomic_read(&ctx->decrypt_pending);
		spin_unlock_bh(&ctx->decrypt_compl_lock);
		if (pending) {
			err = crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
		err = tls_decrypt_async_wait(ctx);
		if (err) {
			/* one of async decrypt failed */
			tls_err_abort(sk, err);
@@ -1965,7 +1982,6 @@ int tls_sw_recvmsg(struct sock *sk,
			decrypted = 0;
			goto end;
		}
		}

		/* Drain records from the rx_list & copy if required */
		if (is_peek || is_kvec)
@@ -2178,16 +2194,9 @@ void tls_sw_release_resources_tx(struct sock *sk)
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec, *tmp;
	int pending;

	/* Wait for any pending async encryptions to complete */
	spin_lock_bh(&ctx->encrypt_compl_lock);
	ctx->async_notify = true;
	pending = atomic_read(&ctx->encrypt_pending);
	spin_unlock_bh(&ctx->encrypt_compl_lock);

	if (pending)
		crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
	tls_encrypt_async_wait(ctx);

	tls_tx_records(sk, -1);