Unverified commit 7839b9d4, authored by openeuler-ci-bot and committed via Gitee
Browse files

!4803 CVE-2024-26583

Merge Pull Request from: @ci-robot 
 
PR sync from: Ziyang Xuan <william.xuanziyang@huawei.com>
https://mailweb.openeuler.org/hyperkitty/list/kernel@openeuler.org/message/WRBJNBOMPNGRKKYWYGNZQVYPJOV4KF4C/ 
Patchset for CVE-2024-26583.

Jakub Kicinski (4):
  tls: rx: simplify async wait
  net: tls: factor out tls_*crypt_async_wait()
  tls: fix race between async notify and socket close
  tls: fix race between tx work scheduling and socket close


-- 
2.25.1
 
https://gitee.com/src-openeuler/kernel/issues/I92REK 
 
Link: https://gitee.com/openeuler/kernel/pulls/4803

 

Reviewed-by: Yue Haibing <yuehaibing@huawei.com>
Signed-off-by: Jialin Zhang <zhangjialin11@huawei.com>
parents ebba5515 d2631e61
Loading
Loading
Loading
Loading
+0 −6
Original line number Diff line number Diff line
@@ -137,9 +137,6 @@ struct tls_sw_context_tx {
	struct tls_rec *open_rec;
	struct list_head tx_list;
	atomic_t encrypt_pending;
	/* protect crypto_wait with encrypt_pending */
	spinlock_t encrypt_compl_lock;
	int async_notify;
	u8 async_capable:1;

#define BIT_TX_SCHEDULED	0
@@ -159,9 +156,6 @@ struct tls_sw_context_rx {
	u8 async_capable:1;
	u8 decrypted:1;
	atomic_t decrypt_pending;
	/* protect crypto_wait with decrypt_pending*/
	spinlock_t decrypt_compl_lock;
	bool async_notify;
};

struct tls_record_info {
+39 −69
Original line number Diff line number Diff line
@@ -169,7 +169,6 @@ static void tls_decrypt_done(struct crypto_async_request *req, int err)
	struct scatterlist *sg;
	struct sk_buff *skb;
	unsigned int pages;
	int pending;

	skb = (struct sk_buff *)req->data;
	tls_ctx = tls_get_ctx(skb->sk);
@@ -216,12 +215,17 @@ static void tls_decrypt_done(struct crypto_async_request *req, int err)

	kfree(aead_req);

	spin_lock_bh(&ctx->decrypt_compl_lock);
	pending = atomic_dec_return(&ctx->decrypt_pending);

	if (!pending && ctx->async_notify)
	if (atomic_dec_and_test(&ctx->decrypt_pending))
		complete(&ctx->async_wait.completion);
	spin_unlock_bh(&ctx->decrypt_compl_lock);
}

/* Wait for every outstanding async decrypt request to finish.
 *
 * decrypt_pending is initialized to 1 (see tls_set_sw_offload in this
 * patch), so it holds one reference on behalf of the waiter in addition
 * to one per in-flight request.  Dropping our reference here can only
 * bring the counter to zero after the last tls_decrypt_done() callback
 * has dropped its own reference and completed async_wait; otherwise we
 * sleep in crypto_wait_req() until that completion fires.
 *
 * The final decrement is undone afterwards so the counter is re-armed
 * at 1 for the next batch of async requests.
 *
 * Returns the error recorded in ctx->async_wait.err (0 on success).
 */
static int tls_decrypt_async_wait(struct tls_sw_context_rx *ctx)
{
	/* Only sleep if callbacks are still pending; the last one to
	 * finish signals async_wait.completion.
	 */
	if (!atomic_dec_and_test(&ctx->decrypt_pending))
		crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
	atomic_inc(&ctx->decrypt_pending);

	return ctx->async_wait.err;
}

static int tls_do_decryption(struct sock *sk,
@@ -443,8 +447,6 @@ static void tls_encrypt_done(struct crypto_async_request *req, int err)
	struct scatterlist *sge;
	struct sk_msg *msg_en;
	struct tls_rec *rec;
	bool ready = false;
	int pending;

	rec = container_of(aead_req, struct tls_rec, aead_req);
	msg_en = &rec->msg_encrypted;
@@ -475,23 +477,25 @@ static void tls_encrypt_done(struct crypto_async_request *req, int err)
		/* If received record is at head of tx_list, schedule tx */
		first_rec = list_first_entry(&ctx->tx_list,
					     struct tls_rec, list);
		if (rec == first_rec)
			ready = true;
		if (rec == first_rec) {
			/* Schedule the transmission */
			if (!test_and_set_bit(BIT_TX_SCHEDULED,
					      &ctx->tx_bitmask))
				schedule_delayed_work(&ctx->tx_work.work, 1);
		}
	}

	spin_lock_bh(&ctx->encrypt_compl_lock);
	pending = atomic_dec_return(&ctx->encrypt_pending);

	if (!pending && ctx->async_notify)
	if (atomic_dec_and_test(&ctx->encrypt_pending))
		complete(&ctx->async_wait.completion);
	spin_unlock_bh(&ctx->encrypt_compl_lock);
}

	if (!ready)
		return;
/* Wait for every outstanding async encrypt request to finish.
 *
 * Mirror of tls_decrypt_async_wait() for the TX path: encrypt_pending
 * starts at 1 (see tls_set_sw_offload in this patch), so the counter
 * reaches zero here only after the last tls_encrypt_done() callback has
 * dropped its reference and completed async_wait.  The decrement is
 * then undone to re-arm the counter for the next batch.
 *
 * Returns the error recorded in ctx->async_wait.err (0 on success).
 */
static int tls_encrypt_async_wait(struct tls_sw_context_tx *ctx)
{
	if (!atomic_dec_and_test(&ctx->encrypt_pending))
		crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
	atomic_inc(&ctx->encrypt_pending);

	/* NOTE(review): the scheduling below may be interleaved removed-diff
	 * context from the old tls_encrypt_done() tail rather than part of
	 * this helper — confirm against the applied patch.
	 */
	/* Schedule the transmission */
	if (!test_and_set_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask))
		schedule_delayed_work(&ctx->tx_work.work, 1);
	return ctx->async_wait.err;
}

static int tls_do_encryption(struct sock *sk,
@@ -950,7 +954,6 @@ int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
	int num_zc = 0;
	int orig_size;
	int ret = 0;
	int pending;

	if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
			       MSG_CMSG_COMPAT))
@@ -1119,24 +1122,12 @@ int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
	if (!num_async) {
		goto send_end;
	} else if (num_zc) {
		/* Wait for pending encryptions to get completed */
		spin_lock_bh(&ctx->encrypt_compl_lock);
		ctx->async_notify = true;

		pending = atomic_read(&ctx->encrypt_pending);
		spin_unlock_bh(&ctx->encrypt_compl_lock);
		if (pending)
			crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
		else
			reinit_completion(&ctx->async_wait.completion);

		/* There can be no concurrent accesses, since we have no
		 * pending encrypt operations
		 */
		WRITE_ONCE(ctx->async_notify, false);
		int err;

		if (ctx->async_wait.err) {
			ret = ctx->async_wait.err;
		/* Wait for pending encryptions to get completed */
		err = tls_encrypt_async_wait(ctx);
		if (err) {
			ret = err;
			copied = 0;
		}
	}
@@ -1777,7 +1768,6 @@ int tls_sw_recvmsg(struct sock *sk,
	bool is_peek = flags & MSG_PEEK;
	bool bpf_strp_enabled;
	int num_async = 0;
	int pending;

	flags |= nonblock;

@@ -1955,12 +1945,7 @@ int tls_sw_recvmsg(struct sock *sk,
recv_end:
	if (num_async) {
		/* Wait for all previously submitted records to be decrypted */
		spin_lock_bh(&ctx->decrypt_compl_lock);
		ctx->async_notify = true;
		pending = atomic_read(&ctx->decrypt_pending);
		spin_unlock_bh(&ctx->decrypt_compl_lock);
		if (pending) {
			err = crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
		err = tls_decrypt_async_wait(ctx);
		if (err) {
			/* one of async decrypt failed */
			tls_err_abort(sk, err);
@@ -1968,14 +1953,6 @@ int tls_sw_recvmsg(struct sock *sk,
			decrypted = 0;
			goto end;
		}
		} else {
			reinit_completion(&ctx->async_wait.completion);
		}

		/* There can be no concurrent accesses, since we have no
		 * pending decrypt operations
		 */
		WRITE_ONCE(ctx->async_notify, false);

		/* Drain records from the rx_list & copy if required */
		if (is_peek || is_kvec)
@@ -2188,16 +2165,9 @@ void tls_sw_release_resources_tx(struct sock *sk)
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec, *tmp;
	int pending;

	/* Wait for any pending async encryptions to complete */
	spin_lock_bh(&ctx->encrypt_compl_lock);
	ctx->async_notify = true;
	pending = atomic_read(&ctx->encrypt_pending);
	spin_unlock_bh(&ctx->encrypt_compl_lock);

	if (pending)
		crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
	tls_encrypt_async_wait(ctx);

	tls_tx_records(sk, -1);

@@ -2387,7 +2357,7 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)

	if (tx) {
		crypto_init_wait(&sw_ctx_tx->async_wait);
		spin_lock_init(&sw_ctx_tx->encrypt_compl_lock);
		atomic_set(&sw_ctx_tx->encrypt_pending, 1);
		crypto_info = &ctx->crypto_send.info;
		cctx = &ctx->tx;
		aead = &sw_ctx_tx->aead_send;
@@ -2396,7 +2366,7 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
		sw_ctx_tx->tx_work.sk = sk;
	} else {
		crypto_init_wait(&sw_ctx_rx->async_wait);
		spin_lock_init(&sw_ctx_rx->decrypt_compl_lock);
		atomic_set(&sw_ctx_rx->decrypt_pending, 1);
		crypto_info = &ctx->crypto_recv.info;
		cctx = &ctx->rx;
		skb_queue_head_init(&sw_ctx_rx->rx_list);