Commit b9407325 authored by Jens Axboe's avatar Jens Axboe Committed by Zheng Zengkai
Browse files

io_uring: fix race between timeout flush and removal

stable inclusion
from stable-v5.10.110
commit 2827328e646d0c2d3db1bfcad4b5f5016ce0d643
category: bugfix
bugzilla: 186670, https://gitee.com/src-openeuler/kernel/issues/I54H78
CVE: CVE-2022-29582

Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?h=linux-5.10.y&id=2827328e646d0c2d3db1bfcad4b5f5016ce0d643



--------------------------------

commit e677edbc upstream.

io_flush_timeouts() assumes the timeout isn't in the process of triggering
or being removed/canceled, so it unconditionally removes it from the
timeout list and attempts to cancel it.

Leave it on the list and let the normal timeout cancelation take care
of it.

Cc: stable@vger.kernel.org # 5.5+
Signed-off-by: default avatarJens Axboe <axboe@kernel.dk>
Signed-off-by: default avatarGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Conflicts:
	fs/io_uring.c

Signed-off-by: default avatarGuo Xuenan <guoxuenan@huawei.com>
Reviewed-by: default avatarZhang Yi <yi.zhang@huawei.com>
Signed-off-by: default avatarZheng Zengkai <zhengzengkai@huawei.com>
parent 9944ac6f
Loading
Loading
Loading
Loading
+5 −5
Original line number Diff line number Diff line
@@ -1556,6 +1556,7 @@ static void __io_queue_deferred(struct io_ring_ctx *ctx)

static void io_flush_timeouts(struct io_ring_ctx *ctx)
{
	struct io_kiocb *req, *tmp;
	u32 seq;

	if (list_empty(&ctx->timeout_list))
@@ -1563,10 +1564,8 @@ static void io_flush_timeouts(struct io_ring_ctx *ctx)

	seq = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);

	do {
	list_for_each_entry_safe(req, tmp, &ctx->timeout_list, timeout.list) {
		u32 events_needed, events_got;
		struct io_kiocb *req = list_first_entry(&ctx->timeout_list,
						struct io_kiocb, timeout.list);

		if (io_is_timeout_noseq(req))
			break;
@@ -1583,9 +1582,8 @@ static void io_flush_timeouts(struct io_ring_ctx *ctx)
		if (events_got < events_needed)
			break;

		list_del_init(&req->timeout.list);
		io_kill_timeout(req, 0);
	} while (!list_empty(&ctx->timeout_list));
	}

	ctx->cq_last_tm_flush = seq;
}
@@ -5629,6 +5627,7 @@ static int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
	else
		data->mode = HRTIMER_MODE_REL;

	INIT_LIST_HEAD(&req->timeout.list);
	hrtimer_init(&data->timer, CLOCK_MONOTONIC, data->mode);
	return 0;
}
@@ -6281,6 +6280,7 @@ static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
			prev = NULL;
	}

	list_del(&req->timeout.list);
	spin_unlock_irqrestore(&ctx->completion_lock, flags);

	if (prev) {