Commit da4cb346 authored by Jens Axboe, committed by Zheng Zengkai

io_uring: don't keep looping for more events if we can't flush overflow

mainline inclusion
from mainline-v5.12-rc1
commit ca0a2651
category: bugfix
bugzilla: 186454, https://gitee.com/openeuler/kernel/issues/I5026G
CVE: NA

Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=ca0a26511c679a797f86589894a4523db36d833e



--------------------------------

It doesn't make sense to wait for more events to come in if we can't
even flush the overflow we already have to the ring. Return -EBUSY for
that condition, just like we do for attempts to submit with overflow
pending.
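
For callers, the visible change is a new -EBUSY from a CQE wait. A minimal
illustrative sketch of the recovery path (assuming liburing, which propagates
the kernel's negative errno from io_uring_enter(2); the helper name and flow
below are hypothetical, not part of this patch):

#include <liburing.h>

/* Hypothetical helper: with this fix, waiting for a CQE can fail with
 * -EBUSY instead of looping when the overflow backlog cannot be flushed
 * into a full CQ ring.  Consuming the ready CQEs frees ring slots so the
 * kernel can flush, after which the wait is retried. */
static int wait_for_cqe(struct io_uring *ring, struct io_uring_cqe **out)
{
	int ret;

	while ((ret = io_uring_wait_cqe(ring, out)) == -EBUSY) {
		struct io_uring_cqe *cqe;
		unsigned head, seen = 0;

		io_uring_for_each_cqe(ring, head, cqe) {
			/* process cqe->user_data / cqe->res here */
			seen++;
		}
		io_uring_cq_advance(ring, seen);	/* free CQ ring slots */
	}
	return ret;	/* 0 with *out set, or another -errno */
}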

Cc: stable@vger.kernel.org # 5.11
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Conflicts:
        fs/io_uring.c
Signed-off-by: Guo Xuenan <guoxuenan@huawei.com>
Reviewed-by: Zhang Yi <yi.zhang@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
parent 47249371
fs/io_uring.c: +11 −3
@@ -1713,18 +1713,22 @@ static bool __io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force,
 	return cqe != NULL;
 }
 
-static void io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force,
+static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force,
 				     struct task_struct *tsk,
 				     struct files_struct *files)
 {
+	bool ret = true;
+
 	if (test_bit(0, &ctx->cq_check_overflow)) {
 		/* iopoll syncs against uring_lock, not completion_lock */
 		if (ctx->flags & IORING_SETUP_IOPOLL)
 			mutex_lock(&ctx->uring_lock);
-		__io_cqring_overflow_flush(ctx, force, tsk, files);
+		ret = __io_cqring_overflow_flush(ctx, force, tsk, files);
 		if (ctx->flags & IORING_SETUP_IOPOLL)
 			mutex_unlock(&ctx->uring_lock);
 	}
 
+	return ret;
 }
 
 static void __io_cqring_fill_event(struct io_kiocb *req, long res,
@@ -7051,7 +7055,11 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
 	iowq.nr_timeouts = atomic_read(&ctx->cq_timeouts);
 	trace_io_uring_cqring_wait(ctx, min_events);
 	do {
-		io_cqring_overflow_flush(ctx, false, NULL, NULL);
+		/* if we can't even flush overflow, don't wait for more */
+		if (!io_cqring_overflow_flush(ctx, false, NULL, NULL)) {
+			ret = -EBUSY;
+			break;
+		}
 		prepare_to_wait_exclusive(&ctx->wait, &iowq.wq,
 						TASK_INTERRUPTIBLE);
 		/* make sure we run task_work before checking for signals */
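
Net effect, condensed (a paraphrase of the two hunks above, not additional
verbatim kernel source):

	/*
	 * io_cqring_overflow_flush(ctx, false, NULL, NULL) now reports,
	 * roughly:
	 *   true  - no overflow pending, or the backlog was flushed into
	 *           the CQ ring (__io_cqring_overflow_flush found room,
	 *           i.e. its 'cqe != NULL' result above)
	 *   false - the CQ ring is still full, so io_cqring_wait() gives
	 *           up with -EBUSY instead of sleeping, mirroring the
	 *           -EBUSY already returned at submit time while overflow
	 *           is pending.
	 */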