Commit 3fcf19d5 authored by Pavel Begunkov, committed by Jens Axboe
Browse files

io_uring: parse check_cq out of wq waiting



We already avoid flushing overflows in io_cqring_wait_schedule() but
only return an error for the outer loop to handle it. Minimise it even
further by moving all ->check_cq parsing there.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/9dfcec3121013f98208dbf79368d636d74e1231a.1672916894.git.asml.silence@gmail.com


Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 140102ae
Loading
Loading
Loading
Loading
+18 −14
Original line number Diff line number Diff line
@@ -2471,21 +2471,13 @@ static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx,
					  ktime_t *timeout)
{
	int ret;
	unsigned long check_cq;

	if (unlikely(READ_ONCE(ctx->check_cq)))
		return 1;
	/* make sure we run task_work before checking for signals */
	ret = io_run_task_work_sig(ctx);
	if (ret || io_should_wake(iowq))
		return ret;

	check_cq = READ_ONCE(ctx->check_cq);
	if (unlikely(check_cq)) {
		/* let the caller flush overflows, retry */
		if (check_cq & BIT(IO_CHECK_CQ_OVERFLOW_BIT))
			return 1;
		if (check_cq & BIT(IO_CHECK_CQ_DROPPED_BIT))
			return -EBADR;
	}
	if (!schedule_hrtimeout(timeout, HRTIMER_MODE_ABS))
		return -ETIME;

@@ -2551,13 +2543,25 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,

	trace_io_uring_cqring_wait(ctx, min_events);
	do {
		if (test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq)) {
			finish_wait(&ctx->cq_wait, &iowq.wq);
			io_cqring_do_overflow_flush(ctx);
		}
		unsigned long check_cq;

		prepare_to_wait_exclusive(&ctx->cq_wait, &iowq.wq,
						TASK_INTERRUPTIBLE);
		ret = io_cqring_wait_schedule(ctx, &iowq, &timeout);

		check_cq = READ_ONCE(ctx->check_cq);
		if (unlikely(check_cq)) {
			/* let the caller flush overflows, retry */
			if (check_cq & BIT(IO_CHECK_CQ_OVERFLOW_BIT)) {
				finish_wait(&ctx->cq_wait, &iowq.wq);
				io_cqring_do_overflow_flush(ctx);
			}
			if (check_cq & BIT(IO_CHECK_CQ_DROPPED_BIT)) {
				ret = -EBADR;
				break;
			}
		}

		if (__io_cqring_events_user(ctx) >= min_events)
			break;
		cond_resched();