Commit a8576af9 authored by Pavel Begunkov, committed by Jens Axboe

io_uring: kill unnecessary resubmit switch

Commit 773af691 ("io_uring: always reissue from task_work context") made
all resubmission happen from task_work, so the hack with the
resubmit/not-resubmit switch is no longer needed.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/47fa177cca04e5ffd308a35227966c8e15d8525b.1628981736.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent fb682099
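
For context, below is a minimal standalone sketch of the refactor pattern this
commit applies. The names (queue_reissue, complete_old, complete_new) are
hypothetical; this is not the actual fs/io_uring.c code. The resubmit switch
existed so that contexts where resubmission was unwanted could opt out; once
773af691 routed every reissue through task_work, the deferred path is safe
from any caller, so the boolean and the one call site passing false can go.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for io_req_task_queue_reissue(): the request is
 * not resubmitted inline, only queued for reissue from task_work later. */
static void queue_reissue(int req_id)
{
	printf("req %d queued for reissue via task_work\n", req_id);
}

/* Old shape: callers that could not tolerate resubmission passed
 * resubmit == false to suppress the reissue path. */
static void complete_old(int req_id, bool got_eagain, bool resubmit)
{
	if (got_eagain && resubmit)
		queue_reissue(req_id);
}

/* New shape: reissue is always deferred to task_work, which is safe from
 * every calling context, so the switch and its branch condition go away. */
static void complete_new(int req_id, bool got_eagain)
{
	if (got_eagain)
		queue_reissue(req_id);
}

int main(void)
{
	complete_old(1, true, true);
	complete_new(2, true);	/* same behaviour, one fewer parameter */
	return 0;
}

The actual change, shown in the diff below, drops the bool from
io_iopoll_complete() and io_do_iopoll() and updates their callers.
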
fs/io_uring.c +7 −7
@@ -2293,7 +2293,7 @@ static inline bool io_run_task_work(void)
 * Find and free completed poll iocbs
 */
static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
-			       struct list_head *done, bool resubmit)
+			       struct list_head *done)
{
	struct req_batch rb;
	struct io_kiocb *req;
@@ -2308,7 +2308,7 @@ static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
		req = list_first_entry(done, struct io_kiocb, inflight_entry);
		list_del(&req->inflight_entry);

-		if (READ_ONCE(req->result) == -EAGAIN && resubmit &&
+		if (READ_ONCE(req->result) == -EAGAIN &&
		    !(req->flags & REQ_F_DONT_REISSUE)) {
			req->iopoll_completed = 0;
			io_req_task_queue_reissue(req);
@@ -2331,7 +2331,7 @@ static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
}

static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events,
-			long min, bool resubmit)
+			long min)
{
	struct io_kiocb *req, *tmp;
	LIST_HEAD(done);
@@ -2371,7 +2371,7 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events,
	}

	if (!list_empty(&done))
-		io_iopoll_complete(ctx, nr_events, &done, resubmit);
+		io_iopoll_complete(ctx, nr_events, &done);

	return 0;
}
@@ -2389,7 +2389,7 @@ static void io_iopoll_try_reap_events(struct io_ring_ctx *ctx)
	while (!list_empty(&ctx->iopoll_list)) {
		unsigned int nr_events = 0;

-		io_do_iopoll(ctx, &nr_events, 0, false);
+		io_do_iopoll(ctx, &nr_events, 0);

		/* let it sleep and repeat later if can't complete a request */
		if (nr_events == 0)
@@ -2451,7 +2451,7 @@ static int io_iopoll_check(struct io_ring_ctx *ctx, long min)
			    list_empty(&ctx->iopoll_list))
				break;
		}
-		ret = io_do_iopoll(ctx, &nr_events, min, true);
+		ret = io_do_iopoll(ctx, &nr_events, min);
	} while (!ret && nr_events < min && !need_resched());
out:
	mutex_unlock(&ctx->uring_lock);
@@ -6857,7 +6857,7 @@ static int __io_sq_thread(struct io_ring_ctx *ctx, bool cap_entries)

		mutex_lock(&ctx->uring_lock);
		if (!list_empty(&ctx->iopoll_list))
-			io_do_iopoll(ctx, &nr_events, 0, true);
+			io_do_iopoll(ctx, &nr_events, 0);

		/*
		 * Don't submit if refs are dying, good for io_uring_register(),