Commit d3d7298d authored by Pavel Begunkov, committed by Jens Axboe
Browse files

io_uring: optimise out unlikely link queue



__io_queue_sqe() tries to issue as many requests of a link as it can,
and uses io_put_req_find_next() to extract a next one, targeting inline
completed requests. As now __io_queue_sqe() is always used together with
struct io_comp_state, it leaves next propagation only a small window and
only for async reqs, which doesn't justify its existence.

Remove it, and make __io_queue_sqe() issue only a head request. This
simplifies the code and will allow other optimisations.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent bd759045
Loading
Loading
Loading
Loading
+10 −32
Original line number Original line Diff line number Diff line
@@ -6563,26 +6563,20 @@ static struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req)


static void __io_queue_sqe(struct io_kiocb *req)
static void __io_queue_sqe(struct io_kiocb *req)
{
{
	struct io_kiocb *linked_timeout;
	struct io_kiocb *linked_timeout = io_prep_linked_timeout(req);
	const struct cred *old_creds = NULL;
	const struct cred *old_creds = NULL;
	int ret;
	int ret;


again:
	linked_timeout = io_prep_linked_timeout(req);

	if ((req->flags & REQ_F_WORK_INITIALIZED) &&
	if ((req->flags & REQ_F_WORK_INITIALIZED) &&
	    (req->work.flags & IO_WQ_WORK_CREDS) &&
	    (req->work.flags & IO_WQ_WORK_CREDS) &&
	    req->work.identity->creds != current_cred()) {
	    req->work.identity->creds != current_cred())
		if (old_creds)
			revert_creds(old_creds);
		if (old_creds == req->work.identity->creds)
			old_creds = NULL; /* restored original creds */
		else
		old_creds = override_creds(req->work.identity->creds);
		old_creds = override_creds(req->work.identity->creds);
	}


	ret = io_issue_sqe(req, IO_URING_F_NONBLOCK|IO_URING_F_COMPLETE_DEFER);
	ret = io_issue_sqe(req, IO_URING_F_NONBLOCK|IO_URING_F_COMPLETE_DEFER);


	if (old_creds)
		revert_creds(old_creds);

	/*
	/*
	 * We async punt it if the file wasn't marked NOWAIT, or if the file
	 * We async punt it if the file wasn't marked NOWAIT, or if the file
	 * doesn't support non-blocking read/write attempts
	 * doesn't support non-blocking read/write attempts
@@ -6595,9 +6589,6 @@ static void __io_queue_sqe(struct io_kiocb *req)
			 */
			 */
			io_queue_async_work(req);
			io_queue_async_work(req);
		}
		}

		if (linked_timeout)
			io_queue_linked_timeout(linked_timeout);
	} else if (likely(!ret)) {
	} else if (likely(!ret)) {
		/* drop submission reference */
		/* drop submission reference */
		if (req->flags & REQ_F_COMPLETE_INLINE) {
		if (req->flags & REQ_F_COMPLETE_INLINE) {
@@ -6605,31 +6596,18 @@ static void __io_queue_sqe(struct io_kiocb *req)
			struct io_comp_state *cs = &ctx->submit_state.comp;
			struct io_comp_state *cs = &ctx->submit_state.comp;


			cs->reqs[cs->nr++] = req;
			cs->reqs[cs->nr++] = req;
			if (cs->nr == IO_COMPL_BATCH)
			if (cs->nr == ARRAY_SIZE(cs->reqs))
				io_submit_flush_completions(cs, ctx);
				io_submit_flush_completions(cs, ctx);
			req = NULL;
		} else {
		} else {
			req = io_put_req_find_next(req);
			io_put_req(req);
		}

		if (linked_timeout)
			io_queue_linked_timeout(linked_timeout);

		if (req) {
			if (!(req->flags & REQ_F_FORCE_ASYNC))
				goto again;
			io_queue_async_work(req);
		}
		}
	} else {
	} else {
		/* un-prep timeout, so it'll be killed as any other linked */
		req->flags &= ~REQ_F_LINK_TIMEOUT;
		req_set_fail_links(req);
		req_set_fail_links(req);
		io_put_req(req);
		io_put_req(req);
		io_req_complete(req, ret);
		io_req_complete(req, ret);
	}
	}

	if (linked_timeout)
	if (old_creds)
		io_queue_linked_timeout(linked_timeout);
		revert_creds(old_creds);
}
}


static void io_queue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe)
static void io_queue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe)