Commit cb3d8972 authored by Pavel Begunkov, committed by Jens Axboe
Browse files

io_uring: refactor io_iopoll_req_issued



A simple refactoring of io_iopoll_req_issued(): move the in_async
determination (io_wq_current_is_worker()) inside the function so it is no
longer passed around as a parameter, and the caller avoids checking it twice.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/1513bfde4f0c835be25ac69a82737ab0668d7665.1623634181.git.asml.silence@gmail.com


Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 382cb030
Loading
Loading
Loading
Loading
+21 −23
Original line number Diff line number Diff line
@@ -2525,9 +2525,14 @@ static void io_complete_rw_iopoll(struct kiocb *kiocb, long res, long res2)
 * find it from a io_do_iopoll() thread before the issuer is done
 * accessing the kiocb cookie.
 */
static void io_iopoll_req_issued(struct io_kiocb *req, bool in_async)
static void io_iopoll_req_issued(struct io_kiocb *req)
{
	struct io_ring_ctx *ctx = req->ctx;
	const bool in_async = io_wq_current_is_worker();

	/* workqueue context doesn't hold uring_lock, grab it now */
	if (unlikely(in_async))
		mutex_lock(&ctx->uring_lock);

	/*
	 * Track whether we have multiple files in our lists. This will impact
@@ -2554,14 +2559,19 @@ static void io_iopoll_req_issued(struct io_kiocb *req, bool in_async)
	else
		list_add_tail(&req->inflight_entry, &ctx->iopoll_list);

	if (unlikely(in_async)) {
		/*
	 * If IORING_SETUP_SQPOLL is enabled, sqes are either handled in sq thread
	 * task context or in io worker task context. If current task context is
	 * sq thread, we don't need to check whether should wake up sq thread.
		 * If IORING_SETUP_SQPOLL is enabled, sqes are either handle
		 * in sq thread task context or in io worker task context. If
		 * current task context is sq thread, we don't need to check
		 * whether should wake up sq thread.
		 */
	if (in_async && (ctx->flags & IORING_SETUP_SQPOLL) &&
		if ((ctx->flags & IORING_SETUP_SQPOLL) &&
		    wq_has_sleeper(&ctx->sq_data->wait))
			wake_up(&ctx->sq_data->wait);

		mutex_unlock(&ctx->uring_lock);
	}
}

static inline void io_state_file_put(struct io_submit_state *state)
@@ -6215,23 +6225,11 @@ static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags)

	if (creds)
		revert_creds(creds);

	if (ret)
		return ret;

	/* If the op doesn't have a file, we're not polling for it */
	if ((ctx->flags & IORING_SETUP_IOPOLL) && req->file) {
		const bool in_async = io_wq_current_is_worker();

		/* workqueue context doesn't hold uring_lock, grab it now */
		if (in_async)
			mutex_lock(&ctx->uring_lock);

		io_iopoll_req_issued(req, in_async);

		if (in_async)
			mutex_unlock(&ctx->uring_lock);
	}
	if ((ctx->flags & IORING_SETUP_IOPOLL) && req->file)
		io_iopoll_req_issued(req);

	return 0;
}