Commit 3d4aeb9f authored by Pavel Begunkov's avatar Pavel Begunkov Committed by Jens Axboe
Browse files

io_uring: don't spinlock when not posting CQEs



When none of the requests queued for batch completion need to post a CQE
(see IOSQE_CQE_SKIP_SUCCESS), avoid grabbing ->completion_lock and doing
the commit/post work.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/8d4b4a08bca022cbe19af00266407116775b3e4d.1636559119.git.asml.silence@gmail.com


Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 04c76b41
Loading
Loading
Loading
Loading
+17 −9
Original line number Diff line number Diff line
@@ -321,6 +321,7 @@ struct io_submit_state {

	bool			plug_started;
	bool			need_plug;
	bool			flush_cqes;
	unsigned short		submit_nr;
	struct blk_plug		plug;
};
@@ -1525,8 +1526,11 @@ static void io_prep_async_link(struct io_kiocb *req)

/*
 * Queue @req on the submission state's batch completion list.
 *
 * If the request will post a CQE on completion (REQ_F_CQE_SKIP not set),
 * mark the batch as needing a CQ flush, so the flush path knows it must
 * take ->completion_lock and commit/post; an all-skip batch can then
 * avoid the lock entirely.
 */
static inline void io_req_add_compl_list(struct io_kiocb *req)
{
	struct io_submit_state *state = &req->ctx->submit_state;

	if (!(req->flags & REQ_F_CQE_SKIP))
		state->flush_cqes = true;
	wq_list_add_tail(&req->comp_list, &state->compl_reqs);
}

@@ -2386,6 +2390,7 @@ static void __io_submit_flush_completions(struct io_ring_ctx *ctx)
	struct io_wq_work_node *node, *prev;
	struct io_submit_state *state = &ctx->submit_state;

	if (state->flush_cqes) {
		spin_lock(&ctx->completion_lock);
		wq_list_for_each(node, prev, &state->compl_reqs) {
			struct io_kiocb *req = container_of(node, struct io_kiocb,
@@ -2395,9 +2400,12 @@ static void __io_submit_flush_completions(struct io_ring_ctx *ctx)
				__io_fill_cqe(ctx, req->user_data, req->result,
					      req->cflags);
		}

		io_commit_cqring(ctx);
		spin_unlock(&ctx->completion_lock);
		io_cqring_ev_posted(ctx);
		state->flush_cqes = false;
	}

	io_free_batch_list(ctx, state->compl_reqs.first);
	INIT_WQ_LIST(&state->compl_reqs);