Commit cd0ca2e0 authored by Pavel Begunkov, committed by Jens Axboe

io_uring: inline struct io_comp_state

Inline struct io_comp_state into struct io_submit_state. The two are
already tightly coupled, and given their mixed responsibilities,
keeping them separate only causes confusion.
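
To illustrate, a minimal user-space sketch of the consolidated layout
this patch arrives at. The real struct io_submit_state carries more
members (see the diff below); IO_COMPL_BATCH is assumed to be 32 as in
this kernel, and io_kiocb/list_head are stand-ins for their kernel
definitions:

	#define IO_COMPL_BATCH	32

	struct io_kiocb;			/* opaque request */
	struct list_head { struct list_head *next, *prev; };

	struct io_submit_state {
		/* batch completion logic, formerly struct io_comp_state */
		struct io_kiocb		*compl_reqs[IO_COMPL_BATCH];
		unsigned int		compl_nr;
		/* inline/task_work completion list, under ->uring_lock */
		struct list_head	free_list;
	};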

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/e55bba77426b399e3a2e54e3c6c267c6a0fc4b57.1628536684.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent bb943b82
fs/io_uring.c +27 −34

--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -299,13 +299,6 @@ struct io_sq_data {
 #define IO_REQ_CACHE_SIZE		32
 #define IO_REQ_ALLOC_BATCH		8
 
-struct io_comp_state {
-	struct io_kiocb		*reqs[IO_COMPL_BATCH];
-	unsigned int		nr;
-	/* inline/task_work completion list, under ->uring_lock */
-	struct list_head	free_list;
-};
-
 struct io_submit_link {
 	struct io_kiocb		*head;
 	struct io_kiocb		*last;
@@ -326,7 +319,10 @@ struct io_submit_state {
 	/*
 	 * Batch completion logic
 	 */
-	struct io_comp_state	comp;
+	struct io_kiocb		*compl_reqs[IO_COMPL_BATCH];
+	unsigned int		compl_nr;
+	/* inline/task_work completion list, under ->uring_lock */
+	struct list_head	free_list;
 
 	/*
 	 * File reference cache
@@ -1208,7 +1204,7 @@ static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
 	INIT_DELAYED_WORK(&ctx->rsrc_put_work, io_rsrc_put_work);
 	init_llist_head(&ctx->rsrc_put_llist);
 	INIT_LIST_HEAD(&ctx->tctx_list);
-	INIT_LIST_HEAD(&ctx->submit_state.comp.free_list);
+	INIT_LIST_HEAD(&ctx->submit_state.free_list);
 	INIT_LIST_HEAD(&ctx->locked_free_list);
 	INIT_DELAYED_WORK(&ctx->fallback_work, io_fallback_req_func);
 	return ctx;
@@ -1734,10 +1730,10 @@ static void io_preinit_req(struct io_kiocb *req, struct io_ring_ctx *ctx)
 }
 
 static void io_flush_cached_locked_reqs(struct io_ring_ctx *ctx,
-					struct io_comp_state *cs)
+					struct io_submit_state *state)
 {
 	spin_lock_irq(&ctx->completion_lock);
-	list_splice_init(&ctx->locked_free_list, &cs->free_list);
+	list_splice_init(&ctx->locked_free_list, &state->free_list);
 	ctx->locked_free_nr = 0;
 	spin_unlock_irq(&ctx->completion_lock);
 }
@@ -1746,7 +1742,6 @@ static void io_flush_cached_locked_reqs(struct io_ring_ctx *ctx,
 static bool io_flush_cached_reqs(struct io_ring_ctx *ctx)
 {
 	struct io_submit_state *state = &ctx->submit_state;
-	struct io_comp_state *cs = &state->comp;
 	int nr;
 
 	/*
@@ -1755,11 +1750,11 @@ static bool io_flush_cached_reqs(struct io_ring_ctx *ctx)
 	 * side cache.
 	 */
 	if (READ_ONCE(ctx->locked_free_nr) > IO_COMPL_BATCH)
-		io_flush_cached_locked_reqs(ctx, cs);
+		io_flush_cached_locked_reqs(ctx, state);
 
 	nr = state->free_reqs;
-	while (!list_empty(&cs->free_list)) {
-		struct io_kiocb *req = list_first_entry(&cs->free_list,
+	while (!list_empty(&state->free_list)) {
+		struct io_kiocb *req = list_first_entry(&state->free_list,
 					struct io_kiocb, inflight_entry);
 
 		list_del(&req->inflight_entry);
@@ -1946,7 +1941,7 @@ static void ctx_flush_and_put(struct io_ring_ctx *ctx)
 {
 	if (!ctx)
 		return;
-	if (ctx->submit_state.comp.nr) {
+	if (ctx->submit_state.compl_nr) {
 		mutex_lock(&ctx->uring_lock);
 		io_submit_flush_completions(ctx);
 		mutex_unlock(&ctx->uring_lock);
@@ -2143,19 +2138,19 @@ static void io_req_free_batch(struct req_batch *rb, struct io_kiocb *req,
 	if (state->free_reqs != ARRAY_SIZE(state->reqs))
 		state->reqs[state->free_reqs++] = req;
 	else
-		list_add(&req->inflight_entry, &state->comp.free_list);
+		list_add(&req->inflight_entry, &state->free_list);
 }
 
 static void io_submit_flush_completions(struct io_ring_ctx *ctx)
 	__must_hold(&req->ctx->uring_lock)
 {
-	struct io_comp_state *cs = &ctx->submit_state.comp;
-	int i, nr = cs->nr;
+	struct io_submit_state *state = &ctx->submit_state;
+	int i, nr = state->compl_nr;
 	struct req_batch rb;
 
 	spin_lock_irq(&ctx->completion_lock);
 	for (i = 0; i < nr; i++) {
-		struct io_kiocb *req = cs->reqs[i];
+		struct io_kiocb *req = state->compl_reqs[i];
 
 		__io_cqring_fill_event(ctx, req->user_data, req->result,
 					req->compl.cflags);
@@ -2166,7 +2161,7 @@ static void io_submit_flush_completions(struct io_ring_ctx *ctx)
 
 	io_init_req_batch(&rb);
 	for (i = 0; i < nr; i++) {
-		struct io_kiocb *req = cs->reqs[i];
+		struct io_kiocb *req = state->compl_reqs[i];
 
 		/* submission and completion refs */
 		if (req_ref_sub_and_test(req, 2))
@@ -2174,7 +2169,7 @@ static void io_submit_flush_completions(struct io_ring_ctx *ctx)
 	}
 
 	io_req_free_batch_finish(ctx, &rb);
-	cs->nr = 0;
+	state->compl_nr = 0;
 }
 
 /*
@@ -6484,10 +6479,10 @@ static void __io_queue_sqe(struct io_kiocb *req)
 		/* drop submission reference */
 		if (req->flags & REQ_F_COMPLETE_INLINE) {
 			struct io_ring_ctx *ctx = req->ctx;
-			struct io_comp_state *cs = &ctx->submit_state.comp;
+			struct io_submit_state *state = &ctx->submit_state;
 
-			cs->reqs[cs->nr++] = req;
-			if (cs->nr == ARRAY_SIZE(cs->reqs))
+			state->compl_reqs[state->compl_nr++] = req;
+			if (state->compl_nr == ARRAY_SIZE(state->compl_reqs))
 				io_submit_flush_completions(ctx);
 		} else {
 			io_put_req(req);
@@ -6690,7 +6685,7 @@ static void io_submit_state_end(struct io_submit_state *state,
 {
 	if (state->link.head)
 		io_queue_sqe(state->link.head);
-	if (state->comp.nr)
+	if (state->compl_nr)
 		io_submit_flush_completions(ctx);
 	if (state->plug_started)
 		blk_finish_plug(&state->plug);
@@ -8633,19 +8628,17 @@ static void io_req_cache_free(struct list_head *list)
 
 static void io_req_caches_free(struct io_ring_ctx *ctx)
 {
-	struct io_submit_state *submit_state = &ctx->submit_state;
-	struct io_comp_state *cs = &ctx->submit_state.comp;
+	struct io_submit_state *state = &ctx->submit_state;
 
 	mutex_lock(&ctx->uring_lock);
 
-	if (submit_state->free_reqs) {
-		kmem_cache_free_bulk(req_cachep, submit_state->free_reqs,
-				     submit_state->reqs);
-		submit_state->free_reqs = 0;
+	if (state->free_reqs) {
+		kmem_cache_free_bulk(req_cachep, state->free_reqs, state->reqs);
+		state->free_reqs = 0;
 	}
 
-	io_flush_cached_locked_reqs(ctx, cs);
-	io_req_cache_free(&cs->free_list);
+	io_flush_cached_locked_reqs(ctx, state);
+	io_req_cache_free(&state->free_list);
 	mutex_unlock(&ctx->uring_lock);
 }
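
As a usage note, a stand-alone sketch of the batching pattern that
__io_queue_sqe(), io_submit_state_end() and io_submit_flush_completions()
implement above: completed requests are staged in compl_reqs[] and
flushed in one go when the array fills or submission ends. The types
and the flush action here are simplified stand-ins, not the kernel's;
only the compl_reqs/compl_nr mechanics mirror the diff.

	#include <stdio.h>

	#define IO_COMPL_BATCH	32

	struct io_kiocb { int user_data; };

	struct io_submit_state {
		struct io_kiocb	*compl_reqs[IO_COMPL_BATCH];
		unsigned int	compl_nr;
	};

	/* stand-in for io_submit_flush_completions(): drain the batch */
	static void flush_completions(struct io_submit_state *state)
	{
		for (unsigned int i = 0; i < state->compl_nr; i++)
			printf("complete req %d\n",
			       state->compl_reqs[i]->user_data);
		state->compl_nr = 0;
	}

	/* stand-in for the REQ_F_COMPLETE_INLINE path in __io_queue_sqe() */
	static void complete_inline(struct io_submit_state *state,
				    struct io_kiocb *req)
	{
		state->compl_reqs[state->compl_nr++] = req;
		if (state->compl_nr == IO_COMPL_BATCH)
			flush_completions(state);
	}

	int main(void)
	{
		struct io_submit_state state = { .compl_nr = 0 };
		struct io_kiocb reqs[40];

		for (int i = 0; i < 40; i++) {
			reqs[i].user_data = i;
			complete_inline(&state, &reqs[i]);
		}
		/* final partial batch, as in io_submit_state_end() */
		if (state.compl_nr)
			flush_completions(&state);
		return 0;
	}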