Commit 913a571a authored by Pavel Begunkov, committed by Jens Axboe

io_uring: clean cqe filling functions

Split io_cqring_fill_event() into a couple of more targeted functions.
The first one is io_fill_cqe_aux(), for completions that are not
associated with a request; it also does the ->cq_extra accounting.
Examples are the additional CQEs from multishot poll and rsrc
notifications.

The second is io_fill_cqe_req(), which should be called for a normal
request completion. There is nothing more to it at the moment; it will
be used in later patches.

The last one is the inlined __io_fill_cqe(), for finer-grained control;
it should be used with caution and only in the hottest places.
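
For reference, the three helpers as they land in the diff below, condensed
(the body of __io_fill_cqe() is elided here; it writes a CQE into the ring
or falls back to io_cqring_event_overflow()):

	/* inlined hot path: post a CQE, or punt it to the overflow path */
	static inline bool __io_fill_cqe(struct io_ring_ctx *ctx, u64 user_data,
					 s32 res, u32 cflags);

	/* normal request completion: ctx and user_data come from the request */
	static noinline void io_fill_cqe_req(struct io_kiocb *req, s32 res, u32 cflags)
	{
		__io_fill_cqe(req->ctx, req->user_data, res, cflags);
	}

	/* auxiliary CQE with no matching SQE, hence the ->cq_extra accounting */
	static noinline bool io_fill_cqe_aux(struct io_ring_ctx *ctx, u64 user_data,
					     s32 res, u32 cflags)
	{
		ctx->cq_extra++;
		return __io_fill_cqe(ctx, user_data, res, cflags);
	}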

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/59a9117a4a44fc9efcf04b3afa51e0d080f5943c.1636559119.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 2ea537ca
fs/io_uring.c: +30 −28
@@ -1108,8 +1108,8 @@ static void io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
 					 bool cancel_all);
 static void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd);
 
-static bool io_cqring_fill_event(struct io_ring_ctx *ctx, u64 user_data,
-				 s32 res, u32 cflags);
+static void io_fill_cqe_req(struct io_kiocb *req, s32 res, u32 cflags);
+
 static void io_put_req(struct io_kiocb *req);
 static void io_put_req_deferred(struct io_kiocb *req);
 static void io_dismantle_req(struct io_kiocb *req);
@@ -1560,7 +1560,7 @@ static void io_kill_timeout(struct io_kiocb *req, int status)
 		atomic_set(&req->ctx->cq_timeouts,
 			atomic_read(&req->ctx->cq_timeouts) + 1);
 		list_del_init(&req->timeout.list);
-		io_cqring_fill_event(req->ctx, req->user_data, status, 0);
+		io_fill_cqe_req(req, status, 0);
 		io_put_req_deferred(req);
 	}
 }
@@ -1819,7 +1819,7 @@ static bool io_cqring_event_overflow(struct io_ring_ctx *ctx, u64 user_data,
 	return true;
 }
 
-static inline bool __io_cqring_fill_event(struct io_ring_ctx *ctx, u64 user_data,
-					  s32 res, u32 cflags)
+static inline bool __io_fill_cqe(struct io_ring_ctx *ctx, u64 user_data,
+				 s32 res, u32 cflags)
 {
 	struct io_uring_cqe *cqe;
@@ -1841,11 +1841,16 @@ static inline bool __io_cqring_fill_event(struct io_ring_ctx *ctx, u64 user_data
 	return io_cqring_event_overflow(ctx, user_data, res, cflags);
 }
 
-/* not as hot to bloat with inlining */
-static noinline bool io_cqring_fill_event(struct io_ring_ctx *ctx, u64 user_data,
-					  s32 res, u32 cflags)
+static noinline void io_fill_cqe_req(struct io_kiocb *req, s32 res, u32 cflags)
+{
+	__io_fill_cqe(req->ctx, req->user_data, res, cflags);
+}
+
+static noinline bool io_fill_cqe_aux(struct io_ring_ctx *ctx, u64 user_data,
+				     s32 res, u32 cflags)
 {
-	return __io_cqring_fill_event(ctx, user_data, res, cflags);
+	ctx->cq_extra++;
+	return __io_fill_cqe(ctx, user_data, res, cflags);
 }
 
 static void io_req_complete_post(struct io_kiocb *req, s32 res,
@@ -1854,7 +1859,7 @@ static void io_req_complete_post(struct io_kiocb *req, s32 res,
 	struct io_ring_ctx *ctx = req->ctx;
 
 	spin_lock(&ctx->completion_lock);
-	__io_cqring_fill_event(ctx, req->user_data, res, cflags);
+	__io_fill_cqe(ctx, req->user_data, res, cflags);
 	/*
 	 * If we're the last reference to this request, add to our locked
 	 * free_list cache.
@@ -2062,8 +2067,7 @@ static bool io_kill_linked_timeout(struct io_kiocb *req)
 		link->timeout.head = NULL;
 		if (hrtimer_try_to_cancel(&io->timer) != -1) {
 			list_del(&link->timeout.list);
-			io_cqring_fill_event(link->ctx, link->user_data,
-					     -ECANCELED, 0);
+			io_fill_cqe_req(link, -ECANCELED, 0);
 			io_put_req_deferred(link);
 			return true;
 		}
@@ -2087,7 +2091,7 @@ static void io_fail_links(struct io_kiocb *req)
 		link->link = NULL;
 
 		trace_io_uring_fail_link(req, link);
-		io_cqring_fill_event(link->ctx, link->user_data, res, 0);
+		io_fill_cqe_req(link, res, 0);
 		io_put_req_deferred(link);
 		link = nxt;
 	}
@@ -2104,8 +2108,7 @@ static bool io_disarm_next(struct io_kiocb *req)
 		req->flags &= ~REQ_F_ARM_LTIMEOUT;
 		if (link && link->opcode == IORING_OP_LINK_TIMEOUT) {
 			io_remove_next_linked(req);
-			io_cqring_fill_event(link->ctx, link->user_data,
-					     -ECANCELED, 0);
+			io_fill_cqe_req(link, -ECANCELED, 0);
 			io_put_req_deferred(link);
 			posted = true;
 		}
@@ -2369,7 +2372,7 @@ static void __io_submit_flush_completions(struct io_ring_ctx *ctx)
 		struct io_kiocb *req = container_of(node, struct io_kiocb,
 						    comp_list);
 
-		__io_cqring_fill_event(ctx, req->user_data, req->result,
-					req->cflags);
+		__io_fill_cqe(ctx, req->user_data, req->result,
+			      req->cflags);
 	}
 	io_commit_cqring(ctx);
@@ -2504,7 +2507,7 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin)
 		/* order with io_complete_rw_iopoll(), e.g. ->result updates */
 		if (!smp_load_acquire(&req->iopoll_completed))
 			break;
-		__io_cqring_fill_event(ctx, req->user_data, req->result,
-					io_put_rw_kbuf(req));
+		__io_fill_cqe(ctx, req->user_data, req->result,
+			      io_put_rw_kbuf(req));
 		nr_events++;
 	}
@@ -5360,13 +5363,13 @@ static bool __io_poll_complete(struct io_kiocb *req, __poll_t mask)
 		error = mangle_poll(mask);
 	}
 	if (req->poll.events & EPOLLONESHOT)
 		flags = 0;
-	if (!io_cqring_fill_event(ctx, req->user_data, error, flags)) {
+
+	if (!(flags & IORING_CQE_F_MORE)) {
+		io_fill_cqe_req(req, error, flags);
+	} else if (!io_fill_cqe_aux(ctx, req->user_data, error, flags)) {
 		req->poll.events |= EPOLLONESHOT;
 		flags = 0;
 	}
-	if (flags & IORING_CQE_F_MORE)
-		ctx->cq_extra++;
-
 	return !(flags & IORING_CQE_F_MORE);
 }
@@ -5684,9 +5687,9 @@ static bool io_poll_remove_one(struct io_kiocb *req)
 	do_complete = __io_poll_remove_one(req, io_poll_get_single(req), true);
 
 	if (do_complete) {
-		io_cqring_fill_event(req->ctx, req->user_data, -ECANCELED, 0);
-		io_commit_cqring(req->ctx);
 		req_set_fail(req);
+		io_fill_cqe_req(req, -ECANCELED, 0);
+		io_commit_cqring(req->ctx);
 		io_put_req_deferred(req);
 	}
 	return do_complete;
@@ -5986,7 +5989,7 @@ static int io_timeout_cancel(struct io_ring_ctx *ctx, __u64 user_data)
 		return PTR_ERR(req);
 
 	req_set_fail(req);
-	io_cqring_fill_event(ctx, req->user_data, -ECANCELED, 0);
+	io_fill_cqe_req(req, -ECANCELED, 0);
 	io_put_req_deferred(req);
 	return 0;
 }
@@ -8219,8 +8222,7 @@ static void __io_rsrc_put_work(struct io_rsrc_node *ref_node)
 
 			io_ring_submit_lock(ctx, lock_ring);
 			spin_lock(&ctx->completion_lock);
-			io_cqring_fill_event(ctx, prsrc->tag, 0, 0);
-			ctx->cq_extra++;
+			io_fill_cqe_aux(ctx, prsrc->tag, 0, 0);
 			io_commit_cqring(ctx);
 			spin_unlock(&ctx->completion_lock);
 			io_cqring_ev_posted(ctx);
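
For context, the call-site pattern after this change, as seen in the hunks
above (the aux sequence mirrors __io_rsrc_put_work(); the values passed are
illustrative):

	/* completing a request: the request itself carries ctx and user_data */
	io_fill_cqe_req(req, -ECANCELED, 0);

	/* posting a request-less CQE, e.g. an rsrc notification; note that
	 * io_fill_cqe_aux() now does the ->cq_extra accounting that callers
	 * previously open-coded */
	spin_lock(&ctx->completion_lock);
	io_fill_cqe_aux(ctx, prsrc->tag, 0, 0);
	io_commit_cqring(ctx);
	spin_unlock(&ctx->completion_lock);
	io_cqring_ev_posted(ctx);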