Commit 20d6b633 authored by Pavel Begunkov, committed by Jens Axboe

io_uring: refactor __io_get_cqe()

Make __io_get_cqe() simpler by not grabbing the cqe from the refilled
cache, but letting io_get_cqe() do it for us. That's cleaner and removes
some duplication.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/74dc8fdf2657e438b2e05e1d478a3596924604e9.1692916914.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent b24c5d75
io_uring/io_uring.c +4 −9
@@ -818,7 +818,7 @@ void io_req_cqe_overflow(struct io_kiocb *req)
  * control dependency is enough as we're using WRITE_ONCE to
  * fill the cq entry
  */
-struct io_uring_cqe *__io_get_cqe(struct io_ring_ctx *ctx, bool overflow)
+bool io_cqe_cache_refill(struct io_ring_ctx *ctx, bool overflow)
 {
 	struct io_rings *rings = ctx->rings;
 	unsigned int off = ctx->cached_cq_tail & (ctx->cq_entries - 1);
@@ -830,7 +830,7 @@ struct io_uring_cqe *__io_get_cqe(struct io_ring_ctx *ctx, bool overflow)
 	 * Force overflow the completion.
 	 */
 	if (!overflow && (ctx->check_cq & BIT(IO_CHECK_CQ_OVERFLOW_BIT)))
-		return NULL;
+		return false;
 
 	/* userspace may cheat modifying the tail, be safe and do min */
 	queued = min(__io_cqring_events(ctx), ctx->cq_entries);
@@ -838,7 +838,7 @@ struct io_uring_cqe *__io_get_cqe(struct io_ring_ctx *ctx, bool overflow)
 	/* we need a contiguous range, limit based on the current array offset */
 	len = min(free, ctx->cq_entries - off);
 	if (!len)
-		return NULL;
+		return false;
 
 	if (ctx->flags & IORING_SETUP_CQE32) {
 		off <<= 1;
@@ -847,12 +847,7 @@ struct io_uring_cqe *__io_get_cqe(struct io_ring_ctx *ctx, bool overflow)
 
 	ctx->cqe_cached = &rings->cqes[off];
 	ctx->cqe_sentinel = ctx->cqe_cached + len;
-
-	ctx->cached_cq_tail++;
-	ctx->cqe_cached++;
-	if (ctx->flags & IORING_SETUP_CQE32)
-		ctx->cqe_cached++;
-	return &rings->cqes[off];
+	return true;
 }
 
 static bool io_fill_cqe_aux(struct io_ring_ctx *ctx, u64 user_data, s32 res,
io_uring/io_uring.h +12 −11
@@ -38,7 +38,7 @@ enum {
 	IOU_STOP_MULTISHOT	= -ECANCELED,
 };
 
-struct io_uring_cqe *__io_get_cqe(struct io_ring_ctx *ctx, bool overflow);
+bool io_cqe_cache_refill(struct io_ring_ctx *ctx, bool overflow);
 void io_req_cqe_overflow(struct io_kiocb *req);
 int io_run_task_work_sig(struct io_ring_ctx *ctx);
 void io_req_defer_failed(struct io_kiocb *req, s32 res);
@@ -112,11 +112,15 @@ static inline void io_req_task_work_add(struct io_kiocb *req)
 static inline struct io_uring_cqe *io_get_cqe_overflow(struct io_ring_ctx *ctx,
 						       bool overflow)
 {
-	io_lockdep_assert_cq_locked(ctx);
+	struct io_uring_cqe *cqe;
 
-	if (likely(ctx->cqe_cached < ctx->cqe_sentinel)) {
-		struct io_uring_cqe *cqe = ctx->cqe_cached;
+	io_lockdep_assert_cq_locked(ctx);
 
+	if (unlikely(ctx->cqe_cached >= ctx->cqe_sentinel)) {
+		if (unlikely(!io_cqe_cache_refill(ctx, overflow)))
+			return NULL;
+	}
+	cqe = ctx->cqe_cached;
 	ctx->cached_cq_tail++;
 	ctx->cqe_cached++;
 	if (ctx->flags & IORING_SETUP_CQE32)
@@ -124,9 +128,6 @@ static inline struct io_uring_cqe *io_get_cqe_overflow(struct io_ring_ctx *ctx,
 	return cqe;
 }
 
-	return __io_get_cqe(ctx, overflow);
-}
-
 static inline struct io_uring_cqe *io_get_cqe(struct io_ring_ctx *ctx)
 {
 	return io_get_cqe_overflow(ctx, false);
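
For readers outside the kernel tree, the sketch below is a minimal,
self-contained user-space model of the shape this patch converges on: a
fast-path getter that bump-allocates from a cached contiguous window, and a
boolean slow-path helper that only refills that window. All identifiers here
(struct cqe_cache, cache_refill, get_entry, RING_ENTRIES) are hypothetical
stand-ins, not kernel code, and the model deliberately omits the real
refill's overflow check and free-space accounting.

#include <stdbool.h>
#include <stdio.h>

#define RING_ENTRIES 8	/* power of two, like cq_entries */

/* Simplified stand-in for struct io_ring_ctx; the field names mirror
 * cached_cq_tail/cqe_cached/cqe_sentinel but this is user-space code. */
struct cqe_cache {
	int ring[RING_ENTRIES];	/* the "CQ ring" */
	unsigned int tail;	/* like ctx->cached_cq_tail */
	int *cached;		/* like ctx->cqe_cached */
	int *sentinel;		/* like ctx->cqe_sentinel */
};

/* Slow path, in the spirit of io_cqe_cache_refill(): establish the next
 * contiguous [cached, sentinel) window and report success; it no longer
 * hands back an entry itself. */
static bool cache_refill(struct cqe_cache *c)
{
	unsigned int off = c->tail & (RING_ENTRIES - 1);

	c->cached = &c->ring[off];
	c->sentinel = c->cached + (RING_ENTRIES - off);
	return true;
}

/* Fast path, shaped like the reworked io_get_cqe_overflow(): bump-allocate
 * from the window, calling the refill helper only when it is exhausted. */
static int *get_entry(struct cqe_cache *c)
{
	int *entry;

	if (c->cached >= c->sentinel) {
		if (!cache_refill(c))
			return NULL;
	}
	entry = c->cached;
	c->tail++;
	c->cached++;
	return entry;
}

int main(void)
{
	struct cqe_cache c = { .tail = 0 };

	/* Start with an empty window so the first get triggers a refill. */
	c.cached = c.ring;
	c.sentinel = c.ring;

	for (int i = 0; i < 10; i++) {
		int *e = get_entry(&c);
		if (e)
			*e = i;
	}
	printf("tail after 10 gets: %u\n", c.tail);	/* prints 10 */
	return 0;
}

The point of the refactor is visible in get_entry(): the tail/cached
increments exist in exactly one place, where before the patch they were
duplicated in both the inline fast path and __io_get_cqe().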