Commit d4d19c19 authored by Pavel Begunkov, committed by Jens Axboe

io_uring: decouple CQE filling from requests



Make __io_cqring_fill_event() agnostic of struct io_kiocb by passing all
the data it needs directly. This will be used to post rsrc removal
completions, which don't have an associated request.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/c9b8da9e42772db2033547dfebe479dc972a0f2c.1619356238.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 44b31f2f
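For illustration, here is a minimal sketch (not part of this commit) of how the decoupled helper can post a completion that has no struct io_kiocb behind it, such as an rsrc removal. The wrapper name io_rsrc_put_complete and the zero res/cflags values are assumptions made for this example; only io_cqring_fill_event(), io_commit_cqring(), io_cqring_ev_posted() and the completion_lock pattern are taken from the diff below.

/* Hypothetical caller: post a CQE using only the ring ctx and a
 * user_data value, with no request attached.
 */
static void io_rsrc_put_complete(struct io_ring_ctx *ctx, u64 user_data)
{
	unsigned long flags;

	spin_lock_irqsave(&ctx->completion_lock, flags);
	/* fill the CQE directly from ctx + user_data */
	io_cqring_fill_event(ctx, user_data, 0, 0);
	io_commit_cqring(ctx);
	spin_unlock_irqrestore(&ctx->completion_lock, flags);
	io_cqring_ev_posted(ctx);
}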
+28 −27
@@ -1025,7 +1025,8 @@ static void io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
static void io_uring_cancel_sqpoll(struct io_sq_data *sqd);
static struct io_rsrc_node *io_rsrc_node_alloc(struct io_ring_ctx *ctx);

-static bool io_cqring_fill_event(struct io_kiocb *req, long res, unsigned cflags);
+static bool io_cqring_fill_event(struct io_ring_ctx *ctx, u64 user_data,
+				 long res, unsigned int cflags);
static void io_put_req(struct io_kiocb *req);
static void io_put_req_deferred(struct io_kiocb *req, int nr);
static void io_dismantle_req(struct io_kiocb *req);
@@ -1266,7 +1267,7 @@ static void io_kill_timeout(struct io_kiocb *req, int status)
		atomic_set(&req->ctx->cq_timeouts,
			atomic_read(&req->ctx->cq_timeouts) + 1);
		list_del_init(&req->timeout.list);
-		io_cqring_fill_event(req, status, 0);
+		io_cqring_fill_event(req->ctx, req->user_data, status, 0);
		io_put_req_deferred(req, 1);
	}
}
@@ -1500,10 +1501,9 @@ static inline void req_ref_get(struct io_kiocb *req)
	atomic_inc(&req->refs);
}

-static bool io_cqring_event_overflow(struct io_kiocb *req, long res,
-				     unsigned int cflags)
+static bool io_cqring_event_overflow(struct io_ring_ctx *ctx, u64 user_data,
+				     long res, unsigned int cflags)
{
-	struct io_ring_ctx *ctx = req->ctx;
	struct io_overflow_cqe *ocqe;

	ocqe = kmalloc(sizeof(*ocqe), GFP_ATOMIC | __GFP_ACCOUNT);
@@ -1521,20 +1521,19 @@ static bool io_cqring_event_overflow(struct io_kiocb *req, long res,
		set_bit(0, &ctx->cq_check_overflow);
		ctx->rings->sq_flags |= IORING_SQ_CQ_OVERFLOW;
	}
-	ocqe->cqe.user_data = req->user_data;
+	ocqe->cqe.user_data = user_data;
	ocqe->cqe.res = res;
	ocqe->cqe.flags = cflags;
	list_add_tail(&ocqe->list, &ctx->cq_overflow_list);
	return true;
}

-static inline bool __io_cqring_fill_event(struct io_kiocb *req, long res,
-					     unsigned int cflags)
+static inline bool __io_cqring_fill_event(struct io_ring_ctx *ctx, u64 user_data,
+					  long res, unsigned int cflags)
{
-	struct io_ring_ctx *ctx = req->ctx;
	struct io_uring_cqe *cqe;

-	trace_io_uring_complete(ctx, req->user_data, res, cflags);
+	trace_io_uring_complete(ctx, user_data, res, cflags);

	/*
	 * If we can't get a cq entry, userspace overflowed the
@@ -1543,19 +1542,19 @@ static inline bool __io_cqring_fill_event(struct io_kiocb *req, long res,
	 */
	cqe = io_get_cqring(ctx);
	if (likely(cqe)) {
-		WRITE_ONCE(cqe->user_data, req->user_data);
+		WRITE_ONCE(cqe->user_data, user_data);
		WRITE_ONCE(cqe->res, res);
		WRITE_ONCE(cqe->flags, cflags);
		return true;
	}
-	return io_cqring_event_overflow(req, res, cflags);
+	return io_cqring_event_overflow(ctx, user_data, res, cflags);
}

/* not as hot to bloat with inlining */
-static noinline bool io_cqring_fill_event(struct io_kiocb *req, long res,
-					  unsigned int cflags)
+static noinline bool io_cqring_fill_event(struct io_ring_ctx *ctx, u64 user_data,
+					  long res, unsigned int cflags)
{
-	return __io_cqring_fill_event(req, res, cflags);
+	return __io_cqring_fill_event(ctx, user_data, res, cflags);
}

static void io_req_complete_post(struct io_kiocb *req, long res,
@@ -1565,7 +1564,7 @@ static void io_req_complete_post(struct io_kiocb *req, long res,
	unsigned long flags;

	spin_lock_irqsave(&ctx->completion_lock, flags);
-	__io_cqring_fill_event(req, res, cflags);
+	__io_cqring_fill_event(ctx, req->user_data, res, cflags);
	/*
	 * If we're the last reference to this request, add to our locked
	 * free_list cache.
@@ -1776,7 +1775,8 @@ static bool io_kill_linked_timeout(struct io_kiocb *req)
		io_remove_next_linked(req);
		link->timeout.head = NULL;
		if (hrtimer_try_to_cancel(&io->timer) != -1) {
-			io_cqring_fill_event(link, -ECANCELED, 0);
+			io_cqring_fill_event(link->ctx, link->user_data,
+					     -ECANCELED, 0);
			io_put_req_deferred(link, 1);
			return true;
		}
@@ -1795,7 +1795,7 @@ static void io_fail_links(struct io_kiocb *req)
		link->link = NULL;

		trace_io_uring_fail_link(req, link);
-		io_cqring_fill_event(link, -ECANCELED, 0);
+		io_cqring_fill_event(link->ctx, link->user_data, -ECANCELED, 0);
		io_put_req_deferred(link, 2);
		link = nxt;
	}
@@ -2116,7 +2116,8 @@ static void io_submit_flush_completions(struct io_comp_state *cs,
	spin_lock_irq(&ctx->completion_lock);
	for (i = 0; i < nr; i++) {
		req = cs->reqs[i];
-		__io_cqring_fill_event(req, req->result, req->compl.cflags);
+		__io_cqring_fill_event(ctx, req->user_data, req->result,
+					req->compl.cflags);
	}
	io_commit_cqring(ctx);
	spin_unlock_irq(&ctx->completion_lock);
@@ -2256,7 +2257,7 @@ static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
		if (req->flags & REQ_F_BUFFER_SELECTED)
			cflags = io_put_rw_kbuf(req);

-		__io_cqring_fill_event(req, req->result, cflags);
+		__io_cqring_fill_event(ctx, req->user_data, req->result, cflags);
		(*nr_events)++;

		if (req_ref_put_and_test(req))
@@ -4875,7 +4876,7 @@ static bool io_poll_complete(struct io_kiocb *req, __poll_t mask)
	}
	if (req->poll.events & EPOLLONESHOT)
		flags = 0;
-	if (!io_cqring_fill_event(req, error, flags)) {
+	if (!io_cqring_fill_event(ctx, req->user_data, error, flags)) {
		io_poll_remove_waitqs(req);
		req->poll.done = true;
		flags = 0;
@@ -5203,7 +5204,7 @@ static bool io_poll_remove_one(struct io_kiocb *req)

	do_complete = io_poll_remove_waitqs(req);
	if (do_complete) {
-		io_cqring_fill_event(req, -ECANCELED, 0);
+		io_cqring_fill_event(req->ctx, req->user_data, -ECANCELED, 0);
		io_commit_cqring(req->ctx);
		req_set_fail_links(req);
		io_put_req_deferred(req, 1);
@@ -5455,7 +5456,7 @@ static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
	atomic_set(&req->ctx->cq_timeouts,
		atomic_read(&req->ctx->cq_timeouts) + 1);

-	io_cqring_fill_event(req, -ETIME, 0);
+	io_cqring_fill_event(ctx, req->user_data, -ETIME, 0);
	io_commit_cqring(ctx);
	spin_unlock_irqrestore(&ctx->completion_lock, flags);

@@ -5497,7 +5498,7 @@ static int io_timeout_cancel(struct io_ring_ctx *ctx, __u64 user_data)
		return PTR_ERR(req);

	req_set_fail_links(req);
-	io_cqring_fill_event(req, -ECANCELED, 0);
+	io_cqring_fill_event(ctx, req->user_data, -ECANCELED, 0);
	io_put_req_deferred(req, 1);
	return 0;
}
@@ -5570,7 +5571,7 @@ static int io_timeout_remove(struct io_kiocb *req, unsigned int issue_flags)
		ret = io_timeout_update(ctx, tr->addr, &tr->ts,
					io_translate_timeout_mode(tr->flags));

-	io_cqring_fill_event(req, ret, 0);
+	io_cqring_fill_event(ctx, req->user_data, ret, 0);
	io_commit_cqring(ctx);
	spin_unlock_irq(&ctx->completion_lock);
	io_cqring_ev_posted(ctx);
@@ -5722,7 +5723,7 @@ static void io_async_find_and_cancel(struct io_ring_ctx *ctx,
done:
	if (!ret)
		ret = success_ret;
-	io_cqring_fill_event(req, ret, 0);
+	io_cqring_fill_event(ctx, req->user_data, ret, 0);
	io_commit_cqring(ctx);
	spin_unlock_irqrestore(&ctx->completion_lock, flags);
	io_cqring_ev_posted(ctx);
@@ -5779,7 +5780,7 @@ static int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags)

	spin_lock_irq(&ctx->completion_lock);
done:
-	io_cqring_fill_event(req, ret, 0);
+	io_cqring_fill_event(ctx, req->user_data, ret, 0);
	io_commit_cqring(ctx);
	spin_unlock_irq(&ctx->completion_lock);
	io_cqring_ev_posted(ctx);