Commit 7a612350 authored by Pavel Begunkov's avatar Pavel Begunkov Committed by Jens Axboe
Browse files

io_uring: fix complete_post races for linked req



Calling io_queue_next() after spin_unlock in io_req_complete_post()
races with the other side extracting and reusing this request. Hand-code
the relevant parts of io_req_find_next() instead, considering that
io_disarm_next() and io_req_task_queue() have to be (and are safe to be)
called with completion_lock held.

io_req_complete_post() already does io_commit_cqring() and
io_cqring_ev_posted(), so just reuse them to post completion events
after io_disarm_next().

Signed-off-by: default avatarPavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/5672a62f3150ee7c55849f40c0037655c4f2840f.1615250156.git.asml.silence@gmail.com


Signed-off-by: default avatarJens Axboe <axboe@kernel.dk>
parent 33cc89a9
Loading
Loading
Loading
Loading
+14 −7
Original line number Diff line number Diff line
@@ -985,6 +985,7 @@ static const struct io_op_def io_op_defs[] = {
	[IORING_OP_UNLINKAT] = {},
};

static bool io_disarm_next(struct io_kiocb *req);
static void io_uring_del_task_file(unsigned long index);
static void io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
					 struct task_struct *task,
@@ -1525,7 +1526,7 @@ static void io_cqring_fill_event(struct io_kiocb *req, long res)
	__io_cqring_fill_event(req, res, 0);
}

static inline void io_req_complete_post(struct io_kiocb *req, long res,
static void io_req_complete_post(struct io_kiocb *req, long res,
				 unsigned int cflags)
{
	struct io_ring_ctx *ctx = req->ctx;
@@ -1533,7 +1534,6 @@ static inline void io_req_complete_post(struct io_kiocb *req, long res,

	spin_lock_irqsave(&ctx->completion_lock, flags);
	__io_cqring_fill_event(req, res, cflags);
	io_commit_cqring(ctx);
	/*
	 * If we're the last reference to this request, add to our locked
	 * free_list cache.
@@ -1541,20 +1541,27 @@ static inline void io_req_complete_post(struct io_kiocb *req, long res,
	if (refcount_dec_and_test(&req->refs)) {
		struct io_comp_state *cs = &ctx->submit_state.comp;

		if (req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) {
			if (req->flags & (REQ_F_LINK_TIMEOUT | REQ_F_FAIL_LINK))
				io_disarm_next(req);
			if (req->link) {
				io_req_task_queue(req->link);
				req->link = NULL;
			}
		}
		io_dismantle_req(req);
		io_put_task(req->task, 1);
		list_add(&req->compl.list, &cs->locked_free_list);
		cs->locked_free_nr++;
	} else
		req = NULL;
	io_commit_cqring(ctx);
	spin_unlock_irqrestore(&ctx->completion_lock, flags);

	io_cqring_ev_posted(ctx);
	if (req) {
		io_queue_next(req);

	if (req)
		percpu_ref_put(&ctx->refs);
}
}

static void io_req_complete_state(struct io_kiocb *req, long res,
				  unsigned int cflags)