Commit aacf2f9f authored by Pavel Begunkov's avatar Pavel Begunkov Committed by Jens Axboe
Browse files

io_uring: fix req->apoll_events



apoll_events should be set once in the beginning of poll arming just as
poll->events and not change after. However, currently io_uring resets it
on each __io_poll_execute() for no clear reason. There is also a place
in __io_arm_poll_handler() where we add EPOLLONESHOT to downgrade a
multishot, but forget to do the same thing with ->apoll_events, which is
buggy.

Fixes: 81459350 ("io_uring: cache req->apoll->events in req->cflags")
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Reviewed-by: Hao Xu <howeyxu@tencent.com>
Link: https://lore.kernel.org/r/0aef40399ba75b1a4d2c2e85e6e8fd93c02fc6e4.1655814213.git.asml.silence@gmail.com


Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent b60cac14
Loading
Loading
Loading
Loading
+8 −4
Original line number Diff line number Diff line
@@ -6950,7 +6950,8 @@ static void io_apoll_task_func(struct io_kiocb *req, bool *locked)
		io_req_complete_failed(req, ret);
}

static void __io_poll_execute(struct io_kiocb *req, int mask, __poll_t events)
static void __io_poll_execute(struct io_kiocb *req, int mask,
			      __poll_t __maybe_unused events)
{
	req->cqe.res = mask;
	/*
@@ -6959,7 +6960,6 @@ static void __io_poll_execute(struct io_kiocb *req, int mask, __poll_t events)
	 * CPU. We want to avoid pulling in req->apoll->events for that
	 * case.
	 */
	req->apoll_events = events;
	if (req->opcode == IORING_OP_POLL_ADD)
		req->io_task_work.func = io_poll_task_func;
	else
@@ -7110,6 +7110,8 @@ static int __io_arm_poll_handler(struct io_kiocb *req,
	io_init_poll_iocb(poll, mask, io_poll_wake);
	poll->file = req->file;

	req->apoll_events = poll->events;

	ipt->pt._key = mask;
	ipt->req = req;
	ipt->error = 0;
@@ -7140,8 +7142,10 @@ static int __io_arm_poll_handler(struct io_kiocb *req,

	if (mask) {
		/* can't multishot if failed, just queue the event we've got */
		if (unlikely(ipt->error || !ipt->nr_entries))
		if (unlikely(ipt->error || !ipt->nr_entries)) {
			poll->events |= EPOLLONESHOT;
			req->apoll_events |= EPOLLONESHOT;
		}
		__io_poll_execute(req, mask, poll->events);
		return 0;
	}
@@ -7388,7 +7392,7 @@ static int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe
		return -EINVAL;

	io_req_set_refcount(req);
	req->apoll_events = poll->events = io_poll_parse_events(sqe, flags);
	poll->events = io_poll_parse_events(sqe, flags);
	return 0;
}