Commit e77823c7 authored by Jens Axboe's avatar Jens Axboe Committed by sanglipeng
Browse files

io_uring: pass in EPOLL_URING_WAKE for eventfd signaling and wakeups

stable inclusion
from stable-v5.10.162
commit 189556b05e1770263c43fa5b4c689e7cd3fa5b4e
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I7P7OH

Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id=189556b05e1770263c43fa5b4c689e7cd3fa5b4e



--------------------------------

[ Upstream commit 44648532 ]

Pass in EPOLL_URING_WAKE when signaling eventfd or doing poll related
wakeups, so that we can check for a circular event dependency between
eventfd and epoll. If this flag is set when our wakeup handlers are
called, then we know we have a dependency that needs to terminate
multishot requests.

eventfd and epoll are the only such possible dependencies.

Cc: stable@vger.kernel.org # 6.0
Signed-off-by: default avatarJens Axboe <axboe@kernel.dk>
Signed-off-by: default avatarGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: default avatarsanglipeng <sanglipeng1@jd.com>
parent c6fc74c8
Loading
Loading
Loading
Loading
+20 −7
Original line number Diff line number Diff line
@@ -1629,13 +1629,15 @@ static void io_cqring_ev_posted(struct io_ring_ctx *ctx)
	 * wake as many waiters as we need to.
	 */
	if (wq_has_sleeper(&ctx->cq_wait))
		wake_up_all(&ctx->cq_wait);
		__wake_up(&ctx->cq_wait, TASK_NORMAL, 0,
				poll_to_key(EPOLL_URING_WAKE | EPOLLIN));
	if (ctx->sq_data && waitqueue_active(&ctx->sq_data->wait))
		wake_up(&ctx->sq_data->wait);
	if (io_should_trigger_evfd(ctx))
		eventfd_signal(ctx->cq_ev_fd, 1);
		eventfd_signal_mask(ctx->cq_ev_fd, 1, EPOLL_URING_WAKE);
	if (waitqueue_active(&ctx->poll_wait))
		wake_up_interruptible(&ctx->poll_wait);
		__wake_up(&ctx->poll_wait, TASK_INTERRUPTIBLE, 0,
				poll_to_key(EPOLL_URING_WAKE | EPOLLIN));
}

static void io_cqring_ev_posted_iopoll(struct io_ring_ctx *ctx)
@@ -1645,12 +1647,14 @@ static void io_cqring_ev_posted_iopoll(struct io_ring_ctx *ctx)

	if (ctx->flags & IORING_SETUP_SQPOLL) {
		if (waitqueue_active(&ctx->cq_wait))
			wake_up_all(&ctx->cq_wait);
			__wake_up(&ctx->cq_wait, TASK_NORMAL, 0,
				  poll_to_key(EPOLL_URING_WAKE | EPOLLIN));
	}
	if (io_should_trigger_evfd(ctx))
		eventfd_signal(ctx->cq_ev_fd, 1);
		eventfd_signal_mask(ctx->cq_ev_fd, 1, EPOLL_URING_WAKE);
	if (waitqueue_active(&ctx->poll_wait))
		wake_up_interruptible(&ctx->poll_wait);
		__wake_up(&ctx->poll_wait, TASK_INTERRUPTIBLE, 0,
				poll_to_key(EPOLL_URING_WAKE | EPOLLIN));
}

/* Returns true if there are no backlogged entries after the flush */
@@ -5480,8 +5484,17 @@ static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
	if (mask && !(mask & poll->events))
		return 0;

	if (io_poll_get_ownership(req))
	if (io_poll_get_ownership(req)) {
		/*
		 * If we trigger a multishot poll off our own wakeup path,
		 * disable multishot as there is a circular dependency between
		 * CQ posting and triggering the event.
		 */
		if (mask & EPOLL_URING_WAKE)
			poll->events |= EPOLLONESHOT;

		__io_poll_execute(req, mask);
	}
	return 1;
}