Commit dbc2564c authored by Hao Xu's avatar Hao Xu Committed by Jens Axboe
Browse files

io_uring: let fast poll support multishot



For operations like accept, multishot is a useful feature, since we can
reduce the number of accept SQEs. Let's integrate it into fast poll; it
may be good for other operations in the future.

Signed-off-by: Hao Xu <howeyxu@tencent.com>
Link: https://lore.kernel.org/r/20220514142046.58072-4-haoxu.linux@gmail.com


Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 227685eb
Loading
Loading
Loading
Loading
+32 −15
Original line number Diff line number Diff line
@@ -6011,6 +6011,7 @@ static void io_poll_remove_entries(struct io_kiocb *req)
	rcu_read_unlock();
}

static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags);
/*
 * All poll tw should go through this. Checks for poll events, manages
 * references, does rewait, etc.
@@ -6019,10 +6020,10 @@ static void io_poll_remove_entries(struct io_kiocb *req)
 * either spurious wakeup or multishot CQE is served. 0 when it's done with
 * the request, then the mask is stored in req->cqe.res.
 */
static int io_poll_check_events(struct io_kiocb *req, bool locked)
static int io_poll_check_events(struct io_kiocb *req, bool *locked)
{
	struct io_ring_ctx *ctx = req->ctx;
	int v;
	int v, ret;

	/* req->task == current here, checking PF_EXITING is safe */
	if (unlikely(req->task->flags & PF_EXITING))
@@ -6046,22 +6047,36 @@ static int io_poll_check_events(struct io_kiocb *req, bool locked)
			req->cqe.res = vfs_poll(req->file, &pt) & req->apoll_events;
		}

		/* multishot, just fill an CQE and proceed */
		if (req->cqe.res && !(req->apoll_events & EPOLLONESHOT)) {
			__poll_t mask = mangle_poll(req->cqe.res & req->apoll_events);
		if ((unlikely(!req->cqe.res)))
			continue;
		if (req->apoll_events & EPOLLONESHOT)
			return 0;

		/* multishot, just fill a CQE and proceed */
		if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
			__poll_t mask = mangle_poll(req->cqe.res &
						    req->apoll_events);
			bool filled;

			spin_lock(&ctx->completion_lock);
			filled = io_fill_cqe_aux(ctx, req->cqe.user_data, mask,
						 IORING_CQE_F_MORE);
			filled = io_fill_cqe_aux(ctx, req->cqe.user_data,
						 mask, IORING_CQE_F_MORE);
			io_commit_cqring(ctx);
			spin_unlock(&ctx->completion_lock);
			if (unlikely(!filled))
				return -ECANCELED;
			if (filled) {
				io_cqring_ev_posted(ctx);
		} else if (req->cqe.res) {
			return 0;
				continue;
			}
			return -ECANCELED;
		}

		io_tw_lock(req->ctx, locked);
		if (unlikely(req->task->flags & PF_EXITING))
			return -EFAULT;
		ret = io_issue_sqe(req,
				   IO_URING_F_NONBLOCK|IO_URING_F_COMPLETE_DEFER);
		if (ret)
			return ret;

		/*
		 * Release all references, retry if someone tried to restart
@@ -6077,7 +6092,7 @@ static void io_poll_task_func(struct io_kiocb *req, bool *locked)
	struct io_ring_ctx *ctx = req->ctx;
	int ret;

	ret = io_poll_check_events(req, *locked);
	ret = io_poll_check_events(req, locked);
	if (ret > 0)
		return;

@@ -6102,7 +6117,7 @@ static void io_apoll_task_func(struct io_kiocb *req, bool *locked)
	struct io_ring_ctx *ctx = req->ctx;
	int ret;

	ret = io_poll_check_events(req, *locked);
	ret = io_poll_check_events(req, locked);
	if (ret > 0)
		return;

@@ -6343,7 +6358,7 @@ static int io_arm_poll_handler(struct io_kiocb *req, unsigned issue_flags)
	struct io_ring_ctx *ctx = req->ctx;
	struct async_poll *apoll;
	struct io_poll_table ipt;
	__poll_t mask = IO_ASYNC_POLL_COMMON | POLLERR;
	__poll_t mask = POLLPRI | POLLERR;
	int ret;

	if (!def->pollin && !def->pollout)
@@ -6352,6 +6367,8 @@ static int io_arm_poll_handler(struct io_kiocb *req, unsigned issue_flags)
		return IO_APOLL_ABORTED;
	if ((req->flags & (REQ_F_POLLED|REQ_F_PARTIAL_IO)) == REQ_F_POLLED)
		return IO_APOLL_ABORTED;
	if (!(req->flags & REQ_F_APOLL_MULTISHOT))
		mask |= EPOLLONESHOT;

	if (def->pollin) {
		mask |= POLLIN | POLLRDNORM;