Commit b688f11e authored by Jens Axboe

io_uring: utilize the io batching infrastructure for more efficient polled IO

Wire up using an io_comp_batch for f_op->iopoll(). If the lower stack
supports it, we can handle high rates of polled IO more efficiently.

This raises the single-core efficiency on my system from ~6.1M IOPS to
~6.6M IOPS running a random read workload at depth 128 on two gen2
Optane drives.
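
The mechanism is simple: while polling, completed requests are collected
on a per-call batch and then retired with a single completion call,
instead of one call per request. Below is a minimal, self-contained
user-space sketch of that accumulate-then-flush pattern; the names
mirror the kernel's io_comp_batch, but the code itself (complete_batch,
the sample requests) is illustrative, not kernel code.

#include <stdio.h>

struct request {
	int id;
	struct request *rq_next;	/* links requests into the batch */
};

struct io_comp_batch {
	struct request *req_list;	/* requests completed while polling */
	void (*complete)(struct io_comp_batch *);
};

/* One completion pass for the whole batch, not one call per request. */
static void complete_batch(struct io_comp_batch *iob)
{
	struct request *rq = iob->req_list;

	while (rq) {
		printf("completing request %d\n", rq->id);
		rq = rq->rq_next;
	}
	iob->req_list = NULL;
}

int main(void)
{
	struct io_comp_batch iob = { .complete = complete_batch };
	struct request rqs[3] = { { .id = 0 }, { .id = 1 }, { .id = 2 } };

	/* "Polling": the lower layer links finished requests into the batch. */
	for (int i = 0; i < 3; i++) {
		rqs[i].rq_next = iob.req_list;
		iob.req_list = &rqs[i];
	}

	/* Flush once, mirroring the iob.complete(&iob) call in the diff below. */
	if (iob.req_list)
		iob.complete(&iob);
	return 0;
}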

Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent c234a653
fs/io_uring.c  +6 −2
@@ -2458,6 +2458,7 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events,
 {
 	struct io_kiocb *req, *tmp;
 	unsigned int poll_flags = BLK_POLL_NOSLEEP;
+	DEFINE_IO_COMP_BATCH(iob);
 	LIST_HEAD(done);
 
 	/*
@@ -2483,17 +2484,20 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events,
 		if (!list_empty(&done))
 			break;
 
-		ret = kiocb->ki_filp->f_op->iopoll(kiocb, NULL, poll_flags);
+		ret = kiocb->ki_filp->f_op->iopoll(kiocb, &iob, poll_flags);
 		if (unlikely(ret < 0))
 			return ret;
 		else if (ret)
 			poll_flags |= BLK_POLL_ONESHOT;
 
 		/* iopoll may have completed current req */
-		if (READ_ONCE(req->iopoll_completed))
+		if (!rq_list_empty(iob.req_list) ||
+		    READ_ONCE(req->iopoll_completed))
 			list_move_tail(&req->inflight_entry, &done);
 	}
 
+	if (!rq_list_empty(iob.req_list))
+		iob.complete(&iob);
 	if (!list_empty(&done))
 		io_iopoll_complete(ctx, nr_events, &done);
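
For reference, the io_comp_batch container and the DEFINE_IO_COMP_BATCH()
initializer used above come from include/linux/blkdev.h. At the time of
this commit the definition looks roughly like the following (quoted from
the 5.16-era header; treat field order and spelling as approximate):

struct io_comp_batch {
	struct request *req_list;
	bool need_ts;
	void (*complete)(struct io_comp_batch *);
};

#define DEFINE_IO_COMP_BATCH(name)	struct io_comp_batch name = { }

When the driver's ->iopoll() implementation adds finished requests to
iob.req_list instead of completing them one by one, the single
iob.complete(&iob) call above retires the entire batch, which is where
the efficiency win described in the commit message comes from.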