Commit 3e6a0d3c authored by Jens Axboe

io_uring: fix -EAGAIN retry with IOPOLL

We no longer revert the iovec on -EIOCBQUEUED, see commit ab2125df,
and this started causing issues for IOPOLL on devices that run out of
request slots. Turns out that outside of needing a revert for those, we
also had a bug where we didn't properly set up retry inside the submission
path. That could cause re-import of the iovec, if any, and that could lead
to spurious results if the application had those allocated on the stack.

Catch -EAGAIN retry and make the iovec stable for IOPOLL, just like we do
for !IOPOLL retries.
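
For illustration only (none of this is from the commit): a minimal liburing
sketch of the pattern described above, a polled read whose iovec lives on a
helper's stack. The device path, sizes, and lack of error handling are
assumptions; the point is that the iovec is gone once the helper returns, so
a kernel-side -EAGAIN retry must not re-import it from user memory.

/* Sketch, assuming liburing and a device that supports completion polling.
 * Build: gcc -O2 demo.c -luring. The path below is a placeholder. */
#define _GNU_SOURCE
#include <fcntl.h>
#include <liburing.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/uio.h>

static int queue_read(struct io_uring *ring, int fd, void *buf, size_t len)
{
	/* On-stack iovec: only valid until this function returns */
	struct iovec iov = { .iov_base = buf, .iov_len = len };
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

	if (!sqe)
		return -1;
	io_uring_prep_readv(sqe, fd, &iov, 1, 0);
	return io_uring_submit(ring);	/* iov is dead after we return */
}

int main(void)
{
	struct io_uring ring;
	struct io_uring_cqe *cqe;
	void *buf;
	int fd;

	fd = open("/dev/nvme0n1", O_RDONLY | O_DIRECT);	/* IOPOLL wants O_DIRECT */
	if (fd < 0 || posix_memalign(&buf, 4096, 4096))
		return 1;
	if (io_uring_queue_init(8, &ring, IORING_SETUP_IOPOLL))
		return 1;
	if (queue_read(&ring, fd, buf, 4096) < 1)
		return 1;
	io_uring_wait_cqe(&ring, &cqe);	/* busy-polls the completion */
	printf("read: %d\n", cqe->res);
	io_uring_cqe_seen(&ring, cqe);
	io_uring_queue_exit(&ring);
	return 0;
}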

Cc: <stable@vger.kernel.org> # 5.9+
Reported-by: Abaci Robot <abaci@linux.alibaba.com>
Reported-by: Xiaoguang Wang <xiaoguang.wang@linux.alibaba.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent dc7bbc9e
fs/io_uring.c +31 −5
@@ -2423,23 +2423,32 @@ static bool io_resubmit_prep(struct io_kiocb *req)
 		return false;
 	return !io_setup_async_rw(req, iovec, inline_vecs, &iter, false);
 }
-#endif
 
-static bool io_rw_reissue(struct io_kiocb *req)
+static bool io_rw_should_reissue(struct io_kiocb *req)
 {
-#ifdef CONFIG_BLOCK
 	umode_t mode = file_inode(req->file)->i_mode;
+	struct io_ring_ctx *ctx = req->ctx;
 
 	if (!S_ISBLK(mode) && !S_ISREG(mode))
 		return false;
-	if ((req->flags & REQ_F_NOWAIT) || io_wq_current_is_worker())
+	if ((req->flags & REQ_F_NOWAIT) || (io_wq_current_is_worker() &&
+	    !(ctx->flags & IORING_SETUP_IOPOLL)))
 		return false;
 	/*
 	 * If ref is dying, we might be running poll reap from the exit work.
 	 * Don't attempt to reissue from that path, just let it fail with
 	 * -EAGAIN.
 	 */
-	if (percpu_ref_is_dying(&req->ctx->refs))
+	if (percpu_ref_is_dying(&ctx->refs))
 		return false;
+	return true;
+}
+#endif
+
+static bool io_rw_reissue(struct io_kiocb *req)
+{
+#ifdef CONFIG_BLOCK
+	if (!io_rw_should_reissue(req))
+		return false;
 
 	lockdep_assert_held(&req->ctx->uring_lock);
@@ -2482,6 +2491,19 @@ static void io_complete_rw_iopoll(struct kiocb *kiocb, long res, long res2)
 {
 	struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
 
+#ifdef CONFIG_BLOCK
+	/* Rewind iter, if we have one. iopoll path resubmits as usual */
+	if (res == -EAGAIN && io_rw_should_reissue(req)) {
+		struct io_async_rw *rw = req->async_data;
+
+		if (rw)
+			iov_iter_revert(&rw->iter,
+					req->result - iov_iter_count(&rw->iter));
+		else if (!io_resubmit_prep(req))
+			res = -EIO;
+	}
+#endif
+
 	if (kiocb->ki_flags & IOCB_WRITE)
 		kiocb_end_write(req);
 
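An aside on the arithmetic in the hunk above (illustration, not from the
commit): req->result holds the full byte count the request was issued for,
and iov_iter_count() returns what is still unconsumed, so their difference is
exactly how far the iter advanced before the -EAGAIN. Reverting by that
amount puts the iter back at its starting state, so the reissue begins from
scratch. A standalone toy model of that bookkeeping (plain C, not kernel
code):

#include <assert.h>
#include <stddef.h>

/* Toy stand-in for the kernel's iov_iter: just a remaining-byte count */
struct toy_iter {
	size_t count;	/* bytes left to transfer */
};

static void toy_advance(struct toy_iter *it, size_t bytes)
{
	it->count -= bytes;
}

/* Mirrors iov_iter_revert(): push 'bytes' back onto the iterator */
static void toy_revert(struct toy_iter *it, size_t bytes)
{
	it->count += bytes;
}

int main(void)
{
	size_t result = 8192;			/* req->result: full size */
	struct toy_iter iter = { .count = result };

	toy_advance(&iter, 4096);		/* device took 4K, then -EAGAIN */
	/* req->result - iov_iter_count(&rw->iter) == bytes consumed */
	toy_revert(&iter, result - iter.count);
	assert(iter.count == result);		/* iter is back at the start */
	return 0;
}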
@@ -3230,6 +3252,8 @@ static int io_read(struct io_kiocb *req, unsigned int issue_flags)
 	ret = io_iter_do_read(req, iter);
 
 	if (ret == -EIOCBQUEUED) {
+		if (req->async_data)
+			iov_iter_revert(iter, io_size - iov_iter_count(iter));
 		goto out_free;
 	} else if (ret == -EAGAIN) {
 		/* IOPOLL retry should happen for io-wq threads */
@@ -3361,6 +3385,8 @@ static int io_write(struct io_kiocb *req, unsigned int issue_flags)
 	/* no retry on NONBLOCK nor RWF_NOWAIT */
 	if (ret2 == -EAGAIN && (req->flags & REQ_F_NOWAIT))
 		goto done;
+	if (ret2 == -EIOCBQUEUED && req->async_data)
+		iov_iter_revert(iter, io_size - iov_iter_count(iter));
 	if (!force_nonblock || ret2 != -EAGAIN) {
 		/* IOPOLL retry should happen for io-wq threads */
 		if ((req->ctx->flags & IORING_SETUP_IOPOLL) && ret2 == -EAGAIN)
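
The last two hunks apply the same rewind on -EIOCBQUEUED in the submission
path, guarded by req->async_data, i.e. only when the iovec has already been
copied into request-owned storage. That copy is what "make the iovec stable"
means in the commit message: once the request owns its own iovec, a retry
never re-imports it from (possibly reused) user stack memory. A rough sketch
of that ownership transfer, with hypothetical names standing in for the
kernel's io_setup_async_rw()/req->async_data:

#include <stdlib.h>
#include <string.h>
#include <sys/uio.h>

/* Hypothetical request state holding a private copy of the caller's iovec */
struct async_rw_state {
	struct iovec *vecs;
	int nr_vecs;
};

/* Copy the caller's (possibly stack-allocated) iovec array into storage
 * owned by the request, so any later retry reads this copy and never
 * touches the caller's memory again. */
static struct async_rw_state *make_iovec_stable(const struct iovec *src, int nr)
{
	struct async_rw_state *st = malloc(sizeof(*st));

	if (!st)
		return NULL;
	st->vecs = calloc(nr, sizeof(*src));
	if (!st->vecs) {
		free(st);
		return NULL;
	}
	memcpy(st->vecs, src, nr * sizeof(*src));
	st->nr_vecs = nr;
	return st;
}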