Commit 4d55f238 authored by Jens Axboe

io_uring: don't recycle provided buffer if punted to async worker
We only really need to recycle the buffer when going async for a file
type that has an indefinite response time (e.g. non-file/bdev). For
files that need to arm poll, the async worker will arm poll anyway and
the buffer will get recycled there.

In that latter case, we're not holding ctx->uring_lock. Ensure we take
the issue_flags into account and acquire it if we need to.
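
The locking rule this introduces is a common pattern: a helper that can
be called both with and without a lock held takes the caller's flags and
acquires the lock itself only in the unlocked case. A minimal userspace
sketch of that pattern, using pthreads and hypothetical names
(recycle_buffer, F_UNLOCKED) as stand-ins for io_kbuf_recycle() and
IO_URING_F_UNLOCKED; this is an analogue, not the kernel code itself:

#include <pthread.h>
#include <stdio.h>

#define F_UNLOCKED (1u << 0)	/* hypothetical analogue of IO_URING_F_UNLOCKED */

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int free_buffers;	/* stand-in for the ctx buffer list */

/* Caller may or may not hold 'lock'; flags say which. */
static void recycle_buffer(unsigned flags)
{
	if (flags & F_UNLOCKED)
		pthread_mutex_lock(&lock);

	free_buffers++;		/* the actual recycle step */

	if (flags & F_UNLOCKED)
		pthread_mutex_unlock(&lock);
}

int main(void)
{
	/* Locked path: caller already holds the mutex. */
	pthread_mutex_lock(&lock);
	recycle_buffer(0);
	pthread_mutex_unlock(&lock);

	/* Unlocked path (e.g. async worker): helper locks itself. */
	recycle_buffer(F_UNLOCKED);

	printf("recycled %d buffers\n", free_buffers);
	return 0;
}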

Fixes: b1c62645 ("io_uring: recycle provided buffers if request goes async")
Reported-by: Stefan Roesch <shr@fb.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent d89a4fac
+8 −3
@@ -1383,7 +1383,7 @@ static struct io_buffer_list *io_buffer_get_list(struct io_ring_ctx *ctx,
 	return NULL;
 }
 
-static void io_kbuf_recycle(struct io_kiocb *req)
+static void io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags)
 {
 	struct io_ring_ctx *ctx = req->ctx;
 	struct io_buffer_list *bl;
@@ -1392,6 +1392,9 @@ static void io_kbuf_recycle(struct io_kiocb *req)
 	if (likely(!(req->flags & REQ_F_BUFFER_SELECTED)))
 		return;
 
+	if (issue_flags & IO_URING_F_UNLOCKED)
+		mutex_lock(&ctx->uring_lock);
+
 	lockdep_assert_held(&ctx->uring_lock);
 
 	buf = req->kbuf;
@@ -1399,6 +1402,9 @@ static void io_kbuf_recycle(struct io_kiocb *req)
 	list_add(&buf->list, &bl->buf_list);
 	req->flags &= ~REQ_F_BUFFER_SELECTED;
 	req->kbuf = NULL;
+
+	if (issue_flags & IO_URING_F_UNLOCKED)
+		mutex_unlock(&ctx->uring_lock);
 }
 
 static bool io_match_task(struct io_kiocb *head, struct task_struct *task,
@@ -6254,7 +6260,7 @@ static int io_arm_poll_handler(struct io_kiocb *req, unsigned issue_flags)
 	req->flags |= REQ_F_POLLED;
 	ipt.pt._qproc = io_async_queue_proc;
 
-	io_kbuf_recycle(req);
+	io_kbuf_recycle(req, issue_flags);
 
 	ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask);
 	if (ret || ipt.error)
@@ -7504,7 +7510,6 @@ static void io_queue_sqe_arm_apoll(struct io_kiocb *req)
 		 * Queued up for async execution, worker will release
 		 * submit reference when the iocb is actually submitted.
 		 */
-		io_kbuf_recycle(req);
 		io_queue_async_work(req, NULL);
 		break;
 	case IO_APOLL_OK: