Commit 024b8fde authored by Hao Xu, committed by Jens Axboe

io_uring: kbuf: kill __io_kbuf_recycle()

__io_kbuf_recycle() is only called in io_kbuf_recycle(). Kill it and
tweak the code so that the legacy pbuf and ring pbuf paths become clearer.
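
Condensed from the hunks below, the single inline entry point now dispatches
to a per-flavor helper instead of one shared __io_kbuf_recycle():

	static inline void io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags)
	{
		if (req->opcode == IORING_OP_READV)
			return;

		if (req->flags & REQ_F_BUFFER_SELECTED)
			io_kbuf_recycle_legacy(req, issue_flags);
		if (req->flags & REQ_F_BUFFER_RING)
			io_kbuf_recycle_ring(req);
	}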

Signed-off-by: Hao Xu <howeyxu@tencent.com>
Link: https://lore.kernel.org/r/20220622055551.642370-1-hao.xu@linux.dev
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent c6dd763c
io_uring/kbuf.c  +42 −29
@@ -37,18 +37,52 @@ static inline struct io_buffer_list *io_buffer_get_list(struct io_ring_ctx *ctx,
 	return xa_load(&ctx->io_bl_xa, bgid);
 }
 
-void __io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags)
+static int io_buffer_add_list(struct io_ring_ctx *ctx,
+			      struct io_buffer_list *bl, unsigned int bgid)
+{
+	bl->bgid = bgid;
+	if (bgid < BGID_ARRAY)
+		return 0;
+
+	return xa_err(xa_store(&ctx->io_bl_xa, bgid, bl, GFP_KERNEL));
+}
+
+void io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags)
 {
 	struct io_ring_ctx *ctx = req->ctx;
 	struct io_buffer_list *bl;
 	struct io_buffer *buf;
 
+	/*
+	 * For legacy provided buffer mode, don't recycle if we already did
+	 * IO to this buffer. For ring-mapped provided buffer mode, we should
+	 * increment ring->head to explicitly monopolize the buffer to avoid
+	 * multiple use.
+	 */
+	if (req->flags & REQ_F_PARTIAL_IO)
+		return;
+
+	io_ring_submit_lock(ctx, issue_flags);
+
+	buf = req->kbuf;
+	bl = io_buffer_get_list(ctx, buf->bgid);
+	list_add(&buf->list, &bl->buf_list);
+	req->flags &= ~REQ_F_BUFFER_SELECTED;
+	req->buf_index = buf->bgid;
+
+	io_ring_submit_unlock(ctx, issue_flags);
+	return;
+}
+
+void io_kbuf_recycle_ring(struct io_kiocb *req)
+{
 	/*
 	 * We don't need to recycle for REQ_F_BUFFER_RING, we can just clear
 	 * the flag and hence ensure that bl->head doesn't get incremented.
 	 * If the tail has already been incremented, hang on to it.
 	 * The exception is partial io, that case we should increment bl->head
 	 * to monopolize the buffer.
 	 */
-	if (req->flags & REQ_F_BUFFER_RING) {
+	if (req->buf_list) {
 		if (req->flags & REQ_F_PARTIAL_IO) {
 			/*
@@ -68,27 +102,6 @@ void __io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags)
 	return;
 }
 
-	io_ring_submit_lock(ctx, issue_flags);
-
-	buf = req->kbuf;
-	bl = io_buffer_get_list(ctx, buf->bgid);
-	list_add(&buf->list, &bl->buf_list);
-	req->flags &= ~REQ_F_BUFFER_SELECTED;
-	req->buf_index = buf->bgid;
-
-	io_ring_submit_unlock(ctx, issue_flags);
-}
-
-static int io_buffer_add_list(struct io_ring_ctx *ctx,
-			      struct io_buffer_list *bl, unsigned int bgid)
-{
-	bl->bgid = bgid;
-	if (bgid < BGID_ARRAY)
-		return 0;
-
-	return xa_err(xa_store(&ctx->io_bl_xa, bgid, bl, GFP_KERNEL));
-}
-
 unsigned int __io_put_kbuf(struct io_kiocb *req, unsigned issue_flags)
 {
 	unsigned int cflags;
io_uring/kbuf.h  +7 −14
@@ -35,7 +35,6 @@ struct io_buffer {

 void __user *io_buffer_select(struct io_kiocb *req, size_t *len,
 			      unsigned int issue_flags);
-void __io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags);
 void io_destroy_buffers(struct io_ring_ctx *ctx);
 
 int io_remove_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
@@ -49,6 +48,9 @@ int io_unregister_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg);

 unsigned int __io_put_kbuf(struct io_kiocb *req, unsigned issue_flags);
 
+void io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags);
+void io_kbuf_recycle_ring(struct io_kiocb *req);
+
 static inline bool io_do_buffer_select(struct io_kiocb *req)
 {
 	if (!(req->flags & REQ_F_BUFFER_SELECT))
@@ -58,18 +60,6 @@ static inline bool io_do_buffer_select(struct io_kiocb *req)

 static inline void io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags)
 {
-	if (!(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)))
-		return;
-	/*
-	 * For legacy provided buffer mode, don't recycle if we already did
-	 * IO to this buffer. For ring-mapped provided buffer mode, we should
-	 * increment ring->head to explicitly monopolize the buffer to avoid
-	 * multiple use.
-	 */
-	if ((req->flags & REQ_F_BUFFER_SELECTED) &&
-	    (req->flags & REQ_F_PARTIAL_IO))
-		return;
-
 	/*
 	 * READV uses fields in `struct io_rw` (len/addr) to stash the selected
 	 * buffer data. However if that buffer is recycled the original request
@@ -78,7 +68,10 @@ static inline void io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags)
 	if (req->opcode == IORING_OP_READV)
 		return;
 
-	__io_kbuf_recycle(req, issue_flags);
+	if (req->flags & REQ_F_BUFFER_SELECTED)
+		io_kbuf_recycle_legacy(req, issue_flags);
+	if (req->flags & REQ_F_BUFFER_RING)
+		io_kbuf_recycle_ring(req);
 }
 
 static inline unsigned int __io_put_kbuf_list(struct io_kiocb *req,
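
For context, io_kbuf_recycle() runs on paths where a request backs off and
will be retried later, returning its provided buffer so another request can
select it. A minimal illustrative call site, assuming a hypothetical opcode
handler and helper (neither is part of this diff):

	/* io_recv_example() and do_the_receive() are hypothetical */
	static int io_recv_example(struct io_kiocb *req, unsigned int issue_flags)
	{
		int ret;

		ret = do_the_receive(req);
		if (ret == -EAGAIN) {
			/* going async: give the selected buffer back first */
			io_kbuf_recycle(req, issue_flags);
			return -EAGAIN;
		}
		return ret;
	}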