Commit 2b358604 authored by Bijan Mottahedeh, committed by Jens Axboe
Browse files

io_uring: modularize io_sqe_buffers_register



Move allocation of buffer management structures, and validation of
buffers into separate routines.

Reviewed-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Bijan Mottahedeh <bijan.mottahedeh@oracle.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 0a96bbe4
Loading
Loading
Loading
Loading
+34 −17
Original line number Diff line number Diff line
@@ -8579,13 +8579,8 @@ static int io_sqe_buffer_register(struct io_ring_ctx *ctx, struct iovec *iov,
	return ret;
}

static int io_sqe_buffers_register(struct io_ring_ctx *ctx, void __user *arg,
				   unsigned int nr_args)
static int io_buffers_map_alloc(struct io_ring_ctx *ctx, unsigned int nr_args)
{
	int i, ret;
	struct iovec iov;
	struct page *last_hpage = NULL;

	if (ctx->user_bufs)
		return -EBUSY;
	if (!nr_args || nr_args > UIO_MAXIOV)
@@ -8596,24 +8591,46 @@ static int io_sqe_buffers_register(struct io_ring_ctx *ctx, void __user *arg,
	if (!ctx->user_bufs)
		return -ENOMEM;

	for (i = 0; i < nr_args; i++) {
		struct io_mapped_ubuf *imu = &ctx->user_bufs[i];

		ret = io_copy_iov(ctx, &iov, arg, i);
		if (ret)
			break;
	return 0;
}

/*
 * Validate a user-supplied iovec before it is registered as a fixed
 * buffer: the base pointer and length must be non-zero, and the length
 * is capped at SZ_1G.  Returns 0 if usable, -EFAULT otherwise.
 */
static int io_buffer_validate(struct iovec *iov)
{
	/*
	 * Don't impose further limits on the size and buffer
	 * constraints here, we'll -EINVAL later when IO is
	 * submitted if they are wrong.
	 */
	if (!iov->iov_base || !iov->iov_len)
		return -EFAULT;

	/* arbitrary limit, but we need something */
	if (iov->iov_len > SZ_1G)
		return -EFAULT;

	return 0;
}

static int io_sqe_buffers_register(struct io_ring_ctx *ctx, void __user *arg,
				   unsigned int nr_args)
{
	int i, ret;
	struct iovec iov;
	struct page *last_hpage = NULL;

	ret = io_buffers_map_alloc(ctx, nr_args);
	if (ret)
		return ret;

	for (i = 0; i < nr_args; i++) {
		struct io_mapped_ubuf *imu = &ctx->user_bufs[i];

		ret = io_copy_iov(ctx, &iov, arg, i);
		if (ret)
			break;

		ret = io_buffer_validate(&iov);
		if (ret)
			break;

		ret = io_sqe_buffer_register(ctx, &iov, imu, &last_hpage);