Commit 4e326358 authored by Pavel Begunkov, committed by Jens Axboe

io_uring: optimise SQPOLL mm/files grabbing

There are two reasons for this change. The first is to optimise
io_sq_thread_acquire_mm_files() for the non-SQPOLL case, which currently
does too many checks and function calls in the hot path, e.g. in
io_init_req().
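
To make the first point concrete, here is a minimal, self-contained C
sketch of the idiom the patch applies: the cheap common-case checks live
in a static inline wrapper, and only the rare SQPOLL case takes an
out-of-line slow path. The ring/acquire names below are hypothetical
stand-ins, not the kernel's:

/*
 * Minimal sketch, assuming a hypothetical ring with an sqpoll flag;
 * it illustrates the inline-fast-path idiom, not the kernel code.
 */
#include <stdbool.h>
#include <stdio.h>

struct ring { bool sqpoll; };

/* out-of-line slow path: only reached for SQPOLL rings */
static int acquire_slow(struct ring *r)
{
	/* take locks, bump mm/files refcounts, etc. */
	return 0;
}

/* inline fast path: for non-SQPOLL rings this reduces to two
 * branches and no function call in the hot path */
static inline int acquire(struct ring *r, bool task_exiting)
{
	if (task_exiting)
		return -1;	/* mirrors the PF_EXITING bail-out */
	if (!r->sqpoll)
		return 0;	/* non-SQPOLL: nothing to grab */
	return acquire_slow(r);
}

int main(void)
{
	struct ring r = { .sqpoll = false };

	printf("%d\n", acquire(&r, false));	/* fast path, prints 0 */
	return 0;
}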

The second is to not grab mm/files when they are not needed. As
__io_queue_sqe() issues only one request now, we can reuse
io_sq_thread_acquire_mm_files() instead of unconditionally acquiring
mm/files.
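
The per-request decision is table-driven in spirit: each opcode declares
which resources it needs, and only those are grabbed. A hedged C sketch
of that idea follows; op_def, needs_mm and needs_files are illustrative
stand-ins and do not reproduce the kernel's io_op_defs layout:

/*
 * Sketch only: a per-opcode table drives which resources a request
 * needs, so nothing is acquired unconditionally.
 */
#include <stdbool.h>

struct op_def {
	bool needs_mm;		/* opcode touches user memory */
	bool needs_files;	/* opcode uses the task's file table */
};

static const struct op_def op_defs[] = {
	{ .needs_mm = true,  .needs_files = false },	/* e.g. a read */
	{ .needs_mm = false, .needs_files = true  },	/* e.g. a close */
};

static int acquire_for_req(unsigned int opcode)
{
	const struct op_def *def = &op_defs[opcode];

	if (def->needs_mm) {
		/* grab mm only when this opcode needs it */
	}
	if (def->needs_files) {
		/* grab the file table only when this opcode needs it */
	}
	return 0;
}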

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent d3d7298d
+13 −14
@@ -1145,9 +1145,6 @@ static void io_sq_thread_drop_mm_files(void)
 
 static int __io_sq_thread_acquire_files(struct io_ring_ctx *ctx)
 {
-	if (current->flags & PF_EXITING)
-		return -EFAULT;
-
 	if (!current->files) {
 		struct files_struct *files;
 		struct nsproxy *nsproxy;
@@ -1175,15 +1172,9 @@ static int __io_sq_thread_acquire_mm(struct io_ring_ctx *ctx)
 {
 	struct mm_struct *mm;
 
-	if (current->flags & PF_EXITING)
-		return -EFAULT;
 	if (current->mm)
 		return 0;
 
-	/* Should never happen */
-	if (unlikely(!(ctx->flags & IORING_SETUP_SQPOLL)))
-		return -EFAULT;
-
 	task_lock(ctx->sqo_task);
 	mm = ctx->sqo_task->mm;
 	if (unlikely(!mm || !mmget_not_zero(mm)))
@@ -1198,7 +1189,7 @@ static int __io_sq_thread_acquire_mm(struct io_ring_ctx *ctx)
 	return -EFAULT;
 }
 
-static int io_sq_thread_acquire_mm_files(struct io_ring_ctx *ctx,
+static int __io_sq_thread_acquire_mm_files(struct io_ring_ctx *ctx,
 					   struct io_kiocb *req)
 {
 	const struct io_op_def *def = &io_op_defs[req->opcode];
@@ -1219,6 +1210,16 @@ static int io_sq_thread_acquire_mm_files(struct io_ring_ctx *ctx,
 	return 0;
 }
 
+static inline int io_sq_thread_acquire_mm_files(struct io_ring_ctx *ctx,
+						struct io_kiocb *req)
+{
+	if (unlikely(current->flags & PF_EXITING))
+		return -EFAULT;
+	if (!(ctx->flags & IORING_SETUP_SQPOLL))
+		return 0;
+	return __io_sq_thread_acquire_mm_files(ctx, req);
+}
+
 static void io_sq_thread_associate_blkcg(struct io_ring_ctx *ctx,
 					 struct cgroup_subsys_state **cur_css)
 {
@@ -2336,9 +2337,7 @@ static void __io_req_task_submit(struct io_kiocb *req)
 	struct io_ring_ctx *ctx = req->ctx;
 
 	mutex_lock(&ctx->uring_lock);
-	if (!ctx->sqo_dead &&
-	    !__io_sq_thread_acquire_mm(ctx) &&
-	    !__io_sq_thread_acquire_files(ctx))
+	if (!ctx->sqo_dead && !io_sq_thread_acquire_mm_files(ctx, req))
 		__io_queue_sqe(req);
 	else
 		__io_req_task_cancel(req, -EFAULT);