Commit 70aacfe6 authored by Pavel Begunkov, committed by Jens Axboe

io_uring: kill sqo_dead and sqo submission halting

As the SQPOLL task doesn't poke into ->sqo_task anymore, there is no need
to halt sqo submission when the master task exits. Before, that was
necessary to avoid races between accessing sqo_task->files and removing
them.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
[axboe: don't forget to enable SQPOLL before exit, if started disabled]
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 1c3b3e65
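
For context, a minimal userspace sketch of the SQPOLL mode this patch touches, written against liburing. This is illustrative only and not part of the commit: before this change, once the ring-creating task began exiting, ctx->sqo_dead halted SQ-thread submission and io_uring_enter() could fail with -EOWNERDEAD (see the removed checks below); with it, submission runs until the ring itself is torn down.

/*
 * Illustrative only, not part of this commit: a minimal SQPOLL user
 * built against liburing (link with -luring). With IORING_SETUP_SQPOLL
 * a kernel thread submits SQEs on the task's behalf; this patch removes
 * the sqo_dead flag that used to halt that thread when the creating
 * task exited. SQPOLL may need elevated privileges on older kernels.
 */
#include <stdio.h>
#include <string.h>
#include <liburing.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_params p;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	int ret;

	memset(&p, 0, sizeof(p));
	p.flags = IORING_SETUP_SQPOLL;	/* kernel SQ thread does the submitting */
	p.sq_thread_idle = 2000;	/* idle timeout for the SQ thread, in ms */

	ret = io_uring_queue_init_params(8, &ring, &p);
	if (ret < 0) {
		fprintf(stderr, "queue_init: %s\n", strerror(-ret));
		return 1;
	}

	sqe = io_uring_get_sqe(&ring);
	if (sqe) {
		io_uring_prep_nop(sqe);
		/* wakes the SQ thread (IORING_ENTER_SQ_WAKEUP) if it idled */
		io_uring_submit(&ring);
		ret = io_uring_wait_cqe(&ring, &cqe);
		if (!ret)
			io_uring_cqe_seen(&ring, cqe);
	}

	io_uring_queue_exit(&ring);
	return 0;
}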
+8 −37
@@ -338,7 +338,6 @@ struct io_ring_ctx {
		unsigned int		drain_next: 1;
		unsigned int		eventfd_async: 1;
		unsigned int		restricted: 1;
-		unsigned int		sqo_dead: 1;
		unsigned int		sqo_exec: 1;

		/*
@@ -1967,7 +1966,7 @@ static void __io_req_task_submit(struct io_kiocb *req)

	/* ctx stays valid until unlock, even if we drop all ours ctx->refs */
	mutex_lock(&ctx->uring_lock);
-	if (!ctx->sqo_dead && !(current->flags & PF_EXITING) && !current->in_execve)
+	if (!(current->flags & PF_EXITING) && !current->in_execve)
		__io_queue_sqe(req);
	else
		__io_req_task_cancel(req, -EFAULT);
@@ -6578,8 +6577,7 @@ static int __io_sq_thread(struct io_ring_ctx *ctx, bool cap_entries)
		if (!list_empty(&ctx->iopoll_list))
			io_do_iopoll(ctx, &nr_events, 0);

-		if (to_submit && !ctx->sqo_dead &&
-		    likely(!percpu_ref_is_dying(&ctx->refs)))
+		if (to_submit && likely(!percpu_ref_is_dying(&ctx->refs)))
			ret = io_submit_sqes(ctx, to_submit);
		mutex_unlock(&ctx->uring_lock);
	}
@@ -7818,7 +7816,7 @@ static int io_sq_thread_fork(struct io_sq_data *sqd, struct io_ring_ctx *ctx)

	clear_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state);
	reinit_completion(&sqd->completion);
-	ctx->sqo_dead = ctx->sqo_exec = 0;
+	ctx->sqo_exec = 0;
	sqd->task_pid = current->pid;
	current->flags |= PF_IO_WORKER;
	ret = io_wq_fork_thread(io_sq_thread, sqd);
@@ -8529,10 +8527,6 @@ static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
{
	mutex_lock(&ctx->uring_lock);
	percpu_ref_kill(&ctx->refs);
-
-	if (WARN_ON_ONCE((ctx->flags & IORING_SETUP_SQPOLL) && !ctx->sqo_dead))
-		ctx->sqo_dead = 1;
-
	/* if force is set, the ring is going away. always drop after that */
	ctx->cq_overflow_flushed = 1;
	if (ctx->rings)
@@ -8692,19 +8686,6 @@ static void io_uring_cancel_files(struct io_ring_ctx *ctx,
	}
}

-static void io_disable_sqo_submit(struct io_ring_ctx *ctx)
-{
-	mutex_lock(&ctx->uring_lock);
-	ctx->sqo_dead = 1;
-	if (ctx->flags & IORING_SETUP_R_DISABLED)
-		io_sq_offload_start(ctx);
-	mutex_unlock(&ctx->uring_lock);
-
-	/* make sure callers enter the ring to get error */
-	if (ctx->rings)
-		io_ring_set_wakeup_flag(ctx);
-}
-
/*
 * We need to iteratively cancel requests, in case a request has dependent
 * hard links. These persist even for failure of cancelations, hence keep
@@ -8717,7 +8698,11 @@ static void io_uring_cancel_task_requests(struct io_ring_ctx *ctx,
	bool did_park = false;

	if ((ctx->flags & IORING_SETUP_SQPOLL) && ctx->sq_data) {
-		io_disable_sqo_submit(ctx);
+		/* never started, nothing to cancel */
+		if (ctx->flags & IORING_SETUP_R_DISABLED) {
+			io_sq_offload_start(ctx);
+			return;
+		}
		did_park = io_sq_thread_park(ctx->sq_data);
		if (did_park) {
			task = ctx->sq_data->thread;
@@ -8838,7 +8823,6 @@ static void io_uring_cancel_sqpoll(struct io_ring_ctx *ctx)

	if (!sqd)
		return;
-	io_disable_sqo_submit(ctx);
	if (!io_sq_thread_park(sqd))
		return;
	tctx = ctx->sq_data->thread->io_uring;
@@ -8883,7 +8867,6 @@ void __io_uring_task_cancel(void)
	/* make sure overflow events are dropped */
	atomic_inc(&tctx->in_idle);

-	/* trigger io_disable_sqo_submit() */
	if (tctx->sqpoll) {
		struct file *file;
		unsigned long index;
@@ -8996,22 +8979,14 @@ static int io_sqpoll_wait_sq(struct io_ring_ctx *ctx)
	do {
		if (!io_sqring_full(ctx))
			break;
-
		prepare_to_wait(&ctx->sqo_sq_wait, &wait, TASK_INTERRUPTIBLE);

-		if (unlikely(ctx->sqo_dead)) {
-			ret = -EOWNERDEAD;
-			goto out;
-		}
-
		if (!io_sqring_full(ctx))
			break;
-
		schedule();
	} while (!signal_pending(current));

	finish_wait(&ctx->sqo_sq_wait, &wait);
-out:
	return ret;
}

@@ -9093,8 +9068,6 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
			ctx->sqo_exec = 0;
		}
		ret = -EOWNERDEAD;
-		if (unlikely(ctx->sqo_dead))
-			goto out;
		if (flags & IORING_ENTER_SQ_WAKEUP)
			wake_up(&ctx->sq_data->wait);
		if (flags & IORING_ENTER_SQ_WAIT) {
@@ -9466,7 +9439,6 @@ static int io_uring_create(unsigned entries, struct io_uring_params *p,
	 */
	ret = io_uring_install_fd(ctx, file);
	if (ret < 0) {
-		io_disable_sqo_submit(ctx);
		/* fput will clean it up */
		fput(file);
		return ret;
@@ -9475,7 +9447,6 @@ static int io_uring_create(unsigned entries, struct io_uring_params *p,
	trace_io_uring_create(ret, ctx, p->sq_entries, p->cq_entries, p->flags);
	return ret;
err:
-	io_disable_sqo_submit(ctx);
	io_ring_ctx_wait_and_kill(ctx);
	return ret;
}
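
The [axboe] fixup above concerns rings created with IORING_SETUP_R_DISABLED: such a ring has no running SQ thread until userspace enables it, so the cancel path now starts the offload before exiting. A hedged sketch of that lifecycle, again against liburing (whose io_uring_enable_rings() wraps IORING_REGISTER_ENABLE_RINGS); illustrative only, not part of the commit:

#include <stdio.h>
#include <string.h>
#include <liburing.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_params p;
	int ret;

	memset(&p, 0, sizeof(p));
	/* start disabled: no SQ thread runs until the ring is enabled */
	p.flags = IORING_SETUP_SQPOLL | IORING_SETUP_R_DISABLED;

	ret = io_uring_queue_init_params(8, &ring, &p);
	if (ret < 0) {
		fprintf(stderr, "queue_init: %s\n", strerror(-ret));
		return 1;
	}

	/*
	 * Exiting before this call is the case the fixup handles:
	 * io_sq_offload_start() is now invoked from the cancel path,
	 * so a never-enabled SQPOLL ring still tears down cleanly.
	 */
	ret = io_uring_enable_rings(&ring);
	if (ret < 0)
		fprintf(stderr, "enable_rings: %s\n", strerror(-ret));

	io_uring_queue_exit(&ring);
	return 0;
}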