Commit 9936c7c2 authored by Pavel Begunkov, committed by Jens Axboe
Browse files

io_uring: deduplicate core cancellations sequence



Files and task cancellations go over same steps trying to cancel
requests in io-wq, poll, etc. Deduplicate it with a helper.

note: new io_uring_try_cancel_requests() is former
__io_uring_cancel_task_requests() with files passed as an argument and
flushing overflowed requests.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 57cd657b
Loading
Loading
Loading
Loading
+40 −45
Original line number Diff line number Diff line
@@ -1003,9 +1003,9 @@ enum io_mem_account {
	ACCT_PINNED,
};

static void __io_uring_cancel_task_requests(struct io_ring_ctx *ctx,
					    struct task_struct *task);

static void io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
					 struct task_struct *task,
					 struct files_struct *files);
static void destroy_fixed_rsrc_ref_node(struct fixed_rsrc_ref_node *ref_node);
static struct fixed_rsrc_ref_node *alloc_fixed_rsrc_ref_node(
			struct io_ring_ctx *ctx);
@@ -8817,7 +8817,7 @@ static void io_ring_exit_work(struct work_struct *work)
	 * as nobody else will be looking for them.
	 */
	do {
		__io_uring_cancel_task_requests(ctx, NULL);
		io_uring_try_cancel_requests(ctx, NULL, NULL);
	} while (!wait_for_completion_timeout(&ctx->ref_comp, HZ/20));
	io_ring_ctx_free(ctx);
}
@@ -8931,6 +8931,40 @@ static void io_cancel_defer_files(struct io_ring_ctx *ctx,
	}
}

/*
 * Try to cancel all in-flight requests on @ctx that match @task/@files.
 * Loops until a full pass finds nothing left to cancel: each iteration
 * sweeps io-wq work, iopoll, poll requests, timeouts, and runs pending
 * task work, accumulating into @ret whether anything was found.
 *
 * @task == NULL (and @files == NULL) means "cancel everything" — used by
 * io_ring_exit_work(); io_cancel_task_cb presumably treats NULL as a
 * wildcard match (TODO: confirm against its definition, not visible here).
 */
static void io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
					 struct task_struct *task,
					 struct files_struct *files)
{
	struct io_task_cancel cancel = { .task = task, .files = files, };

	while (1) {
		enum io_wq_cancel cret;
		bool ret = false;

		if (ctx->io_wq) {
			/* cancel_all=true: sweep every matching io-wq work item */
			cret = io_wq_cancel_cb(ctx->io_wq, io_cancel_task_cb,
					       &cancel, true);
			ret |= (cret != IO_WQ_CANCEL_NOTFOUND);
		}

		/* SQPOLL thread does its own polling */
		if (!(ctx->flags & IORING_SETUP_SQPOLL) && !files) {
			while (!list_empty_careful(&ctx->iopoll_list)) {
				io_iopoll_try_reap_events(ctx);
				ret = true;
			}
		}

		ret |= io_poll_remove_all(ctx, task, files);
		ret |= io_kill_timeouts(ctx, task, files);
		/* cancellations above may have queued task work; run it now */
		ret |= io_run_task_work();
		/*
		 * Flush overflowed CQEs every pass; deliberately not folded
		 * into @ret, so a flush alone does not keep the loop going.
		 */
		io_cqring_overflow_flush(ctx, true, task, files);
		if (!ret)
			break;
		/* something was cancelled; yield before the next sweep */
		cond_resched();
	}
}

static int io_uring_count_inflight(struct io_ring_ctx *ctx,
				   struct task_struct *task,
				   struct files_struct *files)
@@ -8950,7 +8984,6 @@ static void io_uring_cancel_files(struct io_ring_ctx *ctx,
				  struct files_struct *files)
{
	while (!list_empty_careful(&ctx->inflight_list)) {
		struct io_task_cancel cancel = { .task = task, .files = files };
		DEFINE_WAIT(wait);
		int inflight;

@@ -8958,13 +8991,7 @@ static void io_uring_cancel_files(struct io_ring_ctx *ctx,
		if (!inflight)
			break;

		io_wq_cancel_cb(ctx->io_wq, io_cancel_task_cb, &cancel, true);
		io_poll_remove_all(ctx, task, files);
		io_kill_timeouts(ctx, task, files);
		io_cqring_overflow_flush(ctx, true, task, files);
		/* cancellations _may_ trigger task work */
		io_run_task_work();

		io_uring_try_cancel_requests(ctx, task, files);
		prepare_to_wait(&task->io_uring->wait, &wait,
				TASK_UNINTERRUPTIBLE);
		if (inflight == io_uring_count_inflight(ctx, task, files))
@@ -8973,37 +9000,6 @@ static void io_uring_cancel_files(struct io_ring_ctx *ctx,
	}
}

/*
 * Cancel all of @task's requests on @ctx that are NOT tied to a files
 * table (.files is hardcoded to NULL). Loops until one full sweep of
 * io-wq, iopoll, poll, and timeouts finds nothing left to cancel.
 *
 * Removed by this commit: superseded by io_uring_try_cancel_requests(),
 * which takes @files as a parameter and additionally flushes the CQ
 * overflow list each pass.
 */
static void __io_uring_cancel_task_requests(struct io_ring_ctx *ctx,
					    struct task_struct *task)
{
	while (1) {
		struct io_task_cancel cancel = { .task = task, .files = NULL, };
		enum io_wq_cancel cret;
		bool ret = false;

		if (ctx->io_wq) {
			/* cancel_all=true: sweep every matching io-wq work item */
			cret = io_wq_cancel_cb(ctx->io_wq, io_cancel_task_cb,
					       &cancel, true);
			ret |= (cret != IO_WQ_CANCEL_NOTFOUND);
		}

		/* SQPOLL thread does its own polling */
		if (!(ctx->flags & IORING_SETUP_SQPOLL)) {
			while (!list_empty_careful(&ctx->iopoll_list)) {
				io_iopoll_try_reap_events(ctx);
				ret = true;
			}
		}

		ret |= io_poll_remove_all(ctx, task, NULL);
		ret |= io_kill_timeouts(ctx, task, NULL);
		/* cancellations above may have queued task work; run it now */
		ret |= io_run_task_work();
		if (!ret)
			break;
		/* something was cancelled; yield before the next sweep */
		cond_resched();
	}
}

static void io_disable_sqo_submit(struct io_ring_ctx *ctx)
{
	mutex_lock(&ctx->uring_lock);
@@ -9033,11 +9029,10 @@ static void io_uring_cancel_task_requests(struct io_ring_ctx *ctx,
	}

	io_cancel_defer_files(ctx, task, files);
	io_cqring_overflow_flush(ctx, true, task, files);

	io_uring_cancel_files(ctx, task, files);
	if (!files)
		__io_uring_cancel_task_requests(ctx, task);
		io_uring_try_cancel_requests(ctx, task, NULL);

	if ((ctx->flags & IORING_SETUP_SQPOLL) && ctx->sq_data) {
		atomic_dec(&task->io_uring->in_idle);