Commit e1915f76 authored by Pavel Begunkov, committed by Jens Axboe
Browse files

io_uring: cancel deferred requests in try_cancel



As io_uring_cancel_files() and others let the SQPOLL task (SQO) run between
calls to io_uring_try_cancel_requests(), SQO may generate new deferred
requests, so it's safer to try to cancel them there as well.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent d052d1d6
Loading
Loading
Loading
Loading
+6 −4
Original line number Diff line number Diff line
@@ -8577,11 +8577,11 @@ static bool io_cancel_task_cb(struct io_wq_work *work, void *data)
	return ret;
}

static void io_cancel_defer_files(struct io_ring_ctx *ctx,
static bool io_cancel_defer_files(struct io_ring_ctx *ctx,
				  struct task_struct *task,
				  struct files_struct *files)
{
	struct io_defer_entry *de = NULL;
	struct io_defer_entry *de;
	LIST_HEAD(list);

	spin_lock_irq(&ctx->completion_lock);
@@ -8592,6 +8592,8 @@ static void io_cancel_defer_files(struct io_ring_ctx *ctx,
		}
	}
	spin_unlock_irq(&ctx->completion_lock);
	if (list_empty(&list))
		return false;

	while (!list_empty(&list)) {
		de = list_first_entry(&list, struct io_defer_entry, list);
@@ -8601,6 +8603,7 @@ static void io_cancel_defer_files(struct io_ring_ctx *ctx,
		io_req_complete(de->req, -ECANCELED);
		kfree(de);
	}
	return true;
}

static bool io_cancel_ctx_cb(struct io_wq_work *work, void *data)
@@ -8666,6 +8669,7 @@ static void io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
			}
		}

		ret |= io_cancel_defer_files(ctx, task, files);
		ret |= io_poll_remove_all(ctx, task, files);
		ret |= io_kill_timeouts(ctx, task, files);
		ret |= io_run_task_work();
@@ -8734,8 +8738,6 @@ static void io_uring_cancel_task_requests(struct io_ring_ctx *ctx,
			atomic_inc(&task->io_uring->in_idle);
	}

	io_cancel_defer_files(ctx, task, files);

	io_uring_cancel_files(ctx, task, files);
	if (!files)
		io_uring_try_cancel_requests(ctx, task, NULL);