Commit f152165a authored by Linus Torvalds

Merge tag 'io_uring-5.16-2021-12-10' of git://git.kernel.dk/linux-block

Pull io_uring fixes from Jens Axboe:
 "A few fixes that are all bound for stable:

   - Two syzbot reports for io-wq that turned out to be separate fixes,
     but ultimately very closely related

   - io_uring task_work running on cancelations"

* tag 'io_uring-5.16-2021-12-10' of git://git.kernel.dk/linux-block:
  io-wq: check for wq exit after adding new worker task_work
  io_uring: ensure task_work gets run as part of cancelations
  io-wq: remove spurious bit clear on task_work addition
parents bd66be54 71a85387
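
The io-wq "check for wq exit after adding new worker task_work" fix closes the race described in the new comment in the diff below: IO_WQ_BIT_EXIT can be set after the earlier check in io_queue_worker_create(), so a worker-creation task_work item can be queued after io_wq_exit_workers() has already canceled pending items. The fix re-checks the exit bit after task_work_add() and cancels any just-added creation item itself. As a rough illustration only, here is a minimal userspace C sketch of that "publish, then re-check the exit flag, then cancel your own item" ordering; all names (wq_exit, pending, cancel_pending, queue_create_item, exit_workers) are hypothetical stand-ins, not kernel APIs.

/* Simplified userspace model of the re-check-after-publish ordering. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool wq_exit;	/* stand-in for IO_WQ_BIT_EXIT */
static atomic_int pending;	/* stand-in for queued create_work items */

static void cancel_pending(void)	/* stand-in for io_wq_cancel_tw_create() */
{
	int n = atomic_exchange(&pending, 0);

	if (n)
		printf("canceled %d pending item(s)\n", n);
}

static void *queue_create_item(void *arg)
{
	(void)arg;
	if (atomic_load(&wq_exit))	/* the pre-existing early exit check */
		return NULL;

	atomic_fetch_add(&pending, 1);	/* "task_work_add()" succeeded */

	/*
	 * Exit may have been set between the check above and the add, and
	 * the exit side may already have drained the queue, so re-check and
	 * drop our own item if needed (mirrors the new test_bit() below).
	 */
	if (atomic_load(&wq_exit))
		cancel_pending();
	return NULL;
}

static void *exit_workers(void *arg)
{
	(void)arg;
	atomic_store(&wq_exit, true);	/* stand-in for io_wq_exit_start() */
	cancel_pending();		/* drain whatever was queued so far */
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, queue_create_item, NULL);
	pthread_create(&b, NULL, exit_workers, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	printf("items left pending: %d\n", atomic_load(&pending));
	return 0;
}

Either thread may run first; with the re-check in place no item survives past exit, which is the property the kernel fix restores.
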
fs/io-wq.c  +23 −6
@@ -142,6 +142,7 @@ static bool io_acct_cancel_pending_work(struct io_wqe *wqe,
 					struct io_wqe_acct *acct,
 					struct io_cb_cancel_data *match);
 static void create_worker_cb(struct callback_head *cb);
+static void io_wq_cancel_tw_create(struct io_wq *wq);
 
 static bool io_worker_get(struct io_worker *worker)
 {
@@ -357,12 +358,22 @@ static bool io_queue_worker_create(struct io_worker *worker,
 	    test_and_set_bit_lock(0, &worker->create_state))
 		goto fail_release;
 
+	atomic_inc(&wq->worker_refs);
 	init_task_work(&worker->create_work, func);
 	worker->create_index = acct->index;
 	if (!task_work_add(wq->task, &worker->create_work, TWA_SIGNAL)) {
-		clear_bit_unlock(0, &worker->create_state);
+		/*
+		 * EXIT may have been set after checking it above, check after
+		 * adding the task_work and remove any creation item if it is
+		 * now set. wq exit does that too, but we can have added this
+		 * work item after we canceled in io_wq_exit_workers().
+		 */
+		if (test_bit(IO_WQ_BIT_EXIT, &wq->state))
+			io_wq_cancel_tw_create(wq);
+		io_worker_ref_put(wq);
 		return true;
 	}
+	io_worker_ref_put(wq);
 	clear_bit_unlock(0, &worker->create_state);
 fail_release:
 	io_worker_release(worker);
@@ -1198,13 +1209,9 @@ void io_wq_exit_start(struct io_wq *wq)
 	set_bit(IO_WQ_BIT_EXIT, &wq->state);
 }
 
-static void io_wq_exit_workers(struct io_wq *wq)
+static void io_wq_cancel_tw_create(struct io_wq *wq)
 {
 	struct callback_head *cb;
-	int node;
-
-	if (!wq->task)
-		return;
 
 	while ((cb = task_work_cancel_match(wq->task, io_task_work_match, wq)) != NULL) {
 		struct io_worker *worker;
@@ -1212,6 +1219,16 @@ static void io_wq_exit_workers(struct io_wq *wq)
 		worker = container_of(cb, struct io_worker, create_work);
 		io_worker_cancel_cb(worker);
 	}
+}
+
+static void io_wq_exit_workers(struct io_wq *wq)
+{
+	int node;
+
+	if (!wq->task)
+		return;
+
+	io_wq_cancel_tw_create(wq);
 
 	rcu_read_lock();
 	for_each_node(node) {
fs/io_uring.c  +4 −2
@@ -9824,7 +9824,7 @@ static __cold void io_uring_drop_tctx_refs(struct task_struct *task)
 
 /*
  * Find any io_uring ctx that this task has registered or done IO on, and cancel
- * requests. @sqd should be not-null IIF it's an SQPOLL thread cancellation.
+ * requests. @sqd should be not-null IFF it's an SQPOLL thread cancellation.
  */
 static __cold void io_uring_cancel_generic(bool cancel_all,
 					   struct io_sq_data *sqd)
@@ -9866,8 +9866,10 @@ static __cold void io_uring_cancel_generic(bool cancel_all,
 							     cancel_all);
 		}
 
-		prepare_to_wait(&tctx->wait, &wait, TASK_UNINTERRUPTIBLE);
+		prepare_to_wait(&tctx->wait, &wait, TASK_INTERRUPTIBLE);
+		io_run_task_work();
 		io_uring_drop_tctx_refs(current);
+
		/*
 		 * If we've seen completions, retry without waiting. This
 		 * avoids a race where a completion comes in before we did
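
The io_uring change above makes the cancelation loop run its own pending task_work (the new io_run_task_work() call) after prepare_to_wait() but before deciding to sleep, and it waits TASK_INTERRUPTIBLE instead of TASK_UNINTERRUPTIBLE, presumably so TWA_SIGNAL-style notifications can still interrupt the wait. The general shape is "drain your own deferred callbacks before blocking on a condition they may be needed to satisfy". Below is a hedged userspace sketch of that shape only; run_task_work, have_task_work and inflight are hypothetical stand-ins, not the io_uring implementation.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int inflight = 1;		/* requests still being canceled */
static bool have_task_work = true;	/* deferred callback pending locally */

static void run_task_work(void)		/* stand-in for io_run_task_work() */
{
	if (have_task_work) {
		have_task_work = false;
		inflight--;		/* the deferred callback completes a request */
	}
}

int main(void)
{
	pthread_mutex_lock(&lock);
	while (inflight) {
		/*
		 * Run deferred work before sleeping; skipping this step would
		 * leave us blocked on a completion only we could produce.
		 */
		run_task_work();
		if (!inflight)
			break;
		pthread_cond_wait(&cond, &lock);
	}
	pthread_mutex_unlock(&lock);
	printf("all requests canceled\n");
	return 0;
}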