Commit 3f48cf18 authored by Pavel Begunkov, committed by Jens Axboe
Browse files

io_uring: unify files and task cancel



Now __io_uring_task_cancel() and __io_uring_files_cancel() are very similar
and mostly differ by how we count requests. Merge them and allow
tctx_inflight() to handle the counting.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/1a5986a97df4dc1378f3fe0ca1eb483dbcf42112.1618101759.git.asml.silence@gmail.com


Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent b303fe2e
Loading
Loading
Loading
Loading
+14 −42
Original line number Diff line number Diff line
@@ -8920,13 +8920,10 @@ static void io_uring_clean_tctx(struct io_uring_task *tctx)
	}
}

-static s64 tctx_inflight_tracked(struct io_uring_task *tctx)
+static s64 tctx_inflight(struct io_uring_task *tctx, bool tracked)
{
	if (tracked)
		return atomic_read(&tctx->inflight_tracked);
}

static s64 tctx_inflight(struct io_uring_task *tctx)
{
	return percpu_counter_sum(&tctx->inflight);
}

@@ -8993,7 +8990,7 @@ static void io_uring_cancel_sqpoll(struct io_ring_ctx *ctx)
	atomic_inc(&tctx->in_idle);
	do {
		/* read completions before cancelations */
-		inflight = tctx_inflight(tctx);
+		inflight = tctx_inflight(tctx, false);
		if (!inflight)
			break;
		io_uring_try_cancel_requests(ctx, current, NULL);
@@ -9004,43 +9001,18 @@ static void io_uring_cancel_sqpoll(struct io_ring_ctx *ctx)
		 * avoids a race where a completion comes in before we did
		 * prepare_to_wait().
		 */
		if (inflight == tctx_inflight(tctx))
			schedule();
		finish_wait(&tctx->wait, &wait);
	} while (1);
	atomic_dec(&tctx->in_idle);
}

void __io_uring_files_cancel(struct files_struct *files)
{
	struct io_uring_task *tctx = current->io_uring;
	DEFINE_WAIT(wait);
	s64 inflight;

	/* make sure overflow events are dropped */
	atomic_inc(&tctx->in_idle);
	do {
		/* read completions before cancelations */
		inflight = tctx_inflight_tracked(tctx);
		if (!inflight)
			break;
		io_uring_try_cancel(files);

		prepare_to_wait(&tctx->wait, &wait, TASK_UNINTERRUPTIBLE);
-		if (inflight == tctx_inflight_tracked(tctx))
+		if (inflight == tctx_inflight(tctx, false))
			schedule();
		finish_wait(&tctx->wait, &wait);
	} while (1);
	atomic_dec(&tctx->in_idle);

	io_uring_clean_tctx(tctx);
}

/*
 * Find any io_uring fd that this task has registered or done IO on, and cancel
 * requests.
 */
-void __io_uring_task_cancel(void)
+void __io_uring_cancel(struct files_struct *files)
{
	struct io_uring_task *tctx = current->io_uring;
	DEFINE_WAIT(wait);
@@ -9048,15 +9020,14 @@ void __io_uring_task_cancel(void)

	/* make sure overflow events are dropped */
	atomic_inc(&tctx->in_idle);
-	__io_uring_files_cancel(NULL);
+	io_uring_try_cancel(files);

	do {
		/* read completions before cancelations */
-		inflight = tctx_inflight(tctx);
+		inflight = tctx_inflight(tctx, !!files);
		if (!inflight)
			break;
-		io_uring_try_cancel(NULL);
-
+		io_uring_try_cancel(files);
		prepare_to_wait(&tctx->wait, &wait, TASK_UNINTERRUPTIBLE);

		/*
@@ -9064,17 +9035,18 @@ void __io_uring_task_cancel(void)
		 * avoids a race where a completion comes in before we did
		 * prepare_to_wait().
		 */
-		if (inflight == tctx_inflight(tctx))
+		if (inflight == tctx_inflight(tctx, !!files))
			schedule();
		finish_wait(&tctx->wait, &wait);
	} while (1);

	atomic_dec(&tctx->in_idle);

	io_uring_clean_tctx(tctx);
-	/* all current's requests should be gone, we can kill tctx */
-	__io_uring_free(current);
+	if (!files) {
+		/* for exec all current's requests should be gone, kill tctx */
+		__io_uring_free(current);
+	}
}

static void *io_uring_validate_mmap_request(struct file *file,
					    loff_t pgoff, size_t sz)
+5 −7
Original line number Diff line number Diff line
@@ -7,19 +7,17 @@

#if defined(CONFIG_IO_URING)
struct sock *io_uring_get_socket(struct file *file);
-void __io_uring_task_cancel(void);
-void __io_uring_files_cancel(struct files_struct *files);
+void __io_uring_cancel(struct files_struct *files);
void __io_uring_free(struct task_struct *tsk);

-static inline void io_uring_task_cancel(void)
+static inline void io_uring_files_cancel(struct files_struct *files)
{
	if (current->io_uring)
-		__io_uring_task_cancel();
+		__io_uring_cancel(files);
}
-static inline void io_uring_files_cancel(struct files_struct *files)
+static inline void io_uring_task_cancel(void)
{
-	if (current->io_uring)
-		__io_uring_files_cancel(files);
+	return io_uring_files_cancel(NULL);
}
static inline void io_uring_free(struct task_struct *tsk)
{