Commit 91c7884a authored by Pavel Begunkov, committed by Jens Axboe

io_uring: remove IOU_F_TWQ_FORCE_NORMAL

Extract a function for non-local task_work_add and use it directly from
io_move_task_work_from_local(). With that, IOU_F_TWQ_FORCE_NORMAL is no
longer used and can be killed.

As a small positive side effect, the task->io_uring lookup moves out of
the common entry point and into io_req_normal_work_add(), so the
io_req_local_work_add() path, which never needed it, no longer pays
for it.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/2e55571e8ff2927ae3cc12da606d204e2485525b.1687518903.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 2fdd6fb5
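
For orientation, here is a minimal userspace model (plain C, compiles
stand-alone) of the control-flow change this patch makes. The structs,
the IORING_SETUP_DEFER_TASKRUN value, and the printf bodies are
stand-ins, and io_move_task_work_from_local() is reduced to handling a
single request instead of draining a list; only the shape of the
dispatch mirrors the patch.

#include <stdio.h>

#define IORING_SETUP_DEFER_TASKRUN	(1U << 0)	/* stand-in value */

struct io_ring_ctx { unsigned flags; };
struct io_kiocb { struct io_ring_ctx *ctx; };

static void io_req_local_work_add(struct io_kiocb *req, unsigned flags)
{
	(void)req; (void)flags;
	printf("queued as DEFER_TASKRUN local work\n");
}

/* Extracted helper: the non-local task_work path, no flag needed. */
static void io_req_normal_work_add(struct io_kiocb *req)
{
	(void)req;
	printf("queued as normal task_work\n");
}

/* After the patch: a plain two-way dispatch, no IOU_F_TWQ_FORCE_NORMAL. */
void __io_req_task_work_add(struct io_kiocb *req, unsigned flags)
{
	if (req->ctx->flags & IORING_SETUP_DEFER_TASKRUN)
		io_req_local_work_add(req, flags);
	else
		io_req_normal_work_add(req);
}

/* Callers that must bypass the local path call the helper directly
 * instead of passing a force-normal flag through the entry point. */
static void io_move_task_work_from_local(struct io_kiocb *req)
{
	io_req_normal_work_add(req);
}

int main(void)
{
	struct io_ring_ctx ctx = { .flags = IORING_SETUP_DEFER_TASKRUN };
	struct io_kiocb req = { .ctx = &ctx };

	__io_req_task_work_add(&req, 0);	/* takes the local path */
	io_move_task_work_from_local(&req);	/* forced normal, no flag */
	return 0;
}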
io_uring/io_uring.c  +14 −11
@@ -1317,7 +1317,7 @@ static __cold void io_fallback_tw(struct io_uring_task *tctx)
 	}
 }
 
-static void io_req_local_work_add(struct io_kiocb *req, unsigned flags)
+static inline void io_req_local_work_add(struct io_kiocb *req, unsigned flags)
 {
 	struct io_ring_ctx *ctx = req->ctx;
 	unsigned nr_wait, nr_tw, nr_tw_prev;
@@ -1368,19 +1368,11 @@ static void io_req_local_work_add(struct io_kiocb *req, unsigned flags)
 	wake_up_state(ctx->submitter_task, TASK_INTERRUPTIBLE);
 }
 
-void __io_req_task_work_add(struct io_kiocb *req, unsigned flags)
+static void io_req_normal_work_add(struct io_kiocb *req)
 {
 	struct io_uring_task *tctx = req->task->io_uring;
 	struct io_ring_ctx *ctx = req->ctx;
 
-	if (!(flags & IOU_F_TWQ_FORCE_NORMAL) &&
-	    (ctx->flags & IORING_SETUP_DEFER_TASKRUN)) {
-		rcu_read_lock();
-		io_req_local_work_add(req, flags);
-		rcu_read_unlock();
-		return;
-	}
-
 	/* task_work already pending, we're done */
 	if (!llist_add(&req->io_task_work.node, &tctx->task_list))
 		return;
@@ -1394,6 +1386,17 @@ void __io_req_task_work_add(struct io_kiocb *req, unsigned flags)
 	io_fallback_tw(tctx);
 }
 
+void __io_req_task_work_add(struct io_kiocb *req, unsigned flags)
+{
+	if (req->ctx->flags & IORING_SETUP_DEFER_TASKRUN) {
+		rcu_read_lock();
+		io_req_local_work_add(req, flags);
+		rcu_read_unlock();
+	} else {
+		io_req_normal_work_add(req);
+	}
+}
+
 static void __cold io_move_task_work_from_local(struct io_ring_ctx *ctx)
 {
 	struct llist_node *node;
@@ -1404,7 +1407,7 @@ static void __cold io_move_task_work_from_local(struct io_ring_ctx *ctx)
 						    io_task_work.node);
 
 		node = node->next;
-		__io_req_task_work_add(req, IOU_F_TWQ_FORCE_NORMAL);
+		io_req_normal_work_add(req);
 	}
 }

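A detail worth calling out in the hunk above is the batching trick in
io_req_normal_work_add(): the kernel's llist_add() reports whether the
lock-free list was previously empty, so only the request that turns it
non-empty pays for the (comparatively expensive) notification. Below is
a userspace sketch of that pattern using C11 atomics; list_add() is
modeled on llist_add(), and kick_worker() is a hypothetical stand-in
for task_work_add(), not a real API.

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct node { struct node *next; };

static _Atomic(struct node *) task_list;

/* Lock-free push modeled on the kernel's llist_add(): returns true if
 * the list was previously empty. */
static bool list_add(struct node *n, _Atomic(struct node *) *head)
{
	struct node *first = atomic_load(head);

	do {
		n->next = first;
	} while (!atomic_compare_exchange_weak(head, &first, n));
	return first == NULL;
}

static void kick_worker(void)
{
	printf("notify worker once for the whole batch\n");
}

static void work_add(struct node *req)
{
	/* task_work already pending, we're done */
	if (!list_add(req, &task_list))
		return;
	kick_worker();
}

int main(void)
{
	struct node a, b, c;

	work_add(&a);	/* list was empty: kicks the worker */
	work_add(&b);	/* batched behind a: no kick */
	work_add(&c);	/* batched: no kick */
	return 0;
}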
io_uring/io_uring.h  +1 −4
@@ -16,9 +16,6 @@
 #endif
 
 enum {
-	/* don't use deferred task_work */
-	IOU_F_TWQ_FORCE_NORMAL			= 1,
-
 	/*
	 * A hint to not wake right away but delay until there are enough of
	 * tw's queued to match the number of CQEs the task is waiting for.
@@ -26,7 +23,7 @@ enum {
	 * Must not be used wirh requests generating more than one CQE.
	 * It's also ignored unless IORING_SETUP_DEFER_TASKRUN is set.
	 */
-	IOU_F_TWQ_LAZY_WAKE			= 2,
+	IOU_F_TWQ_LAZY_WAKE			= 1,
 };
 
 enum {
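
A note on the renumbering in this last hunk: the IOU_F_TWQ_* values are
single-bit flags tested with a bitwise AND, and they live in an internal
io_uring header rather than the UAPI, so freeing bit 0 and moving
IOU_F_TWQ_LAZY_WAKE from 2 to 1 changes nothing for callers. A minimal
illustrative sketch of the resulting flag space (the enum value comes
from the patch; the surrounding code is made up):

#include <stdio.h>

enum {
	IOU_F_TWQ_LAZY_WAKE		= 1,	/* sole remaining tw queueing flag */
};

int main(void)
{
	unsigned flags = IOU_F_TWQ_LAZY_WAKE;

	/* Flag tests are bitwise, so only the bit pattern matters,
	 * not the particular enumerator value. */
	if (flags & IOU_F_TWQ_LAZY_WAKE)
		printf("lazy wake requested\n");
	return 0;
}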