Commit 63809137 authored by Pavel Begunkov's avatar Pavel Begunkov Committed by Jens Axboe
Browse files

io_uring: flush notifiers after sendzc



Allow flushing notifiers as a part of a sendzc request by setting the
IORING_RECVSEND_NOTIF_FLUSH flag. When the sendzc request succeeds it will
flush the used [active] notifier.

Signed-off-by: default avatarPavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/e0b4d9a6797e2fd6092824fe42953db7a519bbc8.1657643355.git.asml.silence@gmail.com


Signed-off-by: default avatarJens Axboe <axboe@kernel.dk>
parent 10c7d33e
Loading
Loading
Loading
Loading
+4 −0
Original line number Diff line number Diff line
@@ -275,10 +275,14 @@ enum io_uring_op {
 *
 * IORING_RECVSEND_FIXED_BUF	Use registered buffers, the index is stored in
 *				the buf_index field.
 *
 * IORING_RECVSEND_NOTIF_FLUSH	Flush a notification after a successful
 *				send. Only for zerocopy sends.
 */
#define IORING_RECVSEND_POLL_FIRST	(1U << 0)
#define IORING_RECV_MULTISHOT		(1U << 1)
#define IORING_RECVSEND_FIXED_BUF	(1U << 2)
#define IORING_RECVSEND_NOTIF_FLUSH	(1U << 3)

/*
 * accept flags stored in sqe->ioprio
+1 −10
Original line number Diff line number Diff line
@@ -621,7 +621,7 @@ void __io_put_task(struct task_struct *task, int nr)
	put_task_struct_many(task, nr);
}

static void io_task_refs_refill(struct io_uring_task *tctx)
void io_task_refs_refill(struct io_uring_task *tctx)
{
	unsigned int refill = -tctx->cached_refs + IO_TCTX_REFS_CACHE_NR;

@@ -630,15 +630,6 @@ static void io_task_refs_refill(struct io_uring_task *tctx)
	tctx->cached_refs += refill;
}

/*
 * Take @nr task references from the per-task cache; if the cache would
 * go negative, top it back up via io_task_refs_refill().
 */
static inline void io_get_task_refs(int nr)
{
	struct io_uring_task *tctx = current->io_uring;

	tctx->cached_refs -= nr;
	if (unlikely(tctx->cached_refs < 0))
		io_task_refs_refill(tctx);
}

static __cold void io_uring_drop_tctx_refs(struct task_struct *task)
{
	struct io_uring_task *tctx = task->io_uring;
+10 −0
Original line number Diff line number Diff line
@@ -74,6 +74,7 @@ void io_wq_submit_work(struct io_wq_work *work);
void io_free_req(struct io_kiocb *req);
void io_queue_next(struct io_kiocb *req);
void __io_put_task(struct task_struct *task, int nr);
void io_task_refs_refill(struct io_uring_task *tctx);

bool io_match_task_safe(struct io_kiocb *head, struct task_struct *task,
			bool cancel_all);
@@ -270,4 +271,13 @@ static inline void io_put_task(struct task_struct *task, int nr)
		__io_put_task(task, nr);
}

/*
 * Consume @nr references from the current task's cached reference pool,
 * refilling the cache with io_task_refs_refill() when it runs dry.
 * NOTE(review): inlined into the header here so other compilation units
 * can use it — presumably needed by the zerocopy-send path; confirm
 * against the callers in this commit.
 */
static inline void io_get_task_refs(int nr)
{
	struct io_uring_task *tctx = current->io_uring;

	tctx->cached_refs -= nr;
	if (unlikely(tctx->cached_refs < 0))
		io_task_refs_refill(tctx);
}

#endif
+4 −1
Original line number Diff line number Diff line
@@ -856,7 +856,8 @@ int io_sendzc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
		return -EINVAL;

	zc->flags = READ_ONCE(sqe->ioprio);
	if (zc->flags & ~(IORING_RECVSEND_POLL_FIRST | IORING_RECVSEND_FIXED_BUF))
	if (zc->flags & ~(IORING_RECVSEND_POLL_FIRST |
			  IORING_RECVSEND_FIXED_BUF | IORING_RECVSEND_NOTIF_FLUSH))
		return -EINVAL;
	if (zc->flags & IORING_RECVSEND_FIXED_BUF) {
		unsigned idx = READ_ONCE(sqe->buf_index);
@@ -958,6 +959,8 @@ int io_sendzc(struct io_kiocb *req, unsigned int issue_flags)
		return ret == -ERESTARTSYS ? -EINTR : ret;
	}

	if (zc->flags & IORING_RECVSEND_NOTIF_FLUSH)
		io_notif_slot_flush_submit(notif_slot, 0);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}
+1 −1
Original line number Diff line number Diff line
@@ -133,7 +133,7 @@ struct io_notif *io_alloc_notif(struct io_ring_ctx *ctx,
	return notif;
}

static void io_notif_slot_flush(struct io_notif_slot *slot)
void io_notif_slot_flush(struct io_notif_slot *slot)
	__must_hold(&ctx->uring_lock)
{
	struct io_notif *notif = slot->notif;
Loading