Commit 77bc59b4 authored by Usama Arif, committed by Jens Axboe

io_uring: avoid ring quiesce while registering/unregistering eventfd

This is done by creating a new RCU-protected data structure (io_ev_fd) as
part of io_ring_ctx that holds the eventfd_ctx.

The function io_eventfd_signal runs under rcu_read_lock with a single
rcu_dereference of io_ev_fd, so that if another thread unregisters the
eventfd while io_eventfd_signal is still executing, the eventfd_signal
call for which io_eventfd_signal was invoked still completes successfully.
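
In outline, the read side reduces to the following (a distilled sketch of
the pattern, mirroring the io_eventfd_signal added in the diff below, not a
standalone implementation):

	rcu_read_lock();
	ev_fd = rcu_dereference(ctx->io_ev_fd);	/* single dereference */
	if (ev_fd)
		eventfd_signal(ev_fd->cq_ev_fd, 1);	/* ev_fd stays valid until unlock */
	rcu_read_unlock();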

Registering and unregistering the eventfd is already done under uring_lock,
so multiple threads cannot race while registering or unregistering it.
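
The update side, again sketched from the diff below, swaps the pointer
while holding uring_lock and defers freeing to an RCU grace period, so
in-flight readers never touch freed memory:

	/* caller holds ctx->uring_lock */
	ev_fd = rcu_dereference_protected(ctx->io_ev_fd,
					lockdep_is_held(&ctx->uring_lock));
	rcu_assign_pointer(ctx->io_ev_fd, NULL);
	/* io_eventfd_put drops the eventfd_ctx and kfrees after the grace period */
	call_rcu(&ev_fd->rcu, io_eventfd_put);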

With the above approach, ring quiesce, which is much more expensive than
taking an RCU lock, can be avoided. On the system tested, io_uring_register
with IORING_REGISTER_EVENTFD takes less than 1ms with the RCU lock,
compared to 15ms before with ring quiesce.
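
For reference, a minimal userspace sketch of the path being sped up,
assuming liburing is available (io_uring_register_eventfd() issues the
IORING_REGISTER_EVENTFD opcode via the io_uring_register(2) syscall);
error handling omitted:

	#include <sys/eventfd.h>
	#include <liburing.h>

	int main(void)
	{
		struct io_uring ring;
		int efd = eventfd(0, 0);

		io_uring_queue_init(8, &ring, 0);
		io_uring_register_eventfd(&ring, efd);	/* no ring quiesce needed anymore */
		/* ... submit SQEs; each CQE posted also signals efd ... */
		io_uring_unregister_eventfd(&ring);
		io_uring_queue_exit(&ring);
		return 0;
	}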

Signed-off-by: Usama Arif <usama.arif@bytedance.com>
Link: https://lore.kernel.org/r/20220204145117.1186568-3-usama.arif@bytedance.com
[axboe: long line fixups]
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 2757be22
fs/io_uring.c: +68 −21
@@ -326,6 +326,11 @@ struct io_submit_state {
 	struct blk_plug		plug;
 };
 
+struct io_ev_fd {
+	struct eventfd_ctx	*cq_ev_fd;
+	struct rcu_head		rcu;
+};
+
 struct io_ring_ctx {
 	/* const or read-mostly hot data */
 	struct {
@@ -399,7 +404,7 @@ struct io_ring_ctx {
 	struct {
 		unsigned		cached_cq_tail;
 		unsigned		cq_entries;
-		struct eventfd_ctx	*cq_ev_fd;
+		struct io_ev_fd	__rcu	*io_ev_fd;
 		struct wait_queue_head	cq_wait;
 		unsigned		cq_extra;
 		atomic_t		cq_timeouts;
@@ -1726,13 +1731,36 @@ static inline struct io_uring_cqe *io_get_cqe(struct io_ring_ctx *ctx)
 	return &rings->cqes[tail & mask];
 }
 
-static inline bool io_should_trigger_evfd(struct io_ring_ctx *ctx)
+static void io_eventfd_signal(struct io_ring_ctx *ctx)
 {
-	if (likely(!ctx->cq_ev_fd))
-		return false;
+	struct io_ev_fd *ev_fd;
+
+	/* Return quickly if ctx->io_ev_fd doesn't exist */
+	if (likely(!rcu_dereference_raw(ctx->io_ev_fd)))
+		return;
+
+	rcu_read_lock();
+	/*
+	 * rcu_dereference ctx->io_ev_fd once and use it both for checking
+	 * and eventfd_signal
+	 */
+	ev_fd = rcu_dereference(ctx->io_ev_fd);
+
+	/*
+	 * Check again if ev_fd exists in case an io_eventfd_unregister call
+	 * completed between the NULL check of ctx->io_ev_fd at the start of
+	 * the function and rcu_read_lock.
+	 */
+	if (unlikely(!ev_fd))
+		goto out;
 	if (READ_ONCE(ctx->rings->cq_flags) & IORING_CQ_EVENTFD_DISABLED)
-		return false;
-	return !ctx->eventfd_async || io_wq_current_is_worker();
+		goto out;
+
+	if (!ctx->eventfd_async || io_wq_current_is_worker())
+		eventfd_signal(ev_fd->cq_ev_fd, 1);
+
+out:
+	rcu_read_unlock();
 }
 
 /*
@@ -1751,8 +1779,7 @@ static void io_cqring_ev_posted(struct io_ring_ctx *ctx)
 	 */
 	if (wq_has_sleeper(&ctx->cq_wait))
 		wake_up_all(&ctx->cq_wait);
-	if (io_should_trigger_evfd(ctx))
-		eventfd_signal(ctx->cq_ev_fd, 1);
+	io_eventfd_signal(ctx);
 }
 
 static void io_cqring_ev_posted_iopoll(struct io_ring_ctx *ctx)
@@ -1764,8 +1791,7 @@ static void io_cqring_ev_posted_iopoll(struct io_ring_ctx *ctx)
 		if (waitqueue_active(&ctx->cq_wait))
 			wake_up_all(&ctx->cq_wait);
 	}
-	if (io_should_trigger_evfd(ctx))
-		eventfd_signal(ctx->cq_ev_fd, 1);
+	io_eventfd_signal(ctx);
 }
 
 /* Returns true if there are no backlogged entries after the flush */
@@ -9361,31 +9387,50 @@ static int __io_sqe_buffers_update(struct io_ring_ctx *ctx,

 static int io_eventfd_register(struct io_ring_ctx *ctx, void __user *arg)
 {
+	struct io_ev_fd *ev_fd;
 	__s32 __user *fds = arg;
-	int fd;
+	int fd, ret;
 
-	if (ctx->cq_ev_fd)
+	ev_fd = rcu_dereference_protected(ctx->io_ev_fd,
+					lockdep_is_held(&ctx->uring_lock));
+	if (ev_fd)
 		return -EBUSY;
 
 	if (copy_from_user(&fd, fds, sizeof(*fds)))
 		return -EFAULT;
 
-	ctx->cq_ev_fd = eventfd_ctx_fdget(fd);
-	if (IS_ERR(ctx->cq_ev_fd)) {
-		int ret = PTR_ERR(ctx->cq_ev_fd);
+	ev_fd = kmalloc(sizeof(*ev_fd), GFP_KERNEL);
+	if (!ev_fd)
+		return -ENOMEM;
 
-		ctx->cq_ev_fd = NULL;
+	ev_fd->cq_ev_fd = eventfd_ctx_fdget(fd);
+	if (IS_ERR(ev_fd->cq_ev_fd)) {
+		ret = PTR_ERR(ev_fd->cq_ev_fd);
+		kfree(ev_fd);
 		return ret;
 	}
 
-	return 0;
+	rcu_assign_pointer(ctx->io_ev_fd, ev_fd);
+	return ret;
 }
 
+static void io_eventfd_put(struct rcu_head *rcu)
+{
+	struct io_ev_fd *ev_fd = container_of(rcu, struct io_ev_fd, rcu);
+
+	eventfd_ctx_put(ev_fd->cq_ev_fd);
+	kfree(ev_fd);
+}
+
 static int io_eventfd_unregister(struct io_ring_ctx *ctx)
 {
-	if (ctx->cq_ev_fd) {
-		eventfd_ctx_put(ctx->cq_ev_fd);
-		ctx->cq_ev_fd = NULL;
+	struct io_ev_fd *ev_fd;
+
+	ev_fd = rcu_dereference_protected(ctx->io_ev_fd,
+					lockdep_is_held(&ctx->uring_lock));
+	if (ev_fd) {
+		rcu_assign_pointer(ctx->io_ev_fd, NULL);
+		call_rcu(&ev_fd->rcu, io_eventfd_put);
 		return 0;
 	}
 
@@ -9450,8 +9495,8 @@ static __cold void io_ring_ctx_free(struct io_ring_ctx *ctx)
 		__io_sqe_files_unregister(ctx);
 	if (ctx->rings)
 		__io_cqring_overflow_flush(ctx, true);
-	mutex_unlock(&ctx->uring_lock);
 	io_eventfd_unregister(ctx);
+	mutex_unlock(&ctx->uring_lock);
 	io_destroy_buffers(ctx);
 	if (ctx->sq_creds)
 		put_cred(ctx->sq_creds);
@@ -10968,6 +11013,8 @@ static bool io_register_op_must_quiesce(int op)
 	case IORING_REGISTER_FILES:
 	case IORING_UNREGISTER_FILES:
 	case IORING_REGISTER_FILES_UPDATE:
+	case IORING_REGISTER_EVENTFD:
+	case IORING_UNREGISTER_EVENTFD:
 	case IORING_REGISTER_PROBE:
 	case IORING_REGISTER_PERSONALITY:
 	case IORING_UNREGISTER_PERSONALITY: