Commit 1ab1edb0 authored by Pavel Begunkov, committed by Jens Axboe

io_uring: pass poll_find lock back

Instead of using implicit knowledge of what is locked or not after
io_poll_find() and co return, pass back a pointer to the locked
bucket, if any. If it is set, the caller must unlock the spinlock.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/dae1dc5749aa34367812ecf62f82fd3f053aae44.1655371007.git.asml.silence@gmail.com
Reviewed-by: Hao Xu <howeyxu@tencent.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
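
To make the new contract concrete, here is a minimal userspace sketch, with pthread mutexes standing in for the kernel's per-bucket spinlocks; struct bucket, find_entry(), cancel_entry() and the other names are hypothetical stand-ins for illustration, not the kernel API:

#include <pthread.h>
#include <stdio.h>

#define NR_BUCKETS 4

struct bucket {
	pthread_mutex_t lock;
	int key;			/* -1 means empty */
};

static struct bucket table[NR_BUCKETS];

/*
 * Like io_poll_find() after this patch: on a match the bucket is
 * left locked and reported back through *out_bucket; on a miss it
 * is unlocked here and *out_bucket stays NULL.
 */
static struct bucket *find_entry(int key, struct bucket **out_bucket)
{
	struct bucket *b = &table[(unsigned int)key % NR_BUCKETS];

	*out_bucket = NULL;
	pthread_mutex_lock(&b->lock);
	if (b->key == key) {
		*out_bucket = b;	/* caller now owns b->lock */
		return b;
	}
	pthread_mutex_unlock(&b->lock);
	return NULL;
}

/*
 * Like the reworked io_poll_cancel(): act on the entry if one was
 * found, then unlock whichever bucket was passed back, if any.
 */
static int cancel_entry(int key)
{
	struct bucket *bucket;
	struct bucket *e = find_entry(key, &bucket);

	if (e)
		e->key = -1;		/* "cancel" under the bucket lock */
	if (bucket)
		pthread_mutex_unlock(&bucket->lock);
	return e ? 0 : -1;		/* -1 plays the role of -ENOENT */
}

int main(void)
{
	for (int i = 0; i < NR_BUCKETS; i++) {
		pthread_mutex_init(&table[i].lock, NULL);
		table[i].key = -1;
	}
	table[2 % NR_BUCKETS].key = 2;

	printf("cancel(2) -> %d\n", cancel_entry(2));	/* 0: found */
	printf("cancel(2) -> %d\n", cancel_entry(2));	/* -1: already gone */
	return 0;
}

As in the patched io_poll_cancel(), the caller no longer re-derives the lock from the hash; it unlocks exactly the bucket the find helper reported back, and only when one was actually left locked.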
parent 38513c46
+26 −20
@@ -565,12 +565,15 @@ __cold bool io_poll_remove_all(struct io_ring_ctx *ctx, struct task_struct *tsk,
 }
 
 static struct io_kiocb *io_poll_find(struct io_ring_ctx *ctx, bool poll_only,
-				     struct io_cancel_data *cd)
+				     struct io_cancel_data *cd,
+				     struct io_hash_bucket **out_bucket)
 {
 	struct io_kiocb *req;
 	u32 index = hash_long(cd->data, ctx->cancel_hash_bits);
 	struct io_hash_bucket *hb = &ctx->cancel_hash[index];
 
+	*out_bucket = NULL;
+
 	spin_lock(&hb->lock);
 	hlist_for_each_entry(req, &hb->list, hash_node) {
 		if (cd->data != req->cqe.user_data)
@@ -582,6 +585,7 @@ static struct io_kiocb *io_poll_find(struct io_ring_ctx *ctx, bool poll_only,
 				continue;
 			req->work.cancel_seq = cd->seq;
 		}
+		*out_bucket = hb;
 		return req;
 	}
 	spin_unlock(&hb->lock);
@@ -589,11 +593,14 @@ static struct io_kiocb *io_poll_find(struct io_ring_ctx *ctx, bool poll_only,
 }
 
 static struct io_kiocb *io_poll_file_find(struct io_ring_ctx *ctx,
-					  struct io_cancel_data *cd)
+					  struct io_cancel_data *cd,
+					  struct io_hash_bucket **out_bucket)
 {
 	struct io_kiocb *req;
 	int i;
 
+	*out_bucket = NULL;
+
 	for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) {
 		struct io_hash_bucket *hb = &ctx->cancel_hash[i];
 
@@ -605,6 +612,7 @@ static struct io_kiocb *io_poll_file_find(struct io_ring_ctx *ctx,
 			if (cd->seq == req->work.cancel_seq)
 				continue;
 			req->work.cancel_seq = cd->seq;
+			*out_bucket = hb;
 			return req;
 		}
 		spin_unlock(&hb->lock);
@@ -623,23 +631,19 @@ static bool io_poll_disarm(struct io_kiocb *req)
 
 int io_poll_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd)
 {
+	struct io_hash_bucket *bucket;
 	struct io_kiocb *req;
-	u32 index;
-	spinlock_t *lock;
 
 	if (cd->flags & (IORING_ASYNC_CANCEL_FD|IORING_ASYNC_CANCEL_ANY))
-		req = io_poll_file_find(ctx, cd);
+		req = io_poll_file_find(ctx, cd, &bucket);
 	else
-		req = io_poll_find(ctx, false, cd);
-	if (!req) {
-		return -ENOENT;
-	} else {
-		index = hash_long(req->cqe.user_data, ctx->cancel_hash_bits);
-		lock = &ctx->cancel_hash[index].lock;
-	}
-	io_poll_cancel_req(req);
-	spin_unlock(lock);
-	return 0;
+		req = io_poll_find(ctx, false, cd, &bucket);
+
+	if (req)
+		io_poll_cancel_req(req);
+	if (bucket)
+		spin_unlock(&bucket->lock);
+	return req ? 0 : -ENOENT;
 }
 
 static __poll_t io_poll_parse_events(const struct io_uring_sqe *sqe,
@@ -732,19 +736,21 @@ int io_poll_remove(struct io_kiocb *req, unsigned int issue_flags)
 	struct io_poll_update *poll_update = io_kiocb_to_cmd(req);
 	struct io_cancel_data cd = { .data = poll_update->old_user_data, };
 	struct io_ring_ctx *ctx = req->ctx;
-	u32 index = hash_long(cd.data, ctx->cancel_hash_bits);
-	spinlock_t *lock = &ctx->cancel_hash[index].lock;
+	struct io_hash_bucket *bucket;
 	struct io_kiocb *preq;
 	int ret2, ret = 0;
 	bool locked;
 
-	preq = io_poll_find(ctx, true, &cd);
+	preq = io_poll_find(ctx, true, &cd, &bucket);
+	if (preq)
+		ret2 = io_poll_disarm(preq);
+	if (bucket)
+		spin_unlock(&bucket->lock);
+
 	if (!preq) {
 		ret = -ENOENT;
 		goto out;
 	}
-	ret2 = io_poll_disarm(preq);
-	spin_unlock(lock);
 	if (!ret2) {
 		ret = -EALREADY;
 		goto out;