Commit 3a5f59b1 authored by Linus Torvalds
Browse files

Merge tag 'io_uring-5.17-2022-02-23' of git://git.kernel.dk/linux-block

Pull io_uring fixes from Jens Axboe:

 - Add a conditional schedule point in io_add_buffers() (Eric)

 - Fix for a quiesce speedup merged in this release (Dylan)

 - Don't convert to jiffies for event timeout waiting, it's way too
   coarse when we accept a timespec as input (me)

* tag 'io_uring-5.17-2022-02-23' of git://git.kernel.dk/linux-block:
  io_uring: disallow modification of rsrc_data during quiesce
  io_uring: don't convert to jiffies for waiting on timeouts
  io_uring: add a schedule point in io_add_buffers()
parents 6c528f34 80912cef
Loading
Loading
Loading
Loading
+17 −7
Original line number Diff line number Diff line
@@ -4567,6 +4567,7 @@ static int io_add_buffers(struct io_provide_buf *pbuf, struct io_buffer **head)
		} else {
			list_add_tail(&buf->list, &(*head)->list);
		}
		cond_resched();
	}

	return i ? i : -ENOMEM;
@@ -7693,7 +7694,7 @@ static int io_run_task_work_sig(void)
/* when returns >0, the caller should retry */
static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx,
					  struct io_wait_queue *iowq,
					  signed long *timeout)
					  ktime_t timeout)
{
	int ret;

@@ -7705,8 +7706,9 @@ static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx,
	if (test_bit(0, &ctx->check_cq_overflow))
		return 1;

	*timeout = schedule_timeout(*timeout);
	return !*timeout ? -ETIME : 1;
	if (!schedule_hrtimeout(&timeout, HRTIMER_MODE_ABS))
		return -ETIME;
	return 1;
}

/*
@@ -7719,7 +7721,7 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
{
	struct io_wait_queue iowq;
	struct io_rings *rings = ctx->rings;
	signed long timeout = MAX_SCHEDULE_TIMEOUT;
	ktime_t timeout = KTIME_MAX;
	int ret;

	do {
@@ -7735,7 +7737,7 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,

		if (get_timespec64(&ts, uts))
			return -EFAULT;
		timeout = timespec64_to_jiffies(&ts);
		timeout = ktime_add_ns(timespec64_to_ktime(ts), ktime_get_ns());
	}

	if (sig) {
@@ -7767,7 +7769,7 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
		}
		prepare_to_wait_exclusive(&ctx->cq_wait, &iowq.wq,
						TASK_INTERRUPTIBLE);
		ret = io_cqring_wait_schedule(ctx, &iowq, &timeout);
		ret = io_cqring_wait_schedule(ctx, &iowq, timeout);
		finish_wait(&ctx->cq_wait, &iowq.wq);
		cond_resched();
	} while (ret > 0);
@@ -7924,8 +7926,16 @@ static __cold int io_rsrc_ref_quiesce(struct io_rsrc_data *data,
		ret = wait_for_completion_interruptible(&data->done);
		if (!ret) {
			mutex_lock(&ctx->uring_lock);
			if (atomic_read(&data->refs) > 0) {
				/*
				 * it has been revived by another thread while
				 * we were unlocked
				 */
				mutex_unlock(&ctx->uring_lock);
			} else {
				break;
			}
		}

		atomic_inc(&data->refs);
		/* wait for all works potentially completing data->done */