Commit ef99b2d3 authored by Christoph Hellwig, committed by Jens Axboe

block: replace the spin argument to blk_poll with a flags argument



Switch the boolean spin argument to blk_poll to a set of flags instead.
This will allow controlling polling behavior in a more fine-grained way.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Tested-by: Mark Wunderlich <mark.wunderlich@intel.com>
Link: https://lore.kernel.org/r/20211012111226.760968-10-hch@lst.de


[axboe: adapt to changed io_uring iopoll]
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 28a1ae6b
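
For orientation, here is a minimal sketch of what the new calling convention means for users of blk_poll(). It is illustrative only and not part of the patch: the helper name is hypothetical, and it assumes the BLK_POLL_ONESHOT flag introduced alongside the new blk_poll() prototype. Passing 0 keeps the previous spinning behavior (spin == true), while BLK_POLL_ONESHOT corresponds to the old spin == false case, i.e. look at the hardware once and return.

#include <linux/blkdev.h>

/*
 * Hypothetical helper, for illustration only: map the old boolean
 * convention onto the new flags argument of blk_poll().
 */
static inline unsigned int poll_flags_from_spin(bool spin)
{
	/* spin == true:  keep polling until a completion is found (no flags) */
	/* spin == false: poll the hardware once, then return */
	return spin ? 0 : BLK_POLL_ONESHOT;
}

/* old: blk_poll(q, cookie, true);   new equivalent: blk_poll(q, cookie, 0); */
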
+1 −1
@@ -71,7 +71,7 @@ static bool blk_rq_is_poll(struct request *rq)
static void blk_rq_poll_completion(struct request *rq, struct completion *wait)
{
	do {
-		blk_poll(rq->q, request_to_qc_t(rq->mq_hctx, rq), true);
+		blk_poll(rq->q, request_to_qc_t(rq->mq_hctx, rq), 0);
		cond_resched();
	} while (!completion_done(wait));
}
+7 −10
@@ -4052,7 +4052,7 @@ static bool blk_mq_poll_hybrid(struct request_queue *q, blk_qc_t qc)
}

static int blk_mq_poll_classic(struct request_queue *q, blk_qc_t cookie,
-		bool spin)
+		unsigned int flags)
{
	struct blk_mq_hw_ctx *hctx = blk_qc_to_hctx(q, cookie);
	long state = get_current_state();
@@ -4075,7 +4075,7 @@ static int blk_mq_poll_classic(struct request_queue *q, blk_qc_t cookie,
		if (task_is_running(current))
			return 1;

-		if (ret < 0 || !spin)
+		if (ret < 0 || (flags & BLK_POLL_ONESHOT))
			break;
		cpu_relax();
	} while (!need_resched());
@@ -4088,15 +4088,13 @@ static int blk_mq_poll_classic(struct request_queue *q, blk_qc_t cookie,
 * blk_poll - poll for IO completions
 * @q:  the queue
 * @cookie: cookie passed back at IO submission time
- * @spin: whether to spin for completions
+ * @flags: BLK_POLL_* flags that control the behavior
 *
 * Description:
 *    Poll for completions on the passed in queue. Returns number of
- *    completed entries found. If @spin is true, then blk_poll will continue
- *    looping until at least one completion is found, unless the task is
- *    otherwise marked running (or we need to reschedule).
+ *    completed entries found.
 */
-int blk_poll(struct request_queue *q, blk_qc_t cookie, bool spin)
+int blk_poll(struct request_queue *q, blk_qc_t cookie, unsigned int flags)
{
	if (cookie == BLK_QC_T_NONE ||
	    !test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
@@ -4105,12 +4103,11 @@ int blk_poll(struct request_queue *q, blk_qc_t cookie, bool spin)
	if (current->plug)
		blk_flush_plug_list(current->plug, false);

-	/* If specified not to spin, we also should not sleep. */
-	if (spin && q->poll_nsec != BLK_MQ_POLL_CLASSIC) {
+	if (q->poll_nsec != BLK_MQ_POLL_CLASSIC) {
		if (blk_mq_poll_hybrid(q, cookie))
			return 1;
	}
-	return blk_mq_poll_classic(q, cookie, spin);
+	return blk_mq_poll_classic(q, cookie, flags);
}
EXPORT_SYMBOL_GPL(blk_poll);

+4 −4
@@ -108,7 +108,7 @@ static ssize_t __blkdev_direct_IO_simple(struct kiocb *iocb,
		if (!READ_ONCE(bio.bi_private))
			break;
		if (!(iocb->ki_flags & IOCB_HIPRI) ||
-		    !blk_poll(bdev_get_queue(bdev), qc, true))
+		    !blk_poll(bdev_get_queue(bdev), qc, 0))
			blk_io_schedule();
	}
	__set_current_state(TASK_RUNNING);
@@ -141,12 +141,12 @@ struct blkdev_dio {

static struct bio_set blkdev_dio_pool;

-static int blkdev_iopoll(struct kiocb *kiocb, bool wait)
+static int blkdev_iopoll(struct kiocb *kiocb, unsigned int flags)
{
	struct block_device *bdev = I_BDEV(kiocb->ki_filp->f_mapping->host);
	struct request_queue *q = bdev_get_queue(bdev);

-	return blk_poll(q, READ_ONCE(kiocb->ki_cookie), wait);
+	return blk_poll(q, READ_ONCE(kiocb->ki_cookie), flags);
}

static void blkdev_bio_end_io(struct bio *bio)
@@ -297,7 +297,7 @@ static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
		if (!READ_ONCE(dio->waiter))
			break;

-		if (!do_poll || !blk_poll(bdev_get_queue(bdev), qc, true))
+		if (!do_poll || !blk_poll(bdev_get_queue(bdev), qc, 0))
			blk_io_schedule();
	}
	__set_current_state(TASK_RUNNING);
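
blkdev_iopoll() above (and iomap_dio_iopoll() further down) show the pattern for ->iopoll instances under the new prototype: the BLK_POLL_* flags arrive from the polling caller and are handed to blk_poll() unchanged. As a hedged sketch, a hypothetical minimal implementation for a file that stashes its request queue in kiocb->private could look like the following; the function name is illustrative and not from this patch.

#include <linux/blkdev.h>
#include <linux/fs.h>

/* Hypothetical ->iopoll instance: forward the caller's BLK_POLL_* flags
 * (e.g. io_uring's poll_flags, see the next file) straight to blk_poll(). */
static int example_iopoll(struct kiocb *kiocb, unsigned int flags)
{
	struct request_queue *q = READ_ONCE(kiocb->private);

	if (!q)
		return 0;
	return blk_poll(q, READ_ONCE(kiocb->ki_cookie), flags);
}
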
+5 −4
@@ -2457,14 +2457,15 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events,
			long min)
{
	struct io_kiocb *req, *tmp;
+	unsigned int poll_flags = 0;
	LIST_HEAD(done);
-	bool spin;

	/*
	 * Only spin for completions if we don't have multiple devices hanging
	 * off our complete list, and we're under the requested amount.
	 */
-	spin = !ctx->poll_multi_queue && *nr_events < min;
+	if (ctx->poll_multi_queue || *nr_events >= min)
+		poll_flags |= BLK_POLL_ONESHOT;

	list_for_each_entry_safe(req, tmp, &ctx->iopoll_list, inflight_entry) {
		struct kiocb *kiocb = &req->rw.kiocb;
@@ -2482,11 +2483,11 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events,
		if (!list_empty(&done))
			break;

-		ret = kiocb->ki_filp->f_op->iopoll(kiocb, spin);
+		ret = kiocb->ki_filp->f_op->iopoll(kiocb, poll_flags);
		if (unlikely(ret < 0))
			return ret;
		else if (ret)
-			spin = false;
+			poll_flags |= BLK_POLL_ONESHOT;

		/* iopoll may have completed current req */
		if (READ_ONCE(req->iopoll_completed))
+3 −3
@@ -49,13 +49,13 @@ struct iomap_dio {
	};
};

-int iomap_dio_iopoll(struct kiocb *kiocb, bool spin)
+int iomap_dio_iopoll(struct kiocb *kiocb, unsigned int flags)
{
	struct request_queue *q = READ_ONCE(kiocb->private);

	if (!q)
		return 0;
-	return blk_poll(q, READ_ONCE(kiocb->ki_cookie), spin);
+	return blk_poll(q, READ_ONCE(kiocb->ki_cookie), flags);
}
EXPORT_SYMBOL_GPL(iomap_dio_iopoll);

@@ -642,7 +642,7 @@ __iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
			if (!(iocb->ki_flags & IOCB_HIPRI) ||
			    !dio->submit.last_queue ||
			    !blk_poll(dio->submit.last_queue,
-					 dio->submit.cookie, true))
+					 dio->submit.cookie, 0))
				blk_io_schedule();
		}
		__set_current_state(TASK_RUNNING);