Commit 5a72e899 authored by Jens Axboe's avatar Jens Axboe
Browse files

block: add a struct io_comp_batch argument to fops->iopoll()



struct io_comp_batch contains a list head and a completion handler, which
will allow completions to more efficiently complete batches of IO.

For now, no functional changes in this patch, we just define the
io_comp_batch structure and add the argument to the file_operations iopoll
handler.

Reviewed-by: default avatarChristoph Hellwig <hch@lst.de>
Signed-off-by: default avatarJens Axboe <axboe@kernel.dk>
parent 013a7f95
Loading
Loading
Loading
Loading
+5 −4
Original line number Diff line number Diff line
@@ -1078,7 +1078,7 @@ EXPORT_SYMBOL(submit_bio);
 * Note: the caller must either be the context that submitted @bio, or
 * be in a RCU critical section to prevent freeing of @bio.
 */
int bio_poll(struct bio *bio, unsigned int flags)
int bio_poll(struct bio *bio, struct io_comp_batch *iob, unsigned int flags)
{
	struct request_queue *q = bio->bi_bdev->bd_disk->queue;
	blk_qc_t cookie = READ_ONCE(bio->bi_cookie);
@@ -1096,7 +1096,7 @@ int bio_poll(struct bio *bio, unsigned int flags)
	if (WARN_ON_ONCE(!queue_is_mq(q)))
		ret = 0;	/* not yet implemented, should not happen */
	else
		ret = blk_mq_poll(q, cookie, flags);
		ret = blk_mq_poll(q, cookie, iob, flags);
	blk_queue_exit(q);
	return ret;
}
@@ -1106,7 +1106,8 @@ EXPORT_SYMBOL_GPL(bio_poll);
 * Helper to implement file_operations.iopoll.  Requires the bio to be stored
 * in iocb->private, and cleared before freeing the bio.
 */
int iocb_bio_iopoll(struct kiocb *kiocb, unsigned int flags)
int iocb_bio_iopoll(struct kiocb *kiocb, struct io_comp_batch *iob,
		    unsigned int flags)
{
	struct bio *bio;
	int ret = 0;
@@ -1134,7 +1135,7 @@ int iocb_bio_iopoll(struct kiocb *kiocb, unsigned int flags)
	rcu_read_lock();
	bio = READ_ONCE(kiocb->private);
	if (bio && bio->bi_bdev)
		ret = bio_poll(bio, flags);
		ret = bio_poll(bio, iob, flags);
	rcu_read_unlock();

	return ret;
+1 −1
Original line number Diff line number Diff line
@@ -77,7 +77,7 @@ static bool blk_rq_is_poll(struct request *rq)
static void blk_rq_poll_completion(struct request *rq, struct completion *wait)
{
	do {
		bio_poll(rq->bio, 0);
		bio_poll(rq->bio, NULL, 0);
		cond_resched();
	} while (!completion_done(wait));
}
+5 −4
Original line number Diff line number Diff line
@@ -4174,14 +4174,14 @@ static bool blk_mq_poll_hybrid(struct request_queue *q, blk_qc_t qc)
}

static int blk_mq_poll_classic(struct request_queue *q, blk_qc_t cookie,
		unsigned int flags)
			       struct io_comp_batch *iob, unsigned int flags)
{
	struct blk_mq_hw_ctx *hctx = blk_qc_to_hctx(q, cookie);
	long state = get_current_state();
	int ret;

	do {
		ret = q->mq_ops->poll(hctx);
		ret = q->mq_ops->poll(hctx, iob);
		if (ret > 0) {
			__set_current_state(TASK_RUNNING);
			return ret;
@@ -4201,14 +4201,15 @@ static int blk_mq_poll_classic(struct request_queue *q, blk_qc_t cookie,
	return 0;
}

int blk_mq_poll(struct request_queue *q, blk_qc_t cookie, unsigned int flags)
int blk_mq_poll(struct request_queue *q, blk_qc_t cookie, struct io_comp_batch *iob,
		unsigned int flags)
{
	if (!(flags & BLK_POLL_NOSLEEP) &&
	    q->poll_nsec != BLK_MQ_POLL_CLASSIC) {
		if (blk_mq_poll_hybrid(q, cookie))
			return 1;
	}
	return blk_mq_poll_classic(q, cookie, flags);
	return blk_mq_poll_classic(q, cookie, iob, flags);
}

unsigned int blk_mq_rq_cpu(struct request *rq)
+2 −1
Original line number Diff line number Diff line
@@ -31,7 +31,8 @@ struct blk_mq_ctx {
} ____cacheline_aligned_in_smp;

void blk_mq_submit_bio(struct bio *bio);
int blk_mq_poll(struct request_queue *q, blk_qc_t cookie, unsigned int flags);
int blk_mq_poll(struct request_queue *q, blk_qc_t cookie, struct io_comp_batch *iob,
		unsigned int flags);
void blk_mq_exit_queue(struct request_queue *q);
int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
void blk_mq_wake_waiters(struct request_queue *q);
+2 −2
Original line number Diff line number Diff line
@@ -105,7 +105,7 @@ static ssize_t __blkdev_direct_IO_simple(struct kiocb *iocb,
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (!READ_ONCE(bio.bi_private))
			break;
		if (!(iocb->ki_flags & IOCB_HIPRI) || !bio_poll(&bio, 0))
		if (!(iocb->ki_flags & IOCB_HIPRI) || !bio_poll(&bio, NULL, 0))
			blk_io_schedule();
	}
	__set_current_state(TASK_RUNNING);
@@ -291,7 +291,7 @@ static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
		if (!READ_ONCE(dio->waiter))
			break;

		if (!do_poll || !bio_poll(bio, 0))
		if (!do_poll || !bio_poll(bio, NULL, 0))
			blk_io_schedule();
	}
	__set_current_state(TASK_RUNNING);
Loading