Commit f794f335 authored by Jens Axboe

block: add support for blk_mq_end_request_batch()



Instead of calling blk_mq_end_request() on a single request, add a helper
that takes the new struct io_comp_batch and completes every request
stored in it.

Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 1aec5e4a
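
For context on how the pieces of this series fit together: a completion path
(typically an I/O poll loop) hands a struct io_comp_batch to the driver, the
driver parks finished requests on it via blk_mq_add_to_batch() (added below),
and the batch owner then finishes everything in one call. A minimal sketch of
the consumer side, assuming the DEFINE_IO_COMP_BATCH() and rq_list_empty()
helpers introduced earlier in this series; the exact call site is illustrative,
not part of this commit:

	DEFINE_IO_COMP_BATCH(iob);

	/* the driver's ->poll() fills iob.req_list and sets iob.complete */
	found = q->mq_ops->poll(hctx, &iob);
	if (!rq_list_empty(iob.req_list))
		iob.complete(&iob);	/* ends with blk_mq_end_request_batch() */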
block/blk-mq-tag.c: +6 −0
@@ -207,6 +207,12 @@ void blk_mq_put_tag(struct blk_mq_tags *tags, struct blk_mq_ctx *ctx,
	}
}

void blk_mq_put_tags(struct blk_mq_tags *tags, int *tag_array, int nr_tags)
{
	sbitmap_queue_clear_batch(&tags->bitmap_tags, tags->nr_reserved_tags,
					tag_array, nr_tags);
}

struct bt_iter_data {
	struct blk_mq_hw_ctx *hctx;
	busy_iter_fn *fn;
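
The new helper frees a whole array of tags in one pass instead of taking the
per-tag path for each one. Conceptually it replaces a loop like the sketch
below; sbitmap_queue_clear_batch(), added in the parent commit, lets sbitmap
batch the bit clears and wakeup checks. The loop is for illustration only, and
raw_smp_processor_id() stands in for the per-ctx CPU hint the real per-tag
path uses:

	/* illustrative per-tag equivalent, not part of this commit */
	for (i = 0; i < nr_tags; i++)
		sbitmap_queue_clear(&tags->bitmap_tags,
				    tag_array[i] - tags->nr_reserved_tags,
				    raw_smp_processor_id());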
block/blk-mq-tag.h: +1 −0
@@ -42,6 +42,7 @@ unsigned long blk_mq_get_tags(struct blk_mq_alloc_data *data, int nr_tags,
			      unsigned int *offset);
extern void blk_mq_put_tag(struct blk_mq_tags *tags, struct blk_mq_ctx *ctx,
			   unsigned int tag);
void blk_mq_put_tags(struct blk_mq_tags *tags, int *tag_array, int nr_tags);
extern int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
					struct blk_mq_tags **tags,
					unsigned int depth, bool can_grow);
block/blk-mq.c: +63 −19
@@ -300,15 +300,6 @@ void blk_mq_wake_waiters(struct request_queue *q)
			blk_mq_tag_wakeup_all(hctx->tags, true);
}

/*
 * Only need start/end time stamping if we have iostat or
 * blk stats enabled, or using an IO scheduler.
 */
static inline bool blk_mq_need_time_stamp(struct request *rq)
{
	return (rq->rq_flags & (RQF_IO_STAT | RQF_STATS | RQF_ELV));
}

static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
		unsigned int tag, u64 alloc_time_ns)
{
@@ -768,11 +759,8 @@ bool blk_update_request(struct request *req, blk_status_t error,
}
EXPORT_SYMBOL_GPL(blk_update_request);

inline void __blk_mq_end_request(struct request *rq, blk_status_t error)
static inline void __blk_mq_end_request_acct(struct request *rq, u64 now)
{
	if (blk_mq_need_time_stamp(rq)) {
		u64 now = ktime_get_ns();

	if (rq->rq_flags & RQF_STATS) {
		blk_mq_poll_stats_start(rq->q);
		blk_stat_add(rq, now);
@@ -782,6 +770,11 @@ inline void __blk_mq_end_request(struct request *rq, blk_status_t error)
	blk_account_io_done(rq, now);
}

inline void __blk_mq_end_request(struct request *rq, blk_status_t error)
{
	if (blk_mq_need_time_stamp(rq))
		__blk_mq_end_request_acct(rq, ktime_get_ns());

	if (rq->end_io) {
		rq_qos_done(rq->q, rq);
		rq->end_io(rq, error);
@@ -799,6 +792,57 @@ void blk_mq_end_request(struct request *rq, blk_status_t error)
}
EXPORT_SYMBOL(blk_mq_end_request);

#define TAG_COMP_BATCH		32

static inline void blk_mq_flush_tag_batch(struct blk_mq_hw_ctx *hctx,
					  int *tag_array, int nr_tags)
{
	struct request_queue *q = hctx->queue;

	blk_mq_put_tags(hctx->tags, tag_array, nr_tags);
	percpu_ref_put_many(&q->q_usage_counter, nr_tags);
}

void blk_mq_end_request_batch(struct io_comp_batch *iob)
{
	int tags[TAG_COMP_BATCH], nr_tags = 0;
	struct blk_mq_hw_ctx *last_hctx = NULL;
	struct request *rq;
	u64 now = 0;

	if (iob->need_ts)
		now = ktime_get_ns();

	while ((rq = rq_list_pop(&iob->req_list)) != NULL) {
		prefetch(rq->bio);
		prefetch(rq->rq_next);

		blk_update_request(rq, BLK_STS_OK, blk_rq_bytes(rq));
		if (iob->need_ts)
			__blk_mq_end_request_acct(rq, now);

		WRITE_ONCE(rq->state, MQ_RQ_IDLE);
		if (!refcount_dec_and_test(&rq->ref))
			continue;

		blk_crypto_free_request(rq);
		blk_pm_mark_last_busy(rq);
		rq_qos_done(rq->q, rq);

		if (nr_tags == TAG_COMP_BATCH ||
		    (last_hctx && last_hctx != rq->mq_hctx)) {
			blk_mq_flush_tag_batch(last_hctx, tags, nr_tags);
			nr_tags = 0;
		}
		tags[nr_tags++] = rq->tag;
		last_hctx = rq->mq_hctx;
	}

	if (nr_tags)
		blk_mq_flush_tag_batch(last_hctx, tags, nr_tags);
}
EXPORT_SYMBOL_GPL(blk_mq_end_request_batch);

static void blk_complete_reqs(struct llist_head *list)
{
	struct llist_node *entry = llist_reverse_order(llist_del_all(list));
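
Note that blk_mq_end_request_batch() only returns tags in chunks of
TAG_COMP_BATCH and flushes the pending chunk whenever the batch crosses into a
different hardware queue, since a single io_comp_batch may hold requests from
several hctxs. On the driver side, the ->complete() callback is expected to do
its own per-request teardown and then hand the whole list over. A hedged
sketch, loosely modeled on the nvme conversion later in this series;
mydrv_unmap_rq() is a hypothetical driver hook:

	static void mydrv_complete_batch(struct io_comp_batch *iob)
	{
		struct request *req;

		/* driver-private cleanup first, request by request ... */
		rq_list_for_each(&iob->req_list, req)
			mydrv_unmap_rq(req);

		/* ... then end every request in the batch in one call */
		blk_mq_end_request_batch(iob);
	}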
include/linux/blk-mq.h: +29 −0
@@ -728,6 +728,35 @@ static inline void blk_mq_set_request_complete(struct request *rq)
void blk_mq_start_request(struct request *rq);
void blk_mq_end_request(struct request *rq, blk_status_t error);
void __blk_mq_end_request(struct request *rq, blk_status_t error);
void blk_mq_end_request_batch(struct io_comp_batch *ib);

/*
 * Only need start/end time stamping if we have iostat or
 * blk stats enabled, or using an IO scheduler.
 */
static inline bool blk_mq_need_time_stamp(struct request *rq)
{
	return (rq->rq_flags & (RQF_IO_STAT | RQF_STATS | RQF_ELV));
}

/*
 * Batched completions only work when there is no I/O error and no special
 * ->end_io handler.
 */
static inline bool blk_mq_add_to_batch(struct request *req,
				       struct io_comp_batch *iob, int ioerror,
				       void (*complete)(struct io_comp_batch *))
{
	if (!iob || (req->rq_flags & RQF_ELV) || req->end_io || ioerror)
		return false;
	if (!iob->complete)
		iob->complete = complete;
	else if (iob->complete != complete)
		return false;
	iob->need_ts |= blk_mq_need_time_stamp(req);
	rq_list_add(&iob->req_list, req);
	return true;
}

void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list);
void blk_mq_kick_requeue_list(struct request_queue *q);
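
In a driver's completion handler, blk_mq_add_to_batch() is meant to be tried
first, with the existing per-request completion as the fallback for requests
the batch refuses. A hedged sketch along the lines of the nvme patch in this
series; mydrv_status() and mydrv_complete_rq() are hypothetical:

	static void mydrv_handle_completion(struct request *req,
					    struct io_comp_batch *iob)
	{
		/*
		 * Batch if allowed: requests with an I/O error, an
		 * ->end_io handler, or an elevator attached must take
		 * the regular per-request path instead.
		 */
		if (!blk_mq_add_to_batch(req, iob, mydrv_status(req),
					 mydrv_complete_batch))
			mydrv_complete_rq(req);
	}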