Commit d8d1551b authored by Yu Kuai
Browse files

blk-io-hierarchy: support new rq based stage rq_driver

hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/release-management/issues/IB4E8P


CVE: NA

--------------------------------

Like blk-throttle, the following new debugfs entries will be created for
rq-based disks:

/sys/kernel/debug/block/sda/blk_io_hierarchy/
|-- rq_driver
|   |-- io_dump
|   |-- stats
|   `-- threshold

User can use them to analyze how IO behaves in rq_driver.

Signed-off-by: Yu Kuai <yukuai3@huawei.com>
parent 77b41c3c
Loading
Loading
Loading
Loading
+10 −0
Original line number Diff line number Diff line
@@ -132,4 +132,14 @@ config HIERARCHY_REQUEUE

	If unsure, say N.

config HIERARCHY_RQ_DRIVER
	bool "Enable hierarchy stats layer rq_driver"
	default n
	help
	  Enabling this lets blk hierarchy stats record additional information
	  for the rq_driver stage. Such information can be helpful to debug
	  performance and problems like io hang.

	  If unsure, say N.

endif
+14 −0
Original line number Diff line number Diff line
@@ -1058,6 +1058,13 @@ inline void __blk_mq_end_request(struct request *rq, blk_status_t error)

	blk_mq_finish_request(rq);

	/*
	 * Avoid accounting flush request with data twice and request that is
	 * not started.
	 */
	if (blk_mq_request_started(rq) && !blk_rq_hierarchy_is_flush_done(rq))
		rq_hierarchy_end_io_acct(rq, STAGE_RQ_DRIVER);

	if (rq->end_io) {
		rq_qos_done(rq->q, rq);
		if (rq->end_io(rq, error) == RQ_END_IO_FREE)
@@ -1116,6 +1123,10 @@ void blk_mq_end_request_batch(struct io_comp_batch *iob)

		rq_qos_done(rq->q, rq);

		/* Avoid accounting flush request with data twice. */
		if (!blk_rq_hierarchy_is_flush_done(rq))
			rq_hierarchy_end_io_acct(rq, STAGE_RQ_DRIVER);

		/*
		 * If end_io handler returns NONE, then it still has
		 * ownership of the request.
@@ -1269,6 +1280,7 @@ void blk_mq_start_request(struct request *rq)
	struct request_queue *q = rq->q;

	trace_block_rq_issue(rq);
	rq_hierarchy_start_io_acct(rq, STAGE_RQ_DRIVER);

	if (test_bit(QUEUE_FLAG_STATS, &q->queue_flags) &&
	    !blk_rq_is_passthrough(rq)) {
@@ -1461,6 +1473,7 @@ static void __blk_mq_requeue_request(struct request *rq)
	if (blk_mq_request_started(rq)) {
		WRITE_ONCE(rq->state, MQ_RQ_IDLE);
		rq->rq_flags &= ~RQF_TIMED_OUT;
		rq_hierarchy_end_io_acct(rq, STAGE_RQ_DRIVER);
	}
}

@@ -4398,6 +4411,7 @@ static void blk_mq_unregister_default_hierarchy(struct request_queue *q)
	blk_mq_unregister_hierarchy(q, STAGE_PLUG);
	blk_mq_unregister_hierarchy(q, STAGE_HCTX);
	blk_mq_unregister_hierarchy(q, STAGE_REQUEUE);
	blk_mq_unregister_hierarchy(q, STAGE_RQ_DRIVER);
}

/* tags can _not_ be used after returning from blk_mq_exit_queue */
+1 −0
Original line number Diff line number Diff line
@@ -825,6 +825,7 @@ static void blk_mq_register_default_hierarchy(struct request_queue *q)
	blk_mq_register_hierarchy(q, STAGE_PLUG);
	blk_mq_register_hierarchy(q, STAGE_HCTX);
	blk_mq_register_hierarchy(q, STAGE_REQUEUE);
	blk_mq_register_hierarchy(q, STAGE_RQ_DRIVER);
}

/**
+1 −0
Original line number Diff line number Diff line
@@ -500,6 +500,7 @@ enum stage_group {
#endif
	STAGE_HCTX,
	STAGE_REQUEUE,
	STAGE_RQ_DRIVER,
	NR_RQ_STAGE_GROUPS,
	STAGE_BIO = NR_RQ_STAGE_GROUPS,
	NR_STAGE_GROUPS,