Commit 136ffbfb authored by Yu Kuai's avatar Yu Kuai
Browse files

blk-io-hierarchy: support new rq based stage hctx

hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/release-management/issues/IB4E8P


CVE: NA

--------------------------------

Like blk-throttle, the following new debugfs entries will be created for
rq-based disks:

/sys/kernel/debug/block/sda/blk_io_hierarchy/
|-- hctx
|   |-- io_dump
|   |-- stats
|   `-- threshold

Users can use them to analyze how IO behaves in the hctx stage.

Signed-off-by: default avatarYu Kuai <yukuai3@huawei.com>
parent 4849eb09
Loading
Loading
Loading
Loading
+2 −0
Original line number Diff line number Diff line
@@ -185,6 +185,7 @@ static void blk_flush_complete_seq(struct request *rq,
		if (list_empty(pending))
			fq->flush_pending_since = jiffies;
		list_add_tail(&rq->queuelist, pending);
		rq_hierarchy_start_io_acct(rq, STAGE_HCTX);
		break;

	case REQ_FSEQ_DATA:
@@ -264,6 +265,7 @@ static enum rq_end_io_ret flush_end_io(struct request *flush_rq,

		BUG_ON(seq != REQ_FSEQ_PREFLUSH && seq != REQ_FSEQ_POSTFLUSH);
		list_del_init(&rq->queuelist);
		rq_hierarchy_end_io_acct(rq, STAGE_HCTX);
		blk_flush_complete_seq(rq, fq, seq, error);
	}

+10 −0
Original line number Diff line number Diff line
@@ -112,4 +112,14 @@ config HIERARCHY_KYBER

	If unsure, say N.

config HIERARCHY_HCTX
	bool "Enable hierarchy stats layer hctx"
	default n
	help
	Enabling this lets blk hierarchy stats record additional information
	for hctx. Such information can be helpful to debug performance
	and problems like io hang.

	If unsure, say N.

endif
+2 −0
Original line number Diff line number Diff line
@@ -15,6 +15,7 @@
#include "blk-mq-debugfs.h"
#include "blk-mq-sched.h"
#include "blk-wbt.h"
#include "blk-io-hierarchy/stats.h"

/*
 * Mark a hardware queue as needing a restart.
@@ -298,6 +299,7 @@ static int __blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
	 */
	if (!list_empty(&rq_list)) {
		blk_mq_sched_mark_restart_hctx(hctx);
		rq_list_hierarchy_end_io_acct(&rq_list, STAGE_HCTX);
		if (!blk_mq_dispatch_rq_list(hctx, &rq_list, 0))
			return 0;
		need_dispatch = true;
+5 −0
Original line number Diff line number Diff line
@@ -2147,6 +2147,7 @@ bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list,
		if (nr_budgets)
			blk_mq_release_budgets(q, list);

		rq_list_hierarchy_start_io_acct(list, STAGE_HCTX);
		spin_lock(&hctx->lock);
		list_splice_tail_init(list, &hctx->dispatch);
		spin_unlock(&hctx->lock);
@@ -2508,6 +2509,7 @@ static void blk_mq_request_bypass_insert(struct request *rq, blk_insert_t flags)
{
	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;

	rq_hierarchy_start_io_acct(rq, STAGE_HCTX);
	spin_lock(&hctx->lock);
	if (flags & BLK_MQ_INSERT_AT_HEAD)
		list_add(&rq->queuelist, &hctx->dispatch);
@@ -2815,6 +2817,7 @@ static void blk_mq_dispatch_plug_list(struct blk_plug *plug, bool from_sched)
	percpu_ref_get(&this_hctx->queue->q_usage_counter);
	/* passthrough requests should never be issued to the I/O scheduler */
	if (is_passthrough) {
		rq_list_hierarchy_start_io_acct(&list, STAGE_HCTX);
		spin_lock(&this_hctx->lock);
		list_splice_tail_init(&list, &this_hctx->dispatch);
		spin_unlock(&this_hctx->lock);
@@ -3618,6 +3621,7 @@ static int blk_mq_hctx_notify_dead(unsigned int cpu, struct hlist_node *node)
	if (list_empty(&tmp))
		return 0;

	rq_list_hierarchy_start_io_acct(&tmp, STAGE_HCTX);
	spin_lock(&hctx->lock);
	list_splice_tail_init(&tmp, &hctx->dispatch);
	spin_unlock(&hctx->lock);
@@ -4388,6 +4392,7 @@ static void blk_mq_unregister_default_hierarchy(struct request_queue *q)
{
	blk_mq_unregister_hierarchy(q, STAGE_GETTAG);
	blk_mq_unregister_hierarchy(q, STAGE_PLUG);
	blk_mq_unregister_hierarchy(q, STAGE_HCTX);
}

/* tags can _not_ be used after returning from blk_mq_exit_queue */
+1 −0
Original line number Diff line number Diff line
@@ -823,6 +823,7 @@ static void blk_mq_register_default_hierarchy(struct request_queue *q)
{
	blk_mq_register_hierarchy(q, STAGE_GETTAG);
	blk_mq_register_hierarchy(q, STAGE_PLUG);
	blk_mq_register_hierarchy(q, STAGE_HCTX);
}

/**
Loading