Commit 3a3e097a authored by Li Nan's avatar Li Nan Committed by Zheng Zengkai
Browse files

blk-mq: fix io hang for scsi drivers that depend on timeout handling during scan

hulk inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I617GN


CVE: NA

--------------------------------

Since 8b97d51a0c9c, blk_mq_queue_tag_busy_iter() will return directly if
the queue has not been registered. However, scsi_scan will issue io before
the queue is registered, and this causes an io hang because some special
scsi drivers (e.g. ata_piix) rely on blk_mq_timeout_work() to complete io
when the driver initializes during scan.
Fix the bug by checking QUEUE_FLAG_REGISTERED upward.

Fixes: 8b97d51a0c9c ("[Huawei] blk-mq: fix null pointer dereference in blk_mq_queue_tag_busy_ite")
Signed-off-by: default avatarLi Nan <linan122@huawei.com>
Reviewed-by: default avatarJason Yan <yanaijie@huawei.com>
Signed-off-by: default avatarZheng Zengkai <zhengzengkai@huawei.com>
parent 0054d8bf
Loading
Loading
Loading
Loading
+0 −7
Original line number Diff line number Diff line
@@ -515,13 +515,6 @@ EXPORT_SYMBOL(blk_mq_tagset_wait_completed_request);
void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
		void *priv)
{
	/*
	 * For dm, it can run here after register_disk, but the queue has not
	 * been initialized yet. Check QUEUE_FLAG_REGISTERED prevent null point
	 * access.
	 */
	if (!blk_queue_registered(q))
		return;
	/*
	 * __blk_mq_update_nr_hw_queues() updates nr_hw_queues and queue_hw_ctx
	 * while the queue is frozen. So we can use q_usage_counter to avoid
+8 −4
Original line number Diff line number Diff line
@@ -151,6 +151,7 @@ unsigned int blk_mq_in_flight_with_stat(struct request_queue *q,
{
	struct mq_inflight mi = { .part = part };

	if (blk_queue_registered(q))
		blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight_with_stat, &mi);

	return mi.inflight[0] + mi.inflight[1];
@@ -174,6 +175,7 @@ unsigned int blk_mq_in_flight(struct request_queue *q, struct hd_struct *part)
{
	struct mq_inflight mi = { .part = part };

	if (blk_queue_registered(q))
		blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight, &mi);

	return mi.inflight[0] + mi.inflight[1];
@@ -184,6 +186,7 @@ void blk_mq_in_flight_rw(struct request_queue *q, struct hd_struct *part,
{
	struct mq_inflight mi = { .part = part };

	if (blk_queue_registered(q))
		blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight, &mi);
	inflight[0] = mi.inflight[0];
	inflight[1] = mi.inflight[1];
@@ -974,6 +977,7 @@ bool blk_mq_queue_inflight(struct request_queue *q)
{
	bool busy = false;

	if (blk_queue_registered(q))
		blk_mq_queue_tag_busy_iter(q, blk_mq_rq_inflight, &busy);
	return busy;
}