Commit 483239c7 authored by Christoph Hellwig, committed by Jens Axboe

blk-mq: pass a tagset to blk_mq_wait_quiesce_done



Nothing in blk_mq_wait_quiesce_done needs the request_queue now, so just
pass the tagset, and move the non-mq check into the only caller that
needs it.
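
As an aside (not part of this commit): with the new signature, a driver that
quiesces several request_queues sharing one tag_set can issue the nowait
variant per queue and then wait for a single RCU/SRCU grace period on the
set.  A minimal sketch follows; the helper name quiesce_queues_on_set and its
parameters are hypothetical, while blk_mq_quiesce_queue_nowait() and
blk_mq_wait_quiesce_done() are the existing interfaces.

#include <linux/blk-mq.h>

/* Hypothetical helper: quiesce every queue in @qs (all assumed to belong
 * to @set, e.g. NVMe namespace queues), then wait once for the shared
 * grace period instead of waiting per queue. */
static void quiesce_queues_on_set(struct blk_mq_tag_set *set,
				  struct request_queue **qs, int nr)
{
	int i;

	for (i = 0; i < nr; i++)
		blk_mq_quiesce_queue_nowait(qs[i]);

	/* Waits on set->srcu for BLK_MQ_F_BLOCKING sets, else on RCU. */
	blk_mq_wait_quiesce_done(set);
}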

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Keith Busch <kbusch@kernel.org>
Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
Reviewed-by: Chao Leng <lengchao@huawei.com>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>
Link: https://lore.kernel.org/r/20221101150050.3510-13-hch@lst.de


Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 80bd4a7a
block/blk-mq.c  +9 −7
@@ -254,15 +254,17 @@ EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue_nowait);

 /**
  * blk_mq_wait_quiesce_done() - wait until in-progress quiesce is done
- * @q: request queue.
+ * @set: tag_set to wait on
  *
  * Note: it is driver's responsibility for making sure that quiesce has
- * been started.
+ * been started on or more of the request_queues of the tag_set.  This
+ * function only waits for the quiesce on those request_queues that had
+ * the quiesce flag set using blk_mq_quiesce_queue_nowait.
  */
-void blk_mq_wait_quiesce_done(struct request_queue *q)
+void blk_mq_wait_quiesce_done(struct blk_mq_tag_set *set)
 {
-	if (q->tag_set->flags & BLK_MQ_F_BLOCKING)
-		synchronize_srcu(q->tag_set->srcu);
+	if (set->flags & BLK_MQ_F_BLOCKING)
+		synchronize_srcu(set->srcu);
 	else
 		synchronize_rcu();
 }
@@ -282,7 +284,7 @@ void blk_mq_quiesce_queue(struct request_queue *q)
 	blk_mq_quiesce_queue_nowait(q);
 	/* nothing to wait for non-mq queues */
 	if (queue_is_mq(q))
-		blk_mq_wait_quiesce_done(q);
+		blk_mq_wait_quiesce_done(q->tag_set);
 }
 EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue);
 
@@ -1623,7 +1625,7 @@ static void blk_mq_timeout_work(struct work_struct *work)
 		 * uses srcu or rcu, wait for a synchronization point to
 		 * ensure all running submits have finished
 		 */
-		blk_mq_wait_quiesce_done(q);
+		blk_mq_wait_quiesce_done(q->tag_set);
 
 		expired.next = 0;
 		blk_mq_queue_tag_busy_iter(q, blk_mq_handle_expired, &expired);
drivers/nvme/host/core.c  +2 −2
@@ -5107,7 +5107,7 @@ static void nvme_stop_ns_queue(struct nvme_ns *ns)
 	if (!test_and_set_bit(NVME_NS_STOPPED, &ns->flags))
 		blk_mq_quiesce_queue(ns->queue);
 	else
-		blk_mq_wait_quiesce_done(ns->queue);
+		blk_mq_wait_quiesce_done(ns->queue->tag_set);
 }
 
 /* let I/O to all namespaces fail in preparation for surprise removal */
@@ -5197,7 +5197,7 @@ void nvme_stop_admin_queue(struct nvme_ctrl *ctrl)
 	if (!test_and_set_bit(NVME_CTRL_ADMIN_Q_STOPPED, &ctrl->flags))
 		blk_mq_quiesce_queue(ctrl->admin_q);
 	else
-		blk_mq_wait_quiesce_done(ctrl->admin_q);
+		blk_mq_wait_quiesce_done(ctrl->admin_q->tag_set);
 }
 EXPORT_SYMBOL_GPL(nvme_stop_admin_queue);
 
drivers/scsi/scsi_lib.c  +1 −1
@@ -2735,7 +2735,7 @@ static void scsi_stop_queue(struct scsi_device *sdev, bool nowait)
 			blk_mq_quiesce_queue(sdev->request_queue);
 	} else {
 		if (!nowait)
-			blk_mq_wait_quiesce_done(sdev->request_queue);
+			blk_mq_wait_quiesce_done(sdev->request_queue->tag_set);
 	}
 }
 
include/linux/blk-mq.h  +1 −1
@@ -880,7 +880,7 @@ void blk_mq_start_hw_queues(struct request_queue *q);
 void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
 void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);
 void blk_mq_quiesce_queue(struct request_queue *q);
-void blk_mq_wait_quiesce_done(struct request_queue *q);
+void blk_mq_wait_quiesce_done(struct blk_mq_tag_set *set);
 void blk_mq_unquiesce_queue(struct request_queue *q);
 void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
 void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);