Commit 4d337ceb authored by Ming Lei, committed by Jens Axboe

blk-mq: avoid to touch q->elevator without any protection



q->elevator is referenced in blk_mq_has_sqsched() without any protection:
no .q_usage_counter is held and no queue SRCU or RCU read lock is taken,
so a use-after-free can be triggered.
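To make the hazard concrete, here is a hypothetical interleaving (illustration only, not part of the patch; the function and fields named are the ones visible in the diff below):

```c
/*
 * Hypothetical interleaving (illustration only):
 *
 *   CPU0: blk_mq_run_hw_queues(q)         CPU1: elevator switch / teardown
 *     blk_mq_has_sqsched(q)
 *       e = q->elevator;
 *                                           old elevator_queue is freed
 *                                           q->elevator is updated
 *       e->type->ops.dispatch_request       <-- e points to freed memory
 */
```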

Fix the issue by adding a queue flag that records whether the elevator
uses single-queue-style dispatch. With that in place, the
ELEVATOR_F_MQ_AWARE elevator feature flag is no longer needed.
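The new flag and its test helper live in include/linux/blkdev.h; that hunk is not shown in the excerpt below. A minimal sketch of what the addition presumably looks like, following the existing blk_queue_* macro pattern (the exact bit number is an assumption):

```c
/* Sketch only: presumed include/linux/blkdev.h addition (hunk not shown) */
#define QUEUE_FLAG_SQ_SCHED	30	/* bit value assumed; elevator dispatches request-queue wide */

#define blk_queue_sq_sched(q)	test_bit(QUEUE_FLAG_SQ_SCHED, &(q)->queue_flags)
```

Because blk_queue_sq_sched() only tests a bit in q->queue_flags, the run-queue paths no longer dereference q->elevator at all, which is what closes the use-after-free window.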

Cc: Jan Kara <jack@suse.cz>
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Link: https://lore.kernel.org/r/20220616014401.817001-3-ming.lei@redhat.com


Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 5fd7a84a
block/bfq-iosched.c  (+3 −0)
@@ -7188,6 +7188,9 @@ static int bfq_init_queue(struct request_queue *q, struct elevator_type *e)
	bfq_init_root_group(bfqd->root_group, bfqd);
	bfq_init_entity(&bfqd->oom_bfqq.entity, bfqd->root_group);

+	/* We dispatch from request queue wide instead of hw queue */
+	blk_queue_flag_set(QUEUE_FLAG_SQ_SCHED, q);
+
	wbt_disable_default(q);
	return 0;

block/blk-mq-sched.c  (+1 −0)
@@ -564,6 +564,7 @@ int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
	int ret;

	if (!e) {
+		blk_queue_flag_clear(QUEUE_FLAG_SQ_SCHED, q);
		q->elevator = NULL;
		q->nr_requests = q->tag_set->queue_depth;
		return 0;
block/blk-mq.c  (+2 −16)
@@ -2142,20 +2142,6 @@ void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
}
EXPORT_SYMBOL(blk_mq_run_hw_queue);

-/*
- * Is the request queue handled by an IO scheduler that does not respect
- * hardware queues when dispatching?
- */
-static bool blk_mq_has_sqsched(struct request_queue *q)
-{
-	struct elevator_queue *e = q->elevator;
-
-	if (e && e->type->ops.dispatch_request &&
-	    !(e->type->elevator_features & ELEVATOR_F_MQ_AWARE))
-		return true;
-	return false;
-}
-
/*
 * Return prefered queue to dispatch from (if any) for non-mq aware IO
 * scheduler.
@@ -2188,7 +2174,7 @@ void blk_mq_run_hw_queues(struct request_queue *q, bool async)
	unsigned long i;

	sq_hctx = NULL;
-	if (blk_mq_has_sqsched(q))
+	if (blk_queue_sq_sched(q))
		sq_hctx = blk_mq_get_sq_hctx(q);
	queue_for_each_hw_ctx(q, hctx, i) {
		if (blk_mq_hctx_stopped(hctx))
@@ -2216,7 +2202,7 @@ void blk_mq_delay_run_hw_queues(struct request_queue *q, unsigned long msecs)
	unsigned long i;

	sq_hctx = NULL;
-	if (blk_mq_has_sqsched(q))
+	if (blk_queue_sq_sched(q))
		sq_hctx = blk_mq_get_sq_hctx(q);
	queue_for_each_hw_ctx(q, hctx, i) {
		if (blk_mq_hctx_stopped(hctx))
block/kyber-iosched.c  (+2 −1)
@@ -421,6 +421,8 @@ static int kyber_init_sched(struct request_queue *q, struct elevator_type *e)

	blk_stat_enable_accounting(q);

+	blk_queue_flag_clear(QUEUE_FLAG_SQ_SCHED, q);
+
	eq->elevator_data = kqd;
	q->elevator = eq;

@@ -1033,7 +1035,6 @@ static struct elevator_type kyber_sched = {
#endif
	.elevator_attrs = kyber_sched_attrs,
	.elevator_name = "kyber",
-	.elevator_features = ELEVATOR_F_MQ_AWARE,
	.elevator_owner = THIS_MODULE,
};

block/mq-deadline.c  (+3 −0)
@@ -642,6 +642,9 @@ static int dd_init_sched(struct request_queue *q, struct elevator_type *e)
	spin_lock_init(&dd->lock);
	spin_lock_init(&dd->zone_lock);

+	/* We dispatch from request queue wide instead of hw queue */
+	blk_queue_flag_set(QUEUE_FLAG_SQ_SCHED, q);
+
	q->elevator = eq;
	return 0;
