Commit b445547e authored by Kashyap Desai, committed by Jens Axboe

blk-mq, elevator: Count requests per hctx to improve performance



High CPU utilization in native_queued_spin_lock_slowpath() due to lock
contention is possible for the mq-deadline and bfq I/O schedulers
when nr_hw_queues is more than one.

This is because the kblockd workqueue can submit IO from all online CPUs
(through blk_mq_run_hw_queues()) even though only one hctx has pending
commands.

The .has_work elevator callback for the mq-deadline and bfq schedulers
reports pending work whenever there are any IOs on the request queue, but
it does not take the hctx context into account.

Add a per-hctx 'elevator_queued' counter so that the elevator is not
triggered when no requests are queued on that hctx.
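
To make the idea concrete outside the patch itself, here is a minimal
userspace sketch of the same gating pattern, using C11 atomics in place
of the kernel's atomic_t. All names in it (struct hw_queue, hq_*,
scheduler_state_has_work()) are hypothetical illustrations, not kernel
interfaces:

/*
 * Sketch of the per-hw-queue gating pattern: a lock-free counter
 * answers the common "nothing queued here" case before any scheduler
 * lock is taken.
 */
#include <stdatomic.h>
#include <stdbool.h>

struct hw_queue {
	atomic_int elevator_queued;	/* requests queued on this hw queue */
};

/* Mirrors atomic_set(&hctx->elevator_queued, 0) at hctx allocation. */
static void hq_init(struct hw_queue *hq)
{
	atomic_init(&hq->elevator_queued, 0);
}

/* Insert path: count the request against its hardware queue. */
static void hq_insert_request(struct hw_queue *hq)
{
	atomic_fetch_add_explicit(&hq->elevator_queued, 1,
				  memory_order_relaxed);
}

/* Dispatch/completion path: the request leaves the scheduler. */
static void hq_complete_request(struct hw_queue *hq)
{
	atomic_fetch_sub_explicit(&hq->elevator_queued, 1,
				  memory_order_relaxed);
}

/* Stand-in for the scheduler's real, lock-taking has_work check. */
static bool scheduler_state_has_work(void)
{
	return true;
}

/* .has_work analogue: one lock-free read filters out idle hw queues. */
static bool hq_has_work(struct hw_queue *hq)
{
	if (atomic_load_explicit(&hq->elevator_queued,
				 memory_order_relaxed) == 0)
		return false;

	return scheduler_state_has_work();
}

The design point is that the frequent "nothing queued on this hctx" case
is answered by a single relaxed atomic read, instead of acquiring
dd->lock or inspecting bfqd state only to find there is no work.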

[jpg: Relocated atomic_dec() in dd_dispatch_request(), update commit message per Kashyap]

Signed-off-by: Kashyap Desai <kashyap.desai@broadcom.com>
Signed-off-by: Hannes Reinecke <hare@suse.de>
Signed-off-by: John Garry <john.garry@huawei.com>
Tested-by: Douglas Gilbert <dgilbert@interlog.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent f1b49fdc

block/bfq-iosched.c  +5 −0
@@ -4640,6 +4640,9 @@ static bool bfq_has_work(struct blk_mq_hw_ctx *hctx)
 {
 	struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;
 
+	if (!atomic_read(&hctx->elevator_queued))
+		return false;
+
 	/*
 	 * Avoiding lock: a race on bfqd->busy_queues should cause at
 	 * most a call to dispatch for nothing
@@ -5554,6 +5557,7 @@ static void bfq_insert_requests(struct blk_mq_hw_ctx *hctx,
 		rq = list_first_entry(list, struct request, queuelist);
 		list_del_init(&rq->queuelist);
 		bfq_insert_request(hctx, rq, at_head);
+		atomic_inc(&hctx->elevator_queued);
 	}
 }

@@ -5933,6 +5937,7 @@ static void bfq_finish_requeue_request(struct request *rq)
 
 		bfq_completed_request(bfqq, bfqd);
 		bfq_finish_requeue_request_body(bfqq);
+		atomic_dec(&rq->mq_hctx->elevator_queued);
 
 		spin_unlock_irqrestore(&bfqd->lock, flags);
 	} else {

block/blk-mq.c  +1 −0
@@ -2660,6 +2660,7 @@ blk_mq_alloc_hctx(struct request_queue *q, struct blk_mq_tag_set *set,
 		goto free_hctx;
 
 	atomic_set(&hctx->nr_active, 0);
+	atomic_set(&hctx->elevator_queued, 0);
 	if (node == NUMA_NO_NODE)
 		node = set->numa_node;
 	hctx->numa_node = node;

block/mq-deadline.c  +6 −0
@@ -386,6 +386,8 @@ static struct request *dd_dispatch_request(struct blk_mq_hw_ctx *hctx)
 	spin_lock(&dd->lock);
 	rq = __dd_dispatch_request(dd);
 	spin_unlock(&dd->lock);
+	if (rq)
+		atomic_dec(&rq->mq_hctx->elevator_queued);
 
 	return rq;
 }
@@ -533,6 +535,7 @@ static void dd_insert_requests(struct blk_mq_hw_ctx *hctx,
 		rq = list_first_entry(list, struct request, queuelist);
 		list_del_init(&rq->queuelist);
 		dd_insert_request(hctx, rq, at_head);
+		atomic_inc(&hctx->elevator_queued);
 	}
 	spin_unlock(&dd->lock);
 }
@@ -579,6 +582,9 @@ static bool dd_has_work(struct blk_mq_hw_ctx *hctx)
 {
 	struct deadline_data *dd = hctx->queue->elevator->elevator_data;
 
+	if (!atomic_read(&hctx->elevator_queued))
+		return false;
+
 	return !list_empty_careful(&dd->dispatch) ||
 		!list_empty_careful(&dd->fifo_list[0]) ||
 		!list_empty_careful(&dd->fifo_list[1]);

include/linux/blk-mq.h  +4 −0
@@ -139,6 +139,10 @@ struct blk_mq_hw_ctx {
 	 * shared across request queues.
 	 */
 	atomic_t		nr_active;
+	/**
+	 * @elevator_queued: Number of queued requests on hctx.
+	 */
+	atomic_t                elevator_queued;
 
 	/** @cpuhp_online: List to store request if CPU is going to die */
 	struct hlist_node	cpuhp_online;