Commit 48b5c1fb authored by Jens Axboe
Browse files

block: only allocate poll_stats if there's a user of them



This is essentially never used, yet it's about 1/3rd of the total
queue size. Allocate it when needed, and don't embed it in the queue.

Kill the queue flag for this while at it, since we can just check the
assigned pointer now.

Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 25c4b5e0
Loading
Loading
Loading
Loading
+0 −1
Original line number Diff line number Diff line
@@ -122,7 +122,6 @@ static const char *const blk_queue_flag_name[] = {
	QUEUE_FLAG_NAME(FUA),
	QUEUE_FLAG_NAME(DAX),
	QUEUE_FLAG_NAME(STATS),
	QUEUE_FLAG_NAME(POLL_STATS),
	QUEUE_FLAG_NAME(REGISTERED),
	QUEUE_FLAG_NAME(QUIESCED),
	QUEUE_FLAG_NAME(PCI_P2PDMA),
+4 −6
Original line number Diff line number Diff line
@@ -4581,11 +4581,10 @@ EXPORT_SYMBOL_GPL(blk_mq_update_nr_hw_queues);
/* Enable polling stats and return whether they were already enabled. */
/*
 * NOTE(review): this span is a rendered diff hunk with the +/- markers
 * stripped, so pre-patch (removed) and post-patch (added) lines are
 * interleaved below; it is not a single compilable function body.
 * Per the commit message, the QUEUE_FLAG_POLL_STATS flag check is being
 * replaced by testing the q->poll_stat pointer directly.
 */
static bool blk_poll_stats_enable(struct request_queue *q)
{
	/* removed: flag-based "already enabled" check, superseded below */
	if (test_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags) ||
	    blk_queue_flag_test_and_set(QUEUE_FLAG_POLL_STATS, q))
	/* added: an assigned poll_stat pointer now means stats are enabled */
	if (q->poll_stat)
		return true;
	/* removed: callback registration moved into blk_stats_alloc_enable() */
	blk_stat_add_callback(q, q->poll_cb);
	return false;

	/* added: allocate q->poll_stat on demand and register the callback */
	return blk_stats_alloc_enable(q);
}

static void blk_mq_poll_stats_start(struct request_queue *q)
@@ -4594,8 +4593,7 @@ static void blk_mq_poll_stats_start(struct request_queue *q)
	 * We don't arm the callback if polling stats are not enabled or the
	 * callback is already active.
	 */
	if (!test_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags) ||
	    blk_stat_is_active(q->poll_cb))
	if (!q->poll_stat || blk_stat_is_active(q->poll_cb))
		return;

	blk_stat_activate_msecs(q->poll_cb, 100);
+18 −0
Original line number Diff line number Diff line
@@ -219,3 +219,21 @@ void blk_free_queue_stats(struct blk_queue_stats *stats)

	kfree(stats);
}

/*
 * blk_stats_alloc_enable - allocate the per-queue poll stats and enable them
 * @q: request queue to enable poll statistics for
 *
 * Returns true if poll stats were already enabled (a concurrent caller won
 * the race and installed its buckets first). Returns false when this call
 * performed the allocation and registered the stats callback — and also
 * when the allocation itself failed.
 */
bool blk_stats_alloc_enable(struct request_queue *q)
{
	struct blk_rq_stat *poll_stat;

	/*
	 * NOTE(review): GFP_ATOMIC suggests callers may be in a context
	 * that cannot sleep — confirm against the call sites.
	 */
	poll_stat = kcalloc(BLK_MQ_POLL_STATS_BKTS, sizeof(*poll_stat),
				GFP_ATOMIC);
	if (!poll_stat)
		return false;

	/*
	 * Publish the buckets atomically; if q->poll_stat was already set
	 * by a racing caller, free our allocation and report "already
	 * enabled" so the callback is not registered twice.
	 */
	if (cmpxchg(&q->poll_stat, NULL, poll_stat) != NULL) {
		kfree(poll_stat);
		return true;
	}

	blk_stat_add_callback(q, q->poll_cb);
	return false;
}
+1 −0
Original line number Diff line number Diff line
@@ -64,6 +64,7 @@ struct blk_stat_callback {

struct blk_queue_stats *blk_alloc_queue_stats(void);
void blk_free_queue_stats(struct blk_queue_stats *);
bool blk_stats_alloc_enable(struct request_queue *q);

void blk_stat_add(struct request *rq, u64 now);

+2 −1
Original line number Diff line number Diff line
@@ -785,11 +785,12 @@ static void blk_release_queue(struct kobject *kobj)

	might_sleep();

	if (test_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags))
	if (q->poll_stat)
		blk_stat_remove_callback(q, q->poll_cb);
	blk_stat_free_callback(q->poll_cb);

	blk_free_queue_stats(q->stats);
	kfree(q->poll_stat);

	blk_exit_queue(q);

Loading