Commit 46efdd72 authored by John Garry's avatar John Garry Committed by Zheng Zengkai
Browse files

blk-mq: Stop using pointers for blk_mq_tags bitmap tags

mainline inclusion
from mainline-v5.16-rc1
commit ae0f1a73
category: performance
bugzilla: 186917, https://gitee.com/openeuler/kernel/issues/I5N1S5
CVE: NA

Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=ae0f1a732f4a5db284e2af02c305255734efd19c



--------------------------------

Now that we use shared tags for shared sbitmap support, we don't require
the tags sbitmap pointers, so drop them.

This essentially reverts commit 222a5ae0 ("blk-mq: Use pointers for
blk_mq_tags bitmap tags").

Function blk_mq_init_bitmap_tags() is also removed, since it would be only
a wrapper for blk_mq_init_bitmaps().

Reviewed-by: default avatarMing Lei <ming.lei@redhat.com>
Reviewed-by: default avatarHannes Reinecke <hare@suse.de>
Signed-off-by: default avatarJohn Garry <john.garry@huawei.com>
Link: https://lore.kernel.org/r/1633429419-228500-14-git-send-email-john.garry@huawei.com


Signed-off-by: default avatarJens Axboe <axboe@kernel.dk>
Signed-off-by: default avatarYu Kuai <yukuai3@huawei.com>
Reviewed-by: default avatarYu Kuai <yukuai3@huawei.com>
Reviewed-by: default avatarJason Yan <yanaijie@huawei.com>
Signed-off-by: default avatarZheng Zengkai <zhengzengkai@huawei.com>
parent bb59b765
Loading
Loading
Loading
Loading
+2 −2
Original line number Diff line number Diff line
@@ -6381,8 +6381,8 @@ static void bfq_depth_updated(struct blk_mq_hw_ctx *hctx)
	struct blk_mq_tags *tags = hctx->sched_tags;
	unsigned int min_shallow;

	min_shallow = bfq_update_depths(bfqd, tags->bitmap_tags);
	sbitmap_queue_min_shallow_depth(tags->bitmap_tags, min_shallow);
	min_shallow = bfq_update_depths(bfqd, &tags->bitmap_tags);
	sbitmap_queue_min_shallow_depth(&tags->bitmap_tags, min_shallow);
}

static int bfq_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int index)
+4 −4
Original line number Diff line number Diff line
@@ -474,11 +474,11 @@ static void blk_mq_debugfs_tags_show(struct seq_file *m,
		   atomic_read(&tags->pending_queues));

	seq_puts(m, "\nbitmap_tags:\n");
	sbitmap_queue_show(tags->bitmap_tags, m);
	sbitmap_queue_show(&tags->bitmap_tags, m);

	if (tags->nr_reserved_tags) {
		seq_puts(m, "\nbreserved_tags:\n");
		sbitmap_queue_show(tags->breserved_tags, m);
		sbitmap_queue_show(&tags->breserved_tags, m);
	}
}

@@ -509,7 +509,7 @@ static int hctx_tags_bitmap_show(void *data, struct seq_file *m)
	if (res)
		goto out;
	if (hctx->tags)
		sbitmap_bitmap_show(&hctx->tags->bitmap_tags->sb, m);
		sbitmap_bitmap_show(&hctx->tags->bitmap_tags.sb, m);
	mutex_unlock(&q->sysfs_lock);

out:
@@ -543,7 +543,7 @@ static int hctx_sched_tags_bitmap_show(void *data, struct seq_file *m)
	if (res)
		goto out;
	if (hctx->sched_tags)
		sbitmap_bitmap_show(&hctx->sched_tags->bitmap_tags->sb, m);
		sbitmap_bitmap_show(&hctx->sched_tags->bitmap_tags.sb, m);
	mutex_unlock(&q->sysfs_lock);

out:
+20 −36
Original line number Diff line number Diff line
@@ -46,9 +46,9 @@ bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
 */
void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool include_reserve)
{
	sbitmap_queue_wake_all(tags->bitmap_tags);
	sbitmap_queue_wake_all(&tags->bitmap_tags);
	if (include_reserve)
		sbitmap_queue_wake_all(tags->breserved_tags);
		sbitmap_queue_wake_all(&tags->breserved_tags);
}

/*
@@ -158,10 +158,10 @@ unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
			WARN_ON_ONCE(1);
			return BLK_MQ_NO_TAG;
		}
		bt = tags->breserved_tags;
		bt = &tags->breserved_tags;
		tag_offset = 0;
	} else {
		bt = tags->bitmap_tags;
		bt = &tags->bitmap_tags;
		tag_offset = tags->nr_reserved_tags;
	}

@@ -212,9 +212,9 @@ unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
						data->ctx);
		tags = blk_mq_tags_from_data(data);
		if (data->flags & BLK_MQ_REQ_RESERVED)
			bt = tags->breserved_tags;
			bt = &tags->breserved_tags;
		else
			bt = tags->bitmap_tags;
			bt = &tags->bitmap_tags;

		/*
		 * If destination hw queue is changed, fake wake up on
@@ -250,10 +250,10 @@ void blk_mq_put_tag(struct blk_mq_tags *tags, struct blk_mq_ctx *ctx,
		const int real_tag = tag - tags->nr_reserved_tags;

		BUG_ON(real_tag >= tags->nr_tags);
		sbitmap_queue_clear(tags->bitmap_tags, real_tag, ctx->cpu);
		sbitmap_queue_clear(&tags->bitmap_tags, real_tag, ctx->cpu);
	} else {
		BUG_ON(tag >= tags->nr_reserved_tags);
		sbitmap_queue_clear(tags->breserved_tags, tag, ctx->cpu);
		sbitmap_queue_clear(&tags->breserved_tags, tag, ctx->cpu);
	}
}

@@ -404,9 +404,9 @@ static void __blk_mq_all_tag_iter(struct blk_mq_tags *tags,
	WARN_ON_ONCE(flags & BT_TAG_ITER_RESERVED);

	if (tags->nr_reserved_tags)
		bt_tags_for_each(tags, tags->breserved_tags, fn, priv,
		bt_tags_for_each(tags, &tags->breserved_tags, fn, priv,
				 flags | BT_TAG_ITER_RESERVED);
	bt_tags_for_each(tags, tags->bitmap_tags, fn, priv, flags);
	bt_tags_for_each(tags, &tags->bitmap_tags, fn, priv, flags);
}

/**
@@ -523,8 +523,8 @@ void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
			continue;

		if (tags->nr_reserved_tags)
			bt_for_each(hctx, tags->breserved_tags, fn, priv, true);
		bt_for_each(hctx, tags->bitmap_tags, fn, priv, false);
			bt_for_each(hctx, &tags->breserved_tags, fn, priv, true);
		bt_for_each(hctx, &tags->bitmap_tags, fn, priv, false);
	}
	blk_queue_exit(q);
}
@@ -556,24 +556,6 @@ int blk_mq_init_bitmaps(struct sbitmap_queue *bitmap_tags,
	return -ENOMEM;
}

static int blk_mq_init_bitmap_tags(struct blk_mq_tags *tags,
				   int node, int alloc_policy)
{
	int ret;

	ret = blk_mq_init_bitmaps(&tags->__bitmap_tags,
				  &tags->__breserved_tags,
				  tags->nr_tags, tags->nr_reserved_tags,
				  node, alloc_policy);
	if (ret)
		return ret;

	tags->bitmap_tags = &tags->__bitmap_tags;
	tags->breserved_tags = &tags->__breserved_tags;

	return 0;
}

struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,
				     unsigned int reserved_tags,
				     int node, int alloc_policy)
@@ -593,7 +575,9 @@ struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,
	tags->nr_reserved_tags = reserved_tags;
	spin_lock_init(&tags->lock);

	if (blk_mq_init_bitmap_tags(tags, node, alloc_policy) < 0) {
	if (blk_mq_init_bitmaps(&tags->bitmap_tags, &tags->breserved_tags,
				total_tags, reserved_tags, node,
				alloc_policy) < 0) {
		kfree(tags);
		return NULL;
	}
@@ -602,8 +586,8 @@ struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,

void blk_mq_free_tags(struct blk_mq_tags *tags)
{
	sbitmap_queue_free(tags->bitmap_tags);
	sbitmap_queue_free(tags->breserved_tags);
	sbitmap_queue_free(&tags->bitmap_tags);
	sbitmap_queue_free(&tags->breserved_tags);
	kfree(tags);
}

@@ -652,7 +636,7 @@ int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
		 * Don't need (or can't) update reserved tags here, they
		 * remain static and should never need resizing.
		 */
		sbitmap_queue_resize(tags->bitmap_tags,
		sbitmap_queue_resize(&tags->bitmap_tags,
				tdepth - tags->nr_reserved_tags);
	}

@@ -663,12 +647,12 @@ void blk_mq_tag_resize_shared_sbitmap(struct blk_mq_tag_set *set, unsigned int s
{
	struct blk_mq_tags *tags = set->shared_sbitmap_tags;

	sbitmap_queue_resize(&tags->__bitmap_tags, size - set->reserved_tags);
	sbitmap_queue_resize(&tags->bitmap_tags, size - set->reserved_tags);
}

void blk_mq_tag_update_sched_shared_sbitmap(struct request_queue *q)
{
	sbitmap_queue_resize(q->shared_sbitmap_tags->bitmap_tags,
	sbitmap_queue_resize(&q->shared_sbitmap_tags->bitmap_tags,
			     q->nr_requests - q->tag_set->reserved_tags);
}

+2 −5
Original line number Diff line number Diff line
@@ -19,11 +19,8 @@ struct blk_mq_tags {
	 */
	atomic_t pending_queues;

	struct sbitmap_queue *bitmap_tags;
	struct sbitmap_queue *breserved_tags;

	struct sbitmap_queue __bitmap_tags;
	struct sbitmap_queue __breserved_tags;
	struct sbitmap_queue bitmap_tags;
	struct sbitmap_queue breserved_tags;

	struct request **rqs;
	struct request **static_rqs;
+4 −4
Original line number Diff line number Diff line
@@ -1169,14 +1169,14 @@ static inline unsigned int queued_to_index(unsigned int queued)

static bool __blk_mq_get_driver_tag(struct request *rq)
{
	struct sbitmap_queue *bt = rq->mq_hctx->tags->bitmap_tags;
	struct sbitmap_queue *bt = &rq->mq_hctx->tags->bitmap_tags;
	unsigned int tag_offset = rq->mq_hctx->tags->nr_reserved_tags;
	int tag;

	blk_mq_tag_busy(rq->mq_hctx);

	if (blk_mq_tag_is_reserved(rq->mq_hctx->sched_tags, rq->internal_tag)) {
		bt = rq->mq_hctx->tags->breserved_tags;
		bt = &rq->mq_hctx->tags->breserved_tags;
		tag_offset = 0;
	} else {
		if (!hctx_may_queue(rq->mq_hctx, bt))
@@ -1222,7 +1222,7 @@ static int blk_mq_dispatch_wake(wait_queue_entry_t *wait, unsigned mode,
		struct sbitmap_queue *sbq;

		list_del_init(&wait->entry);
		sbq = hctx->tags->bitmap_tags;
		sbq = &hctx->tags->bitmap_tags;
		atomic_dec(&sbq->ws_active);
	}
	spin_unlock(&hctx->dispatch_wait_lock);
@@ -1240,7 +1240,7 @@ static int blk_mq_dispatch_wake(wait_queue_entry_t *wait, unsigned mode,
static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx *hctx,
				 struct request *rq)
{
	struct sbitmap_queue *sbq = hctx->tags->bitmap_tags;
	struct sbitmap_queue *sbq = &hctx->tags->bitmap_tags;
	struct wait_queue_head *wq;
	wait_queue_entry_t *wait;
	bool ret;
Loading