Commit b3260e04 authored by John Garry, committed by Zheng Zengkai
Browse files

blk-mq: Refactor and rename blk_mq_free_map_and_{requests->rqs}()

mainline inclusion
from mainline-v5.16-rc1
commit 645db34e
category: performance
bugzilla: 186917, https://gitee.com/openeuler/kernel/issues/I5N1S5
CVE: NA

Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=645db34e50501aac141713fb47a315e5202ff890



--------------------------------

Refactor blk_mq_free_map_and_requests() such that it can be used at many
sites at which the tag map and rqs are freed.

Also rename to blk_mq_free_map_and_rqs(), which is shorter and matches the
alloc equivalent.

Suggested-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: John Garry <john.garry@huawei.com>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Link: https://lore.kernel.org/r/1633429419-228500-12-git-send-email-john.garry@huawei.com


Signed-off-by: Jens Axboe <axboe@kernel.dk>

Conflict: commit a846a8e6 ("blk-mq: don't free tags if the tag_set is
used by other device in queue initialztion") is already backported,
blk_mq_free_map_and_rqs() is moved to __blk_mq_update_nr_hw_queues()
instead of blk_mq_realloc_hw_ctxs().
Signed-off-by: Yu Kuai <yukuai3@huawei.com>
Reviewed-by: Yu Kuai <yukuai3@huawei.com>
Reviewed-by: Jason Yan <yanaijie@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
parent 625089f4
Loading
Loading
Loading
Loading
+1 −2
Original line number Diff line number Diff line
@@ -674,8 +674,7 @@ int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
		if (!new)
			return -ENOMEM;

		blk_mq_free_rqs(set, *tagsptr, hctx->queue_num);
		blk_mq_free_rq_map(*tagsptr, set->flags);
		blk_mq_free_map_and_rqs(set, *tagsptr, hctx->queue_num);
		*tagsptr = new;
	} else {
		/*
+23 −15
Original line number Diff line number Diff line
@@ -3016,15 +3016,15 @@ static bool __blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set *set,
	return set->tags[hctx_idx];
}

static void blk_mq_free_map_and_requests(struct blk_mq_tag_set *set,
void blk_mq_free_map_and_rqs(struct blk_mq_tag_set *set,
			     struct blk_mq_tags *tags,
			     unsigned int hctx_idx)
{
	unsigned int flags = set->flags;

	if (set->tags && set->tags[hctx_idx]) {
		blk_mq_free_rqs(set, set->tags[hctx_idx], hctx_idx);
		blk_mq_free_rq_map(set->tags[hctx_idx], flags);
		set->tags[hctx_idx] = NULL;
	if (tags) {
		blk_mq_free_rqs(set, tags, hctx_idx);
		blk_mq_free_rq_map(tags, flags);
	}
}

@@ -3105,8 +3105,10 @@ static void blk_mq_map_swqueue(struct request_queue *q)
			 * fallback in case of a new remap fails
			 * allocation
			 */
			if (i && set->tags[i])
				blk_mq_free_map_and_requests(set, i);
			if (i && set->tags[i]) {
				blk_mq_free_map_and_rqs(set, set->tags[i], i);
				set->tags[i] = NULL;
			}

			hctx->tags = NULL;
			continue;
@@ -3533,8 +3535,10 @@ static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
	return 0;

out_unwind:
	while (--i >= 0)
		blk_mq_free_map_and_requests(set, i);
	while (--i >= 0) {
		blk_mq_free_map_and_rqs(set, set->tags[i], i);
		set->tags[i] = NULL;
	}

	return -ENOMEM;
}
@@ -3724,8 +3728,10 @@ int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
	return 0;

out_free_mq_rq_maps:
	for (i = 0; i < set->nr_hw_queues; i++)
		blk_mq_free_map_and_requests(set, i);
	for (i = 0; i < set->nr_hw_queues; i++) {
		blk_mq_free_map_and_rqs(set, set->tags[i], i);
		set->tags[i] = NULL;
	}
out_free_mq_map:
	for (i = 0; i < set->nr_maps; i++) {
		kfree(set->map[i].mq_map);
@@ -3741,8 +3747,10 @@ void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
{
	int i, j;

	for (i = 0; i < set->nr_hw_queues; i++)
		blk_mq_free_map_and_requests(set, i);
	for (i = 0; i < set->nr_hw_queues; i++) {
		blk_mq_free_map_and_rqs(set, set->tags[i], i);
		set->tags[i] = NULL;
	}

	if (blk_mq_is_sbitmap_shared(set->flags))
		blk_mq_exit_shared_sbitmap(set);
@@ -3932,7 +3940,7 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
			pr_warn("Increasing nr_hw_queues to %d fails, fallback to %d\n",
					nr_hw_queues, prev_nr_hw_queues);
			for (; i < set->nr_hw_queues; i++)
				blk_mq_free_map_and_requests(set, i);
				blk_mq_free_map_and_rqs(set, set->tags[i], i);

			set->nr_hw_queues = prev_nr_hw_queues;
			blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
+3 −1
Original line number Diff line number Diff line
@@ -57,7 +57,9 @@ void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
void blk_mq_free_rq_map(struct blk_mq_tags *tags, unsigned int flags);
struct blk_mq_tags *blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set *set,
				unsigned int hctx_idx, unsigned int depth);

void blk_mq_free_map_and_rqs(struct blk_mq_tag_set *set,
			     struct blk_mq_tags *tags,
			     unsigned int hctx_idx);
/*
 * Internal helpers for request insertion into sw queues
 */