Commit 1c0706a7 authored by John Garry, committed by Jens Axboe
Browse files

blk-mq: Pass flags for tag init/free



Pass hctx/tagset flags argument down to blk_mq_init_tags() and
blk_mq_free_tags() for selective init/free.

For now, make it include the alloc policy flag, which can be evaluated
when needed (in blk_mq_init_tags()).

Signed-off-by: John Garry <john.garry@huawei.com>
Tested-by: Douglas Gilbert <dgilbert@interlog.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 4d063237
Loading
Loading
Loading
Loading
+8 −3
Original line number Diff line number Diff line
@@ -560,9 +560,11 @@ static void blk_mq_sched_free_tags(struct blk_mq_tag_set *set,
				   struct blk_mq_hw_ctx *hctx,
				   unsigned int hctx_idx)
{
	unsigned int flags = set->flags;

	if (hctx->sched_tags) {
		blk_mq_free_rqs(set, hctx->sched_tags, hctx_idx);
		blk_mq_free_rq_map(hctx->sched_tags);
		blk_mq_free_rq_map(hctx->sched_tags, flags);
		hctx->sched_tags = NULL;
	}
}
@@ -572,10 +574,11 @@ static int blk_mq_sched_alloc_tags(struct request_queue *q,
				   unsigned int hctx_idx)
{
	struct blk_mq_tag_set *set = q->tag_set;
	unsigned int flags = set->flags;
	int ret;

	hctx->sched_tags = blk_mq_alloc_rq_map(set, hctx_idx, q->nr_requests,
					       set->reserved_tags);
					       set->reserved_tags, flags);
	if (!hctx->sched_tags)
		return -ENOMEM;

@@ -593,8 +596,10 @@ static void blk_mq_sched_tags_teardown(struct request_queue *q)
	int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		unsigned int flags = hctx->flags;

		if (hctx->sched_tags) {
			blk_mq_free_rq_map(hctx->sched_tags);
			blk_mq_free_rq_map(hctx->sched_tags, flags);
			hctx->sched_tags = NULL;
		}
	}
+7 −5
Original line number Diff line number Diff line
@@ -449,8 +449,9 @@ static int blk_mq_init_bitmap_tags(struct blk_mq_tags *tags,

struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,
				     unsigned int reserved_tags,
				     int node, int alloc_policy)
				     int node, unsigned int flags)
{
	int alloc_policy = BLK_MQ_FLAG_TO_ALLOC_POLICY(flags);
	struct blk_mq_tags *tags;

	if (total_tags > BLK_MQ_TAG_MAX) {
@@ -472,7 +473,7 @@ struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,
	return tags;
}

void blk_mq_free_tags(struct blk_mq_tags *tags)
void blk_mq_free_tags(struct blk_mq_tags *tags, unsigned int flags)
{
	sbitmap_queue_free(&tags->bitmap_tags);
	sbitmap_queue_free(&tags->breserved_tags);
@@ -494,6 +495,7 @@ int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
	 */
	if (tdepth > tags->nr_tags) {
		struct blk_mq_tag_set *set = hctx->queue->tag_set;
		unsigned int flags = set->flags;
		struct blk_mq_tags *new;
		bool ret;

@@ -508,17 +510,17 @@ int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
			return -EINVAL;

		new = blk_mq_alloc_rq_map(set, hctx->queue_num, tdepth,
				tags->nr_reserved_tags);
				tags->nr_reserved_tags, flags);
		if (!new)
			return -ENOMEM;
		ret = blk_mq_alloc_rqs(set, new, hctx->queue_num, tdepth);
		if (ret) {
			blk_mq_free_rq_map(new);
			blk_mq_free_rq_map(new, flags);
			return -ENOMEM;
		}

		blk_mq_free_rqs(set, *tagsptr, hctx->queue_num);
		blk_mq_free_rq_map(*tagsptr);
		blk_mq_free_rq_map(*tagsptr, flags);
		*tagsptr = new;
	} else {
		/*
+4 −3
Original line number Diff line number Diff line
@@ -21,9 +21,10 @@ struct blk_mq_tags {
	struct list_head page_list;
};


extern struct blk_mq_tags *blk_mq_init_tags(unsigned int nr_tags, unsigned int reserved_tags, int node, int alloc_policy);
extern void blk_mq_free_tags(struct blk_mq_tags *tags);
extern struct blk_mq_tags *blk_mq_init_tags(unsigned int nr_tags,
					unsigned int reserved_tags,
					int node, unsigned int flags);
extern void blk_mq_free_tags(struct blk_mq_tags *tags, unsigned int flags);

extern unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data);
extern void blk_mq_put_tag(struct blk_mq_tags *tags, struct blk_mq_ctx *ctx,
+13 −10
Original line number Diff line number Diff line
@@ -2296,20 +2296,21 @@ void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
	}
}

void blk_mq_free_rq_map(struct blk_mq_tags *tags)
void blk_mq_free_rq_map(struct blk_mq_tags *tags, unsigned int flags)
{
	kfree(tags->rqs);
	tags->rqs = NULL;
	kfree(tags->static_rqs);
	tags->static_rqs = NULL;

	blk_mq_free_tags(tags);
	blk_mq_free_tags(tags, flags);
}

struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
					unsigned int hctx_idx,
					unsigned int nr_tags,
					unsigned int reserved_tags)
					unsigned int reserved_tags,
					unsigned int flags)
{
	struct blk_mq_tags *tags;
	int node;
@@ -2318,8 +2319,7 @@ struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
	if (node == NUMA_NO_NODE)
		node = set->numa_node;

	tags = blk_mq_init_tags(nr_tags, reserved_tags, node,
				BLK_MQ_FLAG_TO_ALLOC_POLICY(set->flags));
	tags = blk_mq_init_tags(nr_tags, reserved_tags, node, flags);
	if (!tags)
		return NULL;

@@ -2327,7 +2327,7 @@ struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
				 GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
				 node);
	if (!tags->rqs) {
		blk_mq_free_tags(tags);
		blk_mq_free_tags(tags, flags);
		return NULL;
	}

@@ -2336,7 +2336,7 @@ struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
					node);
	if (!tags->static_rqs) {
		kfree(tags->rqs);
		blk_mq_free_tags(tags);
		blk_mq_free_tags(tags, flags);
		return NULL;
	}

@@ -2745,10 +2745,11 @@ static void blk_mq_init_cpu_queues(struct request_queue *q,
static bool __blk_mq_alloc_map_and_request(struct blk_mq_tag_set *set,
					int hctx_idx)
{
	unsigned int flags = set->flags;
	int ret = 0;

	set->tags[hctx_idx] = blk_mq_alloc_rq_map(set, hctx_idx,
					set->queue_depth, set->reserved_tags);
					set->queue_depth, set->reserved_tags, flags);
	if (!set->tags[hctx_idx])
		return false;

@@ -2757,7 +2758,7 @@ static bool __blk_mq_alloc_map_and_request(struct blk_mq_tag_set *set,
	if (!ret)
		return true;

	blk_mq_free_rq_map(set->tags[hctx_idx]);
	blk_mq_free_rq_map(set->tags[hctx_idx], flags);
	set->tags[hctx_idx] = NULL;
	return false;
}
@@ -2765,9 +2766,11 @@ static bool __blk_mq_alloc_map_and_request(struct blk_mq_tag_set *set,
static void blk_mq_free_map_and_requests(struct blk_mq_tag_set *set,
					 unsigned int hctx_idx)
{
	unsigned int flags = set->flags;

	if (set->tags && set->tags[hctx_idx]) {
		blk_mq_free_rqs(set, set->tags[hctx_idx], hctx_idx);
		blk_mq_free_rq_map(set->tags[hctx_idx]);
		blk_mq_free_rq_map(set->tags[hctx_idx], flags);
		set->tags[hctx_idx] = NULL;
	}
}
+3 −2
Original line number Diff line number Diff line
@@ -53,11 +53,12 @@ struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
 */
void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
		     unsigned int hctx_idx);
void blk_mq_free_rq_map(struct blk_mq_tags *tags);
void blk_mq_free_rq_map(struct blk_mq_tags *tags, unsigned int flags);
struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
					unsigned int hctx_idx,
					unsigned int nr_tags,
					unsigned int reserved_tags);
					unsigned int reserved_tags,
					unsigned int flags);
int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
		     unsigned int hctx_idx, unsigned int depth);