Commit 26eda960 authored by Yufen Yu, committed by Zheng Zengkai
Browse files

blk-mq: fix kabi broken by "blk-mq: Use request queue-wide tags for tagset-wide sbitmap"

hulk inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I597XM


CVE: NA

---------------------------

Introduce struct request_queue_wrapper, which embeds struct request_queue
and carries the sched_bitmap_tags and sched_breserved_tags members added by
"blk-mq: Use request queue-wide tags for tagset-wide sbitmap". Allocating the
wrapper from blk_requestq_cachep and converting with queue_to_wrapper() keeps
the layout of struct request_queue unchanged and so avoids breaking kabi.

Signed-off-by: Yufen Yu <yuyufen@huawei.com>
Reviewed-by: Jason Yan <yanaijie@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
parent 580c1a56
Loading
Loading
Loading
Loading
+6 −4
Original line number Diff line number Diff line
@@ -517,13 +517,15 @@ static void blk_timeout_work(struct work_struct *work)
struct request_queue *blk_alloc_queue(int node_id)
{
	struct request_queue *q;
	struct request_queue_wrapper *q_wrapper;
	int ret;

	q = kmem_cache_alloc_node(blk_requestq_cachep,
	q_wrapper = kmem_cache_alloc_node(blk_requestq_cachep,
				GFP_KERNEL | __GFP_ZERO, node_id);
	if (!q)
	if (!q_wrapper)
		return NULL;

	q = &q_wrapper->q;
	q->last_merge = NULL;

	q->id = ida_simple_get(&blk_queue_ida, 0, 0, GFP_KERNEL);
@@ -594,7 +596,7 @@ struct request_queue *blk_alloc_queue(int node_id)
fail_id:
	ida_simple_remove(&blk_queue_ida, q->id);
fail_q:
	kmem_cache_free(blk_requestq_cachep, q);
	kmem_cache_free(blk_requestq_cachep, q_wrapper);
	return NULL;
}
EXPORT_SYMBOL(blk_alloc_queue);
@@ -1796,7 +1798,7 @@ int __init blk_dev_init(void)
		panic("Failed to create kblockd\n");

	blk_requestq_cachep = kmem_cache_create("request_queue",
			sizeof(struct request_queue), 0, SLAB_PANIC, NULL);
			sizeof(struct request_queue_wrapper), 0, SLAB_PANIC, NULL);

	blk_debugfs_root = debugfs_create_dir("block", NULL);

+10 −7
Original line number Diff line number Diff line
@@ -548,13 +548,14 @@ static int blk_mq_init_sched_shared_sbitmap(struct request_queue *queue)
	int alloc_policy = BLK_MQ_FLAG_TO_ALLOC_POLICY(set->flags);
	struct blk_mq_hw_ctx *hctx;
	int ret, i;
	struct request_queue_wrapper *q_wrapper = queue_to_wrapper(queue);

	/*
	 * Set initial depth at max so that we don't need to reallocate for
	 * updating nr_requests.
	 */
	ret = blk_mq_init_bitmaps(&queue->sched_bitmap_tags,
				  &queue->sched_breserved_tags,
	ret = blk_mq_init_bitmaps(&q_wrapper->sched_bitmap_tags,
				  &q_wrapper->sched_breserved_tags,
				  MAX_SCHED_RQ, set->reserved_tags,
				  set->numa_node, alloc_policy);
	if (ret)
@@ -562,12 +563,12 @@ static int blk_mq_init_sched_shared_sbitmap(struct request_queue *queue)

	queue_for_each_hw_ctx(queue, hctx, i) {
		hctx->sched_tags->bitmap_tags =
					&queue->sched_bitmap_tags;
					&q_wrapper->sched_bitmap_tags;
		hctx->sched_tags->breserved_tags =
					&queue->sched_breserved_tags;
					&q_wrapper->sched_breserved_tags;
	}

	sbitmap_queue_resize(&queue->sched_bitmap_tags,
	sbitmap_queue_resize(&q_wrapper->sched_bitmap_tags,
			     queue->nr_requests - set->reserved_tags);

	return 0;
@@ -575,8 +576,10 @@ static int blk_mq_init_sched_shared_sbitmap(struct request_queue *queue)

static void blk_mq_exit_sched_shared_sbitmap(struct request_queue *queue)
{
	sbitmap_queue_free(&queue->sched_bitmap_tags);
	sbitmap_queue_free(&queue->sched_breserved_tags);
	struct request_queue_wrapper *q_wrapper = queue_to_wrapper(queue);

	sbitmap_queue_free(&q_wrapper->sched_bitmap_tags);
	sbitmap_queue_free(&q_wrapper->sched_breserved_tags);
}

int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
+4 −3
Original line number Diff line number Diff line
@@ -3671,6 +3671,7 @@ int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
	struct blk_mq_tag_set *set = q->tag_set;
	struct blk_mq_hw_ctx *hctx;
	int i, ret;
	struct request_queue_wrapper *q_wrapper = queue_to_wrapper(q);

	if (!set)
		return -EINVAL;
@@ -3699,9 +3700,9 @@ int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
							nr, true);
			if (blk_mq_is_sbitmap_shared(set->flags)) {
				hctx->sched_tags->bitmap_tags =
					&q->sched_bitmap_tags;
					&q_wrapper->sched_bitmap_tags;
				hctx->sched_tags->breserved_tags =
					&q->sched_breserved_tags;
					&q_wrapper->sched_breserved_tags;
			}
		}
		if (ret)
@@ -3712,7 +3713,7 @@ int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
	if (!ret) {
		q->nr_requests = nr;
		if (q->elevator && blk_mq_is_sbitmap_shared(set->flags))
			sbitmap_queue_resize(&q->sched_bitmap_tags,
			sbitmap_queue_resize(&q_wrapper->sched_bitmap_tags,
					     nr - set->reserved_tags);
	}

+1 −1
Original line number Diff line number Diff line
@@ -726,7 +726,7 @@ static void blk_free_queue_rcu(struct rcu_head *rcu_head)
{
	struct request_queue *q = container_of(rcu_head, struct request_queue,
					       rcu_head);
	kmem_cache_free(blk_requestq_cachep, q);
	kmem_cache_free(blk_requestq_cachep, queue_to_wrapper(q));
}

/* Unconfigure the I/O scheduler and dissociate from the cgroup controller. */
+13 −0
Original line number Diff line number Diff line
@@ -28,6 +28,19 @@ struct blk_flush_queue {
	spinlock_t		mq_flush_lock;
};

/*
 * The wrapper of request_queue to fix kabi while adding members.
 */
struct request_queue_wrapper {
	struct request_queue q;

	struct sbitmap_queue	sched_bitmap_tags;
	struct sbitmap_queue	sched_breserved_tags;
};

#define queue_to_wrapper(queue) \
	container_of(queue, struct request_queue_wrapper, q)

extern struct kmem_cache *blk_requestq_cachep;
extern struct kobj_type blk_queue_ktype;
extern struct ida blk_queue_ida;
Loading