Commit 3d745ea5 authored by Christoph Hellwig, committed by Jens Axboe
Browse files

block: simplify queue allocation



Current make_request based drivers use either blk_alloc_queue_node or
blk_alloc_queue to allocate a queue, and then set up the make_request_fn
function pointer and a few parameters using the blk_queue_make_request
helper.  Simplify this by passing the make_request pointer to
blk_alloc_queue, and while at it merge the _node variant into the main
helper by always passing a node_id, and remove the superfluous gfp_mask
parameter.  A lower-level __blk_alloc_queue is kept for the blk-mq case.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent ff27668c
Loading
Loading
Loading
Loading
+1 −2
Original line number Diff line number Diff line
@@ -118,12 +118,11 @@ static int __init nfhd_init_one(int id, u32 blocks, u32 bsize)
	dev->bsize = bsize;
	dev->bshift = ffs(bsize) - 10;

	dev->queue = blk_alloc_queue(GFP_KERNEL);
	dev->queue = blk_alloc_queue(nfhd_make_request, NUMA_NO_NODE);
	if (dev->queue == NULL)
		goto free_dev;

	dev->queue->queuedata = dev;
	blk_queue_make_request(dev->queue, nfhd_make_request);
	blk_queue_logical_block_size(dev->queue, bsize);

	dev->disk = alloc_disk(16);
+1 −2
Original line number Diff line number Diff line
@@ -267,13 +267,12 @@ static int __init simdisk_setup(struct simdisk *dev, int which,
	spin_lock_init(&dev->lock);
	dev->users = 0;

	dev->queue = blk_alloc_queue(GFP_KERNEL);
	dev->queue = blk_alloc_queue(simdisk_make_request, NUMA_NO_NODE);
	if (dev->queue == NULL) {
		pr_err("blk_alloc_queue failed\n");
		goto out_alloc_queue;
	}

	blk_queue_make_request(dev->queue, simdisk_make_request);
	dev->queue->queuedata = dev;

	dev->gd = alloc_disk(SIMDISK_MINORS);
+1 −1
Original line number Diff line number Diff line
@@ -1010,7 +1010,7 @@ blkcg_css_alloc(struct cgroup_subsys_state *parent_css)
 * blkcg_init_queue - initialize blkcg part of request queue
 * @q: request_queue to initialize
 *
 * Called from blk_alloc_queue_node(). Responsible for initializing blkcg
 * Called from __blk_alloc_queue(). Responsible for initializing blkcg
 * part of new request_queue @q.
 *
 * RETURNS:
+23 −16
Original line number Diff line number Diff line
@@ -388,12 +388,6 @@ void blk_cleanup_queue(struct request_queue *q)
}
EXPORT_SYMBOL(blk_cleanup_queue);

/*
 * Allocate a request queue with no NUMA placement preference: a thin
 * wrapper that delegates to the _node variant with NUMA_NO_NODE.
 * Returns the new queue, or NULL on allocation failure (whatever
 * blk_alloc_queue_node() returns is passed straight through).
 */
struct request_queue *blk_alloc_queue(gfp_t gfp_mask)
{
	return blk_alloc_queue_node(gfp_mask, NUMA_NO_NODE);
}
EXPORT_SYMBOL(blk_alloc_queue);

/**
 * blk_queue_enter() - try to increase q->q_usage_counter
 * @q: request queue pointer
@@ -470,24 +464,19 @@ static void blk_timeout_work(struct work_struct *work)
{
}

/**
 * blk_alloc_queue_node - allocate a request queue
 * @gfp_mask: memory allocation flags
 * @node_id: NUMA node to allocate memory from
 */
struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
struct request_queue *__blk_alloc_queue(int node_id)
{
	struct request_queue *q;
	int ret;

	q = kmem_cache_alloc_node(blk_requestq_cachep,
				gfp_mask | __GFP_ZERO, node_id);
				GFP_KERNEL | __GFP_ZERO, node_id);
	if (!q)
		return NULL;

	q->last_merge = NULL;

	q->id = ida_simple_get(&blk_queue_ida, 0, 0, gfp_mask);
	q->id = ida_simple_get(&blk_queue_ida, 0, 0, GFP_KERNEL);
	if (q->id < 0)
		goto fail_q;

@@ -495,7 +484,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
	if (ret)
		goto fail_id;

	q->backing_dev_info = bdi_alloc_node(gfp_mask, node_id);
	q->backing_dev_info = bdi_alloc_node(GFP_KERNEL, node_id);
	if (!q->backing_dev_info)
		goto fail_split;

@@ -541,6 +530,9 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
	if (blkcg_init_queue(q))
		goto fail_ref;

	blk_queue_dma_alignment(q, 511);
	blk_set_default_limits(&q->limits);

	return q;

fail_ref:
@@ -557,7 +549,22 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
	kmem_cache_free(blk_requestq_cachep, q);
	return NULL;
}
EXPORT_SYMBOL(blk_alloc_queue_node);

/**
 * blk_alloc_queue - allocate a request queue for a make_request based driver
 * @make_request: the make_request_fn the queue dispatches bios to; must not
 *		be NULL
 * @node_id: NUMA node to allocate memory from (NUMA_NO_NODE for no preference)
 *
 * Returns the newly allocated queue on success, or NULL on failure.
 */
struct request_queue *blk_alloc_queue(make_request_fn make_request, int node_id)
{
	struct request_queue *q;

	/*
	 * This function returns a pointer; callers (see the nfhd/simdisk
	 * call sites) test the result against NULL, so an errno value such
	 * as -EINVAL must never be returned here.
	 */
	if (WARN_ON_ONCE(!make_request))
		return NULL;

	q = __blk_alloc_queue(node_id);
	if (!q)
		return NULL;
	q->make_request_fn = make_request;
	q->nr_requests = BLKDEV_MAX_RQ;
	return q;
}
EXPORT_SYMBOL(blk_alloc_queue);

bool blk_get_queue(struct request_queue *q)
{
+2 −6
Original line number Diff line number Diff line
@@ -2729,7 +2729,7 @@ struct request_queue *blk_mq_init_queue_data(struct blk_mq_tag_set *set,
{
	struct request_queue *uninit_q, *q;

	uninit_q = blk_alloc_queue_node(GFP_KERNEL, set->numa_node);
	uninit_q = __blk_alloc_queue(set->numa_node);
	if (!uninit_q)
		return ERR_PTR(-ENOMEM);
	uninit_q->queuedata = queuedata;
@@ -2939,11 +2939,7 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
	INIT_LIST_HEAD(&q->requeue_list);
	spin_lock_init(&q->requeue_lock);

	blk_queue_make_request(q, blk_mq_make_request);

	/*
	 * Do this after blk_queue_make_request() overrides it...
	 */
	q->make_request_fn = blk_mq_make_request;
	q->nr_requests = set->queue_depth;

	/*
Loading