Commit 2455a4b7 authored by Christoph Hellwig's avatar Christoph Hellwig Committed by Jens Axboe
Browse files

nvme-pci: split nvme_dev_add



Split nvme_dev_add into a helper to actually allocate the tag set, and
one that just updates the number of queues.  Add a local variable for
the tag_set to clean up the code a bit.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Keith Busch <kbusch@kernel.org>
Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent f91b727c
Loading
Loading
Loading
Loading
+37 −35
Original line number Diff line number Diff line
@@ -2533,23 +2533,22 @@ static bool __nvme_disable_io_queues(struct nvme_dev *dev, u8 opcode)
	return true;
}

static void nvme_dev_add(struct nvme_dev *dev)
static void nvme_pci_alloc_tag_set(struct nvme_dev *dev)
{
	struct blk_mq_tag_set * set = &dev->tagset;
	int ret;

	if (!dev->ctrl.tagset) {
		dev->tagset.ops = &nvme_mq_ops;
		dev->tagset.nr_hw_queues = dev->online_queues - 1;
		dev->tagset.nr_maps = 2; /* default + read */
	set->ops = &nvme_mq_ops;
	set->nr_hw_queues = dev->online_queues - 1;
	set->nr_maps = 2; /* default + read */
	if (dev->io_queues[HCTX_TYPE_POLL])
			dev->tagset.nr_maps++;
		dev->tagset.timeout = NVME_IO_TIMEOUT;
		dev->tagset.numa_node = dev->ctrl.numa_node;
		dev->tagset.queue_depth = min_t(unsigned int, dev->q_depth,
						BLK_MQ_MAX_DEPTH) - 1;
		dev->tagset.cmd_size = sizeof(struct nvme_iod);
		dev->tagset.flags = BLK_MQ_F_SHOULD_MERGE;
		dev->tagset.driver_data = dev;
		set->nr_maps++;
	set->timeout = NVME_IO_TIMEOUT;
	set->numa_node = dev->ctrl.numa_node;
	set->queue_depth = min_t(unsigned, dev->q_depth, BLK_MQ_MAX_DEPTH) - 1;
	set->cmd_size = sizeof(struct nvme_iod);
	set->flags = BLK_MQ_F_SHOULD_MERGE;
	set->driver_data = dev;

	/*
	 * Some Apple controllers requires tags to be unique
@@ -2557,23 +2556,22 @@ static void nvme_dev_add(struct nvme_dev *dev)
	 * tags of the IO queue.
	 */
	if (dev->ctrl.quirks & NVME_QUIRK_SHARED_TAGS)
			dev->tagset.reserved_tags = NVME_AQ_DEPTH;
		set->reserved_tags = NVME_AQ_DEPTH;

		ret = blk_mq_alloc_tag_set(&dev->tagset);
	ret = blk_mq_alloc_tag_set(set);
	if (ret) {
		dev_warn(dev->ctrl.device,
			"IO queues tagset allocation failed %d\n", ret);
		return;
	}
		dev->ctrl.tagset = &dev->tagset;
	} else {
		blk_mq_update_nr_hw_queues(&dev->tagset, dev->online_queues - 1);

		/* Free previously allocated queues that are no longer usable */
		nvme_free_queues(dev, dev->online_queues);
	dev->ctrl.tagset = set;
}

	nvme_dbbuf_set(dev);
/*
 * Resize an already-allocated I/O tag set after a reset changed the
 * number of usable queues, then release the queue structures that fell
 * out of range.
 */
static void nvme_pci_update_nr_queues(struct nvme_dev *dev)
{
	unsigned int nr_io_queues = dev->online_queues - 1;

	blk_mq_update_nr_hw_queues(&dev->tagset, nr_io_queues);
	/* Free previously allocated queues that are no longer usable. */
	nvme_free_queues(dev, dev->online_queues);
}

static int nvme_pci_enable(struct nvme_dev *dev)
@@ -2924,7 +2922,11 @@ static void nvme_reset_work(struct work_struct *work)
	} else {
		nvme_start_queues(&dev->ctrl);
		nvme_wait_freeze(&dev->ctrl);
		nvme_dev_add(dev);
		if (!dev->ctrl.tagset)
			nvme_pci_alloc_tag_set(dev);
		else
			nvme_pci_update_nr_queues(dev);
		nvme_dbbuf_set(dev);
		nvme_unfreeze(&dev->ctrl);
	}