Commit cefa1032 authored by Christoph Hellwig

nvme-rdma: use the tagset alloc/free helpers

Use the common helpers to allocate and free the tagsets.  To make this
work, the generic nvme_ctrl now needs to be stored in the hctx private
data instead of the nvme_rdma_ctrl.
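
For reference, the helper pair adopted here centralizes the boilerplate
that each transport used to open-code.  The sketch below is a rough
reconstruction, inferred from the call sites in this patch and from the
open-coded admin tag set setup it deletes; the authoritative
implementation lives in the nvme core, not in this patch:

	/*
	 * Rough sketch of the common admin helper, reconstructed from the
	 * open-coded setup this patch removes; not the verbatim core code.
	 */
	int nvme_alloc_admin_tag_set(struct nvme_ctrl *ctrl,
			struct blk_mq_tag_set *set, const struct blk_mq_ops *ops,
			unsigned int flags, unsigned int cmd_size)
	{
		int ret;

		memset(set, 0, sizeof(*set));
		set->ops = ops;
		set->queue_depth = NVME_AQ_MQ_TAG_DEPTH;
		set->reserved_tags = NVMF_RESERVED_TAGS;
		set->numa_node = ctrl->numa_node;
		set->flags = flags;
		set->cmd_size = cmd_size;
		set->driver_data = ctrl;	/* generic nvme_ctrl in hctx private data */
		set->nr_hw_queues = 1;
		set->timeout = NVME_ADMIN_TIMEOUT;
		ret = blk_mq_alloc_tag_set(set);
		if (ret)
			return ret;
		/*
		 * The helper also creates ctrl->admin_q and ctrl->fabrics_q,
		 * which is why the blk_mq_init_queue() calls vanish below.
		 */
		ctrl->admin_tagset = set;
		return 0;
	}

nvme_remove_admin_tag_set() and nvme_remove_io_tag_set() mirror this by
destroying the queues and freeing the tag set, replacing the open-coded
blk_mq_destroy_queue()/blk_mq_free_tag_set() calls in the teardown and
error paths below.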

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>
parent 2d60738c
drivers/nvme/host/rdma.c  +34 −99
@@ -788,64 +788,21 @@ static int nvme_rdma_alloc_io_queues(struct nvme_rdma_ctrl *ctrl)
	return ret;
}

-static int nvme_rdma_alloc_admin_tag_set(struct nvme_ctrl *nctrl)
+static int nvme_rdma_alloc_tag_set(struct nvme_ctrl *ctrl)
{
-	struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl);
-	struct blk_mq_tag_set *set = &ctrl->admin_tag_set;
-	int ret;
-
-	memset(set, 0, sizeof(*set));
-	set->ops = &nvme_rdma_admin_mq_ops;
-	set->queue_depth = NVME_AQ_MQ_TAG_DEPTH;
-	set->reserved_tags = NVMF_RESERVED_TAGS;
-	set->numa_node = nctrl->numa_node;
-	set->cmd_size = sizeof(struct nvme_rdma_request) +
+	unsigned int cmd_size = sizeof(struct nvme_rdma_request) +
				NVME_RDMA_DATA_SGL_SIZE;
-	set->driver_data = &ctrl->ctrl;
-	set->nr_hw_queues = 1;
-	set->timeout = NVME_ADMIN_TIMEOUT;
-	set->flags = BLK_MQ_F_NO_SCHED;
-	ret = blk_mq_alloc_tag_set(set);
-	if (!ret)
-		ctrl->ctrl.admin_tagset = set;
-	return ret;
-}
-
-static int nvme_rdma_alloc_tag_set(struct nvme_ctrl *nctrl)
-{
-	struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl);
-	struct blk_mq_tag_set *set = &ctrl->tag_set;
-	int ret;
-
-	memset(set, 0, sizeof(*set));
-	set->ops = &nvme_rdma_mq_ops;
-	set->queue_depth = nctrl->sqsize + 1;
-	set->reserved_tags = NVMF_RESERVED_TAGS;
-	set->numa_node = nctrl->numa_node;
-	set->flags = BLK_MQ_F_SHOULD_MERGE;
-	set->cmd_size = sizeof(struct nvme_rdma_request) +
-			NVME_RDMA_DATA_SGL_SIZE;
-	if (nctrl->max_integrity_segments)
-		set->cmd_size += sizeof(struct nvme_rdma_sgl) +
+	if (ctrl->max_integrity_segments)
+		cmd_size += sizeof(struct nvme_rdma_sgl) +
			    NVME_RDMA_METADATA_SGL_SIZE;
-	set->driver_data = &ctrl->ctrl;
-	set->nr_hw_queues = nctrl->queue_count - 1;
-	set->timeout = NVME_IO_TIMEOUT;
-	set->nr_maps = nctrl->opts->nr_poll_queues ? HCTX_MAX_TYPES : 2;
-	ret = blk_mq_alloc_tag_set(set);
-	if (!ret)
-		ctrl->ctrl.tagset = set;
-	return ret;
+
+	return nvme_alloc_io_tag_set(ctrl, &to_rdma_ctrl(ctrl)->tag_set,
+			&nvme_rdma_mq_ops, BLK_MQ_F_SHOULD_MERGE, cmd_size);
}

-static void nvme_rdma_destroy_admin_queue(struct nvme_rdma_ctrl *ctrl,
-		bool remove)
+static void nvme_rdma_destroy_admin_queue(struct nvme_rdma_ctrl *ctrl)
{
-	if (remove) {
-		blk_mq_destroy_queue(ctrl->ctrl.admin_q);
-		blk_mq_destroy_queue(ctrl->ctrl.fabrics_q);
-		blk_mq_free_tag_set(ctrl->ctrl.admin_tagset);
-	}
	if (ctrl->async_event_sqe.data) {
		cancel_work_sync(&ctrl->ctrl.async_event_work);
		nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe,
@@ -887,26 +844,19 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
		goto out_free_queue;

	if (new) {
-		error = nvme_rdma_alloc_admin_tag_set(&ctrl->ctrl);
+		error = nvme_alloc_admin_tag_set(&ctrl->ctrl,
+				&ctrl->admin_tag_set, &nvme_rdma_admin_mq_ops,
+				BLK_MQ_F_NO_SCHED,
+				sizeof(struct nvme_rdma_request) +
+				NVME_RDMA_DATA_SGL_SIZE);
		if (error)
			goto out_free_async_qe;
-
-		ctrl->ctrl.fabrics_q = blk_mq_init_queue(&ctrl->admin_tag_set);
-		if (IS_ERR(ctrl->ctrl.fabrics_q)) {
-			error = PTR_ERR(ctrl->ctrl.fabrics_q);
-			goto out_free_tagset;
-		}
-
-		ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
-		if (IS_ERR(ctrl->ctrl.admin_q)) {
-			error = PTR_ERR(ctrl->ctrl.admin_q);
-			goto out_cleanup_fabrics_q;
-		}
	}

	error = nvme_rdma_start_queue(ctrl, 0);
	if (error)
-		goto out_cleanup_queue;
+		goto out_remove_admin_tag_set;

	error = nvme_enable_ctrl(&ctrl->ctrl);
	if (error)
@@ -933,15 +883,9 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
out_stop_queue:
	nvme_rdma_stop_queue(&ctrl->queues[0]);
	nvme_cancel_admin_tagset(&ctrl->ctrl);
-out_cleanup_queue:
-	if (new)
-		blk_mq_destroy_queue(ctrl->ctrl.admin_q);
-out_cleanup_fabrics_q:
+out_remove_admin_tag_set:
	if (new)
-		blk_mq_destroy_queue(ctrl->ctrl.fabrics_q);
-out_free_tagset:
-	if (new)
-		blk_mq_free_tag_set(ctrl->ctrl.admin_tagset);
+		nvme_remove_admin_tag_set(&ctrl->ctrl);
out_free_async_qe:
	if (ctrl->async_event_sqe.data) {
		nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe,
@@ -953,16 +897,6 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
	return error;
}

-static void nvme_rdma_destroy_io_queues(struct nvme_rdma_ctrl *ctrl,
-		bool remove)
-{
-	if (remove) {
-		blk_mq_destroy_queue(ctrl->ctrl.connect_q);
-		blk_mq_free_tag_set(ctrl->ctrl.tagset);
-	}
-	nvme_rdma_free_io_queues(ctrl);
-}
-
static int nvme_rdma_configure_io_queues(struct nvme_rdma_ctrl *ctrl, bool new)
{
	int ret, nr_queues;
@@ -975,10 +909,6 @@ static int nvme_rdma_configure_io_queues(struct nvme_rdma_ctrl *ctrl, bool new)
		ret = nvme_rdma_alloc_tag_set(&ctrl->ctrl);
		if (ret)
			goto out_free_io_queues;
-
-		ret = nvme_ctrl_init_connect_q(&(ctrl->ctrl));
-		if (ret)
-			goto out_free_tag_set;
	}

	/*
@@ -989,7 +919,7 @@ static int nvme_rdma_configure_io_queues(struct nvme_rdma_ctrl *ctrl, bool new)
	nr_queues = min(ctrl->tag_set.nr_hw_queues + 1, ctrl->ctrl.queue_count);
	ret = nvme_rdma_start_io_queues(ctrl, 1, nr_queues);
	if (ret)
-		goto out_cleanup_connect_q;
+		goto out_cleanup_tagset;

	if (!new) {
		nvme_start_queues(&ctrl->ctrl);
@@ -1022,13 +952,10 @@ static int nvme_rdma_configure_io_queues(struct nvme_rdma_ctrl *ctrl, bool new)
	nvme_stop_queues(&ctrl->ctrl);
	nvme_sync_io_queues(&ctrl->ctrl);
	nvme_rdma_stop_io_queues(ctrl);
-out_cleanup_connect_q:
+out_cleanup_tagset:
	nvme_cancel_tagset(&ctrl->ctrl);
	if (new)
-		blk_mq_destroy_queue(ctrl->ctrl.connect_q);
-out_free_tag_set:
-	if (new)
-		blk_mq_free_tag_set(ctrl->ctrl.tagset);
+		nvme_remove_io_tag_set(&ctrl->ctrl);
out_free_io_queues:
	nvme_rdma_free_io_queues(ctrl);
	return ret;
@@ -1041,9 +968,11 @@ static void nvme_rdma_teardown_admin_queue(struct nvme_rdma_ctrl *ctrl,
	blk_sync_queue(ctrl->ctrl.admin_q);
	nvme_rdma_stop_queue(&ctrl->queues[0]);
	nvme_cancel_admin_tagset(&ctrl->ctrl);
-	if (remove)
+	if (remove) {
		nvme_start_admin_queue(&ctrl->ctrl);
-	nvme_rdma_destroy_admin_queue(ctrl, remove);
+		nvme_remove_admin_tag_set(&ctrl->ctrl);
+	}
+	nvme_rdma_destroy_admin_queue(ctrl);
}

static void nvme_rdma_teardown_io_queues(struct nvme_rdma_ctrl *ctrl,
@@ -1055,9 +984,11 @@ static void nvme_rdma_teardown_io_queues(struct nvme_rdma_ctrl *ctrl,
		nvme_sync_io_queues(&ctrl->ctrl);
		nvme_rdma_stop_io_queues(ctrl);
		nvme_cancel_tagset(&ctrl->ctrl);
-		if (remove)
+		if (remove) {
			nvme_start_queues(&ctrl->ctrl);
-		nvme_rdma_destroy_io_queues(ctrl, remove);
+			nvme_remove_io_tag_set(&ctrl->ctrl);
+		}
+		nvme_rdma_free_io_queues(ctrl);
	}
}

@@ -1179,14 +1110,18 @@ static int nvme_rdma_setup_ctrl(struct nvme_rdma_ctrl *ctrl, bool new)
		nvme_sync_io_queues(&ctrl->ctrl);
		nvme_rdma_stop_io_queues(ctrl);
		nvme_cancel_tagset(&ctrl->ctrl);
-		nvme_rdma_destroy_io_queues(ctrl, new);
+		if (new)
+			nvme_remove_io_tag_set(&ctrl->ctrl);
+		nvme_rdma_free_io_queues(ctrl);
	}
destroy_admin:
	nvme_stop_admin_queue(&ctrl->ctrl);
	blk_sync_queue(ctrl->ctrl.admin_q);
	nvme_rdma_stop_queue(&ctrl->queues[0]);
	nvme_cancel_admin_tagset(&ctrl->ctrl);
-	nvme_rdma_destroy_admin_queue(ctrl, new);
+	if (new)
+		nvme_remove_admin_tag_set(&ctrl->ctrl);
+	nvme_rdma_destroy_admin_queue(ctrl);
	return ret;
}