Commit dfdcbf1f authored by Jens Axboe

Merge tag 'nvme-6.1-2022-09-28' of git://git.infradead.org/nvme into for-6.1/block

Pull NVMe updates from Christoph:

"nvme updates for Linux 6.1

 - handle effects after freeing the request (Keith Busch)
 - copy firmware_rev on each init (Keith Busch)
 - restrict management ioctls to admin (Keith Busch)
 - ensure subsystem reset is single threaded (Keith Busch)
 - report the actual number of tagset maps in nvme-pci (Keith Busch)
 - small fabrics authentication fixups (Christoph Hellwig)
 - add common code for tagset allocation and freeing (Christoph Hellwig)
 - stop using the request_queue in nvmet (Christoph Hellwig)
 - set min_align_mask before calculating max_hw_sectors
   (Rishabh Bhatnagar)
 - send a rediscover uevent when a persistent discovery controller
   reconnects (Sagi Grimberg)
 - misc nvmet-tcp fixes (Varun Prakash, zhenwei pi)"

* tag 'nvme-6.1-2022-09-28' of git://git.infradead.org/nvme: (31 commits)
  nvmet: don't look at the request_queue in nvmet_bdev_set_limits
  nvmet: don't look at the request_queue in nvmet_bdev_zone_mgmt_emulate_all
  nvme: remove nvme_ctrl_init_connect_q
  nvme-loop: use the tagset alloc/free helpers
  nvme-loop: store the generic nvme_ctrl in set->driver_data
  nvme-loop: initialize sqsize later
  nvme-fc: use the tagset alloc/free helpers
  nvme-fc: store the generic nvme_ctrl in set->driver_data
  nvme-fc: keep ctrl->sqsize in sync with opts->queue_size
  nvme-rdma: use the tagset alloc/free helpers
  nvme-rdma: store the generic nvme_ctrl in set->driver_data
  nvme-tcp: use the tagset alloc/free helpers
  nvme-tcp: store the generic nvme_ctrl in set->driver_data
  nvme-tcp: remove the unused queue_size member in nvme_tcp_queue
  nvme: add common helpers to allocate and free tagsets
  nvme-auth: add a MAINTAINERS entry
  nvmet: add helpers to set the result field for connect commands
  nvme: improve the NVME_CONNECT_AUTHREQ* definitions
  nvmet-auth: don't try to cancel a non-initialized work_struct
  nvmet-tcp: remove nvmet_tcp_finish_cmd
  ...
parents c68f4f4e 84fe64f8
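
The centerpiece of this series is the common tagset allocation code added to drivers/nvme/host/core.c below; each fabrics transport then shrinks to a pair of helper calls. A minimal sketch of the intended usage from a transport's point of view — the demo_* names and struct demo_request are hypothetical, not from any in-tree driver:

	/* Hypothetical fabrics transport embedding the generic controller. */
	struct demo_ctrl {
		struct nvme_ctrl	ctrl;
		struct blk_mq_tag_set	admin_tag_set;
		struct blk_mq_tag_set	tag_set;
	};

	static int demo_setup_queues(struct demo_ctrl *demo)
	{
		int ret;

		/* Allocates the admin tagset and creates admin_q (and, for
		 * fabrics controllers, fabrics_q) on top of it. */
		ret = nvme_alloc_admin_tag_set(&demo->ctrl, &demo->admin_tag_set,
				&demo_admin_mq_ops, BLK_MQ_F_NO_SCHED,
				sizeof(struct demo_request));
		if (ret)
			return ret;

		/* Sized from ctrl->sqsize and ctrl->queue_count; creates
		 * connect_q for fabrics controllers. */
		ret = nvme_alloc_io_tag_set(&demo->ctrl, &demo->tag_set,
				&demo_mq_ops, BLK_MQ_F_SHOULD_MERGE,
				sizeof(struct demo_request));
		if (ret)
			nvme_remove_admin_tag_set(&demo->ctrl);
		return ret;
	}

Note that set->driver_data now points at the generic struct nvme_ctrl, so transport callbacks recover their private controller with a container_of() wrapper rather than a direct cast, as the fc.c conversion below shows.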
MAINTAINERS +9 −0
@@ -14542,6 +14542,15 @@ F: drivers/nvme/common/
 F:	include/linux/nvme*
 F:	include/uapi/linux/nvme_ioctl.h
 
+NVM EXPRESS FABRICS AUTHENTICATION
+M:	Hannes Reinecke <hare@suse.de>
+L:	linux-nvme@lists.infradead.org
+S:	Supported
+F:	drivers/nvme/host/auth.c
+F:	drivers/nvme/target/auth.c
+F:	drivers/nvme/target/fabrics-cmd-auth.c
+F:	include/linux/nvme-auth.h
+
 NVM EXPRESS FC TRANSPORT DRIVERS
 M:	James Smart <james.smart@broadcom.com>
 L:	linux-nvme@lists.infradead.org
drivers/nvme/host/core.c +120 −12
@@ -1111,7 +1111,7 @@ static u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
 	return effects;
 }
 
-static void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects,
+void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects,
 		       struct nvme_command *cmd, int status)
 {
 	if (effects & NVME_CMD_EFFECTS_CSE_MASK) {
@@ -1148,21 +1148,16 @@ static void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects,
 		break;
 	}
 }
+EXPORT_SYMBOL_NS_GPL(nvme_passthru_end, NVME_TARGET_PASSTHRU);
 
-int nvme_execute_passthru_rq(struct request *rq)
+int nvme_execute_passthru_rq(struct request *rq, u32 *effects)
 {
 	struct nvme_command *cmd = nvme_req(rq)->cmd;
 	struct nvme_ctrl *ctrl = nvme_req(rq)->ctrl;
 	struct nvme_ns *ns = rq->q->queuedata;
-	u32 effects;
-	int  ret;
-
-	effects = nvme_passthru_start(ctrl, ns, cmd->common.opcode);
-	ret = nvme_execute_rq(rq, false);
-	if (effects) /* nothing to be done for zero cmd effects */
-		nvme_passthru_end(ctrl, effects, cmd, ret);
 
-	return ret;
+	*effects = nvme_passthru_start(ctrl, ns, cmd->common.opcode);
+	return nvme_execute_rq(rq, false);
 }
 EXPORT_SYMBOL_NS_GPL(nvme_execute_passthru_rq, NVME_TARGET_PASSTHRU);
 
@@ -2898,7 +2893,6 @@ static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
 	nvme_init_subnqn(subsys, ctrl, id);
 	memcpy(subsys->serial, id->sn, sizeof(subsys->serial));
 	memcpy(subsys->model, id->mn, sizeof(subsys->model));
-	memcpy(subsys->firmware_rev, id->fr, sizeof(subsys->firmware_rev));
 	subsys->vendor_id = le16_to_cpu(id->vid);
 	subsys->cmic = id->cmic;
 
@@ -3117,6 +3111,8 @@ static int nvme_init_identify(struct nvme_ctrl *ctrl)
 				ctrl->quirks |= core_quirks[i].quirks;
 		}
 	}
+	memcpy(ctrl->subsys->firmware_rev, id->fr,
+	       sizeof(ctrl->subsys->firmware_rev));
 
 	if (force_apst && (ctrl->quirks & NVME_QUIRK_NO_DEEPEST_PS)) {
 		dev_warn(ctrl->device, "forcibly allowing all power states due to nvme_core.force_apst -- use at your own risk\n");
@@ -4800,6 +4796,108 @@ void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
 }
 EXPORT_SYMBOL_GPL(nvme_complete_async_event);
 
+int nvme_alloc_admin_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
+		const struct blk_mq_ops *ops, unsigned int flags,
+		unsigned int cmd_size)
+{
+	int ret;
+
+	memset(set, 0, sizeof(*set));
+	set->ops = ops;
+	set->queue_depth = NVME_AQ_MQ_TAG_DEPTH;
+	if (ctrl->ops->flags & NVME_F_FABRICS)
+		set->reserved_tags = NVMF_RESERVED_TAGS;
+	set->numa_node = ctrl->numa_node;
+	set->flags = flags;
+	set->cmd_size = cmd_size;
+	set->driver_data = ctrl;
+	set->nr_hw_queues = 1;
+	set->timeout = NVME_ADMIN_TIMEOUT;
+	ret = blk_mq_alloc_tag_set(set);
+	if (ret)
+		return ret;
+
+	ctrl->admin_q = blk_mq_init_queue(set);
+	if (IS_ERR(ctrl->admin_q)) {
+		ret = PTR_ERR(ctrl->admin_q);
+		goto out_free_tagset;
+	}
+
+	if (ctrl->ops->flags & NVME_F_FABRICS) {
+		ctrl->fabrics_q = blk_mq_init_queue(set);
+		if (IS_ERR(ctrl->fabrics_q)) {
+			ret = PTR_ERR(ctrl->fabrics_q);
+			goto out_cleanup_admin_q;
+		}
+	}
+
+	ctrl->admin_tagset = set;
+	return 0;
+
+out_cleanup_admin_q:
+	blk_mq_destroy_queue(ctrl->fabrics_q);
+out_free_tagset:
+	blk_mq_free_tag_set(ctrl->admin_tagset);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(nvme_alloc_admin_tag_set);
+
+void nvme_remove_admin_tag_set(struct nvme_ctrl *ctrl)
+{
+	blk_mq_destroy_queue(ctrl->admin_q);
+	if (ctrl->ops->flags & NVME_F_FABRICS)
+		blk_mq_destroy_queue(ctrl->fabrics_q);
+	blk_mq_free_tag_set(ctrl->admin_tagset);
+}
+EXPORT_SYMBOL_GPL(nvme_remove_admin_tag_set);
+
+int nvme_alloc_io_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
+		const struct blk_mq_ops *ops, unsigned int flags,
+		unsigned int cmd_size)
+{
+	int ret;
+
+	memset(set, 0, sizeof(*set));
+	set->ops = ops;
+	set->queue_depth = ctrl->sqsize + 1;
+	set->reserved_tags = NVMF_RESERVED_TAGS;
+	set->numa_node = ctrl->numa_node;
+	set->flags = flags;
+	set->cmd_size = cmd_size,
+	set->driver_data = ctrl;
+	set->nr_hw_queues = ctrl->queue_count - 1;
+	set->timeout = NVME_IO_TIMEOUT;
+	if (ops->map_queues)
+		set->nr_maps = ctrl->opts->nr_poll_queues ? HCTX_MAX_TYPES : 2;
+	ret = blk_mq_alloc_tag_set(set);
+	if (ret)
+		return ret;
+
+	if (ctrl->ops->flags & NVME_F_FABRICS) {
+		ctrl->connect_q = blk_mq_init_queue(set);
+		if (IS_ERR(ctrl->connect_q)) {
+			ret = PTR_ERR(ctrl->connect_q);
+			goto out_free_tag_set;
+		}
+	}
+
+	ctrl->tagset = set;
+	return 0;
+
+out_free_tag_set:
+	blk_mq_free_tag_set(set);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(nvme_alloc_io_tag_set);
+
+void nvme_remove_io_tag_set(struct nvme_ctrl *ctrl)
+{
+	if (ctrl->ops->flags & NVME_F_FABRICS)
+		blk_mq_destroy_queue(ctrl->connect_q);
+	blk_mq_free_tag_set(ctrl->tagset);
+}
+EXPORT_SYMBOL_GPL(nvme_remove_io_tag_set);
+
 void nvme_stop_ctrl(struct nvme_ctrl *ctrl)
 {
 	nvme_mpath_stop(ctrl);
@@ -4819,6 +4917,16 @@ void nvme_start_ctrl(struct nvme_ctrl *ctrl)
 
 	nvme_enable_aen(ctrl);
 
+	/*
+	 * persistent discovery controllers need to send indication to userspace
+	 * to re-read the discovery log page to learn about possible changes
+	 * that were missed. We identify persistent discovery controllers by
+	 * checking that they started once before, hence are reconnecting back.
+	 */
+	if (test_and_set_bit(NVME_CTRL_STARTED_ONCE, &ctrl->flags) &&
+	    nvme_discovery_ctrl(ctrl))
+		nvme_change_uevent(ctrl, "NVME_EVENT=rediscover");
+
 	if (ctrl->queue_count > 1) {
 		nvme_queue_scan(ctrl);
 		nvme_start_queues(ctrl);
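
Two behavioral notes on this file: nvme_passthru_end() is now exported and deferred so callers can run command-effects processing only after the request (and its tag) has been freed — the ioctl path below shows the pattern. And the NVME_EVENT=rediscover uevent is aimed at userspace autoconnect tooling; a udev rule along these lines would re-run discovery when a persistent discovery controller reconnects (illustrative only — nvme-cli ships its own rules):

	ACTION=="change", SUBSYSTEM=="nvme", ENV{NVME_EVENT}=="rediscover", \
		RUN+="/usr/sbin/nvme connect-all"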
drivers/nvme/host/fc.c +28 −93
@@ -1829,7 +1829,7 @@ nvme_fc_exit_request(struct blk_mq_tag_set *set, struct request *rq,
 {
 	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
 
-	return __nvme_fc_exit_request(set->driver_data, op);
+	return __nvme_fc_exit_request(to_fc_ctrl(set->driver_data), op);
 }
 
 static int
@@ -2135,7 +2135,7 @@ static int
 nvme_fc_init_request(struct blk_mq_tag_set *set, struct request *rq,
 		unsigned int hctx_idx, unsigned int numa_node)
 {
-	struct nvme_fc_ctrl *ctrl = set->driver_data;
+	struct nvme_fc_ctrl *ctrl = to_fc_ctrl(set->driver_data);
 	struct nvme_fcp_op_w_sgl *op = blk_mq_rq_to_pdu(rq);
 	int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;
 	struct nvme_fc_queue *queue = &ctrl->queues[queue_idx];
@@ -2206,36 +2206,28 @@ nvme_fc_term_aen_ops(struct nvme_fc_ctrl *ctrl)
 	}
 }
 
-static inline void
-__nvme_fc_init_hctx(struct blk_mq_hw_ctx *hctx, struct nvme_fc_ctrl *ctrl,
-		unsigned int qidx)
+static inline int
+__nvme_fc_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, unsigned int qidx)
 {
+	struct nvme_fc_ctrl *ctrl = to_fc_ctrl(data);
 	struct nvme_fc_queue *queue = &ctrl->queues[qidx];
 
 	hctx->driver_data = queue;
 	queue->hctx = hctx;
+	return 0;
 }
 
 static int
-nvme_fc_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
-		unsigned int hctx_idx)
+nvme_fc_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, unsigned int hctx_idx)
 {
-	struct nvme_fc_ctrl *ctrl = data;
-
-	__nvme_fc_init_hctx(hctx, ctrl, hctx_idx + 1);
-
-	return 0;
+	return __nvme_fc_init_hctx(hctx, data, hctx_idx + 1);
 }
 
 static int
 nvme_fc_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
 		unsigned int hctx_idx)
 {
-	struct nvme_fc_ctrl *ctrl = data;
-
-	__nvme_fc_init_hctx(hctx, ctrl, hctx_idx);
-
-	return 0;
+	return __nvme_fc_init_hctx(hctx, data, hctx_idx);
 }
 
 static void
@@ -2391,10 +2383,8 @@ nvme_fc_ctrl_free(struct kref *ref)
 		container_of(ref, struct nvme_fc_ctrl, ref);
 	unsigned long flags;
 
-	if (ctrl->ctrl.tagset) {
-		blk_mq_destroy_queue(ctrl->ctrl.connect_q);
-		blk_mq_free_tag_set(&ctrl->tag_set);
-	}
+	if (ctrl->ctrl.tagset)
+		nvme_remove_io_tag_set(&ctrl->ctrl);
 
 	/* remove from rport list */
 	spin_lock_irqsave(&ctrl->rport->lock, flags);
@@ -2402,9 +2392,7 @@ nvme_fc_ctrl_free(struct kref *ref)
 	spin_unlock_irqrestore(&ctrl->rport->lock, flags);
 
 	nvme_start_admin_queue(&ctrl->ctrl);
-	blk_mq_destroy_queue(ctrl->ctrl.admin_q);
-	blk_mq_destroy_queue(ctrl->ctrl.fabrics_q);
-	blk_mq_free_tag_set(&ctrl->admin_tag_set);
+	nvme_remove_admin_tag_set(&ctrl->ctrl);
 
 	kfree(ctrl->queues);
 
@@ -2862,7 +2850,7 @@ nvme_fc_complete_rq(struct request *rq)
 
 static void nvme_fc_map_queues(struct blk_mq_tag_set *set)
 {
-	struct nvme_fc_ctrl *ctrl = set->driver_data;
+	struct nvme_fc_ctrl *ctrl = to_fc_ctrl(set->driver_data);
 	int i;
 
 	for (i = 0; i < set->nr_maps; i++) {
@@ -2914,32 +2902,16 @@ nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl)
 
 	nvme_fc_init_io_queues(ctrl);
 
-	memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
-	ctrl->tag_set.ops = &nvme_fc_mq_ops;
-	ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
-	ctrl->tag_set.reserved_tags = NVMF_RESERVED_TAGS;
-	ctrl->tag_set.numa_node = ctrl->ctrl.numa_node;
-	ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
-	ctrl->tag_set.cmd_size =
-			struct_size((struct nvme_fcp_op_w_sgl *)NULL, priv,
-			    ctrl->lport->ops->fcprqst_priv_sz);
-	ctrl->tag_set.driver_data = ctrl;
-	ctrl->tag_set.nr_hw_queues = ctrl->ctrl.queue_count - 1;
-	ctrl->tag_set.timeout = NVME_IO_TIMEOUT;
-
-	ret = blk_mq_alloc_tag_set(&ctrl->tag_set);
-	if (ret)
-		return ret;
-
-	ctrl->ctrl.tagset = &ctrl->tag_set;
-
-	ret = nvme_ctrl_init_connect_q(&(ctrl->ctrl));
+	ret = nvme_alloc_io_tag_set(&ctrl->ctrl, &ctrl->tag_set,
+			&nvme_fc_mq_ops, BLK_MQ_F_SHOULD_MERGE,
+			struct_size((struct nvme_fcp_op_w_sgl *)NULL, priv,
+				    ctrl->lport->ops->fcprqst_priv_sz));
 	if (ret)
-		goto out_free_tag_set;
+		return ret;
 
 	ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
 	if (ret)
-		goto out_cleanup_blk_queue;
+		goto out_cleanup_tagset;
 
 	ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
 	if (ret)
@@ -2951,10 +2923,8 @@ nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl)
 
 out_delete_hw_queues:
 	nvme_fc_delete_hw_io_queues(ctrl);
-out_cleanup_blk_queue:
-	blk_mq_destroy_queue(ctrl->ctrl.connect_q);
-out_free_tag_set:
-	blk_mq_free_tag_set(&ctrl->tag_set);
+out_cleanup_tagset:
+	nvme_remove_io_tag_set(&ctrl->ctrl);
 	nvme_fc_free_io_queues(ctrl);
 
 	/* force put free routine to ignore io queues */
@@ -3165,15 +3135,7 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
 			"to maxcmd\n",
 			opts->queue_size, ctrl->ctrl.maxcmd);
 		opts->queue_size = ctrl->ctrl.maxcmd;
-	}
-
-	if (opts->queue_size > ctrl->ctrl.sqsize + 1) {
-		/* warn if sqsize is lower than queue_size */
-		dev_warn(ctrl->ctrl.device,
-			"queue_size %zu > ctrl sqsize %u, reducing "
-			"to sqsize\n",
-			opts->queue_size, ctrl->ctrl.sqsize + 1);
-		opts->queue_size = ctrl->ctrl.sqsize + 1;
+		ctrl->ctrl.sqsize = opts->queue_size - 1;
 	}
 
 	ret = nvme_fc_init_aen_ops(ctrl);
@@ -3546,35 +3508,12 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
 
 	nvme_fc_init_queue(ctrl, 0);
 
-	memset(&ctrl->admin_tag_set, 0, sizeof(ctrl->admin_tag_set));
-	ctrl->admin_tag_set.ops = &nvme_fc_admin_mq_ops;
-	ctrl->admin_tag_set.queue_depth = NVME_AQ_MQ_TAG_DEPTH;
-	ctrl->admin_tag_set.reserved_tags = NVMF_RESERVED_TAGS;
-	ctrl->admin_tag_set.numa_node = ctrl->ctrl.numa_node;
-	ctrl->admin_tag_set.cmd_size =
-			struct_size((struct nvme_fcp_op_w_sgl *)NULL, priv,
-			    ctrl->lport->ops->fcprqst_priv_sz);
-	ctrl->admin_tag_set.driver_data = ctrl;
-	ctrl->admin_tag_set.nr_hw_queues = 1;
-	ctrl->admin_tag_set.timeout = NVME_ADMIN_TIMEOUT;
-	ctrl->admin_tag_set.flags = BLK_MQ_F_NO_SCHED;
-
-	ret = blk_mq_alloc_tag_set(&ctrl->admin_tag_set);
+	ret = nvme_alloc_admin_tag_set(&ctrl->ctrl, &ctrl->admin_tag_set,
+			&nvme_fc_admin_mq_ops, BLK_MQ_F_NO_SCHED,
+			struct_size((struct nvme_fcp_op_w_sgl *)NULL, priv,
+				    ctrl->lport->ops->fcprqst_priv_sz));
 	if (ret)
 		goto out_free_queues;
-	ctrl->ctrl.admin_tagset = &ctrl->admin_tag_set;
-
-	ctrl->ctrl.fabrics_q = blk_mq_init_queue(&ctrl->admin_tag_set);
-	if (IS_ERR(ctrl->ctrl.fabrics_q)) {
-		ret = PTR_ERR(ctrl->ctrl.fabrics_q);
-		goto out_free_admin_tag_set;
-	}
-
-	ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
-	if (IS_ERR(ctrl->ctrl.admin_q)) {
-		ret = PTR_ERR(ctrl->ctrl.admin_q);
-		goto out_cleanup_fabrics_q;
-	}
 
 	/*
 	 * Would have been nice to init io queues tag set as well.
@@ -3585,7 +3524,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
 
 	ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_fc_ctrl_ops, 0);
 	if (ret)
-		goto out_cleanup_admin_q;
+		goto out_cleanup_tagset;
 
 	/* at this point, teardown path changes to ref counting on nvme ctrl */
 
@@ -3640,12 +3579,8 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
 
 	return ERR_PTR(-EIO);
 
-out_cleanup_admin_q:
-	blk_mq_destroy_queue(ctrl->ctrl.admin_q);
-out_cleanup_fabrics_q:
-	blk_mq_destroy_queue(ctrl->ctrl.fabrics_q);
-out_free_admin_tag_set:
-	blk_mq_free_tag_set(&ctrl->admin_tag_set);
+out_cleanup_tagset:
+	nvme_remove_admin_tag_set(&ctrl->ctrl);
 out_free_queues:
 	kfree(ctrl->queues);
 out_free_ida:
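
The to_fc_ctrl() helper used throughout these hunks is not shown here; since set->driver_data now stores the generic struct nvme_ctrl, it is presumably the usual container_of() wrapper:

	static inline struct nvme_fc_ctrl *to_fc_ctrl(struct nvme_ctrl *ctrl)
	{
		return container_of(ctrl, struct nvme_fc_ctrl, ctrl);
	}

The rdma, tcp and loop conversions in this series follow the same driver_data pattern.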
drivers/nvme/host/ioctl.c +14 −1
@@ -136,9 +136,11 @@ static int nvme_submit_user_cmd(struct request_queue *q,
 		unsigned bufflen, void __user *meta_buffer, unsigned meta_len,
 		u32 meta_seed, u64 *result, unsigned timeout, bool vec)
 {
+	struct nvme_ctrl *ctrl;
 	struct request *req;
 	void *meta = NULL;
 	struct bio *bio;
+	u32 effects;
 	int ret;
 
 	req = nvme_alloc_user_request(q, cmd, ubuffer, bufflen, meta_buffer,
@@ -147,8 +149,9 @@ static int nvme_submit_user_cmd(struct request_queue *q,
 		return PTR_ERR(req);
 
 	bio = req->bio;
+	ctrl = nvme_req(req)->ctrl;
 
-	ret = nvme_execute_passthru_rq(req);
+	ret = nvme_execute_passthru_rq(req, &effects);
 
 	if (result)
 		*result = le64_to_cpu(nvme_req(req)->result.u64);
@@ -158,6 +161,10 @@ static int nvme_submit_user_cmd(struct request_queue *q,
 	if (bio)
 		blk_rq_unmap_user(bio);
 	blk_mq_free_request(req);
+
+	if (effects)
+		nvme_passthru_end(ctrl, effects, cmd, ret);
+
 	return ret;
 }
 
@@ -757,11 +764,17 @@ long nvme_dev_ioctl(struct file *file, unsigned int cmd,
 	case NVME_IOCTL_IO_CMD:
 		return nvme_dev_user_cmd(ctrl, argp);
 	case NVME_IOCTL_RESET:
+		if (!capable(CAP_SYS_ADMIN))
+			return -EACCES;
 		dev_warn(ctrl->device, "resetting controller\n");
 		return nvme_reset_ctrl_sync(ctrl);
 	case NVME_IOCTL_SUBSYS_RESET:
+		if (!capable(CAP_SYS_ADMIN))
+			return -EACCES;
 		return nvme_reset_subsystem(ctrl);
 	case NVME_IOCTL_RESCAN:
+		if (!capable(CAP_SYS_ADMIN))
+			return -EACCES;
 		nvme_queue_scan(ctrl);
 		return 0;
 	default:
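
With the management ioctls now gated on CAP_SYS_ADMIN rather than only file permissions on the device node, a process that can open the character device but lacks the capability gets -EACCES. A quick check from userspace (a minimal sketch, assuming /dev/nvme0 is openable by the caller):

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <linux/nvme_ioctl.h>

	int main(void)
	{
		int fd = open("/dev/nvme0", O_RDWR);

		if (fd < 0) {
			perror("open");
			return 1;
		}
		/* Fails with EACCES without CAP_SYS_ADMIN after this change. */
		if (ioctl(fd, NVME_IOCTL_RESET) < 0)
			perror("NVME_IOCTL_RESET");
		return 0;
	}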
drivers/nvme/host/nvme.h +30 −14
@@ -233,6 +233,12 @@ struct nvme_fault_inject {
 #endif
 };
 
+enum nvme_ctrl_flags {
+	NVME_CTRL_FAILFAST_EXPIRED	= 0,
+	NVME_CTRL_ADMIN_Q_STOPPED	= 1,
+	NVME_CTRL_STARTED_ONCE		= 2,
+};
+
 struct nvme_ctrl {
 	bool comp_seen;
 	enum nvme_ctrl_state state;
@@ -354,8 +360,6 @@ struct nvme_ctrl {
 	u16 maxcmd;
 	int nr_reconnects;
 	unsigned long flags;
-#define NVME_CTRL_FAILFAST_EXPIRED	0
-#define NVME_CTRL_ADMIN_Q_STOPPED	1
 	struct nvmf_ctrl_options *opts;
 
 	struct page *discard_page;
@@ -602,11 +606,23 @@ static inline void nvme_fault_inject_fini(struct nvme_fault_inject *fault_inj)
 static inline void nvme_should_fail(struct request *req) {}
 #endif
 
+bool nvme_wait_reset(struct nvme_ctrl *ctrl);
+int nvme_try_sched_reset(struct nvme_ctrl *ctrl);
+
 static inline int nvme_reset_subsystem(struct nvme_ctrl *ctrl)
 {
+	int ret;
+
 	if (!ctrl->subsystem)
 		return -ENOTTY;
-	return ctrl->ops->reg_write32(ctrl, NVME_REG_NSSR, 0x4E564D65);
+	if (!nvme_wait_reset(ctrl))
+		return -EBUSY;
+
+	ret = ctrl->ops->reg_write32(ctrl, NVME_REG_NSSR, 0x4E564D65);
+	if (ret)
+		return ret;
+
+	return nvme_try_sched_reset(ctrl);
 }
 
 /*
@@ -712,7 +728,6 @@ void nvme_cancel_tagset(struct nvme_ctrl *ctrl);
 void nvme_cancel_admin_tagset(struct nvme_ctrl *ctrl);
 bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
 		enum nvme_ctrl_state new_state);
-bool nvme_wait_reset(struct nvme_ctrl *ctrl);
 int nvme_disable_ctrl(struct nvme_ctrl *ctrl);
 int nvme_enable_ctrl(struct nvme_ctrl *ctrl);
 int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl);
@@ -722,6 +737,14 @@ void nvme_uninit_ctrl(struct nvme_ctrl *ctrl);
 void nvme_start_ctrl(struct nvme_ctrl *ctrl);
 void nvme_stop_ctrl(struct nvme_ctrl *ctrl);
 int nvme_init_ctrl_finish(struct nvme_ctrl *ctrl);
+int nvme_alloc_admin_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
+		const struct blk_mq_ops *ops, unsigned int flags,
+		unsigned int cmd_size);
+void nvme_remove_admin_tag_set(struct nvme_ctrl *ctrl);
+int nvme_alloc_io_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
+		const struct blk_mq_ops *ops, unsigned int flags,
+		unsigned int cmd_size);
+void nvme_remove_io_tag_set(struct nvme_ctrl *ctrl);
 
 void nvme_remove_namespaces(struct nvme_ctrl *ctrl);
 
@@ -802,7 +825,6 @@ int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count);
 void nvme_stop_keep_alive(struct nvme_ctrl *ctrl);
 int nvme_reset_ctrl(struct nvme_ctrl *ctrl);
 int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl);
-int nvme_try_sched_reset(struct nvme_ctrl *ctrl);
 int nvme_delete_ctrl(struct nvme_ctrl *ctrl);
 void nvme_queue_scan(struct nvme_ctrl *ctrl);
 int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp, u8 csi,
@@ -968,14 +990,6 @@ static inline int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf)
 }
 #endif
 
-static inline int nvme_ctrl_init_connect_q(struct nvme_ctrl *ctrl)
-{
-	ctrl->connect_q = blk_mq_init_queue(ctrl->tagset);
-	if (IS_ERR(ctrl->connect_q))
-		return PTR_ERR(ctrl->connect_q);
-	return 0;
-}
-
 static inline struct nvme_ns *nvme_get_ns_from_dev(struct device *dev)
 {
 	return dev_to_disk(dev)->private_data;
@@ -1023,7 +1037,9 @@ static inline void nvme_auth_free(struct nvme_ctrl *ctrl) {};
 
 u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
 			 u8 opcode);
-int nvme_execute_passthru_rq(struct request *rq);
+int nvme_execute_passthru_rq(struct request *rq, u32 *effects);
+void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects,
+		       struct nvme_command *cmd, int status);
struct nvme_ctrl *nvme_ctrl_from_file(struct file *file);
 struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid);
 void nvme_put_ns(struct nvme_ns *ns);
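
For reference, the 0x4E564D65 written to NVME_REG_NSSR is the subsystem-reset magic defined by the NVMe specification — the bytes are ASCII 'N' (0x4E), 'V' (0x56), 'M' (0x4D), 'e' (0x65). The reworked nvme_reset_subsystem() brackets that register write with nvme_wait_reset() and nvme_try_sched_reset(), so only one caller at a time can trigger and then schedule the reset; that is what makes the subsystem reset single threaded.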