Commit ae53aea6 authored by Jens Axboe's avatar Jens Axboe
Browse files

Merge tag 'nvme-5.18-2022-03-17' of git://git.infradead.org/nvme into for-5.18/drivers

Pull NVMe updates from Christoph:

"Second round of nvme updates for Linux 5.18

 - add lockdep annotations for in-kernel sockets (Chris Leech)
 - use vmalloc for ANA log buffer (Hannes Reinecke)
 - kerneldoc fixes (Chaitanya Kulkarni)
 - cleanups (Guoqing Jiang, Chaitanya Kulkarni, me)
 - warn about shared namespaces without multipathing (me)"

* tag 'nvme-5.18-2022-03-17' of git://git.infradead.org/nvme:
  nvme: warn about shared namespaces without CONFIG_NVME_MULTIPATH
  nvme: remove nvme_alloc_request and nvme_alloc_request_qid
  nvme: cleanup how disk->disk_name is assigned
  nvmet: move the call to nvmet_ns_changed out of nvmet_ns_revalidate
  nvmet: use snprintf() with PAGE_SIZE in configfs
  nvmet: don't fold lines
  nvmet-rdma: fix kernel-doc warning for nvmet_rdma_device_removal
  nvmet-fc: fix kernel-doc warning for nvmet_fc_unregister_targetport
  nvmet-fc: fix kernel-doc warning for nvmet_fc_register_targetport
  nvme-tcp: lockdep: annotate in-kernel sockets
  nvme-tcp: don't fold the line
  nvme-tcp: don't initialize ret variable
  nvme-multipath: call bio_io_error in nvme_ns_head_submit_bio
  nvme-multipath: use vmalloc for ANA log buffer
parents bcfe9b6c ce8d7861
Loading
Loading
Loading
Loading
+1 −0
Original line number Diff line number Diff line
@@ -2092,6 +2092,7 @@ static void loop_remove(struct loop_device *lo)
	del_gendisk(lo->lo_disk);
	blk_cleanup_disk(lo->lo_disk);
	blk_mq_free_tag_set(&lo->tag_set);

	mutex_lock(&loop_ctl_mutex);
	idr_remove(&loop_index_idr, lo->lo_number);
	mutex_unlock(&loop_ctl_mutex);
+37 −39
Original line number Diff line number Diff line
@@ -639,13 +639,8 @@ static inline void nvme_clear_nvme_request(struct request *req)
	req->rq_flags |= RQF_DONTPREP;
}

/*
 * Map an NVMe passthrough command to the block-layer request operation:
 * write commands become REQ_OP_DRV_OUT, everything else REQ_OP_DRV_IN.
 */
static inline unsigned int nvme_req_op(struct nvme_command *cmd)
{
	return nvme_is_write(cmd) ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN;
}

static inline void nvme_init_request(struct request *req,
		struct nvme_command *cmd)
/* initialize a passthrough request */
void nvme_init_request(struct request *req, struct nvme_command *cmd)
{
	if (req->q->queuedata)
		req->timeout = NVME_IO_TIMEOUT;
@@ -661,30 +656,7 @@ static inline void nvme_init_request(struct request *req,
	nvme_clear_nvme_request(req);
	memcpy(nvme_req(req)->cmd, cmd, sizeof(*cmd));
}

/*
 * Allocate a block-layer request for an NVMe passthrough command and
 * initialize it.  Returns an ERR_PTR on allocation failure; the caller
 * must check with IS_ERR().
 */
struct request *nvme_alloc_request(struct request_queue *q,
		struct nvme_command *cmd, blk_mq_req_flags_t flags)
{
	struct request *req = blk_mq_alloc_request(q, nvme_req_op(cmd), flags);

	if (IS_ERR(req))
		return req;
	nvme_init_request(req, cmd);
	return req;
}
EXPORT_SYMBOL_GPL(nvme_alloc_request);

/*
 * Like nvme_alloc_request(), but allocate from the hardware context
 * matching @qid (qid 0 falls back to hctx 0).
 */
static struct request *nvme_alloc_request_qid(struct request_queue *q,
		struct nvme_command *cmd, blk_mq_req_flags_t flags, int qid)
{
	struct request *req = blk_mq_alloc_request_hctx(q, nvme_req_op(cmd),
			flags, qid ? qid - 1 : 0);

	if (IS_ERR(req))
		return req;
	nvme_init_request(req, cmd);
	return req;
}
EXPORT_SYMBOL_GPL(nvme_init_request);

/*
 * For something we're not in a state to send to the device the default action
@@ -1110,11 +1082,14 @@ int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
	int ret;

	if (qid == NVME_QID_ANY)
		req = nvme_alloc_request(q, cmd, flags);
		req = blk_mq_alloc_request(q, nvme_req_op(cmd), flags);
	else
		req = nvme_alloc_request_qid(q, cmd, flags, qid);
		req = blk_mq_alloc_request_hctx(q, nvme_req_op(cmd), flags,
						qid ? qid - 1 : 0);

	if (IS_ERR(req))
		return PTR_ERR(req);
	nvme_init_request(req, cmd);

	if (timeout)
		req->timeout = timeout;
@@ -1304,7 +1279,7 @@ static void nvme_keep_alive_work(struct work_struct *work)
		return;
	}

	rq = nvme_alloc_request(ctrl->admin_q, &ctrl->ka_cmd,
	rq = blk_mq_alloc_request(ctrl->admin_q, nvme_req_op(&ctrl->ka_cmd),
				  BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT);
	if (IS_ERR(rq)) {
		/* allocation failure, reset the controller */
@@ -1312,6 +1287,7 @@ static void nvme_keep_alive_work(struct work_struct *work)
		nvme_reset_ctrl(ctrl);
		return;
	}
	nvme_init_request(rq, &ctrl->ka_cmd);

	rq->timeout = ctrl->kato * HZ;
	rq->end_io_data = ctrl;
@@ -3879,6 +3855,14 @@ static int nvme_init_ns_head(struct nvme_ns *ns, unsigned nsid,
					nsid);
			goto out_put_ns_head;
		}

		if (!multipath && !list_empty(&head->list)) {
			dev_warn(ctrl->device,
				"Found shared namespace %d, but multipathing not supported.\n",
				nsid);
			dev_warn_once(ctrl->device,
			"Support for shared namespaces without CONFIG_NVME_MULTIPATH is deprecated and will be removed in Linux 6.0.\n");
		}
	}

	list_add_tail_rcu(&ns->siblings, &head->list);
@@ -3967,13 +3951,27 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid,
		goto out_cleanup_disk;

	/*
	 * Without the multipath code enabled, multiple controller per
	 * subsystems are visible as devices and thus we cannot use the
	 * subsystem instance.
	 * If multipathing is enabled, the device name for all disks and not
	 * just those that represent shared namespaces needs to be based on the
	 * subsystem instance.  Using the controller instance for private
	 * namespaces could lead to naming collisions between shared and private
	 * namespaces if they don't use a common numbering scheme.
	 *
	 * If multipathing is not enabled, disk names must use the controller
	 * instance as shared namespaces will show up as multiple block
	 * devices.
	 */
	if (!nvme_mpath_set_disk_name(ns, disk->disk_name, &disk->flags))
	if (ns->head->disk) {
		sprintf(disk->disk_name, "nvme%dc%dn%d", ctrl->subsys->instance,
			ctrl->instance, ns->head->instance);
		disk->flags |= GENHD_FL_HIDDEN;
	} else if (multipath) {
		sprintf(disk->disk_name, "nvme%dn%d", ctrl->subsys->instance,
			ns->head->instance);
	} else {
		sprintf(disk->disk_name, "nvme%dn%d", ctrl->instance,
			ns->head->instance);
	}

	if (nvme_update_ns_info(ns, id))
		goto out_unlink_ns;
+2 −1
Original line number Diff line number Diff line
@@ -66,9 +66,10 @@ static int nvme_submit_user_cmd(struct request_queue *q,
	void *meta = NULL;
	int ret;

	req = nvme_alloc_request(q, cmd, 0);
	req = blk_mq_alloc_request(q, nvme_req_op(cmd), 0);
	if (IS_ERR(req))
		return PTR_ERR(req);
	nvme_init_request(req, cmd);

	if (timeout)
		req->timeout = timeout;
+5 −27
Original line number Diff line number Diff line
@@ -5,10 +5,11 @@

#include <linux/backing-dev.h>
#include <linux/moduleparam.h>
#include <linux/vmalloc.h>
#include <trace/events/block.h>
#include "nvme.h"

static bool multipath = true;
bool multipath = true;
module_param(multipath, bool, 0444);
MODULE_PARM_DESC(multipath,
	"turn on native support for multiple controllers per subsystem");
@@ -79,28 +80,6 @@ void nvme_mpath_start_freeze(struct nvme_subsystem *subsys)
			blk_freeze_queue_start(h->disk->queue);
}

/*
 * If multipathing is enabled we need to always use the subsystem instance
 * number for numbering our devices to avoid conflicts between subsystems that
 * have multiple controllers and thus use the multipath-aware subsystem node
 * and those that have a single controller and use the controller node
 * directly.
 */
bool nvme_mpath_set_disk_name(struct nvme_ns *ns, char *disk_name, int *flags)
{
	if (!multipath)
		return false;
	if (!ns->head->disk) {
		sprintf(disk_name, "nvme%dn%d", ns->ctrl->subsys->instance,
			ns->head->instance);
		return true;
	}
	sprintf(disk_name, "nvme%dc%dn%d", ns->ctrl->subsys->instance,
		ns->ctrl->instance, ns->head->instance);
	*flags = GENHD_FL_HIDDEN;
	return true;
}

void nvme_failover_req(struct request *req)
{
	struct nvme_ns *ns = req->q->queuedata;
@@ -386,8 +365,7 @@ static void nvme_ns_head_submit_bio(struct bio *bio)
	} else {
		dev_warn_ratelimited(dev, "no available path - failing I/O\n");

		bio->bi_status = BLK_STS_IOERR;
		bio_endio(bio);
		bio_io_error(bio);
	}

	srcu_read_unlock(&head->srcu, srcu_idx);
@@ -898,7 +876,7 @@ int nvme_mpath_init_identify(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
	if (ana_log_size > ctrl->ana_log_size) {
		nvme_mpath_stop(ctrl);
		nvme_mpath_uninit(ctrl);
		ctrl->ana_log_buf = kmalloc(ana_log_size, GFP_KERNEL);
		ctrl->ana_log_buf = kvmalloc(ana_log_size, GFP_KERNEL);
		if (!ctrl->ana_log_buf)
			return -ENOMEM;
	}
@@ -915,7 +893,7 @@ int nvme_mpath_init_identify(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)

void nvme_mpath_uninit(struct nvme_ctrl *ctrl)
{
	kfree(ctrl->ana_log_buf);
	kvfree(ctrl->ana_log_buf);
	ctrl->ana_log_buf = NULL;
	ctrl->ana_log_size = 0;
}
+8 −8
Original line number Diff line number Diff line
@@ -698,9 +698,13 @@ void nvme_wait_freeze(struct nvme_ctrl *ctrl);
int nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout);
void nvme_start_freeze(struct nvme_ctrl *ctrl);

/* Map a passthrough NVMe command to the matching driver request op. */
static inline unsigned int nvme_req_op(struct nvme_command *cmd)
{
	if (nvme_is_write(cmd))
		return REQ_OP_DRV_OUT;
	return REQ_OP_DRV_IN;
}

#define NVME_QID_ANY -1
struct request *nvme_alloc_request(struct request_queue *q,
		struct nvme_command *cmd, blk_mq_req_flags_t flags);
void nvme_init_request(struct request *req, struct nvme_command *cmd);
void nvme_cleanup_cmd(struct request *req);
blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req);
blk_status_t nvme_fail_nonready_command(struct nvme_ctrl *ctrl,
@@ -770,7 +774,6 @@ void nvme_mpath_unfreeze(struct nvme_subsystem *subsys);
void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys);
void nvme_mpath_start_freeze(struct nvme_subsystem *subsys);
void nvme_mpath_default_iopolicy(struct nvme_subsystem *subsys);
bool nvme_mpath_set_disk_name(struct nvme_ns *ns, char *disk_name, int *flags);
void nvme_failover_req(struct request *req);
void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl);
int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl,struct nvme_ns_head *head);
@@ -793,20 +796,17 @@ static inline void nvme_trace_bio_complete(struct request *req)
		trace_block_bio_complete(ns->head->disk->queue, req->bio);
}

extern bool multipath;
extern struct device_attribute dev_attr_ana_grpid;
extern struct device_attribute dev_attr_ana_state;
extern struct device_attribute subsys_attr_iopolicy;

#else
#define multipath false
/* Stub for builds without multipath support: ANA is never used. */
static inline bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl)
{
	return false;
}
/*
 * Stub for builds without multipath support: never overrides the disk
 * name, so the caller falls back to controller-instance naming.
 */
static inline bool nvme_mpath_set_disk_name(struct nvme_ns *ns, char *disk_name,
		int *flags)
{
	return false;
}
/* Stub for builds without multipath support: failover is a no-op. */
static inline void nvme_failover_req(struct request *req)
{
}
Loading