Commit 04b1ecb6 authored by Jens Axboe

Merge tag 'nvme-5.11-2021-01-07' of git://git.infradead.org/nvme into block-5.11

Pull NVMe updates from Christoph:

"nvme updates for 5.11:

 - fix a race in the nvme-tcp send code (Sagi Grimberg)
 - fix a list corruption in an nvme-rdma error path (Israel Rukshin)
 - avoid a possible double fetch in nvme-pci (Lalithambika Krishnakumar)
 - add the subsystem NQN quirk for a Samsung drive (Gopal Tiwari)
 - fix two compiler warnings in nvme-fcloop (James Smart)
 - don't call sleeping functions from irq context in nvme-fc (James Smart)
 - remove an unused argument (Max Gurtovoy)
 - remove unused exports (Minwoo Im)"

* tag 'nvme-5.11-2021-01-07' of git://git.infradead.org/nvme:
  nvme: remove the unused status argument from nvme_trace_bio_complete
  nvmet-rdma: Fix list_del corruption on queue establishment failure
  nvme: unexport functions with no external caller
  nvme: avoid possible double fetch in handling CQE
  nvme-tcp: Fix possible race of io_work and direct send
  nvme-pci: mark Samsung PM1725a as IGNORE_DEV_SUBNQN
  nvme-fcloop: Fix sscanf type and list_first_entry_or_null warnings
  nvme-fc: avoid calling _nvme_fc_abort_outstanding_ios from interrupt context
parents 04a6a536 2b59787a
drivers/nvme/host/core.c  +3 −5
@@ -179,7 +179,7 @@ int nvme_reset_ctrl(struct nvme_ctrl *ctrl)
 }
 EXPORT_SYMBOL_GPL(nvme_reset_ctrl);
 
-int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl)
+static int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl)
 {
 	int ret;
 
@@ -192,7 +192,6 @@ int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl)
 
 	return ret;
 }
-EXPORT_SYMBOL_GPL(nvme_reset_ctrl_sync);
 
 static void nvme_do_delete_ctrl(struct nvme_ctrl *ctrl)
 {
@@ -331,7 +330,7 @@ static inline void nvme_end_req(struct request *req)
 		req->__sector = nvme_lba_to_sect(req->q->queuedata,
 			le64_to_cpu(nvme_req(req)->result.u64));
 
-	nvme_trace_bio_complete(req, status);
+	nvme_trace_bio_complete(req);
 	blk_mq_end_request(req, status);
 }
 
@@ -578,7 +577,7 @@ struct request *nvme_alloc_request(struct request_queue *q,
 }
 EXPORT_SYMBOL_GPL(nvme_alloc_request);
 
-struct request *nvme_alloc_request_qid(struct request_queue *q,
+static struct request *nvme_alloc_request_qid(struct request_queue *q,
 		struct nvme_command *cmd, blk_mq_req_flags_t flags, int qid)
 {
 	struct request *req;
@@ -589,7 +588,6 @@ struct request *nvme_alloc_request_qid(struct request_queue *q,
 		nvme_init_request(req, cmd);
 	return req;
 }
-EXPORT_SYMBOL_GPL(nvme_alloc_request_qid);
 
 static int nvme_toggle_streams(struct nvme_ctrl *ctrl, bool enable)
 {
drivers/nvme/host/fc.c  +14 −1
@@ -166,6 +166,7 @@ struct nvme_fc_ctrl {
 	struct blk_mq_tag_set	admin_tag_set;
 	struct blk_mq_tag_set	tag_set;
 
+	struct work_struct	ioerr_work;
 	struct delayed_work	connect_work;
 
 	struct kref		ref;
@@ -1888,6 +1889,15 @@ __nvme_fc_fcpop_chk_teardowns(struct nvme_fc_ctrl *ctrl,
 	}
 }
 
+static void
+nvme_fc_ctrl_ioerr_work(struct work_struct *work)
+{
+	struct nvme_fc_ctrl *ctrl =
+			container_of(work, struct nvme_fc_ctrl, ioerr_work);
+
+	nvme_fc_error_recovery(ctrl, "transport detected io error");
+}
+
 static void
 nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
 {
@@ -2046,7 +2056,7 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
 
 check_error:
 	if (terminate_assoc)
-		nvme_fc_error_recovery(ctrl, "transport detected io error");
+		queue_work(nvme_reset_wq, &ctrl->ioerr_work);
 }
 
 static int
@@ -3233,6 +3243,7 @@ nvme_fc_delete_ctrl(struct nvme_ctrl *nctrl)
 {
 	struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
 
+	cancel_work_sync(&ctrl->ioerr_work);
 	cancel_delayed_work_sync(&ctrl->connect_work);
 	/*
 	 * kill the association on the link side.  this will block
@@ -3449,6 +3460,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
 
 	INIT_WORK(&ctrl->ctrl.reset_work, nvme_fc_reset_ctrl_work);
 	INIT_DELAYED_WORK(&ctrl->connect_work, nvme_fc_connect_ctrl_work);
+	INIT_WORK(&ctrl->ioerr_work, nvme_fc_ctrl_ioerr_work);
 	spin_lock_init(&ctrl->lock);
 
 	/* io queue count */
@@ -3540,6 +3552,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
 
 fail_ctrl:
 	nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING);
+	cancel_work_sync(&ctrl->ioerr_work);
 	cancel_work_sync(&ctrl->ctrl.reset_work);
 	cancel_delayed_work_sync(&ctrl->connect_work);
 
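The fc.c change fixes a context bug: nvme_fc_fcpio_done() can be invoked from interrupt context by the transport, but nvme_fc_error_recovery() calls functions that may sleep, so the fix moves the recovery call into a work item queued on nvme_reset_wq and cancels it on both teardown paths. Below is a minimal sketch of that defer-to-workqueue pattern; my_ctrl, my_io_done() and do_sleeping_error_recovery() are illustrative names, not the driver's.

#include <linux/types.h>
#include <linux/workqueue.h>

struct my_ctrl {
	struct work_struct	recovery_work;
	/* ... */
};

/* hypothetical recovery logic that may sleep */
void do_sleeping_error_recovery(struct my_ctrl *ctrl);

static void my_recovery_fn(struct work_struct *work)
{
	struct my_ctrl *ctrl =
		container_of(work, struct my_ctrl, recovery_work);

	/* process context: sleeping is allowed here */
	do_sleeping_error_recovery(ctrl);
}

static void my_init(struct my_ctrl *ctrl)
{
	INIT_WORK(&ctrl->recovery_work, my_recovery_fn);
}

/* completion path: may run in (soft)irq context, must not sleep */
static void my_io_done(struct my_ctrl *ctrl, bool failed)
{
	if (failed)
		queue_work(system_wq, &ctrl->recovery_work);
}

/* teardown: flush or cancel the work before freeing the ctrl */
static void my_delete(struct my_ctrl *ctrl)
{
	cancel_work_sync(&ctrl->recovery_work);
}

The cancel_work_sync() calls mirror the two added in the diff: without them, a queued ioerr_work could fire against a controller that is mid-teardown.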
drivers/nvme/host/nvme.h  +2 −7
@@ -610,8 +610,6 @@ void nvme_start_freeze(struct nvme_ctrl *ctrl);
 #define NVME_QID_ANY -1
 struct request *nvme_alloc_request(struct request_queue *q,
 		struct nvme_command *cmd, blk_mq_req_flags_t flags);
-struct request *nvme_alloc_request_qid(struct request_queue *q,
-		struct nvme_command *cmd, blk_mq_req_flags_t flags, int qid);
 void nvme_cleanup_cmd(struct request *req);
 blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
 		struct nvme_command *cmd);
@@ -630,7 +628,6 @@ int nvme_get_features(struct nvme_ctrl *dev, unsigned int fid,
 int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count);
 void nvme_stop_keep_alive(struct nvme_ctrl *ctrl);
 int nvme_reset_ctrl(struct nvme_ctrl *ctrl);
-int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl);
 int nvme_try_sched_reset(struct nvme_ctrl *ctrl);
 int nvme_delete_ctrl(struct nvme_ctrl *ctrl);
 
@@ -675,8 +672,7 @@ static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
 		kblockd_schedule_work(&head->requeue_work);
 }
 
-static inline void nvme_trace_bio_complete(struct request *req,
-        blk_status_t status)
+static inline void nvme_trace_bio_complete(struct request *req)
 {
 	struct nvme_ns *ns = req->q->queuedata;
 
@@ -731,8 +727,7 @@ static inline void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl)
 static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
 {
 }
-static inline void nvme_trace_bio_complete(struct request *req,
-        blk_status_t status)
+static inline void nvme_trace_bio_complete(struct request *req)
 {
 }
 static inline int nvme_mpath_init(struct nvme_ctrl *ctrl,
drivers/nvme/host/pci.c  +6 −4
@@ -967,6 +967,7 @@ static inline struct blk_mq_tags *nvme_queue_tagset(struct nvme_queue *nvmeq)
 static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, u16 idx)
 {
 	struct nvme_completion *cqe = &nvmeq->cqes[idx];
+	__u16 command_id = READ_ONCE(cqe->command_id);
 	struct request *req;
 
 	/*
@@ -975,17 +976,17 @@ static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, u16 idx)
 	 * aborts.  We don't even bother to allocate a struct request
 	 * for them but rather special case them here.
 	 */
-	if (unlikely(nvme_is_aen_req(nvmeq->qid, cqe->command_id))) {
+	if (unlikely(nvme_is_aen_req(nvmeq->qid, command_id))) {
 		nvme_complete_async_event(&nvmeq->dev->ctrl,
 				cqe->status, &cqe->result);
 		return;
 	}
 
-	req = blk_mq_tag_to_rq(nvme_queue_tagset(nvmeq), cqe->command_id);
+	req = blk_mq_tag_to_rq(nvme_queue_tagset(nvmeq), command_id);
 	if (unlikely(!req)) {
 		dev_warn(nvmeq->dev->ctrl.device,
 			"invalid id %d completed on queue %d\n",
-			cqe->command_id, le16_to_cpu(cqe->sq_id));
+			command_id, le16_to_cpu(cqe->sq_id));
 		return;
 	}
 
@@ -3196,7 +3197,8 @@ static const struct pci_device_id nvme_id_table[] = {
 	{ PCI_DEVICE(0x144d, 0xa821),   /* Samsung PM1725 */
		.driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
 	{ PCI_DEVICE(0x144d, 0xa822),   /* Samsung PM1725a */
-		.driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
+		.driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY |
+				NVME_QUIRK_IGNORE_DEV_SUBNQN, },
 	{ PCI_DEVICE(0x1d1d, 0x1f1f),	/* LighNVM qemu device */
		.driver_data = NVME_QUIRK_LIGHTNVM, },
 	{ PCI_DEVICE(0x1d1d, 0x2807),	/* CNEX WL */
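The READ_ONCE() change above closes a possible double fetch: the CQE sits in host memory that the device updates over DMA, so without READ_ONCE() the compiler may legally load cqe->command_id once for the nvme_is_aen_req() check and again for the tag lookup, and the two loads can observe different values. A minimal sketch of the hazard and the fix; id_is_valid() and use_id() are hypothetical stand-ins for the validation and the lookup.

#include <linux/compiler.h>	/* READ_ONCE() */
#include <linux/errno.h>
#include <linux/types.h>

/* hypothetical stand-ins for nvme_is_aen_req()/blk_mq_tag_to_rq() */
bool id_is_valid(__u16 id);
int use_id(__u16 id);

/*
 * BAD: nothing stops the compiler from loading *shared twice, once
 * for the check and once for the use, while the device rewrites it
 * in between.
 */
static int lookup_unsafe(__u16 *shared)
{
	if (!id_is_valid(*shared))	/* first fetch */
		return -EINVAL;
	return use_id(*shared);		/* possible second fetch */
}

/* GOOD: snapshot once, then validate and use the same value. */
static int lookup_safe(__u16 *shared)
{
	__u16 id = READ_ONCE(*shared);	/* exactly one load */

	if (!id_is_valid(id))
		return -EINVAL;
	return use_id(id);
}

Snapshotting into a local also means the dev_warn() in the diff reports the same value that actually failed the lookup.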
drivers/nvme/host/tcp.c  +11 −1
@@ -262,6 +262,16 @@ static inline void nvme_tcp_advance_req(struct nvme_tcp_request *req,
 	}
 }
 
+static inline void nvme_tcp_send_all(struct nvme_tcp_queue *queue)
+{
+	int ret;
+
+	/* drain the send queue as much as we can... */
+	do {
+		ret = nvme_tcp_try_send(queue);
+	} while (ret > 0);
+}
+
 static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
 		bool sync, bool last)
 {
@@ -279,7 +289,7 @@ static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
 	if (queue->io_cpu == smp_processor_id() &&
 	    sync && empty && mutex_trylock(&queue->send_mutex)) {
 		queue->more_requests = !last;
-		nvme_tcp_try_send(queue);
+		nvme_tcp_send_all(queue);
 		queue->more_requests = false;
 		mutex_unlock(&queue->send_mutex);
 	} else if (last) {
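The tcp.c fix concerns the two sending contexts serialized by send_mutex: the io_work context and the .queue_rq() direct-send path. A context that loses the mutex_trylock() race relies on the current owner to push out whatever is on the queue, so if the direct path sent only a single request, requests queued concurrently could sit unsent until the next trigger; nvme_tcp_send_all() drains the queue instead. A rough sketch of the drain-while-owning-the-lock pattern, with illustrative my_* names:

#include <linux/mutex.h>

struct my_queue {
	struct mutex	send_mutex;
	/* ... pending request list ... */
};

/* stand-in for nvme_tcp_try_send(): transmits at most one queued
 * request, returns >0 if it sent something
 */
int my_try_send(struct my_queue *q);

static void my_direct_send(struct my_queue *q)
{
	if (!mutex_trylock(&q->send_mutex))
		return;	/* current owner must drain for us */

	/*
	 * Drain until empty: a racing context that failed trylock
	 * will not come back for requests it queued while we hold
	 * the mutex, so a single-shot send could leave them sitting
	 * on the queue.
	 */
	while (my_try_send(q) > 0)
		;

	mutex_unlock(&q->send_mutex);
}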