block/bio.c +3 −1

@@ -754,10 +754,12 @@ bool __bio_try_merge_page(struct bio *bio, struct page *page,
 	if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)))
 		return false;
 
-	if (bio->bi_vcnt > 0 && !bio_full(bio, len)) {
+	if (bio->bi_vcnt > 0) {
 		struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1];
 
 		if (page_is_mergeable(bv, page, len, off, same_page)) {
+			if (bio->bi_iter.bi_size > UINT_MAX - len)
+				return false;
 			bv->bv_len += len;
 			bio->bi_iter.bi_size += len;
 			return true;
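The guard added here is the standard pre-addition overflow check: for unsigned arithmetic, a + b wraps exactly when a > UINT_MAX - b, and the subtraction form cannot itself wrap. A minimal userspace sketch of the same pattern (try_extend and total are hypothetical stand-ins for __bio_try_merge_page and bi_size):

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for __bio_try_merge_page()'s size bookkeeping:
 * refuse to grow an unsigned byte count past UINT_MAX. */
static bool try_extend(unsigned int *total, unsigned int len)
{
	/* total + len wraps iff total > UINT_MAX - len; checking in this
	 * form is itself overflow-free. */
	if (*total > UINT_MAX - len)
		return false;
	*total += len;
	return true;
}

int main(void)
{
	unsigned int size = UINT_MAX - 4096;

	printf("%d\n", try_extend(&size, 4096));	/* 1: fits exactly */
	printf("%d\n", try_extend(&size, 1));		/* 0: would wrap to 0 */
	return 0;
}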
drivers/nvme/host/core.c +6 −0

@@ -1735,6 +1735,8 @@ static int nvme_report_ns_ids(struct nvme_ctrl *ctrl, unsigned int nsid,
 		if (ret)
 			dev_warn(ctrl->device,
				 "Identify Descriptors failed (%d)\n", ret);
+		if (ret > 0)
+			ret = 0;
 	}
 	return ret;
 }
@@ -2852,6 +2854,10 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
	 * admin connect
	 */
 	if (ctrl->cntlid != le16_to_cpu(id->cntlid)) {
+		dev_err(ctrl->device,
+			"Mismatching cntlid: Connect %u vs Identify "
+			"%u, rejecting\n",
+			ctrl->cntlid, le16_to_cpu(id->cntlid));
 		ret = -EINVAL;
 		goto out_free;
 	}
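The first hunk leans on the NVMe host convention that a negative return is a host/transport error while a positive value is an NVMe status code from the device; only the former should abort namespace scanning. A hedged userspace sketch of that normalization (identify_descs and its simulated results are invented for illustration):

#include <stdio.h>

/* Invented stand-in for the Identify command: <0 = host/transport error,
 * >0 = NVMe status code from the device, 0 = success. */
static int identify_descs(int simulated_result)
{
	return simulated_result;
}

static int report_ns_ids(int simulated_result)
{
	int ret = identify_descs(simulated_result);

	if (ret)
		fprintf(stderr, "Identify Descriptors failed (%d)\n", ret);
	/* The device rejected the command (e.g. it does not implement the
	 * descriptor list): log it, but do not fail namespace scanning. */
	if (ret > 0)
		ret = 0;
	return ret;
}

int main(void)
{
	printf("%d\n", report_ns_ids(2));	/* 0: NVMe status, swallowed */
	printf("%d\n", report_ns_ids(-5));	/* -5: host error, propagated */
	return 0;
}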
drivers/nvme/host/fc.c +31 −9

@@ -95,7 +95,7 @@ struct nvme_fc_fcp_op {
 
 struct nvme_fcp_op_w_sgl {
 	struct nvme_fc_fcp_op	op;
-	struct scatterlist	sgl[SG_CHUNK_SIZE];
+	struct scatterlist	sgl[NVME_INLINE_SG_CNT];
 	uint8_t			priv[0];
 };
 
@@ -342,7 +342,8 @@ nvme_fc_register_localport(struct nvme_fc_port_info *pinfo,
	    !template->ls_req || !template->fcp_io ||
	    !template->ls_abort || !template->fcp_abort ||
	    !template->max_hw_queues || !template->max_sgl_segments ||
-	    !template->max_dif_sgl_segments || !template->dma_boundary) {
+	    !template->max_dif_sgl_segments || !template->dma_boundary ||
+	    !template->module) {
 		ret = -EINVAL;
 		goto out_reghost_failed;
 	}
@@ -2015,6 +2016,7 @@ nvme_fc_ctrl_free(struct kref *ref)
 {
 	struct nvme_fc_ctrl *ctrl =
 		container_of(ref, struct nvme_fc_ctrl, ref);
+	struct nvme_fc_lport *lport = ctrl->lport;
 	unsigned long flags;
 
 	if (ctrl->ctrl.tagset) {
@@ -2041,6 +2043,7 @@ nvme_fc_ctrl_free(struct kref *ref)
 	if (ctrl->ctrl.opts)
 		nvmf_free_options(ctrl->ctrl.opts);
 	kfree(ctrl);
+	module_put(lport->ops->module);
 }
 
 static void
@@ -2141,7 +2144,7 @@ nvme_fc_map_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
 	freq->sg_table.sgl = freq->first_sgl;
 	ret = sg_alloc_table_chained(&freq->sg_table,
			blk_rq_nr_phys_segments(rq), freq->sg_table.sgl,
-			SG_CHUNK_SIZE);
+			NVME_INLINE_SG_CNT);
 	if (ret)
 		return -ENOMEM;
 
@@ -2150,7 +2153,7 @@ nvme_fc_map_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
 	freq->sg_cnt = fc_dma_map_sg(ctrl->lport->dev, freq->sg_table.sgl,
				op->nents, rq_dma_dir(rq));
 	if (unlikely(freq->sg_cnt <= 0)) {
-		sg_free_table_chained(&freq->sg_table, SG_CHUNK_SIZE);
+		sg_free_table_chained(&freq->sg_table, NVME_INLINE_SG_CNT);
 		freq->sg_cnt = 0;
 		return -EFAULT;
 	}
@@ -2173,7 +2176,7 @@ nvme_fc_unmap_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
 	fc_dma_unmap_sg(ctrl->lport->dev, freq->sg_table.sgl, op->nents,
			rq_dma_dir(rq));
 
-	sg_free_table_chained(&freq->sg_table, SG_CHUNK_SIZE);
+	sg_free_table_chained(&freq->sg_table, NVME_INLINE_SG_CNT);
 
 	freq->sg_cnt = 0;
 }
@@ -2910,10 +2913,22 @@ nvme_fc_reconnect_or_delete(struct nvme_fc_ctrl *ctrl, int status)
 static void
 __nvme_fc_terminate_io(struct nvme_fc_ctrl *ctrl)
 {
-	nvme_stop_keep_alive(&ctrl->ctrl);
+	/*
+	 * if state is connecting - the error occurred as part of a
+	 * reconnect attempt. The create_association error paths will
+	 * clean up any outstanding io.
+	 *
+	 * if it's a different state - ensure all pending io is
+	 * terminated. Given this can delay while waiting for the
+	 * aborted io to return, we recheck adapter state below
+	 * before changing state.
+	 */
+	if (ctrl->ctrl.state != NVME_CTRL_CONNECTING) {
+		nvme_stop_keep_alive(&ctrl->ctrl);
 
-	/* will block will waiting for io to terminate */
-	nvme_fc_delete_association(ctrl);
+		/* will block will waiting for io to terminate */
+		nvme_fc_delete_association(ctrl);
+	}
 
 	if (ctrl->ctrl.state != NVME_CTRL_CONNECTING &&
	    !nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING))
@@ -3059,10 +3074,15 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
 		goto out_fail;
 	}
 
+	if (!try_module_get(lport->ops->module)) {
+		ret = -EUNATCH;
+		goto out_free_ctrl;
+	}
+
 	idx = ida_simple_get(&nvme_fc_ctrl_cnt, 0, 0, GFP_KERNEL);
 	if (idx < 0) {
 		ret = -ENOSPC;
-		goto out_free_ctrl;
+		goto out_mod_put;
 	}
 
 	ctrl->ctrl.opts = opts;
@@ -3215,6 +3235,8 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
 out_free_ida:
 	put_device(ctrl->dev);
 	ida_simple_remove(&nvme_fc_ctrl_cnt, ctrl->cnum);
+out_mod_put:
+	module_put(lport->ops->module);
 out_free_ctrl:
 	kfree(ctrl);
 out_fail:
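The fc.c changes pin the LLDD module for the lifetime of each controller: take the reference before anything can call back into the driver, fail every later error path through a label that drops it, and release it only after the final kref put. A userspace analog of the pattern, with provider standing in for lport->ops->module (all names here are illustrative, not kernel API):

#include <stdbool.h>
#include <stdio.h>

/* Analog of try_module_get()/module_put(): a provider that must not be
 * torn down while any controller still references its callbacks. */
struct provider {
	int  refs;
	bool unloading;
};

static bool provider_get(struct provider *p)
{
	if (p->unloading)	/* mirrors try_module_get() failing */
		return false;
	p->refs++;
	return true;
}

static void provider_put(struct provider *p)
{
	p->refs--;
}

struct ctrl {
	struct provider *prov;
};

static int ctrl_create(struct ctrl *c, struct provider *p)
{
	/* Pin the provider first; the patch returns -EUNATCH here. */
	if (!provider_get(p))
		return -1;
	c->prov = p;
	return 0;
}

static void ctrl_free(struct ctrl *c)
{
	/* Drop the reference last, after all provider callbacks are done,
	 * matching module_put() at the end of nvme_fc_ctrl_free(). */
	provider_put(c->prov);
}

int main(void)
{
	struct provider p = { 0, false };
	struct ctrl c;

	if (ctrl_create(&c, &p) == 0)
		ctrl_free(&c);
	printf("refs=%d\n", p.refs);	/* 0: balanced get/put */
	return 0;
}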
drivers/nvme/host/nvme.h +6 −0

@@ -28,6 +28,12 @@ extern unsigned int admin_timeout;
 #define NVME_DEFAULT_KATO	5
 #define NVME_KATO_GRACE		10
 
+#ifdef CONFIG_ARCH_NO_SG_CHAIN
+#define NVME_INLINE_SG_CNT	0
+#else
+#define NVME_INLINE_SG_CNT	2
+#endif
+
 extern struct workqueue_struct *nvme_wq;
 extern struct workqueue_struct *nvme_reset_wq;
 extern struct workqueue_struct *nvme_delete_wq;
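NVME_INLINE_SG_CNT sizes the scatterlist array embedded in each request; sg_alloc_table_chained() uses that inline storage when the request fits and chains out to allocations otherwise, which is why the count must be 0 on architectures that cannot chain scatterlists. A rough userspace analog of the inline-or-allocate idea (alloc_entries, struct entry, and INLINE_CNT are invented here):

#include <stdio.h>
#include <stdlib.h>

#define INLINE_CNT 2	/* analog of NVME_INLINE_SG_CNT on chaining arches */

struct entry {
	void		*addr;
	unsigned int	 len;
};

/* Use the caller's small embedded array when the request fits, otherwise
 * fall back to the heap. With no chaining support the inline count must
 * be 0, forcing the allocation path for every request. */
static struct entry *alloc_entries(unsigned int n, struct entry *inline_buf)
{
	if (INLINE_CNT && n <= INLINE_CNT)
		return inline_buf;
	return calloc(n, sizeof(struct entry));
}

static void free_entries(struct entry *e, struct entry *inline_buf)
{
	if (e != inline_buf)
		free(e);
}

int main(void)
{
	struct entry inline_buf[INLINE_CNT ? INLINE_CNT : 1];
	struct entry *e = alloc_entries(2, inline_buf);		/* inline */
	struct entry *f = alloc_entries(64, inline_buf);	/* heap */

	printf("inline=%d heap=%d\n", e == inline_buf, f != inline_buf);
	free_entries(e, inline_buf);
	free_entries(f, inline_buf);
	return 0;
}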
drivers/nvme/host/pci.c +9 −14

@@ -68,14 +68,14 @@ static int io_queue_depth = 1024;
 module_param_cb(io_queue_depth, &io_queue_depth_ops, &io_queue_depth, 0644);
 MODULE_PARM_DESC(io_queue_depth, "set io queue depth, should >= 2");
 
-static int write_queues;
-module_param(write_queues, int, 0644);
+static unsigned int write_queues;
+module_param(write_queues, uint, 0644);
 MODULE_PARM_DESC(write_queues,
	"Number of queues to use for writes. If not set, reads and writes "
	"will share a queue set.");
 
-static int poll_queues;
-module_param(poll_queues, int, 0644);
+static unsigned int poll_queues;
+module_param(poll_queues, uint, 0644);
 MODULE_PARM_DESC(poll_queues, "Number of queues to use for polled IO.");
 
 struct nvme_dev;
@@ -176,7 +176,6 @@ struct nvme_queue {
 	u16 sq_tail;
 	u16 last_sq_tail;
 	u16 cq_head;
-	u16 last_cq_head;
 	u16 qid;
 	u8 cq_phase;
 	u8 sqes;
@@ -1026,10 +1025,7 @@ static irqreturn_t nvme_irq(int irq, void *data)
	 * the irq handler, even if that was on another CPU.
	 */
 	rmb();
-	if (nvmeq->cq_head != nvmeq->last_cq_head)
-		ret = IRQ_HANDLED;
 	nvme_process_cq(nvmeq, &start, &end, -1);
-	nvmeq->last_cq_head = nvmeq->cq_head;
 	wmb();
 
 	if (start != end) {
@@ -1549,7 +1545,7 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid, bool polled)
 	result = adapter_alloc_sq(dev, qid, nvmeq);
 	if (result < 0)
 		return result;
-	else if (result)
+	if (result)
 		goto release_cq;
 
 	nvmeq->cq_vector = vector;
@@ -2058,7 +2054,6 @@ static int nvme_setup_irqs(struct nvme_dev *dev, unsigned int nr_io_queues)
 		.priv		= dev,
 	};
 	unsigned int irq_queues, this_p_queues;
-	unsigned int nr_cpus = num_possible_cpus();
 
	/*
	 * Poll queues don't need interrupts, but we need at least one IO
@@ -2069,9 +2064,6 @@ static int nvme_setup_irqs(struct nvme_dev *dev, unsigned int nr_io_queues)
 		this_p_queues = nr_io_queues - 1;
 		irq_queues = 1;
 	} else {
-		if (nr_cpus < nr_io_queues - this_p_queues)
-			irq_queues = nr_cpus + 1;
-		else
-			irq_queues = nr_io_queues - this_p_queues + 1;
+		irq_queues = nr_io_queues - this_p_queues + 1;
 	}
 	dev->io_queues[HCTX_TYPE_POLL] = this_p_queues;
@@ -3142,6 +3134,9 @@ static int __init nvme_init(void)
 	BUILD_BUG_ON(sizeof(struct nvme_create_sq) != 64);
 	BUILD_BUG_ON(sizeof(struct nvme_delete_queue) != 64);
 	BUILD_BUG_ON(IRQ_AFFINITY_MAX_SETS < 2);
+
+	write_queues = min(write_queues, num_possible_cpus());
+	poll_queues = min(poll_queues, num_possible_cpus());
 	return pci_register_driver(&nvme_driver);
 }
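The init-time clamp bounds the user-supplied queue counts once, instead of special-casing the CPU count inside the interrupt-vector math later. A minimal sketch of the same idea (the CPU count is hardcoded here purely for illustration):

#include <stdio.h>

static unsigned int min_uint(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

int main(void)
{
	unsigned int num_possible_cpus = 8;	/* stand-in for the kernel helper */
	unsigned int write_queues = 4096;	/* oversized module parameter */
	unsigned int poll_queues = 2;

	/* Clamp once at init, as nvme_init() now does: queues beyond the
	 * CPU count can never be used and previously skewed the IRQ
	 * vector calculation in nvme_setup_irqs(). */
	write_queues = min_uint(write_queues, num_possible_cpus);
	poll_queues = min_uint(poll_queues, num_possible_cpus);
	printf("write=%u poll=%u\n", write_queues, poll_queues);
	return 0;
}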