Commit e369edbb authored by Jens Axboe

Merge tag 'nvme-5.13-2021-06-03' of git://git.infradead.org/nvme into block-5.13

Pull NVMe fixes from Christoph:

"nvme fixes for Linux 5.13:

 - fix corruption in RDMA in-capsule SGLs (Sagi Grimberg)
 - nvme-loop reset fixes (Hannes Reinecke)
 - nvmet fix for freeing unallocated p2pmem (Max Gurtovoy)"

* tag 'nvme-5.13-2021-06-03' of git://git.infradead.org/nvme:
  nvmet: fix freeing unallocated p2pmem
  nvme-loop: do not warn for deleted controllers during reset
  nvme-loop: check for NVME_LOOP_Q_LIVE in nvme_loop_destroy_admin_queue()
  nvme-loop: clear NVME_LOOP_Q_LIVE when nvme_loop_configure_admin_queue() fails
  nvme-loop: reset queue count to 1 in nvme_loop_destroy_io_queues()
  nvme-rdma: fix in-capsule data send for chained sgls
parents a4b58f17 bcd9a079
drivers/nvme/host/rdma.c (+3 −2)
@@ -1320,16 +1320,17 @@ static int nvme_rdma_map_sg_inline(struct nvme_rdma_queue *queue,
 		int count)
 {
 	struct nvme_sgl_desc *sg = &c->common.dptr.sgl;
-	struct scatterlist *sgl = req->data_sgl.sg_table.sgl;
 	struct ib_sge *sge = &req->sge[1];
+	struct scatterlist *sgl;
 	u32 len = 0;
 	int i;
 
-	for (i = 0; i < count; i++, sgl++, sge++) {
+	for_each_sg(req->data_sgl.sg_table.sgl, sgl, count, i) {
 		sge->addr = sg_dma_address(sgl);
 		sge->length = sg_dma_len(sgl);
 		sge->lkey = queue->device->pd->local_dma_lkey;
 		len += sge->length;
+		sge++;
 	}
 
 	sg->addr = cpu_to_le64(queue->ctrl->ctrl.icdoff);
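The fix above matters because a scatterlist is not always one flat array: when the SG table is chained, the last slot of each chunk is a link entry pointing at the next chunk, so the old "sgl++" walk steps into the link instead of following it and maps garbage as in-capsule data (and sge now has to be advanced by hand, since for_each_sg() only advances the scatterlist cursor). Below is a minimal userspace sketch of the difference; struct sg and sg_next_entry() are simplified stand-ins for the kernel's struct scatterlist and sg_next(), not the real layout:

#include <stdio.h>
#include <stdint.h>

struct sg {
	uintptr_t link;		/* low bit set: this slot chains to another chunk */
	unsigned int length;	/* payload length for normal entries */
};

#define SG_CHAIN 0x1UL

/* Simplified analogue of the kernel's sg_next(): step forward and, if the
 * slot we land on is a chain link, follow it into the next chunk. */
static struct sg *sg_next_entry(struct sg *s)
{
	s++;
	if (s->link & SG_CHAIN)
		s = (struct sg *)(s->link & ~SG_CHAIN);
	return s;
}

int main(void)
{
	struct sg chunk2[2] = { { 0, 300 }, { 0, 400 } };
	struct sg chunk1[3] = {
		{ 0, 100 },
		{ 0, 200 },
		{ (uintptr_t)chunk2 | SG_CHAIN, 0 },	/* chain entry */
	};
	struct sg *s;
	int i;

	/* Chain-aware walk over four mapped entries: 100 200 300 400. */
	for (i = 0, s = chunk1; i < 4; i++, s = sg_next_entry(s))
		printf("%u ", s->length);
	printf("\n");

	/* The old "sgl++" walk: the third step lands on the chain entry
	 * itself (length 0) instead of the 300-byte entry in chunk2. */
	for (i = 0, s = chunk1; i < 3; i++, s++)
		printf("%u ", s->length);
	printf("\n");
	return 0;
}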
drivers/nvme/target/core.c (+16 −17)
@@ -1005,19 +1005,23 @@ static unsigned int nvmet_data_transfer_len(struct nvmet_req *req)
 	return req->transfer_len - req->metadata_len;
 }
 
-static int nvmet_req_alloc_p2pmem_sgls(struct nvmet_req *req)
+static int nvmet_req_alloc_p2pmem_sgls(struct pci_dev *p2p_dev,
+		struct nvmet_req *req)
 {
-	req->sg = pci_p2pmem_alloc_sgl(req->p2p_dev, &req->sg_cnt,
+	req->sg = pci_p2pmem_alloc_sgl(p2p_dev, &req->sg_cnt,
 			nvmet_data_transfer_len(req));
 	if (!req->sg)
 		goto out_err;
 
 	if (req->metadata_len) {
-		req->metadata_sg = pci_p2pmem_alloc_sgl(req->p2p_dev,
+		req->metadata_sg = pci_p2pmem_alloc_sgl(p2p_dev,
 				&req->metadata_sg_cnt, req->metadata_len);
 		if (!req->metadata_sg)
 			goto out_free_sg;
 	}
 
+	req->p2p_dev = p2p_dev;
+
 	return 0;
 out_free_sg:
-	pci_p2pmem_free_sgl(req->p2p_dev, req->sg);
+	pci_p2pmem_free_sgl(p2p_dev, req->sg);
@@ -1025,25 +1029,19 @@ static int nvmet_req_alloc_p2pmem_sgls(struct nvmet_req *req)
 	return -ENOMEM;
 }
 
-static bool nvmet_req_find_p2p_dev(struct nvmet_req *req)
+static struct pci_dev *nvmet_req_find_p2p_dev(struct nvmet_req *req)
 {
-	if (!IS_ENABLED(CONFIG_PCI_P2PDMA))
-		return false;
-
-	if (req->sq->ctrl && req->sq->qid && req->ns) {
-		req->p2p_dev = radix_tree_lookup(&req->sq->ctrl->p2p_ns_map,
-						 req->ns->nsid);
-		if (req->p2p_dev)
-			return true;
-	}
-
-	req->p2p_dev = NULL;
-	return false;
+	if (!IS_ENABLED(CONFIG_PCI_P2PDMA) ||
+	    !req->sq->ctrl || !req->sq->qid || !req->ns)
+		return NULL;
+	return radix_tree_lookup(&req->sq->ctrl->p2p_ns_map, req->ns->nsid);
 }
 
 int nvmet_req_alloc_sgls(struct nvmet_req *req)
 {
-	if (nvmet_req_find_p2p_dev(req) && !nvmet_req_alloc_p2pmem_sgls(req))
+	struct pci_dev *p2p_dev = nvmet_req_find_p2p_dev(req);
+
+	if (p2p_dev && !nvmet_req_alloc_p2pmem_sgls(p2p_dev, req))
 		return 0;
 
 	req->sg = sgl_alloc(nvmet_data_transfer_len(req), GFP_KERNEL,
@@ -1072,6 +1070,7 @@ void nvmet_req_free_sgls(struct nvmet_req *req)
 		pci_p2pmem_free_sgl(req->p2p_dev, req->sg);
 		if (req->metadata_sg)
 			pci_p2pmem_free_sgl(req->p2p_dev, req->metadata_sg);
+		req->p2p_dev = NULL;
 	} else {
 		sgl_free(req->sg);
 		if (req->metadata_sg)
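The crash this patch fixes ("fix freeing unallocated p2pmem") came from req->p2p_dev being set as a side effect of the lookup, before any allocation: if a p2p device existed but its pool was exhausted, allocation fell back to sgl_alloc(), yet the free path still saw req->p2p_dev != NULL and handed the regular SGL to pci_p2pmem_free_sgl(). The rework tags the request with the p2p device only after the p2p allocation has actually succeeded. A toy userspace sketch of the same ownership rule, with invented allocator names standing in for the p2pmem calls:

#include <stdio.h>
#include <stdlib.h>

struct request {
	void *buf;
	const char *pool;	/* non-NULL: buf is owned by the special pool */
};

/* Stand-ins for the p2pmem alloc/free pair; the alloc returns NULL to
 * model an exhausted peer-to-peer pool. */
static void *pool_alloc(size_t len) { (void)len; return NULL; }
static void pool_free(void *p) { printf("pool_free(%p)\n", p); }

static int alloc_buf(struct request *req, size_t len)
{
	req->buf = pool_alloc(len);
	if (req->buf) {
		req->pool = "p2p";	/* tag ownership only on success */
		return 0;
	}
	req->buf = malloc(len);		/* fallback allocator */
	return req->buf ? 0 : -1;
}

static void free_buf(struct request *req)
{
	/* The free path trusts the tag, so the tag may only be set once
	 * the special allocator has really produced the buffer. */
	if (req->pool)
		pool_free(req->buf);
	else
		free(req->buf);
	req->pool = NULL;	/* mirrors the patch clearing req->p2p_dev */
}

int main(void)
{
	struct request req = { 0 };

	if (alloc_buf(&req, 64) == 0)
		free_buf(&req);	/* frees via free(), not pool_free() */
	return 0;
}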
drivers/nvme/target/loop.c (+8 −3)
@@ -263,7 +263,8 @@ static const struct blk_mq_ops nvme_loop_admin_mq_ops = {
 
 static void nvme_loop_destroy_admin_queue(struct nvme_loop_ctrl *ctrl)
 {
-	clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags);
+	if (!test_and_clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags))
+		return;
 	nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
 	blk_cleanup_queue(ctrl->ctrl.admin_q);
 	blk_cleanup_queue(ctrl->ctrl.fabrics_q);
@@ -299,6 +300,7 @@ static void nvme_loop_destroy_io_queues(struct nvme_loop_ctrl *ctrl)
 		clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[i].flags);
 		nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
 	}
+	ctrl->ctrl.queue_count = 1;
 }
 
 static int nvme_loop_init_io_queues(struct nvme_loop_ctrl *ctrl)
@@ -405,6 +407,7 @@ static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
 	return 0;
 
 out_cleanup_queue:
+	clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags);
 	blk_cleanup_queue(ctrl->ctrl.admin_q);
 out_cleanup_fabrics_q:
 	blk_cleanup_queue(ctrl->ctrl.fabrics_q);
@@ -462,7 +465,9 @@ static void nvme_loop_reset_ctrl_work(struct work_struct *work)
 	nvme_loop_shutdown_ctrl(ctrl);
 
 	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
-		/* state change failure should never happen */
-		WARN_ON_ONCE(1);
+		if (ctrl->ctrl.state != NVME_CTRL_DELETING &&
+		    ctrl->ctrl.state != NVME_CTRL_DELETING_NOIO)
+			/* state change failure for non-deleted ctrl? */
+			WARN_ON_ONCE(1);
 		return;
 	}
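The common thread in the loop-driver fixes is making teardown safe to reach more than once during reset: test_and_clear_bit() lets the first caller of nvme_loop_destroy_admin_queue() do the real work and turns any later call into a no-op, instead of a double nvmet_sq_destroy(). A userspace sketch of that idempotent-destroy pattern, using C11 atomics in place of the kernel's bitops:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define Q_LIVE 0x1u

static atomic_uint queue_flags;

/* Userspace stand-in for the kernel's test_and_clear_bit(): atomically
 * clear the flag and report whether it was set beforehand. */
static bool test_and_clear_live(void)
{
	return atomic_fetch_and(&queue_flags, ~Q_LIVE) & Q_LIVE;
}

static void destroy_admin_queue(void)
{
	if (!test_and_clear_live())
		return;		/* already torn down: re-entry is a no-op */
	puts("tearing down admin queue");
}

int main(void)
{
	atomic_fetch_or(&queue_flags, Q_LIVE);	/* queue comes up live */
	destroy_admin_queue();	/* first caller does the real teardown */
	destroy_admin_queue();	/* second path through reset is harmless */
	return 0;
}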