Commit 91fb2b60 authored by Logan Gunthorpe, committed by Christoph Hellwig

nvme-pci: convert to using dma_map_sgtable()
The dma_map operations now support P2PDMA pages directly, so remove the
calls to pci_p2pdma_[un]map_sg_attrs() and replace them with calls to
dma_map_sgtable().

dma_map_sgtable() returns more complete error codes than dma_map_sg()
and allows differentiating EREMOTEIO errors, which indicate that an
unsupported P2PDMA transfer was requested. When this happens, return
BLK_STS_TARGET so the request isn't retried.
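
For context: unlike dma_map_sg(), which returns 0 on any failure,
dma_map_sgtable() returns 0 on success or a negative errno, which is what
makes the unsupported-P2PDMA case distinguishable. A minimal sketch of the
resulting pattern (the helper name nvme_map_sgt_status() is illustrative
only, not part of this commit, which open-codes the check in
nvme_map_data()):

	#include <linux/blk_types.h>
	#include <linux/dma-mapping.h>

	/* Map an sg_table and translate the errno into a block status.
	 * Hypothetical helper used only to illustrate the new pattern. */
	static blk_status_t nvme_map_sgt_status(struct device *dma_dev,
			struct sg_table *sgt, enum dma_data_direction dir)
	{
		int rc = dma_map_sgtable(dma_dev, sgt, dir, DMA_ATTR_NO_WARN);

		if (rc == -EREMOTEIO)		/* unsupported P2PDMA transfer */
			return BLK_STS_TARGET;	/* fail fast, don't retry */
		if (rc)				/* any other mapping failure */
			return BLK_STS_RESOURCE; /* transient, may be retried */
		return BLK_STS_OK;	/* sgt->nents holds the mapped count */
	}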

Signed-off-by: Logan Gunthorpe <logang@deltatee.com>
Reviewed-by: Max Gurtovoy <mgurtovoy@nvidia.com>
Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Christoph Hellwig <hch@lst.de>
parent 2f859441
drivers/nvme/host/pci.c: +29 −40

--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -230,11 +230,10 @@ struct nvme_iod {
 	bool use_sgl;
 	int aborted;
 	int npages;		/* In the PRP list. 0 means small pool in use */
-	int nents;		/* Used in scatterlist */
 	dma_addr_t first_dma;
 	unsigned int dma_len;	/* length of single DMA segment mapping */
 	dma_addr_t meta_dma;
-	struct scatterlist *sg;
+	struct sg_table sgt;
 };

 static inline unsigned int nvme_dbbuf_size(struct nvme_dev *dev)
@@ -524,7 +523,7 @@ static void nvme_commit_rqs(struct blk_mq_hw_ctx *hctx)
 static void **nvme_pci_iod_list(struct request *req)
 {
 	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
-	return (void **)(iod->sg + blk_rq_nr_phys_segments(req));
+	return (void **)(iod->sgt.sgl + blk_rq_nr_phys_segments(req));
 }

 static inline bool nvme_pci_use_sgls(struct nvme_dev *dev, struct request *req)
@@ -576,17 +575,6 @@ static void nvme_free_sgls(struct nvme_dev *dev, struct request *req)
 	}
 }

-static void nvme_unmap_sg(struct nvme_dev *dev, struct request *req)
-{
-	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
-
-	if (is_pci_p2pdma_page(sg_page(iod->sg)))
-		pci_p2pdma_unmap_sg(dev->dev, iod->sg, iod->nents,
-				    rq_dma_dir(req));
-	else
-		dma_unmap_sg(dev->dev, iod->sg, iod->nents, rq_dma_dir(req));
-}
-
 static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
 {
 	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
@@ -597,9 +585,10 @@ static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
 		return;
 	}

-	WARN_ON_ONCE(!iod->nents);
+	WARN_ON_ONCE(!iod->sgt.nents);
+
+	dma_unmap_sgtable(dev->dev, &iod->sgt, rq_dma_dir(req), 0);

-	nvme_unmap_sg(dev, req);
 	if (iod->npages == 0)
 		dma_pool_free(dev->prp_small_pool, nvme_pci_iod_list(req)[0],
 			      iod->first_dma);
@@ -607,7 +596,7 @@ static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
 		nvme_free_sgls(dev, req);
 	else
 		nvme_free_prps(dev, req);
-	mempool_free(iod->sg, dev->iod_mempool);
+	mempool_free(iod->sgt.sgl, dev->iod_mempool);
 }

 static void nvme_print_sgl(struct scatterlist *sgl, int nents)
@@ -630,7 +619,7 @@ static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev,
 	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
 	struct dma_pool *pool;
 	int length = blk_rq_payload_bytes(req);
-	struct scatterlist *sg = iod->sg;
+	struct scatterlist *sg = iod->sgt.sgl;
 	int dma_len = sg_dma_len(sg);
 	u64 dma_addr = sg_dma_address(sg);
 	int offset = dma_addr & (NVME_CTRL_PAGE_SIZE - 1);
@@ -703,16 +692,16 @@ static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev,
 		dma_len = sg_dma_len(sg);
 	}
 done:
-	cmnd->dptr.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
+	cmnd->dptr.prp1 = cpu_to_le64(sg_dma_address(iod->sgt.sgl));
 	cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma);
 	return BLK_STS_OK;
 free_prps:
 	nvme_free_prps(dev, req);
 	return BLK_STS_RESOURCE;
 bad_sgl:
-	WARN(DO_ONCE(nvme_print_sgl, iod->sg, iod->nents),
+	WARN(DO_ONCE(nvme_print_sgl, iod->sgt.sgl, iod->sgt.nents),
 			"Invalid SGL for payload:%d nents:%d\n",
-			blk_rq_payload_bytes(req), iod->nents);
+			blk_rq_payload_bytes(req), iod->sgt.nents);
 	return BLK_STS_IOERR;
 }

@@ -738,12 +727,13 @@ static void nvme_pci_sgl_set_seg(struct nvme_sgl_desc *sge,
 }

 static blk_status_t nvme_pci_setup_sgls(struct nvme_dev *dev,
-		struct request *req, struct nvme_rw_command *cmd, int entries)
+		struct request *req, struct nvme_rw_command *cmd)
 {
 	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
 	struct dma_pool *pool;
 	struct nvme_sgl_desc *sg_list;
-	struct scatterlist *sg = iod->sg;
+	struct scatterlist *sg = iod->sgt.sgl;
+	unsigned int entries = iod->sgt.nents;
 	dma_addr_t sgl_dma;
 	int i = 0;

@@ -841,7 +831,7 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
 {
 	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
 	blk_status_t ret = BLK_STS_RESOURCE;
-	int nr_mapped;
+	int rc;

 	if (blk_rq_nr_phys_segments(req) == 1) {
 		struct bio_vec bv = req_bvec(req);
@@ -859,26 +849,25 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
 	}

 	iod->dma_len = 0;
-	iod->sg = mempool_alloc(dev->iod_mempool, GFP_ATOMIC);
-	if (!iod->sg)
+	iod->sgt.sgl = mempool_alloc(dev->iod_mempool, GFP_ATOMIC);
+	if (!iod->sgt.sgl)
 		return BLK_STS_RESOURCE;
-	sg_init_table(iod->sg, blk_rq_nr_phys_segments(req));
-	iod->nents = blk_rq_map_sg(req->q, req, iod->sg);
-	if (!iod->nents)
+	sg_init_table(iod->sgt.sgl, blk_rq_nr_phys_segments(req));
+	iod->sgt.orig_nents = blk_rq_map_sg(req->q, req, iod->sgt.sgl);
+	if (!iod->sgt.orig_nents)
 		goto out_free_sg;

-	if (is_pci_p2pdma_page(sg_page(iod->sg)))
-		nr_mapped = pci_p2pdma_map_sg_attrs(dev->dev, iod->sg,
-				iod->nents, rq_dma_dir(req), DMA_ATTR_NO_WARN);
-	else
-		nr_mapped = dma_map_sg_attrs(dev->dev, iod->sg, iod->nents,
-					     rq_dma_dir(req), DMA_ATTR_NO_WARN);
-	if (!nr_mapped)
+	rc = dma_map_sgtable(dev->dev, &iod->sgt, rq_dma_dir(req),
+			     DMA_ATTR_NO_WARN);
+	if (rc) {
+		if (rc == -EREMOTEIO)
+			ret = BLK_STS_TARGET;
 		goto out_free_sg;
+	}

 	iod->use_sgl = nvme_pci_use_sgls(dev, req);
 	if (iod->use_sgl)
-		ret = nvme_pci_setup_sgls(dev, req, &cmnd->rw, nr_mapped);
+		ret = nvme_pci_setup_sgls(dev, req, &cmnd->rw);
 	else
 		ret = nvme_pci_setup_prps(dev, req, &cmnd->rw);
 	if (ret != BLK_STS_OK)
@@ -886,9 +875,9 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
 	return BLK_STS_OK;

 out_unmap_sg:
-	nvme_unmap_sg(dev, req);
+	dma_unmap_sgtable(dev->dev, &iod->sgt, rq_dma_dir(req), 0);
 out_free_sg:
-	mempool_free(iod->sg, dev->iod_mempool);
+	mempool_free(iod->sgt.sgl, dev->iod_mempool);
 	return ret;
 }

@@ -912,7 +901,7 @@ static blk_status_t nvme_prep_rq(struct nvme_dev *dev, struct request *req)

 	iod->aborted = 0;
 	iod->npages = -1;
-	iod->nents = 0;
+	iod->sgt.nents = 0;

 	ret = nvme_setup_cmd(req->q->queuedata, req);
 	if (ret)
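
A note on the bookkeeping the conversion relies on: blk_rq_map_sg() fills
sgt.orig_nents, dma_map_sgtable() sets sgt.nents to the mapped (possibly
IOMMU-coalesced) entry count on success, and dma_unmap_sgtable() unmaps
orig_nents entries internally, so the driver no longer has to carry a
separate iod->nents. A condensed sketch of the lifecycle as it reads after
this patch (assembled from the hunks above; error paths and the
single-segment fast path elided):

	/* allocate and build the scatterlist for the request */
	iod->sgt.sgl = mempool_alloc(dev->iod_mempool, GFP_ATOMIC);
	sg_init_table(iod->sgt.sgl, blk_rq_nr_phys_segments(req));
	iod->sgt.orig_nents = blk_rq_map_sg(req->q, req, iod->sgt.sgl);

	/* map it; on success iod->sgt.nents is the mapped entry count */
	rc = dma_map_sgtable(dev->dev, &iod->sgt, rq_dma_dir(req),
			     DMA_ATTR_NO_WARN);

	/* ... build PRPs or SGLs from iod->sgt.sgl / iod->sgt.nents ... */

	/* teardown: dma_unmap_sgtable() uses sgt.orig_nents internally */
	dma_unmap_sgtable(dev->dev, &iod->sgt, rq_dma_dir(req), 0);
	mempool_free(iod->sgt.sgl, dev->iod_mempool);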