Commit 6b080c4e authored by Vasant Hegde, committed by Joerg Roedel

iommu/amd: Add map/unmap_pages() iommu_domain_ops callback support



Implement the map_pages() and unmap_pages() callbacks for the AMD IOMMU
driver to allow the iommu core to map and unmap multiple pages per call.
Also deprecate the single-page map/unmap callbacks.

Finally, the gather structure is not updated by iommu_v1_unmap_pages(),
so pass NULL instead of gather to iommu_v1_unmap_pages().
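
For reference, a sketch of the two multi-page callbacks this patch adopts,
paraphrased from struct iommu_domain_ops in include/linux/iommu.h as of this
series (the excerpt struct name is illustrative, not a real kernel type):

/*
 * Paraphrased excerpt of struct iommu_domain_ops (include/linux/iommu.h);
 * shown for context only, not part of the diff below.
 */
struct iommu_domain_ops_excerpt {
	/* Map @pgcount pages of size @pgsize; bytes actually mapped are
	 * reported back through @mapped so the core can unwind on error. */
	int (*map_pages)(struct iommu_domain *domain, unsigned long iova,
			 phys_addr_t paddr, size_t pgsize, size_t pgcount,
			 int prot, gfp_t gfp, size_t *mapped);
	/* Unmap up to @pgcount pages of size @pgsize; returns the number
	 * of bytes actually unmapped. */
	size_t (*unmap_pages)(struct iommu_domain *domain, unsigned long iova,
			      size_t pgsize, size_t pgcount,
			      struct iommu_iotlb_gather *iotlb_gather);
};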

Suggested-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Vasant Hegde <vasant.hegde@amd.com>
Link: https://lore.kernel.org/r/20220825063939.8360-4-vasant.hegde@amd.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
parent 251c4db6
drivers/iommu/amd/iommu.c +16 −13
@@ -2174,13 +2174,13 @@ static void amd_iommu_iotlb_sync_map(struct iommu_domain *dom,
 	struct protection_domain *domain = to_pdomain(dom);
 	struct io_pgtable_ops *ops = &domain->iop.iop.ops;
 
-	if (ops->map)
+	if (ops->map_pages)
 		domain_flush_np_cache(domain, iova, size);
 }
 
-static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova,
-			 phys_addr_t paddr, size_t page_size, int iommu_prot,
-			 gfp_t gfp)
+static int amd_iommu_map_pages(struct iommu_domain *dom, unsigned long iova,
+			       phys_addr_t paddr, size_t pgsize, size_t pgcount,
+			       int iommu_prot, gfp_t gfp, size_t *mapped)
 {
 	struct protection_domain *domain = to_pdomain(dom);
 	struct io_pgtable_ops *ops = &domain->iop.iop.ops;
@@ -2196,8 +2196,10 @@ static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova,
 	if (iommu_prot & IOMMU_WRITE)
 		prot |= IOMMU_PROT_IW;
 
-	if (ops->map)
-		ret = ops->map(ops, iova, paddr, page_size, prot, gfp);
+	if (ops->map_pages) {
+		ret = ops->map_pages(ops, iova, paddr, pgsize,
+				     pgcount, prot, gfp, mapped);
+	}
 
 	return ret;
 }
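
The ops->map_pages call above dispatches into the io-pgtable layer; for the
AMD v1 page table it is backed by the iommu_v1_map_pages()/
iommu_v1_unmap_pages() helpers added earlier in this series. For orientation,
a paraphrased excerpt of the matching io-pgtable hooks (cf.
include/linux/io-pgtable.h; the excerpt struct name is illustrative):

/*
 * Paraphrased excerpt of struct io_pgtable_ops (include/linux/io-pgtable.h)
 * showing the hooks that ops->map_pages/ops->unmap_pages resolve to.
 */
struct io_pgtable_ops_excerpt {
	int (*map_pages)(struct io_pgtable_ops *ops, unsigned long iova,
			 phys_addr_t paddr, size_t pgsize, size_t pgcount,
			 int prot, gfp_t gfp, size_t *mapped);
	size_t (*unmap_pages)(struct io_pgtable_ops *ops, unsigned long iova,
			      size_t pgsize, size_t pgcount,
			      struct iommu_iotlb_gather *gather);
};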
@@ -2223,8 +2225,8 @@ static void amd_iommu_iotlb_gather_add_page(struct iommu_domain *domain,
 	iommu_iotlb_gather_add_range(gather, iova, size);
 }
 
-static size_t amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
-			      size_t page_size,
-			      struct iommu_iotlb_gather *gather)
+static size_t amd_iommu_unmap_pages(struct iommu_domain *dom, unsigned long iova,
+				    size_t pgsize, size_t pgcount,
+				    struct iommu_iotlb_gather *gather)
 {
 	struct protection_domain *domain = to_pdomain(dom);
@@ -2235,9 +2237,10 @@ static size_t amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
 	    (domain->iop.mode == PAGE_MODE_NONE))
 		return 0;
 
-	r = (ops->unmap) ? ops->unmap(ops, iova, page_size, gather) : 0;
+	r = (ops->unmap_pages) ? ops->unmap_pages(ops, iova, pgsize, pgcount, NULL) : 0;
 
-	amd_iommu_iotlb_gather_add_page(dom, gather, iova, page_size);
+	if (r)
+		amd_iommu_iotlb_gather_add_page(dom, gather, iova, r);
 
 	return r;
 }
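
Since iommu_v1_unmap_pages() does not touch the gather (hence the NULL above),
the range bookkeeping happens in the caller, keyed on the bytes actually
unmapped (r). For reference, a sketch of what that bookkeeping amounts to,
paraphrasing iommu_iotlb_gather_add_range() from include/linux/iommu.h (the
function name below is a hypothetical stand-in for the real inline helper):

/*
 * Sketch of iommu_iotlb_gather_add_range(): it only widens the
 * [start, end] window that a later iotlb_sync() will flush, which is
 * why passing the actually-unmapped size r is sufficient here.
 */
static inline void gather_add_range_sketch(struct iommu_iotlb_gather *gather,
					   unsigned long iova, size_t size)
{
	unsigned long end = iova + size - 1;

	if (gather->start > iova)
		gather->start = iova;
	if (gather->end < end)
		gather->end = end;
}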
@@ -2399,8 +2402,8 @@ const struct iommu_ops amd_iommu_ops = {
 	.default_domain_ops = &(const struct iommu_domain_ops) {
 		.attach_dev	= amd_iommu_attach_device,
 		.detach_dev	= amd_iommu_detach_device,
-		.map		= amd_iommu_map,
-		.unmap		= amd_iommu_unmap,
+		.map_pages	= amd_iommu_map_pages,
+		.unmap_pages	= amd_iommu_unmap_pages,
 		.iotlb_sync_map	= amd_iommu_iotlb_sync_map,
 		.iova_to_phys	= amd_iommu_iova_to_phys,
 		.flush_iotlb_all = amd_iommu_flush_iotlb_all,
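
With these ops registered, the core can hand the driver a whole run of
equal-sized pages in one call instead of one call per page. A minimal
illustrative sketch of the caller side, assuming the domain-ops layout of
this kernel generation (example_map_run() is a hypothetical name; see
__iommu_map()/__iommu_unmap() in drivers/iommu/iommu.c for the real,
more careful logic):

/*
 * Illustrative only: roughly how the iommu core drives the new hook.
 */
static int example_map_run(struct iommu_domain *dom, unsigned long iova,
			   phys_addr_t paddr, size_t pgsize, size_t pgcount,
			   int prot, gfp_t gfp)
{
	size_t mapped = 0;

	/* One callback invocation covers all pgcount pages of size pgsize,
	 * where the old ->map hook needed pgcount separate calls. */
	return dom->ops->map_pages(dom, iova, paddr, pgsize, pgcount,
				   prot, gfp, &mapped);
}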