Commit 8cc233de authored by Vasant Hegde, committed by Joerg Roedel

iommu/amd/io-pgtable: Implement map_pages io_pgtable_ops callback

Implement the io_pgtable_ops->map_pages() callback for the AMD driver
and drop the deprecated io_pgtable_ops->map() callback.
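For reference, this is the shape of the callback being implemented, as declared
in struct io_pgtable_ops (include/linux/io-pgtable.h of this era; parameter
names follow that header). It maps pgcount pages of pgsize bytes each, starting
at iova/paddr, and advances *mapped by the number of bytes actually mapped so
a caller can unwind a partially completed request on error:

	int (*map_pages)(struct io_pgtable_ops *ops, unsigned long iova,
			 phys_addr_t paddr, size_t pgsize, size_t pgcount,
			 int prot, gfp_t gfp, size_t *mapped);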

Suggested-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Vasant Hegde <vasant.hegde@amd.com>
Link: https://lore.kernel.org/r/20220825063939.8360-2-vasant.hegde@amd.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
parent 7e18e42e
drivers/iommu/amd/io_pgtable.c: +34 −25
@@ -360,8 +360,9 @@ static void free_clear_pte(u64 *pte, u64 pteval, struct list_head *freelist)
 * supporting all features of AMD IOMMU page tables like level skipping
 * and full 64 bit address spaces.
 */
-static int iommu_v1_map_page(struct io_pgtable_ops *ops, unsigned long iova,
-			  phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
+static int iommu_v1_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
+			      phys_addr_t paddr, size_t pgsize, size_t pgcount,
+			      int prot, gfp_t gfp, size_t *mapped)
{
	struct protection_domain *dom = io_pgtable_ops_to_domain(ops);
	LIST_HEAD(freelist);
@@ -369,15 +370,16 @@ static int iommu_v1_map_page(struct io_pgtable_ops *ops, unsigned long iova,
	u64 __pte, *pte;
	int ret, i, count;

-	BUG_ON(!IS_ALIGNED(iova, size));
-	BUG_ON(!IS_ALIGNED(paddr, size));
+	BUG_ON(!IS_ALIGNED(iova, pgsize));
+	BUG_ON(!IS_ALIGNED(paddr, pgsize));

	ret = -EINVAL;
	if (!(prot & IOMMU_PROT_MASK))
		goto out;

-	count = PAGE_SIZE_PTE_COUNT(size);
-	pte   = alloc_pte(dom, iova, size, NULL, gfp, &updated);
+	while (pgcount > 0) {
+		count = PAGE_SIZE_PTE_COUNT(pgsize);
+		pte   = alloc_pte(dom, iova, pgsize, NULL, gfp, &updated);

		ret = -ENOMEM;
		if (!pte)
@@ -390,7 +392,7 @@ static int iommu_v1_map_page(struct io_pgtable_ops *ops, unsigned long iova,
			updated = true;

		if (count > 1) {
-		__pte = PAGE_SIZE_PTE(__sme_set(paddr), size);
+			__pte = PAGE_SIZE_PTE(__sme_set(paddr), pgsize);
			__pte |= PM_LEVEL_ENC(7) | IOMMU_PTE_PR | IOMMU_PTE_FC;
		} else
			__pte = __sme_set(paddr) | IOMMU_PTE_PR | IOMMU_PTE_FC;
@@ -403,6 +405,13 @@ static int iommu_v1_map_page(struct io_pgtable_ops *ops, unsigned long iova,
		for (i = 0; i < count; ++i)
			pte[i] = __pte;

+		iova  += pgsize;
+		paddr += pgsize;
+		pgcount--;
+		if (mapped)
+			*mapped += pgsize;
+	}

	ret = 0;

out:
@@ -514,7 +523,7 @@ static struct io_pgtable *v1_alloc_pgtable(struct io_pgtable_cfg *cfg, void *coo
	cfg->oas            = IOMMU_OUT_ADDR_BIT_SIZE,
	cfg->tlb            = &v1_flush_ops;

-	pgtable->iop.ops.map          = iommu_v1_map_page;
+	pgtable->iop.ops.map_pages    = iommu_v1_map_pages;
	pgtable->iop.ops.unmap        = iommu_v1_unmap_page;
	pgtable->iop.ops.iova_to_phys = iommu_v1_iova_to_phys;
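For context, a minimal hypothetical caller (not kernel code; ops, iova and
paddr are illustrative free variables) showing why the batched callback
matters: the old ->map path costs one indirect call per page, while
->map_pages covers the whole run in a single call and reports partial
progress through 'mapped':

	size_t pgsize = SZ_4K, pgcount = 512, mapped = 0, i;
	int prot = IOMMU_PROT_IR | IOMMU_PROT_IW; /* checked against IOMMU_PROT_MASK above */
	int ret;

	/* Old path: one indirect call per 4K page. */
	for (i = 0; i < pgcount; i++) {
		ret = ops->map(ops, iova + i * pgsize, paddr + i * pgsize,
			       pgsize, prot, GFP_KERNEL);
		if (ret)
			break;
	}

	/* New path: one call maps the whole run; on failure, 'mapped' tells
	 * the caller how many bytes were installed and must be torn down. */
	ret = ops->map_pages(ops, iova, paddr, pgsize, pgcount, prot,
			     GFP_KERNEL, &mapped);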