Commit fd86c950 authored by Suravee Suthikulpanit, committed by Joerg Roedel
Browse files

iommu/amd: Introduce iommu_v1_map_page and iommu_v1_unmap_page



These implement map and unmap for AMD IOMMU v1 pagetable, which
will be used by the IO pagetable framework.

Also clean up unused extern function declarations.

Signed-off-by: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
Link: https://lore.kernel.org/r/20201215073705.123786-13-suravee.suthikulpanit@amd.com


Signed-off-by: Joerg Roedel <jroedel@suse.de>
parent 441555c6
Loading
Loading
Loading
Loading
+0 −13
Original line number Diff line number Diff line
@@ -133,19 +133,6 @@ void amd_iommu_apply_ivrs_quirks(void);
static inline void amd_iommu_apply_ivrs_quirks(void) { }
#endif

/* TODO: These are temporary and will be removed once fully transition */
extern int iommu_map_page(struct protection_domain *dom,
			  unsigned long bus_addr,
			  unsigned long phys_addr,
			  unsigned long page_size,
			  int prot,
			  gfp_t gfp);
extern unsigned long iommu_unmap_page(struct protection_domain *dom,
				      unsigned long bus_addr,
				      unsigned long page_size);
extern u64 *fetch_pte(struct amd_io_pgtable *pgtable,
		      unsigned long address,
		      unsigned long *page_size);
extern void amd_iommu_domain_set_pgtable(struct protection_domain *domain,
					 u64 *root, int mode);
#endif
+12 −13
Original line number Diff line number Diff line
@@ -311,7 +311,7 @@ static u64 *alloc_pte(struct protection_domain *domain,
 * This function checks if there is a PTE for a given dma address. If
 * there is one, it returns the pointer to it.
 */
u64 *fetch_pte(struct amd_io_pgtable *pgtable,
static u64 *fetch_pte(struct amd_io_pgtable *pgtable,
		      unsigned long address,
		      unsigned long *page_size)
{
@@ -386,13 +386,10 @@ static struct page *free_clear_pte(u64 *pte, u64 pteval, struct page *freelist)
 * supporting all features of AMD IOMMU page tables like level skipping
 * and full 64 bit address spaces.
 */
int iommu_map_page(struct protection_domain *dom,
		   unsigned long iova,
		   unsigned long paddr,
		   unsigned long size,
		   int prot,
		   gfp_t gfp)
static int iommu_v1_map_page(struct io_pgtable_ops *ops, unsigned long iova,
			  phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
	struct protection_domain *dom = io_pgtable_ops_to_domain(ops);
	struct page *freelist = NULL;
	bool updated = false;
	u64 __pte, *pte;
@@ -455,11 +452,11 @@ int iommu_map_page(struct protection_domain *dom,
	return ret;
}

unsigned long iommu_unmap_page(struct protection_domain *dom,
static unsigned long iommu_v1_unmap_page(struct io_pgtable_ops *ops,
				      unsigned long iova,
			       unsigned long size)
				      size_t size,
				      struct iommu_iotlb_gather *gather)
{
	struct io_pgtable_ops *ops = &dom->iop.iop.ops;
	struct amd_io_pgtable *pgtable = io_pgtable_ops_to_data(ops);
	unsigned long long unmapped;
	unsigned long unmap_size;
@@ -548,6 +545,8 @@ static struct io_pgtable *v1_alloc_pgtable(struct io_pgtable_cfg *cfg, void *coo
	cfg->oas            = IOMMU_OUT_ADDR_BIT_SIZE,
	cfg->tlb            = &v1_flush_ops;

	pgtable->iop.ops.map          = iommu_v1_map_page;
	pgtable->iop.ops.unmap        = iommu_v1_unmap_page;
	pgtable->iop.ops.iova_to_phys = iommu_v1_iova_to_phys;

	return &pgtable->iop;
+8 −5
Original line number Diff line number Diff line
@@ -2065,8 +2065,9 @@ static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova,
			 gfp_t gfp)
{
	struct protection_domain *domain = to_pdomain(dom);
	struct io_pgtable_ops *ops = &domain->iop.iop.ops;
	int prot = 0;
	int ret;
	int ret = -EINVAL;

	if (domain->iop.mode == PAGE_MODE_NONE)
		return -EINVAL;
@@ -2076,9 +2077,10 @@ static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova,
	if (iommu_prot & IOMMU_WRITE)
		prot |= IOMMU_PROT_IW;

	ret = iommu_map_page(domain, iova, paddr, page_size, prot, gfp);

	if (ops->map) {
		ret = ops->map(ops, iova, paddr, page_size, prot, gfp);
		domain_flush_np_cache(domain, iova, page_size);
	}

	return ret;
}
@@ -2088,11 +2090,12 @@ static size_t amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
			      struct iommu_iotlb_gather *gather)
{
	struct protection_domain *domain = to_pdomain(dom);
	struct io_pgtable_ops *ops = &domain->iop.iop.ops;

	if (domain->iop.mode == PAGE_MODE_NONE)
		return 0;

	return iommu_unmap_page(domain, iova, page_size);
	return (ops->unmap) ? ops->unmap(ops, iova, page_size, gather) : 0;
}

static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,