Commit 0633bbcc authored by Suravee Suthikulpanit, committed by Joerg Roedel
Browse files

iommu/amd: Refactor fetch_pte to use struct amd_io_pgtable



Pass struct amd_io_pgtable directly to simplify the fetch_pte function. There is no functional change.

Signed-off-by: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
Link: https://lore.kernel.org/r/20201215073705.123786-11-suravee.suthikulpanit@amd.com


Signed-off-by: Joerg Roedel <jroedel@suse.de>
parent 33aef978
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -143,7 +143,7 @@ extern int iommu_map_page(struct protection_domain *dom,
extern unsigned long iommu_unmap_page(struct protection_domain *dom,
				      unsigned long bus_addr,
				      unsigned long page_size);
extern u64 *fetch_pte(struct protection_domain *domain,
extern u64 *fetch_pte(struct amd_io_pgtable *pgtable,
		      unsigned long address,
		      unsigned long *page_size);
extern void amd_iommu_domain_set_pgtable(struct protection_domain *domain,
+7 −6
Original line number Diff line number Diff line
@@ -311,7 +311,7 @@ static u64 *alloc_pte(struct protection_domain *domain,
 * This function checks if there is a PTE for a given dma address. If
 * there is one, it returns the pointer to it.
 */
u64 *fetch_pte(struct protection_domain *domain,
u64 *fetch_pte(struct amd_io_pgtable *pgtable,
	       unsigned long address,
	       unsigned long *page_size)
{
@@ -320,11 +320,11 @@ u64 *fetch_pte(struct protection_domain *domain,

	*page_size = 0;

	if (address > PM_LEVEL_SIZE(domain->iop.mode))
	if (address > PM_LEVEL_SIZE(pgtable->mode))
		return NULL;

	level	   =  domain->iop.mode - 1;
	pte	   = &domain->iop.root[PM_LEVEL_INDEX(level, address)];
	level	   =  pgtable->mode - 1;
	pte	   = &pgtable->root[PM_LEVEL_INDEX(level, address)];
	*page_size =  PTE_LEVEL_PAGE_SIZE(level);

	while (level > 0) {
@@ -459,6 +459,8 @@ unsigned long iommu_unmap_page(struct protection_domain *dom,
			       unsigned long iova,
			       unsigned long size)
{
	struct io_pgtable_ops *ops = &dom->iop.iop.ops;
	struct amd_io_pgtable *pgtable = io_pgtable_ops_to_data(ops);
	unsigned long long unmapped;
	unsigned long unmap_size;
	u64 *pte;
@@ -468,8 +470,7 @@ unsigned long iommu_unmap_page(struct protection_domain *dom,
	unmapped = 0;

	while (unmapped < size) {
		pte = fetch_pte(dom, iova, &unmap_size);

		pte = fetch_pte(pgtable, iova, &unmap_size);
		if (pte) {
			int i, count;

+3 −1
Original line number Diff line number Diff line
@@ -2099,13 +2099,15 @@ static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
					  dma_addr_t iova)
{
	struct protection_domain *domain = to_pdomain(dom);
	struct io_pgtable_ops *ops = &domain->iop.iop.ops;
	struct amd_io_pgtable *pgtable = io_pgtable_ops_to_data(ops);
	unsigned long offset_mask, pte_pgsize;
	u64 *pte, __pte;

	if (domain->iop.mode == PAGE_MODE_NONE)
		return iova;

	pte = fetch_pte(domain, iova, &pte_pgsize);
	pte = fetch_pte(pgtable, iova, &pte_pgsize);

	if (!pte || !IOMMU_PTE_PRESENT(*pte))
		return 0;