Commit fc65d0ac authored by Nadav Amit, committed by Joerg Roedel

iommu/amd: Selective flush on unmap

A recent patch attempted to enable selective page flushes on the AMD
IOMMU but neglected to adapt amd_iommu_iotlb_sync() to use the
selective flushes.

Adapt amd_iommu_iotlb_sync() to use selective flushes and change
amd_iommu_unmap() to collect the flushes. As a defensive measure, to
avoid potential issues such as those that the Intel IOMMU driver
encountered recently, flush the page-walk caches by always setting the
"pde" parameter. This can be removed later.
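
The gathering side relies on the generic iommu_iotlb_gather
bookkeeping. Below is a minimal, standalone sketch of that idea (the
struct gather type, gather_add_page() and the demo main() are
illustrative stand-ins paraphrasing the semantics of
iommu_iotlb_gather_add_page() from include/linux/iommu.h; the real
helper additionally tracks the page size and syncs eagerly when a
disjoint range arrives):

#include <stdio.h>

/*
 * Simplified stand-in for struct iommu_iotlb_gather: it tracks one
 * merged [start, end] range covering every page unmapped so far, so
 * that the sync step can issue a single selective flush.
 */
struct gather {
	unsigned long start;	/* lowest unmapped IOVA seen */
	unsigned long end;	/* highest unmapped byte seen (inclusive) */
};

#define GATHER_INIT { ~0UL, 0 }

/* Paraphrase of iommu_iotlb_gather_add_page(): widen the pending
 * range so it also covers the page at [iova, iova + size). */
static void gather_add_page(struct gather *g, unsigned long iova,
			    unsigned long size)
{
	unsigned long end = iova + size - 1;

	if (g->start > iova)
		g->start = iova;
	if (g->end < end)
		g->end = end;
}

int main(void)
{
	struct gather g = GATHER_INIT;

	/* Two unmaps collapse into one flushable range. */
	gather_add_page(&g, 0x100000, 0x1000);
	gather_add_page(&g, 0x103000, 0x1000);

	printf("flush range: 0x%lx-0x%lx\n", g.start, g.end);
	return 0;
}

Collapsing all unmapped pages into one [start, end] range is what lets
amd_iommu_iotlb_sync() issue a single selective flush instead of
flushing the entire domain.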

Cc: Joerg Roedel <joro@8bytes.org>
Cc: Will Deacon <will@kernel.org>
Cc: Jiajun Cao <caojiajun@vmware.com>
Cc: Robin Murphy <robin.murphy@arm.com>
Cc: Lu Baolu <baolu.lu@linux.intel.com>
Cc: iommu@lists.linux-foundation.org
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Nadav Amit <namit@vmware.com>
Link: https://lore.kernel.org/r/20210723093209.714328-2-namit@vmware.com

Signed-off-by: Joerg Roedel <jroedel@suse.de>
parent ee974d96
+13 −2
@@ -2060,12 +2060,17 @@ static size_t amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
 {
 	struct protection_domain *domain = to_pdomain(dom);
 	struct io_pgtable_ops *ops = &domain->iop.iop.ops;
+	size_t r;
 
 	if ((amd_iommu_pgtable == AMD_IOMMU_V1) &&
 	    (domain->iop.mode == PAGE_MODE_NONE))
 		return 0;
 
-	return (ops->unmap) ? ops->unmap(ops, iova, page_size, gather) : 0;
+	r = (ops->unmap) ? ops->unmap(ops, iova, page_size, gather) : 0;
+
+	iommu_iotlb_gather_add_page(dom, gather, iova, page_size);
+
+	return r;
 }
 
 static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
@@ -2168,7 +2173,13 @@ static void amd_iommu_flush_iotlb_all(struct iommu_domain *domain)
 static void amd_iommu_iotlb_sync(struct iommu_domain *domain,
 				 struct iommu_iotlb_gather *gather)
 {
-	amd_iommu_flush_iotlb_all(domain);
+	struct protection_domain *dom = to_pdomain(domain);
+	unsigned long flags;
+
+	spin_lock_irqsave(&dom->lock, flags);
+	__domain_flush_pages(dom, gather->start, gather->end - gather->start, 1);
+	amd_iommu_domain_flush_complete(dom);
+	spin_unlock_irqrestore(&dom->lock, flags);
 }
 
 static int amd_iommu_def_domain_type(struct device *dev)
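
For context, the two callbacks changed above are driven by the core
IOMMU API roughly as follows (a simplified paraphrase of iommu_unmap()
from drivers/iommu/iommu.c of this era, not part of this patch; error
handling omitted):

/*
 * Simplified paraphrase of iommu_unmap() from drivers/iommu/iommu.c.
 */
size_t iommu_unmap(struct iommu_domain *domain,
		   unsigned long iova, size_t size)
{
	struct iommu_iotlb_gather iotlb_gather;
	size_t ret;

	iommu_iotlb_gather_init(&iotlb_gather);
	/* Ends up in amd_iommu_unmap(), which now records the
	 * unmapped range in iotlb_gather. */
	ret = __iommu_unmap(domain, iova, size, &iotlb_gather);
	/* Ends up in amd_iommu_iotlb_sync(), which now flushes only
	 * gather->start..gather->end instead of the whole domain. */
	iommu_iotlb_sync(domain, &iotlb_gather);

	return ret;
}

The 1 passed as the last argument to __domain_flush_pages() in the
sync path is the "pde" parameter mentioned in the commit message,
which additionally flushes the page-walk caches.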