Commit 02252b3b authored by Zhen Lei, committed by Joerg Roedel
Browse files

iommu/amd: Add support for IOMMU default DMA mode build options



Make IOMMU_DEFAULT_LAZY default for when AMD_IOMMU config is set, which
matches current behaviour.

For "fullflush" param, just call iommu_set_dma_strict(true) directly.

Since we get a strict vs lazy mode print already in iommu_subsys_init(),
and maintain a deprecation print when "fullflush" param is passed, drop the
prints in amd_iommu_init_dma_ops().

Finally drop global flag amd_iommu_unmap_flush, as it no longer has any
purpose.

[jpg: Rebase for relocated file and drop amd_iommu_unmap_flush]

Signed-off-by: Zhen Lei <thunder.leizhen@huawei.com>
Signed-off-by: John Garry <john.garry@huawei.com>
Link: https://lore.kernel.org/r/1626088340-5838-6-git-send-email-john.garry@huawei.com


Signed-off-by: Joerg Roedel <jroedel@suse.de>
parent d0e108b8
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -94,7 +94,7 @@ choice
	prompt "IOMMU default DMA IOTLB invalidation mode"
	depends on IOMMU_DMA

	default IOMMU_DEFAULT_LAZY if INTEL_IOMMU
	default IOMMU_DEFAULT_LAZY if (AMD_IOMMU || INTEL_IOMMU)
	default IOMMU_DEFAULT_STRICT
	help
	  This option allows an IOMMU DMA IOTLB invalidation mode to be
+0 −6
Original line number Diff line number Diff line
@@ -779,12 +779,6 @@ extern u16 amd_iommu_last_bdf;
/* allocation bitmap for domain ids */
extern unsigned long *amd_iommu_pd_alloc_bitmap;

/*
 * If true, the addresses will be flushed on unmap time, not when
 * they are reused
 */
extern bool amd_iommu_unmap_flush;

/* Smallest max PASID supported by any IOMMU in the system */
extern u32 amd_iommu_max_pasid;

+1 −2
Original line number Diff line number Diff line
@@ -161,7 +161,6 @@ u16 amd_iommu_last_bdf; /* largest PCI device id we have
					   to handle */
LIST_HEAD(amd_iommu_unity_map);		/* a list of required unity mappings
					   we find in ACPI */
bool amd_iommu_unmap_flush;		/* if true, flush on every unmap */

LIST_HEAD(amd_iommu_list);		/* list of all AMD IOMMUs in the
					   system */
@@ -3100,7 +3099,7 @@ static int __init parse_amd_iommu_options(char *str)
	for (; *str; ++str) {
		if (strncmp(str, "fullflush", 9) == 0) {
			pr_warn("amd_iommu=fullflush deprecated; use iommu.strict=1 instead\n");
			amd_iommu_unmap_flush = true;
			iommu_set_dma_strict(true);
		}
		if (strncmp(str, "force_enable", 12) == 0)
			amd_iommu_force_enable = true;
+0 −6
Original line number Diff line number Diff line
@@ -1775,12 +1775,6 @@ void amd_iommu_domain_update(struct protection_domain *domain)
static void __init amd_iommu_init_dma_ops(void)
{
	swiotlb = (iommu_default_passthrough() || sme_me_mask) ? 1 : 0;

	if (amd_iommu_unmap_flush)
		pr_info("IO/TLB flush on unmap enabled\n");
	else
		pr_info("Lazy IO/TLB flushing enabled\n");
	iommu_set_dma_strict(amd_iommu_unmap_flush);
}

int __init amd_iommu_init_api(void)