Commit 98aeb4ea authored by Suravee Suthikulpanit, committed by Joerg Roedel
Browse files

iommu/amd: Do not Invalidate IRT when IRTE caching is disabled



With the Interrupt Remapping Table cache disabled, there is no need to
issue an IRT invalidation command and wait for its completion. Therefore,
add logic to bypass the operation.

Reviewed-by: Jerry Snitselaar <jsnitsel@redhat.com>
Suggested-by: Joao Martins <joao.m.martins@oracle.com>
Signed-off-by: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
Link: https://lore.kernel.org/r/20230530141137.14376-5-suravee.suthikulpanit@amd.com


Signed-off-by: Joerg Roedel <jroedel@suse.de>
parent 66419036
Loading
Loading
Loading
Loading
+15 −6
Original line number Diff line number Diff line
@@ -1266,12 +1266,24 @@ static void amd_iommu_flush_irt_all(struct amd_iommu *iommu)
	u32 devid;
	u16 last_bdf = iommu->pci_seg->last_bdf;

	if (iommu->irtcachedis_enabled)
		return;

	for (devid = 0; devid <= last_bdf; devid++)
		iommu_flush_irt(iommu, devid);

	iommu_completion_wait(iommu);
}

/*
 * Flush the interrupt remapping table entry for @devid and wait for the
 * invalidation to complete. Skipped entirely when IRTE caching has been
 * disabled, since there is nothing cached to invalidate in that case.
 */
static void iommu_flush_irt_and_complete(struct amd_iommu *iommu, u16 devid)
{
	if (!iommu->irtcachedis_enabled) {
		iommu_flush_irt(iommu, devid);
		iommu_completion_wait(iommu);
	}
}

void iommu_flush_all_caches(struct amd_iommu *iommu)
{
	if (iommu_feature(iommu, FEATURE_IA)) {
@@ -3030,8 +3042,7 @@ static int modify_irte_ga(struct amd_iommu *iommu, u16 devid, int index,

	raw_spin_unlock_irqrestore(&table->lock, flags);

	iommu_flush_irt(iommu, devid);
	iommu_completion_wait(iommu);
	iommu_flush_irt_and_complete(iommu, devid);

	return 0;
}
@@ -3050,8 +3061,7 @@ static int modify_irte(struct amd_iommu *iommu,
	table->table[index] = irte->val;
	raw_spin_unlock_irqrestore(&table->lock, flags);

	iommu_flush_irt(iommu, devid);
	iommu_completion_wait(iommu);
	iommu_flush_irt_and_complete(iommu, devid);

	return 0;
}
@@ -3069,8 +3079,7 @@ static void free_irte(struct amd_iommu *iommu, u16 devid, int index)
	iommu->irte_ops->clear_allocated(table, index);
	raw_spin_unlock_irqrestore(&table->lock, flags);

	iommu_flush_irt(iommu, devid);
	iommu_completion_wait(iommu);
	iommu_flush_irt_and_complete(iommu, devid);
}

static void irte_prepare(void *entry,