Commit 4a25f2ea authored by Ashish Mhetre's avatar Ashish Mhetre Committed by Will Deacon
Browse files

iommu: arm-smmu: disable large page mappings for Nvidia arm-smmu



Tegra194 and Tegra234 SoCs have an erratum that causes walk cache
entries to not be invalidated correctly. The problem is that the walk
cache index generated for an IOVA is not the same across translation and
invalidation requests. This leads to page faults when a PMD entry is
released during unmap and populated with a new PTE table during a
subsequent map request. Disabling large page mappings avoids the release
of the PMD entry and avoids translations seeing a stale PMD entry in the
walk cache.
Fix this by limiting page mappings to PAGE_SIZE for Tegra194 and
Tegra234 devices. This is the recommended fix from the Tegra hardware
design team.

Acked-by: Robin Murphy <robin.murphy@arm.com>
Reviewed-by: Krishna Reddy <vdumpa@nvidia.com>
Co-developed-by: Pritesh Raithatha <praithatha@nvidia.com>
Signed-off-by: Pritesh Raithatha <praithatha@nvidia.com>
Signed-off-by: Ashish Mhetre <amhetre@nvidia.com>
Link: https://lore.kernel.org/r/20220421081504.24678-1-amhetre@nvidia.com


Signed-off-by: Will Deacon <will@kernel.org>
parent 95d4782c
Loading
Loading
Loading
Loading
+30 −0
Original line number Diff line number Diff line
@@ -258,6 +258,34 @@ static void nvidia_smmu_probe_finalize(struct arm_smmu_device *smmu, struct devi
			dev_name(dev), err);
}

static int nvidia_smmu_init_context(struct arm_smmu_domain *smmu_domain,
				    struct io_pgtable_cfg *pgtbl_cfg,
				    struct device *dev)
{
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	const struct device_node *np = smmu->dev->of_node;

	/*
	 * Tegra194 and Tegra234 SoCs have the erratum that causes walk cache
	 * entries to not be invalidated correctly. The problem is that the walk
	 * cache index generated for IOVA is not same across translation and
	 * invalidation requests. This is leading to page faults when PMD entry
	 * is released during unmap and populated with new PTE table during
	 * subsequent map request. Disabling large page mappings avoids the
	 * release of PMD entry and avoid translations seeing stale PMD entry in
	 * walk cache.
	 * Fix this by limiting the page mappings to PAGE_SIZE on Tegra194 and
	 * Tegra234.
	 */
	if (of_device_is_compatible(np, "nvidia,tegra234-smmu") ||
	    of_device_is_compatible(np, "nvidia,tegra194-smmu")) {
		smmu->pgsize_bitmap = PAGE_SIZE;
		pgtbl_cfg->pgsize_bitmap = smmu->pgsize_bitmap;
	}

	return 0;
}

static const struct arm_smmu_impl nvidia_smmu_impl = {
	.read_reg = nvidia_smmu_read_reg,
	.write_reg = nvidia_smmu_write_reg,
@@ -268,10 +296,12 @@ static const struct arm_smmu_impl nvidia_smmu_impl = {
	.global_fault = nvidia_smmu_global_fault,
	.context_fault = nvidia_smmu_context_fault,
	.probe_finalize = nvidia_smmu_probe_finalize,
	.init_context = nvidia_smmu_init_context,
};

/*
 * Implementation ops for the single-instance SMMU configuration: no
 * register mirroring or fault-handler overrides are needed, only the
 * probe finalization and the erratum page-size restriction.
 */
static const struct arm_smmu_impl nvidia_smmu_single_impl = {
	.probe_finalize = nvidia_smmu_probe_finalize,
	.init_context = nvidia_smmu_init_context,
};

struct arm_smmu_device *nvidia_smmu_impl_init(struct arm_smmu_device *smmu)