Commit 3236130b authored by Dmitry Baryshkov
Browse files

drm/msm: move domain allocation into msm_iommu_new()



After the msm_iommu instance is created, the IOMMU domain is completely
handled inside the msm_iommu code. Move the iommu_domain_alloc() call
into the msm_iommu_new() to simplify callers code.

Reported-by: kernel test robot <lkp@intel.com>
Signed-off-by: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
Reviewed-by: Rob Clark <robdclark@gmail.com>
Patchwork: https://patchwork.freedesktop.org/patch/509615/
Link: https://lore.kernel.org/r/20221102175449.452283-2-dmitry.baryshkov@linaro.org


Signed-off-by: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
parent 90d2c87f
Loading
Loading
Loading
Loading
+5 −7
Original line number Diff line number Diff line
@@ -1213,19 +1213,17 @@ static int a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu, struct a6xx_gmu_bo *bo,

static int a6xx_gmu_memory_probe(struct a6xx_gmu *gmu)
{
	struct iommu_domain *domain;
	struct msm_mmu *mmu;

	domain = iommu_domain_alloc(&platform_bus_type);
	if (!domain)
	mmu = msm_iommu_new(gmu->dev, 0);
	if (!mmu)
		return -ENODEV;
	if (IS_ERR(mmu))
		return PTR_ERR(mmu);

	mmu = msm_iommu_new(gmu->dev, domain);
	gmu->aspace = msm_gem_address_space_create(mmu, "gmu", 0x0, 0x80000000);
	if (IS_ERR(gmu->aspace)) {
		iommu_domain_free(domain);
	if (IS_ERR(gmu->aspace))
		return PTR_ERR(gmu->aspace);
	}

	return 0;
}
+11 −12
Original line number Diff line number Diff line
@@ -1786,35 +1786,34 @@ a6xx_create_address_space(struct msm_gpu *gpu, struct platform_device *pdev)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
	struct iommu_domain *iommu;
	struct iommu_domain_geometry *geometry;
	struct msm_mmu *mmu;
	struct msm_gem_address_space *aspace;
	u64 start, size;

	iommu = iommu_domain_alloc(&platform_bus_type);
	if (!iommu)
		return NULL;
	unsigned long quirks = 0;

	/*
	 * This allows GPU to set the bus attributes required to use system
	 * cache on behalf of the iommu page table walker.
	 */
	if (!IS_ERR_OR_NULL(a6xx_gpu->htw_llc_slice))
		adreno_set_llc_attributes(iommu);
		quirks |= IO_PGTABLE_QUIRK_ARM_OUTER_WBWA;

	mmu = msm_iommu_new(&pdev->dev, iommu);
	if (IS_ERR(mmu)) {
		iommu_domain_free(iommu);
	mmu = msm_iommu_new(&pdev->dev, quirks);
	if (IS_ERR_OR_NULL(mmu))
		return ERR_CAST(mmu);
	}

	geometry = msm_iommu_get_geometry(mmu);
	if (IS_ERR(geometry))
		return ERR_CAST(geometry);

	/*
	 * Use the aperture start or SZ_16M, whichever is greater. This will
	 * ensure that we align with the allocated pagetable range while still
	 * allowing room in the lower 32 bits for GMEM and whatnot
	 */
	start = max_t(u64, SZ_16M, iommu->geometry.aperture_start);
	size = iommu->geometry.aperture_end - start + 1;
	start = max_t(u64, SZ_16M, geometry->aperture_start);
	size = geometry->aperture_end - start + 1;

	aspace = msm_gem_address_space_create(mmu, "gpu",
		start & GENMASK_ULL(48, 0), size);
+9 −16
Original line number Diff line number Diff line
@@ -191,37 +191,30 @@ int adreno_zap_shader_load(struct msm_gpu *gpu, u32 pasid)
	return zap_shader_load_mdt(gpu, adreno_gpu->info->zapfw, pasid);
}

void adreno_set_llc_attributes(struct iommu_domain *iommu)
{
	iommu_set_pgtable_quirks(iommu, IO_PGTABLE_QUIRK_ARM_OUTER_WBWA);
}

struct msm_gem_address_space *
adreno_iommu_create_address_space(struct msm_gpu *gpu,
		struct platform_device *pdev)
{
	struct iommu_domain *iommu;
	struct iommu_domain_geometry *geometry;
	struct msm_mmu *mmu;
	struct msm_gem_address_space *aspace;
	u64 start, size;

	iommu = iommu_domain_alloc(&platform_bus_type);
	if (!iommu)
		return NULL;

	mmu = msm_iommu_new(&pdev->dev, iommu);
	if (IS_ERR(mmu)) {
		iommu_domain_free(iommu);
	mmu = msm_iommu_new(&pdev->dev, 0);
	if (IS_ERR_OR_NULL(mmu))
		return ERR_CAST(mmu);
	}

	geometry = msm_iommu_get_geometry(mmu);
	if (IS_ERR(geometry))
		return ERR_CAST(geometry);

	/*
	 * Use the aperture start or SZ_16M, whichever is greater. This will
	 * ensure that we align with the allocated pagetable range while still
	 * allowing room in the lower 32 bits for GMEM and whatnot
	 */
	start = max_t(u64, SZ_16M, iommu->geometry.aperture_start);
	size = iommu->geometry.aperture_end - start + 1;
	start = max_t(u64, SZ_16M, geometry->aperture_start);
	size = geometry->aperture_end - start + 1;

	aspace = msm_gem_address_space_create(mmu, "gpu",
		start & GENMASK_ULL(48, 0), size);
+0 −2
Original line number Diff line number Diff line
@@ -338,8 +338,6 @@ struct msm_gem_address_space *
adreno_iommu_create_address_space(struct msm_gpu *gpu,
		struct platform_device *pdev);

void adreno_set_llc_attributes(struct iommu_domain *iommu);

int adreno_read_speedbin(struct device *dev, u32 *speedbin);

/*
+10 −9
Original line number Diff line number Diff line
@@ -387,7 +387,7 @@ static int mdp4_kms_init(struct drm_device *dev)
	struct msm_drm_private *priv = dev->dev_private;
	struct mdp4_kms *mdp4_kms;
	struct msm_kms *kms = NULL;
	struct iommu_domain *iommu;
	struct msm_mmu *mmu;
	struct msm_gem_address_space *aspace;
	int irq, ret;
	u32 major, minor;
@@ -499,10 +499,15 @@ static int mdp4_kms_init(struct drm_device *dev)
	mdp4_disable(mdp4_kms);
	mdelay(16);

	iommu = iommu_domain_alloc(pdev->dev.bus);
	if (iommu) {
		struct msm_mmu *mmu = msm_iommu_new(&pdev->dev, iommu);

	mmu = msm_iommu_new(&pdev->dev, 0);
	if (IS_ERR(mmu)) {
		ret = PTR_ERR(mmu);
		goto fail;
	} else if (!mmu) {
		DRM_DEV_INFO(dev->dev, "no iommu, fallback to phys "
				"contig buffers for scanout\n");
		aspace = NULL;
	} else {
		aspace  = msm_gem_address_space_create(mmu,
			"mdp4", 0x1000, 0x100000000 - 0x1000);

@@ -514,10 +519,6 @@ static int mdp4_kms_init(struct drm_device *dev)
		}

		kms->aspace = aspace;
	} else {
		DRM_DEV_INFO(dev->dev, "no iommu, fallback to phys "
				"contig buffers for scanout\n");
		aspace = NULL;
	}

	ret = modeset_init(mdp4_kms);
Loading