Commit a630ac68 authored by Rob Clark
Browse files
parent f4f6dfde
Loading
Loading
Loading
Loading
+13 −9
Original line number Diff line number Diff line
@@ -1146,7 +1146,7 @@ static void a6xx_gmu_memory_free(struct a6xx_gmu *gmu)
}

static int a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu, struct a6xx_gmu_bo *bo,
		size_t size, u64 iova)
		size_t size, u64 iova, const char *name)
{
	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
	struct drm_device *dev = a6xx_gpu->base.base.dev;
@@ -1181,6 +1181,8 @@ static int a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu, struct a6xx_gmu_bo *bo,
	bo->virt = msm_gem_get_vaddr(bo->obj);
	bo->size = size;

	msm_gem_object_set_name(bo->obj, name);

	return 0;
}

@@ -1515,7 +1517,8 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
	 */
	gmu->dummy.size = SZ_4K;
	if (adreno_is_a660_family(adreno_gpu)) {
		ret = a6xx_gmu_memory_alloc(gmu, &gmu->debug, SZ_4K * 7, 0x60400000);
		ret = a6xx_gmu_memory_alloc(gmu, &gmu->debug, SZ_4K * 7,
					    0x60400000, "debug");
		if (ret)
			goto err_memory;

@@ -1523,24 +1526,25 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
	}

	/* Allocate memory for the GMU dummy page */
	ret = a6xx_gmu_memory_alloc(gmu, &gmu->dummy, gmu->dummy.size, 0x60000000);
	ret = a6xx_gmu_memory_alloc(gmu, &gmu->dummy, gmu->dummy.size,
				    0x60000000, "dummy");
	if (ret)
		goto err_memory;

	/* Note that a650 family also includes a660 family: */
	if (adreno_is_a650_family(adreno_gpu)) {
		ret = a6xx_gmu_memory_alloc(gmu, &gmu->icache,
			SZ_16M - SZ_16K, 0x04000);
			SZ_16M - SZ_16K, 0x04000, "icache");
		if (ret)
			goto err_memory;
	} else if (adreno_is_a640_family(adreno_gpu)) {
		ret = a6xx_gmu_memory_alloc(gmu, &gmu->icache,
			SZ_256K - SZ_16K, 0x04000);
			SZ_256K - SZ_16K, 0x04000, "icache");
		if (ret)
			goto err_memory;

		ret = a6xx_gmu_memory_alloc(gmu, &gmu->dcache,
			SZ_256K - SZ_16K, 0x44000);
			SZ_256K - SZ_16K, 0x44000, "dcache");
		if (ret)
			goto err_memory;
	} else {
@@ -1550,18 +1554,18 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
		gmu->legacy = true;

		/* Allocate memory for the GMU debug region */
		ret = a6xx_gmu_memory_alloc(gmu, &gmu->debug, SZ_16K, 0);
		ret = a6xx_gmu_memory_alloc(gmu, &gmu->debug, SZ_16K, 0, "debug");
		if (ret)
			goto err_memory;
	}

	/* Allocate memory for the HFI queues */
	ret = a6xx_gmu_memory_alloc(gmu, &gmu->hfi, SZ_16K, 0);
	ret = a6xx_gmu_memory_alloc(gmu, &gmu->hfi, SZ_16K, 0, "hfi");
	if (ret)
		goto err_memory;

	/* Allocate memory for the GMU log region */
	ret = a6xx_gmu_memory_alloc(gmu, &gmu->log, SZ_4K, 0);
	ret = a6xx_gmu_memory_alloc(gmu, &gmu->log, SZ_4K, 0, "log");
	if (ret)
		goto err_memory;