Commit 1ade5f84 authored by Alex Sierra, committed by Alex Deucher
Browse files

drm/amdkfd: skip invalid pages during migrations



Invalid pages can be the result of pages that have already been
migrated due to the copy-on-write procedure, or pages that were never
migrated to VRAM in the first place. This is no longer an issue,
as pranges now support mixed memory domains (CPU/GPU).

Signed-off-by: Alex Sierra <alex.sierra@amd.com>
Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 1d5dbfe6
Loading
Loading
Loading
Loading
+18 −20
Original line number Diff line number Diff line
@@ -372,7 +372,6 @@ svm_migrate_vma_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
	size_t size;
	void *buf;
	int r = -ENOMEM;
	int retry = 0;

	memset(&migrate, 0, sizeof(migrate));
	migrate.vma = vma;
@@ -391,7 +390,6 @@ svm_migrate_vma_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
	migrate.dst = migrate.src + npages;
	scratch = (dma_addr_t *)(migrate.dst + npages);

retry:
	r = migrate_vma_setup(&migrate);
	if (r) {
		pr_debug("failed %d prepare migrate svms 0x%p [0x%lx 0x%lx]\n",
@@ -399,17 +397,9 @@ svm_migrate_vma_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
		goto out_free;
	}
	if (migrate.cpages != npages) {
		pr_debug("collect 0x%lx/0x%llx pages, retry\n", migrate.cpages,
		pr_debug("Partial migration. 0x%lx/0x%llx pages can be migrated\n",
			 migrate.cpages,
			 npages);
		migrate_vma_finalize(&migrate);
		if (retry++ >= 3) {
			r = -ENOMEM;
			pr_debug("failed %d migrate svms 0x%p [0x%lx 0x%lx]\n",
				 r, prange->svms, prange->start, prange->last);
			goto out_free;
		}

		goto retry;
	}

	if (migrate.cpages) {
@@ -506,9 +496,8 @@ static void svm_migrate_page_free(struct page *page)
static int
svm_migrate_copy_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
			struct migrate_vma *migrate, struct dma_fence **mfence,
			dma_addr_t *scratch)
			dma_addr_t *scratch, uint64_t npages)
{
	uint64_t npages = migrate->cpages;
	struct device *dev = adev->dev;
	uint64_t *src;
	dma_addr_t *dst;
@@ -525,15 +514,23 @@ svm_migrate_copy_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
	src = (uint64_t *)(scratch + npages);
	dst = scratch;

	for (i = 0, j = 0; i < npages; i++, j++, addr += PAGE_SIZE) {
	for (i = 0, j = 0; i < npages; i++, addr += PAGE_SIZE) {
		struct page *spage;

		spage = migrate_pfn_to_page(migrate->src[i]);
		if (!spage) {
			pr_debug("failed get spage svms 0x%p [0x%lx 0x%lx]\n",
		if (!spage || !is_zone_device_page(spage)) {
			pr_debug("invalid page. Could be in CPU already svms 0x%p [0x%lx 0x%lx]\n",
				 prange->svms, prange->start, prange->last);
			r = -ENOMEM;
			if (j) {
				r = svm_migrate_copy_memory_gart(adev, dst + i - j,
								 src + i - j, j,
								 FROM_VRAM_TO_RAM,
								 mfence);
				if (r)
					goto out_oom;
				j = 0;
			}
			continue;
		}
		src[i] = svm_migrate_addr(adev, spage);
		if (i > 0 && src[i] != src[i - 1] + PAGE_SIZE) {
@@ -566,6 +563,7 @@ svm_migrate_copy_to_ram(struct amdgpu_device *adev, struct svm_range *prange,

		migrate->dst[i] = migrate_pfn(page_to_pfn(dpage));
		migrate->dst[i] |= MIGRATE_PFN_LOCKED;
		j++;
	}

	r = svm_migrate_copy_memory_gart(adev, dst + i - j, src + i - j, j,
@@ -624,7 +622,7 @@ svm_migrate_vma_to_ram(struct amdgpu_device *adev, struct svm_range *prange,

	if (migrate.cpages) {
		r = svm_migrate_copy_to_ram(adev, prange, &migrate, &mfence,
					    scratch);
					    scratch, npages);
		migrate_vma_pages(&migrate);
		svm_migrate_copy_done(adev, mfence);
		migrate_vma_finalize(&migrate);