Commit ab09243a authored by Alistair Popple, committed by Linus Torvalds
Browse files

mm/migrate.c: remove MIGRATE_PFN_LOCKED

MIGRATE_PFN_LOCKED is used to indicate to migrate_vma_prepare() that a
source page was already locked during migrate_vma_collect().  If it
wasn't, then a second attempt is made to lock the page.  However if
the first attempt failed it's unlikely a second attempt will succeed,
and the retry adds complexity.  So clean this up by removing the retry
and MIGRATE_PFN_LOCKED flag.

Destination pages are also meant to have the MIGRATE_PFN_LOCKED flag
set, but nothing actually checks that.

Link: https://lkml.kernel.org/r/20211025041608.289017-1-apopple@nvidia.com


Signed-off-by: Alistair Popple <apopple@nvidia.com>
Reviewed-by: Ralph Campbell <rcampbell@nvidia.com>
Acked-by: Felix Kuehling <Felix.Kuehling@amd.com>
Cc: Alex Deucher <alexander.deucher@amd.com>
Cc: Jerome Glisse <jglisse@redhat.com>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Zi Yan <ziy@nvidia.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Ben Skeggs <bskeggs@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 0ef02462
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -360,7 +360,7 @@ between device driver specific code and shared common code:
   system memory page, locks the page with ``lock_page()``, and fills in the
   ``dst`` array entry with::

     dst[i] = migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED;
     dst[i] = migrate_pfn(page_to_pfn(dpage));

   Now that the driver knows that this page is being migrated, it can
   invalidate device private MMU mappings and copy device private memory
+2 −2
Original line number Diff line number Diff line
@@ -560,7 +560,7 @@ static int __kvmppc_svm_page_out(struct vm_area_struct *vma,
				  gpa, 0, page_shift);

	if (ret == U_SUCCESS)
		*mig.dst = migrate_pfn(pfn) | MIGRATE_PFN_LOCKED;
		*mig.dst = migrate_pfn(pfn);
	else {
		unlock_page(dpage);
		__free_page(dpage);
@@ -774,7 +774,7 @@ static int kvmppc_svm_page_in(struct vm_area_struct *vma,
		}
	}

	*mig.dst = migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED;
	*mig.dst = migrate_pfn(page_to_pfn(dpage));
	migrate_vma_pages(&mig);
out_finalize:
	migrate_vma_finalize(&mig);
+0 −2
Original line number Diff line number Diff line
@@ -317,7 +317,6 @@ svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
			migrate->dst[i] = svm_migrate_addr_to_pfn(adev, dst[i]);
			svm_migrate_get_vram_page(prange, migrate->dst[i]);
			migrate->dst[i] = migrate_pfn(migrate->dst[i]);
			migrate->dst[i] |= MIGRATE_PFN_LOCKED;
			src[i] = dma_map_page(dev, spage, 0, PAGE_SIZE,
					      DMA_TO_DEVICE);
			r = dma_mapping_error(dev, src[i]);
@@ -610,7 +609,6 @@ svm_migrate_copy_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
				     dst[i] >> PAGE_SHIFT, page_to_pfn(dpage));

		migrate->dst[i] = migrate_pfn(page_to_pfn(dpage));
		migrate->dst[i] |= MIGRATE_PFN_LOCKED;
		j++;
	}

+2 −2
Original line number Diff line number Diff line
@@ -166,7 +166,7 @@ static vm_fault_t nouveau_dmem_fault_copy_one(struct nouveau_drm *drm,
		goto error_dma_unmap;
	mutex_unlock(&svmm->mutex);

	args->dst[0] = migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED;
	args->dst[0] = migrate_pfn(page_to_pfn(dpage));
	return 0;

error_dma_unmap:
@@ -602,7 +602,7 @@ static unsigned long nouveau_dmem_migrate_copy_one(struct nouveau_drm *drm,
		((paddr >> PAGE_SHIFT) << NVIF_VMM_PFNMAP_V0_ADDR_SHIFT);
	if (src & MIGRATE_PFN_WRITE)
		*pfn |= NVIF_VMM_PFNMAP_V0_W;
	return migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED;
	return migrate_pfn(page_to_pfn(dpage));

out_dma_unmap:
	dma_unmap_page(dev, *dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
+0 −1
Original line number Diff line number Diff line
@@ -110,7 +110,6 @@ static inline int migrate_misplaced_page(struct page *page,
 */
#define MIGRATE_PFN_VALID	(1UL << 0)
#define MIGRATE_PFN_MIGRATE	(1UL << 1)
#define MIGRATE_PFN_LOCKED	(1UL << 2)
#define MIGRATE_PFN_WRITE	(1UL << 3)
#define MIGRATE_PFN_SHIFT	6

Loading