Commit dbf49896 authored by Linus Torvalds

Merge branch 'akpm' (patches from Andrew)

Merge more updates from Andrew Morton:
 "The post-linux-next material.

  7 patches.

  Subsystems affected by this patch series (all mm): debug,
  slab-generic, migration, memcg, and kasan"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  kasan: add kasan mode messages when kasan init
  mm: unexport {,un}lock_page_memcg
  mm: unexport folio_memcg_{,un}lock
  mm/migrate.c: remove MIGRATE_PFN_LOCKED
  mm: migrate: simplify the file-backed pages validation when migrating its mapping
  mm: allow only SLUB on PREEMPT_RT
  mm/page_owner.c: modify the type of argument "order" in some functions
parents 6d76f6eb b873e986
+1 −1
@@ -360,7 +360,7 @@ between device driver specific code and shared common code:
   system memory page, locks the page with ``lock_page()``, and fills in the
   ``dst`` array entry with::

-     dst[i] = migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED;
+     dst[i] = migrate_pfn(page_to_pfn(dpage));

   Now that the driver knows that this page is being migrated, it can
   invalidate device private MMU mappings and copy device private memory
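The hmm.rst excerpt above describes the driver side of this step: allocate a destination page, lock it with lock_page(), and record it in the dst array. A minimal sketch of that step under the new convention follows; my_fill_dst() and the GFP choice are illustrative placeholders, error handling is elided, and only migrate_pfn(), page_to_pfn(), alloc_page(), lock_page(), MIGRATE_PFN_MIGRATE and the struct migrate_vma fields are existing kernel interfaces.

/*
 * Sketch only: with MIGRATE_PFN_LOCKED gone, the driver still locks the
 * destination page, but stores just migrate_pfn() in the dst entry.
 */
static void my_fill_dst(struct migrate_vma *mig)
{
	unsigned long i;

	for (i = 0; i < mig->npages; i++) {
		struct page *dpage;

		/* Skip entries the core decided not to migrate. */
		if (!(mig->src[i] & MIGRATE_PFN_MIGRATE))
			continue;

		dpage = alloc_page(GFP_HIGHUSER);
		if (!dpage) {
			mig->dst[i] = 0;
			continue;
		}
		lock_page(dpage);
		mig->dst[i] = migrate_pfn(page_to_pfn(dpage));
	}
}

After filling the dst array the driver proceeds with migrate_vma_pages() and migrate_vma_finalize(), as visible in the powerpc hunk below.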
+1 −1
@@ -310,7 +310,7 @@ void __init kasan_init(void)
	kasan_init_depth();
#if defined(CONFIG_KASAN_GENERIC)
	/* CONFIG_KASAN_SW_TAGS also requires kasan_init_sw_tags(). */
	pr_info("KernelAddressSanitizer initialized\n");
	pr_info("KernelAddressSanitizer initialized (generic)\n");
#endif
}

+2 −2
@@ -560,7 +560,7 @@ static int __kvmppc_svm_page_out(struct vm_area_struct *vma,
				  gpa, 0, page_shift);

	if (ret == U_SUCCESS)
-		*mig.dst = migrate_pfn(pfn) | MIGRATE_PFN_LOCKED;
+		*mig.dst = migrate_pfn(pfn);
	else {
		unlock_page(dpage);
		__free_page(dpage);
@@ -774,7 +774,7 @@ static int kvmppc_svm_page_in(struct vm_area_struct *vma,
		}
	}

-	*mig.dst = migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED;
+	*mig.dst = migrate_pfn(page_to_pfn(dpage));
	migrate_vma_pages(&mig);
out_finalize:
	migrate_vma_finalize(&mig);
+0 −2
@@ -317,7 +317,6 @@ svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
			migrate->dst[i] = svm_migrate_addr_to_pfn(adev, dst[i]);
			svm_migrate_get_vram_page(prange, migrate->dst[i]);
			migrate->dst[i] = migrate_pfn(migrate->dst[i]);
-			migrate->dst[i] |= MIGRATE_PFN_LOCKED;
			src[i] = dma_map_page(dev, spage, 0, PAGE_SIZE,
					      DMA_TO_DEVICE);
			r = dma_mapping_error(dev, src[i]);
@@ -610,7 +609,6 @@ svm_migrate_copy_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
				     dst[i] >> PAGE_SHIFT, page_to_pfn(dpage));

		migrate->dst[i] = migrate_pfn(page_to_pfn(dpage));
-		migrate->dst[i] |= MIGRATE_PFN_LOCKED;
		j++;
	}

+2 −2
@@ -166,7 +166,7 @@ static vm_fault_t nouveau_dmem_fault_copy_one(struct nouveau_drm *drm,
		goto error_dma_unmap;
	mutex_unlock(&svmm->mutex);

-	args->dst[0] = migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED;
+	args->dst[0] = migrate_pfn(page_to_pfn(dpage));
	return 0;

error_dma_unmap:
@@ -602,7 +602,7 @@ static unsigned long nouveau_dmem_migrate_copy_one(struct nouveau_drm *drm,
		((paddr >> PAGE_SHIFT) << NVIF_VMM_PFNMAP_V0_ADDR_SHIFT);
	if (src & MIGRATE_PFN_WRITE)
		*pfn |= NVIF_VMM_PFNMAP_V0_W;
-	return migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED;
+	return migrate_pfn(page_to_pfn(dpage));

out_dma_unmap:
	dma_unmap_page(dev, *dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);