Commit f7355e99 authored by David Hildenbrand, committed by Andrew Morton

mm/gup: remove FOLL_MIGRATION

Fortunately, the last user (KSM) is gone, so let's just remove this rather
special code from generic GUP handling -- especially because KSM never
required the PMD handling as KSM only deals with individual base pages.

[akpm@linux-foundation.org: fix merge snafu]
Link: https://lkml.kernel.org/r/20221021101141.84170-10-david@redhat.com
Signed-off-by: David Hildenbrand <david@redhat.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Jason Gunthorpe <jgg@nvidia.com>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Peter Xu <peterx@redhat.com>
Cc: Shuah Khan <shuah@kernel.org>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent d7c0e68d

include/linux/mm.h (+0 −1)

@@ -3057,7 +3057,6 @@ struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
 				 * and return without waiting upon it */
 #define FOLL_NOFAULT	0x80	/* do not fault in pages */
 #define FOLL_HWPOISON	0x100	/* check page is hwpoisoned */
-#define FOLL_MIGRATION	0x400	/* wait for page to replace migration entry */
 #define FOLL_TRIED	0x800	/* a retry, previous pass started an IO */
 #define FOLL_REMOTE	0x2000	/* we are working on non-current tsk/mm */
 #define FOLL_ANON	0x8000	/* don't do file mappings */
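
For context, the flag's last user was KSM's break_ksm(), converted away by the preceding patches in this series. A rough sketch of that historical calling pattern, simplified from older mm/ksm.c (get_ksm_page_sketch() is a stand-in name, and the COW-breaking logic is elided):

/* Historical sketch, not current kernel code: break_ksm() passed
 * FOLL_MIGRATION so that follow_page() would wait for a migration
 * entry to be replaced rather than report no page.
 */
static struct page *get_ksm_page_sketch(struct vm_area_struct *vma,
					unsigned long addr)
{
	struct page *page;

	cond_resched();
	page = follow_page(vma, addr, FOLL_GET | FOLL_MIGRATION);
	if (IS_ERR_OR_NULL(page))
		return NULL;
	/* ... PageKsm() check and COW-breaking fault elided ... */
	return page;
}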

mm/gup.c (+5 −50)

@@ -537,30 +537,13 @@ static struct page *follow_page_pte(struct vm_area_struct *vma,
 	if (WARN_ON_ONCE((flags & (FOLL_PIN | FOLL_GET)) ==
 			 (FOLL_PIN | FOLL_GET)))
 		return ERR_PTR(-EINVAL);
-retry:
 	if (unlikely(pmd_bad(*pmd)))
 		return no_page_table(vma, flags);
 
 	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
 	pte = *ptep;
-	if (!pte_present(pte)) {
-		swp_entry_t entry;
-		/*
-		 * KSM's break_ksm() relies upon recognizing a ksm page
-		 * even while it is being migrated, so for that case we
-		 * need migration_entry_wait().
-		 */
-		if (likely(!(flags & FOLL_MIGRATION)))
-			goto no_page;
-		if (pte_none(pte))
-			goto no_page;
-		entry = pte_to_swp_entry(pte);
-		if (!is_migration_entry(entry))
-			goto no_page;
-		pte_unmap_unlock(ptep, ptl);
-		migration_entry_wait(mm, pmd, address);
-		goto retry;
-	}
+	if (!pte_present(pte))
+		goto no_page;
 	if (pte_protnone(pte) && !gup_can_follow_protnone(flags))
 		goto no_page;
 
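The branch removed here was the only place generic GUP waited for PTE migration entries. A caller that still needs that behavior can open-code the same pattern with the helpers the branch itself used; a minimal sketch under that assumption (wait_for_pte_migration() is a hypothetical name, not a kernel API):

static void wait_for_pte_migration(struct mm_struct *mm, pmd_t *pmd,
				   unsigned long address)
{
	spinlock_t *ptl;
	pte_t *ptep, pte;

	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
	pte = *ptep;
	if (!pte_present(pte) && !pte_none(pte) &&
	    is_migration_entry(pte_to_swp_entry(pte))) {
		/* Drop the PTL first; migration_entry_wait() retakes it
		 * internally and sleeps until the entry is replaced.
		 */
		pte_unmap_unlock(ptep, ptl);
		migration_entry_wait(mm, pmd, address);
		return;
	}
	pte_unmap_unlock(ptep, ptl);
}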
@@ -668,28 +651,8 @@ static struct page *follow_pmd_mask(struct vm_area_struct *vma,
 	pmdval = READ_ONCE(*pmd);
 	if (pmd_none(pmdval))
 		return no_page_table(vma, flags);
-retry:
-	if (!pmd_present(pmdval)) {
-		/*
-		 * Should never reach here, if thp migration is not supported;
-		 * Otherwise, it must be a thp migration entry.
-		 */
-		VM_BUG_ON(!thp_migration_supported() ||
-				  !is_pmd_migration_entry(pmdval));
-
-		if (likely(!(flags & FOLL_MIGRATION)))
-			return no_page_table(vma, flags);
-
-		pmd_migration_entry_wait(mm, pmd);
-		pmdval = READ_ONCE(*pmd);
-		/*
-		 * MADV_DONTNEED may convert the pmd to null because
-		 * mmap_lock is held in read mode
-		 */
-		if (pmd_none(pmdval))
-			return no_page_table(vma, flags);
-		goto retry;
-	}
+	if (!pmd_present(pmdval))
+		return no_page_table(vma, flags);
 	if (pmd_devmap(pmdval)) {
 		ptl = pmd_lock(mm, pmd);
 		page = follow_devmap_pmd(vma, address, pmd, flags, &ctx->pgmap);
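
The PMD-level wait can likewise be open-coded by anyone who still needs it. A sketch of the retry loop the removed branch performed (pmd_migration_wait_sketch() is a hypothetical name), including the re-read for a PMD that MADV_DONTNEED may have turned none, since mmap_lock is held only in read mode:

/* Returns false if the PMD became none while waiting, i.e. there is
 * nothing left to follow.
 */
static bool pmd_migration_wait_sketch(struct mm_struct *mm, pmd_t *pmd)
{
	pmd_t pmdval = READ_ONCE(*pmd);

	while (!pmd_none(pmdval) && !pmd_present(pmdval)) {
		pmd_migration_entry_wait(mm, pmd);
		pmdval = READ_ONCE(*pmd);
	}
	return !pmd_none(pmdval);
}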
@@ -703,18 +666,10 @@ static struct page *follow_pmd_mask(struct vm_area_struct *vma,
 	if (pmd_protnone(pmdval) && !gup_can_follow_protnone(flags))
 		return no_page_table(vma, flags);
 
-retry_locked:
 	ptl = pmd_lock(mm, pmd);
-	if (unlikely(pmd_none(*pmd))) {
-		spin_unlock(ptl);
-		return no_page_table(vma, flags);
-	}
 	if (unlikely(!pmd_present(*pmd))) {
 		spin_unlock(ptl);
-		if (likely(!(flags & FOLL_MIGRATION)))
-			return no_page_table(vma, flags);
-		pmd_migration_entry_wait(mm, pmd);
-		goto retry_locked;
+		return no_page_table(vma, flags);
 	}
 	if (unlikely(!pmd_trans_huge(*pmd))) {
 		spin_unlock(ptl);
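
The retry_locked variant removed above encodes the locking rule for the same wait: never sleep in pmd_migration_entry_wait() while holding the PMD lock. A sketch of that loop (pmd_wait_locked_sketch() is a hypothetical name, not a kernel API):

static void pmd_wait_locked_sketch(struct mm_struct *mm, pmd_t *pmd)
{
	spinlock_t *ptl;

	for (;;) {
		ptl = pmd_lock(mm, pmd);
		if (pmd_present(*pmd) || pmd_none(*pmd)) {
			spin_unlock(ptl);
			return;
		}
		/* Unlock before sleeping on the migration entry. */
		spin_unlock(ptl);
		pmd_migration_entry_wait(mm, pmd);
	}
}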