Commit 4066c119 authored by Yang Shi, committed by Linus Torvalds

mm: gup: remove FOLL_SPLIT

Since commit 5a52c9df ("uprobe: use FOLL_SPLIT_PMD instead of
FOLL_SPLIT") and commit ba925fa3 ("s390/gmap: improve THP splitting"),
FOLL_SPLIT has been unused.  Remove the dead code.
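
For reference, here is a minimal sketch of how a caller that previously
passed FOLL_SPLIT migrates to FOLL_SPLIT_PMD, modeled on the uprobes
conversion in commit 5a52c9df.  The helper name and the mm/vaddr
parameters are illustrative only, not part of this patch; unlike
FOLL_SPLIT, FOLL_SPLIT_PMD splits only the PMD mapping and leaves the
compound page itself intact:

	static int gup_split_pmd_example(struct mm_struct *mm,
					 unsigned long vaddr,
					 struct page **pagep)
	{
		int ret;

		/*
		 * FOLL_SPLIT_PMD makes GUP split the huge PMD and return
		 * the PTE-mapped subpage; the compound page is not split,
		 * unlike with the removed FOLL_SPLIT.
		 */
		ret = get_user_pages_remote(mm, vaddr, 1,
					    FOLL_FORCE | FOLL_SPLIT_PMD,
					    pagep, NULL, NULL);
		if (ret <= 0)
			return ret;

		/* The caller must drop the reference with put_page(). */
		return 0;
	}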

Link: https://lkml.kernel.org/r/20210330203900.9222-1-shy828301@gmail.com
Signed-off-by: Yang Shi <shy828301@gmail.com>
Reviewed-by: John Hubbard <jhubbard@nvidia.com>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 1d4b0166

--- a/Documentation/vm/transhuge.rst
+++ b/Documentation/vm/transhuge.rst
@@ -53,11 +53,6 @@ prevent the page from being split by anyone.
   of handling GUP on hugetlbfs will also work fine on transparent
   hugepage backed mappings.
 
-In case you can't handle compound pages if they're returned by
-follow_page, the FOLL_SPLIT bit can be specified as a parameter to
-follow_page, so that it will split the hugepages before returning
-them.
-
 Graceful fallback
 =================
 
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2791,7 +2791,6 @@ struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
 #define FOLL_NOWAIT	0x20	/* if a disk transfer is needed, start the IO
 				 * and return without waiting upon it */
 #define FOLL_POPULATE	0x40	/* fault in page */
-#define FOLL_SPLIT	0x80	/* don't return transhuge pages, split them */
 #define FOLL_HWPOISON	0x100	/* check page is hwpoisoned */
 #define FOLL_NUMA	0x200	/* force NUMA hinting page fault */
 #define FOLL_MIGRATION	0x400	/* wait for page to replace migration entry */
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -516,18 +516,6 @@ static struct page *follow_page_pte(struct vm_area_struct *vma,
 		}
 	}
 
-	if (flags & FOLL_SPLIT && PageTransCompound(page)) {
-		get_page(page);
-		pte_unmap_unlock(ptep, ptl);
-		lock_page(page);
-		ret = split_huge_page(page);
-		unlock_page(page);
-		put_page(page);
-		if (ret)
-			return ERR_PTR(ret);
-		goto retry;
-	}
-
 	/* try_grab_page() does nothing unless FOLL_GET or FOLL_PIN is set. */
 	if (unlikely(!try_grab_page(page, flags))) {
 		page = ERR_PTR(-ENOMEM);
@@ -672,7 +660,7 @@ static struct page *follow_pmd_mask(struct vm_area_struct *vma,
 		spin_unlock(ptl);
 		return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
 	}
-	if (flags & (FOLL_SPLIT | FOLL_SPLIT_PMD)) {
+	if (flags & FOLL_SPLIT_PMD) {
 		int ret;
 		page = pmd_page(*pmd);
 		if (is_huge_zero_page(page)) {
@@ -681,19 +669,7 @@ static struct page *follow_pmd_mask(struct vm_area_struct *vma,
 			split_huge_pmd(vma, pmd, address);
 			if (pmd_trans_unstable(pmd))
 				ret = -EBUSY;
-		} else if (flags & FOLL_SPLIT) {
-			if (unlikely(!try_get_page(page))) {
-				spin_unlock(ptl);
-				return ERR_PTR(-ENOMEM);
-			}
-			spin_unlock(ptl);
-			lock_page(page);
-			ret = split_huge_page(page);
-			unlock_page(page);
-			put_page(page);
-			if (pmd_none(*pmd))
-				return no_page_table(vma, flags);
-		} else {  /* flags & FOLL_SPLIT_PMD */
+		} else {
 			spin_unlock(ptl);
 			split_huge_pmd(vma, pmd, address);
 			ret = pte_alloc(mm, pmd) ? -ENOMEM : 0;