Commit 0506c31d authored by Baolin Wang, committed by Andrew Morton

mm: rmap: simplify the hugetlb handling when unmapping or migration

As pointed out in a previous discussion [1], there are too many levels
of indentation in the hugetlb handling when unmapping or migrating.  We
can combine the folio_test_anon() and huge_pmd_unshare() checks into a
single condition to save one level of indentation, by adding a local
variable and moving the VM_BUG_ON() slightly earlier.

No intended functional changes in this patch.

[1] https://lore.kernel.org/all/0b986dc4-5843-3e2d-c2df-5a2e9f13e6ab@oracle.com/
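
In short, the patch replaces an outer folio_test_anon() check wrapping
a nested huge_pmd_unshare() call with a single combined condition.  A
simplified before/after sketch of the pattern (an editorial
illustration distilled from the diff below, not the exact kernel code):

	/* Before: nested checks, one extra indent level. */
	if (!folio_test_anon(folio)) {
		VM_BUG_ON(!(flags & TTU_RMAP_LOCKED));

		if (huge_pmd_unshare(mm, vma, &address, pvmw.pte)) {
			/* flush TLB, notify MMU, stop the rmap walk */
		}
	}

	/* After: cache the test in a local, fold it into the condition. */
	bool anon = folio_test_anon(folio);

	VM_BUG_ON(!anon && !(flags & TTU_RMAP_LOCKED));
	if (!anon && huge_pmd_unshare(mm, vma, &address, pvmw.pte)) {
		/* flush TLB, notify MMU, stop the rmap walk */
	}

Because && short-circuits, huge_pmd_unshare() is still only called for
non-anonymous folios, and the VM_BUG_ON() still only fires when a
non-anonymous folio arrives without TTU_RMAP_LOCKED, so behavior is
preserved.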

Link: https://lkml.kernel.org/r/28414b1b96f095e838c1e548074f8e0fc70d78cf.1655724713.git.baolin.wang@linux.alibaba.com
Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Muchun Song <songmuchun@bytedance.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent f7cc67ae
mm/rmap.c: +44 −46

--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1537,6 +1537,8 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
 				 PageAnonExclusive(subpage);
 
 		if (folio_test_hugetlb(folio)) {
+			bool anon = folio_test_anon(folio);
+
 			/*
 			 * The try_to_unmap() is only passed a hugetlb page
 			 * in the case where the hugetlb page is poisoned.
@@ -1551,15 +1553,13 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
 			 */
 			flush_cache_range(vma, range.start, range.end);
 
-			if (!folio_test_anon(folio)) {
-				/*
-				 * To call huge_pmd_unshare, i_mmap_rwsem must be
-				 * held in write mode.  Caller needs to explicitly
-				 * do this outside rmap routines.
-				 */
-				VM_BUG_ON(!(flags & TTU_RMAP_LOCKED));
-
-				if (huge_pmd_unshare(mm, vma, &address, pvmw.pte)) {
+			/*
+			 * To call huge_pmd_unshare, i_mmap_rwsem must be
+			 * held in write mode.  Caller needs to explicitly
+			 * do this outside rmap routines.
+			 */
+			VM_BUG_ON(!anon && !(flags & TTU_RMAP_LOCKED));
+			if (!anon && huge_pmd_unshare(mm, vma, &address, pvmw.pte)) {
 				flush_tlb_range(vma, range.start, range.end);
 				mmu_notifier_invalidate_range(mm, range.start,
 							      range.end);
@@ -1576,7 +1576,6 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
 				page_vma_mapped_walk_done(&pvmw);
 				break;
 			}
-			}
 			pteval = huge_ptep_clear_flush(vma, address, pvmw.pte);
 		} else {
 			flush_cache_page(vma, address, pte_pfn(*pvmw.pte));
@@ -1906,6 +1905,8 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
 				 PageAnonExclusive(subpage);
 
 		if (folio_test_hugetlb(folio)) {
+			bool anon = folio_test_anon(folio);
+
 			/*
 			 * huge_pmd_unshare may unmap an entire PMD page.
 			 * There is no way of knowing exactly which PMDs may
@@ -1915,15 +1916,13 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
 			 */
 			flush_cache_range(vma, range.start, range.end);
 
-			if (!folio_test_anon(folio)) {
-				/*
-				 * To call huge_pmd_unshare, i_mmap_rwsem must be
-				 * held in write mode.  Caller needs to explicitly
-				 * do this outside rmap routines.
-				 */
-				VM_BUG_ON(!(flags & TTU_RMAP_LOCKED));
-
-				if (huge_pmd_unshare(mm, vma, &address, pvmw.pte)) {
+			/*
+			 * To call huge_pmd_unshare, i_mmap_rwsem must be
+			 * held in write mode.  Caller needs to explicitly
+			 * do this outside rmap routines.
+			 */
+			VM_BUG_ON(!anon && !(flags & TTU_RMAP_LOCKED));
+			if (!anon && huge_pmd_unshare(mm, vma, &address, pvmw.pte)) {
 				flush_tlb_range(vma, range.start, range.end);
 				mmu_notifier_invalidate_range(mm, range.start,
 							      range.end);
@@ -1940,7 +1939,6 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
 				page_vma_mapped_walk_done(&pvmw);
 				break;
 			}
-			}
 
 			/* Nuke the hugetlb page table entry */
 			pteval = huge_ptep_clear_flush(vma, address, pvmw.pte);