Commit 54205e9c authored by Baolin Wang, committed by Andrew Morton

mm: rmap: move the cache flushing to the correct place for hugetlb PMD sharing

The cache-level flush must always come first when changing an existing
virtual->physical mapping to a new value, since this allows us to
properly handle systems whose caches are strict and require a
virtual->physical translation to exist for the virtual address being
flushed.  So we should move the cache flushing before huge_pmd_unshare().
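
Put differently: flush the caches first, while the old virtual->physical
translation still exists; then change the page table; then flush the TLB.
A minimal sketch of that ordering against the generic cachetlb interfaces
(the helper itself is hypothetical, not kernel code; only the order of the
calls matters):

#include <linux/mm.h>
#include <asm/cacheflush.h>	/* flush_cache_range() */
#include <asm/tlbflush.h>	/* flush_tlb_range() */

/* Hypothetical helper illustrating the flush ordering rule. */
static void change_mapping_sketch(struct vm_area_struct *vma,
				  unsigned long start, unsigned long end)
{
	/*
	 * 1. Cache flush first: a strict cache may need the old
	 *    virtual->physical translation to write back dirty lines.
	 */
	flush_cache_range(vma, start, end);

	/* 2. Now tear down or rewrite the page table entries. */

	/*
	 * 3. TLB flush last, after the mapping has changed, so no CPU
	 *    can keep using a stale translation.
	 */
	flush_tlb_range(vma, start, end);
}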

As Muchun pointed out [1], the architectures that currently support
hugetlb PMD sharing have no cache flush issues in practice.  But we
should still follow the cache/TLB flushing rules when changing a valid
virtual address mapping, in case of potential issues in the future.

[1] https://lore.kernel.org/all/YmT%2F%2FhuUbFX+KHcy@FVFYT0MHHV2J.usts.net/

Link: https://lkml.kernel.org/r/4f7ae6dfdc838ab71e1655188b657c032ff1f28f.1651056365.git.baolin.wang@linux.alibaba.com
Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com>
Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Mina Almasry <almasrymina@google.com>
Cc: Muchun Song <songmuchun@bytedance.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 3d0b95cd
mm/rmap.c: +22 −18
@@ -1532,15 +1532,16 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
 			 * do this outside rmap routines.
 			 */
 			VM_BUG_ON(!(flags & TTU_RMAP_LOCKED));
-			if (huge_pmd_unshare(mm, vma, &address, pvmw.pte)) {
-				/*
-				 * huge_pmd_unshare unmapped an entire PMD
-				 * page.  There is no way of knowing exactly
-				 * which PMDs may be cached for this mm, so
-				 * we must flush them all.  start/end were
-				 * already adjusted above to cover this range.
-				 */
-				flush_cache_range(vma, range.start, range.end);
+			/*
+			 * huge_pmd_unshare may unmap an entire PMD page.
+			 * There is no way of knowing exactly which PMDs may
+			 * be cached for this mm, so we must flush them all.
+			 * start/end were already adjusted above to cover this
+			 * range.
+			 */
+			flush_cache_range(vma, range.start, range.end);
+
+			if (huge_pmd_unshare(mm, vma, &address, pvmw.pte)) {
 				flush_tlb_range(vma, range.start, range.end);
 				mmu_notifier_invalidate_range(mm, range.start,
 							      range.end);
@@ -1557,13 +1558,14 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
 				page_vma_mapped_walk_done(&pvmw);
 				break;
 			}
+		} else {
+			flush_cache_page(vma, address, pte_pfn(*pvmw.pte));
 		}
 
 		/*
 		 * Nuke the page table entry. When having to clear
 		 * PageAnonExclusive(), we always have to flush.
 		 */
-		flush_cache_page(vma, address, pte_pfn(*pvmw.pte));
 		if (should_defer_flush(mm, flags) && !anon_exclusive) {
 			/*
 			 * We clear the PTE but do not flush so potentially
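
With the two hunks above applied, try_to_unmap_one() flushes the cache
before any page table change on both paths.  A condensed paraphrase of the
resulting flow (not the verbatim source; the deferred-flush branch is
elided):

	if (folio_test_hugetlb(folio)) {
		/* Unsharing may unmap an entire PMD page: flush the whole range. */
		flush_cache_range(vma, range.start, range.end);
		if (huge_pmd_unshare(mm, vma, &address, pvmw.pte)) {
			flush_tlb_range(vma, range.start, range.end);
			mmu_notifier_invalidate_range(mm, range.start, range.end);
			page_vma_mapped_walk_done(&pvmw);
			break;
		}
	} else {
		/* Ordinary page: a single-page cache flush is enough. */
		flush_cache_page(vma, address, pte_pfn(*pvmw.pte));
	}
	/* Either way, the cache is clean before the PTE is nuked. */

The two hunks that follow apply the identical transformation to
try_to_migrate_one().
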
@@ -1884,15 +1886,16 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
 			 * do this outside rmap routines.
 			 */
 			VM_BUG_ON(!(flags & TTU_RMAP_LOCKED));
-			if (huge_pmd_unshare(mm, vma, &address, pvmw.pte)) {
-				/*
-				 * huge_pmd_unshare unmapped an entire PMD
-				 * page.  There is no way of knowing exactly
-				 * which PMDs may be cached for this mm, so
-				 * we must flush them all.  start/end were
-				 * already adjusted above to cover this range.
-				 */
-				flush_cache_range(vma, range.start, range.end);
+			/*
+			 * huge_pmd_unshare may unmap an entire PMD page.
+			 * There is no way of knowing exactly which PMDs may
+			 * be cached for this mm, so we must flush them all.
+			 * start/end were already adjusted above to cover this
+			 * range.
+			 */
+			flush_cache_range(vma, range.start, range.end);
+
+			if (huge_pmd_unshare(mm, vma, &address, pvmw.pte)) {
 				flush_tlb_range(vma, range.start, range.end);
 				mmu_notifier_invalidate_range(mm, range.start,
 							      range.end);
@@ -1909,10 +1912,11 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
 				page_vma_mapped_walk_done(&pvmw);
 				break;
 			}
+		} else {
+			flush_cache_page(vma, address, pte_pfn(*pvmw.pte));
 		}
 
 		/* Nuke the page table entry. */
-		flush_cache_page(vma, address, pte_pfn(*pvmw.pte));
 		pteval = ptep_clear_flush(vma, address, pvmw.pte);
 
 		/* Set the dirty flag on the folio now the pte is gone. */