Commit b67fbebd authored by Peter Zijlstra, committed by Linus Torvalds

mmu_gather: Force tlb-flush VM_PFNMAP vmas

Jann reported a race between munmap() and unmap_mapping_range(), where
unmap_mapping_range() will no-op once unmap_vmas() has unlinked the
VMA; however munmap() will not yet have invalidated the TLBs.

Therefore unmap_mapping_range() will complete while there are still
(stale) TLB entries for the specified range.

Mitigate this by force flushing TLBs for VM_PFNMAP ranges.
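To make the ordering concrete, here is an illustrative sketch of the window
being closed (a simplification of the description above, not code or output
from this commit):

	CPU 0: munmap()                      CPU 1: unmap_mapping_range()
	---------------                      ----------------------------
	unmap_vmas()
	  -> VMA unlinked, PTEs cleared
	                                     sees no VMA for the range,
	                                     no-ops and returns
	  ... stale TLB entries still map
	      the (VM_PFNMAP) range ...
	TLB flush (later in munmap())

The hunks below add a vma_pfn flag to struct mmu_gather and use it in
tlb_end_vma() to force a TLB flush for VM_PFNMAP/VM_MIXEDMAP VMAs even when
CONFIG_MMU_GATHER_MERGE_VMAS would otherwise defer the flush.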

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Will Deacon <will@kernel.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 18ba064e
include/asm-generic/tlb.h  +17 −16
@@ -303,6 +303,7 @@ struct mmu_gather {
	 */
	unsigned int		vma_exec : 1;
	unsigned int		vma_huge : 1;
+	unsigned int		vma_pfn  : 1;

	unsigned int		batch_count;

@@ -373,7 +374,6 @@ tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma) { }
#else /* CONFIG_MMU_GATHER_NO_RANGE */

#ifndef tlb_flush
-
/*
 * When an architecture does not provide its own tlb_flush() implementation
 * but does have a reasonably efficient flush_vma_range() implementation
@@ -393,6 +393,9 @@ static inline void tlb_flush(struct mmu_gather *tlb)
		flush_tlb_range(&vma, tlb->start, tlb->end);
	}
}
+#endif
+
+#endif /* CONFIG_MMU_GATHER_NO_RANGE */

static inline void
tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma)
@@ -410,17 +413,9 @@ tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma)
	 */
	tlb->vma_huge = is_vm_hugetlb_page(vma);
	tlb->vma_exec = !!(vma->vm_flags & VM_EXEC);
+	tlb->vma_pfn  = !!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP));
}

-#else
-
-static inline void
-tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma) { }
-
-#endif
-
-#endif /* CONFIG_MMU_GATHER_NO_RANGE */
-
static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
{
	/*
@@ -507,17 +502,23 @@ static inline void tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *

static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
-	if (tlb->fullmm || IS_ENABLED(CONFIG_MMU_GATHER_MERGE_VMAS))
+	if (tlb->fullmm)
		return;

	/*
-	 * Do a TLB flush and reset the range at VMA boundaries; this avoids
-	 * the ranges growing with the unused space between consecutive VMAs,
-	 * but also the mmu_gather::vma_* flags from tlb_start_vma() rely on
-	 * this.
+	 * VM_PFNMAP is more fragile because the core mm will not track the
+	 * page mapcount -- there might not be page-frames for these PFNs after
+	 * all. Force flush TLBs for such ranges to avoid munmap() vs
+	 * unmap_mapping_range() races.
	 */
-	tlb_flush_mmu_tlbonly(tlb);
+	if (tlb->vma_pfn || !IS_ENABLED(CONFIG_MMU_GATHER_MERGE_VMAS)) {
+		/*
+		 * Do a TLB flush and reset the range at VMA boundaries; this avoids
+		 * the ranges growing with the unused space between consecutive VMAs.
+		 */
+		tlb_flush_mmu_tlbonly(tlb);
+	}
}

/*
 * tlb_flush_{pte|pmd|pud|p4d}_range() adjust the tlb->start and tlb->end,