Commit 369258ce authored by Mike Kravetz, committed by Andrew Morton

hugetlb: remove duplicate mmu notifications

The common hugetlb unmap routine __unmap_hugepage_range performs mmu
notification calls.  However, in the case where __unmap_hugepage_range is
called via __unmap_hugepage_range_final, mmu notification calls are
performed earlier in other calling routines.

Remove mmu notification calls from __unmap_hugepage_range.  Add
notification calls to the only other caller: unmap_hugepage_range. 
unmap_hugepage_range is called for truncation and hole punch, so change
notification type from UNMAP to CLEAR as this is more appropriate.
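For background, secondary-MMU users (KVM and similar) rely on every page
table teardown being bracketed by an invalidate_range_start/end notifier
pair.  A minimal sketch of the bracketing pattern this patch consolidates
into the caller; every identifier here appears in the diff below:

	struct mmu_notifier_range range;

	/* Tell secondary MMUs which span is about to be cleared. */
	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
				start, end);
	/* PMD sharing may unmap a wider span; advertise the worst case. */
	adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end);
	mmu_notifier_invalidate_range_start(&range);

	/* ... tear down page table entries for [start, end) ... */

	mmu_notifier_invalidate_range_end(&range);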

Link: https://lkml.kernel.org/r/20221114235507.294320-4-mike.kravetz@oracle.com
Signed-off-by: Mike Kravetz <mike.kravetz@oracle.com>
Suggested-by: Peter Xu <peterx@redhat.com>
Cc: Wei Chen <harperchen1110@gmail.com>
Cc: Axel Rasmussen <axelrasmussen@google.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Mina Almasry <almasrymina@google.com>
Cc: Nadav Amit <nadav.amit@gmail.com>
Cc: Naoya Horiguchi <naoya.horiguchi@linux.dev>
Cc: Rik van Riel <riel@surriel.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent c2da319c
mm/hugetlb.c +9 −9
@@ -5076,7 +5076,6 @@ static void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct
 	struct page *page;
 	struct hstate *h = hstate_vma(vma);
 	unsigned long sz = huge_page_size(h);
-	struct mmu_notifier_range range;
 	unsigned long last_addr_mask;
 	bool force_flush = false;
 
@@ -5091,13 +5090,6 @@ static void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct
 	tlb_change_page_size(tlb, sz);
 	tlb_start_vma(tlb, vma);
 
-	/*
-	 * If sharing possible, alert mmu notifiers of worst case.
-	 */
-	mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, mm, start,
-				end);
-	adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end);
-	mmu_notifier_invalidate_range_start(&range);
 	last_addr_mask = hugetlb_mask_last_page(h);
 	address = start;
 	for (; address < end; address += sz) {
@@ -5182,7 +5174,6 @@ static void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct
 		if (ref_page)
 			break;
 	}
-	mmu_notifier_invalidate_range_end(&range);
 	tlb_end_vma(tlb, vma);
 
 	/*
@@ -5210,6 +5201,7 @@ void __unmap_hugepage_range_final(struct mmu_gather *tlb,
 	hugetlb_vma_lock_write(vma);
 	i_mmap_lock_write(vma->vm_file->f_mapping);
 
+	/* mmu notification performed in caller */
 	__unmap_hugepage_range(tlb, vma, start, end, ref_page, zap_flags);
 
 	if (zap_flags & ZAP_FLAG_UNMAP) {	/* final unmap */
@@ -5234,10 +5226,18 @@ void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
 			  unsigned long end, struct page *ref_page,
 			  zap_flags_t zap_flags)
 {
+	struct mmu_notifier_range range;
 	struct mmu_gather tlb;
 
+	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
+				start, end);
+	adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end);
+	mmu_notifier_invalidate_range_start(&range);
 	tlb_gather_mmu(&tlb, vma->vm_mm);
+
 	__unmap_hugepage_range(&tlb, vma, start, end, ref_page, zap_flags);
+
 	mmu_notifier_invalidate_range_end(&range);
 	tlb_finish_mmu(&tlb);
 }
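
For reference, applying the hunks above yields roughly the following
unmap_hugepage_range (reconstructed from the diff; surrounding code not
shown):

void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
			  unsigned long end, struct page *ref_page,
			  zap_flags_t zap_flags)
{
	struct mmu_notifier_range range;
	struct mmu_gather tlb;

	/* CLEAR rather than UNMAP: the VMA remains; only mappings go away. */
	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
				start, end);
	adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end);
	mmu_notifier_invalidate_range_start(&range);
	tlb_gather_mmu(&tlb, vma->vm_mm);

	__unmap_hugepage_range(&tlb, vma, start, end, ref_page, zap_flags);

	mmu_notifier_invalidate_range_end(&range);
	tlb_finish_mmu(&tlb);
}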