Commit eb309ec8 authored by David Hildenbrand's avatar David Hildenbrand Committed by Andrew Morton
Browse files

mm/mprotect: factor out check whether manual PTE write upgrades are required

Let's factor the check out into vma_wants_manual_pte_write_upgrade(), to be
reused in NUMA hinting fault context soon.

Link: https://lkml.kernel.org/r/20221108174652.198904-5-david@redhat.com


Signed-off-by: David Hildenbrand <david@redhat.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Anshuman Khandual <anshuman.khandual@arm.com>
Cc: Dave Chinner <david@fromorbit.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Nadav Amit <namit@vmware.com>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Peter Xu <peterx@redhat.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent c27f479e
Loading
Loading
Loading
Loading
+14 −2
Original line number Diff line number Diff line
@@ -2088,6 +2088,20 @@ extern unsigned long move_page_tables(struct vm_area_struct *vma,
#define  MM_CP_UFFD_WP_ALL                 (MM_CP_UFFD_WP | \
					    MM_CP_UFFD_WP_RESOLVE)

int vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot);
static inline bool vma_wants_manual_pte_write_upgrade(struct vm_area_struct *vma)
{
	/*
	 * Report whether individual PTEs may have to be made writable
	 * manually, because it cannot be done automatically for every PTE in
	 * the mapping. Private writable mappings always need COW handling;
	 * shared mappings need it whenever write-notify is in effect.
	 */
	if (!(vma->vm_flags & VM_SHARED))
		return !!(vma->vm_flags & VM_WRITE);
	return vma_wants_writenotify(vma, vma->vm_page_prot);
}
extern unsigned long change_protection(struct mmu_gather *tlb,
			      struct vm_area_struct *vma, unsigned long start,
			      unsigned long end, pgprot_t newprot,
@@ -2227,8 +2241,6 @@ static inline int pte_devmap(pte_t pte)
}
#endif

int vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot);

extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
			       spinlock_t **ptl);
static inline pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr,
+4 −13
Original line number Diff line number Diff line
@@ -558,8 +558,8 @@ mprotect_fixup(struct mmu_gather *tlb, struct vm_area_struct *vma,
	struct mm_struct *mm = vma->vm_mm;
	unsigned long oldflags = vma->vm_flags;
	long nrpages = (end - start) >> PAGE_SHIFT;
	unsigned int mm_cp_flags = 0;
	unsigned long charged = 0;
	bool try_change_writable;
	pgoff_t pgoff;
	int error;

@@ -637,20 +637,11 @@ mprotect_fixup(struct mmu_gather *tlb, struct vm_area_struct *vma,
	 * held in write mode.
	 */
	vma->vm_flags = newflags;
	/*
	 * We want to check manually if we can change individual PTEs writable
	 * if we can't do that automatically for all PTEs in a mapping. For
	 * private mappings, that's always the case when we have write
	 * permissions as we properly have to handle COW.
	 */
	if (vma->vm_flags & VM_SHARED)
		try_change_writable = vma_wants_writenotify(vma, vma->vm_page_prot);
	else
		try_change_writable = !!(vma->vm_flags & VM_WRITE);
	if (vma_wants_manual_pte_write_upgrade(vma))
		mm_cp_flags |= MM_CP_TRY_CHANGE_WRITABLE;
	vma_set_page_prot(vma);

	change_protection(tlb, vma, start, end, vma->vm_page_prot,
			  try_change_writable ? MM_CP_TRY_CHANGE_WRITABLE : 0);
	change_protection(tlb, vma, start, end, vma->vm_page_prot, mm_cp_flags);

	/*
	 * Private VM_LOCKED VMA becoming writable: trigger COW to avoid major