Commit 70d4cbc8 authored by Laurent Dufour's avatar Laurent Dufour Committed by Andrew Morton
Browse files

powerpc/mm: try VMA lock-based page fault handling first

Attempt VMA lock-based page fault handling first, and fall back to the
existing mmap_lock-based handling if that fails.  Copied from "x86/mm: try
VMA lock-based page fault handling first"

[ldufour@linux.ibm.com: powerpc/mm: fix mmap_lock bad unlock]
  Link: https://lkml.kernel.org/r/20230306154244.17560-1-ldufour@linux.ibm.com
  Link: https://lore.kernel.org/linux-mm/842502FB-F99C-417C-9648-A37D0ECDC9CE@linux.ibm.com
Link: https://lkml.kernel.org/r/20230227173632.3292573-32-surenb@google.com


Signed-off-by: Laurent Dufour <ldufour@linux.ibm.com>
Signed-off-by: Suren Baghdasaryan <surenb@google.com>
Cc: Sachin Sant <sachinp@linux.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent cd7f176a
Loading
Loading
Loading
Loading
+37 −0
Original line number Diff line number Diff line
@@ -474,6 +474,40 @@ static int ___do_page_fault(struct pt_regs *regs, unsigned long address,
	if (is_exec)
		flags |= FAULT_FLAG_INSTRUCTION;

#ifdef CONFIG_PER_VMA_LOCK
	if (!(flags & FAULT_FLAG_USER))
		goto lock_mmap;

	vma = lock_vma_under_rcu(mm, address);
	if (!vma)
		goto lock_mmap;

	if (unlikely(access_pkey_error(is_write, is_exec,
				       (error_code & DSISR_KEYFAULT), vma))) {
		vma_end_read(vma);
		goto lock_mmap;
	}

	if (unlikely(access_error(is_write, is_exec, vma))) {
		vma_end_read(vma);
		goto lock_mmap;
	}

	fault = handle_mm_fault(vma, address, flags | FAULT_FLAG_VMA_LOCK, regs);
	vma_end_read(vma);

	if (!(fault & VM_FAULT_RETRY)) {
		count_vm_vma_lock_event(VMA_LOCK_SUCCESS);
		goto done;
	}
	count_vm_vma_lock_event(VMA_LOCK_RETRY);

	if (fault_signal_pending(fault, regs))
		return user_mode(regs) ? 0 : SIGBUS;

lock_mmap:
#endif /* CONFIG_PER_VMA_LOCK */

	/* When running in the kernel we expect faults to occur only to
	 * addresses in user space.  All other faults represent errors in the
	 * kernel and should generate an OOPS.  Unfortunately, in the case of an
@@ -550,6 +584,9 @@ static int ___do_page_fault(struct pt_regs *regs, unsigned long address,

	mmap_read_unlock(current->mm);

#ifdef CONFIG_PER_VMA_LOCK
done:
#endif
	if (unlikely(fault & VM_FAULT_ERROR))
		return mm_fault_error(regs, address, fault);

+1 −0
Original line number Diff line number Diff line
@@ -16,6 +16,7 @@ config PPC_POWERNV
	select PPC_DOORBELL
	select MMU_NOTIFIER
	select FORCE_SMP
	select ARCH_SUPPORTS_PER_VMA_LOCK
	default y

config OPAL_PRD
+1 −0
Original line number Diff line number Diff line
@@ -22,6 +22,7 @@ config PPC_PSERIES
	select HOTPLUG_CPU
	select FORCE_SMP
	select SWIOTLB
	select ARCH_SUPPORTS_PER_VMA_LOCK
	default y

config PARAVIRT