Commit cd7f176a authored by Suren Baghdasaryan, committed by Andrew Morton

arm64/mm: try VMA lock-based page fault handling first

Attempt VMA lock-based page fault handling first, and fall back to the
existing mmap_lock-based handling if that fails.
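
In sketch form, the fast path this patch adds to arm64's do_page_fault()
looks like the following. This is a condensation of the fault.c hunk
below, with the CONFIG_PER_VMA_LOCK guards and the signal-pending quick
path elided:

	/* Try the fault under the per-VMA read lock; no mmap_lock taken. */
	vma = lock_vma_under_rcu(mm, addr);
	if (vma && (vma->vm_flags & vm_flags)) {
		fault = handle_mm_fault(vma, addr & PAGE_MASK,
					mm_flags | FAULT_FLAG_VMA_LOCK, regs);
		vma_end_read(vma);
		if (!(fault & VM_FAULT_RETRY))
			goto done;	/* served without mmap_lock */
	} else if (vma) {
		vma_end_read(vma);	/* access check failed, fall back */
	}
	/* Fall back to the existing mmap_lock-based slow path. */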

Link: https://lkml.kernel.org/r/20230227173632.3292573-31-surenb@google.com
Signed-off-by: Suren Baghdasaryan <surenb@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 0bff0aae
arch/arm64/Kconfig  +1 −0
@@ -95,6 +95,7 @@ config ARM64
 	select ARCH_SUPPORTS_INT128 if CC_HAS_INT128
 	select ARCH_SUPPORTS_NUMA_BALANCING
 	select ARCH_SUPPORTS_PAGE_TABLE_CHECK
+	select ARCH_SUPPORTS_PER_VMA_LOCK
 	select ARCH_WANT_COMPAT_IPC_PARSE_VERSION if COMPAT
 	select ARCH_WANT_DEFAULT_BPF_JIT
 	select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT
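
This single select is what opts arm64 in: with it, CONFIG_PER_VMA_LOCK
(introduced earlier in this series as a def_bool that depends on
ARCH_SUPPORTS_PER_VMA_LOCK together with SMP and MMU) becomes enabled,
which compiles in the #ifdef CONFIG_PER_VMA_LOCK fast path added to
fault.c below.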
arch/arm64/mm/fault.c  +36 −0
@@ -535,6 +535,9 @@ static int __kprobes do_page_fault(unsigned long far, unsigned long esr,
 	unsigned long vm_flags;
 	unsigned int mm_flags = FAULT_FLAG_DEFAULT;
 	unsigned long addr = untagged_addr(far);
+#ifdef CONFIG_PER_VMA_LOCK
+	struct vm_area_struct *vma;
+#endif
 
 	if (kprobe_page_fault(regs, esr))
 		return 0;
@@ -585,6 +588,36 @@ static int __kprobes do_page_fault(unsigned long far, unsigned long esr,
 
 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
 
+#ifdef CONFIG_PER_VMA_LOCK
+	if (!(mm_flags & FAULT_FLAG_USER))
+		goto lock_mmap;
+
+	vma = lock_vma_under_rcu(mm, addr);
+	if (!vma)
+		goto lock_mmap;
+
+	if (!(vma->vm_flags & vm_flags)) {
+		vma_end_read(vma);
+		goto lock_mmap;
+	}
+	fault = handle_mm_fault(vma, addr & PAGE_MASK,
+				mm_flags | FAULT_FLAG_VMA_LOCK, regs);
+	vma_end_read(vma);
+
+	if (!(fault & VM_FAULT_RETRY)) {
+		count_vm_vma_lock_event(VMA_LOCK_SUCCESS);
+		goto done;
+	}
+	count_vm_vma_lock_event(VMA_LOCK_RETRY);
+
+	/* Quick path to respond to signals */
+	if (fault_signal_pending(fault, regs)) {
+		if (!user_mode(regs))
+			goto no_context;
+		return 0;
+	}
+lock_mmap:
+#endif /* CONFIG_PER_VMA_LOCK */
 	/*
 	 * As per x86, we may deadlock here. However, since the kernel only
 	 * validly references user space from well defined areas of the code,
@@ -628,6 +661,9 @@ static int __kprobes do_page_fault(unsigned long far, unsigned long esr,
 	}
 	mmap_read_unlock(mm);
 
+#ifdef CONFIG_PER_VMA_LOCK
+done:
+#endif
 	/*
 	 * Handle the "normal" (no error) case first.
 	 */
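
The VMA_LOCK_SUCCESS and VMA_LOCK_RETRY events counted in the fast path
are exported through /proc/vmstat when per-VMA lock statistics are
enabled (CONFIG_PER_VMA_LOCK_STATS in this series), which gives a quick
way to see how many arm64 faults are now served without taking the
mmap_lock.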