Commit ae870a68 authored by Linus Torvalds

arm64/mm: Convert to using lock_mm_and_find_vma()



This converts arm64 to use the new page fault helper.  It was very
straightforward, but still needed a fix for the "obvious" conversion I
initially did.  Thanks to Suren for the fix and testing.

Fixed-and-tested-by: Suren Baghdasaryan <surenb@google.com>
Unnecessary-code-removal-by: Liam R. Howlett <Liam.Howlett@oracle.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent eda00472
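
Note: selecting LOCK_MM_AND_FIND_VMA (first hunk below) builds the generic lock_mm_and_find_vma() helper in mm/memory.c. Its contract is what the converted arm64 code relies on: take the mmap read lock, find the VMA covering the faulting address, expand a VM_GROWSDOWN stack if needed, and return NULL with the lock already dropped when no usable VMA exists. A simplified sketch of that logic as of this commit, not the verbatim implementation (get_mmap_lock_carefully() is the internal mm/memory.c helper; its kernel-fault bail-out details are omitted):

struct vm_area_struct *lock_mm_and_find_vma(struct mm_struct *mm,
					    unsigned long addr,
					    struct pt_regs *regs)
{
	struct vm_area_struct *vma;

	/*
	 * Take the mmap read lock; bails out for kernel-mode faults
	 * with no exception-table fixup, so callers no longer need
	 * the trylock dance this commit removes.
	 */
	if (!get_mmap_lock_carefully(mm, regs))
		return NULL;

	vma = find_vma(mm, addr);
	if (likely(vma && vma->vm_start <= addr))
		return vma;

	/* Only a VM_GROWSDOWN mapping may be grown down to addr. */
	if (!vma || !(vma->vm_flags & VM_GROWSDOWN) ||
	    expand_stack(vma, addr)) {
		mmap_read_unlock(mm);
		return NULL;
	}
	return vma;
}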
arch/arm64/Kconfig +1 −0
@@ -225,6 +225,7 @@ config ARM64
 	select IRQ_DOMAIN
 	select IRQ_FORCED_THREADING
 	select KASAN_VMALLOC if KASAN
+	select LOCK_MM_AND_FIND_VMA
 	select MODULES_USE_ELF_RELA
 	select NEED_DMA_MAP_STATE
 	select NEED_SG_DMA_LENGTH
arch/arm64/mm/fault.c +8 −39
@@ -483,27 +483,14 @@ static void do_bad_area(unsigned long far, unsigned long esr,
 #define VM_FAULT_BADMAP		((__force vm_fault_t)0x010000)
 #define VM_FAULT_BADACCESS	((__force vm_fault_t)0x020000)
 
-static vm_fault_t __do_page_fault(struct mm_struct *mm, unsigned long addr,
+static vm_fault_t __do_page_fault(struct mm_struct *mm,
+				  struct vm_area_struct *vma, unsigned long addr,
 				  unsigned int mm_flags, unsigned long vm_flags,
 				  struct pt_regs *regs)
 {
-	struct vm_area_struct *vma = find_vma(mm, addr);
-
-	if (unlikely(!vma))
-		return VM_FAULT_BADMAP;
-
 	/*
 	 * Ok, we have a good vm_area for this memory access, so we can handle
 	 * it.
-	 */
-	if (unlikely(vma->vm_start > addr)) {
-		if (!(vma->vm_flags & VM_GROWSDOWN))
-			return VM_FAULT_BADMAP;
-		if (expand_stack(vma, addr))
-			return VM_FAULT_BADMAP;
-	}
-
-	/*
 	 * Check that the permissions on the VMA allow for the fault which
 	 * occurred.
 	 */
@@ -617,31 +604,15 @@ static int __kprobes do_page_fault(unsigned long far, unsigned long esr,
 	}
 lock_mmap:
 #endif /* CONFIG_PER_VMA_LOCK */
-	/*
-	 * As per x86, we may deadlock here. However, since the kernel only
-	 * validly references user space from well defined areas of the code,
-	 * we can bug out early if this is from code which shouldn't.
-	 */
-	if (!mmap_read_trylock(mm)) {
-		if (!user_mode(regs) && !search_exception_tables(regs->pc))
-			goto no_context;
+
 retry:
-		mmap_read_lock(mm);
-	} else {
-		/*
-		 * The above mmap_read_trylock() might have succeeded in which
-		 * case, we'll have missed the might_sleep() from down_read().
-		 */
-		might_sleep();
-#ifdef CONFIG_DEBUG_VM
-		if (!user_mode(regs) && !search_exception_tables(regs->pc)) {
-			mmap_read_unlock(mm);
-			goto no_context;
-		}
-#endif
+	vma = lock_mm_and_find_vma(mm, addr, regs);
+	if (unlikely(!vma)) {
+		fault = VM_FAULT_BADMAP;
+		goto done;
 	}
 
-	fault = __do_page_fault(mm, addr, mm_flags, vm_flags, regs);
+	fault = __do_page_fault(mm, vma, addr, mm_flags, vm_flags, regs);
 
 	/* Quick path to respond to signals */
 	if (fault_signal_pending(fault, regs)) {
@@ -660,9 +631,7 @@ static int __kprobes do_page_fault(unsigned long far, unsigned long esr,
 	}
 	mmap_read_unlock(mm);
 
-#ifdef CONFIG_PER_VMA_LOCK
 done:
-#endif
 	/*
 	 * Handle the "normal" (no error) case first.
 	 */
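
Taken together, the fault.c hunks shrink the locked slow path of do_page_fault() to roughly the following (a paraphrase of the post-commit flow, with the unchanged CONFIG_PER_VMA_LOCK fast path and the error handling elided):

retry:
	vma = lock_mm_and_find_vma(mm, addr, regs);
	if (unlikely(!vma)) {
		fault = VM_FAULT_BADMAP;
		goto done;	/* the helper already dropped the mmap lock */
	}

	fault = __do_page_fault(mm, vma, addr, mm_flags, vm_flags, regs);

	/* ... fault_signal_pending() and VM_FAULT_RETRY handling ... */

	mmap_read_unlock(mm);
done:
	/*
	 * "done" is now reachable on every configuration, so the #ifdef
	 * around it is gone; both the VMA-lock path and the BADMAP path
	 * above jump here.
	 */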