Commit 8b35ca3e authored by Ben Hutchings, committed by Linus Torvalds

arm/mm: Convert to using lock_mm_and_find_vma()



arm has an additional check for address < FIRST_USER_ADDRESS before
expanding the stack.  Since FIRST_USER_ADDRESS is defined everywhere
(generally as 0), move that check to the generic expand_downwards().

Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 7267ef7b
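Note: lock_mm_and_find_vma() is the generic helper this commit converts arm to. It was introduced earlier in this series in mm/memory.c, built only when an architecture selects LOCK_MM_AND_FIND_VMA, and it folds the lock-taking, VMA lookup and stack expansion into one call: it returns with mmap_lock held for reading and a VMA covering the address on success, or NULL with the lock released on failure. A condensed sketch of its shape, paraphrased rather than quoted from upstream (get_mmap_lock_carefully() and upgrade_mmap_lock_carefully() are its real internal helpers; their deadlock-avoidance details are omitted here):

struct vm_area_struct *lock_mm_and_find_vma(struct mm_struct *mm,
		unsigned long addr, struct pt_regs *regs)
{
	struct vm_area_struct *vma;

	/* Read-lock mmap_lock, bailing out instead of deadlocking on
	 * kernel-mode faults that have no exception-table fixup. */
	if (!get_mmap_lock_carefully(mm, regs))
		return NULL;

	vma = find_vma(mm, addr);
	if (likely(vma && vma->vm_start <= addr))
		return vma;	/* common case: vma covers addr */

	/* Only a stack vma may be grown down to cover addr. */
	if (!vma || !(vma->vm_flags & VM_GROWSDOWN)) {
		mmap_read_unlock(mm);
		return NULL;
	}

	/* Expansion needs the write lock; redo the lookup after
	 * upgrading, since the tree may have changed in between. */
	if (!upgrade_mmap_lock_carefully(mm, regs))
		return NULL;

	vma = find_vma(mm, addr);
	if (!vma)
		goto fail;
	if (vma->vm_start <= addr)
		goto success;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto fail;
	if (expand_stack(vma, addr))
		goto fail;

success:
	mmap_write_downgrade(mm);
	return vma;
fail:
	mmap_write_unlock(mm);
	return NULL;
}

This is why the converted do_page_fault() below can jump straight to bad_area on a NULL return without unlocking anything, and why both the open-coded trylock/exception-table dance and the separate __do_page_fault() helper disappear.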
arch/arm/Kconfig  +1 −0
@@ -125,6 +125,7 @@ config ARM
 	select HAVE_UID16
 	select HAVE_VIRT_CPU_ACCOUNTING_GEN
 	select IRQ_FORCED_THREADING
+	select LOCK_MM_AND_FIND_VMA
 	select MODULES_USE_ELF_REL
 	select NEED_DMA_MAP_STATE
 	select OF_EARLY_FLATTREE if OF
arch/arm/mm/fault.c  +14 −49
@@ -232,37 +232,11 @@ static inline bool is_permission_fault(unsigned int fsr)
 	return false;
 }
 
-static vm_fault_t __kprobes
-__do_page_fault(struct mm_struct *mm, unsigned long addr, unsigned int flags,
-		unsigned long vma_flags, struct pt_regs *regs)
-{
-	struct vm_area_struct *vma = find_vma(mm, addr);
-	if (unlikely(!vma))
-		return VM_FAULT_BADMAP;
-
-	if (unlikely(vma->vm_start > addr)) {
-		if (!(vma->vm_flags & VM_GROWSDOWN))
-			return VM_FAULT_BADMAP;
-		if (addr < FIRST_USER_ADDRESS)
-			return VM_FAULT_BADMAP;
-		if (expand_stack(vma, addr))
-			return VM_FAULT_BADMAP;
-	}
-
-	/*
-	 * ok, we have a good vm_area for this memory access, check the
-	 * permissions on the VMA allow for the fault which occurred.
-	 */
-	if (!(vma->vm_flags & vma_flags))
-		return VM_FAULT_BADACCESS;
-
-	return handle_mm_fault(vma, addr & PAGE_MASK, flags, regs);
-}
-
 static int __kprobes
 do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 {
 	struct mm_struct *mm = current->mm;
+	struct vm_area_struct *vma;
 	int sig, code;
 	vm_fault_t fault;
 	unsigned int flags = FAULT_FLAG_DEFAULT;
@@ -301,31 +275,21 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 
 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
 
-	/*
-	 * As per x86, we may deadlock here.  However, since the kernel only
-	 * validly references user space from well defined areas of the code,
-	 * we can bug out early if this is from code which shouldn't.
-	 */
-	if (!mmap_read_trylock(mm)) {
-		if (!user_mode(regs) && !search_exception_tables(regs->ARM_pc))
-			goto no_context;
 retry:
-		mmap_read_lock(mm);
-	} else {
-		/*
-		 * The above down_read_trylock() might have succeeded in
-		 * which case, we'll have missed the might_sleep() from
-		 * down_read()
-		 */
-		might_sleep();
-#ifdef CONFIG_DEBUG_VM
-		if (!user_mode(regs) &&
-		    !search_exception_tables(regs->ARM_pc))
-			goto no_context;
-#endif
+	vma = lock_mm_and_find_vma(mm, addr, regs);
+	if (unlikely(!vma)) {
+		fault = VM_FAULT_BADMAP;
+		goto bad_area;
 	}
 
-	fault = __do_page_fault(mm, addr, flags, vm_flags, regs);
+	/*
+	 * ok, we have a good vm_area for this memory access, check the
+	 * permissions on the VMA allow for the fault which occurred.
+	 */
+	if (!(vma->vm_flags & vm_flags))
+		fault = VM_FAULT_BADACCESS;
+	else
+		fault = handle_mm_fault(vma, addr & PAGE_MASK, flags, regs);
 
 	/* If we need to retry but a fatal signal is pending, handle the
 	 * signal first. We do not need to release the mmap_lock because
@@ -356,6 +320,7 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 	if (likely(!(fault & (VM_FAULT_ERROR | VM_FAULT_BADMAP | VM_FAULT_BADACCESS))))
 		return 0;
 
+bad_area:
 	/*
 	 * If we are in kernel mode at this point, we
 	 * have no context to handle this fault with.
mm/mmap.c  +1 −1
@@ -2036,7 +2036,7 @@ int expand_downwards(struct vm_area_struct *vma, unsigned long address)
 	int error = 0;
 
 	address &= PAGE_MASK;
-	if (address < mmap_min_addr)
+	if (address < mmap_min_addr || address < FIRST_USER_ADDRESS)
 		return -EPERM;
 
 	/* Enforce stack_guard_gap */
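Note: on the architectures where FIRST_USER_ADDRESS is 0 (the common case the commit message mentions), the added comparison is dead code, since an unsigned address can never be below zero; only on architectures that define it non-zero, such as arm, does the generic check change behavior, reproducing the test deleted from __do_page_fault() above. A stand-alone restatement of the new guard (the helper name is hypothetical, for illustration only):

#include <stdbool.h>

/* Assume the common definition for illustration. */
#define FIRST_USER_ADDRESS	0UL

/* Hypothetical restatement of the new expand_downwards() guard:
 * refuse to grow a stack vma below either floor. With
 * FIRST_USER_ADDRESS == 0, the second comparison is always false
 * for an unsigned long and the compiler discards it. */
static bool may_expand_downwards_to(unsigned long address,
				    unsigned long mmap_min_addr)
{
	return !(address < mmap_min_addr ||
		 address < FIRST_USER_ADDRESS);
}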