Commit 8d7071af authored by Linus Torvalds
Browse files

mm: always expand the stack with the mmap write lock held



This finishes the job of always holding the mmap write lock when
extending the user stack vma, and removes the 'write_locked' argument
from the vm helper functions again.

For some cases, we just avoid expanding the stack at all: drivers and
page pinning really shouldn't be extending any stacks.  Let's see if any
strange users really wanted that.

It's worth noting that architectures that weren't converted to the new
lock_mm_and_find_vma() helper function are left using the legacy
"expand_stack()" function, but it has been changed to drop the mmap_lock
and take it for writing while expanding the vma.  This makes it fairly
straightforward to convert the remaining architectures.

As a result of dropping and re-taking the lock, the calling conventions
for this function have also changed, since the old vma may no longer be
valid.  So it will now return the new vma if successful, and NULL - and
the lock dropped - if the area could not be extended.

Tested-by: Vegard Nossum <vegard.nossum@oracle.com>
Tested-by: John Paul Adrian Glaubitz <glaubitz@physik.fu-berlin.de> # ia64
Tested-by: Frank Scheiner <frank.scheiner@web.de> # ia64
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent f313c51d
Loading
Loading
Loading
Loading
+6 −30
Original line number Diff line number Diff line
@@ -110,10 +110,12 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
         * register backing store that needs to expand upwards, in
         * this case vma will be null, but prev_vma will ne non-null
         */
        if (( !vma && prev_vma ) || (address < vma->vm_start) )
		goto check_expansion;
        if (( !vma && prev_vma ) || (address < vma->vm_start) ) {
		vma = expand_stack(mm, address);
		if (!vma)
			goto bad_area_nosemaphore;
	}

  good_area:
	code = SEGV_ACCERR;

	/* OK, we've got a good vm_area for this memory area.  Check the access permissions: */
@@ -177,35 +179,9 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
	mmap_read_unlock(mm);
	return;

  check_expansion:
	if (!(prev_vma && (prev_vma->vm_flags & VM_GROWSUP) && (address == prev_vma->vm_end))) {
		if (!vma)
			goto bad_area;
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto bad_area;
		if (REGION_NUMBER(address) != REGION_NUMBER(vma->vm_start)
		    || REGION_OFFSET(address) >= RGN_MAP_LIMIT)
			goto bad_area;
		if (expand_stack(vma, address))
			goto bad_area;
	} else {
		vma = prev_vma;
		if (REGION_NUMBER(address) != REGION_NUMBER(vma->vm_start)
		    || REGION_OFFSET(address) >= RGN_MAP_LIMIT)
			goto bad_area;
		/*
		 * Since the register backing store is accessed sequentially,
		 * we disallow growing it by more than a page at a time.
		 */
		if (address > vma->vm_end + PAGE_SIZE - sizeof(long))
			goto bad_area;
		if (expand_upwards(vma, address))
			goto bad_area;
	}
	goto good_area;

  bad_area:
	mmap_read_unlock(mm);
  bad_area_nosemaphore:
	if ((isr & IA64_ISR_SP)
	    || ((isr & IA64_ISR_NA) && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH))
	{
+6 −3
Original line number Diff line number Diff line
@@ -105,8 +105,9 @@ int do_page_fault(struct pt_regs *regs, unsigned long address,
		if (address + 256 < rdusp())
			goto map_err;
	}
	if (expand_stack(vma, address))
		goto map_err;
	vma = expand_stack(mm, address);
	if (!vma)
		goto map_err_nosemaphore;

/*
 * Ok, we have a good vm_area for this memory access, so
@@ -196,10 +197,12 @@ int do_page_fault(struct pt_regs *regs, unsigned long address,
	goto send_sig;

map_err:
	mmap_read_unlock(mm);
map_err_nosemaphore:
	current->thread.signo = SIGSEGV;
	current->thread.code = SEGV_MAPERR;
	current->thread.faddr = address;
	goto send_sig;
	return send_fault_sig(regs);

acc_err:
	current->thread.signo = SIGSEGV;
+3 −2
Original line number Diff line number Diff line
@@ -192,8 +192,9 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
			&& (kernel_mode(regs) || !store_updates_sp(regs)))
				goto bad_area;
	}
	if (expand_stack(vma, address))
		goto bad_area;
	vma = expand_stack(mm, address);
	if (!vma)
		goto bad_area_nosemaphore;

good_area:
	code = SEGV_ACCERR;
+3 −2
Original line number Diff line number Diff line
@@ -127,8 +127,9 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long address,
		if (address + PAGE_SIZE < regs->sp)
			goto bad_area;
	}
	if (expand_stack(vma, address))
		goto bad_area;
	vma = expand_stack(mm, address);
	if (!vma)
		goto bad_area_nosemaphore;

	/*
	 * Ok, we have a good vm_area for this memory access, so
+11 −12
Original line number Diff line number Diff line
@@ -288,15 +288,19 @@ void do_page_fault(struct pt_regs *regs, unsigned long code,
retry:
	mmap_read_lock(mm);
	vma = find_vma_prev(mm, address, &prev_vma);
	if (!vma || address < vma->vm_start)
		goto check_expansion;
	if (!vma || address < vma->vm_start) {
		if (!prev || !(prev->vm_flags & VM_GROWSUP))
			goto bad_area;
		vma = expand_stack(mm, address);
		if (!vma)
			goto bad_area_nosemaphore;
	}

/*
 * Ok, we have a good vm_area for this memory access. We still need to
 * check the access permissions.
 */

good_area:

	if ((vma->vm_flags & acc_type) != acc_type)
		goto bad_area;

@@ -347,17 +351,13 @@ void do_page_fault(struct pt_regs *regs, unsigned long code,
	mmap_read_unlock(mm);
	return;

check_expansion:
	vma = prev_vma;
	if (vma && (expand_stack(vma, address) == 0))
		goto good_area;

/*
 * Something tried to access memory that isn't in our memory map..
 */
bad_area:
	mmap_read_unlock(mm);

bad_area_nosemaphore:
	if (user_mode(regs)) {
		int signo, si_code;

@@ -449,7 +449,7 @@ handle_nadtlb_fault(struct pt_regs *regs)
{
	unsigned long insn = regs->iir;
	int breg, treg, xreg, val = 0;
	struct vm_area_struct *vma, *prev_vma;
	struct vm_area_struct *vma;
	struct task_struct *tsk;
	struct mm_struct *mm;
	unsigned long address;
@@ -485,7 +485,7 @@ handle_nadtlb_fault(struct pt_regs *regs)
				/* Search for VMA */
				address = regs->ior;
				mmap_read_lock(mm);
				vma = find_vma_prev(mm, address, &prev_vma);
				vma = vma_lookup(mm, address);
				mmap_read_unlock(mm);

				/*
@@ -494,7 +494,6 @@ handle_nadtlb_fault(struct pt_regs *regs)
				 */
				acc_type = (insn & 0x40) ? VM_WRITE : VM_READ;
				if (vma
				    && address >= vma->vm_start
				    && (vma->vm_flags & acc_type) == acc_type)
					val = 1;
			}
Loading