Commit a050ba1e authored by Linus Torvalds
Browse files

mm/fault: convert remaining simple cases to lock_mm_and_find_vma()



This does the simple pattern conversion of alpha, arc, csky, hexagon,
loongarch, nios2, sh, sparc32, and xtensa to the lock_mm_and_find_vma()
helper.  They all have the regular fault handling pattern without odd
special cases.

The remaining architectures all have something that keeps us from a
straightforward conversion: ia64 and parisc have stacks that can grow
both up as well as down (and ia64 has special address region checks).

And m68k, microblaze, openrisc, sparc64, and um end up having extra
rules about only expanding the stack down a limited amount below the
user space stack pointer.  That is something that x86 used to do too
(long long ago), and it probably could just be skipped, but it still
makes the conversion less than trivial.

Note that this conversion was done manually and with the exception of
alpha without any build testing, because I have a fairly limited cross-
building environment.  The cases are all simple, and I went through the
changes several times, but...

Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 8b35ca3e
Loading
Loading
Loading
Loading
+1 −0
Original line number Diff line number Diff line
@@ -30,6 +30,7 @@ config ALPHA
	select HAS_IOPORT
	select HAVE_ARCH_AUDITSYSCALL
	select HAVE_MOD_ARCH_SPECIFIC
	select LOCK_MM_AND_FIND_VMA
	select MODULES_USE_ELF_RELA
	select ODD_RT_SIGACTION
	select OLD_SIGSUSPEND
+3 −10
Original line number Diff line number Diff line
@@ -119,20 +119,12 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
		flags |= FAULT_FLAG_USER;
	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
retry:
	mmap_read_lock(mm);
	vma = find_vma(mm, address);
	vma = lock_mm_and_find_vma(mm, address, regs);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, address))
		goto bad_area;
		goto bad_area_nosemaphore;

	/* Ok, we have a good vm_area for this memory access, so
	   we can handle it.  */
 good_area:
	si_code = SEGV_ACCERR;
	if (cause < 0) {
		if (!(vma->vm_flags & VM_EXEC))
@@ -192,6 +184,7 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
 bad_area:
	mmap_read_unlock(mm);

 bad_area_nosemaphore:
	if (user_mode(regs))
		goto do_sigsegv;

+1 −0
Original line number Diff line number Diff line
@@ -41,6 +41,7 @@ config ARC
	select HAVE_PERF_EVENTS
	select HAVE_SYSCALL_TRACEPOINTS
	select IRQ_DOMAIN
	select LOCK_MM_AND_FIND_VMA
	select MODULES_USE_ELF_RELA
	select OF
	select OF_EARLY_FLATTREE
+3 −8
Original line number Diff line number Diff line
@@ -113,15 +113,9 @@ void do_page_fault(unsigned long address, struct pt_regs *regs)

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
retry:
	mmap_read_lock(mm);

	vma = find_vma(mm, address);
	vma = lock_mm_and_find_vma(mm, address, regs);
	if (!vma)
		goto bad_area;
	if (unlikely(address < vma->vm_start)) {
		if (!(vma->vm_flags & VM_GROWSDOWN) || expand_stack(vma, address))
			goto bad_area;
	}
		goto bad_area_nosemaphore;

	/*
	 * vm_area is good, now check permissions for this memory access
@@ -161,6 +155,7 @@ void do_page_fault(unsigned long address, struct pt_regs *regs)
bad_area:
	mmap_read_unlock(mm);

bad_area_nosemaphore:
	/*
	 * Major/minor page fault accounting
	 * (in case of retry we only land here once)
+1 −0
Original line number Diff line number Diff line
@@ -96,6 +96,7 @@ config CSKY
	select HAVE_REGS_AND_STACK_ACCESS_API
	select HAVE_STACKPROTECTOR
	select HAVE_SYSCALL_TRACEPOINTS
	select LOCK_MM_AND_FIND_VMA
	select MAY_HAVE_SPARSE_IRQ
	select MODULES_USE_ELF_RELA if MODULES
	select OF
Loading