arch/powerpc/Kconfig (+1 −0)

--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -278,6 +278,7 @@ config PPC
 	select IRQ_DOMAIN
 	select IRQ_FORCED_THREADING
 	select KASAN_VMALLOC			if KASAN && MODULES
+	select LOCK_MM_AND_FIND_VMA
 	select MMU_GATHER_PAGE_SIZE
 	select MMU_GATHER_RCU_TABLE_FREE
 	select MMU_GATHER_MERGE_VMAS

arch/powerpc/mm/fault.c (+3 −36)

--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -84,11 +84,6 @@ static int __bad_area(struct pt_regs *regs, unsigned long address, int si_code)
 	return __bad_area_nosemaphore(regs, address, si_code);
 }
 
-static noinline int bad_area(struct pt_regs *regs, unsigned long address)
-{
-	return __bad_area(regs, address, SEGV_MAPERR);
-}
-
 static noinline int bad_access_pkey(struct pt_regs *regs, unsigned long address,
 				    struct vm_area_struct *vma)
 {
@@ -515,40 +510,12 @@ static int ___do_page_fault(struct pt_regs *regs, unsigned long address,
 	 * we will deadlock attempting to validate the fault against the
 	 * address space.  Luckily the kernel only validly references user
 	 * space from well defined areas of code, which are listed in the
-	 * exceptions table.
-	 *
-	 * As the vast majority of faults will be valid we will only perform
-	 * the source reference check when there is a possibility of a deadlock.
-	 * Attempt to lock the address space, if we cannot we then validate the
-	 * source.  If this is invalid we can skip the address space check,
-	 * thus avoiding the deadlock.
+	 * exceptions table. lock_mm_and_find_vma() handles that logic.
 	 */
-	if (unlikely(!mmap_read_trylock(mm))) {
-		if (!is_user && !search_exception_tables(regs->nip))
-			return bad_area_nosemaphore(regs, address);
-
 retry:
-		mmap_read_lock(mm);
-	} else {
-		/*
-		 * The above down_read_trylock() might have succeeded in
-		 * which case we'll have missed the might_sleep() from
-		 * down_read():
-		 */
-		might_sleep();
-	}
-
-	vma = find_vma(mm, address);
+	vma = lock_mm_and_find_vma(mm, address, regs);
 	if (unlikely(!vma))
-		return bad_area(regs, address);
-
-	if (unlikely(vma->vm_start > address)) {
-		if (unlikely(!(vma->vm_flags & VM_GROWSDOWN)))
-			return bad_area(regs, address);
-
-		if (unlikely(expand_stack(vma, address)))
-			return bad_area(regs, address);
-	}
+		return bad_area_nosemaphore(regs, address);
 
 	if (unlikely(access_pkey_error(is_write, is_exec,
 				       (error_code & DSISR_KEYFAULT), vma)))
arch/powerpc/Kconfig (+1 −0)

--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -278,6 +278,7 @@ config PPC
 	select IRQ_DOMAIN
 	select IRQ_FORCED_THREADING
 	select KASAN_VMALLOC			if KASAN && MODULES
+	select LOCK_MM_AND_FIND_VMA
 	select MMU_GATHER_PAGE_SIZE
 	select MMU_GATHER_RCU_TABLE_FREE
 	select MMU_GATHER_MERGE_VMAS

arch/powerpc/mm/fault.c (+3 −36)

--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -84,11 +84,6 @@ static int __bad_area(struct pt_regs *regs, unsigned long address, int si_code)
 	return __bad_area_nosemaphore(regs, address, si_code);
 }
 
-static noinline int bad_area(struct pt_regs *regs, unsigned long address)
-{
-	return __bad_area(regs, address, SEGV_MAPERR);
-}
-
 static noinline int bad_access_pkey(struct pt_regs *regs, unsigned long address,
 				    struct vm_area_struct *vma)
 {
@@ -515,40 +510,12 @@ static int ___do_page_fault(struct pt_regs *regs, unsigned long address,
 	 * we will deadlock attempting to validate the fault against the
 	 * address space.  Luckily the kernel only validly references user
	 * space from well defined areas of code, which are listed in the
-	 * exceptions table.
-	 *
-	 * As the vast majority of faults will be valid we will only perform
-	 * the source reference check when there is a possibility of a deadlock.
-	 * Attempt to lock the address space, if we cannot we then validate the
-	 * source.  If this is invalid we can skip the address space check,
-	 * thus avoiding the deadlock.
+	 * exceptions table. lock_mm_and_find_vma() handles that logic.
 	 */
-	if (unlikely(!mmap_read_trylock(mm))) {
-		if (!is_user && !search_exception_tables(regs->nip))
-			return bad_area_nosemaphore(regs, address);
-
 retry:
-		mmap_read_lock(mm);
-	} else {
-		/*
-		 * The above down_read_trylock() might have succeeded in
-		 * which case we'll have missed the might_sleep() from
-		 * down_read():
-		 */
-		might_sleep();
-	}
-
-	vma = find_vma(mm, address);
+	vma = lock_mm_and_find_vma(mm, address, regs);
 	if (unlikely(!vma))
-		return bad_area(regs, address);
-
-	if (unlikely(vma->vm_start > address)) {
-		if (unlikely(!(vma->vm_flags & VM_GROWSDOWN)))
-			return bad_area(regs, address);
-
-		if (unlikely(expand_stack(vma, address)))
-			return bad_area(regs, address);
-	}
+		return bad_area_nosemaphore(regs, address);
 
 	if (unlikely(access_pkey_error(is_write, is_exec,
 				       (error_code & DSISR_KEYFAULT), vma)))