Commit f177b06e authored by Wang Kefeng, committed by Russell King (Oracle)

ARM: 9127/1: mm: Cleanup access_error()

Currently the write fault check is done twice, once in do_page_fault()
and again in access_error(). Clean this up by removing access_error():
do the fault check and the vma flags setup directly in do_page_fault(),
then pass the resulting vma flags to __do_page_fault().

No functional change.

Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Signed-off-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
parent 488cab12
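
As an informal aside, the logic this patch consolidates into do_page_fault() can be sketched as a minimal, self-contained user-space C program. The FSR_* and VM_* constants below are stand-in values for illustration rather than the real kernel definitions, and vma_permits_fault() is a hypothetical helper that mirrors what do_page_fault() and __do_page_fault() now do between them:

#include <stdbool.h>
#include <stdio.h>

/* Stand-in bit values for illustration only; the real FSR_* and VM_*
 * definitions live in the kernel's ARM fault and mm headers. */
#define FSR_WRITE	(1 << 11)	/* fault was caused by a write */
#define FSR_CM		(1 << 13)	/* cache maintenance operation fault */
#define FSR_LNX_PF	(1U << 31)	/* Linux-internal: prefetch abort */

#define VM_READ		0x1UL
#define VM_WRITE	0x2UL
#define VM_EXEC		0x4UL
#define VM_ACCESS_FLAGS	(VM_READ | VM_WRITE | VM_EXEC)

/* Hypothetical helper: derive the permission the fault requires once,
 * then test it against the vma's flags with a single mask. */
static bool vma_permits_fault(unsigned int fsr, unsigned long vma_vm_flags)
{
	unsigned long vm_flags = VM_ACCESS_FLAGS;	/* default: any access */

	if ((fsr & FSR_WRITE) && !(fsr & FSR_CM))
		vm_flags = VM_WRITE;	/* write fault requires write permission */
	if (fsr & FSR_LNX_PF)
		vm_flags = VM_EXEC;	/* instruction fetch requires exec permission */

	return vma_vm_flags & vm_flags;	/* the test access_error() used to invert */
}

int main(void)
{
	/* A write fault against a read-only vma must be refused (prints 0)... */
	printf("write on read-only vma: %d\n", vma_permits_fault(FSR_WRITE, VM_READ));
	/* ...while a plain read fault on the same vma is allowed (prints 1). */
	printf("read on read-only vma:  %d\n", vma_permits_fault(0, VM_READ));
	return 0;
}

Deriving the required permission once and testing it with a single vma->vm_flags mask is what lets the patch drop access_error() without changing behaviour: the FSR bits are decoded once per fault instead of twice.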
arch/arm/mm/fault.c: +14 −24
@@ -183,26 +183,9 @@ void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 #define VM_FAULT_BADMAP		0x010000
 #define VM_FAULT_BADACCESS	0x020000
 
-/*
- * Check that the permissions on the VMA allow for the fault which occurred.
- * If we encountered a write fault, we must have write permission, otherwise
- * we allow any permission.
- */
-static inline bool access_error(unsigned int fsr, struct vm_area_struct *vma)
-{
-	unsigned int mask = VM_ACCESS_FLAGS;
-
-	if ((fsr & FSR_WRITE) && !(fsr & FSR_CM))
-		mask = VM_WRITE;
-	if (fsr & FSR_LNX_PF)
-		mask = VM_EXEC;
-
-	return vma->vm_flags & mask ? false : true;
-}
-
 static vm_fault_t __kprobes
-__do_page_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
-		unsigned int flags, struct pt_regs *regs)
+__do_page_fault(struct mm_struct *mm, unsigned long addr, unsigned int flags,
+		unsigned long vma_flags, struct pt_regs *regs)
 {
 	struct vm_area_struct *vma = find_vma(mm, addr);
 	if (unlikely(!vma))
@@ -218,10 +201,10 @@ __do_page_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
 	}
 
 	/*
-	 * Ok, we have a good vm_area for this
-	 * memory access, so we can handle it.
+	 * ok, we have a good vm_area for this memory access, check the
+	 * permissions on the VMA allow for the fault which occurred.
 	 */
-	if (access_error(fsr, vma))
+	if (!(vma->vm_flags & vma_flags))
 		return VM_FAULT_BADACCESS;
 
 	return handle_mm_fault(vma, addr & PAGE_MASK, flags, regs);
@@ -234,6 +217,7 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 	int sig, code;
 	vm_fault_t fault;
 	unsigned int flags = FAULT_FLAG_DEFAULT;
+	unsigned long vm_flags = VM_ACCESS_FLAGS;
 
 	if (kprobe_page_fault(regs, fsr))
 		return 0;
@@ -252,8 +236,14 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 
 	if (user_mode(regs))
 		flags |= FAULT_FLAG_USER;
-	if ((fsr & FSR_WRITE) && !(fsr & FSR_CM))
+
+	if ((fsr & FSR_WRITE) && !(fsr & FSR_CM)) {
 		flags |= FAULT_FLAG_WRITE;
+		vm_flags = VM_WRITE;
+	}
+
+	if (fsr & FSR_LNX_PF)
+		vm_flags = VM_EXEC;
 
 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
 
@@ -281,7 +271,7 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 #endif
 	}
 
-	fault = __do_page_fault(mm, addr, fsr, flags, regs);
+	fault = __do_page_fault(mm, addr, flags, vm_flags, regs);
 
 	/* If we need to retry but a fatal signal is pending, handle the
 	 * signal first. We do not need to release the mmap_lock because