Commit 270a8306 authored by Max Filippov
Browse files

xtensa: extract vmalloc_fault code into a function



Move full MMU-specific code into a separate function to isolate it from
more generic do_page_fault code. No functional changes.

Signed-off-by: Max Filippov <jcmvbkbc@gmail.com>
parent 4916be42
Loading
Loading
Loading
Loading
+54 −53
Original line number Diff line number Diff line
@@ -23,6 +23,55 @@

void bad_page_fault(struct pt_regs*, unsigned long, int);

/*
 * Handle a kernel-space (vmalloc-area) fault by copying the missing
 * page-table entries for @address from the kernel 'reference' page table
 * (init_mm.pgd) into the current task's page table.  If any level of the
 * walk is inconsistent, the fault is treated as fatal and reported via
 * bad_page_fault() with SIGKILL.
 *
 * NOTE: the goto label below is also named bad_page_fault; C keeps labels
 * in a separate namespace from functions, so this is legal (if confusing).
 */
static void vmalloc_fault(struct pt_regs *regs, unsigned int address)
{
	/* Synchronize this task's top level page-table
	 * with the 'reference' page table.
	 */
	struct mm_struct *act_mm = current->active_mm;
	int index = pgd_index(address);
	pgd_t *pgd, *pgd_k;
	p4d_t *p4d, *p4d_k;
	pud_t *pud, *pud_k;
	pmd_t *pmd, *pmd_k;
	pte_t *pte_k;

	/* No mm to synchronize with — nothing sensible to do. */
	if (act_mm == NULL)
		goto bad_page_fault;

	/* Top-level entries for this address in the task's table (pgd)
	 * and in the kernel reference table (pgd_k).
	 */
	pgd = act_mm->pgd + index;
	pgd_k = init_mm.pgd + index;

	/* The reference table must map this address at the top level,
	 * otherwise the access is genuinely invalid.
	 */
	if (!pgd_present(*pgd_k))
		goto bad_page_fault;

	/* Copy the kernel's top-level entry into the task's table before
	 * walking the lower levels through it.
	 */
	pgd_val(*pgd) = pgd_val(*pgd_k);

	/* Walk both tables in lockstep; a missing entry on either side
	 * at any level means the fault cannot be repaired here.
	 */
	p4d = p4d_offset(pgd, address);
	p4d_k = p4d_offset(pgd_k, address);
	if (!p4d_present(*p4d) || !p4d_present(*p4d_k))
		goto bad_page_fault;

	pud = pud_offset(p4d, address);
	pud_k = pud_offset(p4d_k, address);
	if (!pud_present(*pud) || !pud_present(*pud_k))
		goto bad_page_fault;

	pmd = pmd_offset(pud, address);
	pmd_k = pmd_offset(pud_k, address);
	if (!pmd_present(*pmd) || !pmd_present(*pmd_k))
		goto bad_page_fault;

	/* Copy the pmd entry from the reference table, then confirm the
	 * final pte actually maps the faulting address.
	 */
	pmd_val(*pmd) = pmd_val(*pmd_k);
	pte_k = pte_offset_kernel(pmd_k, address);

	if (!pte_present(*pte_k))
		goto bad_page_fault;
	return;

bad_page_fault:
	bad_page_fault(regs, address, SIGKILL);
}
/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
@@ -48,8 +97,10 @@ void do_page_fault(struct pt_regs *regs)
	/* We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 */
	if (address >= TASK_SIZE && !user_mode(regs))
		goto vmalloc_fault;
	if (address >= TASK_SIZE && !user_mode(regs)) {
		vmalloc_fault(regs, address);
		return;
	}

	/* If we're in an interrupt or have no user
	 * context, we must not take the fault..
@@ -113,7 +164,7 @@ void do_page_fault(struct pt_regs *regs)

	if (fault_signal_pending(fault, regs)) {
		if (!user_mode(regs))
			goto bad_page_fault;
			bad_page_fault(regs, address, SIGKILL);
		return;
	}

@@ -180,56 +231,6 @@ void do_page_fault(struct pt_regs *regs)
	if (!user_mode(regs))
		bad_page_fault(regs, address, SIGBUS);
	return;

vmalloc_fault:
	{
		/* Synchronize this task's top level page-table
		 * with the 'reference' page table.
		 */
		struct mm_struct *act_mm = current->active_mm;
		int index = pgd_index(address);
		pgd_t *pgd, *pgd_k;
		p4d_t *p4d, *p4d_k;
		pud_t *pud, *pud_k;
		pmd_t *pmd, *pmd_k;
		pte_t *pte_k;

		if (act_mm == NULL)
			goto bad_page_fault;

		pgd = act_mm->pgd + index;
		pgd_k = init_mm.pgd + index;

		if (!pgd_present(*pgd_k))
			goto bad_page_fault;

		pgd_val(*pgd) = pgd_val(*pgd_k);

		p4d = p4d_offset(pgd, address);
		p4d_k = p4d_offset(pgd_k, address);
		if (!p4d_present(*p4d) || !p4d_present(*p4d_k))
			goto bad_page_fault;

		pud = pud_offset(p4d, address);
		pud_k = pud_offset(p4d_k, address);
		if (!pud_present(*pud) || !pud_present(*pud_k))
			goto bad_page_fault;

		pmd = pmd_offset(pud, address);
		pmd_k = pmd_offset(pud_k, address);
		if (!pmd_present(*pmd) || !pmd_present(*pmd_k))
			goto bad_page_fault;

		pmd_val(*pmd) = pmd_val(*pmd_k);
		pte_k = pte_offset_kernel(pmd_k, address);

		if (!pte_present(*pte_k))
			goto bad_page_fault;
		return;
	}
bad_page_fault:
	bad_page_fault(regs, address, SIGKILL);
	return;
}