Commit 89d0d424 authored by Vineet Gupta

ARC: mm: move MMU specific bits out of ASID allocator

And while at it, rewrite commentary on ASID allocator

Signed-off-by: Vineet Gupta <vgupta@kernel.org>
parent be43b096
arch/arc/include/asm/mmu.h +13 −0
@@ -64,6 +64,19 @@ typedef struct {
 	unsigned long asid[NR_CPUS];	/* 8 bit MMU PID + Generation cycle */
 } mm_context_t;
 
+static inline void mmu_setup_asid(struct mm_struct *mm, unsigned int asid)
+{
+	write_aux_reg(ARC_REG_PID, asid | MMU_ENABLE);
+}
+
+static inline void mmu_setup_pgd(struct mm_struct *mm, void *pgd)
+{
+	/* PGD cached in MMU reg to avoid 3 mem lookups: task->mm->pgd */
+#ifdef CONFIG_ISA_ARCV2
+	write_aux_reg(ARC_REG_SCRATCH_DATA0, (unsigned int)pgd);
+#endif
+}
+
 static inline int is_pae40_enabled(void)
 {
 	return IS_ENABLED(CONFIG_ARC_HAS_PAE40);
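
The comment inside mmu_setup_pgd() carries the key rationale: a TLB-miss fast path that re-derived the pgd from the task structure would chase three dependent pointers (task -> mm -> pgd), while a pgd cached in an MMU scratch register costs a single aux-register read. Below is a minimal user-space sketch of that trade-off; struct task_stub, struct mm_stub and cached_pgd are illustrative stand-ins, not kernel types.

#include <assert.h>

/* Illustrative stand-ins for the kernel structures */
struct mm_stub   { unsigned long *pgd; };
struct task_stub { struct mm_stub *mm; };

static struct task_stub *current_task;	/* what a miss handler starts from */
static unsigned long *cached_pgd;	/* stand-in for ARC_REG_SCRATCH_DATA0 */

/* Without the cache: task -> mm -> pgd, three dependent loads per miss */
static unsigned long *pgd_via_task(void)
{
	return current_task->mm->pgd;
}

/* With the cache: context switch stores the pgd once (mirrors mmu_setup_pgd) */
static void cache_pgd(struct mm_stub *next)
{
	cached_pgd = next->pgd;
}

/* ...and the miss handler reads it back with a single access */
static unsigned long *pgd_via_cache(void)
{
	return cached_pgd;
}

int main(void)
{
	unsigned long dir[4];
	struct mm_stub mm = { dir };
	struct task_stub tsk = { &mm };

	current_task = &tsk;
	cache_pgd(&mm);
	assert(pgd_via_task() == pgd_via_cache());
	return 0;
}

Note that on non-ARCv2 configurations mmu_setup_pgd() compiles to nothing, which is exactly why the callers changed below can invoke it unconditionally instead of wrapping the register write in #ifdef.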
arch/arc/include/asm/mmu_context.h +13 −15
@@ -15,22 +15,23 @@
 #ifndef _ASM_ARC_MMU_CONTEXT_H
 #define _ASM_ARC_MMU_CONTEXT_H
 
-#include <asm/arcregs.h>
-#include <asm/tlb.h>
 #include <linux/sched/mm.h>
 
+#include <asm/tlb.h>
 #include <asm-generic/mm_hooks.h>
 
-/*		ARC700 ASID Management
+/*		ARC ASID Management
  *
- * ARC MMU provides 8-bit ASID (0..255) to TAG TLB entries, allowing entries
- * with same vaddr (different tasks) to co-exit. This provides for
- * "Fast Context Switch" i.e. no TLB flush on ctxt-switch
+ * MMU tags TLBs with an 8-bit ASID, avoiding need to flush the TLB on
+ * context-switch.
+ *
+ * ASID is managed per cpu, so task threads across CPUs can have different
+ * ASID. Global ASID management is needed if hardware supports TLB shootdown
+ * and/or shared TLB across cores, which ARC doesn't.
  *
- * Linux assigns each task a unique ASID. A simple round-robin allocation
- * of H/w ASID is done using software tracker @asid_cpu.
- * When it reaches max 255, the allocation cycle starts afresh by flushing
- * the entire TLB and wrapping ASID back to zero.
+ * Each task is assigned unique ASID, with a simple round-robin allocator
+ * tracked in @asid_cpu. When the 8-bit value rolls over, a new cycle is
+ * started over from 0, and the TLB is flushed
  *
  * A new allocation cycle, post rollover, could potentially reassign an ASID
  * to a different task. Thus the rule is to refresh the ASID in a new cycle.
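
The cycle ("generation") scheme described above can be modeled outside the kernel: the per-cpu allocator keeps the 8-bit hardware ASID in the low byte of a 32-bit counter and uses the upper bits as the cycle number, so "was this mm's ASID allocated in the current cycle?" reduces to one XOR-and-mask test. A user-space sketch of the scheme follows (single cpu, no locking; the names echo @asid_cpu and the masks, but this is a model, not the kernel code).

#include <stdio.h>

#define ASID_MASK	0x000000ffu		/* 8-bit hardware ASID */
#define CYCLE_MASK	(~ASID_MASK)		/* allocation cycle / generation */
#define FIRST_CYCLE	(ASID_MASK + 1)
#define NO_ASID		0u			/* cycle 0 reserved: never assigned */

static unsigned int asid_cpu = FIRST_CYCLE;	/* per-cpu allocator state */

static void flush_tlb_all(void)
{
	puts("  [TLB flushed, new cycle]");
}

/* Return the hw ASID for a task, allocating afresh if its cached value
 * is from an older cycle (upper bits differ) or was never assigned. */
static unsigned int get_mmu_context(unsigned int *mm_asid)
{
	if (!((*mm_asid ^ asid_cpu) & CYCLE_MASK))
		return *mm_asid & ASID_MASK;	/* current cycle: still valid */

	/* rollover of the 8-bit ASID starts a new cycle and flushes TLB */
	if (!(++asid_cpu & ASID_MASK)) {
		flush_tlb_all();
		if (!asid_cpu)			/* 32-bit wraparound: skip cycle 0 */
			asid_cpu = FIRST_CYCLE;
	}

	*mm_asid = asid_cpu;
	return *mm_asid & ASID_MASK;
}

int main(void)
{
	unsigned int task_a = NO_ASID, task_b = NO_ASID;

	printf("A -> ASID %u\n", get_mmu_context(&task_a));	/* 1 */
	printf("B -> ASID %u\n", get_mmu_context(&task_b));	/* 2 */
	printf("A -> ASID %u\n", get_mmu_context(&task_a));	/* 1 again: same cycle */
	return 0;
}

After a rollover every task fails the cycle test on its next switch-in and picks up a fresh ASID, which is exactly the "refresh the ASID in a new cycle" rule stated above.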
@@ -93,7 +94,7 @@ static inline void get_new_mmu_context(struct mm_struct *mm)
 	asid_mm(mm, cpu) = asid_cpu(cpu);
 
 set_hw:
-	write_aux_reg(ARC_REG_PID, hw_pid(mm, cpu) | MMU_ENABLE);
+	mmu_setup_asid(mm, hw_pid(mm, cpu));
 
 	local_irq_restore(flags);
 }
@@ -146,10 +147,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 	 */
 	cpumask_set_cpu(cpu, mm_cpumask(next));
 
-#ifdef CONFIG_ISA_ARCV2
-	/* PGD cached in MMU reg to avoid 3 mem lookups: task->mm->pgd */
-	write_aux_reg(ARC_REG_SCRATCH_DATA0, next->pgd);
-#endif
+	mmu_setup_pgd(next, next->pgd);
 
 	get_new_mmu_context(next);
 }
arch/arc/mm/tlb.c +4 −7
@@ -716,14 +716,11 @@ void arc_mmu_init(void)
 	if (IS_ENABLED(CONFIG_ARC_HAS_PAE40) && !mmu->pae)
 		panic("Hardware doesn't support PAE40\n");
 
-	/* Enable the MMU */
-	write_aux_reg(ARC_REG_PID, MMU_ENABLE);
+	/* Enable the MMU with ASID 0 */
+	mmu_setup_asid(NULL, 0);
 
-	/* In arc700/smp needed for re-entrant interrupt handling */
-#ifdef CONFIG_ISA_ARCV2
-	/* swapper_pg_dir is the pgd for the kernel, used by vmalloc */
-	write_aux_reg(ARC_REG_SCRATCH_DATA0, swapper_pg_dir);
-#endif
+	/* cache the pgd pointer in MMU SCRATCH reg (ARCv2 only) */
+	mmu_setup_pgd(NULL, swapper_pg_dir);
 
 	if (pae40_exist_but_not_enab())
 		write_aux_reg(ARC_REG_TLBPD1HI, 0);