arch/arc/include/asm/mmu.h (+1 −1)

--- a/arch/arc/include/asm/mmu.h
+++ b/arch/arc/include/asm/mmu.h
@@ -48,7 +48,7 @@
 #ifndef __ASSEMBLY__
 
 typedef struct {
-	unsigned long asid;	/* 8 bit MMU PID + Generation cycle */
+	unsigned long asid[NR_CPUS];	/* 8 bit MMU PID + Generation cycle */
 } mm_context_t;
 
 #ifdef CONFIG_ARC_DBG_TLB_PARANOIA
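Each slot of the asid[] array packs two fields, as the struct comment notes: the low 8 bits are the hardware MMU PID and the upper bits serve as an allocation-cycle ("generation") counter. A minimal standalone sketch of that arithmetic (userspace, illustrative values only, not part of the patch):

#include <stdio.h>

/* Same mask layout as the patch; values below are made up for illustration */
#define MM_CTXT_ASID_MASK	0xffUL			/* low 8 bits: H/w PID */
#define MM_CTXT_CYCLE_MASK	(~MM_CTXT_ASID_MASK)	/* upper bits: generation */
#define MM_CTXT_FIRST_CYCLE	(MM_CTXT_ASID_MASK + 1)
#define MM_CTXT_NO_ASID		0UL

int main(void)
{
	unsigned long asid_cache = MM_CTXT_FIRST_CYCLE;	/* allocator: gen 1, PID 0 */
	unsigned long mm_asid = MM_CTXT_NO_ASID;	/* task never allocated */

	/* Generations differ, so the task must take a fresh ASID */
	if ((mm_asid ^ asid_cache) & MM_CTXT_CYCLE_MASK)
		mm_asid = ++asid_cache;		/* round-robin: gen 1, PID 1 */

	/* Incrementing past PID 255 carries into the generation bits
	 * "for free"; the real code flushes the TLB at that point.
	 */
	asid_cache = MM_CTXT_FIRST_CYCLE | MM_CTXT_ASID_MASK;	/* gen 1, PID 255 */
	asid_cache++;					/* gen 2, PID 0 */

	printf("task asid=%#lx  allocator=%#lx  hw pid=%lu\n",
	       mm_asid, asid_cache, mm_asid & MM_CTXT_ASID_MASK);
	return 0;
}

Because a stale generation never compares equal to the allocator's current one, an mm coming back after a rollover is forced through reallocation rather than silently reusing a PID that may now belong to another task.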
arch/arc/include/asm/mmu_context.h (+28 −16)

--- a/arch/arc/include/asm/mmu_context.h
+++ b/arch/arc/include/asm/mmu_context.h
@@ -30,13 +30,13 @@
  * "Fast Context Switch" i.e. no TLB flush on ctxt-switch
  *
  * Linux assigns each task a unique ASID. A simple round-robin allocation
- * of H/w ASID is done using software tracker @asid_cache.
+ * of H/w ASID is done using software tracker @asid_cpu.
  * When it reaches max 255, the allocation cycle starts afresh by flushing
  * the entire TLB and wrapping ASID back to zero.
  *
  * A new allocation cycle, post rollover, could potentially reassign an ASID
  * to a different task. Thus the rule is to refresh the ASID in a new cycle.
- * The 32 bit @asid_cache (and mm->asid) have 8 bits MMU PID and rest 24 bits
+ * The 32 bit @asid_cpu (and mm->asid) have 8 bits MMU PID and rest 24 bits
  * serve as cycle/generation indicator and natural 32 bit unsigned math
  * automagically increments the generation when lower 8 bits rollover.
  */
@@ -47,9 +47,11 @@
 #define MM_CTXT_FIRST_CYCLE	(MM_CTXT_ASID_MASK + 1)
 #define MM_CTXT_NO_ASID		0UL
 
-#define hw_pid(mm)		(mm->context.asid & MM_CTXT_ASID_MASK)
+#define asid_mm(mm, cpu)	mm->context.asid[cpu]
+#define hw_pid(mm, cpu)		(asid_mm(mm, cpu) & MM_CTXT_ASID_MASK)
 
-extern unsigned int asid_cache;
+DECLARE_PER_CPU(unsigned int, asid_cache);
+#define asid_cpu(cpu)		per_cpu(asid_cache, cpu)
 
 /*
  * Get a new ASID if task doesn't have a valid one (unalloc or from prev cycle)
@@ -57,6 +59,7 @@ extern unsigned int asid_cache;
  */
 static inline void get_new_mmu_context(struct mm_struct *mm)
 {
+	const unsigned int cpu = smp_processor_id();
 	unsigned long flags;
 
 	local_irq_save(flags);
@@ -71,11 +74,11 @@ static inline void get_new_mmu_context(struct mm_struct *mm)
 	 * first need to destroy the context, setting it to invalid
 	 * value.
 	 */
-	if (!((mm->context.asid ^ asid_cache) & MM_CTXT_CYCLE_MASK))
+	if (!((asid_mm(mm, cpu) ^ asid_cpu(cpu)) & MM_CTXT_CYCLE_MASK))
 		goto set_hw;
 
 	/* move to new ASID and handle rollover */
-	if (unlikely(!(++asid_cache & MM_CTXT_ASID_MASK))) {
+	if (unlikely(!(++asid_cpu(cpu) & MM_CTXT_ASID_MASK))) {
 
 		flush_tlb_all();
@@ -84,15 +87,15 @@ static inline void get_new_mmu_context(struct mm_struct *mm)
 		 * If the container itself wrapped around, set it to a non zero
 		 * "generation" to distinguish from no context
 		 */
-		if (!asid_cache)
-			asid_cache = MM_CTXT_FIRST_CYCLE;
+		if (!asid_cpu(cpu))
+			asid_cpu(cpu) = MM_CTXT_FIRST_CYCLE;
 	}
 
 	/* Assign new ASID to tsk */
-	mm->context.asid = asid_cache;
+	asid_mm(mm, cpu) = asid_cpu(cpu);
 
 set_hw:
-	write_aux_reg(ARC_REG_PID, hw_pid(mm) | MMU_ENABLE);
+	write_aux_reg(ARC_REG_PID, hw_pid(mm, cpu) | MMU_ENABLE);
 
 	local_irq_restore(flags);
 }
@@ -104,10 +107,24 @@ static inline void get_new_mmu_context(struct mm_struct *mm)
 static inline int
 init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 {
-	mm->context.asid = MM_CTXT_NO_ASID;
+	int i;
+
+	for_each_possible_cpu(i)
+		asid_mm(mm, i) = MM_CTXT_NO_ASID;
+
 	return 0;
 }
 
+static inline void destroy_context(struct mm_struct *mm)
+{
+	unsigned long flags;
+
+	/* Needed to elide CONFIG_DEBUG_PREEMPT warning */
+	local_irq_save(flags);
+	asid_mm(mm, smp_processor_id()) = MM_CTXT_NO_ASID;
+	local_irq_restore(flags);
+}
+
 /* Prepare the MMU for task: setup PID reg with allocated ASID
    If task doesn't have an ASID (never alloc or stolen, get a new ASID)
 */
@@ -131,11 +148,6 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
  */
 #define activate_mm(prev, next)		switch_mm(prev, next, NULL)
 
-static inline void destroy_context(struct mm_struct *mm)
-{
-	mm->context.asid = MM_CTXT_NO_ASID;
-}
-
 /* it seemed that deactivate_mm( ) is a reasonable place to do book-keeping
  * for retiring-mm. However destroy_context( ) still needs to do that because
  * between mm_release( ) = >deactive_mm( ) and
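The reason the allocator state becomes per-CPU: each core's MMU holds its own set of ASIDs, so a rollover (and its TLB flush) on one core must not disturb another core's mappings, and the same mm may legitimately hold different PIDs on different CPUs. Below is a small userspace model of the scheme under that assumption; it mirrors get_new_mmu_context() minus the IRQ disabling, the register write, and the actual flush, and is illustrative only:

#include <stdio.h>

#define NR_CPUS		2
#define ASID_MASK	0xffUL
#define CYCLE_MASK	(~ASID_MASK)
#define FIRST_CYCLE	(ASID_MASK + 1)

/* One independent allocator per CPU, like DEFINE_PER_CPU(asid_cache) */
static unsigned long asid_cache[NR_CPUS] = { FIRST_CYCLE, FIRST_CYCLE };

struct mm { unsigned long asid[NR_CPUS]; };	/* stand-in for mm_context_t */

static unsigned long get_new_ctx(struct mm *mm, int cpu)
{
	/* Generation mismatch: the cached ASID is stale or never allocated */
	if ((mm->asid[cpu] ^ asid_cache[cpu]) & CYCLE_MASK) {
		if (!(++asid_cache[cpu] & ASID_MASK)) {
			/* PID rolled over: the real code does a local
			 * flush_tlb_all() here */
			if (!asid_cache[cpu])	/* full counter wrap */
				asid_cache[cpu] = FIRST_CYCLE;
		}
		mm->asid[cpu] = asid_cache[cpu];
	}
	return mm->asid[cpu] & ASID_MASK;	/* what goes into ARC_REG_PID */
}

int main(void)
{
	struct mm a = { { 0, 0 } };

	/* The same mm gets a PID per CPU, assigned independently */
	printf("cpu0 pid=%lu, cpu1 pid=%lu\n",
	       get_new_ctx(&a, 0), get_new_ctx(&a, 1));
	return 0;
}

On the relocated destroy_context(): disabling interrupts also disables preemption, which is what makes the smp_processor_id() call safe there and elides the CONFIG_DEBUG_PREEMPT warning its comment mentions.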
arch/arc/mm/tlb.c (+8 −6)

--- a/arch/arc/mm/tlb.c
+++ b/arch/arc/mm/tlb.c
@@ -100,7 +100,7 @@
 
 /* A copy of the ASID from the PID reg is kept in asid_cache */
-unsigned int asid_cache = MM_CTXT_FIRST_CYCLE;
+DEFINE_PER_CPU(unsigned int, asid_cache) = MM_CTXT_FIRST_CYCLE;
 
 /*
  * Utility Routine to erase a J-TLB entry
  */
@@ -274,6 +274,7 @@ noinline void local_flush_tlb_mm(struct mm_struct *mm)
 void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 			   unsigned long end)
 {
+	const unsigned int cpu = smp_processor_id();
 	unsigned long flags;
 
 	/* If range @start to @end is more than 32 TLB entries deep,
@@ -297,9 +298,9 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 	local_irq_save(flags);
 
-	if (vma->vm_mm->context.asid != MM_CTXT_NO_ASID) {
+	if (asid_mm(vma->vm_mm, cpu) != MM_CTXT_NO_ASID) {
 		while (start < end) {
-			tlb_entry_erase(start | hw_pid(vma->vm_mm));
+			tlb_entry_erase(start | hw_pid(vma->vm_mm, cpu));
 			start += PAGE_SIZE;
 		}
 	}
@@ -346,6 +347,7 @@ void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
 void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
 {
+	const unsigned int cpu = smp_processor_id();
 	unsigned long flags;
 
 	/* Note that it is critical that interrupts are DISABLED between
@@ -353,8 +355,8 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
 	 */
 	local_irq_save(flags);
 
-	if (vma->vm_mm->context.asid != MM_CTXT_NO_ASID) {
-		tlb_entry_erase((page & PAGE_MASK) | hw_pid(vma->vm_mm));
+	if (asid_mm(vma->vm_mm, cpu) != MM_CTXT_NO_ASID) {
+		tlb_entry_erase((page & PAGE_MASK) | hw_pid(vma->vm_mm, cpu));
 		utlb_invalidate();
 	}
@@ -400,7 +402,7 @@ void create_tlb(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
 	local_irq_save(flags);
 
-	tlb_paranoid_check(vma->vm_mm->context.asid, address);
+	tlb_paranoid_check(asid_mm(vma->vm_mm, smp_processor_id()), address);
 
 	address &= PAGE_MASK;
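The flush paths build their lookup key the same way before and after the change: the J-TLB is probed by virtual page number plus ASID, so the page-aligned address is OR-ed with the hardware PID, which now comes from the calling CPU's slot of the mm. A sketch of that key construction; PAGE_SHIFT here is a stand-in (8 KB is one common ARC configuration), not taken from the patch:

#include <stdio.h>

#define PAGE_SHIFT	13			/* assumed page size, see above */
#define PAGE_MASK	(~((1UL << PAGE_SHIFT) - 1))
#define ASID_MASK	0xffUL

/* Models the argument the flush helpers pass to tlb_entry_erase() */
static unsigned long tlb_key(unsigned long vaddr, unsigned long mm_asid)
{
	/* The 8-bit PID fits in the page-offset bits, so address and
	 * ASID share one word without colliding.
	 */
	return (vaddr & PAGE_MASK) | (mm_asid & ASID_MASK);
}

int main(void)
{
	/* e.g. one page of an mm whose ASID on this CPU is 0x305 (gen 3, PID 5) */
	printf("erase key = %#lx\n", tlb_key(0x2000a123UL, 0x305UL));
	return 0;
}

Only the low 8 PID bits enter the key, which is why the MM_CTXT_NO_ASID checks guard the flush loops: an mm with no ASID on this CPU has nothing in this CPU's TLB to erase.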