arch/parisc/kernel/cache.c  (+175 −11)

@@ -68,16 +68,6 @@ flush_cache_all_local(void)
 }
 EXPORT_SYMBOL(flush_cache_all_local);
 
-/* flushes EVERYTHING (tlb & cache) */
-void flush_all_caches(void)
-{
-	flush_cache_all();
-	flush_tlb_all();
-}
-EXPORT_SYMBOL(flush_all_caches);
-
 void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
 {

@@ -270,6 +260,83 @@ void disable_sr_hashing(void)
 		panic("SpaceID hashing is still on!\n");
 }
 
+/* Simple function to work out if we have an existing address translation
+ * for a user space vma. */
+static inline int translation_exists(struct vm_area_struct *vma,
+		unsigned long addr, unsigned long pfn)
+{
+	pgd_t *pgd = pgd_offset(vma->vm_mm, addr);
+	pmd_t *pmd;
+	pte_t pte;
+
+	if (pgd_none(*pgd))
+		return 0;
+
+	pmd = pmd_offset(pgd, addr);
+	if (pmd_none(*pmd) || pmd_bad(*pmd))
+		return 0;
+
+	/* We cannot take the pte lock here: flush_cache_page is usually
+	 * called with pte lock already held.  Whereas flush_dcache_page
+	 * takes flush_dcache_mmap_lock, which is lower in the hierarchy:
+	 * the vma itself is secure, but the pte might come or go racily. */
+	pte = *pte_offset_map(pmd, addr);
+	/* But pte_unmap() does nothing on this architecture */
+
+	/* Filter out coincidental file entries and swap entries */
+	if (!(pte_val(pte) & (_PAGE_FLUSH|_PAGE_PRESENT)))
+		return 0;
+
+	return pte_pfn(pte) == pfn;
+}
+
+/* Private function to flush a page from the cache of a non-current
+ * process.  cr25 contains the Page Directory of the current user
+ * process; we're going to hijack both it and the user space %sr3 to
+ * temporarily make the non-current process current.  We have to do
+ * this because cache flushing may cause a non-access tlb miss which
+ * the handlers have to fill in from the pgd of the non-current
+ * process. */
+static inline void flush_user_cache_page_non_current(struct vm_area_struct *vma,
+		unsigned long vmaddr)
+{
+	/* save the current process space and pgd */
+	unsigned long space = mfsp(3), pgd = mfctl(25);
+
+	/* we don't mind taking interrupts since they may not
+	 * do anything with user space, but we can't
+	 * be preempted here */
+	preempt_disable();
+
+	/* make us current */
+	mtctl(__pa(vma->vm_mm->pgd), 25);
+	mtsp(vma->vm_mm->context, 3);
+
+	flush_user_dcache_page(vmaddr);
+	if (vma->vm_flags & VM_EXEC)
+		flush_user_icache_page(vmaddr);
+
+	/* put the old current process back */
+	mtsp(space, 3);
+	mtctl(pgd, 25);
+	preempt_enable();
+}
+
+static inline void __flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr)
+{
+	if (likely(vma->vm_mm->context == mfsp(3))) {
+		flush_user_dcache_page(vmaddr);
+		if (vma->vm_flags & VM_EXEC)
+			flush_user_icache_page(vmaddr);
+	} else {
+		flush_user_cache_page_non_current(vma, vmaddr);
+	}
+}
+
 void flush_dcache_page(struct page *page)
 {
 	struct address_space *mapping = page_mapping(page);

@@ -342,7 +409,7 @@ void clear_user_page_asm(void *page, unsigned long vaddr)
 #define FLUSH_THRESHOLD 0x80000 /* 0.5MB */
 int parisc_cache_flush_threshold __read_mostly = FLUSH_THRESHOLD;
 
-void parisc_setup_cache_timing(void)
+void __init parisc_setup_cache_timing(void)
 {
 	unsigned long rangetime, alltime;
 	unsigned long size;

@@ -366,6 +433,9 @@ void parisc_setup_cache_timing(void)
 	if (!parisc_cache_flush_threshold)
 		parisc_cache_flush_threshold = FLUSH_THRESHOLD;
 
+	if (parisc_cache_flush_threshold > cache_info.dc_size)
+		parisc_cache_flush_threshold = cache_info.dc_size;
+
 	printk(KERN_INFO "Setting cache flush threshold to %x (%d CPUs online)\n",
 		parisc_cache_flush_threshold, num_online_cpus());
 }

@@ -410,3 +480,97 @@ void kunmap_parisc(void *addr)
 }
 EXPORT_SYMBOL(kunmap_parisc);
 #endif
+
+void __flush_tlb_range(unsigned long sid, unsigned long start,
+		       unsigned long end)
+{
+	unsigned long npages;
+
+	npages = ((end - (start & PAGE_MASK)) + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
+	if (npages >= 512)  /* 2MB of space: arbitrary, should be tuned */
+		flush_tlb_all();
+	else {
+		mtsp(sid, 1);
+		purge_tlb_start();
+		if (split_tlb) {
+			while (npages--) {
+				pdtlb(start);
+				pitlb(start);
+				start += PAGE_SIZE;
+			}
+		} else {
+			while (npages--) {
+				pdtlb(start);
+				start += PAGE_SIZE;
+			}
+		}
+		purge_tlb_end();
+	}
+}
+
+static void cacheflush_h_tmp_function(void *dummy)
+{
+	flush_cache_all_local();
+}
+
+void flush_cache_all(void)
+{
+	on_each_cpu(cacheflush_h_tmp_function, NULL, 1, 1);
+}
+
+void flush_cache_mm(struct mm_struct *mm)
+{
+#ifdef CONFIG_SMP
+	flush_cache_all();
+#else
+	flush_cache_all_local();
+#endif
+}
+
+void flush_user_dcache_range(unsigned long start, unsigned long end)
+{
+	if ((end - start) < parisc_cache_flush_threshold)
+		flush_user_dcache_range_asm(start, end);
+	else
+		flush_data_cache();
+}
+
+void flush_user_icache_range(unsigned long start, unsigned long end)
+{
+	if ((end - start) < parisc_cache_flush_threshold)
+		flush_user_icache_range_asm(start, end);
+	else
+		flush_instruction_cache();
+}
+
+void flush_cache_range(struct vm_area_struct *vma,
+		unsigned long start, unsigned long end)
+{
+	int sr3;
+
+	if (!vma->vm_mm->context) {
+		BUG();
+		return;
+	}
+
+	sr3 = mfsp(3);
+	if (vma->vm_mm->context == sr3) {
+		flush_user_dcache_range(start, end);
+		flush_user_icache_range(start, end);
+	} else {
+		flush_cache_all();
+	}
+}
+
+void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn)
+{
+	BUG_ON(!vma->vm_mm->context);
+
+	if (likely(translation_exists(vma, vmaddr, pfn)))
+		__flush_cache_page(vma, vmaddr);
+}
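Aside on the translation_exists() guard above: per the in-code comments, the walk runs without the pte lock and simply refuses to flush unless a present pte still maps the expected pfn, since the flush itself can take a non-access TLB miss that must be satisfiable from the target process's page table. A minimal userspace model of just that check, using a toy two-level table (every type and name below is invented for illustration, not kernel API):

#include <stdio.h>

#define PAGE_SHIFT   12
#define PTRS_PER_PTE 1024
#define PRESENT      0x1		/* models _PAGE_PRESENT */

struct pte { unsigned long val; };	/* low bit = present, rest = pfn */
struct pmd { struct pte *ptes; };	/* NULL models pmd_none() */
struct pgd { struct pmd *pmds; };	/* NULL models pgd_none() */

static int translation_exists_model(struct pgd *pgd, unsigned long addr,
				    unsigned long pfn)
{
	if (!pgd->pmds)
		return 0;				/* pgd_none() */
	struct pmd *pmd = &pgd->pmds[0];		/* toy table: one pmd */
	if (!pmd->ptes)
		return 0;				/* pmd_none() */
	struct pte pte = pmd->ptes[(addr >> PAGE_SHIFT) % PTRS_PER_PTE];
	if (!(pte.val & PRESENT))
		return 0;			/* swap/file entry: skip */
	return (pte.val >> 1) == pfn;		/* pte_pfn(pte) == pfn */
}

int main(void)
{
	struct pte ptes[PTRS_PER_PTE] = {{0}};
	struct pmd pmd = { ptes };
	struct pgd pgd = { &pmd };
	unsigned long addr = 0x1000, pfn = 42;

	ptes[(addr >> PAGE_SHIFT) % PTRS_PER_PTE].val = (pfn << 1) | PRESENT;
	printf("mapped:    %d\n", translation_exists_model(&pgd, addr, pfn));
	printf("wrong pfn: %d\n", translation_exists_model(&pgd, addr, 7));
	return 0;
}

The second call shows why the function compares against the expected pfn rather than just testing presence: a pte that raced to a different physical page must not be treated as the mapping the caller asked about.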
arch/parisc/kernel/traps.c  (+4 −1)

@@ -39,6 +39,8 @@
 #include <asm/pdc.h>
 #include <asm/pdc_chassis.h>
 #include <asm/unwind.h>
+#include <asm/tlbflush.h>
+#include <asm/cacheflush.h>
 
 #include "../math-emu/math-emu.h"	/* for handle_fpe() */

@@ -554,7 +556,8 @@ void handle_interruption(int code, struct pt_regs *regs)
 		/* Low-priority machine check */
 		pdc_chassis_send_status(PDC_CHASSIS_DIRECT_LPMC);
 
-		flush_all_caches();
+		flush_cache_all();
+		flush_tlb_all();
 		cpu_lpmc(5, regs);
 		return;

include/asm-parisc/cache.h  (+5 −24)

@@ -30,31 +30,11 @@
 
 #define __read_mostly __attribute__((__section__(".data.read_mostly")))
 
-extern void flush_data_cache_local(void *);	/* flushes local data-cache only */
-extern void flush_instruction_cache_local(void *); /* flushes local code-cache only */
-#ifdef CONFIG_SMP
-extern void flush_data_cache(void);	/* flushes data-cache only (all processors) */
-extern void flush_instruction_cache(void);	/* flushes i-cache only (all processors) */
-#else
-#define flush_data_cache() flush_data_cache_local(NULL)
-#define flush_instruction_cache() flush_instruction_cache_local(NULL)
-#endif
-
-extern void parisc_cache_init(void);	/* initializes cache-flushing */
-extern void flush_all_caches(void);	/* flush everything (tlb & cache) */
-extern int get_cache_info(char *);
-extern void flush_user_icache_range_asm(unsigned long, unsigned long);
-extern void flush_kernel_icache_range_asm(unsigned long, unsigned long);
-extern void flush_user_dcache_range_asm(unsigned long, unsigned long);
-extern void flush_kernel_dcache_range_asm(unsigned long, unsigned long);
-extern void flush_kernel_dcache_page_asm(void *);
-extern void flush_kernel_icache_page(void *);
-extern void disable_sr_hashing(void);	/* turns off space register hashing */
-extern void disable_sr_hashing_asm(int); /* low level support for above */
-extern void free_sid(unsigned long);
+void parisc_cache_init(void);	/* initializes cache-flushing */
+void disable_sr_hashing_asm(int); /* low level support for above */
+void disable_sr_hashing(void);	/* turns off space register hashing */
+void free_sid(unsigned long);
 unsigned long alloc_sid(void);
-extern void flush_user_dcache_page(unsigned long);
-extern void flush_user_icache_page(unsigned long);
 
 struct seq_file;
 extern void show_cache_info(struct seq_file *m);

@@ -63,6 +43,7 @@
 extern int split_tlb;
 extern int dcache_stride;
 extern int icache_stride;
 extern struct pdc_cache_info cache_info;
+void parisc_setup_cache_timing(void);
 
 #define pdtlb(addr)	asm volatile("pdtlb 0(%%sr1,%0)" : : "r" (addr));
 #define pitlb(addr)	asm volatile("pitlb 0(%%sr1,%0)" : : "r" (addr));
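parisc_setup_cache_timing(), newly declared above and now marked __init in cache.c, derives parisc_cache_flush_threshold by timing a ranged flush against a whole-cache flush, and this patch additionally clamps the result to the D-cache size. A rough userspace sketch of that calibration idea, with stub flush costs (dc_size_model and touch() are invented stand-ins; only the break-even arithmetic and the clamp mirror the patch):

#include <stdio.h>
#include <time.h>

#define FLUSH_THRESHOLD 0x80000UL	/* 0.5MB fallback, as in the patch */

static unsigned long dc_size_model = 0x100000;	/* pretend 1MB D-cache */

/* stand-in for the cost of flushing `bytes` worth of cache lines */
static void touch(unsigned long bytes)
{
	for (volatile unsigned long i = 0; i < bytes; i += 64)
		;
}

static unsigned long time_ns(unsigned long bytes)
{
	struct timespec t0, t1;

	clock_gettime(CLOCK_MONOTONIC, &t0);
	touch(bytes);
	clock_gettime(CLOCK_MONOTONIC, &t1);
	return (unsigned long)(t1.tv_sec - t0.tv_sec) * 1000000000UL
	       + (unsigned long)(t1.tv_nsec - t0.tv_nsec);
}

int main(void)
{
	unsigned long size = 0x200000;			/* sample range: 2MB */
	unsigned long rangetime = time_ns(size);	/* ranged flush cost */
	unsigned long alltime = time_ns(dc_size_model);	/* full flush cost */

	/* break-even: below this size the ranged flush is cheaper */
	unsigned long threshold = rangetime ? size * alltime / rangetime : 0;

	if (!threshold)
		threshold = FLUSH_THRESHOLD;	/* keep a sane floor */
	if (threshold > dc_size_model)
		threshold = dc_size_model;	/* the clamp this patch adds */

	printf("cache flush threshold: %#lx\n", threshold);
	return 0;
}

The clamp is the interesting part: once a range is at least as large as the D-cache itself, flushing the whole cache can never be the slower option, so a larger threshold would only ever hurt.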
include/asm-parisc/cacheflush.h  (+33 −144)

@@ -2,60 +2,44 @@
 #define _PARISC_CACHEFLUSH_H
 
 #include <linux/mm.h>
-#include <asm/cache.h>	/* for flush_user_dcache_range_asm() proto */
 
 /* The usual comment is "Caches aren't brain-dead on the <architecture>".
  * Unfortunately, that doesn't apply to PA-RISC. */
 
-/* Cache flush operations */
+/* Internal implementation */
+void flush_data_cache_local(void *);	/* flushes local data-cache only */
+void flush_instruction_cache_local(void *); /* flushes local code-cache only */
 #ifdef CONFIG_SMP
-#define flush_cache_mm(mm) flush_cache_all()
+void flush_data_cache(void);	/* flushes data-cache only (all processors) */
+void flush_instruction_cache(void);	/* flushes i-cache only (all processors) */
 #else
-#define flush_cache_mm(mm) flush_cache_all_local()
+#define flush_data_cache() flush_data_cache_local(NULL)
+#define flush_instruction_cache() flush_instruction_cache_local(NULL)
 #endif
 
 #define flush_cache_dup_mm(mm) flush_cache_mm(mm)
 
-#define flush_kernel_dcache_range(start,size) \
-	flush_kernel_dcache_range_asm((start), (start)+(size));
+void flush_user_icache_range_asm(unsigned long, unsigned long);
+void flush_kernel_icache_range_asm(unsigned long, unsigned long);
+void flush_user_dcache_range_asm(unsigned long, unsigned long);
+void flush_kernel_dcache_range_asm(unsigned long, unsigned long);
+void flush_kernel_dcache_page_asm(void *);
+void flush_kernel_icache_page(void *);
+void flush_user_dcache_page(unsigned long);
+void flush_user_icache_page(unsigned long);
 
-extern void flush_cache_all_local(void);
+/* Cache flush operations */
 
-static inline void cacheflush_h_tmp_function(void *dummy)
-{
-	flush_cache_all_local();
-}
+void flush_cache_all_local(void);
+void flush_cache_all(void);
+void flush_cache_mm(struct mm_struct *mm);
 
-static inline void flush_cache_all(void)
-{
-	on_each_cpu(cacheflush_h_tmp_function, NULL, 1, 1);
-}
+#define flush_kernel_dcache_range(start,size) \
+	flush_kernel_dcache_range_asm((start), (start)+(size));
 
 #define flush_cache_vmap(start, end) flush_cache_all()
 #define flush_cache_vunmap(start, end) flush_cache_all()
 
 extern int parisc_cache_flush_threshold;
+void parisc_setup_cache_timing(void);
 
-static inline void
-flush_user_dcache_range(unsigned long start, unsigned long end)
-{
-	if ((end - start) < parisc_cache_flush_threshold)
-		flush_user_dcache_range_asm(start,end);
-	else
-		flush_data_cache();
-}
-
-static inline void
-flush_user_icache_range(unsigned long start, unsigned long end)
-{
-	if ((end - start) < parisc_cache_flush_threshold)
-		flush_user_icache_range_asm(start,end);
-	else
-		flush_instruction_cache();
-}
 
 extern void flush_dcache_page(struct page *page);

@@ -63,9 +47,15 @@ extern void flush_dcache_page(struct page *page);
 #define flush_dcache_mmap_unlock(mapping) \
 	write_unlock_irq(&(mapping)->tree_lock)
 
-#define flush_icache_page(vma,page)	do { flush_kernel_dcache_page(page); flush_kernel_icache_page(page_address(page)); } while (0)
+#define flush_icache_page(vma,page)	do {		\
+	flush_kernel_dcache_page(page);			\
+	flush_kernel_icache_page(page_address(page));	\
+} while (0)
 
-#define flush_icache_range(s,e)		do { flush_kernel_dcache_range_asm(s,e); flush_kernel_icache_range_asm(s,e); } while (0)
+#define flush_icache_range(s,e)		do {		\
+	flush_kernel_dcache_range_asm(s,e);		\
+	flush_kernel_icache_range_asm(s,e);		\
+} while (0)
 
 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
 do { \

@@ -80,118 +70,17 @@ do { \
 	memcpy(dst, src, len); \
 } while (0)
 
-static inline void flush_cache_range(struct vm_area_struct *vma,
-		unsigned long start, unsigned long end)
-{
-	int sr3;
-
-	if (!vma->vm_mm->context) {
-		BUG();
-		return;
-	}
-
-	sr3 = mfsp(3);
-	if (vma->vm_mm->context == sr3) {
-		flush_user_dcache_range(start,end);
-		flush_user_icache_range(start,end);
-	} else {
-		flush_cache_all();
-	}
-}
-
-/* Simple function to work out if we have an existing address translation
- * for a user space vma. */
-static inline int translation_exists(struct vm_area_struct *vma,
-		unsigned long addr, unsigned long pfn)
-{
-	pgd_t *pgd = pgd_offset(vma->vm_mm, addr);
-	pmd_t *pmd;
-	pte_t pte;
-
-	if (pgd_none(*pgd))
-		return 0;
-
-	pmd = pmd_offset(pgd, addr);
-	if (pmd_none(*pmd) || pmd_bad(*pmd))
-		return 0;
-
-	/* We cannot take the pte lock here: flush_cache_page is usually
-	 * called with pte lock already held.  Whereas flush_dcache_page
-	 * takes flush_dcache_mmap_lock, which is lower in the hierarchy:
-	 * the vma itself is secure, but the pte might come or go racily. */
-	pte = *pte_offset_map(pmd, addr);
-	/* But pte_unmap() does nothing on this architecture */
-
-	/* Filter out coincidental file entries and swap entries */
-	if (!(pte_val(pte) & (_PAGE_FLUSH|_PAGE_PRESENT)))
-		return 0;
-
-	return pte_pfn(pte) == pfn;
-}
-
-/* Private function to flush a page from the cache of a non-current
- * process.  cr25 contains the Page Directory of the current user
- * process; we're going to hijack both it and the user space %sr3 to
- * temporarily make the non-current process current.  We have to do
- * this because cache flushing may cause a non-access tlb miss which
- * the handlers have to fill in from the pgd of the non-current
- * process. */
-static inline void flush_user_cache_page_non_current(struct vm_area_struct *vma,
-		unsigned long vmaddr)
-{
-	/* save the current process space and pgd */
-	unsigned long space = mfsp(3), pgd = mfctl(25);
-
-	/* we don't mind taking interrupts since they may not
-	 * do anything with user space, but we can't
-	 * be preempted here */
-	preempt_disable();
-
-	/* make us current */
-	mtctl(__pa(vma->vm_mm->pgd), 25);
-	mtsp(vma->vm_mm->context, 3);
-
-	flush_user_dcache_page(vmaddr);
-	if (vma->vm_flags & VM_EXEC)
-		flush_user_icache_page(vmaddr);
-
-	/* put the old current process back */
-	mtsp(space, 3);
-	mtctl(pgd, 25);
-	preempt_enable();
-}
-
-static inline void __flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr)
-{
-	if (likely(vma->vm_mm->context == mfsp(3))) {
-		flush_user_dcache_page(vmaddr);
-		if (vma->vm_flags & VM_EXEC)
-			flush_user_icache_page(vmaddr);
-	} else {
-		flush_user_cache_page_non_current(vma, vmaddr);
-	}
-}
-
-static inline void
-flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn)
-{
-	BUG_ON(!vma->vm_mm->context);
-
-	if (likely(translation_exists(vma, vmaddr, pfn)))
-		__flush_cache_page(vma, vmaddr);
-}
-
+void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn);
+
+void flush_cache_range(struct vm_area_struct *vma,
+		unsigned long start, unsigned long end);
+
+#define ARCH_HAS_FLUSH_ANON_PAGE
 static inline void
 flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
 {
 	if (PageAnon(page))
 		flush_user_dcache_page(vmaddr);
 }
-#define ARCH_HAS_FLUSH_ANON_PAGE
 
 #define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
 void flush_kernel_dcache_page_addr(void *addr);
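flush_cache_all() and flush_cache_mm(), which this header now only declares, live out of line in cache.c, where flush_cache_all() broadcasts flush_cache_all_local() to every CPU through on_each_cpu(..., 1, 1) and waits for completion. As a loose userspace analogy of that broadcast-and-wait pattern, assuming Linux with the GNU pthread affinity extensions (nothing below is a kernel API):

#define _GNU_SOURCE
#include <pthread.h>
#include <sched.h>
#include <stdio.h>
#include <unistd.h>

/* stand-in for flush_cache_all_local(): pinned so it runs on one CPU */
static void *flush_local_model(void *arg)
{
	long cpu = (long)arg;
	cpu_set_t set;

	CPU_ZERO(&set);
	CPU_SET(cpu, &set);
	pthread_setaffinity_np(pthread_self(), sizeof(set), &set);
	printf("local flush on cpu %d\n", sched_getcpu());
	return NULL;
}

/* rough analogue of on_each_cpu(cacheflush_h_tmp_function, NULL, 1, 1) */
int main(void)
{
	enum { MAXCPU = 64 };
	pthread_t tid[MAXCPU];
	long ncpus = sysconf(_SC_NPROCESSORS_ONLN);

	if (ncpus > MAXCPU)
		ncpus = MAXCPU;
	for (long c = 0; c < ncpus; c++)
		pthread_create(&tid[c], NULL, flush_local_model, (void *)c);
	for (long c = 0; c < ncpus; c++)
		pthread_join(tid[c], NULL);	/* the "wait" flag: all done */
	return 0;
}

The joins correspond to the final wait argument of the real on_each_cpu() call: flush_cache_all() must not return before every CPU has finished its local flush.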
include/asm-parisc/tlbflush.h  (+4 −26)

@@ -71,33 +71,11 @@ static inline void flush_tlb_page(struct vm_area_struct *vma,
 	purge_tlb_end();
 }
 
-static inline void flush_tlb_range(struct vm_area_struct *vma,
-	unsigned long start, unsigned long end)
-{
-	unsigned long npages;
-
-	npages = ((end - (start & PAGE_MASK)) + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
-	if (npages >= 512)  /* 2MB of space: arbitrary, should be tuned */
-		flush_tlb_all();
-	else {
-		mtsp(vma->vm_mm->context, 1);
-		purge_tlb_start();
-		if (split_tlb) {
-			while (npages--) {
-				pdtlb(start);
-				pitlb(start);
-				start += PAGE_SIZE;
-			}
-		} else {
-			while (npages--) {
-				pdtlb(start);
-				start += PAGE_SIZE;
-			}
-		}
-		purge_tlb_end();
-	}
-}
+void __flush_tlb_range(unsigned long sid,
+	unsigned long start, unsigned long end);
+
+#define flush_tlb_range(vma,start,end) __flush_tlb_range((vma)->vm_mm->context,start,end)
 
-#define flush_tlb_kernel_range(start, end) flush_tlb_all()
+#define flush_tlb_kernel_range(start, end) __flush_tlb_range(0,start,end)
 
 #endif
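The sizing heuristic that flush_tlb_range() now shares with flush_tlb_kernel_range() via __flush_tlb_range() is easy to check in isolation. A standalone model of just that arithmetic, keeping the 512-page cutoff the code itself flags as arbitrary:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

/* models __flush_tlb_range()'s purge-vs-flush-all decision only */
static void flush_decision(unsigned long start, unsigned long end)
{
	unsigned long npages =
		((end - (start & PAGE_MASK)) + (PAGE_SIZE - 1)) >> PAGE_SHIFT;

	if (npages >= 512)	/* 2MB of space: same arbitrary cutoff */
		printf("[%#lx,%#lx) -> %lu pages: flush_tlb_all()\n",
		       start, end, npages);
	else
		printf("[%#lx,%#lx) -> %lu pages: per-page pdtlb/pitlb purge\n",
		       start, end, npages);
}

int main(void)
{
	flush_decision(0x1000, 0x2000);		/* one page */
	flush_decision(0x1fff, 0x2001);		/* straddles a boundary: two */
	flush_decision(0x0, 512 * PAGE_SIZE);	/* exactly at the cutoff */
	return 0;
}

Note the rounding: the range is first aligned down to a page boundary and then rounded up, so a two-byte range straddling a boundary counts as two pages and both TLB entries get purged.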