arch/sparc64/kernel/head.S +30 −0

@@ -10,6 +10,7 @@
 #include <linux/config.h>
 #include <linux/version.h>
 #include <linux/errno.h>
+#include <linux/threads.h>
 #include <asm/thread_info.h>
 #include <asm/asi.h>
 #include <asm/pstate.h>
@@ -493,6 +494,35 @@ tlb_fixup_done:
 	call	prom_init
 	 mov	%l7, %o0		! OpenPROM cif handler
 
+	/* Initialize current_thread_info()->cpu as early as possible.
+	 * In order to do that accurately we have to patch up the get_cpuid()
+	 * assembler sequences.  And that, in turn, requires that we know
+	 * if we are on a Starfire box or not.  While we're here, patch up
+	 * the sun4v sequences as well.
+	 */
+	call	check_if_starfire
+	 nop
+	call	per_cpu_patch
+	 nop
+	call	sun4v_patch
+	 nop
+
+#ifdef CONFIG_SMP
+	call	hard_smp_processor_id
+	 nop
+	cmp	%o0, NR_CPUS
+	blu,pt	%xcc, 1f
+	 nop
+	call	boot_cpu_id_too_large
+	 nop
+	/* Not reached... */
+
+1:
+#else
+	mov	0, %o0
+#endif
+	stb	%o0, [%g6 + TI_CPU]
+
 	/* Off we go.... */
 	call	start_kernel
 	 nop
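In C terms, the early-boot sequence added to head.S does roughly the following (a readability sketch only, not the actual code; the real sequence runs in assembly before start_kernel(), with the cpu id in %o0 and the thread_info pointer in %g6):

	/* Sketch: C equivalent of the head.S additions above. */
	int cpu;

	check_if_starfire();	/* Starfire detection decides which get_cpuid() variant gets patched in */
	per_cpu_patch();	/* patch the get_cpuid() assembler sequences */
	sun4v_patch();		/* patch the sun4v instruction sequences */
#ifdef CONFIG_SMP
	cpu = hard_smp_processor_id();
	if (cpu >= NR_CPUS)
		boot_cpu_id_too_large(cpu);	/* prints a diagnostic and halts in the PROM; never returns */
#else
	cpu = 0;
#endif
	current_thread_info()->cpu = cpu;	/* the "stb %o0, [%g6 + TI_CPU]" store */

This is why setup.c below makes per_cpu_patch() and sun4v_patch() non-static and adds boot_cpu_id_too_large(): they must now be callable from assembly before setup_arch() runs.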
arch/sparc64/kernel/setup.c +11 −12

@@ -220,7 +220,7 @@ char reboot_command[COMMAND_LINE_SIZE];
 
 static struct pt_regs fake_swapper_regs = { { 0, }, 0, 0, 0, 0 };
 
-static void __init per_cpu_patch(void)
+void __init per_cpu_patch(void)
 {
 	struct cpuid_patch_entry *p;
 	unsigned long ver;
@@ -280,7 +280,7 @@ static void __init per_cpu_patch(void)
 	}
 }
 
-static void __init sun4v_patch(void)
+void __init sun4v_patch(void)
 {
 	struct sun4v_1insn_patch_entry *p1;
 	struct sun4v_2insn_patch_entry *p2;
@@ -315,6 +315,15 @@ static void __init sun4v_patch(void)
 	}
 }
 
+#ifdef CONFIG_SMP
+void __init boot_cpu_id_too_large(int cpu)
+{
+	prom_printf("Serious problem, boot cpu id (%d) >= NR_CPUS (%d)\n",
+		    cpu, NR_CPUS);
+	prom_halt();
+}
+#endif
+
 void __init setup_arch(char **cmdline_p)
 {
 	/* Initialize PROM console and command line. */
@@ -332,16 +341,6 @@ void __init setup_arch(char **cmdline_p)
 	conswitchp = &prom_con;
 #endif
 
-	/* Work out if we are starfire early on */
-	check_if_starfire();
-
-	/* Now we know enough to patch the get_cpuid sequences
-	 * used by trap code.
-	 */
-	per_cpu_patch();
-	sun4v_patch();
-
 	boot_flags_init(*cmdline_p);
 
 	idprom_init();
arch/sparc64/kernel/smp.c +3 −13

@@ -1264,7 +1264,6 @@ void __init smp_tick_init(void)
 	boot_cpu_id = hard_smp_processor_id();
 	current_tick_offset = timer_tick_offset;
 
-	cpu_set(boot_cpu_id, cpu_online_map);
 	prof_counter(boot_cpu_id) = prof_multiplier(boot_cpu_id) = 1;
 }
 
@@ -1345,18 +1344,6 @@ void __init smp_setup_cpu_possible_map(void)
 
 void __devinit smp_prepare_boot_cpu(void)
 {
-	int cpu = hard_smp_processor_id();
-
-	if (cpu >= NR_CPUS) {
-		prom_printf("Serious problem, boot cpu id >= NR_CPUS\n");
-		prom_halt();
-	}
-
-	current_thread_info()->cpu = cpu;
-	__local_per_cpu_offset = __per_cpu_offset(cpu);
-
-	cpu_set(smp_processor_id(), cpu_online_map);
-	cpu_set(smp_processor_id(), phys_cpu_present_map);
 }
 
 int __devinit __cpu_up(unsigned int cpu)
@@ -1433,4 +1420,7 @@ void __init setup_per_cpu_areas(void)
 
 	for (i = 0; i < NR_CPUS; i++, ptr += size)
 		memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
+
+	/* Setup %g5 for the boot cpu.  */
+	__local_per_cpu_offset = __per_cpu_offset(smp_processor_id());
 }
include/asm-generic/pgtable.h +1 −10

@@ -159,17 +159,8 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addres
 #define lazy_mmu_prot_update(pte)	do { } while (0)
 #endif
 
-#ifndef __HAVE_ARCH_MULTIPLE_ZERO_PAGE
+#ifndef __HAVE_ARCH_MOVE_PTE
 #define move_pte(pte, prot, old_addr, new_addr)	(pte)
-#else
-#define move_pte(pte, prot, old_addr, new_addr)				\
-({									\
-	pte_t newpte = (pte);						\
-	if (pte_present(pte) && pfn_valid(pte_pfn(pte)) &&		\
-	    pte_page(pte) == ZERO_PAGE(old_addr))			\
-		newpte = mk_pte(ZERO_PAGE(new_addr), (prot));		\
-	newpte;								\
-})
 #endif
 
 /*
include/asm-mips/pgtable.h +9 −1

@@ -70,7 +70,15 @@ extern unsigned long zero_page_mask;
 
 #define ZERO_PAGE(vaddr) \
 	(virt_to_page(empty_zero_page + (((unsigned long)(vaddr)) & zero_page_mask)))
 
-#define __HAVE_ARCH_MULTIPLE_ZERO_PAGE
+#define __HAVE_ARCH_MOVE_PTE
+#define move_pte(pte, prot, old_addr, new_addr)				\
+({									\
+	pte_t newpte = (pte);						\
+	if (pte_present(pte) && pfn_valid(pte_pfn(pte)) &&		\
+	    pte_page(pte) == ZERO_PAGE(old_addr))			\
+		newpte = mk_pte(ZERO_PAGE(new_addr), (prot));		\
+	newpte;								\
+})
 
 extern void paging_init(void);
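For context on the two pgtable.h changes: the generic move_pte() is now a no-op unless an architecture defines __HAVE_ARCH_MOVE_PTE and supplies its own version, which MIPS does above to keep its address-dependent (cache-coloured) zero pages correct across mremap(). The consumer is the mremap() page-moving path; the fragment below is a simplified sketch of the per-PTE step in move_ptes() in mm/mremap.c of this era (surrounding loop, locking, and error handling omitted):

	/* Sketch: how move_ptes() uses the move_pte() hook, simplified. */
	pte = ptep_clear_flush(vma, old_addr, old_pte);
	/* ZERO_PAGE can depend on the virtual address, so give the
	 * architecture a chance to remap it for the destination. */
	pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
	set_pte_at(mm, new_addr, new_pte, pte);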