arch/x86/Kconfig  +7 −6

@@ -591,19 +591,20 @@ config IOMMU_HELPER
 config MAXSMP
 	bool "Configure Maximum number of SMP Processors and NUMA Nodes"
-	depends on X86_64 && SMP && BROKEN
+	depends on X86_64 && SMP && DEBUG_KERNEL && EXPERIMENTAL
+	select CPUMASK_OFFSTACK
 	default n
 	help
 	  Configure maximum number of CPUS and NUMA Nodes for this
 	  architecture.
 	  If unsure, say N.
 
 config NR_CPUS
-	int "Maximum number of CPUs (2-512)" if !MAXSMP
-	range 2 512
-	depends on SMP
+	int "Maximum number of CPUs" if SMP && !MAXSMP
+	range 2 512 if SMP && !MAXSMP
+	default "1" if !SMP
 	default "4096" if MAXSMP
-	default "32" if X86_NUMAQ || X86_SUMMIT || X86_BIGSMP || X86_ES7000
-	default "8"
+	default "32" if SMP && (X86_NUMAQ || X86_SUMMIT || X86_BIGSMP || X86_ES7000)
+	default "8" if SMP
 	help
 	  This allows you to specify the maximum number of CPUs which this
 	  kernel will support.  The maximum supported value is 512 and the
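Why MAXSMP now selects CPUMASK_OFFSTACK: a cpumask_t carries one bit per possible CPU, so raising NR_CPUS to 4096 makes every on-stack mask cost 512 bytes per call frame, which the kernel stack cannot afford. The sketch below is a minimal userspace model of that arithmetic; MY_NR_CPUS and struct my_cpumask are illustrative stand-ins, not kernel types.

/* Minimal userspace sketch: why passing cpumasks by value stops scaling.
 * MY_NR_CPUS stands in for the kernel's NR_CPUS; with MAXSMP it becomes
 * 4096, so an on-stack mask costs 4096/8 = 512 bytes per call frame. */
#include <stdio.h>
#include <limits.h>

#define MY_NR_CPUS 4096

struct my_cpumask {
	unsigned long bits[(MY_NR_CPUS + (CHAR_BIT * sizeof(unsigned long)) - 1)
			   / (CHAR_BIT * sizeof(unsigned long))];
};

int main(void)
{
	/* 512 bytes copied for every by-value call; a pointer costs 8. */
	printf("by value: %zu bytes, by pointer: %zu bytes\n",
	       sizeof(struct my_cpumask), sizeof(struct my_cpumask *));
	return 0;
}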
arch/x86/include/asm/bigsmp/apic.h  +23 −7

@@ -9,12 +9,12 @@ static inline int apic_id_registered(void)
 	return (1);
 }
 
-static inline cpumask_t target_cpus(void)
+static inline const cpumask_t *target_cpus(void)
 {
 #ifdef CONFIG_SMP
-	return cpu_online_map;
+	return &cpu_online_map;
 #else
-	return cpumask_of_cpu(0);
+	return &cpumask_of_cpu(0);
 #endif
 }
@@ -79,7 +79,7 @@ static inline int apicid_to_node(int logical_apicid)
 
 static inline int cpu_present_to_apicid(int mps_cpu)
 {
-	if (mps_cpu < NR_CPUS)
+	if (mps_cpu < nr_cpu_ids)
 		return (int) per_cpu(x86_bios_cpu_apicid, mps_cpu);
 
 	return BAD_APICID;
@@ -94,7 +94,7 @@ extern u8 cpu_2_logical_apicid[];
 /* Mapping from cpu number to logical apicid */
 static inline int cpu_to_logical_apicid(int cpu)
 {
-	if (cpu >= NR_CPUS)
+	if (cpu >= nr_cpu_ids)
 		return BAD_APICID;
 	return cpu_physical_id(cpu);
 }
@@ -119,16 +119,32 @@ static inline int check_phys_apicid_present(int boot_cpu_physical_apicid)
 }
 
 /* As we are using single CPU as destination, pick only one CPU here */
-static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
+static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)
 {
 	int cpu;
 	int apicid;
 
-	cpu = first_cpu(cpumask);
+	cpu = first_cpu(*cpumask);
 	apicid = cpu_to_logical_apicid(cpu);
 	return apicid;
 }
 
+static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *cpumask,
+						  const struct cpumask *andmask)
+{
+	int cpu;
+
+	/*
+	 * We're using fixed IRQ delivery, can only return one phys APIC ID.
+	 * May as well be the first.
+	 */
+	cpu = cpumask_any_and(cpumask, andmask);
+	if (cpu < nr_cpu_ids)
+		return cpu_to_logical_apicid(cpu);
+
+	return BAD_APICID;
+}
+
 static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
 {
 	return cpuid_apic >> index_msb;
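The new cpu_mask_to_apicid_and() above returns the logical APIC ID of the first CPU present in both masks, or BAD_APICID when the intersection is empty (cpumask_any_and() yields a value >= nr_cpu_ids in that case). Below is a toy userspace model of that flow, using a plain unsigned long as the mask; first_set() stands in for cpumask_any_and() and the cpu-to-apicid mapping is taken as the identity, so none of this is the kernel implementation.

/* Toy model of the cpu_mask_to_apicid_and() flow above. */
#include <stdio.h>

#define BAD_APICID 0xFFu
static const unsigned nr_cpu_ids = 8;

static unsigned first_set(unsigned long mask)
{
	unsigned cpu;

	for (cpu = 0; cpu < nr_cpu_ids; cpu++)
		if (mask & (1UL << cpu))
			return cpu;
	return nr_cpu_ids;	/* empty intersection */
}

static unsigned mask_to_apicid_and(unsigned long cpumask, unsigned long andmask)
{
	unsigned cpu = first_set(cpumask & andmask);

	if (cpu < nr_cpu_ids)
		return cpu;	/* identity cpu->apicid mapping in this toy */
	return BAD_APICID;
}

int main(void)
{
	printf("0x%x\n", mask_to_apicid_and(0x0C, 0x0A)); /* CPUs {2,3} & {1,3} -> 3 */
	printf("0x%x\n", mask_to_apicid_and(0x03, 0x0C)); /* empty -> BAD_APICID */
	return 0;
}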
arch/x86/include/asm/bigsmp/ipi.h  +5 −8

 #ifndef __ASM_MACH_IPI_H
 #define __ASM_MACH_IPI_H
 
-void send_IPI_mask_sequence(cpumask_t mask, int vector);
+void send_IPI_mask_sequence(const struct cpumask *mask, int vector);
+void send_IPI_mask_allbutself(const struct cpumask *mask, int vector);
 
-static inline void send_IPI_mask(cpumask_t mask, int vector)
+static inline void send_IPI_mask(const struct cpumask *mask, int vector)
 {
 	send_IPI_mask_sequence(mask, vector);
 }
 
 static inline void send_IPI_allbutself(int vector)
 {
-	cpumask_t mask = cpu_online_map;
-	cpu_clear(smp_processor_id(), mask);
-	if (!cpus_empty(mask))
-		send_IPI_mask(mask, vector);
+	send_IPI_mask_allbutself(cpu_online_mask, vector);
 }
 
 static inline void send_IPI_all(int vector)
 {
-	send_IPI_mask(cpu_online_map, vector);
+	send_IPI_mask(cpu_online_mask, vector);
 }
 
 #endif /* __ASM_MACH_IPI_H */
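The send_IPI_allbutself() rewrite above (mirrored in the es7000 header further down) removes the on-stack copy of cpu_online_map: self-exclusion moves into send_IPI_mask_allbutself(), which can walk the shared online mask in place. A toy contrast of the two styles, with an unsigned long standing in for the cpumask and printf for the IPI; online_mask and self are illustrative, not kernel symbols.

/* Toy contrast of the old copy-and-clear style vs. the new in-place walk. */
#include <stdio.h>

static unsigned long online_mask = 0x0F;	/* CPUs 0-3 online */
static unsigned self = 2;			/* smp_processor_id() stand-in */

/* Old style: build a modified copy of the online mask (stack cost grows
 * with NR_CPUS), then send to the copy. */
static void allbutself_copy(int vector)
{
	unsigned long mask = online_mask;

	mask &= ~(1UL << self);
	if (mask)
		printf("IPI %d -> mask %#lx\n", vector, mask);
}

/* New style: walk the shared online mask in place, skipping self; no
 * mask-sized temporary is needed. */
static void allbutself_inplace(int vector)
{
	unsigned cpu;

	for (cpu = 0; cpu < 8 * sizeof(online_mask); cpu++)
		if ((online_mask & (1UL << cpu)) && cpu != self)
			printf("IPI %d -> cpu %u\n", vector, cpu);
}

int main(void)
{
	allbutself_copy(0x20);
	allbutself_inplace(0x20);
	return 0;
}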
arch/x86/include/asm/es7000/apic.h  +68 −18

@@ -9,14 +9,14 @@ static inline int apic_id_registered(void)
 	return (1);
 }
 
-static inline cpumask_t target_cpus_cluster(void)
+static inline const cpumask_t *target_cpus_cluster(void)
 {
-	return CPU_MASK_ALL;
+	return &CPU_MASK_ALL;
 }
 
-static inline cpumask_t target_cpus(void)
+static inline const cpumask_t *target_cpus(void)
 {
-	return cpumask_of_cpu(smp_processor_id());
+	return &cpumask_of_cpu(smp_processor_id());
 }
 
 #define APIC_DFR_VALUE_CLUSTER		(APIC_DFR_CLUSTER)
@@ -82,7 +82,8 @@ static inline void setup_apic_routing(void)
 	int apic = per_cpu(x86_bios_cpu_apicid, smp_processor_id());
 	printk("Enabling APIC mode:  %s. Using %d I/O APICs, target cpus %lx\n",
 		(apic_version[apic] == 0x14) ?
-		"Physical Cluster" : "Logical Cluster", nr_ioapics, cpus_addr(target_cpus())[0]);
+		"Physical Cluster" : "Logical Cluster",
+		nr_ioapics, cpus_addr(*target_cpus())[0]);
 }
 
 static inline int multi_timer_check(int apic, int irq)
@@ -100,7 +101,7 @@
 static inline int cpu_present_to_apicid(int mps_cpu)
 {
 	if (!mps_cpu)
 		return boot_cpu_physical_apicid;
-	else if (mps_cpu < NR_CPUS)
+	else if (mps_cpu < nr_cpu_ids)
 		return (int) per_cpu(x86_bios_cpu_apicid, mps_cpu);
 	else
 		return BAD_APICID;
@@ -120,7 +121,7 @@ extern u8 cpu_2_logical_apicid[];
 static inline int cpu_to_logical_apicid(int cpu)
 {
 #ifdef CONFIG_SMP
-	if (cpu >= NR_CPUS)
+	if (cpu >= nr_cpu_ids)
 		return BAD_APICID;
 	return (int)cpu_2_logical_apicid[cpu];
 #else
@@ -146,14 +147,15 @@ static inline int check_phys_apicid_present(int cpu_physical_apicid)
 	return (1);
 }
 
-static inline unsigned int cpu_mask_to_apicid_cluster(cpumask_t cpumask)
+static inline unsigned int
+cpu_mask_to_apicid_cluster(const struct cpumask *cpumask)
 {
 	int num_bits_set;
 	int cpus_found = 0;
 	int cpu;
 	int apicid;
 
-	num_bits_set = cpus_weight(cpumask);
+	num_bits_set = cpumask_weight(cpumask);
 	/* Return id to all */
 	if (num_bits_set == NR_CPUS)
 		return 0xFF;
@@ -161,10 +163,10 @@ static inline unsigned int cpu_mask_to_apicid_cluster(cpumask_t cpumask)
 	 * The cpus in the mask must all be on the apic cluster.  If are not
 	 * on the same apicid cluster return default value of TARGET_CPUS.
 	 */
-	cpu = first_cpu(cpumask);
+	cpu = cpumask_first(cpumask);
 	apicid = cpu_to_logical_apicid(cpu);
 	while (cpus_found < num_bits_set) {
-		if (cpu_isset(cpu, cpumask)) {
+		if (cpumask_test_cpu(cpu, cpumask)) {
 			int new_apicid = cpu_to_logical_apicid(cpu);
 			if (apicid_cluster(apicid) !=
 					apicid_cluster(new_apicid)){
@@ -179,14 +181,14 @@ static inline unsigned int cpu_mask_to_apicid_cluster(cpumask_t cpumask)
 	return apicid;
 }
 
-static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
+static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)
 {
 	int num_bits_set;
 	int cpus_found = 0;
 	int cpu;
 	int apicid;
 
-	num_bits_set = cpus_weight(cpumask);
+	num_bits_set = cpus_weight(*cpumask);
 	/* Return id to all */
 	if (num_bits_set == NR_CPUS)
 		return cpu_to_logical_apicid(0);
@@ -194,10 +196,10 @@ static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
 	 * The cpus in the mask must all be on the apic cluster.  If are not
 	 * on the same apicid cluster return default value of TARGET_CPUS.
 	 */
-	cpu = first_cpu(cpumask);
+	cpu = first_cpu(*cpumask);
 	apicid = cpu_to_logical_apicid(cpu);
 	while (cpus_found < num_bits_set) {
-		if (cpu_isset(cpu, cpumask)) {
+		if (cpu_isset(cpu, *cpumask)) {
 			int new_apicid = cpu_to_logical_apicid(cpu);
 			if (apicid_cluster(apicid) !=
 					apicid_cluster(new_apicid)){
@@ -212,6 +214,54 @@ static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
 	return apicid;
 }
 
+static inline unsigned int
+cpu_mask_to_apicid_and(const struct cpumask *cpumask,
+		       const struct cpumask *andmask)
+{
+	int num_bits_set;
+	int num_bits_set2;
+	int cpus_found = 0;
+	int cpu;
+	int apicid = 0;
+
+	num_bits_set = cpumask_weight(cpumask);
+	num_bits_set2 = cpumask_weight(andmask);
+	num_bits_set = min(num_bits_set, num_bits_set2);
+	/* Return id to all */
+	if (num_bits_set >= nr_cpu_ids)
+#if defined CONFIG_ES7000_CLUSTERED_APIC
+		return 0xFF;
+#else
+		return cpu_to_logical_apicid(0);
+#endif
+	/*
+	 * The cpus in the mask must all be on the apic cluster.  If are not
+	 * on the same apicid cluster return default value of TARGET_CPUS.
+	 */
+	cpu = cpumask_first_and(cpumask, andmask);
+	apicid = cpu_to_logical_apicid(cpu);
+	while (cpus_found < num_bits_set) {
+		if (cpumask_test_cpu(cpu, cpumask) &&
+		    cpumask_test_cpu(cpu, andmask)) {
+			int new_apicid = cpu_to_logical_apicid(cpu);
+			if (apicid_cluster(apicid) !=
+					apicid_cluster(new_apicid)) {
+				printk(KERN_WARNING
+					"%s: Not a valid mask!\n", __func__);
+#if defined CONFIG_ES7000_CLUSTERED_APIC
+				return 0xFF;
+#else
+				return cpu_to_logical_apicid(0);
+#endif
+			}
+			apicid = new_apicid;
+			cpus_found++;
+		}
+		cpu++;
+	}
+	return apicid;
+}
+
 static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
 {
 	return cpuid_apic >> index_msb;
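The es7000 cpu_mask_to_apicid*() helpers above all enforce the same invariant: every CPU in the mask must fall within one APIC cluster, where the cluster is (roughly) the high nibble of the logical APIC ID extracted by apicid_cluster(). Below is a toy userspace version of that walk; the apicid[] table is invented for illustration, and failure collapses to a single BAD_APICID instead of the CONFIG_ES7000_CLUSTERED_APIC-dependent fallback.

/* Toy version of the cluster-consistency walk in cpu_mask_to_apicid_and(). */
#include <stdio.h>

#define BAD_APICID 0xFFu
#define apicid_cluster(a) ((a) & 0xF0)	/* mirrors the ES7000 macro's intent */

static const unsigned apicid[] = { 0x01, 0x02, 0x11, 0x12 };	/* cpu -> logical id */

static unsigned mask_to_apicid(unsigned long mask)
{
	unsigned cluster = 0, id = 0;
	int seen = 0, cpu;

	for (cpu = 0; cpu < 4; cpu++) {
		if (!(mask & (1UL << cpu)))
			continue;
		id = apicid[cpu];
		if (seen && apicid_cluster(id) != cluster)
			return BAD_APICID;	/* mask spans clusters: reject */
		cluster = apicid_cluster(id);
		seen = 1;
	}
	return seen ? id : BAD_APICID;
}

int main(void)
{
	printf("%#x\n", mask_to_apicid(0x3));	/* CPUs 0,1: same cluster -> 0x2 */
	printf("%#x\n", mask_to_apicid(0x6));	/* CPUs 1,2: cross-cluster -> 0xff */
	return 0;
}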
arch/x86/include/asm/es7000/ipi.h  +5 −7

 #ifndef __ASM_ES7000_IPI_H
 #define __ASM_ES7000_IPI_H
 
-void send_IPI_mask_sequence(cpumask_t mask, int vector);
+void send_IPI_mask_sequence(const struct cpumask *mask, int vector);
+void send_IPI_mask_allbutself(const struct cpumask *mask, int vector);
 
-static inline void send_IPI_mask(cpumask_t mask, int vector)
+static inline void send_IPI_mask(const struct cpumask *mask, int vector)
 {
 	send_IPI_mask_sequence(mask, vector);
 }
 
 static inline void send_IPI_allbutself(int vector)
 {
-	cpumask_t mask = cpu_online_map;
-	cpu_clear(smp_processor_id(), mask);
-	if (!cpus_empty(mask))
-		send_IPI_mask(mask, vector);
+	send_IPI_mask_allbutself(cpu_online_mask, vector);
 }
 
 static inline void send_IPI_all(int vector)
 {
-	send_IPI_mask(cpu_online_map, vector);
+	send_IPI_mask(cpu_online_mask, vector);
 }
 
 #endif /* __ASM_ES7000_IPI_H */