Commit 255b4658 authored by Huacai Chen
Browse files

LoongArch: Fix the !CONFIG_SMP build



1, We assume that including arch/loongarch/include/asm/smp.h from
   include/linux/smp.h is valid and the reverse inclusion isn't. So remove
   the <linux/smp.h> include from arch/loongarch/include/asm/smp.h.
2, arch/loongarch/include/asm/smp.h is only needed when CONFIG_SMP,
   and setup.c includes it only because it needs plat_smp_setup(). So,
   reorganize setup.c & smp.h, and then remove <asm/smp.h> in setup.c.
3, Fix cacheinfo.c and percpu.h build error by adding the missing header
   files when !CONFIG_SMP.
4, Fix acpi.c build error by adding CONFIG_SMP guards.
5, Move irq_stat definition from smp.c to irq.c and fix its declaration.
6, Select CONFIG_SMP for CONFIG_NUMA, similar as other architectures do.

Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
parent f2906aa8
Loading
Loading
Loading
Loading
+1 −0
Original line number Diff line number Diff line
@@ -343,6 +343,7 @@ config NR_CPUS

config NUMA
	bool "NUMA Support"
	select SMP
	select ACPI_NUMA if ACPI
	help
	  Say Y to compile the kernel with NUMA (Non-Uniform Memory Access)
+1 −1
Original line number Diff line number Diff line
@@ -19,7 +19,7 @@ typedef struct {
	unsigned int __softirq_pending;
} ____cacheline_aligned irq_cpustat_t;

DECLARE_PER_CPU_ALIGNED(irq_cpustat_t, irq_stat);
DECLARE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);

#define __ARCH_IRQ_STAT

+1 −0
Original line number Diff line number Diff line
@@ -6,6 +6,7 @@
#define __ASM_PERCPU_H

#include <asm/cmpxchg.h>
#include <asm/loongarch.h>

/* Use r21 for fast access */
register unsigned long __my_cpu_offset __asm__("$r21");
+7 −16
Original line number Diff line number Diff line
@@ -9,10 +9,16 @@
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/linkage.h>
#include <linux/smp.h>
#include <linux/threads.h>
#include <linux/cpumask.h>

extern int smp_num_siblings;
extern int num_processors;
extern int disabled_cpus;
extern cpumask_t cpu_sibling_map[];
extern cpumask_t cpu_core_map[];
extern cpumask_t cpu_foreign_map[];

void loongson3_smp_setup(void);
void loongson3_prepare_cpus(unsigned int max_cpus);
void loongson3_boot_secondary(int cpu, struct task_struct *idle);
@@ -25,26 +31,11 @@ int loongson3_cpu_disable(void);
void loongson3_cpu_die(unsigned int cpu);
#endif

#ifdef CONFIG_SMP

static inline void plat_smp_setup(void)
{
	loongson3_smp_setup();
}

#else /* !CONFIG_SMP */

static inline void plat_smp_setup(void) { }

#endif /* !CONFIG_SMP */

extern int smp_num_siblings;
extern int num_processors;
extern int disabled_cpus;
extern cpumask_t cpu_sibling_map[];
extern cpumask_t cpu_core_map[];
extern cpumask_t cpu_foreign_map[];

static inline int raw_smp_processor_id(void)
{
#if defined(__VDSO__)
+4 −0
Original line number Diff line number Diff line
@@ -138,6 +138,7 @@ void __init acpi_boot_table_init(void)
	}
}

#ifdef CONFIG_SMP
static int set_processor_mask(u32 id, u32 flags)
{

@@ -166,15 +167,18 @@ static int set_processor_mask(u32 id, u32 flags)

	return cpu;
}
#endif

static void __init acpi_process_madt(void)
{
#ifdef CONFIG_SMP
	int i;

	for (i = 0; i < NR_CPUS; i++) {
		__cpu_number_map[i] = -1;
		__cpu_logical_map[i] = -1;
	}
#endif

	loongson_sysconf.nr_cpus = num_processors;
}
Loading