arch/x86/kernel/process.c  +70 −0

@@ -45,6 +45,76 @@ void arch_task_cache_init(void)
 				  SLAB_PANIC, NULL);
 }
 
+/*
+ * Idle related variables and functions
+ */
+unsigned long boot_option_idle_override = 0;
+EXPORT_SYMBOL(boot_option_idle_override);
+
+/*
+ * Powermanagement idle function, if any..
+ */
+void (*pm_idle)(void);
+EXPORT_SYMBOL(pm_idle);
+
+#ifdef CONFIG_X86_32
+/*
+ * This halt magic was a workaround for ancient floppy DMA
+ * wreckage. It should be safe to remove.
+ */
+static int hlt_counter;
+void disable_hlt(void)
+{
+	hlt_counter++;
+}
+EXPORT_SYMBOL(disable_hlt);
+
+void enable_hlt(void)
+{
+	hlt_counter--;
+}
+EXPORT_SYMBOL(enable_hlt);
+
+static inline int hlt_use_halt(void)
+{
+	return (!hlt_counter && boot_cpu_data.hlt_works_ok);
+}
+#else
+static inline int hlt_use_halt(void)
+{
+	return 1;
+}
+#endif
+
+/*
+ * We use this if we don't have any better
+ * idle routine..
+ */
+void default_idle(void)
+{
+	if (hlt_use_halt()) {
+		current_thread_info()->status &= ~TS_POLLING;
+		/*
+		 * TS_POLLING-cleared state must be visible before we
+		 * test NEED_RESCHED:
+		 */
+		smp_mb();
+
+		if (!need_resched())
+			safe_halt();	/* enables interrupts racelessly */
+		else
+			local_irq_enable();
+		current_thread_info()->status |= TS_POLLING;
+	} else {
+		local_irq_enable();
+		/* loop is done by the caller */
+		cpu_relax();
+	}
+}
+#ifdef CONFIG_APM_MODULE
+EXPORT_SYMBOL(default_idle);
+#endif
+
 static void do_nothing(void *unused)
 {
 }

arch/x86/kernel/process_32.c  +0 −54

@@ -58,11 +58,6 @@ asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
 
-static int hlt_counter;
-
-unsigned long boot_option_idle_override = 0;
-EXPORT_SYMBOL(boot_option_idle_override);
-
 DEFINE_PER_CPU(struct task_struct *, current_task) = &init_task;
 EXPORT_PER_CPU_SYMBOL(current_task);

@@ -77,55 +72,6 @@ unsigned long thread_saved_pc(struct task_struct *tsk)
 	return ((unsigned long *)tsk->thread.sp)[3];
 }
 
-/*
- * Powermanagement idle function, if any..
- */
-void (*pm_idle)(void);
-EXPORT_SYMBOL(pm_idle);
-
-void disable_hlt(void)
-{
-	hlt_counter++;
-}
-
-EXPORT_SYMBOL(disable_hlt);
-
-void enable_hlt(void)
-{
-	hlt_counter--;
-}
-
-EXPORT_SYMBOL(enable_hlt);
-
-/*
- * We use this if we don't have any better
- * idle routine..
- */
-void default_idle(void)
-{
-	if (!hlt_counter && boot_cpu_data.hlt_works_ok) {
-		current_thread_info()->status &= ~TS_POLLING;
-		/*
-		 * TS_POLLING-cleared state must be visible before we
-		 * test NEED_RESCHED:
-		 */
-		smp_mb();
-
-		if (!need_resched())
-			safe_halt();	/* enables interrupts racelessly */
-		else
-			local_irq_enable();
-		current_thread_info()->status |= TS_POLLING;
-	} else {
-		local_irq_enable();
-		/* loop is done by the caller */
-		cpu_relax();
-	}
-}
-#ifdef CONFIG_APM_MODULE
-EXPORT_SYMBOL(default_idle);
-#endif
-
 #ifdef CONFIG_HOTPLUG_CPU
 #include <asm/nmi.h>
 
 /* We don't actually take CPU down, just spin without interrupts. */

arch/x86/kernel/process_64.c  +0 −28

@@ -56,15 +56,6 @@ asmlinkage extern void ret_from_fork(void);
 
 unsigned long kernel_thread_flags = CLONE_VM | CLONE_UNTRACED;
 
-unsigned long boot_option_idle_override = 0;
-EXPORT_SYMBOL(boot_option_idle_override);
-
-/*
- * Powermanagement idle function, if any..
- */
-void (*pm_idle)(void);
-EXPORT_SYMBOL(pm_idle);
-
 static ATOMIC_NOTIFIER_HEAD(idle_notifier);
 
 void idle_notifier_register(struct notifier_block *n)

@@ -94,25 +85,6 @@ void exit_idle(void)
 	__exit_idle();
 }
 
-/*
- * We use this if we don't have any better
- * idle routine..
- */
-void default_idle(void)
-{
-	current_thread_info()->status &= ~TS_POLLING;
-	/*
-	 * TS_POLLING-cleared state must be visible before we
-	 * test NEED_RESCHED:
-	 */
-	smp_mb();
-
-	if (!need_resched())
-		safe_halt();	/* enables interrupts racelessly */
-	else
-		local_irq_enable();
-	current_thread_info()->status |= TS_POLLING;
-}
-
 #ifdef CONFIG_HOTPLUG_CPU
 DECLARE_PER_CPU(int, cpu_state);
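For context: the consolidated default_idle() above is only the fallback half of the idle path. The architecture idle loop, cpu_idle() in process_32.c/process_64.c, is not touched by this diff; it calls whatever pm_idle points to (APM/ACPI setup code may install a routine there) and falls back to default_idle() when pm_idle is NULL. A minimal sketch of that caller, simplified from the idle loops of this kernel generation (the real loop's preemption and NO_HZ handling is omitted):

#include <linux/sched.h>	/* need_resched(), schedule() */
#include <linux/thread_info.h>	/* current_thread_info(), TS_POLLING */

/*
 * Simplified sketch of the idle loop that consumes pm_idle and
 * default_idle(); not part of this patch, and stripped of the real
 * loop's preempt/tick handling.
 */
void cpu_idle(void)
{
	current_thread_info()->status |= TS_POLLING;

	for (;;) {
		while (!need_resched()) {
			void (*idle)(void) = pm_idle;

			if (!idle)
				idle = default_idle;	/* fallback defined in process.c above */
			idle();
		}
		schedule();
	}
}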
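The disable_hlt()/enable_hlt() pair that survives under CONFIG_X86_32 lets a driver temporarily keep the idle loop out of HLT; historically the floppy driver bracketed its DMA activity this way, which is what the "ancient floppy DMA wreckage" comment refers to. A purely illustrative, hypothetical caller (the example_* names are placeholders, not code from this patch); the calls must balance, since hlt_counter is a plain counter:

/*
 * Hypothetical driver-side use of the hlt_counter interface kept in
 * process.c for CONFIG_X86_32. example_program_dma() and
 * example_wait_for_dma() are placeholders, not real kernel functions.
 */
static void example_floppy_style_transfer(void)
{
	disable_hlt();			/* hlt_counter++: idle loop stops using HLT */

	example_program_dma();		/* placeholder: start the DMA transfer */
	example_wait_for_dma();		/* placeholder: wait for it to complete */

	enable_hlt();			/* hlt_counter--: HLT allowed again once it reaches 0 */
}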