arch/x86/include/asm/time.h (+0 −2)

```diff
@@ -57,6 +57,4 @@ extern void time_init(void);
 
 #endif /* CONFIG_PARAVIRT */
 
-extern unsigned long __init calibrate_cpu(void);
-
 #endif /* _ASM_X86_TIME_H */
```
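This header change is the visible half of the cleanup: once the only definition of calibrate_cpu() is static inside tsc.c (with a static inline stub on 32-bit), no other translation unit can reference the symbol, so the cross-file prototype has to go. A minimal sketch of the linkage rule, with hypothetical names standing in for the kernel's:

```c
/* One-file sketch: internal linkage means no header prototype is
 * needed. calibrate_cpu() here is a hypothetical stand-in. */
#include <stdio.h>

static unsigned long calibrate_cpu(void)
{
	return 2800000;		/* pretend kHz */
}

int main(void)
{
	/* Callable only within this translation unit; an extern
	 * declaration in another file would fail at link time, which
	 * is why the patch drops the one in asm/time.h. */
	printf("%lu kHz\n", calibrate_cpu());
	return 0;
}
```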
arch/x86/kernel/time_32.c (+0 −1)

```diff
@@ -21,7 +21,6 @@
 #include <asm/timer.h>
 #include <asm/hpet.h>
 #include <asm/time.h>
-#include <asm/nmi.h>
 
 #if defined(CONFIG_X86_32) && defined(CONFIG_X86_IO_APIC)
 int timer_ack;
```
arch/x86/kernel/time_64.c (+0 −51)

```diff
@@ -21,7 +21,6 @@
 #include <asm/timer.h>
 #include <asm/hpet.h>
 #include <asm/time.h>
-#include <asm/nmi.h>
 
 #if defined(CONFIG_X86_32) && defined(CONFIG_X86_IO_APIC)
 int timer_ack;
@@ -84,56 +83,6 @@ static irqreturn_t timer_interrupt(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 
-/*
- * calibrate_cpu is used on systems with fixed rate TSCs to determine
- * processor frequency
- */
-#define TICK_COUNT 100000000
-
-unsigned long __init calibrate_cpu(void)
-{
-	int tsc_start, tsc_now;
-	int i, no_ctr_free;
-	unsigned long evntsel3 = 0, pmc3 = 0, pmc_now = 0;
-	unsigned long flags;
-
-	for (i = 0; i < 4; i++)
-		if (avail_to_resrv_perfctr_nmi_bit(i))
-			break;
-	no_ctr_free = (i == 4);
-	if (no_ctr_free) {
-		WARN(1, KERN_WARNING "Warning: AMD perfctrs busy ... "
-		     "cpu_khz value may be incorrect.\n");
-		i = 3;
-		rdmsrl(MSR_K7_EVNTSEL3, evntsel3);
-		wrmsrl(MSR_K7_EVNTSEL3, 0);
-		rdmsrl(MSR_K7_PERFCTR3, pmc3);
-	} else {
-		reserve_perfctr_nmi(MSR_K7_PERFCTR0 + i);
-		reserve_evntsel_nmi(MSR_K7_EVNTSEL0 + i);
-	}
-	local_irq_save(flags);
-	/* start measuring cycles, incrementing from 0 */
-	wrmsrl(MSR_K7_PERFCTR0 + i, 0);
-	wrmsrl(MSR_K7_EVNTSEL0 + i, 1 << 22 | 3 << 16 | 0x76);
-	rdtscl(tsc_start);
-	do {
-		rdmsrl(MSR_K7_PERFCTR0 + i, pmc_now);
-		tsc_now = get_cycles();
-	} while ((tsc_now - tsc_start) < TICK_COUNT);
-	local_irq_restore(flags);
-
-	if (no_ctr_free) {
-		wrmsrl(MSR_K7_EVNTSEL3, 0);
-		wrmsrl(MSR_K7_PERFCTR3, pmc3);
-		wrmsrl(MSR_K7_EVNTSEL3, evntsel3);
-	} else {
-		release_perfctr_nmi(MSR_K7_PERFCTR0 + i);
-		release_evntsel_nmi(MSR_K7_EVNTSEL0 + i);
-	}
-
-	return pmc_now * tsc_khz / (tsc_now - tsc_start);
-}
-
 static struct irqaction irq0 = {
 	.handler = timer_interrupt,
 	.flags = IRQF_DISABLED | IRQF_NOBALANCING | IRQF_IRQPOLL | IRQF_TIMER,
```
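The 50 lines removed here are not lost; they reappear below in tsc.c, made static. What the function does is a cross-calibration: program a K7 performance counter to count event 0x76 (CPU clocks not halted, i.e. the real core clock), busy-wait until the fixed-rate TSC has advanced TICK_COUNT (10^8) ticks, then scale the already-known tsc_khz by the ratio of the two deltas, cpu_khz = pmc_delta * tsc_khz / tsc_delta. For readers without ring-0 access, the same ratio trick can be sketched in plain userspace C, with CLOCK_MONOTONIC_RAW standing in as the known-rate clock and the TSC as the one being calibrated; this is an illustrative stand-in, not kernel code:

```c
/*
 * Userspace sketch of the calibrate_cpu() ratio trick (Linux, x86-64,
 * GCC/Clang). The known-rate clock here is CLOCK_MONOTONIC_RAW rather
 * than the TSC, and the clock being measured is the TSC rather than a
 * perfctr; the arithmetic is the same:
 *     unknown_khz = unknown_delta * known_khz / known_delta
 */
#include <stdio.h>
#include <stdint.h>
#include <time.h>
#include <x86intrin.h>			/* __rdtsc() */

static uint64_t ns_since(const struct timespec *t0)
{
	struct timespec t1;

	clock_gettime(CLOCK_MONOTONIC_RAW, &t1);
	return (t1.tv_sec - t0->tv_sec) * 1000000000ull
	       + (t1.tv_nsec - t0->tv_nsec);
}

int main(void)
{
	struct timespec t0;
	uint64_t c0, cycles, ns;

	clock_gettime(CLOCK_MONOTONIC_RAW, &t0);
	c0 = __rdtsc();

	/* Spin ~100 ms so edge overhead and clock granularity amortize
	 * away - the same reason the kernel waits for TICK_COUNT ticks. */
	while ((ns = ns_since(&t0)) < 100000000ull)
		;
	cycles = __rdtsc() - c0;

	/* cycles per ns, times 10^6, gives kHz */
	printf("TSC ~ %llu kHz\n",
	       (unsigned long long)(cycles * 1000000ull / ns));
	return 0;
}
```

The kernel variant only runs when X86_FEATURE_CONSTANT_TSC is set and the vendor is AMD (see the tsc_init() hunk below), since the whole scheme assumes a fixed-rate TSC and K7-style perfctr MSRs.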
arch/x86/kernel/tsc.c (+55 −2)

```diff
@@ -17,6 +17,7 @@
 #include <asm/time.h>
 #include <asm/delay.h>
 #include <asm/hypervisor.h>
+#include <asm/nmi.h>
 
 unsigned int __read_mostly cpu_khz;	/* TSC clocks / usec, not used here */
 EXPORT_SYMBOL(cpu_khz);
@@ -852,6 +853,60 @@ static void __init init_tsc_clocksource(void)
 	clocksource_register(&clocksource_tsc);
 }
 
+#ifdef CONFIG_X86_64
+/*
+ * calibrate_cpu is used on systems with fixed rate TSCs to determine
+ * processor frequency
+ */
+#define TICK_COUNT 100000000
+
+static unsigned long __init calibrate_cpu(void)
+{
+	int tsc_start, tsc_now;
+	int i, no_ctr_free;
+	unsigned long evntsel3 = 0, pmc3 = 0, pmc_now = 0;
+	unsigned long flags;
+
+	for (i = 0; i < 4; i++)
+		if (avail_to_resrv_perfctr_nmi_bit(i))
+			break;
+	no_ctr_free = (i == 4);
+	if (no_ctr_free) {
+		WARN(1, KERN_WARNING "Warning: AMD perfctrs busy ... "
+		     "cpu_khz value may be incorrect.\n");
+		i = 3;
+		rdmsrl(MSR_K7_EVNTSEL3, evntsel3);
+		wrmsrl(MSR_K7_EVNTSEL3, 0);
+		rdmsrl(MSR_K7_PERFCTR3, pmc3);
+	} else {
+		reserve_perfctr_nmi(MSR_K7_PERFCTR0 + i);
+		reserve_evntsel_nmi(MSR_K7_EVNTSEL0 + i);
+	}
+	local_irq_save(flags);
+	/* start measuring cycles, incrementing from 0 */
+	wrmsrl(MSR_K7_PERFCTR0 + i, 0);
+	wrmsrl(MSR_K7_EVNTSEL0 + i, 1 << 22 | 3 << 16 | 0x76);
+	rdtscl(tsc_start);
+	do {
+		rdmsrl(MSR_K7_PERFCTR0 + i, pmc_now);
+		tsc_now = get_cycles();
+	} while ((tsc_now - tsc_start) < TICK_COUNT);
+	local_irq_restore(flags);
+
+	if (no_ctr_free) {
+		wrmsrl(MSR_K7_EVNTSEL3, 0);
+		wrmsrl(MSR_K7_PERFCTR3, pmc3);
+		wrmsrl(MSR_K7_EVNTSEL3, evntsel3);
+	} else {
+		release_perfctr_nmi(MSR_K7_PERFCTR0 + i);
+		release_evntsel_nmi(MSR_K7_EVNTSEL0 + i);
+	}
+
+	return pmc_now * tsc_khz / (tsc_now - tsc_start);
+}
+#else
+static inline unsigned long calibrate_cpu(void) { return cpu_khz; }
+#endif
+
 void __init tsc_init(void)
 {
 	u64 lpj;
@@ -870,11 +925,9 @@ void __init tsc_init(void)
 		return;
 	}
 
-#ifdef CONFIG_X86_64
 	if (cpu_has(&boot_cpu_data, X86_FEATURE_CONSTANT_TSC) &&
 			(boot_cpu_data.x86_vendor == X86_VENDOR_AMD))
 		cpu_khz = calibrate_cpu();
-#endif
 
 	printk("Detected %lu.%03lu MHz processor.\n",
 			(unsigned long)cpu_khz / 1000,
```
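The one magic number in the moved code is the event-select value: `1 << 22 | 3 << 16 | 0x76` packs three fields of the AMD K7/K8 EVNTSEL MSR. Bit 22 is the enable bit, bits 17:16 request counting in both OS (ring 0) and USR (ring 3) mode, and bits 7:0 select event 0x76, CPU clocks not halted. A small host-side decoder of that layout; the macro names are invented here for readability and are not kernel identifiers:

```c
/*
 * Sketch of the EVNTSEL value the patch programs into
 * MSR_K7_EVNTSEL0 + i. Field layout follows the AMD K7/K8 docs;
 * the macro names are illustrative, not kernel APIs.
 */
#include <stdio.h>
#include <stdint.h>

#define K7_EVNTSEL_EVENT(e)	((uint64_t)(e) & 0xff)	/* bits 7:0 */
#define K7_EVNTSEL_USR		(1ull << 16)	/* count in ring 3 */
#define K7_EVNTSEL_OS		(1ull << 17)	/* count in ring 0 */
#define K7_EVNTSEL_ENABLE	(1ull << 22)	/* start the counter */

/* 0x76: "CPU clocks not halted" - ticks at the actual core clock,
 * which is what calibrate_cpu() compares against the fixed-rate TSC. */
#define EV_CPU_CLK_UNHALTED	0x76

int main(void)
{
	uint64_t evntsel = K7_EVNTSEL_ENABLE | K7_EVNTSEL_OS |
			   K7_EVNTSEL_USR |
			   K7_EVNTSEL_EVENT(EV_CPU_CLK_UNHALTED);

	/* should match the literal in the patch: 1 << 22 | 3 << 16 | 0x76 */
	printf("evntsel = %#llx (%s)\n", (unsigned long long)evntsel,
	       evntsel == ((1 << 22) | (3 << 16) | 0x76) ? "ok" : "mismatch");
	return 0;
}
```

Strictly speaking the OS bit alone would suffice here, since the calibration loop runs entirely in ring 0 with interrupts off; `3 << 16` is simply the conventional "count in all rings" setting.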