drivers/idle/intel_idle.c  +58 −49

@@ -46,8 +46,6 @@
  * to avoid complications with the lapic timer workaround.
  * Have not seen issues with suspend, but may need same workaround here.
  *
- * There is currently no kernel-based automatic probing/loading mechanism
- * if the driver is built as a module.
  */

 /* un-comment DEBUG to enable pr_debug() statements */

@@ -60,7 +58,7 @@
 #include <linux/sched.h>
 #include <linux/notifier.h>
 #include <linux/cpu.h>
-#include <linux/module.h>
+#include <linux/moduleparam.h>
 #include <asm/cpu_device_id.h>
 #include <asm/intel-family.h>
 #include <asm/mwait.h>

@@ -828,6 +826,35 @@ static struct cpuidle_state bxt_cstates[] = {
 		.enter = NULL }
 };

+static struct cpuidle_state dnv_cstates[] = {
+	{
+		.name = "C1-DNV",
+		.desc = "MWAIT 0x00",
+		.flags = MWAIT2flg(0x00),
+		.exit_latency = 2,
+		.target_residency = 2,
+		.enter = &intel_idle,
+		.enter_freeze = intel_idle_freeze, },
+	{
+		.name = "C1E-DNV",
+		.desc = "MWAIT 0x01",
+		.flags = MWAIT2flg(0x01),
+		.exit_latency = 10,
+		.target_residency = 20,
+		.enter = &intel_idle,
+		.enter_freeze = intel_idle_freeze, },
+	{
+		.name = "C6-DNV",
+		.desc = "MWAIT 0x20",
+		.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
+		.exit_latency = 50,
+		.target_residency = 500,
+		.enter = &intel_idle,
+		.enter_freeze = intel_idle_freeze, },
+	{
+		.enter = NULL }
+};
+
 /**
  * intel_idle
  * @dev: cpuidle_device

@@ -1017,6 +1044,11 @@ static const struct idle_cpu idle_cpu_bxt = {
 	.disable_promotion_to_c1e = true,
 };

+static const struct idle_cpu idle_cpu_dnv = {
+	.state_table = dnv_cstates,
+	.disable_promotion_to_c1e = true,
+};
+
 #define ICPU(model, cpu) \
 	{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_MWAIT, (unsigned long)&cpu }

@@ -1053,9 +1085,9 @@ static const struct x86_cpu_id intel_idle_ids[] __initconst = {
 	ICPU(INTEL_FAM6_SKYLAKE_X,		idle_cpu_skx),
 	ICPU(INTEL_FAM6_XEON_PHI_KNL,		idle_cpu_knl),
 	ICPU(INTEL_FAM6_ATOM_GOLDMONT,		idle_cpu_bxt),
+	ICPU(INTEL_FAM6_ATOM_DENVERTON,		idle_cpu_dnv),
 	{}
 };
-MODULE_DEVICE_TABLE(x86cpu, intel_idle_ids);

 /*
  * intel_idle_probe()

@@ -1155,7 +1187,10 @@ static unsigned long long irtl_2_usec(unsigned long long irtl)
 {
 	unsigned long long ns;

-	ns = irtl_ns_units[(irtl >> 10) & 0x3];
+	if (!irtl)
+		return 0;
+
+	ns = irtl_ns_units[(irtl >> 10) & 0x7];

 	return div64_u64((irtl & 0x3FF) * ns, 1000);
 }

@@ -1168,43 +1203,39 @@ static unsigned long long irtl_2_usec(unsigned long long irtl)
 static void bxt_idle_state_table_update(void)
 {
 	unsigned long long msr;
+	unsigned int usec;

 	rdmsrl(MSR_PKGC6_IRTL, msr);
-	if (msr) {
-		unsigned int usec = irtl_2_usec(msr);
-
+	usec = irtl_2_usec(msr);
+	if (usec) {
 		bxt_cstates[2].exit_latency = usec;
 		bxt_cstates[2].target_residency = usec;
 	}

 	rdmsrl(MSR_PKGC7_IRTL, msr);
-	if (msr) {
-		unsigned int usec = irtl_2_usec(msr);
-
+	usec = irtl_2_usec(msr);
+	if (usec) {
 		bxt_cstates[3].exit_latency = usec;
 		bxt_cstates[3].target_residency = usec;
 	}

 	rdmsrl(MSR_PKGC8_IRTL, msr);
-	if (msr) {
-		unsigned int usec = irtl_2_usec(msr);
-
+	usec = irtl_2_usec(msr);
+	if (usec) {
 		bxt_cstates[4].exit_latency = usec;
 		bxt_cstates[4].target_residency = usec;
 	}

 	rdmsrl(MSR_PKGC9_IRTL, msr);
-	if (msr) {
-		unsigned int usec = irtl_2_usec(msr);
-
+	usec = irtl_2_usec(msr);
+	if (usec) {
 		bxt_cstates[5].exit_latency = usec;
 		bxt_cstates[5].target_residency = usec;
 	}

 	rdmsrl(MSR_PKGC10_IRTL, msr);
-	if (msr) {
-		unsigned int usec = irtl_2_usec(msr);
-
+	usec = irtl_2_usec(msr);
+	if (usec) {
 		bxt_cstates[6].exit_latency = usec;
 		bxt_cstates[6].target_residency = usec;
 	}
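A worked example may help with the two IRTL hunks above. An IRTL (interrupt response time limit) register encodes a 10-bit limit value in bits 9:0 and a 3-bit time-unit selector in bits 12:10, which is why the unit mask grows from 0x3 to 0x7, and an all-zero register now short-circuits to 0. The userspace sketch below decodes a sample value; the irtl_ns_units table is assumed to mirror the driver's, and the sample register value 0x8F4 is invented for illustration (the kernel uses div64_u64() for the division, plain division suffices here).

/* Hypothetical userspace re-implementation of irtl_2_usec(), for illustration only. */
#include <stdio.h>

/* Assumed to mirror the driver's table: unit sizes in nanoseconds,
 * indexed by the 3-bit field in IRTL bits 12:10. */
static const unsigned long long irtl_ns_units[] = {
	1, 32, 1024, 32768, 1048576, 33554432, 0, 0 };

static unsigned long long irtl_2_usec(unsigned long long irtl)
{
	unsigned long long ns;

	if (!irtl)	/* all-zero register: no limit programmed, report 0 */
		return 0;

	ns = irtl_ns_units[(irtl >> 10) & 0x7];	/* bits 12:10 select the time unit */

	return (irtl & 0x3FF) * ns / 1000;	/* bits 9:0 hold the limit value */
}

int main(void)
{
	/* Sample: 0x8F4 -> unit index 2 (1024 ns), value 0xF4 (244),
	 * so 244 * 1024 / 1000 = 249 usec. */
	printf("%llu usec\n", irtl_2_usec(0x8F4));
	return 0;
}

Note that the old mask of 0x3 silently folded unit indices 4 and 5 onto smaller units, understating latencies for large IRTL values.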
@@ -1416,34 +1447,12 @@ static int __init intel_idle_init(void)
 	return 0;
 }
+device_initcall(intel_idle_init);

-static void __exit intel_idle_exit(void)
-{
-	struct cpuidle_device *dev;
-	int i;
-
-	cpu_notifier_register_begin();
-
-	if (lapic_timer_reliable_states != LAPIC_TIMER_ALWAYS_RELIABLE)
-		on_each_cpu(__setup_broadcast_timer, (void *)false, 1);
-	__unregister_cpu_notifier(&cpu_hotplug_notifier);
-
-	for_each_possible_cpu(i) {
-		dev = per_cpu_ptr(intel_idle_cpuidle_devices, i);
-		cpuidle_unregister_device(dev);
-	}
-
-	cpu_notifier_register_done();
-
-	cpuidle_unregister_driver(&intel_idle_driver);
-	free_percpu(intel_idle_cpuidle_devices);
-}
-
-module_init(intel_idle_init);
-module_exit(intel_idle_exit);
+
+/*
+ * We are not really modular, but we used to support that.  Meaning we also
+ * support "intel_idle.max_cstate=..." at boot and also a read-only export of
+ * it at /sys/module/intel_idle/parameters/max_cstate -- so using module_param
+ * is the easiest way (currently) to continue doing that.
+ */
 module_param(max_cstate, int, 0444);
-
-MODULE_AUTHOR("Len Brown <len.brown@intel.com>");
-MODULE_DESCRIPTION("Cpuidle driver for Intel Hardware v" INTEL_IDLE_VERSION);
-MODULE_LICENSE("GPL");
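The new comment in the last hunk captures the trick that makes this conversion safe: module_param() also works for code built into the kernel, so the boot parameter and its read-only sysfs export survive the switch from module_init() to device_initcall(). A minimal sketch of the same pattern, where the driver name mydrv and its parameter are invented for illustration:

/* Hypothetical built-in driver; every identifier here is invented. */
#include <linux/init.h>
#include <linux/moduleparam.h>

static int level = 3;
/* 0444: world-readable, never writable. Even for built-in code this is
 * exported at /sys/module/mydrv/parameters/level, and "mydrv.level=..."
 * is accepted on the kernel command line. */
module_param(level, int, 0444);

static int __init mydrv_init(void)
{
	/* runs once at the device_initcall level during boot */
	return 0;
}
device_initcall(mydrv_init);

Booting with mydrv.level=5 would override the default, exactly as "intel_idle.max_cstate=..." keeps working for this driver after the change, while dropping module_exit() makes the non-unloadable nature of the code explicit.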