[NOTE(review): this span is a garbled web-viewer rendering of a kernel patch; the
viewer chrome ("Loading", "Original line number Diff line number Diff line") has
been stripped and the hunks reconstructed as a unified diff. A kernel-doc comment
that was split mid-sentence across the original line break has been rejoined.]

diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
@@ -595,17 +595,6 @@ struct governor_attr {
 			 size_t count);
 };
 
-static inline bool cpufreq_this_cpu_can_update(struct cpufreq_policy *policy)
-{
-	/*
-	 * Allow remote callbacks if:
-	 * - dvfs_possible_from_any_cpu flag is set
-	 * - the local and remote CPUs share cpufreq policy
-	 */
-	return policy->dvfs_possible_from_any_cpu ||
-		cpumask_test_cpu(smp_processor_id(), policy->cpus);
-}
-
 /*********************************************************************
  *                     FREQUENCY TABLE HELPERS                       *
  *********************************************************************/

diff --git a/include/linux/sched/cpufreq.h b/include/linux/sched/cpufreq.h
@@ -12,6 +12,8 @@
 #define SCHED_CPUFREQ_MIGRATION	(1U << 1)
 
 #ifdef CONFIG_CPU_FREQ
+struct cpufreq_policy;
+
 struct update_util_data {
 	void (*func)(struct update_util_data *data, u64 time, unsigned int flags);
 };
@@ -20,6 +22,7 @@ void cpufreq_add_update_util_hook(int cpu, struct update_util_data *data,
 			void (*func)(struct update_util_data *data, u64 time,
 				     unsigned int flags));
 void cpufreq_remove_update_util_hook(int cpu);
+bool cpufreq_this_cpu_can_update(struct cpufreq_policy *policy);
 
 static inline unsigned long map_util_freq(unsigned long util,
 					unsigned long freq, unsigned long cap)

diff --git a/kernel/sched/cpufreq.c b/kernel/sched/cpufreq.c
@@ -5,6 +5,8 @@
  * Copyright (C) 2016, Intel Corporation
  * Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
  */
+#include <linux/cpufreq.h>
+
 #include "sched.h"
 
 DEFINE_PER_CPU(struct update_util_data __rcu *, cpufreq_update_util_data);
@@ -57,3 +59,19 @@ void cpufreq_remove_update_util_hook(int cpu)
 	rcu_assign_pointer(per_cpu(cpufreq_update_util_data, cpu), NULL);
 }
 EXPORT_SYMBOL_GPL(cpufreq_remove_update_util_hook);
+
+/**
+ * cpufreq_this_cpu_can_update - Check if cpufreq policy can be updated.
+ * @policy: cpufreq policy to check.
+ *
+ * Return 'true' if:
+ * - the local and remote CPUs share @policy,
+ * - dvfs_possible_from_any_cpu is set in @policy and the local CPU is not going
+ *   offline (in which case it is not expected to run cpufreq updates any more).
+ */
+bool cpufreq_this_cpu_can_update(struct cpufreq_policy *policy)
+{
+	return cpumask_test_cpu(smp_processor_id(), policy->cpus) ||
+		(policy->dvfs_possible_from_any_cpu &&
+		 rcu_dereference_sched(*this_cpu_ptr(&cpufreq_update_util_data)));
+}

diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
@@ -82,12 +82,10 @@ static bool sugov_should_update_freq(struct sugov_policy *sg_policy, u64 time)
 	 * by the hardware, as calculating the frequency is pointless if
 	 * we cannot in fact act on it.
 	 *
-	 * For the slow switching platforms, the kthread is always scheduled on
-	 * the right set of CPUs and any CPU can find the next frequency and
-	 * schedule the kthread.
+	 * This is needed on the slow switching platforms too to prevent CPUs
+	 * going offline from leaving stale IRQ work items behind.
 	 */
-	if (sg_policy->policy->fast_switch_enabled &&
-	    !cpufreq_this_cpu_can_update(sg_policy->policy))
+	if (!cpufreq_this_cpu_can_update(sg_policy->policy))
 		return false;
 
 	if (unlikely(sg_policy->limits_changed)) {
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h  (+0 −11)
@@ -595,17 +595,6 @@ struct governor_attr {
 			 size_t count);
 };
 
-static inline bool cpufreq_this_cpu_can_update(struct cpufreq_policy *policy)
-{
-	/*
-	 * Allow remote callbacks if:
-	 * - dvfs_possible_from_any_cpu flag is set
-	 * - the local and remote CPUs share cpufreq policy
-	 */
-	return policy->dvfs_possible_from_any_cpu ||
-		cpumask_test_cpu(smp_processor_id(), policy->cpus);
-}
-
 /*********************************************************************
  *                     FREQUENCY TABLE HELPERS                       *
  *********************************************************************/
diff --git a/include/linux/sched/cpufreq.h b/include/linux/sched/cpufreq.h  (+3 −0)
@@ -12,6 +12,8 @@
 #define SCHED_CPUFREQ_MIGRATION	(1U << 1)
 
 #ifdef CONFIG_CPU_FREQ
+struct cpufreq_policy;
+
 struct update_util_data {
 	void (*func)(struct update_util_data *data, u64 time, unsigned int flags);
 };
@@ -20,6 +22,7 @@ void cpufreq_add_update_util_hook(int cpu, struct update_util_data *data,
 			void (*func)(struct update_util_data *data, u64 time,
 				     unsigned int flags));
 void cpufreq_remove_update_util_hook(int cpu);
+bool cpufreq_this_cpu_can_update(struct cpufreq_policy *policy);
 
 static inline unsigned long map_util_freq(unsigned long util,
 					unsigned long freq, unsigned long cap)
diff --git a/kernel/sched/cpufreq.c b/kernel/sched/cpufreq.c  (+18 −0)
@@ -5,6 +5,8 @@
  * Copyright (C) 2016, Intel Corporation
  * Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
  */
+#include <linux/cpufreq.h>
+
 #include "sched.h"
 
 DEFINE_PER_CPU(struct update_util_data __rcu *, cpufreq_update_util_data);
@@ -57,3 +59,19 @@ void cpufreq_remove_update_util_hook(int cpu)
 	rcu_assign_pointer(per_cpu(cpufreq_update_util_data, cpu), NULL);
 }
 EXPORT_SYMBOL_GPL(cpufreq_remove_update_util_hook);
+
+/**
+ * cpufreq_this_cpu_can_update - Check if cpufreq policy can be updated.
+ * @policy: cpufreq policy to check.
+ *
+ * Return 'true' if:
+ * - the local and remote CPUs share @policy,
+ * - dvfs_possible_from_any_cpu is set in @policy and the local CPU is not going
+ *   offline (in which case it is not expected to run cpufreq updates any more).
+ */
+bool cpufreq_this_cpu_can_update(struct cpufreq_policy *policy)
+{
+	return cpumask_test_cpu(smp_processor_id(), policy->cpus) ||
+		(policy->dvfs_possible_from_any_cpu &&
+		 rcu_dereference_sched(*this_cpu_ptr(&cpufreq_update_util_data)));
+}
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c  (+3 −5)
@@ -82,12 +82,10 @@ static bool sugov_should_update_freq(struct sugov_policy *sg_policy, u64 time)
 	 * by the hardware, as calculating the frequency is pointless if
 	 * we cannot in fact act on it.
 	 *
-	 * For the slow switching platforms, the kthread is always scheduled on
-	 * the right set of CPUs and any CPU can find the next frequency and
-	 * schedule the kthread.
+	 * This is needed on the slow switching platforms too to prevent CPUs
+	 * going offline from leaving stale IRQ work items behind.
 	 */
-	if (sg_policy->policy->fast_switch_enabled &&
-	    !cpufreq_this_cpu_can_update(sg_policy->policy))
+	if (!cpufreq_this_cpu_can_update(sg_policy->policy))
 		return false;
 
 	if (unlikely(sg_policy->limits_changed)) {