include/linux/cpu.h  +0 −2

@@ -41,8 +41,6 @@
 extern void cpu_remove_sysdev_attr(struct sysdev_attribute *attr);
 extern int cpu_add_sysdev_attr_group(struct attribute_group *attrs);
 extern void cpu_remove_sysdev_attr_group(struct attribute_group *attrs);
-extern struct sysdev_attribute attr_sched_mc_power_savings;
-extern struct sysdev_attribute attr_sched_smt_power_savings;
 extern int sched_create_sysfs_power_savings_entries(struct sysdev_class *cls);
 
 #ifdef CONFIG_HOTPLUG_CPU

kernel/sched.c  +24 −24

@@ -3106,7 +3106,7 @@ static void run_rebalance_domains(struct softirq_action *h)
 		if (need_resched())
 			break;
 
-		rebalance_domains(balance_cpu, SCHED_IDLE);
+		rebalance_domains(balance_cpu, CPU_IDLE);
 
 		rq = cpu_rq(balance_cpu);
 		if (time_after(this_rq->next_balance, rq->next_balance))

@@ -6328,7 +6328,7 @@ int partition_sched_domains(cpumask_t *partition1, cpumask_t *partition2)
 }
 
 #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
-int arch_reinit_sched_domains(void)
+static int arch_reinit_sched_domains(void)
 {
 	int err;
 

@@ -6357,24 +6357,6 @@ static ssize_t sched_power_savings_store(const char *buf, size_t count, int smt)
 	return ret ? ret : count;
 }
 
-int sched_create_sysfs_power_savings_entries(struct sysdev_class *cls)
-{
-	int err = 0;
-
-#ifdef CONFIG_SCHED_SMT
-	if (smt_capable())
-		err = sysfs_create_file(&cls->kset.kobj,
-					&attr_sched_smt_power_savings.attr);
-#endif
-#ifdef CONFIG_SCHED_MC
-	if (!err && mc_capable())
-		err = sysfs_create_file(&cls->kset.kobj,
-					&attr_sched_mc_power_savings.attr);
-#endif
-	return err;
-}
-#endif
-
 #ifdef CONFIG_SCHED_MC
 static ssize_t sched_mc_power_savings_show(struct sys_device *dev, char *page)
 {

@@ -6385,7 +6367,7 @@ static ssize_t sched_mc_power_savings_store(struct sys_device *dev,
 {
 	return sched_power_savings_store(buf, count, 0);
 }
-SYSDEV_ATTR(sched_mc_power_savings, 0644, sched_mc_power_savings_show,
+static SYSDEV_ATTR(sched_mc_power_savings, 0644, sched_mc_power_savings_show,
 	    sched_mc_power_savings_store);
 #endif
 

@@ -6399,10 +6381,28 @@ static ssize_t sched_smt_power_savings_store(struct sys_device *dev,
 {
 	return sched_power_savings_store(buf, count, 1);
 }
-SYSDEV_ATTR(sched_smt_power_savings, 0644, sched_smt_power_savings_show,
+static SYSDEV_ATTR(sched_smt_power_savings, 0644, sched_smt_power_savings_show,
 	    sched_smt_power_savings_store);
 #endif
 
+int sched_create_sysfs_power_savings_entries(struct sysdev_class *cls)
+{
+	int err = 0;
+
+#ifdef CONFIG_SCHED_SMT
+	if (smt_capable())
+		err = sysfs_create_file(&cls->kset.kobj,
+					&attr_sched_smt_power_savings.attr);
+#endif
+#ifdef CONFIG_SCHED_MC
+	if (!err && mc_capable())
+		err = sysfs_create_file(&cls->kset.kobj,
+					&attr_sched_mc_power_savings.attr);
+#endif
+	return err;
+}
+#endif
+
 /*
  * Force a reinitialization of the sched domains hierarchy. The domains
  * and groups cannot be updated in place without racing with the balancing
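A note on the relocated sched_create_sysfs_power_savings_entries(): with the attr_sched_*_power_savings externs dropped from cpu.h and the SYSDEV_ATTR() definitions made static, the creation function has to sit after those definitions in kernel/sched.c, which is why it moves to the bottom of the #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT) block. The sketch below is only a userspace model of its error chaining (the MC attribute is attempted only if the SMT one succeeded, and the first failure is returned); smt_capable(), mc_capable() and create_file() are stand-ins here, not the kernel APIs.

/*
 * Userspace model of the error chaining in
 * sched_create_sysfs_power_savings_entries(): the SMT attribute is
 * created first, the MC attribute only if that succeeded, and the
 * first failure is what gets returned to the caller.
 */
#include <stdio.h>

static int smt_capable(void) { return 1; }	/* assumed topology */
static int mc_capable(void)  { return 1; }

static int create_file(const char *name)
{
	printf("creating sysfs file: %s\n", name);
	return 0;				/* 0 on success, -errno on error */
}

static int create_power_savings_entries(void)
{
	int err = 0;

	if (smt_capable())
		err = create_file("sched_smt_power_savings");
	if (!err && mc_capable())
		err = create_file("sched_mc_power_savings");

	return err;
}

int main(void)
{
	return create_power_savings_entries() ? 1 : 0;
}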
kernel/sched_fair.c  +6 −6

@@ -75,7 +75,7 @@ enum {
 
 unsigned int sysctl_sched_features __read_mostly =
 			SCHED_FEAT_FAIR_SLEEPERS	*1 |
-			SCHED_FEAT_SLEEPER_AVG		*1 |
+			SCHED_FEAT_SLEEPER_AVG		*0 |
 			SCHED_FEAT_SLEEPER_LOAD_AVG	*1 |
 			SCHED_FEAT_PRECISE_CPU_LOAD	*1 |
 			SCHED_FEAT_START_DEBIT		*1 |

@@ -304,11 +304,9 @@ __update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr)
 	delta_mine = calc_delta_mine(delta_exec, curr->load.weight, lw);
 
 	if (cfs_rq->sleeper_bonus > sysctl_sched_granularity) {
-		delta = calc_delta_mine(cfs_rq->sleeper_bonus,
-					curr->load.weight, lw);
-		if (unlikely(delta > cfs_rq->sleeper_bonus))
-			delta = cfs_rq->sleeper_bonus;
-
+		delta = min(cfs_rq->sleeper_bonus, (u64)delta_exec);
+		delta = calc_delta_mine(delta, curr->load.weight, lw);
+		delta = min((u64)delta, cfs_rq->sleeper_bonus);
 		cfs_rq->sleeper_bonus -= delta;
 		delta_mine -= delta;
 	}

@@ -521,6 +519,8 @@ static void __enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
 	 * Track the amount of bonus we've given to sleepers:
 	 */
 	cfs_rq->sleeper_bonus += delta_fair;
+	if (unlikely(cfs_rq->sleeper_bonus > sysctl_sched_runtime_limit))
+		cfs_rq->sleeper_bonus = sysctl_sched_runtime_limit;
 
 	schedstat_add(cfs_rq, wait_runtime, se->wait_runtime);
 }
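For reference, a minimal userspace model of the new deduction path in __update_curr(): the bonus consumed is first clamped to the time just executed, then weight-scaled, then clamped once more against the remaining pool, so the amount taken out of delta_mine can no longer exceed delta_mine itself. The numbers and the simplified calc_delta_mine() below are assumptions for illustration, not the kernel implementation; the separate hunk in __enqueue_sleeper() additionally caps the accumulated bonus at sysctl_sched_runtime_limit, bounding the pool the deduction draws from.

/*
 * Standalone model of the new sleeper-bonus deduction: clamp the
 * bonus to the time just executed, weight-scale it, clamp it again
 * against the remaining pool, then subtract it from both the pool
 * and delta_mine.
 *
 * calc_delta_mine() here is a simplified stand-in for the kernel
 * helper (delta scaled by this entity's share of the queue weight);
 * the sample values are made up.
 */
#include <stdio.h>
#include <stdint.h>

typedef uint64_t u64;

static u64 min_u64(u64 a, u64 b) { return a < b ? a : b; }

static u64 calc_delta_mine(u64 delta, unsigned long weight,
			   unsigned long total_weight)
{
	return delta * weight / total_weight;
}

int main(void)
{
	u64 sleeper_bonus = 4000000;		/* accumulated bonus (ns) */
	u64 delta_exec    = 1000000;		/* time just executed (ns) */
	unsigned long weight = 1024, queue_weight = 3072;

	u64 delta_mine = calc_delta_mine(delta_exec, weight, queue_weight);

	/* the new scheme from the patch */
	u64 delta = min_u64(sleeper_bonus, delta_exec);
	delta = calc_delta_mine(delta, weight, queue_weight);
	delta = min_u64(delta, sleeper_bonus);

	sleeper_bonus -= delta;
	delta_mine    -= delta;

	printf("consumed bonus: %llu, remaining bonus: %llu, delta_mine: %llu\n",
	       (unsigned long long)delta,
	       (unsigned long long)sleeper_bonus,
	       (unsigned long long)delta_mine);
	return 0;
}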