Commit 2475bf02 authored by Linus Torvalds

Merge tag 'sched_urgent_for_v6.2_rc6' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull scheduler fixes from Borislav Petkov:

 - Make sure the scheduler doesn't use stale frequency scaling values
   when the latter get disabled due to a value error

 - Fix a NULL pointer access on UP configs

 - Use the proper locking when updating CPU capacity

* tag 'sched_urgent_for_v6.2_rc6' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/aperfmperf: Erase stale arch_freq_scale values when disabling frequency invariance readings
  sched/core: Fix NULL pointer access fault in sched_setaffinity() with non-SMP configs
  sched/fair: Fixes for capacity inversion detection
  sched/uclamp: Fix a uninitialized variable warnings
parents ab2f4087 5f5cc9ed
arch/x86/kernel/cpu/aperfmperf.c  +9 −0
@@ -330,7 +330,16 @@ static void __init bp_init_freq_invariance(void)

 static void disable_freq_invariance_workfn(struct work_struct *work)
 {
+	int cpu;
+
 	static_branch_disable(&arch_scale_freq_key);
+
+	/*
+	 * Set arch_freq_scale to a default value on all cpus
+	 * This negates the effect of scaling
+	 */
+	for_each_possible_cpu(cpu)
+		per_cpu(arch_freq_scale, cpu) = SCHED_CAPACITY_SCALE;
 }
 
 static DECLARE_WORK(disable_freq_invariance_work,
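Why the reset neutralizes scaling: the frequency-invariance machinery scales the scheduler's utilization signals by the per-CPU factor, so a stale factor left behind after the static branch is disabled would keep distorting them. A minimal userspace sketch of that arithmetic (illustrative stand-ins only, not kernel code; the freq_scale array and scale_util() helper are invented for the example):

#include <stdio.h>

#define SCHED_CAPACITY_SCALE (1 << 10)	/* the kernel's fixed-point "1.0" */
#define NR_CPUS 4

/* Stand-in for the per-CPU arch_freq_scale variable */
static unsigned long freq_scale[NR_CPUS];

/* Frequency-invariant utilization: raw * scale / 1.0 */
static unsigned long scale_util(unsigned long raw_util, int cpu)
{
	return raw_util * freq_scale[cpu] / SCHED_CAPACITY_SCALE;
}

int main(void)
{
	freq_scale[0] = 512;	/* stale: CPU last sampled at half its max frequency */
	printf("stale: %lu\n", scale_util(800, 0));	/* 400, utilization underestimated */

	/* The fix: a scale of SCHED_CAPACITY_SCALE makes scaling a no-op */
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		freq_scale[cpu] = SCHED_CAPACITY_SCALE;

	printf("reset: %lu\n", scale_util(800, 0));	/* 800, raw value passes through */
	return 0;
}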
kernel/sched/core.c  +8 −2
@@ -8290,12 +8290,18 @@ long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
 	if (retval)
 		goto out_put_task;
 
+	/*
+	 * With non-SMP configs, user_cpus_ptr/user_mask isn't used and
+	 * alloc_user_cpus_ptr() returns NULL.
+	 */
 	user_mask = alloc_user_cpus_ptr(NUMA_NO_NODE);
-	if (IS_ENABLED(CONFIG_SMP) && !user_mask) {
+	if (user_mask) {
+		cpumask_copy(user_mask, in_mask);
+	} else if (IS_ENABLED(CONFIG_SMP)) {
 		retval = -ENOMEM;
 		goto out_put_task;
 	}
-	cpumask_copy(user_mask, in_mask);
+
 	ac = (struct affinity_context){
 		.new_mask  = in_mask,
 		.user_mask = user_mask,
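The reordering works because alloc_user_cpus_ptr() returning NULL is the expected outcome on non-SMP kernels rather than a failure; only under CONFIG_SMP does NULL mean the allocation ran out of memory. A self-contained sketch of that pattern (userspace stand-ins; alloc_mask() and set_affinity() are invented names, not kernel APIs):

#include <stdbool.h>
#include <stdlib.h>

#define FEATURE_ENABLED true	/* stand-in for IS_ENABLED(CONFIG_SMP) */

/*
 * Returns NULL either because the feature is compiled out (expected)
 * or because the allocation genuinely failed.
 */
static unsigned long *alloc_mask(void)
{
	if (!FEATURE_ENABLED)
		return NULL;
	return malloc(sizeof(unsigned long));
}

static int set_affinity(unsigned long in_mask)
{
	unsigned long *mask = alloc_mask();

	if (mask)
		*mask = in_mask;	/* copy only when memory was really allocated */
	else if (FEATURE_ENABLED)
		return -1;		/* NULL here really is an allocation failure */

	/* ... proceed; mask may legitimately be NULL on the feature-less config ... */
	free(mask);
	return 0;
}

int main(void)
{
	return set_affinity(0x3) ? 1 : 0;
}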
kernel/sched/fair.c  +27 −21
@@ -7229,10 +7229,10 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
 	eenv_task_busy_time(&eenv, p, prev_cpu);
 
 	for (; pd; pd = pd->next) {
+		unsigned long util_min = p_util_min, util_max = p_util_max;
 		unsigned long cpu_cap, cpu_thermal_cap, util;
 		unsigned long cur_delta, max_spare_cap = 0;
 		unsigned long rq_util_min, rq_util_max;
-		unsigned long util_min, util_max;
 		unsigned long prev_spare_cap = 0;
 		int max_spare_cap_cpu = -1;
 		unsigned long base_energy;
@@ -7251,6 +7251,8 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
 		eenv.pd_cap = 0;
 
 		for_each_cpu(cpu, cpus) {
+			struct rq *rq = cpu_rq(cpu);
+
 			eenv.pd_cap += cpu_thermal_cap;
 
 			if (!cpumask_test_cpu(cpu, sched_domain_span(sd)))
@@ -7269,11 +7271,7 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
 			 * much capacity we can get out of the CPU; this is
 			 * aligned with sched_cpu_util().
 			 */
-			if (uclamp_is_used()) {
-				if (uclamp_rq_is_idle(cpu_rq(cpu))) {
-					util_min = p_util_min;
-					util_max = p_util_max;
-				} else {
+			if (uclamp_is_used() && !uclamp_rq_is_idle(rq)) {
 				/*
 				 * Open code uclamp_rq_util_with() except for
 				 * the clamp() part. Ie: apply max aggregation
@@ -7281,13 +7279,12 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
 				 * operate on non clamped util but must use the
 				 * max-aggregated uclamp_{min, max}.
 				 */
-				rq_util_min = uclamp_rq_get(cpu_rq(cpu), UCLAMP_MIN);
-				rq_util_max = uclamp_rq_get(cpu_rq(cpu), UCLAMP_MAX);
+				rq_util_min = uclamp_rq_get(rq, UCLAMP_MIN);
+				rq_util_max = uclamp_rq_get(rq, UCLAMP_MAX);
 
 				util_min = max(rq_util_min, p_util_min);
 				util_max = max(rq_util_max, p_util_max);
-			}
 			}
+
 			if (!util_fits_cpu(util, util_min, util_max, cpu))
 				continue;

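Taken together, the hunks above enforce one rule: the task's own clamp values are the default (set once at the top of the pd loop, which fixes the uninitialized-use warning), and a runqueue only contributes its clamps via max aggregation when it is not idle. A self-contained sketch of that aggregation (invented helper names, not the kernel's uclamp_rq_util_with()):

#include <stdbool.h>
#include <stdio.h>

static unsigned long max_ul(unsigned long a, unsigned long b)
{
	return a > b ? a : b;
}

/* Compute the effective clamps a task would see on a runqueue */
static void effective_clamps(bool rq_idle,
			     unsigned long rq_min, unsigned long rq_max,
			     unsigned long p_min, unsigned long p_max,
			     unsigned long *out_min, unsigned long *out_max)
{
	/* Defaults mirror the new loop-top initializers: the task's values */
	*out_min = p_min;
	*out_max = p_max;

	/* An idle rq contributes nothing; its stale clamps are ignored */
	if (rq_idle)
		return;

	/* Max aggregation only; util itself stays unclamped here */
	*out_min = max_ul(rq_min, p_min);
	*out_max = max_ul(rq_max, p_max);
}

int main(void)
{
	unsigned long lo, hi;

	effective_clamps(false, 200, 900, 300, 700, &lo, &hi);
	printf("busy rq: min=%lu max=%lu\n", lo, hi);	/* 300, 900 */

	effective_clamps(true, 200, 900, 300, 700, &lo, &hi);
	printf("idle rq: min=%lu max=%lu\n", lo, hi);	/* 300, 700 */
	return 0;
}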
@@ -8871,16 +8868,23 @@ static void update_cpu_capacity(struct sched_domain *sd, int cpu)
 	 *   * Thermal pressure will impact all cpus in this perf domain
 	 *     equally.
 	 */
-	if (static_branch_unlikely(&sched_asym_cpucapacity)) {
+	if (sched_energy_enabled()) {
 		unsigned long inv_cap = capacity_orig - thermal_load_avg(rq);
-		struct perf_domain *pd = rcu_dereference(rq->rd->pd);
+		struct perf_domain *pd;
+
+		rcu_read_lock();
 
+		pd = rcu_dereference(rq->rd->pd);
 		rq->cpu_capacity_inverted = 0;
 
 		for (; pd; pd = pd->next) {
 			struct cpumask *pd_span = perf_domain_span(pd);
 			unsigned long pd_cap_orig, pd_cap;
 
+			/* We can't be inverted against our own pd */
+			if (cpumask_test_cpu(cpu_of(rq), pd_span))
+				continue;
+
 			cpu = cpumask_any(pd_span);
 			pd_cap_orig = arch_scale_cpu_capacity(cpu);

@@ -8905,6 +8909,8 @@ static void update_cpu_capacity(struct sched_domain *sd, int cpu)
 				break;
 			}
 		}
+
+		rcu_read_unlock();
 	}
 
 	trace_sched_cpu_capacity_tp(rq);
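The locking change matters because rq->rd->pd is an RCU-protected pointer and update_cpu_capacity() is not guaranteed to run with the RCU read lock already held, so the rcu_dereference() and the entire perf-domain walk are now bracketed by an explicit read-side critical section. A self-contained sketch of that shape (userspace stand-ins for the RCU primitives, not the kernel API):

#include <stdio.h>

struct perf_domain {
	int id;
	struct perf_domain *next;
};

/* Stand-ins for rcu_read_lock()/rcu_read_unlock()/rcu_dereference() */
static void rcu_read_lock_stub(void)   { /* enter read-side critical section */ }
static void rcu_read_unlock_stub(void) { /* leave it; list may then be freed */ }
#define rcu_dereference_stub(p) (p)	/* real RCU adds ordering guarantees */

static struct perf_domain *shared_pd;	/* stands in for rq->rd->pd */

static void walk_perf_domains(void)
{
	struct perf_domain *pd;

	rcu_read_lock_stub();

	/* The dereference and the full walk stay under the read lock */
	pd = rcu_dereference_stub(shared_pd);
	for (; pd; pd = pd->next)
		printf("pd %d\n", pd->id);

	rcu_read_unlock_stub();
}

int main(void)
{
	struct perf_domain b = { 1, NULL }, a = { 0, &b };

	shared_pd = &a;
	walk_perf_domains();
	return 0;
}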