Commit a2e90611 authored by Vincent Guittot, committed by Peter Zijlstra

sched/fair: Remove capacity inversion detection



Remove the capacity inversion detection which is now handled by
util_fits_cpu() returning -1 when we need to continue to look for a
potential CPU with better performance.
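
As an illustration only (this is not the kernel code), the sketch below shows how a caller can act on such a tri-state fit result: 1 means the CPU fits, 0 means it does not, and -1 means it would fit were it not thermally throttled, so the search should continue while remembering it as a fallback. The names fits_cpu() and pick_cpu_example(), and all numbers, are hypothetical.

#include <stdio.h>

/* Hypothetical sketch: 1 = fits, 0 = does not fit, -1 = fits only against
 * the CPU's original capacity; its thermally reduced capacity is too small,
 * so a better-performing candidate should still be looked for. */
static int fits_cpu(unsigned long util, unsigned long capacity_orig,
		    unsigned long thermal_pressure)
{
	unsigned long capacity_thermal = capacity_orig - thermal_pressure;

	if (util <= capacity_thermal)
		return 1;
	if (util <= capacity_orig)
		return -1;
	return 0;
}

/* Scan CPUs; stop at the first clean fit, otherwise fall back to a CPU
 * that only fit with degraded capacity (if any was seen). */
static int pick_cpu_example(unsigned long util, const unsigned long cap[],
			    const unsigned long therm[], int nr_cpus)
{
	int fallback = -1;

	for (int cpu = 0; cpu < nr_cpus; cpu++) {
		int fits = fits_cpu(util, cap[cpu], therm[cpu]);

		if (fits > 0)
			return cpu;
		if (fits < 0 && fallback < 0)
			fallback = cpu;
	}
	return fallback;
}

int main(void)
{
	/* CPU 0: big but throttled; CPU 1: smaller, unthrottled. */
	unsigned long cap[]   = { 1024, 512 };
	unsigned long therm[] = {  600,   0 };

	/* CPU 0 reports -1 (1024 - 600 < 500), CPU 1 reports 1, so CPU 1 wins. */
	printf("picked CPU %d\n", pick_cpu_example(500, cap, therm, 2));
	return 0;
}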

This ends up almost reverting the patches below, except for some comments:
commit da07d2f9 ("sched/fair: Fixes for capacity inversion detection")
commit aa69c36f ("sched/fair: Consider capacity inversion in util_fits_cpu()")
commit 44c7b80b ("sched/fair: Detect capacity inversion")

Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20230201143628.270912-3-vincent.guittot@linaro.org
parent e5ed0550
kernel/sched/fair.c +5 −79
@@ -4476,17 +4476,9 @@ static inline int util_fits_cpu(unsigned long util,
	 *
	 * For uclamp_max, we can tolerate a drop in performance level as the
	 * goal is to cap the task. So it's okay if it's getting less.
-	 *
-	 * In case of capacity inversion we should honour the inverted capacity
-	 * for both uclamp_min and uclamp_max all the time.
	 */
-	capacity_orig = cpu_in_capacity_inversion(cpu);
-	if (capacity_orig) {
-		capacity_orig_thermal = capacity_orig;
-	} else {
-		capacity_orig = capacity_orig_of(cpu);
-		capacity_orig_thermal = capacity_orig - arch_scale_thermal_pressure(cpu);
-	}
+	capacity_orig = capacity_orig_of(cpu);
+	capacity_orig_thermal = capacity_orig - arch_scale_thermal_pressure(cpu);

	/*
	 * We want to force a task to fit a cpu as implied by uclamp_max.
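
A short standalone illustration of the simplification above, with made-up numbers rather than anything taken from the patch: the thermally capped capacity is now always the CPU's original capacity minus its own thermal pressure, with no inverted-capacity special case.

#include <stdio.h>

int main(void)
{
	/* Assumed values standing in for arch_scale_cpu_capacity(cpu) and
	 * arch_scale_thermal_pressure(cpu); they are not from the patch. */
	unsigned long capacity_orig = 1024;
	unsigned long thermal_pressure = 300;
	unsigned long capacity_orig_thermal = capacity_orig - thermal_pressure;

	/* Prints 724: the only capacity the fit checks now compare against. */
	printf("capacity_orig_thermal = %lu\n", capacity_orig_thermal);
	return 0;
}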
@@ -9027,82 +9019,16 @@ static unsigned long scale_rt_capacity(int cpu)

static void update_cpu_capacity(struct sched_domain *sd, int cpu)
{
-	unsigned long capacity_orig = arch_scale_cpu_capacity(cpu);
	unsigned long capacity = scale_rt_capacity(cpu);
	struct sched_group *sdg = sd->groups;
-	struct rq *rq = cpu_rq(cpu);

-	rq->cpu_capacity_orig = capacity_orig;
+	cpu_rq(cpu)->cpu_capacity_orig = arch_scale_cpu_capacity(cpu);

	if (!capacity)
		capacity = 1;

-	rq->cpu_capacity = capacity;
-
-	/*
-	 * Detect if the performance domain is in capacity inversion state.
-	 *
-	 * Capacity inversion happens when another perf domain with equal or
-	 * lower capacity_orig_of() ends up having higher capacity than this
-	 * domain after subtracting thermal pressure.
-	 *
-	 * We only take into account thermal pressure in this detection as it's
-	 * the only metric that actually results in *real* reduction of
-	 * capacity due to performance points (OPPs) being dropped/become
-	 * unreachable due to thermal throttling.
-	 *
-	 * We assume:
-	 *   * That all cpus in a perf domain have the same capacity_orig
-	 *     (same uArch).
-	 *   * Thermal pressure will impact all cpus in this perf domain
-	 *     equally.
-	 */
-	if (sched_energy_enabled()) {
-		unsigned long inv_cap = capacity_orig - thermal_load_avg(rq);
-		struct perf_domain *pd;
-
-		rcu_read_lock();
-
-		pd = rcu_dereference(rq->rd->pd);
-		rq->cpu_capacity_inverted = 0;
-
-		for (; pd; pd = pd->next) {
-			struct cpumask *pd_span = perf_domain_span(pd);
-			unsigned long pd_cap_orig, pd_cap;
-
-			/* We can't be inverted against our own pd */
-			if (cpumask_test_cpu(cpu_of(rq), pd_span))
-				continue;
-
-			cpu = cpumask_any(pd_span);
-			pd_cap_orig = arch_scale_cpu_capacity(cpu);
-
-			if (capacity_orig < pd_cap_orig)
-				continue;
-
-			/*
-			 * handle the case of multiple perf domains have the
-			 * same capacity_orig but one of them is under higher
-			 * thermal pressure. We record it as capacity
-			 * inversion.
-			 */
-			if (capacity_orig == pd_cap_orig) {
-				pd_cap = pd_cap_orig - thermal_load_avg(cpu_rq(cpu));
-
-				if (pd_cap > inv_cap) {
-					rq->cpu_capacity_inverted = inv_cap;
-					break;
-				}
-			} else if (pd_cap_orig > inv_cap) {
-				rq->cpu_capacity_inverted = inv_cap;
-				break;
-			}
-		}
-
-		rcu_read_unlock();
-	}
-
-	trace_sched_cpu_capacity_tp(rq);
+	cpu_rq(cpu)->cpu_capacity = capacity;
+	trace_sched_cpu_capacity_tp(cpu_rq(cpu));

	sdg->sgc->capacity = capacity;
	sdg->sgc->min_capacity = capacity;
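
For context, here is a hedged standalone sketch (hypothetical names and numbers, not the removed kernel code) of the idea the deleted block implemented: a performance domain counted as "inverted" when another domain of equal or lower original capacity still offered more usable capacity once thermal pressure was subtracted.

#include <stdio.h>

/* Hypothetical stand-in for a performance domain. */
struct pd_example {
	unsigned long cap_orig;	/* original capacity of its CPUs */
	unsigned long thermal;	/* thermal pressure currently applied */
};

/*
 * Simplified sketch of the removed check (the kernel treated equal-capacity
 * and strictly smaller domains slightly differently): report the thermally
 * reduced capacity if some other domain of equal or lower original capacity
 * still ends up with more usable capacity, 0 otherwise.
 */
static unsigned long inverted_capacity(const struct pd_example *self,
				       const struct pd_example pds[], int nr)
{
	unsigned long inv_cap = self->cap_orig - self->thermal;

	for (int i = 0; i < nr; i++) {
		const struct pd_example *other = &pds[i];

		if (other == self || other->cap_orig > self->cap_orig)
			continue;

		if (other->cap_orig - other->thermal > inv_cap)
			return inv_cap;
	}
	return 0;
}

int main(void)
{
	/* Big domain heavily throttled, little domain unthrottled. */
	struct pd_example pds[] = { { 1024, 600 }, { 512, 0 } };

	/* 1024 - 600 = 424 < 512, so the big domain reports 424. */
	printf("big domain inverted capacity: %lu\n",
	       inverted_capacity(&pds[0], pds, 2));
	return 0;
}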
kernel/sched/sched.h +0 −19
@@ -1044,7 +1044,6 @@ struct rq {

	unsigned long		cpu_capacity;
	unsigned long		cpu_capacity_orig;
-	unsigned long		cpu_capacity_inverted;

	struct balance_callback *balance_callback;

@@ -2899,24 +2898,6 @@ static inline unsigned long capacity_orig_of(int cpu)
	return cpu_rq(cpu)->cpu_capacity_orig;
}

-/*
- * Returns inverted capacity if the CPU is in capacity inversion state.
- * 0 otherwise.
- *
- * Capacity inversion detection only considers thermal impact where actual
- * performance points (OPPs) gets dropped.
- *
- * Capacity inversion state happens when another performance domain that has
- * equal or lower capacity_orig_of() becomes effectively larger than the perf
- * domain this CPU belongs to due to thermal pressure throttling it hard.
- *
- * See comment in update_cpu_capacity().
- */
-static inline unsigned long cpu_in_capacity_inversion(int cpu)
-{
-	return cpu_rq(cpu)->cpu_capacity_inverted;
-}
-
/**
 * enum cpu_util_type - CPU utilization type
 * @FREQUENCY_UTIL:	Utilization used to select frequency