Commit 86bfbb7c authored by Peter Zijlstra, committed by Ingo Molnar

sched/fair: Add lag based placement



With the introduction of avg_vruntime, it is possible to approximate
lag (the entire purpose of introducing it in fact). Use this to do lag
based placement over sleep+wake.

Specifically, the FAIR_SLEEPERS thing places things too far to the
left and messes up the deadline aspect of EEVDF.
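
For reference, in the notation used by the new comments in kernel/sched/fair.c
(S is the ideal service time, s_i the service actually received, V the
weighted average of vruntimes, w_i the entity's weight), the quantity being
preserved across sleep+wake is:

    \[
      \mathrm{lag}_i = S - s_i = w_i\,(V - v_i), \qquad
      vl_i = V - v_i \quad\Longleftrightarrow\quad v_i = V - vl_i
    \]

On wakeup, placement picks v_i = V - vl_i (after compensating for the entity's
own effect on V, as derived in place_entity() below), so the entity resumes
with the lag it had when it went to sleep.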

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Link: https://lore.kernel.org/r/20230531124603.794929315@infradead.org
parent e0c2ff90
include/linux/sched.h  +2 −1
@@ -554,8 +554,9 @@ struct sched_entity {
 
 	u64				exec_start;
 	u64				sum_exec_runtime;
-	u64				vruntime;
 	u64				prev_sum_exec_runtime;
+	u64				vruntime;
+	s64				vlag;
 
 	u64				nr_migrations;
 
kernel/sched/core.c  +1 −0
@@ -4501,6 +4501,7 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
 	p->se.prev_sum_exec_runtime	= 0;
 	p->se.nr_migrations		= 0;
 	p->se.vruntime			= 0;
+	p->se.vlag			= 0;
 	INIT_LIST_HEAD(&p->se.group_node);
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
kernel/sched/fair.c  +130 −38
@@ -715,6 +715,15 @@ u64 avg_vruntime(struct cfs_rq *cfs_rq)
 	return cfs_rq->min_vruntime + avg;
 }
 
+/*
+ * lag_i = S - s_i = w_i * (V - v_i)
+ */
+void update_entity_lag(struct cfs_rq *cfs_rq, struct sched_entity *se)
+{
+	SCHED_WARN_ON(!se->on_rq);
+	se->vlag = avg_vruntime(cfs_rq) - se->vruntime;
+}
+
 static u64 __update_min_vruntime(struct cfs_rq *cfs_rq, u64 vruntime)
 {
 	u64 min_vruntime = cfs_rq->min_vruntime;
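
For intuition, a minimal userspace sketch of what update_entity_lag() computes
(simplified stand-ins, not the kernel types): an entity whose vruntime trails
the average is owed service and has positive virtual lag; one that ran ahead
has negative lag. Since avg_vruntime() averages over queued entities only, the
snapshot must be taken while the entity is still enqueued -- hence the
SCHED_WARN_ON above.

    #include <stdio.h>
    #include <stdint.h>

    /* Simplified stand-in for update_entity_lag(): vl_i = V - v_i. */
    static int64_t virtual_lag(uint64_t avg_vruntime, uint64_t vruntime)
    {
            return (int64_t)(avg_vruntime - vruntime);
    }

    int main(void)
    {
            /* V = 1000: an entity at v_i = 900 ran less than its fair share. */
            printf("vlag = %lld\n", (long long)virtual_lag(1000, 900));  /* +100 */
            /* An entity at v_i = 1100 over-ran its fair share. */
            printf("vlag = %lld\n", (long long)virtual_lag(1000, 1100)); /* -100 */
            return 0;
    }
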
@@ -3492,6 +3501,8 @@ dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { }
 static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
 			    unsigned long weight)
 {
+	unsigned long old_weight = se->load.weight;
+
 	if (se->on_rq) {
 		/* commit outstanding execution time */
 		if (cfs_rq->curr == se)
@@ -3504,6 +3515,14 @@ static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
 
 	update_load_set(&se->load, weight);
 
+	if (!se->on_rq) {
+		/*
+		 * Because we keep se->vlag = V - v_i, while: lag_i = w_i*(V - v_i),
+		 * we need to scale se->vlag when w_i changes.
+		 */
+		se->vlag = div_s64(se->vlag * old_weight, weight);
+	}
+
 #ifdef CONFIG_SMP
 	do {
 		u32 divider = get_pelt_divider(&se->avg);
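
The rescaling can be checked in isolation with a small userspace sketch (plain
C division standing in for div_s64(), made-up numbers): the true lag is
w_i * vl_i, so scaling vl_i by old_weight/weight keeps that product constant
across the weight change.

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            int64_t vlag = 512;          /* virtual lag: V - v_i */
            uint64_t old_weight = 1024;  /* w_i before the reweight */
            uint64_t weight = 2048;      /* w_i after the reweight */

            int64_t lag_before = vlag * (int64_t)old_weight;

            /* Mirrors reweight_entity(): vlag = div_s64(vlag * old_weight, weight) */
            vlag = (vlag * (int64_t)old_weight) / (int64_t)weight;

            int64_t lag_after = vlag * (int64_t)weight;

            /* Both print 524288: the true lag w_i*vl_i survives the reweight. */
            printf("lag before: %lld, after: %lld\n",
                   (long long)lag_before, (long long)lag_after);
            return 0;
    }
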
@@ -4853,6 +4872,87 @@ static void
 place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
 {
 	u64 vruntime = avg_vruntime(cfs_rq);
+	s64 lag = 0;
+
+	/*
+	 * Due to how V is constructed as the weighted average of entities,
+	 * adding tasks with positive lag, or removing tasks with negative lag
+	 * will move 'time' backwards, this can screw around with the lag of
+	 * other tasks.
+	 *
+	 * EEVDF: placement strategy #1 / #2
+	 */
+	if (sched_feat(PLACE_LAG) && cfs_rq->nr_running > 1) {
+		struct sched_entity *curr = cfs_rq->curr;
+		unsigned long load;
+
+		lag = se->vlag;
+
+		/*
+		 * If we want to place a task and preserve lag, we have to
+		 * consider the effect of the new entity on the weighted
+		 * average and compensate for this, otherwise lag can quickly
+		 * evaporate.
+		 *
+		 * Lag is defined as:
+		 *
+		 *   lag_i = S - s_i = w_i * (V - v_i)
+		 *
+		 * To avoid the 'w_i' term all over the place, we only track
+		 * the virtual lag:
+		 *
+		 *   vl_i = V - v_i <=> v_i = V - vl_i
+		 *
+		 * And we take V to be the weighted average of all v:
+		 *
+		 *   V = (\Sum w_j*v_j) / W
+		 *
+		 * Where W is: \Sum w_j
+		 *
+		 * Then, the weighted average after adding an entity with lag
+		 * vl_i is given by:
+		 *
+		 *   V' = (\Sum w_j*v_j + w_i*v_i) / (W + w_i)
+		 *      = (W*V + w_i*(V - vl_i)) / (W + w_i)
+		 *      = (W*V + w_i*V - w_i*vl_i) / (W + w_i)
+		 *      = (V*(W + w_i) - w_i*vl_i) / (W + w_i)
+		 *      = V - w_i*vl_i / (W + w_i)
+		 *
+		 * And the actual lag after adding an entity with vl_i is:
+		 *
+		 *   vl'_i = V' - v_i
+		 *         = V - w_i*vl_i / (W + w_i) - (V - vl_i)
+		 *         = vl_i - w_i*vl_i / (W + w_i)
+		 *
+		 * Which is strictly less than vl_i. So in order to preserve lag
+		 * we should inflate the lag before placement such that the
+		 * effective lag after placement comes out right.
+		 *
+		 * As such, invert the above relation for vl'_i to get the vl_i
+		 * we need to use such that the lag after placement is the lag
+		 * we computed before dequeue.
+		 *
+		 *   vl'_i = vl_i - w_i*vl_i / (W + w_i)
+		 *         = ((W + w_i)*vl_i - w_i*vl_i) / (W + w_i)
+		 *
+		 *   (W + w_i)*vl'_i = (W + w_i)*vl_i - w_i*vl_i
+		 *                   = W*vl_i
+		 *
+		 *   vl_i = (W + w_i)*vl'_i / W
+		 */
+		load = cfs_rq->avg_load;
+		if (curr && curr->on_rq)
+			load += curr->load.weight;
+
+		lag *= load + se->load.weight;
+		if (WARN_ON_ONCE(!load))
+			load = 1;
+		lag = div_s64(lag, load);
+
+		vruntime -= lag;
+	}
+
+	if (sched_feat(FAIR_SLEEPERS)) {
 
 		/* sleeps up to a single latency don't count. */
 		if (!initial) {
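
The inversion at the end of that comment can be sanity-checked numerically. A
minimal userspace sketch (made-up weights chosen so the integer divisions come
out exact; W stands for the pre-placement total weight that place_entity()
derives from cfs_rq->avg_load plus the running entity's weight): inflate the
stored lag by (W + w_i)/W, place the entity, recompute the weighted average,
and confirm the effective lag equals what was saved at dequeue.

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            int64_t W  = 3072;   /* total weight already on the runqueue */
            int64_t V  = 10000;  /* current weighted average vruntime */
            int64_t wi = 1024;   /* weight of the entity being placed */
            int64_t vl = 96;     /* vl'_i: virtual lag recorded at dequeue */

            /* place_entity(): vl_i = (W + w_i) * vl'_i / W */
            int64_t lag = vl * (W + wi) / W;   /* 96 * 4096 / 3072 = 128 */
            int64_t vi  = V - lag;             /* place the entity at v_i = 9872 */

            /* Weighted average with the new entity included:
             * V' = (W*V + w_i*v_i) / (W + w_i) = 9968
             */
            int64_t Vnew = (W * V + wi * vi) / (W + wi);

            /* Effective lag after placement, V' - v_i, equals vl'_i again. */
            printf("requested vlag %lld, got %lld\n",
                   (long long)vl, (long long)(Vnew - vi));
            return 0;
    }
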
@@ -4875,27 +4975,16 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
 
 		/*
 		 * Pull vruntime of the entity being placed to the base level of
-	 * cfs_rq, to prevent boosting it if placed backwards.
-	 * However, min_vruntime can advance much faster than real time, with
-	 * the extreme being when an entity with the minimal weight always runs
-	 * on the cfs_rq. If the waking entity slept for a long time, its
-	 * vruntime difference from min_vruntime may overflow s64 and their
-	 * comparison may get inversed, so ignore the entity's original
-	 * vruntime in that case.
-	 * The maximal vruntime speedup is given by the ratio of normal to
-	 * minimal weight: scale_load_down(NICE_0_LOAD) / MIN_SHARES.
-	 * When placing a migrated waking entity, its exec_start has been set
-	 * from a different rq. In order to take into account a possible
-	 * divergence between new and prev rq's clocks task because of irq and
-	 * stolen time, we take an additional margin.
-	 * So, cutting off on the sleep time of
-	 *     2^63 / scale_load_down(NICE_0_LOAD) ~ 104 days
-	 * should be safe.
-	 */
-	if (entity_is_long_sleeper(se))
-		se->vruntime = vruntime;
-	else
-		se->vruntime = max_vruntime(se->vruntime, vruntime);
+		 * cfs_rq, to prevent boosting it if placed backwards.  If the entity
+		 * slept for a long time, don't even try to compare its vruntime with
+		 * the base as it may be too far off and the comparison may get
+		 * inversed due to s64 overflow.
+		 */
+		if (!entity_is_long_sleeper(se))
+			vruntime = max_vruntime(se->vruntime, vruntime);
+	}
+
+	se->vruntime = vruntime;
 }
 
 static void check_enqueue_throttle(struct cfs_rq *cfs_rq);
@@ -5077,6 +5166,9 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 
 	clear_buddies(cfs_rq, se);
 
+	if (flags & DEQUEUE_SLEEP)
+		update_entity_lag(cfs_rq, se);
+
 	if (se != cfs_rq->curr)
 		__dequeue_entity(cfs_rq, se);
 	se->on_rq = 0;
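
Together with place_entity() above, this completes the round trip: virtual lag
is recorded when the entity is dequeued to sleep, and consumed when it is
placed again on wakeup. A condensed sketch of that flow (simplified stand-ins;
the compensation for the entity's own effect on V is omitted for brevity):

    #include <stdio.h>
    #include <stdint.h>

    struct entity { uint64_t vruntime; int64_t vlag; };

    /* dequeue with DEQUEUE_SLEEP: remember how far from fair we were. */
    static void sleep_entity(struct entity *se, uint64_t V)
    {
            se->vlag = (int64_t)(V - se->vruntime);
    }

    /* enqueue after wakeup: re-apply the lag against the *current* average. */
    static void wake_entity(struct entity *se, uint64_t V)
    {
            se->vruntime = V - se->vlag;
    }

    int main(void)
    {
            struct entity se = { .vruntime = 900, .vlag = 0 };

            sleep_entity(&se, 1000);  /* vlag = +100: still owed service */
            wake_entity(&se, 5000);   /* placed at 4900, 100 below the new V */

            printf("vruntime = %llu\n", (unsigned long long)se.vruntime);
            return 0;
    }
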
kernel/sched/features.h  +8 −0
 /* SPDX-License-Identifier: GPL-2.0 */
+
 /*
  * Only give sleepers 50% of their service deficit. This allows
  * them to run sooner, but does not allow tons of sleepers to
  * rip the spread apart.
  */
+SCHED_FEAT(FAIR_SLEEPERS, false)
 SCHED_FEAT(GENTLE_FAIR_SLEEPERS, true)
 
 /*
+ * Using the avg_vruntime, do the right thing and preserve lag across
+ * sleep+wake cycles. EEVDF placement strategy #1, #2 if disabled.
+ */
+SCHED_FEAT(PLACE_LAG, true)
+
+/*
  * Prefer to schedule the task we woke last (assuming it failed
  * wakeup-preemption), since its likely going to consume data we