Commit 8039e96f authored by Vineeth Pillai, committed by Peter Zijlstra

sched/fair: Fix forced idle sibling starvation corner case



If there is only one long-running local task and the sibling is forced
idle, the sibling might not get a chance to run until a schedule event
happens on any CPU in the core.

So we check for this condition during a tick to see if a sibling is
starved, and if so give it a chance to schedule.

Signed-off-by: Vineeth Pillai <viremana@linux.microsoft.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Tested-by: Don Hiatt <dhiatt@digitalocean.com>
Tested-by: Hongyu Ning <hongyu.ning@linux.intel.com>
Tested-by: Vincent Guittot <vincent.guittot@linaro.org>
Link: https://lkml.kernel.org/r/20210422123308.617407840@infradead.org
parent 539f6512
kernel/sched/core.c  +8 −7
@@ -5459,16 +5459,15 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)

 	/* reset state */
 	rq->core->core_cookie = 0UL;
+	if (rq->core->core_forceidle) {
+		need_sync = true;
+		rq->core->core_forceidle = false;
+	}
 	for_each_cpu(i, smt_mask) {
 		struct rq *rq_i = cpu_rq(i);
 
 		rq_i->core_pick = NULL;
 
-		if (rq_i->core_forceidle) {
-			need_sync = true;
-			rq_i->core_forceidle = false;
-		}
-
 		if (i != cpu)
 			update_rq_clock(rq_i);
 	}
@@ -5588,8 +5587,10 @@ next_class:;
 		if (!rq_i->core_pick)
 			continue;
 
-		if (is_task_rq_idle(rq_i->core_pick) && rq_i->nr_running)
-			rq_i->core_forceidle = true;
+		if (is_task_rq_idle(rq_i->core_pick) && rq_i->nr_running &&
+		    !rq_i->core->core_forceidle) {
+			rq_i->core->core_forceidle = true;
+		}
 
 		if (i == cpu) {
 			rq_i->core_pick = NULL;
kernel/sched/fair.c  +40 −0
@@ -10767,6 +10767,44 @@ static void rq_offline_fair(struct rq *rq)

 #endif /* CONFIG_SMP */
 
+#ifdef CONFIG_SCHED_CORE
+static inline bool
+__entity_slice_used(struct sched_entity *se, int min_nr_tasks)
+{
+	u64 slice = sched_slice(cfs_rq_of(se), se);
+	u64 rtime = se->sum_exec_runtime - se->prev_sum_exec_runtime;
+
+	return (rtime * min_nr_tasks > slice);
+}
+
+#define MIN_NR_TASKS_DURING_FORCEIDLE	2
+static inline void task_tick_core(struct rq *rq, struct task_struct *curr)
+{
+	if (!sched_core_enabled(rq))
+		return;
+
+	/*
+	 * If runqueue has only one task which used up its slice and
+	 * if the sibling is forced idle, then trigger schedule to
+	 * give forced idle task a chance.
+	 *
+	 * sched_slice() considers only this active rq and it gets the
+	 * whole slice. But during force idle, we have siblings acting
+	 * like a single runqueue and hence we need to consider runnable
+	 * tasks on this cpu and the forced idle cpu. Ideally, we should
+	 * go through the forced idle rq, but that would be a perf hit.
+	 * We can assume that the forced idle cpu has at least
+	 * MIN_NR_TASKS_DURING_FORCEIDLE - 1 tasks and use that to check
+	 * if we need to give up the cpu.
+	 */
+	if (rq->core->core_forceidle && rq->cfs.nr_running == 1 &&
+	    __entity_slice_used(&curr->se, MIN_NR_TASKS_DURING_FORCEIDLE))
+		resched_curr(rq);
+}
+#else
+static inline void task_tick_core(struct rq *rq, struct task_struct *curr) {}
+#endif
+
 /*
  * scheduler tick hitting a task of our scheduling class.
  *
@@ -10790,6 +10828,8 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)

 	update_misfit_status(curr, rq);
 	update_overutilized_status(task_rq(curr));
+
+	task_tick_core(rq, curr);
 }
 
 /*
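
To make the new __entity_slice_used() check above concrete, here is a small
userspace sketch of the same arithmetic. The standalone function, the 3 ms
slice and the runtime values are illustrative assumptions, not taken from the
kernel: with one runnable task on this runqueue and the sibling forced idle,
the task is treated as owning only slice / MIN_NR_TASKS_DURING_FORCEIDLE of
the CPU, so a resched is warranted once its runtime exceeds half of its
nominal slice.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Same test as __entity_slice_used(): the entity has consumed its share of
 * the slice once rtime * min_nr_tasks exceeds the slice length. */
static bool entity_slice_used(uint64_t rtime_ns, uint64_t slice_ns, int min_nr_tasks)
{
	return rtime_ns * (uint64_t)min_nr_tasks > slice_ns;
}

int main(void)
{
	const uint64_t slice_ns = 3000000;	/* illustrative 3 ms slice */
	const int min_nr_tasks = 2;		/* MIN_NR_TASKS_DURING_FORCEIDLE */

	/* 1.4 ms of runtime: under half the slice, no resched yet (prints 0) */
	printf("%d\n", entity_slice_used(1400000, slice_ns, min_nr_tasks));
	/* 1.6 ms of runtime: over half the slice, resched_curr() would fire (prints 1) */
	printf("%d\n", entity_slice_used(1600000, slice_ns, min_nr_tasks));
	return 0;
}
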
kernel/sched/sched.h  +1 −1
@@ -1083,12 +1083,12 @@ struct rq {
 	unsigned int		core_enabled;
 	unsigned int		core_sched_seq;
 	struct rb_root		core_tree;
-	unsigned char		core_forceidle;
 
 	/* shared state */
 	unsigned int		core_task_seq;
 	unsigned int		core_pick_seq;
 	unsigned long		core_cookie;
+	unsigned char		core_forceidle;
 #endif
 };
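
For readers less familiar with the core-scheduling data layout: each SMT
sibling's rq carries a core pointer to a designated leader rq, and fields
declared below the /* shared state */ comment are accessed through that
pointer. Moving core_forceidle into the shared section is what lets the tick
handler on one sibling (task_tick_core() above) observe that some CPU in its
core is forced idle. The following toy model, with simplified types and a
made-up field set rather than kernel code, sketches that access pattern:

#include <stdbool.h>

/* Toy per-CPU runqueue: ->core points at the leader rq of the SMT core,
 * so anything reached via rq->core is shared by all siblings. */
struct toy_rq {
	unsigned int	nr_running;	/* per-CPU state */
	struct toy_rq	*core;		/* leader rq of this SMT core */
	bool		core_forceidle;	/* meaningful only on the leader */
};

/* Any sibling that ends up forced idle marks the whole core... */
static void mark_core_forceidle(struct toy_rq *rq)
{
	rq->core->core_forceidle = true;
}

/* ...and any other sibling can observe that at tick time. */
static bool core_has_forceidle(struct toy_rq *rq)
{
	return rq->core->core_forceidle;
}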