Commit d2dfa17b authored by Peter Zijlstra

sched: Trivial forced-newidle balancer



When a sibling is forced-idle to match the core-cookie, search for
matching tasks to fill the core.

rcu_read_unlock() can incur an infrequent deadlock in
sched_core_balance(). Fix this by using the RCU-sched flavor instead.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Tested-by: Don Hiatt <dhiatt@digitalocean.com>
Tested-by: Hongyu Ning <hongyu.ning@linux.intel.com>
Tested-by: Vincent Guittot <vincent.guittot@linaro.org>
Link: https://lkml.kernel.org/r/20210422123308.800048269@infradead.org
parent c6047c2e
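
The idea in miniature: a core forced idle by its cookie looks at sibling
runqueues for a runnable task carrying the same cookie and pulls it over,
unless that task's current core is already at least as well occupied. Below
is a minimal user-space C model of that selection step (struct task and its
fields are hypothetical stand-ins; the kernel walks a per-rq rb-tree via
sched_core_find()/sched_core_next() with both rq locks held, as the diff
shows):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Hypothetical stand-in for task_struct; only the fields the
 * steal decision looks at. */
struct task {
	unsigned long cookie;		/* core scheduling cookie */
	unsigned int occupation;	/* SMT siblings its core keeps busy */
	bool runnable;
};

/*
 * Model of the candidate checks in try_steal_cookie(): the task must
 * match the destination core's cookie, and stealing it must not leave
 * its current core worse off than the forced-idle destination.
 */
static struct task *steal_candidate(struct task *src, size_t n,
				    unsigned long dst_cookie,
				    unsigned int dst_occupation)
{
	for (size_t i = 0; i < n; i++) {
		struct task *p = &src[i];

		if (!p->runnable || p->cookie != dst_cookie)
			continue;
		if (p->occupation > dst_occupation)
			continue;	/* better utilized where it is */
		return p;
	}
	return NULL;
}

int main(void)
{
	struct task src_rq[] = {
		{ .cookie = 1, .occupation = 2, .runnable = true },
		{ .cookie = 2, .occupation = 1, .runnable = true },
	};
	struct task *p = steal_candidate(src_rq, 2, 2, 1);

	printf("cookie 2 candidate: %s\n", p ? "found" : "none");
	return 0;
}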
include/linux/sched.h  +1 −0
@@ -705,6 +705,7 @@ struct task_struct {
 #ifdef CONFIG_SCHED_CORE
 	struct rb_node			core_node;
 	unsigned long			core_cookie;
+	unsigned int			core_occupation;
 #endif
 
 #ifdef CONFIG_CGROUP_SCHED
kernel/sched/core.c  +129 −1
@@ -204,6 +204,21 @@ static struct task_struct *sched_core_find(struct rq *rq, unsigned long cookie)
 	return __node_2_sc(node);
 }
 
+static struct task_struct *sched_core_next(struct task_struct *p, unsigned long cookie)
+{
+	struct rb_node *node = &p->core_node;
+
+	node = rb_next(node);
+	if (!node)
+		return NULL;
+
+	p = container_of(node, struct task_struct, core_node);
+	if (p->core_cookie != cookie)
+		return NULL;
+
+	return p;
+}
+
 /*
  * Magic required such that:
  *
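
sched_core_next() works because the per-rq rb-tree is keyed by cookie, so
all tasks with the same cookie form one contiguous in-order run; rb_next()
plus a cookie check enumerates exactly the matching tasks. A runnable
user-space model, with a sorted array standing in for the tree (names here
are illustrative, not kernel API):

#include <stdio.h>

struct task { unsigned long cookie; int pid; };

static const struct task *model_core_next(const struct task *p,
					  const struct task *end,
					  unsigned long cookie)
{
	p++;				/* rb_next() */
	if (p == end || p->cookie != cookie)
		return NULL;		/* ran off the matching run */
	return p;
}

int main(void)
{
	const struct task tree[] = {	/* sorted by cookie */
		{ 1, 10 }, { 2, 20 }, { 2, 21 }, { 3, 30 },
	};
	const struct task *end = tree + 4;

	/* &tree[1] plays the role of sched_core_find()'s first match. */
	for (const struct task *p = &tree[1]; p; p = model_core_next(p, end, 2))
		printf("cookie 2 -> pid %d\n", p->pid);
	return 0;
}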
@@ -5389,8 +5404,8 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 	const struct sched_class *class;
 	const struct cpumask *smt_mask;
 	bool fi_before = false;
+	int i, j, cpu, occ = 0;
 	bool need_sync;
-	int i, j, cpu;
 
 	if (!sched_core_enabled(rq))
 		return __pick_next_task(rq, prev, rf);
@@ -5512,6 +5527,9 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 			if (!p)
 				continue;
 
+			if (!is_task_rq_idle(p))
+				occ++;
+
 			rq_i->core_pick = p;
 			if (rq_i->idle == p && rq_i->nr_running) {
 				rq->core->core_forceidle = true;
@@ -5543,6 +5561,7 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 
 						cpu_rq(j)->core_pick = NULL;
 					}
+					occ = 1;
 					goto again;
 				}
 			}
@@ -5588,6 +5607,8 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 		if (!(fi_before && rq->core->core_forceidle))
 			task_vruntime_update(rq_i, rq_i->core_pick, rq->core->core_forceidle);
 
+		rq_i->core_pick->core_occupation = occ;
+
 		if (i == cpu) {
 			rq_i->core_pick = NULL;
 			continue;
@@ -5609,6 +5630,113 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 	return next;
 }
 
+static bool try_steal_cookie(int this, int that)
+{
+	struct rq *dst = cpu_rq(this), *src = cpu_rq(that);
+	struct task_struct *p;
+	unsigned long cookie;
+	bool success = false;
+
+	local_irq_disable();
+	double_rq_lock(dst, src);
+
+	cookie = dst->core->core_cookie;
+	if (!cookie)
+		goto unlock;
+
+	if (dst->curr != dst->idle)
+		goto unlock;
+
+	p = sched_core_find(src, cookie);
+	if (p == src->idle)
+		goto unlock;
+
+	do {
+		if (p == src->core_pick || p == src->curr)
+			goto next;
+
+		if (!cpumask_test_cpu(this, &p->cpus_mask))
+			goto next;
+
+		if (p->core_occupation > dst->idle->core_occupation)
+			goto next;
+
+		p->on_rq = TASK_ON_RQ_MIGRATING;
+		deactivate_task(src, p, 0);
+		set_task_cpu(p, this);
+		activate_task(dst, p, 0);
+		p->on_rq = TASK_ON_RQ_QUEUED;
+
+		resched_curr(dst);
+
+		success = true;
+		break;
+
+next:
+		p = sched_core_next(p, cookie);
+	} while (p);
+
+unlock:
+	double_rq_unlock(dst, src);
+	local_irq_enable();
+
+	return success;
+}
+
+static bool steal_cookie_task(int cpu, struct sched_domain *sd)
+{
+	int i;
+
+	for_each_cpu_wrap(i, sched_domain_span(sd), cpu) {
+		if (i == cpu)
+			continue;
+
+		if (need_resched())
+			break;
+
+		if (try_steal_cookie(cpu, i))
+			return true;
+	}
+
+	return false;
+}
+
+static void sched_core_balance(struct rq *rq)
+{
+	struct sched_domain *sd;
+	int cpu = cpu_of(rq);
+
+	preempt_disable();
+	rcu_read_lock();
+	raw_spin_rq_unlock_irq(rq);
+	for_each_domain(cpu, sd) {
+		if (need_resched())
+			break;
+
+		if (steal_cookie_task(cpu, sd))
+			break;
+	}
+	raw_spin_rq_lock_irq(rq);
+	rcu_read_unlock();
+	preempt_enable();
+}
+
+static DEFINE_PER_CPU(struct callback_head, core_balance_head);
+
+void queue_core_balance(struct rq *rq)
+{
+	if (!sched_core_enabled(rq))
+		return;
+
+	if (!rq->core->core_cookie)
+		return;
+
+	if (!rq->nr_running) /* not forced idle */
+		return;
+
+	queue_balance_callback(rq, &per_cpu(core_balance_head, rq->cpu), sched_core_balance);
+}
+
 static inline void sched_core_cpu_starting(unsigned int cpu)
 {
 	const struct cpumask *smt_mask = cpu_smt_mask(cpu);
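
steal_cookie_task() scans the domain with for_each_cpu_wrap(), which begins
the walk at the local CPU and wraps around the span, so successive balance
passes spread their first steal attempts across the domain rather than
always starting from the lowest CPU; the loop body skips the local CPU and
bails out as soon as a resched is pending. A sketch of the resulting order
(plain C model; the fixed CPU count and starting CPU are assumptions):

#include <stdio.h>

#define NR_CPUS 4	/* assumed domain span: CPUs 0..3 */

int main(void)
{
	int cpu = 2;	/* forced-idle CPU initiating the balance */

	/* for_each_cpu_wrap() visits cpu, cpu+1, ... wrapping to 0;
	 * steal_cookie_task() then skips i == cpu, giving this order. */
	for (int k = 1; k < NR_CPUS; k++) {
		int i = (cpu + k) % NR_CPUS;
		printf("try_steal_cookie(%d, %d)\n", cpu, i);
	}
	return 0;
}

Note also how sched_core_balance() above releases the rq lock for the
domain walk and brackets it with preempt_disable()/rcu_read_lock(): with
preemption disabled, the read side also qualifies as an RCU-sched critical
section, which is the flavor the changelog's deadlock note calls for.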
kernel/sched/idle.c  +1 −0
@@ -437,6 +437,7 @@ static void set_next_task_idle(struct rq *rq, struct task_struct *next, bool fir
 {
 	update_idle_core(rq);
 	schedstat_inc(rq->sched_goidle);
+	queue_core_balance(rq);
 }
 
 #ifdef CONFIG_SMP
kernel/sched/sched.h  +6 −0
@@ -1170,6 +1170,8 @@ static inline raw_spinlock_t *__rq_lockp(struct rq *rq)
 
 bool cfs_prio_less(struct task_struct *a, struct task_struct *b, bool fi);
 
+extern void queue_core_balance(struct rq *rq);
+
 #else /* !CONFIG_SCHED_CORE */
 
 static inline bool sched_core_enabled(struct rq *rq)
@@ -1192,6 +1194,10 @@ static inline raw_spinlock_t *__rq_lockp(struct rq *rq)
 	return &rq->__lock;
 }
 
+static inline void queue_core_balance(struct rq *rq)
+{
+}
+
 #endif /* CONFIG_SCHED_CORE */
 
 static inline void lockdep_assert_rq_held(struct rq *rq)
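
The sched.h pairing above is the usual kernel config-stub idiom: a real
declaration under CONFIG_SCHED_CORE and an empty static inline otherwise,
so set_next_task_idle() can call queue_core_balance() unconditionally and
the call compiles away when core scheduling is off. A self-contained
illustration of the pattern (the int parameter is a placeholder for
struct rq *):

#include <stdio.h>

/* Toggle to model CONFIG_SCHED_CORE=y; left off here so the stub is used. */
/* #define CONFIG_SCHED_CORE 1 */

#ifdef CONFIG_SCHED_CORE
void queue_core_balance(int rq);	/* real implementation elsewhere */
#else
static inline void queue_core_balance(int rq) { }	/* compiles to nothing */
#endif

int main(void)
{
	queue_core_balance(0);	/* call site needs no #ifdef */
	printf("stub pattern: call site unchanged either way\n");
	return 0;
}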