Commit e4108f66 authored by Guan Jing's avatar Guan Jing Committed by Tiancheng Lu
Browse files

sched/fair: Introduce QOS_SMT_EXPELL priority reversion mechanism

hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I6SIY2



-------------------------------

Here is a typical case in which priority inversion can occasionally be
caused by SMT expelling: assume there are two SMT cores, cA and cB, with
online tasks running on cA and offline tasks on cB. With SMT
expelling, an online task will drive offline tasks off all SMT
cores and occupy them exclusively, which, in turn, starves the offline
tasks and prevents them from releasing resources that other, higher-priority
tasks need.

Hence, this patch introduces another mechanism to alleviate this
situation. For all offline tasks, a metric profiling the maximum task
expelling duration is set up, with a default value of 5 seconds. If
such an offline task exists, all offline tasks are made to enter a
short sleep (msleep) loop in the kernel before they return to
user mode; furthermore, if the two SMT cores (such as cA and cB) are
idle or have no online tasks to run, these offline tasks
will continue to run in user mode on the next schedule.

Signed-off-by: default avatarGuan Jing <guanjing6@huawei.com>
parent 57b5487b
Loading
Loading
Loading
Loading
+34 −12
Original line number Diff line number Diff line
@@ -137,6 +137,7 @@ static DEFINE_PER_CPU(int, qos_cpu_overload);
unsigned int sysctl_overload_detect_period = 5000;  /* in ms */
unsigned int sysctl_offline_wait_interval = 100;  /* in ms */
static int unthrottle_qos_cfs_rqs(int cpu);
static bool qos_smt_expelled(int this_cpu);
#endif

#ifdef CONFIG_QOS_SCHED_PRIO_LB
@@ -7480,6 +7481,15 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
}

#ifdef CONFIG_QOS_SCHED
/* Return true if @cpu's QoS overload hrtimer is currently queued or firing. */
static inline bool qos_timer_is_activated(int cpu)
{
	struct hrtimer *timer = per_cpu_ptr(&qos_overload_timer, cpu);

	return hrtimer_active(timer);
}

/* Cancel @cpu's QoS overload hrtimer, waiting for the handler if running. */
static inline void cancel_qos_timer(int cpu)
{
	struct hrtimer *timer = per_cpu_ptr(&qos_overload_timer, cpu);

	hrtimer_cancel(timer);
}

static inline bool is_offline_task(struct task_struct *p)
{
@@ -7539,7 +7549,7 @@ static void throttle_qos_cfs_rq(struct cfs_rq *cfs_rq)

	}

	if (list_empty(&per_cpu(qos_throttled_cfs_rq, cpu_of(rq))))
	if (!qos_timer_is_activated(cpu_of(rq)))
		start_qos_hrtimer(cpu_of(rq));

	cfs_rq->throttled = QOS_THROTTLED;
@@ -7639,10 +7649,6 @@ static void unthrottle_qos_cfs_rq(struct cfs_rq *cfs_rq)
	}

	assert_list_leaf_cfs_rq(rq);

	/* Determine whether we need to wake up potentially idle CPU: */
	if (rq->curr == rq->idle && rq->cfs.nr_running)
		resched_curr(rq);
}

static int __unthrottle_qos_cfs_rqs(int cpu)
@@ -7664,10 +7670,10 @@ static int __unthrottle_qos_cfs_rqs(int cpu)
static int unthrottle_qos_cfs_rqs(int cpu)
{
	int res;

	res = __unthrottle_qos_cfs_rqs(cpu);
	if (res)
		hrtimer_cancel(&(per_cpu(qos_overload_timer, cpu)));

	if (qos_timer_is_activated(cpu) && !qos_smt_expelled(cpu))
		cancel_qos_timer(cpu);

	return res;
}
@@ -7720,8 +7726,13 @@ static enum hrtimer_restart qos_overload_timer_handler(struct hrtimer *timer)
	struct rq *rq = this_rq();

	rq_lock_irqsave(rq, &rf);
	if (__unthrottle_qos_cfs_rqs(smp_processor_id()))
	__unthrottle_qos_cfs_rqs(smp_processor_id());
	__this_cpu_write(qos_cpu_overload, 1);

	/* Determine whether we need to wake up potentially idle CPU. */
	if (rq->curr == rq->idle && rq->cfs.nr_running)
		resched_curr(rq);

	rq_unlock_irqrestore(rq, &rf);

	return HRTIMER_NORESTART;
@@ -7761,6 +7772,12 @@ static void qos_schedule_throttle(struct task_struct *p)
	}
}

#ifndef CONFIG_QOS_SCHED_SMT_EXPELLER
/*
 * Stub used when CONFIG_QOS_SCHED_SMT_EXPELLER is not set: without the
 * SMT expeller, no CPU is ever considered SMT-expelled, so callers
 * (e.g. unthrottle_qos_cfs_rqs()) fall back to their plain behavior.
 */
static bool qos_smt_expelled(int this_cpu)
{
	return false;
}
#endif
#endif

#ifdef CONFIG_QOS_SCHED_SMT_EXPELLER
@@ -7948,8 +7965,12 @@ pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf

again:
#ifdef CONFIG_QOS_SCHED_SMT_EXPELLER
	if (qos_smt_expelled(this_cpu)) {
	if (qos_smt_expelled(this_cpu) && !__this_cpu_read(qos_cpu_overload)) {
		__this_cpu_write(qos_smt_status, QOS_LEVEL_OFFLINE);

		if (!qos_timer_is_activated(this_cpu))
			start_qos_hrtimer(this_cpu);

		schedstat_inc(rq->curr->se.statistics.nr_qos_smt_expelled);
		trace_sched_qos_smt_expelled(rq->curr, per_cpu(qos_smt_status, this_cpu));
		return NULL;
@@ -8161,6 +8182,7 @@ done: __maybe_unused;
		goto again;
	}

	if (!qos_smt_expelled(cpu_of(rq)))
		__this_cpu_write(qos_cpu_overload, 0);
#endif
	/*