Commit 2701a7bb authored by Zhang Qiao, committed by Yongqiang Liu

sched: Throttle offline task at tracehook_notify_resume()

hulk inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I4VZJT


CVE: NA

--------------------------------

Before, when the cpu was detected to be overloaded, we throttled
offline tasks in exit_to_usermode_loop() before returning to user
mode. Some architectures (e.g. arm64) could not support the QoS
scheduler this way, because on those platforms a task does not
return to userspace via exit_to_usermode_loop().

To solve this problem and support the QoS scheduler on all
architectures, when offline tasks need to be throttled, we set the
TIF_NOTIFY_RESUME flag on the offline task when it is picked and
throttle it in tracehook_notify_resume().
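
In short (a minimal sketch of the new flow, condensed from the hunks
below; qos_schedule_throttle_sketch is a hypothetical condensed name,
not the exact patch code):

  /* pick side: pick_next_task_fair() marks a picked offline task. */
  static void qos_schedule_throttle_sketch(struct task_struct *p)
  {
  	if (this_cpu_read(qos_cpu_overload) &&
  	    task_group(p)->qos_level < 0)
  		set_notify_resume(p);	/* sets TIF_NOTIFY_RESUME on p */
  }

  /*
   * resume side: the arch's return-to-user path sees TIF_NOTIFY_RESUME
   * and calls tracehook_notify_resume(), which now calls
   * sched_qos_offline_wait() to put the offline task to sleep until
   * the overload clears.
   */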

Signed-off-by: Zhang Qiao <zhangqiao22@huawei.com>
Reviewed-by: Chen Hui <judy.chenhui@huawei.com>
Signed-off-by: Yongqiang Liu <liuyongqiang13@huawei.com>
parent 70d21cfa
arch/x86/entry/common.c +1 −6
@@ -162,10 +162,6 @@ static void exit_to_usermode_loop(struct pt_regs *regs, u32 cached_flags)
 		if (cached_flags & _TIF_SIGPENDING)
 			do_signal(regs);
 
-#ifdef CONFIG_QOS_SCHED
-		sched_qos_offline_wait();
-#endif
-
 		if (cached_flags & _TIF_NOTIFY_RESUME) {
 			clear_thread_flag(TIF_NOTIFY_RESUME);
 			tracehook_notify_resume(regs);
@@ -198,8 +194,7 @@ __visible inline void prepare_exit_to_usermode(struct pt_regs *regs)
 
 	cached_flags = READ_ONCE(ti->flags);
 
-	if (unlikely((cached_flags & EXIT_TO_USERMODE_LOOP_FLAGS) ||
-		     sched_qos_cpu_overload()))
+	if (unlikely(cached_flags & EXIT_TO_USERMODE_LOOP_FLAGS))
 		exit_to_usermode_loop(regs, cached_flags);
 
 #ifdef CONFIG_COMPAT
include/linux/tracehook.h +4 −0
@@ -194,6 +194,10 @@ static inline void tracehook_notify_resume(struct pt_regs *regs)
 
 	mem_cgroup_handle_over_high();
 	blkcg_maybe_throttle_current();
+#ifdef CONFIG_QOS_SCHED
+	sched_qos_offline_wait();
+#endif
+
 }
 
 #endif	/* <linux/tracehook.h> */
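
Hooking tracehook_notify_resume() is what makes this work on every
architecture: each arch's return-to-user code handles TIF_NOTIFY_RESUME
with the same pattern (shown below as it appears in the x86 hunk above;
other architectures do the equivalent):

  if (cached_flags & _TIF_NOTIFY_RESUME) {
  	clear_thread_flag(TIF_NOTIFY_RESUME);
  	tracehook_notify_resume(regs);	/* -> sched_qos_offline_wait() */
  }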
kernel/sched/fair.c +27 −7
@@ -26,6 +26,7 @@
 #endif
 #ifdef CONFIG_QOS_SCHED
 #include <linux/delay.h>
+#include <linux/tracehook.h>
 #endif
 #include <trace/events/sched.h>
 
@@ -7029,20 +7030,18 @@ static bool check_qos_cfs_rq(struct cfs_rq *cfs_rq)
 void sched_qos_offline_wait(void)
 {
 	long qos_level;
+	unsigned long wait_interval;
 
 	while (unlikely(this_cpu_read(qos_cpu_overload))) {
 		rcu_read_lock();
 		qos_level = task_group(current)->qos_level;
 		rcu_read_unlock();
-		if (qos_level != -1 || signal_pending(current))
+		if (qos_level != -1 || fatal_signal_pending(current))
 			break;
-		msleep_interruptible(sysctl_offline_wait_interval);
+
+		wait_interval = msecs_to_jiffies(sysctl_offline_wait_interval);
+		schedule_timeout_killable(wait_interval);
 	}
 }
 
-int sched_qos_cpu_overload(void)
-{
-	return __this_cpu_read(qos_cpu_overload);
-}
-
 static enum hrtimer_restart qos_overload_timer_handler(struct hrtimer *timer)
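
A note on the primitive swapped in above (a sketch, not from this
patch; offline_wait_once_sketch is a hypothetical name):
schedule_timeout_killable() sleeps in TASK_KILLABLE, so only a fatal
signal wakes the task early, which is why the loop's exit test also
moves from signal_pending() to fatal_signal_pending():

  /* Sleep for @ms milliseconds; only a fatal signal cuts it short. */
  static void offline_wait_once_sketch(unsigned int ms)
  {
  	schedule_timeout_killable(msecs_to_jiffies(ms));
  }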
@@ -7075,6 +7074,23 @@ void init_qos_hrtimer(int cpu)
 	hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
 	hrtimer->function = qos_overload_timer_handler;
 }
+
+/*
+ * To avoid Priority inversion issues, when this cpu is qos_cpu_overload,
+ * we should schedule offline tasks to run so that they can leave kernel
+ * critical sections, and throttle them before returning to user mode.
+ */
+static void qos_schedule_throttle(struct task_struct *p)
+{
+	if (unlikely(current->flags & PF_KTHREAD))
+		return;
+
+	if (unlikely(this_cpu_read(qos_cpu_overload))) {
+		if (task_group(p)->qos_level < 0)
+			set_notify_resume(p);
+	}
+}
+
 #endif
 
 static struct task_struct *
@@ -7202,6 +7218,10 @@ done: __maybe_unused;
 	if (hrtick_enabled(rq))
 		hrtick_start_fair(rq, p);
 
+#ifdef CONFIG_QOS_SCHED
+	qos_schedule_throttle(p);
+#endif
+
 	return p;
 
 idle: