Commit 453eaea6 authored by Zhang Qiao's avatar Zhang Qiao Committed by Yongqiang Liu
Browse files

sched/qos: Add qos_tg_{throttle,unthrottle}_{up,down}

hulk inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I4VZJT


CVE: NA

--------------------------------

1. The qos throttle path reuses tg_{throttle,unthrottle}_{up,down}, which
write several cfs-bandwidth fields; this may corrupt cfs-bandwidth
accounting data. So add dedicated qos_tg_{throttle,unthrottle}_{up,down}
callbacks for qos throttling.

2. Callers of walk_tg_tree_from() must hold the RCU read lock; the qos
throttle/unthrottle paths currently do not, so take it around the calls.

Signed-off-by: default avatarZhang Qiao <zhangqiao22@huawei.com>
Reviewed-by: default avatarChen Hui <judy.chenhui@huawei.com>
Signed-off-by: default avatarYongqiang Liu <liuyongqiang13@huawei.com>
parent 2701a7bb
Loading
Loading
Loading
Loading
+29 −10
Original line number Diff line number Diff line
@@ -5120,6 +5120,10 @@ static void __maybe_unused unthrottle_offline_cfs_rqs(struct rq *rq)

	lockdep_assert_held(&rq->lock);

#ifdef CONFIG_QOS_SCHED
	unthrottle_qos_cfs_rqs(cpu_of(rq));
#endif

	rcu_read_lock();
	list_for_each_entry_rcu(tg, &task_groups, list) {
		struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
@@ -5142,10 +5146,6 @@ static void __maybe_unused unthrottle_offline_cfs_rqs(struct rq *rq)
			unthrottle_cfs_rq(cfs_rq);
	}
	rcu_read_unlock();

#ifdef CONFIG_QOS_SCHED
	unthrottle_qos_cfs_rqs(cpu_of(rq));
#endif
}

#else /* CONFIG_CFS_BANDWIDTH */
@@ -6890,6 +6890,27 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_

#ifdef CONFIG_QOS_SCHED
static void start_qos_hrtimer(int cpu);

/*
 * Per-group callback for walk_tg_tree_from(), used on the qos unthrottle
 * path. @data is the struct rq being operated on; decrement the
 * throttle_count of @tg's cfs_rq on that cpu.
 *
 * Unlike tg_unthrottle_up(), this deliberately touches no other
 * cfs-bandwidth state, so qos throttling cannot corrupt bandwidth
 * accounting fields.
 */
static int qos_tg_unthrottle_up(struct task_group *tg, void *data)
{
	struct rq *rq = data;
	struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];

	cfs_rq->throttle_count--;

	return 0;
}

/*
 * Per-group callback for walk_tg_tree_from(), used on the qos throttle
 * path. @data is the struct rq being operated on; increment the
 * throttle_count of @tg's cfs_rq on that cpu.
 *
 * Mirror of qos_tg_unthrottle_up(); kept separate from tg_throttle_down()
 * so the qos path does not write any other cfs-bandwidth fields.
 */
static int qos_tg_throttle_down(struct task_group *tg, void *data)
{
	struct rq *rq = data;
	struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];

	cfs_rq->throttle_count++;

	return 0;
}

static void throttle_qos_cfs_rq(struct cfs_rq *cfs_rq)
{
	struct rq *rq = rq_of(cfs_rq);
@@ -6900,7 +6921,7 @@ static void throttle_qos_cfs_rq(struct cfs_rq *cfs_rq)

	/* freeze hierarchy runnable averages while throttled */
	rcu_read_lock();
	walk_tg_tree_from(cfs_rq->tg, tg_throttle_down, tg_nop, (void *)rq);
	walk_tg_tree_from(cfs_rq->tg, qos_tg_throttle_down, tg_nop, (void *)rq);
	rcu_read_unlock();

	task_delta = cfs_rq->h_nr_running;
@@ -6928,7 +6949,6 @@ static void throttle_qos_cfs_rq(struct cfs_rq *cfs_rq)
		start_qos_hrtimer(cpu_of(rq));

	cfs_rq->throttled = 1;
	cfs_rq->throttled_clock = rq_clock(rq);

	list_add(&cfs_rq->qos_throttled_list,
		 &per_cpu(qos_throttled_cfs_rq, cpu_of(rq)));
@@ -6937,7 +6957,6 @@ static void throttle_qos_cfs_rq(struct cfs_rq *cfs_rq)
static void unthrottle_qos_cfs_rq(struct cfs_rq *cfs_rq)
{
	struct rq *rq = rq_of(cfs_rq);
	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
	struct sched_entity *se;
	int enqueue = 1;
	long task_delta, idle_task_delta;
@@ -6947,12 +6966,12 @@ static void unthrottle_qos_cfs_rq(struct cfs_rq *cfs_rq)
	cfs_rq->throttled = 0;

	update_rq_clock(rq);

	cfs_b->throttled_time += rq_clock(rq) - cfs_rq->throttled_clock;
	list_del_init(&cfs_rq->qos_throttled_list);

	/* update hierarchical throttle state */
	walk_tg_tree_from(cfs_rq->tg, tg_nop, tg_unthrottle_up, (void *)rq);
	rcu_read_lock();
	walk_tg_tree_from(cfs_rq->tg, tg_nop, qos_tg_unthrottle_up, (void *)rq);
	rcu_read_unlock();

	if (!cfs_rq->load.weight)
		return;