Commit d0fe0b9c authored by Dietmar Eggemann, committed by Ingo Molnar

sched/fair: Simplify post_init_entity_util_avg() by calling it with a task_struct pointer argument



Since commit:

  d0326691 ("sched/fair: Fix task group initialization")

the utilization of a sched entity representing a task group is no longer
initialized to any value other than 0. So post_init_entity_util_avg() is
only used for tasks, not for group sched entities.

Make this clear by calling it with a task_struct pointer argument, which
also eliminates the entity_is_task(se) condition in the fork path, and get
rid of the stale comment in remove_entity_load_avg() accordingly.
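
For reference, the check that goes away distinguishes task sched entities
from group sched entities. A sketch of the helper, based on its definition
in kernel/sched/fair.c around this kernel version:

#ifdef CONFIG_FAIR_GROUP_SCHED
/* A group entity owns a cfs_rq (se->my_q); a task entity does not. */
#define entity_is_task(se)	(!se->my_q)
#else
#define entity_is_task(se)	1
#endif

With a task_struct pointer argument the caller has already made that
distinction, so neither the check nor the task_of(se) lookup is needed.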

Signed-off-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Morten Rasmussen <morten.rasmussen@arm.com>
Cc: Patrick Bellasi <patrick.bellasi@arm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Quentin Perret <quentin.perret@arm.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Valentin Schneider <valentin.schneider@arm.com>
Cc: Vincent Guittot <vincent.guittot@linaro.org>
Link: https://lkml.kernel.org/r/20190122162501.12000-1-dietmar.eggemann@arm.com


Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 039ae8bc
kernel/sched/core.c +1 −1
@@ -2433,7 +2433,7 @@ void wake_up_new_task(struct task_struct *p)
#endif
	rq = __task_rq_lock(p, &rf);
	update_rq_clock(rq);
-	post_init_entity_util_avg(&p->se);
+	post_init_entity_util_avg(p);

	activate_task(rq, p, ENQUEUE_NOCLOCK);
	p->on_rq = TASK_ON_RQ_QUEUED;
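
As a side note, the fork-time call chain that reaches this hunk should be
roughly the following (a sketch; the surrounding function names are from
memory and may differ slightly between kernel versions):

/*
 * _do_fork()                                  kernel/fork.c
 *   -> wake_up_new_task(p)                    kernel/sched/core.c (above)
 *        -> post_init_entity_util_avg(p)      now takes the task directly
 *        -> activate_task(rq, p, ENQUEUE_NOCLOCK)
 */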
kernel/sched/fair.c +16 −22
@@ -759,8 +759,9 @@ static void attach_entity_cfs_rq(struct sched_entity *se);
 * Finally, that extrapolated util_avg is clamped to the cap (util_avg_cap)
 * if util_avg > util_avg_cap.
 */
-void post_init_entity_util_avg(struct sched_entity *se)
+void post_init_entity_util_avg(struct task_struct *p)
{
+	struct sched_entity *se = &p->se;
	struct cfs_rq *cfs_rq = cfs_rq_of(se);
	struct sched_avg *sa = &se->avg;
	long cpu_scale = arch_scale_cpu_capacity(NULL, cpu_of(rq_of(cfs_rq)));
@@ -778,8 +779,6 @@ void post_init_entity_util_avg(struct sched_entity *se)
		}
	}

-	if (entity_is_task(se)) {
-		struct task_struct *p = task_of(se);
	if (p->sched_class != &fair_sched_class) {
		/*
		 * For !fair tasks do:
@@ -794,7 +793,6 @@ void post_init_entity_util_avg(struct sched_entity *se)
		se->avg.last_update_time = cfs_rq_clock_pelt(cfs_rq);
		return;
	}
-	}

	attach_entity_cfs_rq(se);
}
@@ -803,7 +801,7 @@ void post_init_entity_util_avg(struct sched_entity *se)
void init_entity_runnable_average(struct sched_entity *se)
{
}
-void post_init_entity_util_avg(struct sched_entity *se)
+void post_init_entity_util_avg(struct task_struct *p)
{
}
static void update_tg_load_avg(struct cfs_rq *cfs_rq, int force)
@@ -3590,10 +3588,6 @@ void remove_entity_load_avg(struct sched_entity *se)
	 * tasks cannot exit without having gone through wake_up_new_task() ->
	 * post_init_entity_util_avg() which will have added things to the
	 * cfs_rq, so we can remove unconditionally.
-	 *
-	 * Similarly for groups, they will have passed through
-	 * post_init_entity_util_avg() before unregister_sched_fair_group()
-	 * calls this.
	 */

	sync_entity_load_avg(se);
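
For illustration, here is a minimal, self-contained user-space sketch of the
util_avg extrapolation described in the comment above
post_init_entity_util_avg() (the struct and the numbers are invented for the
example; the kernel operates on struct sched_avg under the rq lock):

#include <stdio.h>

/* Stand-in for the two cfs_rq PELT fields the extrapolation reads. */
struct example_cfs_rq { long util_avg; long load_avg; };

/*
 * Give a new task a share of the cfs_rq's utilization proportional to its
 * weight, capped at half of the CPU capacity that is still unused.
 */
static long initial_util_avg(struct example_cfs_rq *cfs_rq,
			     long se_weight, long cpu_scale)
{
	long cap = (cpu_scale - cfs_rq->util_avg) / 2;
	long util = 0;

	if (cap > 0) {
		if (cfs_rq->util_avg != 0) {
			util = cfs_rq->util_avg * se_weight /
			       (cfs_rq->load_avg + 1);
			if (util > cap)
				util = cap;
		} else {
			util = cap;
		}
	}
	return util;
}

int main(void)
{
	struct example_cfs_rq cfs_rq = { .util_avg = 300, .load_avg = 2048 };

	/* e.g. a NICE_0 task (weight 1024) on a CPU of capacity 1024 */
	printf("initial util_avg = %ld\n",
	       initial_util_avg(&cfs_rq, 1024, 1024));	/* prints 149 */
	return 0;
}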
kernel/sched/sched.h +1 −1
@@ -1800,7 +1800,7 @@ extern void init_dl_rq_bw_ratio(struct dl_rq *dl_rq);
unsigned long to_ratio(u64 period, u64 runtime);

extern void init_entity_runnable_average(struct sched_entity *se);
-extern void post_init_entity_util_avg(struct sched_entity *se);
+extern void post_init_entity_util_avg(struct task_struct *p);

#ifdef CONFIG_NO_HZ_FULL
extern bool sched_can_stop_tick(struct rq *rq);