Commit 57b5487b authored by Guan Jing, committed by Tiancheng Lu

sched/fair: Start tracking qos_offline tasks count in cfs_rq

hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I6SIY2



-------------------------------

Track how many tasks with the qos offline policy (SCHED_IDLE tasks in
a task group whose qos_level is QOS_LEVEL_OFFLINE) are present in each
cfs_rq. The new qos_idle_h_nr_running count already lets the SMT
expeller treat a CPU running only such tasks as idle, and later
commits will build on it.
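
A minimal sketch of the accounting, following the existing
idle_h_nr_running pattern (simplified, not the exact patched code):
the per-task predicate is sampled once at enqueue/dequeue (and
throttle/unthrottle) time and the delta is propagated up the
sched_entity hierarchy.

	/* 1 if p is SCHED_IDLE inside an offline (qos_level == -1) group */
	int qos_idle = task_has_qos_idle_policy(p);

	for_each_sched_entity(se) {
		struct cfs_rq *cfs_rq = cfs_rq_of(se);

		cfs_rq->h_nr_running++;
		cfs_rq->idle_h_nr_running += idle_h_nr_running;
		cfs_rq->qos_idle_h_nr_running += qos_idle;	/* new counter */
	}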

Signed-off-by: Guan Jing <guanjing6@huawei.com>
parent ab866dc7
kernel/sched/fair.c +71 −5
@@ -4886,6 +4886,9 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq)
	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
	struct sched_entity *se;
	long task_delta, idle_task_delta, dequeue = 1;
+#ifdef CONFIG_QOS_SCHED_SMT_EXPELLER
+	long qos_idle_delta;
+#endif

	raw_spin_lock(&cfs_b->lock);
	/* This will start the period timer if necessary */
@@ -4917,6 +4920,10 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq)

	task_delta = cfs_rq->h_nr_running;
	idle_task_delta = cfs_rq->idle_h_nr_running;
+#ifdef CONFIG_QOS_SCHED_SMT_EXPELLER
+	qos_idle_delta = cfs_rq->qos_idle_h_nr_running;
+#endif
+
	for_each_sched_entity(se) {
		struct cfs_rq *qcfs_rq = cfs_rq_of(se);
		/* throttled entity or throttle-on-deactivate */
@@ -4932,6 +4939,9 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq)

		qcfs_rq->h_nr_running -= task_delta;
		qcfs_rq->idle_h_nr_running -= idle_task_delta;
+#ifdef CONFIG_QOS_SCHED_SMT_EXPELLER
+		qcfs_rq->qos_idle_h_nr_running -= qos_idle_delta;
+#endif

		if (qcfs_rq->load.weight)
			dequeue = 0;
@@ -4959,6 +4969,9 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
	struct sched_entity *se;
	long task_delta, idle_task_delta;
+#ifdef CONFIG_QOS_SCHED_SMT_EXPELLER
+	long qos_idle_delta;
+#endif

	se = cfs_rq->tg->se[cpu_of(rq)];

@@ -4987,6 +5000,9 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)

	task_delta = cfs_rq->h_nr_running;
	idle_task_delta = cfs_rq->idle_h_nr_running;
+#ifdef CONFIG_QOS_SCHED_SMT_EXPELLER
+	qos_idle_delta = cfs_rq->qos_idle_h_nr_running;
+#endif
	for_each_sched_entity(se) {
		if (se->on_rq)
			break;
@@ -4995,6 +5011,9 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)

		cfs_rq->h_nr_running += task_delta;
		cfs_rq->idle_h_nr_running += idle_task_delta;
+#ifdef CONFIG_QOS_SCHED_SMT_EXPELLER
+		cfs_rq->qos_idle_h_nr_running += qos_idle_delta;
+#endif

		/* end evaluation on encountering a throttled cfs_rq */
		if (cfs_rq_throttled(cfs_rq))
@@ -5009,7 +5028,9 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)

		cfs_rq->h_nr_running += task_delta;
		cfs_rq->idle_h_nr_running += idle_task_delta;
-
+#ifdef CONFIG_QOS_SCHED_SMT_EXPELLER
+		cfs_rq->qos_idle_h_nr_running += qos_idle_delta;
+#endif

		/* end evaluation on encountering a throttled cfs_rq */
		if (cfs_rq_throttled(cfs_rq))
@@ -5643,6 +5664,9 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
	struct cfs_rq *cfs_rq;
	struct sched_entity *se = &p->se;
	int idle_h_nr_running = task_has_idle_policy(p);
+#ifdef CONFIG_QOS_SCHED_SMT_EXPELLER
+	int qos_idle_h_nr_running = task_has_qos_idle_policy(p);
+#endif
	int task_new = !(flags & ENQUEUE_WAKEUP);
	unsigned int prev_nr = rq->cfs.h_nr_running;

@@ -5670,6 +5694,9 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)

		cfs_rq->h_nr_running++;
		cfs_rq->idle_h_nr_running += idle_h_nr_running;
+#ifdef CONFIG_QOS_SCHED_SMT_EXPELLER
+		cfs_rq->qos_idle_h_nr_running += qos_idle_h_nr_running;
+#endif

		/* end evaluation on encountering a throttled cfs_rq */
		if (cfs_rq_throttled(cfs_rq))
@@ -5687,6 +5714,9 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)

		cfs_rq->h_nr_running++;
		cfs_rq->idle_h_nr_running += idle_h_nr_running;
+#ifdef CONFIG_QOS_SCHED_SMT_EXPELLER
+		cfs_rq->qos_idle_h_nr_running += qos_idle_h_nr_running;
+#endif

		/* end evaluation on encountering a throttled cfs_rq */
		if (cfs_rq_throttled(cfs_rq))
@@ -5761,6 +5791,9 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
	struct sched_entity *se = &p->se;
	int task_sleep = flags & DEQUEUE_SLEEP;
	int idle_h_nr_running = task_has_idle_policy(p);
+#ifdef CONFIG_QOS_SCHED_SMT_EXPELLER
+	int qos_idle_h_nr_running = task_has_qos_idle_policy(p);
+#endif
	unsigned int prev_nr = rq->cfs.h_nr_running;
	bool was_sched_idle = sched_idle_rq(rq);

@@ -5772,6 +5805,9 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)

		cfs_rq->h_nr_running--;
		cfs_rq->idle_h_nr_running -= idle_h_nr_running;
+#ifdef CONFIG_QOS_SCHED_SMT_EXPELLER
+		cfs_rq->qos_idle_h_nr_running -= qos_idle_h_nr_running;
+#endif

		/* end evaluation on encountering a throttled cfs_rq */
		if (cfs_rq_throttled(cfs_rq))
@@ -5801,6 +5837,9 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)

		cfs_rq->h_nr_running--;
		cfs_rq->idle_h_nr_running -= idle_h_nr_running;
+#ifdef CONFIG_QOS_SCHED_SMT_EXPELLER
+		cfs_rq->qos_idle_h_nr_running -= qos_idle_h_nr_running;
+#endif

		/* end evaluation on encountering a throttled cfs_rq */
		if (cfs_rq_throttled(cfs_rq))
@@ -7455,7 +7494,9 @@ static void throttle_qos_cfs_rq(struct cfs_rq *cfs_rq)
	struct sched_entity *se;
	unsigned int prev_nr = cfs_rq->h_nr_running;
	long task_delta, idle_task_delta, dequeue = 1;
-
+#ifdef CONFIG_QOS_SCHED_SMT_EXPELLER
+	long qos_idle_delta;
+#endif
	se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))];

	/* freeze hierarchy runnable averages while throttled */
@@ -7465,6 +7506,9 @@ static void throttle_qos_cfs_rq(struct cfs_rq *cfs_rq)

	task_delta = cfs_rq->h_nr_running;
	idle_task_delta = cfs_rq->idle_h_nr_running;
+#ifdef CONFIG_QOS_SCHED_SMT_EXPELLER
+	qos_idle_delta = cfs_rq->qos_idle_h_nr_running;
+#endif
	for_each_sched_entity(se) {
		struct cfs_rq *qcfs_rq = cfs_rq_of(se);
		/* throttled entity or throttle-on-deactivate */
@@ -7480,6 +7524,9 @@ static void throttle_qos_cfs_rq(struct cfs_rq *cfs_rq)

		qcfs_rq->h_nr_running -= task_delta;
		qcfs_rq->idle_h_nr_running -= idle_task_delta;
+#ifdef CONFIG_QOS_SCHED_SMT_EXPELLER
+		qcfs_rq->qos_idle_h_nr_running -= qos_idle_delta;
+#endif

		if (qcfs_rq->load.weight)
			dequeue = 0;
@@ -7507,6 +7554,9 @@ static void unthrottle_qos_cfs_rq(struct cfs_rq *cfs_rq)
	struct sched_entity *se;
	unsigned int prev_nr = cfs_rq->h_nr_running;
	long task_delta, idle_task_delta;
+#ifdef CONFIG_QOS_SCHED_SMT_EXPELLER
+	long qos_idle_delta;
+#endif

	se = cfs_rq->tg->se[cpu_of(rq)];

@@ -7528,6 +7578,9 @@ static void unthrottle_qos_cfs_rq(struct cfs_rq *cfs_rq)

	task_delta = cfs_rq->h_nr_running;
	idle_task_delta = cfs_rq->idle_h_nr_running;
+#ifdef CONFIG_QOS_SCHED_SMT_EXPELLER
+	qos_idle_delta = cfs_rq->qos_idle_h_nr_running;
+#endif
	for_each_sched_entity(se) {
		if (se->on_rq)
			break;
@@ -7537,6 +7590,9 @@ static void unthrottle_qos_cfs_rq(struct cfs_rq *cfs_rq)

		cfs_rq->h_nr_running += task_delta;
		cfs_rq->idle_h_nr_running += idle_task_delta;
+#ifdef CONFIG_QOS_SCHED_SMT_EXPELLER
+		cfs_rq->qos_idle_h_nr_running += qos_idle_delta;
+#endif

		if (cfs_rq_throttled(cfs_rq))
			goto unthrottle_throttle;
@@ -7550,7 +7606,9 @@ static void unthrottle_qos_cfs_rq(struct cfs_rq *cfs_rq)

		cfs_rq->h_nr_running += task_delta;
		cfs_rq->idle_h_nr_running += idle_task_delta;
-
+#ifdef CONFIG_QOS_SCHED_SMT_EXPELLER
+		cfs_rq->qos_idle_h_nr_running += qos_idle_delta;
+#endif
		/* end evaluation on encountering a throttled cfs_rq */
		if (cfs_rq_throttled(cfs_rq))
			goto unthrottle_throttle;
@@ -7724,6 +7782,14 @@ static bool qos_smt_check_siblings_status(int this_cpu)
	return false;
}

+static bool qos_sched_idle_cpu(int this_cpu)
+{
+	struct rq *rq = cpu_rq(this_cpu);
+
+	return unlikely(rq->nr_running == rq->cfs.qos_idle_h_nr_running &&
+			rq->nr_running);
+}
+
static bool qos_smt_expelled(int this_cpu)
{
	/*
@@ -7731,7 +7797,7 @@ static bool qos_smt_expelled(int this_cpu)
	 * offline tasks enqueued, there is not suitable task,
	 * so pick_next_task_fair return null.
	 */
-	if (qos_smt_check_siblings_status(this_cpu) && sched_idle_cpu(this_cpu))
+	if (qos_smt_check_siblings_status(this_cpu) && qos_sched_idle_cpu(this_cpu))
		return true;

	return false;
@@ -7811,7 +7877,7 @@ static bool _qos_smt_check_need_resched(int this_cpu, struct rq *rq)
		}

		if (per_cpu(qos_smt_status, cpu) == QOS_LEVEL_OFFLINE &&
-		    rq->curr == rq->idle && sched_idle_cpu(this_cpu)) {
+		    rq->curr == rq->idle && qos_sched_idle_cpu(this_cpu)) {
			trace_sched_qos_smt_expel(cpu_curr(cpu), per_cpu(qos_smt_status, cpu));
			return true;
		}
kernel/sched/sched.h +20 −0
@@ -650,6 +650,12 @@ struct cfs_rq {
	unsigned int		forceidle_seq;
	KABI_FILL_HOLE(unsigned int kabi_hole)
	u64			min_vruntime_fi;
+#elif defined CONFIG_QOS_SCHED_SMT_EXPELLER && !defined(__GENKSYMS__)
+	union {
+		unsigned int            qos_idle_h_nr_running; /* qos_level:-1 */
+		unsigned long           qos_idle_h_nr_running_padding;
+	};
+	KABI_FILL_HOLE(unsigned long kabi_hole)
#else
	KABI_RESERVE(3)
	KABI_RESERVE(4)
@@ -3019,5 +3025,19 @@ static inline bool is_per_cpu_kthread(struct task_struct *p)
}
#endif

+#ifdef CONFIG_QOS_SCHED
+static inline int qos_idle_policy(int policy)
+{
+	return policy == QOS_LEVEL_OFFLINE;
+}
+#endif
+
+#ifdef CONFIG_QOS_SCHED_SMT_EXPELLER
+static inline int task_has_qos_idle_policy(struct task_struct *p)
+{
+	return qos_idle_policy(task_group(p)->qos_level) && p->policy == SCHED_IDLE;
+}
+#endif
+
void swake_up_all_locked(struct swait_queue_head *q);
void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait);