Commit 11637430 authored by Guan Jing, committed by Xia Fukun
Browse files

sched/fair: Start tracking qos_offline tasks count in cfs_rq

hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I8O3MY



-------------------------------

Track how many tasks are present with qos_offline_policy
in each cfs_rq. This will be used by later commits.

Signed-off-by: Guan Jing <guanjing6@huawei.com>
Signed-off-by: Xia Fukun <xiafukun@huawei.com>
parent c1f78687
Loading
Loading
Loading
Loading
+77 −4
Original line number Diff line number Diff line
@@ -5831,6 +5831,9 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq)
	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
	struct sched_entity *se;
	long task_delta, idle_task_delta, dequeue = 1;
#ifdef CONFIG_QOS_SCHED_SMT_EXPELLER
	long qos_idle_delta;
#endif

	raw_spin_lock(&cfs_b->lock);
	/* This will start the period timer if necessary */
@@ -5862,6 +5865,10 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq)

	task_delta = cfs_rq->h_nr_running;
	idle_task_delta = cfs_rq->idle_h_nr_running;
#ifdef CONFIG_QOS_SCHED_SMT_EXPELLER
	qos_idle_delta = cfs_rq->qos_idle_h_nr_running;
#endif

	for_each_sched_entity(se) {
		struct cfs_rq *qcfs_rq = cfs_rq_of(se);
		/* throttled entity or throttle-on-deactivate */
@@ -5875,6 +5882,9 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq)

		qcfs_rq->h_nr_running -= task_delta;
		qcfs_rq->idle_h_nr_running -= idle_task_delta;
#ifdef CONFIG_QOS_SCHED_SMT_EXPELLER
		qcfs_rq->qos_idle_h_nr_running -= qos_idle_delta;
#endif

		if (qcfs_rq->load.weight) {
			/* Avoid re-evaluating load for this entity: */
@@ -5897,6 +5907,9 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq)

		qcfs_rq->h_nr_running -= task_delta;
		qcfs_rq->idle_h_nr_running -= idle_task_delta;
#ifdef CONFIG_QOS_SCHED_SMT_EXPELLER
		qcfs_rq->qos_idle_h_nr_running -= qos_idle_delta;
#endif
	}

	/* At this point se is NULL and we are at root level*/
@@ -5923,6 +5936,9 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
	struct sched_entity *se;
	long task_delta, idle_task_delta;
#ifdef CONFIG_QOS_SCHED_SMT_EXPELLER
	long qos_idle_delta;
#endif

	se = cfs_rq->tg->se[cpu_of(rq)];

@@ -5965,6 +5981,9 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)

	task_delta = cfs_rq->h_nr_running;
	idle_task_delta = cfs_rq->idle_h_nr_running;
#ifdef CONFIG_QOS_SCHED_SMT_EXPELLER
	qos_idle_delta = cfs_rq->qos_idle_h_nr_running;
#endif
	for_each_sched_entity(se) {
		struct cfs_rq *qcfs_rq = cfs_rq_of(se);

@@ -5977,6 +5996,9 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)

		qcfs_rq->h_nr_running += task_delta;
		qcfs_rq->idle_h_nr_running += idle_task_delta;
#ifdef CONFIG_QOS_SCHED_SMT_EXPELLER
		qcfs_rq->qos_idle_h_nr_running += qos_idle_delta;
#endif

		/* end evaluation on encountering a throttled cfs_rq */
		if (cfs_rq_throttled(qcfs_rq))
@@ -5994,6 +6016,9 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)

		qcfs_rq->h_nr_running += task_delta;
		qcfs_rq->idle_h_nr_running += idle_task_delta;
#ifdef CONFIG_QOS_SCHED_SMT_EXPELLER
		qcfs_rq->qos_idle_h_nr_running += qos_idle_delta;
#endif

		/* end evaluation on encountering a throttled cfs_rq */
		if (cfs_rq_throttled(qcfs_rq))
@@ -6815,6 +6840,9 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
	struct cfs_rq *cfs_rq;
	struct sched_entity *se = &p->se;
	int idle_h_nr_running = task_has_idle_policy(p);
#ifdef CONFIG_QOS_SCHED_SMT_EXPELLER
	int qos_idle_h_nr_running = task_has_qos_idle_policy(p);
#endif
	int task_new = !(flags & ENQUEUE_WAKEUP);
	unsigned int prev_nr = rq->cfs.h_nr_running;

@@ -6842,6 +6870,9 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)

		cfs_rq->h_nr_running++;
		cfs_rq->idle_h_nr_running += idle_h_nr_running;
#ifdef CONFIG_QOS_SCHED_SMT_EXPELLER
		cfs_rq->qos_idle_h_nr_running += qos_idle_h_nr_running;
#endif

		if (cfs_rq_is_idle(cfs_rq))
			idle_h_nr_running = 1;
@@ -6862,7 +6893,9 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)

		cfs_rq->h_nr_running++;
		cfs_rq->idle_h_nr_running += idle_h_nr_running;

#ifdef CONFIG_QOS_SCHED_SMT_EXPELLER
		cfs_rq->qos_idle_h_nr_running += qos_idle_h_nr_running;
#endif
		if (cfs_rq_is_idle(cfs_rq))
			idle_h_nr_running = 1;

@@ -6912,6 +6945,9 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
	struct sched_entity *se = &p->se;
	int task_sleep = flags & DEQUEUE_SLEEP;
	int idle_h_nr_running = task_has_idle_policy(p);
#ifdef CONFIG_QOS_SCHED_SMT_EXPELLER
	int qos_idle_h_nr_running = task_has_qos_idle_policy(p);
#endif
	unsigned int prev_nr = rq->cfs.h_nr_running;
	bool was_sched_idle = sched_idle_rq(rq);

@@ -6923,6 +6959,9 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)

		cfs_rq->h_nr_running--;
		cfs_rq->idle_h_nr_running -= idle_h_nr_running;
#ifdef CONFIG_QOS_SCHED_SMT_EXPELLER
		cfs_rq->qos_idle_h_nr_running -= qos_idle_h_nr_running;
#endif

		if (cfs_rq_is_idle(cfs_rq))
			idle_h_nr_running = 1;
@@ -6955,7 +6994,9 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)

		cfs_rq->h_nr_running--;
		cfs_rq->idle_h_nr_running -= idle_h_nr_running;

#ifdef CONFIG_QOS_SCHED_SMT_EXPELLER
		cfs_rq->qos_idle_h_nr_running -= qos_idle_h_nr_running;
#endif
		if (cfs_rq_is_idle(cfs_rq))
			idle_h_nr_running = 1;

@@ -8636,6 +8677,9 @@ static void throttle_qos_cfs_rq(struct cfs_rq *cfs_rq)
	struct rq *rq = rq_of(cfs_rq);
	struct sched_entity *se;
	long task_delta, idle_task_delta;
#ifdef CONFIG_QOS_SCHED_SMT_EXPELLER
	long qos_idle_delta;
#endif
	unsigned int prev_nr = cfs_rq->h_nr_running;

	se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))];
@@ -8647,6 +8691,9 @@ static void throttle_qos_cfs_rq(struct cfs_rq *cfs_rq)

	task_delta = cfs_rq->h_nr_running;
	idle_task_delta = cfs_rq->idle_h_nr_running;
#ifdef CONFIG_QOS_SCHED_SMT_EXPELLER
	qos_idle_delta = cfs_rq->qos_idle_h_nr_running;
#endif
	for_each_sched_entity(se) {
		struct cfs_rq *qcfs_rq = cfs_rq_of(se);
		/* throttled entity or throttle-on-deactivate */
@@ -8657,6 +8704,9 @@ static void throttle_qos_cfs_rq(struct cfs_rq *cfs_rq)

		qcfs_rq->h_nr_running -= task_delta;
		qcfs_rq->idle_h_nr_running -= idle_task_delta;
#ifdef CONFIG_QOS_SCHED_SMT_EXPELLER
		qcfs_rq->qos_idle_h_nr_running -= qos_idle_delta;
#endif

		if (qcfs_rq->load.weight) {
			/* Avoid re-evaluating load for this entity: */
@@ -8679,6 +8729,9 @@ static void throttle_qos_cfs_rq(struct cfs_rq *cfs_rq)

		qcfs_rq->h_nr_running -= task_delta;
		qcfs_rq->idle_h_nr_running -= idle_task_delta;
#ifdef CONFIG_QOS_SCHED_SMT_EXPELLER
		qcfs_rq->qos_idle_h_nr_running -= qos_idle_delta;
#endif
	}

	/* At this point se is NULL and we are at root level*/
@@ -8702,6 +8755,9 @@ static void unthrottle_qos_cfs_rq(struct cfs_rq *cfs_rq)
	struct sched_entity *se;
	unsigned int prev_nr = cfs_rq->h_nr_running;
	long task_delta, idle_task_delta;
#ifdef CONFIG_QOS_SCHED_SMT_EXPELLER
	long qos_idle_delta;
#endif

	se = cfs_rq->tg->se[cpu_of(rq)];

@@ -8734,6 +8790,9 @@ static void unthrottle_qos_cfs_rq(struct cfs_rq *cfs_rq)

	task_delta = cfs_rq->h_nr_running;
	idle_task_delta = cfs_rq->idle_h_nr_running;
#ifdef CONFIG_QOS_SCHED_SMT_EXPELLER
	qos_idle_delta = cfs_rq->qos_idle_h_nr_running;
#endif
	for_each_sched_entity(se) {
		if (se->on_rq)
			break;
@@ -8743,6 +8802,9 @@ static void unthrottle_qos_cfs_rq(struct cfs_rq *cfs_rq)

		cfs_rq->h_nr_running += task_delta;
		cfs_rq->idle_h_nr_running += idle_task_delta;
#ifdef CONFIG_QOS_SCHED_SMT_EXPELLER
		cfs_rq->qos_idle_h_nr_running += qos_idle_delta;
#endif

		if (cfs_rq_throttled(cfs_rq))
			goto unthrottle_throttle;
@@ -8756,6 +8818,9 @@ static void unthrottle_qos_cfs_rq(struct cfs_rq *cfs_rq)

		cfs_rq->h_nr_running += task_delta;
		cfs_rq->idle_h_nr_running += idle_task_delta;
#ifdef CONFIG_QOS_SCHED_SMT_EXPELLER
		cfs_rq->qos_idle_h_nr_running += qos_idle_delta;
#endif

		/* end evaluation on encountering a throttled cfs_rq */
		if (cfs_rq_throttled(cfs_rq))
@@ -8920,6 +8985,14 @@ static bool qos_smt_check_siblings_status(int this_cpu)
	return false;
}

static bool qos_sched_idle_cpu(int this_cpu)
{
	struct rq *rq = cpu_rq(this_cpu);

	return unlikely(rq->nr_running == rq->cfs.qos_idle_h_nr_running &&
			rq->nr_running);
}

static bool qos_smt_expelled(int this_cpu)
{
	/*
@@ -8927,7 +9000,7 @@ static bool qos_smt_expelled(int this_cpu)
	 * offline tasks enqueued, there is not suitable task,
	 * so pick_next_task_fair return null.
	 */
	if (qos_smt_check_siblings_status(this_cpu) && sched_idle_cpu(this_cpu))
	if (qos_smt_check_siblings_status(this_cpu) && qos_sched_idle_cpu(this_cpu))
		return true;

	return false;
@@ -9007,7 +9080,7 @@ static bool _qos_smt_check_need_resched(int this_cpu, struct rq *rq)
		}

		if (per_cpu(qos_smt_status, cpu) == QOS_LEVEL_OFFLINE &&
		    rq->curr == rq->idle && sched_idle_cpu(this_cpu)) {
		    rq->curr == rq->idle && qos_sched_idle_cpu(this_cpu)) {
			trace_sched_qos_smt_expel(cpu_curr(cpu), per_cpu(qos_smt_status, cpu));
			return true;
		}
+22 −0
Original line number Diff line number Diff line
@@ -660,6 +660,14 @@ struct cfs_rq {
#if defined(CONFIG_QOS_SCHED)
	struct list_head	qos_throttled_list;
#endif

#ifdef CONFIG_QOS_SCHED_SMT_EXPELLER
	union {
		unsigned int            qos_idle_h_nr_running; /* qos_level:-1 */
		unsigned long           qos_idle_h_nr_running_padding;
	};
#endif

};

static inline int rt_bandwidth_enabled(void)
@@ -3310,6 +3318,20 @@ static inline bool is_per_cpu_kthread(struct task_struct *p)
}
#endif

#ifdef CONFIG_QOS_SCHED
static inline int qos_idle_policy(int policy)
{
	return policy == QOS_LEVEL_OFFLINE;
}
#endif

#ifdef CONFIG_QOS_SCHED_SMT_EXPELLER
static inline int task_has_qos_idle_policy(struct task_struct *p)
{
	return qos_idle_policy(task_group(p)->qos_level) && p->policy == SCHED_IDLE;
}
#endif

extern void swake_up_all_locked(struct swait_queue_head *q);
extern void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait);