include/linux/sched.h +0 −1

@@ -863,7 +863,6 @@ struct sched_entity {
 	struct load_weight	load;		/* for load-balancing */
 	struct rb_node		run_node;
 	unsigned int		on_rq;
-	int			peer_preempt;
 
 	u64			exec_start;
 	u64			sum_exec_runtime;
kernel/sched.c +1 −3

@@ -460,7 +460,6 @@ enum {
 	SCHED_FEAT_TREE_AVG		= 4,
 	SCHED_FEAT_APPROX_AVG		= 8,
 	SCHED_FEAT_WAKEUP_PREEMPT	= 16,
-	SCHED_FEAT_PREEMPT_RESTRICT	= 32,
 };
 
 const_debug unsigned int sysctl_sched_features =
@@ -468,8 +467,7 @@ const_debug unsigned int sysctl_sched_features =
 		SCHED_FEAT_START_DEBIT		* 1 |
 		SCHED_FEAT_TREE_AVG		* 0 |
 		SCHED_FEAT_APPROX_AVG		* 0 |
-		SCHED_FEAT_WAKEUP_PREEMPT	* 1 |
-		SCHED_FEAT_PREEMPT_RESTRICT	* 0;
+		SCHED_FEAT_WAKEUP_PREEMPT	* 1;
 
 #define sched_feat(x) (sysctl_sched_features & SCHED_FEAT_##x)
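For reference, a minimal user-space sketch of the sched_feat() bitmask pattern the hunk above edits. This is not kernel code: the NEW_FAIR_SLEEPERS and START_DEBIT enum values are assumptions (only TREE_AVG through WAKEUP_PREEMPT are visible in the hunk), and main() exists only to exercise the macro.

#include <stdio.h>

enum {
	SCHED_FEAT_NEW_FAIR_SLEEPERS	= 1,	/* assumed value, not in the hunk */
	SCHED_FEAT_START_DEBIT		= 2,	/* assumed value, not in the hunk */
	SCHED_FEAT_TREE_AVG		= 4,
	SCHED_FEAT_APPROX_AVG		= 8,
	SCHED_FEAT_WAKEUP_PREEMPT	= 16,
};

/*
 * Each feature is a distinct power of two; multiplying by 0 or 1
 * includes or excludes its bit, so the default mask reads as a
 * table of per-feature on/off switches.
 */
static unsigned int sysctl_sched_features =
		SCHED_FEAT_NEW_FAIR_SLEEPERS	* 1 |
		SCHED_FEAT_START_DEBIT		* 1 |
		SCHED_FEAT_TREE_AVG		* 0 |
		SCHED_FEAT_APPROX_AVG		* 0 |
		SCHED_FEAT_WAKEUP_PREEMPT	* 1;

/* Token-pasting lets callers write sched_feat(WAKEUP_PREEMPT). */
#define sched_feat(x) (sysctl_sched_features & SCHED_FEAT_##x)

int main(void)
{
	printf("WAKEUP_PREEMPT enabled: %d\n", !!sched_feat(WAKEUP_PREEMPT));
	printf("TREE_AVG enabled:       %d\n", !!sched_feat(TREE_AVG));
	return 0;
}

With PREEMPT_RESTRICT deleted from the enum, its `* 0` entry in the default mask goes with it; the remaining bits are unchanged.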
kernel/sched_fair.c +2 −8

@@ -546,7 +546,6 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
 
 	update_stats_dequeue(cfs_rq, se);
 	if (sleep) {
-		se->peer_preempt = 0;
 #ifdef CONFIG_SCHEDSTATS
 		if (entity_is_task(se)) {
 			struct task_struct *tsk = task_of(se);
@@ -574,10 +573,8 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
 
 	ideal_runtime = sched_slice(cfs_rq, curr);
 	delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
-	if (delta_exec > ideal_runtime ||
-			(sched_feat(PREEMPT_RESTRICT) && curr->peer_preempt))
+	if (delta_exec > ideal_runtime)
 		resched_task(rq_of(cfs_rq)->curr);
-	curr->peer_preempt = 0;
 }
 
 static void
@@ -867,9 +864,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p)
 		gran = calc_delta_fair(gran, &se->load);
 
 		if (delta > gran) {
-			int now = !sched_feat(PREEMPT_RESTRICT);
-
-			if (now || p->prio < curr->prio || !se->peer_preempt++)
+			if (p->prio < curr->prio)
 				resched_task(curr);
 		}
 	}
@@ -1083,7 +1078,6 @@ static void task_new_fair(struct rq *rq, struct task_struct *p)
 		swap(curr->vruntime, se->vruntime);
 	}
 
-	se->peer_preempt = 0;
 	enqueue_task_fair(rq, p, 0);
 	resched_task(rq->curr);
 }
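To see the net effect, here is a stand-alone sketch of check_preempt_tick() as it reads after this diff. The types, the sched_slice() value, and the resched_task() stub below are simplified stand-ins, not the kernel implementations.

#include <stdio.h>

typedef unsigned long long u64;

struct sched_entity {
	u64 sum_exec_runtime;		/* total runtime accrued */
	u64 prev_sum_exec_runtime;	/* runtime recorded when last picked */
};

/* Stand-in for the kernel's sched_slice(): the entity's ideal timeslice. */
static u64 sched_slice(void)
{
	return 4000000ULL;	/* e.g. 4 ms, in nanoseconds */
}

static int need_resched_flag;

static void resched_task(void)
{
	need_resched_flag = 1;	/* the kernel would set TIF_NEED_RESCHED */
}

/*
 * After the diff, the tick check is a single comparison: preempt current
 * once it has exhausted its ideal slice. The removed PREEMPT_RESTRICT
 * branch and the curr->peer_preempt bookkeeping no longer factor in.
 */
static void check_preempt_tick(struct sched_entity *curr)
{
	u64 ideal_runtime = sched_slice();
	u64 delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;

	if (delta_exec > ideal_runtime)
		resched_task();
}

int main(void)
{
	struct sched_entity curr = {
		.sum_exec_runtime	= 9000000ULL,
		.prev_sum_exec_runtime	= 4000000ULL,
	};

	check_preempt_tick(&curr);	/* ran 5 ms against a 4 ms slice */
	printf("need_resched: %d\n", need_resched_flag);
	return 0;
}

The wakeup path simplifies the same way: with the peer_preempt counter gone from dequeue_entity() and task_new_fair(), check_preempt_wakeup() reduces to the plain priority test shown in the hunk above.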