include/linux/sched.h  +1 −0

@@ -1406,6 +1406,7 @@ extern unsigned int sysctl_sched_wakeup_granularity;
 extern unsigned int sysctl_sched_batch_wakeup_granularity;
 extern unsigned int sysctl_sched_stat_granularity;
 extern unsigned int sysctl_sched_runtime_limit;
+extern unsigned int sysctl_sched_compat_yield;
 extern unsigned int sysctl_sched_child_runs_first;
 extern unsigned int sysctl_sched_features;
kernel/sched.c  +6 −4

@@ -1682,6 +1682,11 @@ void fastcall wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
 	p->prio = effective_prio(p);

+	if (rt_prio(p->prio))
+		p->sched_class = &rt_sched_class;
+	else
+		p->sched_class = &fair_sched_class;
+
 	if (!p->sched_class->task_new || !sysctl_sched_child_runs_first ||
 			(clone_flags & CLONE_VM) || task_cpu(p) != this_cpu ||
 			!current->se.on_rq) {

@@ -4550,9 +4555,6 @@ asmlinkage long sys_sched_yield(void)
 	struct rq *rq = this_rq_lock();

 	schedstat_inc(rq, yld_cnt);
-	if (unlikely(rq->nr_running == 1))
-		schedstat_inc(rq, yld_act_empty);
-	else
-		current->sched_class->yield_task(rq, current);
+	current->sched_class->yield_task(rq, current);

 	/*
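With the scheduling class now chosen in wake_up_new_task(), sys_sched_yield() no longer special-cases the single-runnable-task runqueue; it always delegates to the class's yield_task() hook, and the "only task" check moves into yield_task_fair() below. As a rough userspace illustration, not part of the patch, here is the kind of workload this path serves: two CPU-bound processes that ping-pong via sched_yield(). Pinning both to one CPU (e.g. with taskset) makes the difference between the gentle default yield and the compat yield observable.

/*
 * Hypothetical demo program, not from the patch: two processes
 * hammering sched_yield(). Each call enters sys_sched_yield(),
 * which now always calls the class's yield_task() hook.
 */
#include <sys/types.h>
#include <sys/wait.h>
#include <sched.h>
#include <stdio.h>
#include <unistd.h>

static void spin_and_yield(const char *name, long iters)
{
	for (long i = 0; i < iters; i++)
		sched_yield();
	printf("%s done\n", name);
}

int main(void)
{
	pid_t pid = fork();

	if (pid < 0)
		return 1;
	if (pid == 0) {
		spin_and_yield("child", 1000000);
		_exit(0);
	}
	spin_and_yield("parent", 1000000);
	wait(NULL);
	return 0;
}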
kernel/sched_fair.c  +57 −6

@@ -42,6 +42,14 @@ unsigned int sysctl_sched_latency __read_mostly = 20000000ULL;
  */
 unsigned int sysctl_sched_min_granularity __read_mostly = 2000000ULL;

+/*
+ * sys_sched_yield() compat mode
+ *
+ * This option switches the aggressive yield implementation of the
+ * old scheduler back on.
+ */
+unsigned int __read_mostly sysctl_sched_compat_yield;
+
 /*
  * SCHED_BATCH wake-up granularity.
  * (default: 25 msec, units: nanoseconds)

@@ -897,12 +905,24 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int sleep)
 }

 /*
- * sched_yield() support is very simple - we dequeue and enqueue
+ * sched_yield() support is very simple - we dequeue and enqueue.
+ *
+ * If compat_yield is turned on then we requeue to the end of the tree.
  */
 static void yield_task_fair(struct rq *rq, struct task_struct *p)
 {
 	struct cfs_rq *cfs_rq = task_cfs_rq(p);
+	struct rb_node **link = &cfs_rq->tasks_timeline.rb_node;
+	struct sched_entity *rightmost, *se = &p->se;
+	struct rb_node *parent;

+	/*
+	 * Are we the only task in the tree?
+	 */
+	if (unlikely(cfs_rq->nr_running == 1))
+		return;
+
+	if (likely(!sysctl_sched_compat_yield)) {
+		__update_rq_clock(rq);
 	/*
 	 * Dequeue and enqueue the task to update its

@@ -910,6 +930,37 @@ static void yield_task_fair(struct rq *rq, struct task_struct *p)
 	 */
 	dequeue_entity(cfs_rq, &p->se, 0);
 	enqueue_entity(cfs_rq, &p->se, 0);
+
+		return;
+	}
+	/*
+	 * Find the rightmost entry in the rbtree:
+	 */
+	do {
+		parent = *link;
+		link = &parent->rb_right;
+	} while (*link);
+
+	rightmost = rb_entry(parent, struct sched_entity, run_node);
+	/*
+	 * Already in the rightmost position?
+	 */
+	if (unlikely(rightmost == se))
+		return;
+
+	/*
+	 * Minimally necessary key value to be last in the tree:
+	 */
+	se->fair_key = rightmost->fair_key + 1;
+
+	if (cfs_rq->rb_leftmost == &se->run_node)
+		cfs_rq->rb_leftmost = rb_next(&se->run_node);
+	/*
+	 * Relink the task to the rightmost position:
+	 */
+	rb_erase(&se->run_node, &cfs_rq->tasks_timeline);
+	rb_link_node(&se->run_node, parent, link);
+	rb_insert_color(&se->run_node, &cfs_rq->tasks_timeline);
 }

 /*
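The compat path's requeue boils down to a classic binary-search-tree move: follow rb_right pointers down to the maximum node, pick a key just beyond it, and relink the yielding entity there so it sorts last. Here is a standalone sketch of that idea with hypothetical names (struct node, tree_max) and a plain BST standing in for the kernel's rbtree:

/*
 * Illustrative sketch, not kernel code: find the rightmost (largest)
 * node by walking right children, then compute the minimal key that
 * would place a moved node after it.
 */
#include <stdio.h>
#include <stddef.h>

struct node {
	long key;
	struct node *left, *right;
};

/* Mirrors the do/while over rb_right in yield_task_fair(). */
static struct node *tree_max(struct node *root)
{
	struct node *n = root;

	while (n->right)
		n = n->right;
	return n;
}

int main(void)
{
	struct node c = { 30, NULL, NULL };
	struct node a = { 10, NULL, NULL };
	struct node b = { 20, &a, &c };		/* b is the root */
	struct node *rightmost = tree_max(&b);

	/* "Minimally necessary key value to be last in the tree": */
	long new_key = rightmost->key + 1;

	printf("rightmost key %ld, yielding task would get key %ld\n",
	       rightmost->key, new_key);
	return 0;
}

The kernel version has two obligations the sketch omits: it fixes up the cached rb_leftmost pointer when the yielding entity happens to be the leftmost node, and it reinserts with rb_erase()/rb_link_node()/rb_insert_color() so the red-black invariants are restored.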
kernel/sysctl.c  +8 −0

@@ -303,6 +303,14 @@ static ctl_table kern_table[] = {
 		.proc_handler	= &proc_dointvec,
 	},
 #endif
+	{
+		.ctl_name	= CTL_UNNUMBERED,
+		.procname	= "sched_compat_yield",
+		.data		= &sysctl_sched_compat_yield,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec,
+	},
 #ifdef CONFIG_PROVE_LOCKING
 	{
 		.ctl_name	= CTL_UNNUMBERED,
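Once this ctl_table entry is registered, the knob appears as /proc/sys/kernel/sched_compat_yield: mode 0644, parsed by proc_dointvec, so "echo 1 > /proc/sys/kernel/sched_compat_yield" (as root) switches the aggressive yield back on. A minimal C equivalent, assuming root and a kernel carrying this change:

/*
 * Usage sketch, not part of the patch: enable the compat yield
 * behavior by writing to the new proc file.
 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/kernel/sched_compat_yield", "w");

	if (!f) {
		perror("sched_compat_yield");
		return 1;
	}
	/* proc_dointvec accepts a plain decimal integer. */
	fputs("1\n", f);
	fclose(f);
	return 0;
}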