kernel/sched_fair.c (+2 −2)

--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1249,6 +1249,8 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int sync)
 	if (unlikely(se == pse))
 		return;
 
+	cfs_rq_of(pse)->next = pse;
+
 	/*
 	 * We can come here with TIF_NEED_RESCHED already set from new task
 	 * wake up path.
@@ -1256,8 +1258,6 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int sync)
 	if (test_tsk_need_resched(curr))
 		return;
 
-	cfs_rq_of(pse)->next = pse;
-
 	/*
 	 * Batch tasks do not preempt (their preemption is driven by
 	 * the tick):