kernel/sched.c  +26 −15

@@ -4279,9 +4279,9 @@ pick_next_task(struct rq *rq)
 }
 
 /*
- * schedule() is the main scheduler function.
+ * __schedule() is the main scheduler function.
  */
-asmlinkage void __sched schedule(void)
+static void __sched __schedule(void)
 {
 	struct task_struct *prev, *next;
 	unsigned long *switch_count;
@@ -4322,16 +4322,6 @@ asmlinkage void __sched schedule(void)
 				if (to_wakeup)
 					try_to_wake_up_local(to_wakeup);
 			}
-
-			/*
-			 * If we are going to sleep and we have plugged IO
-			 * queued, make sure to submit it to avoid deadlocks.
-			 */
-			if (blk_needs_flush_plug(prev)) {
-				raw_spin_unlock(&rq->lock);
-				blk_schedule_flush_plug(prev);
-				raw_spin_lock(&rq->lock);
-			}
 		}
 		switch_count = &prev->nvcsw;
 	}
@@ -4369,6 +4359,26 @@ asmlinkage void __sched schedule(void)
 	if (need_resched())
 		goto need_resched;
 }
+
+static inline void sched_submit_work(struct task_struct *tsk)
+{
+	if (!tsk->state)
+		return;
+	/*
+	 * If we are going to sleep and we have plugged IO queued,
+	 * make sure to submit it to avoid deadlocks.
+	 */
+	if (blk_needs_flush_plug(tsk))
+		blk_schedule_flush_plug(tsk);
+}
+
+asmlinkage void schedule(void)
+{
+	struct task_struct *tsk = current;
+
+	sched_submit_work(tsk);
+	__schedule();
+}
 EXPORT_SYMBOL(schedule);
 
 #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
@@ -4435,7 +4445,7 @@ asmlinkage void __sched notrace preempt_schedule(void)
 
 	do {
 		add_preempt_count_notrace(PREEMPT_ACTIVE);
-		schedule();
+		__schedule();
 		sub_preempt_count_notrace(PREEMPT_ACTIVE);
 
 		/*
@@ -4463,7 +4473,7 @@ asmlinkage void __sched preempt_schedule_irq(void)
 	do {
 		add_preempt_count(PREEMPT_ACTIVE);
 		local_irq_enable();
-		schedule();
+		__schedule();
 		local_irq_disable();
 		sub_preempt_count(PREEMPT_ACTIVE);
 
@@ -5588,7 +5598,7 @@ static inline int should_resched(void)
 static void __cond_resched(void)
 {
 	add_preempt_count(PREEMPT_ACTIVE);
-	schedule();
+	__schedule();
 	sub_preempt_count(PREEMPT_ACTIVE);
 }
 
@@ -7443,6 +7453,7 @@ static void __sdt_free(const struct cpumask *cpu_map)
 			struct sched_domain *sd = *per_cpu_ptr(sdd->sd, j);
 			if (sd && (sd->flags & SD_OVERLAP))
 				free_sched_groups(sd->groups, 0);
+			kfree(*per_cpu_ptr(sdd->sd, j));
 			kfree(*per_cpu_ptr(sdd->sg, j));
 			kfree(*per_cpu_ptr(sdd->sgp, j));
 		}