kernel/sched.c +162 −165

@@ -562,16 +562,13 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
 static inline struct rq *__task_rq_lock(struct task_struct *p)
 	__acquires(rq->lock)
 {
-	struct rq *rq;
-
-repeat_lock_task:
-	rq = task_rq(p);
+	for (;;) {
+		struct rq *rq = task_rq(p);
 		spin_lock(&rq->lock);
-	if (unlikely(rq != task_rq(p))) {
+		if (likely(rq == task_rq(p)))
+			return rq;
 		spin_unlock(&rq->lock);
-		goto repeat_lock_task;
 	}
-	return rq;
 }
 
 /*
@@ -584,15 +581,14 @@ static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
 {
 	struct rq *rq;
 
-repeat_lock_task:
+	for (;;) {
 		local_irq_save(*flags);
 		rq = task_rq(p);
 		spin_lock(&rq->lock);
-	if (unlikely(rq != task_rq(p))) {
+		if (likely(rq == task_rq(p)))
+			return rq;
 		spin_unlock_irqrestore(&rq->lock, *flags);
-		goto repeat_lock_task;
 	}
-	return rq;
 }
 
 static void __task_rq_unlock(struct rq *rq)
@@ -1083,7 +1079,7 @@ void wait_task_inactive(struct task_struct *p)
 	int running, on_rq;
 	struct rq *rq;
 
-repeat:
+	for (;;) {
 		/*
 		 * We do the initial early heuristics without holding
 		 * any task-queue locks at all. We'll only try to get
@@ -1124,7 +1120,7 @@ void wait_task_inactive(struct task_struct *p)
 		 */
 		if (unlikely(running)) {
 			cpu_relax();
-			goto repeat;
+			continue;
 		}
 
 		/*
@@ -1138,7 +1134,7 @@ void wait_task_inactive(struct task_struct *p)
 		 */
 		if (unlikely(on_rq)) {
 			schedule_timeout_uninterruptible(1);
-			goto repeat;
+			continue;
 		}
 
 		/*
@@ -1146,6 +1142,8 @@ void wait_task_inactive(struct task_struct *p)
 		 * runnable, which means that it will never become
 		 * running in the future either. We're all done!
 		 */
+		break;
+	}
 }
 
 /***
@@ -1236,7 +1234,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu)
 		/* Skip over this group if it has no CPUs allowed */
 		if (!cpus_intersects(group->cpumask, p->cpus_allowed))
-			goto nextgroup;
+			continue;
 
 		local_group = cpu_isset(this_cpu, group->cpumask);
@@ -1264,9 +1262,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu)
 			min_load = avg_load;
 			idlest = group;
 		}
-nextgroup:
-		group = group->next;
-	} while (group != sd->groups);
+	} while (group = group->next, group != sd->groups);
 
 	if (!idlest || 100*this_load < imbalance*min_load)
 		return NULL;
@@ -3517,8 +3513,9 @@ asmlinkage void __sched preempt_schedule(void)
 	if (likely(ti->preempt_count || irqs_disabled()))
 		return;
 
-need_resched:
+	do {
 		add_preempt_count(PREEMPT_ACTIVE);
+
 		/*
 		 * We keep the big kernel semaphore locked, but we
 		 * clear ->lock_depth so that schedule() doesnt
@@ -3534,10 +3531,12 @@ asmlinkage void __sched preempt_schedule(void)
 #endif
 		sub_preempt_count(PREEMPT_ACTIVE);
 
-	/* we could miss a preemption opportunity between schedule and now */
+		/*
+		 * Check again in case we missed a preemption opportunity
+		 * between schedule and now.
+		 */
 		barrier();
-	if (unlikely(test_thread_flag(TIF_NEED_RESCHED)))
-		goto need_resched;
+	} while (unlikely(test_thread_flag(TIF_NEED_RESCHED)));
 }
 EXPORT_SYMBOL(preempt_schedule);
@@ -3557,8 +3556,9 @@ asmlinkage void __sched preempt_schedule_irq(void)
 	/* Catch callers which need to be fixed */
 	BUG_ON(ti->preempt_count || !irqs_disabled());
 
-need_resched:
+	do {
 		add_preempt_count(PREEMPT_ACTIVE);
+
 		/*
 		 * We keep the big kernel semaphore locked, but we
 		 * clear ->lock_depth so that schedule() doesnt
@@ -3576,10 +3576,12 @@ asmlinkage void __sched preempt_schedule_irq(void)
 #endif
 		sub_preempt_count(PREEMPT_ACTIVE);
 
-	/* we could miss a preemption opportunity between schedule and now */
+		/*
+		 * Check again in case we missed a preemption opportunity
+		 * between schedule and now.
+		 */
 		barrier();
-	if (unlikely(test_thread_flag(TIF_NEED_RESCHED)))
-		goto need_resched;
+	} while (unlikely(test_thread_flag(TIF_NEED_RESCHED)));
 }
 
 #endif /* CONFIG_PREEMPT */
@@ -4324,10 +4326,10 @@ asmlinkage long sys_sched_setparam(pid_t pid, struct sched_param __user *param)
 asmlinkage long sys_sched_getscheduler(pid_t pid)
 {
 	struct task_struct *p;
-	int retval = -EINVAL;
+	int retval;
 
 	if (pid < 0)
-		goto out_nounlock;
+		return -EINVAL;
 
 	retval = -ESRCH;
 	read_lock(&tasklist_lock);
@@ -4338,8 +4340,6 @@ asmlinkage long sys_sched_getscheduler(pid_t pid)
 		retval = p->policy;
 	}
 	read_unlock(&tasklist_lock);
-
-out_nounlock:
 	return retval;
 }
@@ -4352,10 +4352,10 @@ asmlinkage long sys_sched_getparam(pid_t pid, struct sched_param __user *param)
 {
 	struct sched_param lp;
 	struct task_struct *p;
-	int retval = -EINVAL;
+	int retval;
 
 	if (!param || pid < 0)
-		goto out_nounlock;
+		return -EINVAL;
 
 	read_lock(&tasklist_lock);
 	p = find_process_by_pid(pid);
@@ -4375,7 +4375,6 @@ asmlinkage long sys_sched_getparam(pid_t pid, struct sched_param __user *param)
 	 */
 	retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;
 
-out_nounlock:
 	return retval;
 
 out_unlock:
@@ -4731,11 +4730,11 @@ long sys_sched_rr_get_interval(pid_t pid, struct timespec __user *interval)
 {
 	struct task_struct *p;
 	unsigned int time_slice;
-	int retval = -EINVAL;
+	int retval;
 	struct timespec t;
 
 	if (pid < 0)
-		goto out_nounlock;
+		return -EINVAL;
 
 	retval = -ESRCH;
 	read_lock(&tasklist_lock);
@@ -4763,8 +4762,8 @@ long sys_sched_rr_get_interval(pid_t pid, struct timespec __user *interval)
 	read_unlock(&tasklist_lock);
 	jiffies_to_timespec(time_slice, &t);
 	retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0;
-out_nounlock:
 	return retval;
+
 out_unlock:
 	read_unlock(&tasklist_lock);
 	return retval;
@@ -5070,7 +5069,7 @@ static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p)
 	struct rq *rq;
 	int dest_cpu;
 
-restart:
+	do {
 		/* On same node? */
 		mask = node_to_cpumask(cpu_to_node(dead_cpu));
 		cpus_and(mask, mask, p->cpus_allowed);
@@ -5097,8 +5096,7 @@ static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p)
 			       "longer affine to cpu%d\n",
 			       p->pid, p->comm, dead_cpu);
 		}
-	if (!__migrate_task(p, dead_cpu, dest_cpu))
-		goto restart;
+	} while (!__migrate_task(p, dead_cpu, dest_cpu));
 }
 
 /*
@@ -5913,7 +5911,7 @@ static void init_numa_sched_groups_power(struct sched_group *group_head)
 	if (!sg)
 		return;
 
-next_sg:
+	do {
 		for_each_cpu_mask(j, sg->cpumask) {
 			struct sched_domain *sd;
@@ -5929,8 +5927,7 @@ static void init_numa_sched_groups_power(struct sched_group *group_head)
 			sg_inc_cpu_power(sg, sd->groups->__cpu_power);
 		}
 		sg = sg->next;
-	if (sg != group_head)
-		goto next_sg;
+	} while (sg != group_head);
 }
 #endif
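The retry sites above all follow the same shape: a backwards goto that redoes a racy lookup becomes a for (;;) loop that returns as soon as the re-check passes. Below is a minimal user-space sketch of the __task_rq_lock() idiom, using a pthread mutex in place of the runqueue spinlock; struct rq, struct task, and task_rq_lock_sketch() here are hypothetical stand-ins for illustration, not kernel APIs.

#include <pthread.h>
#include <stdio.h>

/* Hypothetical stand-ins for the kernel's per-CPU runqueues: a queue
 * with its own lock, and a task recording which queue it belongs to. */
struct rq {
	pthread_mutex_t lock;
};

struct task {
	struct rq *rq;	/* may be changed by a concurrent "migration" */
};

/*
 * The pattern the patch converts to: snapshot the task's queue, lock
 * it, then re-check that the task did not move between the lookup and
 * the lock.  Retry until the check holds -- no goto label needed.
 */
static struct rq *task_rq_lock_sketch(struct task *t)
{
	for (;;) {
		struct rq *rq = t->rq;			/* unlocked snapshot */

		pthread_mutex_lock(&rq->lock);
		if (rq == t->rq)			/* still the right queue? */
			return rq;			/* locked and valid */
		pthread_mutex_unlock(&rq->lock);	/* raced; try again */
	}
}

int main(void)
{
	struct rq rq0 = { PTHREAD_MUTEX_INITIALIZER };
	struct task t = { &rq0 };

	struct rq *locked = task_rq_lock_sketch(&t);
	printf("locked the task's runqueue: %p\n", (void *)locked);
	pthread_mutex_unlock(&locked->lock);
	return 0;
}

As in the patched kernel code, the lock is still taken before the re-check, so a task that moves between the snapshot and the lock is caught and the loop drops the lock and retries; only the control flow changed, not the locking protocol.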
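The find_idlest_group() hunk is the one non-mechanical conversion: the cursor advance moves into the do/while condition as a comma expression, so continue (which jumps to the condition) still steps to the next group. That is what lets goto nextgroup become continue. A self-contained sketch of that traversal, with a hypothetical struct group and an allowed flag standing in for sched_group and the cpus_intersects() check:

#include <limits.h>
#include <stdio.h>

/* A small circular list standing in for sd->groups. */
struct group {
	int load;
	int allowed;
	struct group *next;
};

int main(void)
{
	struct group g[3] = {
		{ .load = 7, .allowed = 1 },
		{ .load = 3, .allowed = 0 },
		{ .load = 5, .allowed = 1 },
	};
	struct group *head = &g[0];
	g[0].next = &g[1];
	g[1].next = &g[2];
	g[2].next = &g[0];

	struct group *idlest = NULL;
	struct group *grp = head;
	int min_load = INT_MAX;

	do {
		if (!grp->allowed)
			continue;	/* replaces `goto nextgroup` */
		if (grp->load < min_load) {
			min_load = grp->load;
			idlest = grp;
		}
		/*
		 * The comma expression below advances the cursor *and*
		 * tests it, so `continue` still steps to the next group
		 * instead of spinning on the current one.
		 */
	} while (grp = grp->next, grp != head);

	printf("idlest group load: %d\n", idlest ? idlest->load : -1);
	return 0;
}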