kernel/sched.c +46 −33

@@ -1387,38 +1387,24 @@ static inline void dec_cpu_load(struct rq *rq, unsigned long load)
 	update_load_sub(&rq->load, load);
 }
 
-#ifdef CONFIG_SMP
-static unsigned long source_load(int cpu, int type);
-static unsigned long target_load(int cpu, int type);
-static int task_hot(struct task_struct *p, u64 now, struct sched_domain *sd);
-
-static unsigned long cpu_avg_load_per_task(int cpu)
-{
-	struct rq *rq = cpu_rq(cpu);
-
-	if (rq->nr_running)
-		rq->avg_load_per_task = rq->load.weight / rq->nr_running;
-
-	return rq->avg_load_per_task;
-}
-
-#ifdef CONFIG_FAIR_GROUP_SCHED
-
-typedef void (*tg_visitor)(struct task_group *, int, struct sched_domain *);
+#if (defined(CONFIG_SMP) && defined(CONFIG_FAIR_GROUP_SCHED))
+typedef int (*tg_visitor)(struct task_group *, void *);
 
 /*
  * Iterate the full tree, calling @down when first entering a node and @up when
  * leaving it for the final time.
  */
-static void
-walk_tg_tree(tg_visitor down, tg_visitor up, int cpu, struct sched_domain *sd)
+static int walk_tg_tree(tg_visitor down, tg_visitor up, void *data)
 {
 	struct task_group *parent, *child;
+	int ret;
 
 	rcu_read_lock();
 	parent = &root_task_group;
 down:
-	(*down)(parent, cpu, sd);
+	ret = (*down)(parent, data);
+	if (ret)
+		goto out_unlock;
 	list_for_each_entry_rcu(child, &parent->children, siblings) {
 		parent = child;
 		goto down;
@@ -1426,14 +1412,42 @@ down:
 up:
 		continue;
 	}
-	(*up)(parent, cpu, sd);
+	ret = (*up)(parent, data);
+	if (ret)
+		goto out_unlock;
 
 	child = parent;
 	parent = parent->parent;
 	if (parent)
 		goto up;
+out_unlock:
 	rcu_read_unlock();
+
+	return ret;
+}
+
+static int tg_nop(struct task_group *tg, void *data)
+{
+	return 0;
+}
+#endif
+
+#ifdef CONFIG_SMP
+static unsigned long source_load(int cpu, int type);
+static unsigned long target_load(int cpu, int type);
+static int task_hot(struct task_struct *p, u64 now, struct sched_domain *sd);
+
+static unsigned long cpu_avg_load_per_task(int cpu)
+{
+	struct rq *rq = cpu_rq(cpu);
+
+	if (rq->nr_running)
+		rq->avg_load_per_task = rq->load.weight / rq->nr_running;
+
+	return rq->avg_load_per_task;
+}
+
+#ifdef CONFIG_FAIR_GROUP_SCHED
 
 static void __set_se_shares(struct sched_entity *se, unsigned long shares);
@@ -1493,11 +1507,11 @@ __update_group_shares_cpu(struct task_group *tg, int cpu,
  * This needs to be done in a bottom-up fashion because the rq weight of a
  * parent group depends on the shares of its child groups.
  */
-static void
-tg_shares_up(struct task_group *tg, int cpu, struct sched_domain *sd)
+static int tg_shares_up(struct task_group *tg, void *data)
 {
 	unsigned long rq_weight = 0;
 	unsigned long shares = 0;
+	struct sched_domain *sd = data;
 	int i;
 
 	for_each_cpu_mask(i, sd->span) {
@@ -1522,6 +1536,8 @@ tg_shares_up(struct task_group *tg, int cpu, struct sched_domain *sd)
 		__update_group_shares_cpu(tg, i, shares, rq_weight);
 		spin_unlock_irqrestore(&rq->lock, flags);
 	}
+
+	return 0;
 }
 
 /*
@@ -1529,10 +1545,10 @@ tg_shares_up(struct task_group *tg, int cpu, struct sched_domain *sd)
  * This needs to be done in a top-down fashion because the load of a child
  * group is a fraction of its parents load.
  */
-static void
-tg_load_down(struct task_group *tg, int cpu, struct sched_domain *sd)
+static int tg_load_down(struct task_group *tg, void *data)
 {
 	unsigned long load;
+	long cpu = (long)data;
 
 	if (!tg->parent) {
 		load = cpu_rq(cpu)->load.weight;
@@ -1543,11 +1559,8 @@ tg_load_down(struct task_group *tg, int cpu, struct sched_domain *sd)
 	}
 
 	tg->cfs_rq[cpu]->h_load = load;
-}
 
-static void
-tg_nop(struct task_group *tg, int cpu, struct sched_domain *sd)
-{
+	return 0;
 }
 
 static void update_shares(struct sched_domain *sd)
@@ -1557,7 +1570,7 @@ static void update_shares(struct sched_domain *sd)
 
 	if (elapsed >= (s64)(u64)sysctl_sched_shares_ratelimit) {
 		sd->last_update = now;
-		walk_tg_tree(tg_nop, tg_shares_up, 0, sd);
+		walk_tg_tree(tg_nop, tg_shares_up, sd);
 	}
 }
 
@@ -1568,9 +1581,9 @@ static void update_shares_locked(struct rq *rq, struct sched_domain *sd)
 	spin_lock(&rq->lock);
 }
 
-static void update_h_load(int cpu)
+static void update_h_load(long cpu)
 {
-	walk_tg_tree(tg_load_down, tg_nop, cpu, NULL);
+	walk_tg_tree(tg_load_down, tg_nop, (void *)cpu);
 }
 
 #else
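For readers outside the kernel tree, the interesting part of this change is the new walk_tg_tree() contract: both visitors now return int, a nonzero return aborts the walk early and is propagated to the caller, and per-call context travels through a single opaque void *data argument instead of the fixed (int cpu, struct sched_domain *sd) pair. Below is a minimal userspace sketch of that pattern, not the kernel code itself: the names struct node, walk_tree, node_nop, and print_down are illustrative only, and plain recursion stands in for the kernel's goto-based iteration under rcu_read_lock().

#include <stdio.h>
#include <string.h>

struct node {
	const char *name;
	struct node *parent;
	struct node *children[4];	/* NULL-terminated */
};

typedef int (*visitor)(struct node *, void *);

/*
 * Call @down when first entering a node and @up when leaving it for
 * the final time; a nonzero return from either visitor stops the walk,
 * mirroring the new walk_tg_tree() contract.
 */
static int walk_tree(struct node *n, visitor down, visitor up, void *data)
{
	int ret, i;

	ret = down(n, data);
	if (ret)
		return ret;

	for (i = 0; n->children[i]; i++) {
		ret = walk_tree(n->children[i], down, up, data);
		if (ret)
			return ret;
	}

	return up(n, data);
}

/* Counterpart of the new tg_nop(): do nothing, keep walking. */
static int node_nop(struct node *n, void *data)
{
	return 0;
}

static int print_down(struct node *n, void *data)
{
	const char *stop_at = data;	/* context recovered from void * */

	printf("enter %s\n", n->name);
	return strcmp(n->name, stop_at) == 0;	/* nonzero aborts the walk */
}

int main(void)
{
	struct node b = { "B", NULL, { NULL } };
	struct node c = { "C", NULL, { NULL } };
	struct node root = { "root", NULL, { &b, &c, NULL } };
	int stopped;

	b.parent = &root;
	c.parent = &root;

	/* Enters root, then B, and stops before ever visiting C. */
	stopped = walk_tree(&root, print_down, node_nop, (void *)"B");
	printf("walk %s early\n", stopped ? "stopped" : "did not stop");
	return 0;
}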
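A second detail worth noting: update_h_load() changes its parameter from int to long precisely because the CPU number now has to round-trip through void *data. On 64-bit targets an int is narrower than a pointer, while long and void * have the same width on every platform Linux supports, so the (void *)cpu / (long)data pair is lossless. A hedged sketch of the idiom follows; visit() is a made-up name, not a kernel function.

#include <stdio.h>

/* Unpack the value, as the new tg_load_down() does with (long)data. */
static int visit(void *data)
{
	long cpu = (long)data;

	printf("visiting cpu %ld\n", cpu);
	return 0;
}

int main(void)
{
	long cpu = 3;

	/* Pack the value, as the new update_h_load() does with (void *)cpu. */
	return visit((void *)cpu);
}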