include/linux/sched.h +6 −6

@@ -136,7 +136,7 @@ extern unsigned long weighted_cpuload(const int cpu);
 
 struct seq_file;
 struct cfs_rq;
-struct task_grp;
+struct task_group;
 #ifdef CONFIG_SCHED_DEBUG
 extern void proc_sched_show_task(struct task_struct *p, struct seq_file *m);
 extern void proc_sched_set_task(struct task_struct *p);

@@ -598,7 +598,7 @@ struct user_struct {
 	uid_t uid;
 
 #ifdef CONFIG_FAIR_USER_SCHED
-	struct task_grp *tg;
+	struct task_group *tg;
 #endif
 };

@@ -1842,12 +1842,12 @@ extern void normalize_rt_tasks(void);
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
 
-extern struct task_grp init_task_grp;
+extern struct task_group init_task_group;
 
-extern struct task_grp *sched_create_group(void);
-extern void sched_destroy_group(struct task_grp *tg);
+extern struct task_group *sched_create_group(void);
+extern void sched_destroy_group(struct task_group *tg);
 extern void sched_move_task(struct task_struct *tsk);
-extern int sched_group_set_shares(struct task_grp *tg, unsigned long shares);
+extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);
 
 #endif
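The header hunks above rename every declaration of the group-scheduling interface from task_grp to task_group. As a hedged illustration of how the renamed API fits together, here is a hypothetical in-kernel caller. It is not part of this patch: the ERR_PTR error convention for sched_create_group(), the share value, and the direct p->user->tg assignment are assumptions made for the sketch.

/*
 * Hypothetical sketch only -- not from this patch. Assumes
 * CONFIG_FAIR_USER_SCHED and that sched_create_group() returns an
 * ERR_PTR-encoded error on failure (a common kernel convention).
 */
#include <linux/sched.h>
#include <linux/err.h>

static int example_regroup_task(struct task_struct *tsk)
{
	struct task_group *tg;
	int err;

	tg = sched_create_group();	/* per-cpu runqueues + entities */
	if (IS_ERR(tg))
		return PTR_ERR(tg);

	/* twice the default weight; same units as init_task_group_load */
	err = sched_group_set_shares(tg, 2048);
	if (err) {
		sched_destroy_group(tg);
		return err;
	}

	/* under CONFIG_FAIR_USER_SCHED, task_group() reads p->user->tg */
	tsk->user->tg = tg;
	sched_move_task(tsk);		/* re-attach tsk to the new group */
	return 0;
}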
kernel/sched.c +18 −18

@@ -156,7 +156,7 @@ struct rt_prio_array {
 struct cfs_rq;
 
 /* task group related information */
-struct task_grp {
+struct task_group {
 	/* schedulable entities of this group on each cpu */
 	struct sched_entity **se;
 	/* runqueue "owned" by this group on each cpu */

@@ -175,7 +175,7 @@ static struct cfs_rq *init_cfs_rq_p[NR_CPUS];
 /* Default task group.
  *	Every task in system belong to this group at bootup.
  */
-struct task_grp init_task_grp = {
+struct task_group init_task_group = {
 	.se     = init_sched_entity_p,
 	.cfs_rq = init_cfs_rq_p,
 };

@@ -186,17 +186,17 @@ struct task_grp init_task_grp = {
 # define INIT_TASK_GRP_LOAD	NICE_0_LOAD
 #endif
 
-static int init_task_grp_load = INIT_TASK_GRP_LOAD;
+static int init_task_group_load = INIT_TASK_GRP_LOAD;
 
 /* return group to which a task belongs */
-static inline struct task_grp *task_grp(struct task_struct *p)
+static inline struct task_group *task_group(struct task_struct *p)
 {
-	struct task_grp *tg;
+	struct task_group *tg;
 
 #ifdef CONFIG_FAIR_USER_SCHED
 	tg = p->user->tg;
 #else
-	tg = &init_task_grp;
+	tg = &init_task_group;
 #endif
 
 	return tg;

@@ -205,8 +205,8 @@ static inline struct task_grp *task_grp(struct task_struct *p)
 /* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
 static inline void set_task_cfs_rq(struct task_struct *p)
 {
-	p->se.cfs_rq = task_grp(p)->cfs_rq[task_cpu(p)];
-	p->se.parent = task_grp(p)->se[task_cpu(p)];
+	p->se.cfs_rq = task_group(p)->cfs_rq[task_cpu(p)];
+	p->se.parent = task_group(p)->se[task_cpu(p)];
 }
 
 #else

@@ -244,7 +244,7 @@ struct cfs_rq {
 	 * list is used during load balance.
 	 */
 	struct list_head leaf_cfs_rq_list; /* Better name : task_cfs_rq_list? */
-	struct task_grp *tg;	/* group that "owns" this runqueue */
+	struct task_group *tg;	/* group that "owns" this runqueue */
 	struct rcu_head rcu;
 #endif
 };

@@ -6522,19 +6522,19 @@ void __init sched_init(void)
 		init_cfs_rq_p[i] = cfs_rq;
 		init_cfs_rq(cfs_rq, rq);
-		cfs_rq->tg = &init_task_grp;
+		cfs_rq->tg = &init_task_group;
 		list_add(&cfs_rq->leaf_cfs_rq_list, &rq->leaf_cfs_rq_list);
 
 		init_sched_entity_p[i] = se;
 		se->cfs_rq = &rq->cfs;
 		se->my_q = cfs_rq;
-		se->load.weight = init_task_grp_load;
+		se->load.weight = init_task_group_load;
 		se->load.inv_weight =
-			div64_64(1ULL<<32, init_task_grp_load);
+			div64_64(1ULL<<32, init_task_group_load);
 		se->parent = NULL;
 	}
-	init_task_grp.shares = init_task_grp_load;
+	init_task_group.shares = init_task_group_load;
 #endif
 
 	for (j = 0; j < CPU_LOAD_IDX_MAX; j++)

@@ -6725,9 +6725,9 @@ void set_curr_task(int cpu, struct task_struct *p)
 #ifdef CONFIG_FAIR_GROUP_SCHED
 
 /* allocate runqueue etc for a new task group */
-struct task_grp *sched_create_group(void)
+struct task_group *sched_create_group(void)
 {
-	struct task_grp *tg;
+	struct task_group *tg;
 	struct cfs_rq *cfs_rq;
 	struct sched_entity *se;
 	struct rq *rq;

@@ -6800,7 +6800,7 @@ struct task_grp *sched_create_group(void)
 static void free_sched_group(struct rcu_head *rhp)
 {
 	struct cfs_rq *cfs_rq = container_of(rhp, struct cfs_rq, rcu);
-	struct task_grp *tg = cfs_rq->tg;
+	struct task_group *tg = cfs_rq->tg;
 	struct sched_entity *se;
 	int i;

@@ -6819,7 +6819,7 @@ static void free_sched_group(struct rcu_head *rhp)
 }
 
 /* Destroy runqueue etc associated with a task group */
-void sched_destroy_group(struct task_grp *tg)
+void sched_destroy_group(struct task_group *tg)
 {
 	struct cfs_rq *cfs_rq;
 	int i;

@@ -6895,7 +6895,7 @@ static void set_se_shares(struct sched_entity *se, unsigned long shares)
 	spin_unlock_irq(&rq->lock);
 }
 
-int sched_group_set_shares(struct task_grp *tg, unsigned long shares)
+int sched_group_set_shares(struct task_group *tg, unsigned long shares)
 {
 	int i;
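One detail worth noting in the sched_init() hunk above: alongside the weight, each group entity caches the inverse of init_task_group_load as inv_weight = 2^32 / weight (via div64_64()), so later accounting can scale runtime by the weight using a multiply and a shift instead of a 64-bit divide. A minimal userspace sketch of that trick follows; it is an illustration only, with constants mirroring the 1ULL<<32 scale used in the hunk.

/* Illustration of the cached-inverse-weight trick, not patch code. */
#include <stdio.h>
#include <stdint.h>

#define WMULT_SHIFT 32	/* matches the 1ULL<<32 scale in sched_init() */

int main(void)
{
	uint64_t weight = 1024;				/* e.g. NICE_0_LOAD */
	uint64_t inv_weight = (1ULL << WMULT_SHIFT) / weight;
	uint64_t delta_exec = 3000000;			/* runtime, in ns */

	/* delta_exec / weight, without a runtime 64-bit division */
	uint64_t scaled = (delta_exec * inv_weight) >> WMULT_SHIFT;

	printf("scaled=%llu exact=%llu\n",
	       (unsigned long long)scaled,
	       (unsigned long long)(delta_exec / weight));
	return 0;
}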
kernel/sched_debug.c +3 −3

@@ -239,7 +239,7 @@ static int
 root_user_share_read_proc(char *page, char **start, off_t off, int count,
 				int *eof, void *data)
 {
-	return sprintf(page, "%d\n", init_task_grp_load);
+	return sprintf(page, "%d\n", init_task_group_load);
 }
 
 static int

@@ -260,8 +260,8 @@ root_user_share_write_proc(struct file *file, const char __user *buffer,
 	mutex_lock(&root_user_share_mutex);
-	init_task_grp_load = shares;
-	rc = sched_group_set_shares(&init_task_grp, shares);
+	init_task_group_load = shares;
+	rc = sched_group_set_shares(&init_task_group, shares);
 	mutex_unlock(&root_user_share_mutex);
kernel/user.c +1 −1

@@ -51,7 +51,7 @@ struct user_struct root_user = {
 	.session_keyring = &root_session_keyring,
 #endif
 #ifdef CONFIG_FAIR_USER_SCHED
-	.tg = &init_task_grp,
+	.tg = &init_task_group,
 #endif
 };