kernel/cgroup/cgroup-internal.h +2 −0

@@ -250,6 +250,8 @@ int cgroup_migrate(struct task_struct *leader, bool threadgroup,
 int cgroup_attach_task(struct cgroup *dst_cgrp, struct task_struct *leader,
 		       bool threadgroup);
+void cgroup_attach_lock(bool lock_threadgroup);
+void cgroup_attach_unlock(bool lock_threadgroup);
 struct task_struct *cgroup_procs_write_start(char *buf, bool threadgroup,
 					     bool *locked)
 	__acquires(&cgroup_threadgroup_rwsem);
kernel/cgroup/cgroup-v1.c +2 −4

@@ -59,8 +59,7 @@ int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
 	int retval = 0;
 
 	mutex_lock(&cgroup_mutex);
-	cpus_read_lock();
-	percpu_down_write(&cgroup_threadgroup_rwsem);
+	cgroup_attach_lock(true);
 	for_each_root(root) {
 		struct cgroup *from_cgrp;
 
@@ -72,8 +71,7 @@ int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
 		if (retval)
 			break;
 	}
-	percpu_up_write(&cgroup_threadgroup_rwsem);
-	cpus_read_unlock();
+	cgroup_attach_unlock(true);
 	mutex_unlock(&cgroup_mutex);
 
 	return retval;
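For readability, here is a minimal sketch of how cgroup_attach_task_all() reads with this change applied, reconstructed from the two hunks above. The body of the per-root loop sits between the hunks and is not part of this diff, so it is elided; only the locking structure is shown.

int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
{
	struct cgroup_root *root;
	int retval = 0;

	mutex_lock(&cgroup_mutex);
	cgroup_attach_lock(true);	/* replaces open-coded cpus_read_lock() + percpu_down_write() */
	for_each_root(root) {
		struct cgroup *from_cgrp;

		/* ... per-root lookup and attach elided (unchanged by this diff) ... */

		if (retval)
			break;
	}
	cgroup_attach_unlock(true);	/* replaces percpu_up_write() + cpus_read_unlock() */
	mutex_unlock(&cgroup_mutex);

	return retval;
}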
kernel/cgroup/cgroup.c +2 −2

@@ -2393,7 +2393,7 @@ EXPORT_SYMBOL_GPL(task_cgroup_path);
  * write-locking cgroup_threadgroup_rwsem. This allows ->attach() to assume that
  * CPU hotplug is disabled on entry.
  */
-static void cgroup_attach_lock(bool lock_threadgroup)
+void cgroup_attach_lock(bool lock_threadgroup)
 {
 	cpus_read_lock();
 	if (lock_threadgroup)
@@ -2404,7 +2404,7 @@ static void cgroup_attach_lock(bool lock_threadgroup)
  * cgroup_attach_unlock - Undo cgroup_attach_lock()
  * @lock_threadgroup: whether to up_write cgroup_threadgroup_rwsem
  */
-static void cgroup_attach_unlock(bool lock_threadgroup)
+void cgroup_attach_unlock(bool lock_threadgroup)
 {
 	if (lock_threadgroup)
 		percpu_up_write(&cgroup_threadgroup_rwsem);
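For reference, a sketch of the two helpers as they read once static is dropped, pieced together from the hunks above. The tail of cgroup_attach_unlock() is cut off in the hunk; the trailing cpus_read_unlock() is assumed here, matching the ordering implied by the cgroup_attach_lock() comment (CPU hotplug is disabled before taking, and re-enabled after releasing, the threadgroup rwsem).

void cgroup_attach_lock(bool lock_threadgroup)
{
	/* disable CPU hotplug first, then optionally write-lock the threadgroup rwsem */
	cpus_read_lock();
	if (lock_threadgroup)
		percpu_down_write(&cgroup_threadgroup_rwsem);
}

void cgroup_attach_unlock(bool lock_threadgroup)
{
	/* release in the reverse order of cgroup_attach_lock() */
	if (lock_threadgroup)
		percpu_up_write(&cgroup_threadgroup_rwsem);
	cpus_read_unlock();	/* assumed: not visible in the hunk above */
}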