cgroups: more safe tasklist locking in cgroup_attach_proc

Fix unstable tasklist locking in cgroup_attach_proc. According to this thread - https://lkml.org/lkml/2011/7/27/243 - RCU is not sufficient to guarantee the tasklist is stable w.r.t. de_thread and exit. Taking tasklist_lock for reading, instead of rcu_read_lock, ensures proper exclusion.

Signed-off-by: Ben Blum <bblum@andrew.cmu.edu>
Acked-by: Paul Menage <paul@paulmenage.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
Cc: Neil Brown <neilb@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 33ef6b6984
parent 434a964daa
@@ -2027,7 +2027,7 @@ int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
 		goto out_free_group_list;
 
 	/* prevent changes to the threadgroup list while we take a snapshot. */
-	rcu_read_lock();
+	read_lock(&tasklist_lock);
 	if (!thread_group_leader(leader)) {
 		/*
 		 * a race with de_thread from another thread's exec() may strip
@@ -2036,7 +2036,7 @@ int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
 		 * throw this task away and try again (from cgroup_procs_write);
 		 * this is "double-double-toil-and-trouble-check locking".
 		 */
-		rcu_read_unlock();
+		read_unlock(&tasklist_lock);
 		retval = -EAGAIN;
 		goto out_free_group_list;
 	}
@@ -2057,7 +2057,7 @@ int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
 	} while_each_thread(leader, tsk);
 	/* remember the number of threads in the array for later. */
 	group_size = i;
-	rcu_read_unlock();
+	read_unlock(&tasklist_lock);
 
 	/*
 	 * step 1: check that we can legitimately attach to the cgroup.
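For readers unfamiliar with the locking rationale, the pattern the patch enforces can be sketched in userspace: a snapshot loop over a shared list is only stable if it takes the same reader/writer lock that the list's writers take in write mode; an RCU-style read section does not exclude those writers. The sketch below is a hypothetical analogue, not the kernel code itself: a pthread rwlock stands in for tasklist_lock, a toy linked list stands in for the thread group, and add_thread() plays the role of the writers (fork/exit/de_thread) whose changes the read lock holds off during the snapshot.

/*
 * Userspace analogue (assumed demo, not kernel code) of the pattern in
 * cgroup_attach_proc: snapshot a shared list under the reader side of the
 * same rwlock that writers take for writing, so the list cannot change
 * underneath the snapshot loop.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct task {                         /* stand-in for struct task_struct */
	int tid;
	struct task *next;
};

static pthread_rwlock_t tasklist_lock = PTHREAD_RWLOCK_INITIALIZER;
static struct task *thread_group;     /* head of the "thread group" list */

/* Writer side: add a thread, analogous to clone()/exec() updating the list. */
static void add_thread(int tid)
{
	struct task *t = malloc(sizeof(*t));

	if (!t)
		return;
	t->tid = tid;
	pthread_rwlock_wrlock(&tasklist_lock);
	t->next = thread_group;
	thread_group = t;
	pthread_rwlock_unlock(&tasklist_lock);
}

/* Reader side: snapshot the group into an array, like cgroup_attach_proc(). */
static int snapshot_group(int *tids, int max)
{
	int i = 0;

	/* prevent changes to the list while we take a snapshot */
	pthread_rwlock_rdlock(&tasklist_lock);
	for (struct task *t = thread_group; t && i < max; t = t->next)
		tids[i++] = t->tid;
	pthread_rwlock_unlock(&tasklist_lock);

	return i;                     /* number of threads captured */
}

int main(void)
{
	int tids[16];
	int n;

	add_thread(101);
	add_thread(102);

	n = snapshot_group(tids, 16);
	for (int i = 0; i < n; i++)
		printf("snapshotted tid %d\n", tids[i]);
	return 0;
}

In the kernel patch the same trade is made: rcu_read_lock() only guaranteed the walked structures stayed allocated, while read_lock(&tasklist_lock) additionally excludes de_thread and exit for the duration of the thread-group snapshot.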