cgroup: revert ss_id_lock to spinlock
Commit c1e2ee2dc4 ("memcg: replace ss->id_lock with a rwlock") has now
been seen to cause the unfair behavior we should have expected from
converting a spinlock to an rwlock: softlockup in cgroup_mkdir(), whose
get_new_cssid() is waiting for the wlock, while there are 19 tasks using
the rlock in css_get_next() to get on with their memcg workload (in an
artificial test, admittedly). Yet lib/idr.c was made suitable for RCU
way back: revert that commit, restoring ss->id_lock to a spinlock.
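
The failure mode is classic reader-bias starvation: as long as new readers
keep arriving, the lone writer never gets its turn. Below is a minimal
userspace sketch of the same pattern, using POSIX rwlocks as a stand-in for
the kernel's rwlock_t; the thread roles and counts are illustrative only and
are not taken from the patch.

/*
 * Illustrative userspace sketch only -- not part of the patch.  Many
 * readers hammering a reader/writer lock can keep a single writer
 * waiting for a very long time when the implementation favours readers.
 */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_rwlock_t id_lock = PTHREAD_RWLOCK_INITIALIZER;

static void *reader(void *arg)		/* plays the css_get_next() role */
{
	(void)arg;
	for (;;) {
		pthread_rwlock_rdlock(&id_lock);
		/* an idr lookup would go here */
		pthread_rwlock_unlock(&id_lock);
	}
	return NULL;
}

static void *writer(void *arg)		/* plays the get_new_cssid() role */
{
	(void)arg;
	pthread_rwlock_wrlock(&id_lock);	/* can stall behind the readers */
	/* an idr insertion would go here */
	pthread_rwlock_unlock(&id_lock);
	puts("writer got the lock");
	return NULL;
}

int main(void)
{
	pthread_t r[19], w;			/* 19 readers, as in the report */
	int i;

	for (i = 0; i < 19; i++)
		pthread_create(&r[i], NULL, reader, NULL);
	sleep(1);
	pthread_create(&w, NULL, writer, NULL);
	pthread_join(w, NULL);			/* may never return if the writer starves */
	return 0;
}

The revert trades the rwlock's (theoretical) read-side concurrency for the
fairness of a plain spinlock, which the short idr operations involved can
easily tolerate.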
Signed-off-by: Hugh Dickins <hughd@google.com>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Acked-by: Li Zefan <lizf@cn.fujitsu.com>
Cc: Eric Dumazet <eric.dumazet@gmail.com>
Acked-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 42aee6c495
parent 9f7de8275b
@@ -498,7 +498,7 @@ struct cgroup_subsys {
 	struct list_head sibling;
 	/* used when use_id == true */
 	struct idr idr;
-	rwlock_t id_lock;
+	spinlock_t id_lock;
 
 	/* should be defined only by modular subsystems */
 	struct module *module;
@@ -4885,9 +4885,9 @@ void free_css_id(struct cgroup_subsys *ss, struct cgroup_subsys_state *css)
 
 	rcu_assign_pointer(id->css, NULL);
 	rcu_assign_pointer(css->id, NULL);
-	write_lock(&ss->id_lock);
+	spin_lock(&ss->id_lock);
 	idr_remove(&ss->idr, id->id);
-	write_unlock(&ss->id_lock);
+	spin_unlock(&ss->id_lock);
 	kfree_rcu(id, rcu_head);
 }
 EXPORT_SYMBOL_GPL(free_css_id);
@@ -4913,10 +4913,10 @@ static struct css_id *get_new_cssid(struct cgroup_subsys *ss, int depth)
 		error = -ENOMEM;
 		goto err_out;
 	}
-	write_lock(&ss->id_lock);
+	spin_lock(&ss->id_lock);
 	/* Don't use 0. allocates an ID of 1-65535 */
 	error = idr_get_new_above(&ss->idr, newid, 1, &myid);
-	write_unlock(&ss->id_lock);
+	spin_unlock(&ss->id_lock);
 
 	/* Returns error when there are no free spaces for new ID.*/
 	if (error) {
@@ -4931,9 +4931,9 @@ static struct css_id *get_new_cssid(struct cgroup_subsys *ss, int depth)
 	return newid;
 remove_idr:
 	error = -ENOSPC;
-	write_lock(&ss->id_lock);
+	spin_lock(&ss->id_lock);
 	idr_remove(&ss->idr, myid);
-	write_unlock(&ss->id_lock);
+	spin_unlock(&ss->id_lock);
 err_out:
 	kfree(newid);
 	return ERR_PTR(error);
@@ -4945,7 +4945,7 @@ static int __init_or_module cgroup_init_idr(struct cgroup_subsys *ss,
 {
 	struct css_id *newid;
 
-	rwlock_init(&ss->id_lock);
+	spin_lock_init(&ss->id_lock);
 	idr_init(&ss->idr);
 
 	newid = get_new_cssid(ss, 0);
@@ -5040,9 +5040,9 @@ css_get_next(struct cgroup_subsys *ss, int id,
 		 * scan next entry from bitmap(tree), tmpid is updated after
 		 * idr_get_next().
 		 */
-		read_lock(&ss->id_lock);
+		spin_lock(&ss->id_lock);
 		tmp = idr_get_next(&ss->idr, &tmpid);
-		read_unlock(&ss->id_lock);
+		spin_unlock(&ss->id_lock);
 
 		if (!tmp)
			break;