memcg: change memcg_oom_mutex to spinlock

memcg_oom_mutex is used to protect the memcg OOM path and the eventfd
interface for oom_control.  None of the critical sections it protects
sleep (eventfd_signal works from atomic context, and the rest are simple
linked-list or oom_lock atomic operations).

A mutex is also too heavyweight for these code paths because it triggers
a lot of scheduling.  It also makes convoying effects more visible when
there is a large number of OOM kills, because we take the lock multiple
times during mem_cgroup_handle_oom, so there are multiple places where
many processes can sleep.
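
Condensed from the hunks below (a sketch, not a verbatim copy; the
waitqueue setup and need_to_kill handling are elided), the post-patch
shape of mem_cgroup_handle_oom: both locked regions do only short,
non-sleeping work, and the actual sleep on memcg_oom_waitq happens with
the lock dropped, since a spinlock must never be held across schedule():

	spin_lock(&memcg_oom_lock);
	locked = mem_cgroup_oom_lock(mem);	/* flag walk, no sleeping */
	if (locked)
		mem_cgroup_oom_notify(mem);	/* eventfd_signal(), atomic-safe */
	spin_unlock(&memcg_oom_lock);

	schedule();				/* the sleep happens unlocked */
	finish_wait(&memcg_oom_waitq, &owait.wait);

	spin_lock(&memcg_oom_lock);
	if (locked)
		mem_cgroup_oom_unlock(mem);
	memcg_wakeup_oom(mem);
	spin_unlock(&memcg_oom_lock);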

Signed-off-by: Michal Hocko <mhocko@suse.cz>
Cc: Balbir Singh <bsingharora@gmail.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

@@ -1725,7 +1725,7 @@ static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,
 /*
  * Check OOM-Killer is already running under our hierarchy.
  * If someone is running, return false.
- * Has to be called with memcg_oom_mutex
+ * Has to be called with memcg_oom_lock
  */
 static bool mem_cgroup_oom_lock(struct mem_cgroup *mem)
 {
@@ -1770,7 +1770,7 @@ static bool mem_cgroup_oom_lock(struct mem_cgroup *mem)
 }
 /*
- * Has to be called with memcg_oom_mutex
+ * Has to be called with memcg_oom_lock
  */
 static int mem_cgroup_oom_unlock(struct mem_cgroup *mem)
 {
@@ -1802,7 +1802,7 @@ static void mem_cgroup_unmark_under_oom(struct mem_cgroup *mem)
 		atomic_add_unless(&iter->under_oom, -1, 0);
 }
-static DEFINE_MUTEX(memcg_oom_mutex);
+static DEFINE_SPINLOCK(memcg_oom_lock);
 static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);
 struct oom_wait_info {
@@ -1864,7 +1864,7 @@ bool mem_cgroup_handle_oom(struct mem_cgroup *mem, gfp_t mask)
 	mem_cgroup_mark_under_oom(mem);
 	/* At first, try to OOM lock hierarchy under mem.*/
-	mutex_lock(&memcg_oom_mutex);
+	spin_lock(&memcg_oom_lock);
 	locked = mem_cgroup_oom_lock(mem);
 	/*
 	 * Even if signal_pending(), we can't quit charge() loop without
@@ -1876,7 +1876,7 @@ bool mem_cgroup_handle_oom(struct mem_cgroup *mem, gfp_t mask)
 		need_to_kill = false;
 	if (locked)
 		mem_cgroup_oom_notify(mem);
-	mutex_unlock(&memcg_oom_mutex);
+	spin_unlock(&memcg_oom_lock);
 	if (need_to_kill) {
 		finish_wait(&memcg_oom_waitq, &owait.wait);
@@ -1885,11 +1885,11 @@ bool mem_cgroup_handle_oom(struct mem_cgroup *mem, gfp_t mask)
 		schedule();
 		finish_wait(&memcg_oom_waitq, &owait.wait);
 	}
-	mutex_lock(&memcg_oom_mutex);
+	spin_lock(&memcg_oom_lock);
 	if (locked)
 		mem_cgroup_oom_unlock(mem);
 	memcg_wakeup_oom(mem);
-	mutex_unlock(&memcg_oom_mutex);
+	spin_unlock(&memcg_oom_lock);
 	mem_cgroup_unmark_under_oom(mem);
@@ -4553,7 +4553,7 @@ static int mem_cgroup_oom_register_event(struct cgroup *cgrp,
 	if (!event)
 		return -ENOMEM;
-	mutex_lock(&memcg_oom_mutex);
+	spin_lock(&memcg_oom_lock);
 	event->eventfd = eventfd;
 	list_add(&event->list, &memcg->oom_notify);
@@ -4561,7 +4561,7 @@ static int mem_cgroup_oom_register_event(struct cgroup *cgrp,
 	/* already in OOM ? */
 	if (atomic_read(&memcg->under_oom))
 		eventfd_signal(eventfd, 1);
-	mutex_unlock(&memcg_oom_mutex);
+	spin_unlock(&memcg_oom_lock);
 	return 0;
 }
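
For context, the registration path above is what userspace reaches
through cgroup v1's cgroup.event_control file; a hypothetical consumer
(the cgroup paths and the omission of error handling are illustrative
only) looks like:

	#include <fcntl.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <sys/eventfd.h>
	#include <unistd.h>

	int main(void)
	{
		uint64_t cnt;
		int efd = eventfd(0, 0);
		int ofd = open("/sys/fs/cgroup/memory/mygroup/memory.oom_control", O_RDONLY);
		int cfd = open("/sys/fs/cgroup/memory/mygroup/cgroup.event_control", O_WRONLY);

		/* "<eventfd fd> <oom_control fd>" arms mem_cgroup_oom_register_event() */
		dprintf(cfd, "%d %d", efd, ofd);

		read(efd, &cnt, sizeof(cnt));	/* blocks until the memcg reports OOM */
		printf("memcg OOM event (count=%llu)\n", (unsigned long long)cnt);
		return 0;
	}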
@@ -4575,7 +4575,7 @@ static void mem_cgroup_oom_unregister_event(struct cgroup *cgrp,
 	BUG_ON(type != _OOM_TYPE);
-	mutex_lock(&memcg_oom_mutex);
+	spin_lock(&memcg_oom_lock);
 	list_for_each_entry_safe(ev, tmp, &mem->oom_notify, list) {
 		if (ev->eventfd == eventfd) {
@@ -4584,7 +4584,7 @@ static void mem_cgroup_oom_unregister_event(struct cgroup *cgrp,
 		}
 	}
-	mutex_unlock(&memcg_oom_mutex);
+	spin_unlock(&memcg_oom_lock);
 }
 static int mem_cgroup_oom_control_read(struct cgroup *cgrp,