memcg: fix memcg_cache_name() to use cgroup_name()
As cgroup supports rename, it's unsafe to dereference dentry->d_name without proper vfs locks. Fix this by using cgroup_name() rather than the dentry directly.

Also open-code memcg_cache_name(), because it is called only from kmem_cache_dup(), which frees the returned name right after kmem_cache_create_memcg() makes a copy of it. Such a short-lived allocation doesn't make much sense, so replace it with a static buffer; this is safe because kmem_cache_dup() is called with memcg_cache_mutex held.

Signed-off-by: Li Zefan <lizefan@huawei.com>
Signed-off-by: Michal Hocko <mhocko@suse.cz>
Acked-by: Glauber Costa <glommer@parallels.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
parent 1e2ccd1c0f
commit d9c10ddddc
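For illustration, here is a minimal user-space C sketch of the pattern the new kmem_cache_dup() adopts in the diff below: formatting the cache name into one long-lived static buffer while the caller holds a mutex, instead of making a short-lived per-call allocation. Everything in this sketch (cache_name(), name_mutex, the pthread locking) is an assumption made for the example; the kernel code itself relies on memcg_cache_mutex, lockdep_assert_held() and RCU around cgroup_name().

/* User-space analogue: a static buffer can replace a short-lived
 * kasprintf()-style allocation because the caller serializes access
 * with name_mutex (the kernel uses memcg_cache_mutex). */
#include <limits.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#ifndef PATH_MAX
#define PATH_MAX 4096
#endif

static pthread_mutex_t name_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Build "<base>(<id>:<group>)" into a buffer that lives for the whole
 * program. Caller must hold name_mutex; the kernel patch asserts the
 * equivalent with lockdep_assert_held(&memcg_cache_mutex). */
static const char *cache_name(const char *base, int id, const char *group)
{
	static char *tmp_name;

	if (!tmp_name) {
		tmp_name = malloc(PATH_MAX);
		if (!tmp_name)
			return NULL;
	}

	snprintf(tmp_name, PATH_MAX, "%s(%d:%s)", base, id, group);
	return tmp_name;
}

int main(void)
{
	const char *name;

	pthread_mutex_lock(&name_mutex);
	name = cache_name("kmalloc-64", 3, "mygroup");
	if (name)
		printf("%s\n", name);
	pthread_mutex_unlock(&name_mutex);
	return 0;
}

The static buffer is safe only because the mutex serializes every caller; without that guarantee, the per-call allocation the patch removes would be the correct choice.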
@@ -3214,43 +3214,6 @@ void mem_cgroup_destroy_cache(struct kmem_cache *cachep)
 	schedule_work(&cachep->memcg_params->destroy);
 }
 
-static char *memcg_cache_name(struct mem_cgroup *memcg, struct kmem_cache *s)
-{
-	char *name;
-	struct dentry *dentry;
-
-	rcu_read_lock();
-	dentry = rcu_dereference(memcg->css.cgroup->dentry);
-	rcu_read_unlock();
-
-	BUG_ON(dentry == NULL);
-
-	name = kasprintf(GFP_KERNEL, "%s(%d:%s)", s->name,
-			 memcg_cache_id(memcg), dentry->d_name.name);
-
-	return name;
-}
-
-static struct kmem_cache *kmem_cache_dup(struct mem_cgroup *memcg,
-					 struct kmem_cache *s)
-{
-	char *name;
-	struct kmem_cache *new;
-
-	name = memcg_cache_name(memcg, s);
-	if (!name)
-		return NULL;
-
-	new = kmem_cache_create_memcg(memcg, name, s->object_size, s->align,
-				      (s->flags & ~SLAB_PANIC), s->ctor, s);
-
-	if (new)
-		new->allocflags |= __GFP_KMEMCG;
-
-	kfree(name);
-	return new;
-}
-
 /*
  * This lock protects updaters, not readers. We want readers to be as fast as
  * they can, and they will either see NULL or a valid cache value. Our model
@@ -3260,6 +3223,44 @@ static struct kmem_cache *kmem_cache_dup(struct mem_cgroup *memcg,
  * will span more than one worker. Only one of them can create the cache.
  */
 static DEFINE_MUTEX(memcg_cache_mutex);
+
+/*
+ * Called with memcg_cache_mutex held
+ */
+static struct kmem_cache *kmem_cache_dup(struct mem_cgroup *memcg,
+					 struct kmem_cache *s)
+{
+	struct kmem_cache *new;
+	static char *tmp_name = NULL;
+
+	lockdep_assert_held(&memcg_cache_mutex);
+
+	/*
+	 * kmem_cache_create_memcg duplicates the given name and
+	 * cgroup_name for this name requires RCU context.
+	 * This static temporary buffer is used to prevent from
+	 * pointless shortliving allocation.
+	 */
+	if (!tmp_name) {
+		tmp_name = kmalloc(PATH_MAX, GFP_KERNEL);
+		if (!tmp_name)
+			return NULL;
+	}
+
+	rcu_read_lock();
+	snprintf(tmp_name, PATH_MAX, "%s(%d:%s)", s->name,
+		 memcg_cache_id(memcg), cgroup_name(memcg->css.cgroup));
+	rcu_read_unlock();
+
+	new = kmem_cache_create_memcg(memcg, tmp_name, s->object_size, s->align,
+				      (s->flags & ~SLAB_PANIC), s->ctor, s);
+
+	if (new)
+		new->allocflags |= __GFP_KMEMCG;
+
+	return new;
+}
+
 static struct kmem_cache *memcg_create_kmem_cache(struct mem_cgroup *memcg,
 						  struct kmem_cache *cachep)
 {