cgroup: rename css_tryget*() to css_tryget_online*()
Unlike the more usual refcnting, what css_tryget() provides is the distinction between online and offline csses instead of protection against upping a refcnt which already reached zero. cgroup is planning to provide an actual tryget which fails if the refcnt already reached zero. Let's rename the existing trygets so that they clearly indicate that they're about onliness.

I thought about keeping the existing names as-is and introducing new names for the planned actual tryget; however, given that each controller participates in the synchronization of the online state, it seems worthwhile to make it explicit that these functions are about on/offline state.

Rename css_tryget() to css_tryget_online() and css_tryget_from_dir() to css_tryget_online_from_dir(). This is a pure rename.

v2: cgroup_freezer grew new usages of css_tryget(). Update accordingly.

Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Michal Hocko <mhocko@suse.cz>
Acked-by: Li Zefan <lizefan@huawei.com>
Cc: Vivek Goyal <vgoyal@redhat.com>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Arnaldo Carvalho de Melo <acme@kernel.org>
parent 46cfeb043b
commit ec903c0c85
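For orientation, here is a minimal caller sketch. It is not part of the patch; the helper name example_pin_blkio_css() and the elided consumer are placeholders. It mirrors the call sites changed below: look the css up under an RCU read lock, pin it with css_tryget_online() only while it is still online, and drop the reference later with css_put().

#include <linux/cgroup.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>

/* Hypothetical caller mirroring the call sites changed in this patch. */
static void example_pin_blkio_css(void)
{
        struct cgroup_subsys_state *css;

        rcu_read_lock();
        css = task_css(current, blkio_cgrp_id);        /* RCU-protected lookup */
        if (css && !css_tryget_online(css))
                css = NULL;                            /* css already offline, don't use it */
        rcu_read_unlock();

        if (css) {
                /* ... use the pinned css ... */
                css_put(css);                          /* pairs with css_tryget_online() */
        }
}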
@@ -185,7 +185,7 @@ static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
         lockdep_assert_held(q->queue_lock);

         /* blkg holds a reference to blkcg */
-        if (!css_tryget(&blkcg->css)) {
+        if (!css_tryget_online(&blkcg->css)) {
                 ret = -EINVAL;
                 goto err_free_blkg;
         }
fs/bio.c

@@ -1970,7 +1970,7 @@ int bio_associate_current(struct bio *bio)
         /* associate blkcg if exists */
         rcu_read_lock();
         css = task_css(current, blkio_cgrp_id);
-        if (css && css_tryget(css))
+        if (css && css_tryget_online(css))
                 bio->bi_css = css;
         rcu_read_unlock();

@@ -95,16 +95,16 @@ static inline void css_get(struct cgroup_subsys_state *css)
 }

 /**
- * css_tryget - try to obtain a reference on the specified css
+ * css_tryget_online - try to obtain a reference on the specified css if online
  * @css: target css
  *
- * Obtain a reference on @css if it's alive. The caller naturally needs to
- * ensure that @css is accessible but doesn't have to be holding a
+ * Obtain a reference on @css if it's online. The caller naturally needs
+ * to ensure that @css is accessible but doesn't have to be holding a
  * reference on it - IOW, RCU protected access is good enough for this
  * function. Returns %true if a reference count was successfully obtained;
  * %false otherwise.
  */
-static inline bool css_tryget(struct cgroup_subsys_state *css)
+static inline bool css_tryget_online(struct cgroup_subsys_state *css)
 {
         if (css->flags & CSS_ROOT)
                 return true;
@@ -115,7 +115,7 @@ static inline bool css_tryget(struct cgroup_subsys_state *css)
  * css_put - put a css reference
  * @css: target css
  *
- * Put a reference obtained via css_get() and css_tryget().
+ * Put a reference obtained via css_get() and css_tryget_online().
  */
 static inline void css_put(struct cgroup_subsys_state *css)
 {
@@ -905,8 +905,8 @@ void css_task_iter_end(struct css_task_iter *it);
 int cgroup_attach_task_all(struct task_struct *from, struct task_struct *);
 int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from);

-struct cgroup_subsys_state *css_tryget_from_dir(struct dentry *dentry,
-                                                struct cgroup_subsys *ss);
+struct cgroup_subsys_state *css_tryget_online_from_dir(struct dentry *dentry,
+                                                       struct cgroup_subsys *ss);

 #else /* !CONFIG_CGROUPS */

@@ -3771,7 +3771,7 @@ int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry)

         /*
          * We aren't being called from kernfs and there's no guarantee on
-         * @kn->priv's validity. For this and css_tryget_from_dir(),
+         * @kn->priv's validity. For this and css_tryget_online_from_dir(),
          * @kn->priv is RCU safe. Let's do the RCU dancing.
          */
         rcu_read_lock();
@@ -4060,9 +4060,9 @@ static int cgroup_populate_dir(struct cgroup *cgrp, unsigned int subsys_mask)
  *    Implemented in kill_css().
  *
  * 2. When the percpu_ref is confirmed to be visible as killed on all CPUs
- *    and thus css_tryget() is guaranteed to fail, the css can be offlined
- *    by invoking offline_css(). After offlining, the base ref is put.
- *    Implemented in css_killed_work_fn().
+ *    and thus css_tryget_online() is guaranteed to fail, the css can be
+ *    offlined by invoking offline_css(). After offlining, the base ref is
+ *    put. Implemented in css_killed_work_fn().
  *
  * 3. When the percpu_ref reaches zero, the only possible remaining
  *    accessors are inside RCU read sections. css_release() schedules the
@@ -4386,7 +4386,7 @@ static int cgroup_mkdir(struct kernfs_node *parent_kn, const char *name,

 /*
  * This is called when the refcnt of a css is confirmed to be killed.
- * css_tryget() is now guaranteed to fail.
+ * css_tryget_online() is now guaranteed to fail.
  */
 static void css_killed_work_fn(struct work_struct *work)
 {
@@ -4398,8 +4398,8 @@ static void css_killed_work_fn(struct work_struct *work)
         mutex_lock(&cgroup_mutex);

         /*
-         * css_tryget() is guaranteed to fail now. Tell subsystems to
-         * initate destruction.
+         * css_tryget_online() is guaranteed to fail now. Tell subsystems
+         * to initate destruction.
          */
         offline_css(css);

@@ -4440,8 +4440,8 @@ static void css_killed_ref_fn(struct percpu_ref *ref)
  *
  * This function initiates destruction of @css by removing cgroup interface
  * files and putting its base reference. ->css_offline() will be invoked
- * asynchronously once css_tryget() is guaranteed to fail and when the
- * reference count reaches zero, @css will be released.
+ * asynchronously once css_tryget_online() is guaranteed to fail and when
+ * the reference count reaches zero, @css will be released.
  */
 static void kill_css(struct cgroup_subsys_state *css)
 {
@@ -4462,7 +4462,7 @@ static void kill_css(struct cgroup_subsys_state *css)
         /*
          * cgroup core guarantees that, by the time ->css_offline() is
          * invoked, no new css reference will be given out via
-         * css_tryget(). We can't simply call percpu_ref_kill() and
+         * css_tryget_online(). We can't simply call percpu_ref_kill() and
          * proceed to offlining css's because percpu_ref_kill() doesn't
          * guarantee that the ref is seen as killed on all CPUs on return.
          *
@@ -4478,9 +4478,9 @@ static void kill_css(struct cgroup_subsys_state *css)
  *
  * css's make use of percpu refcnts whose killing latency shouldn't be
  * exposed to userland and are RCU protected. Also, cgroup core needs to
- * guarantee that css_tryget() won't succeed by the time ->css_offline() is
- * invoked. To satisfy all the requirements, destruction is implemented in
- * the following two steps.
+ * guarantee that css_tryget_online() won't succeed by the time
+ * ->css_offline() is invoked. To satisfy all the requirements,
+ * destruction is implemented in the following two steps.
  *
  * s1. Verify @cgrp can be destroyed and mark it dying. Remove all
  *     userland visible parts and start killing the percpu refcnts of
@@ -4574,9 +4574,9 @@ static int cgroup_destroy_locked(struct cgroup *cgrp)
         /*
          * There are two control paths which try to determine cgroup from
          * dentry without going through kernfs - cgroupstats_build() and
-         * css_tryget_from_dir(). Those are supported by RCU protecting
-         * clearing of cgrp->kn->priv backpointer, which should happen
-         * after all files under it have been removed.
+         * css_tryget_online_from_dir(). Those are supported by RCU
+         * protecting clearing of cgrp->kn->priv backpointer, which should
+         * happen after all files under it have been removed.
          */
         kernfs_remove(cgrp->kn);        /* @cgrp has an extra ref on its kn */
         RCU_INIT_POINTER(*(void __rcu __force **)&cgrp->kn->priv, NULL);
@@ -5173,7 +5173,7 @@ static int __init cgroup_disable(char *str)
 __setup("cgroup_disable=", cgroup_disable);

 /**
- * css_tryget_from_dir - get corresponding css from the dentry of a cgroup dir
+ * css_tryget_online_from_dir - get corresponding css from a cgroup dentry
  * @dentry: directory dentry of interest
  * @ss: subsystem of interest
  *
@@ -5181,8 +5181,8 @@ __setup("cgroup_disable=", cgroup_disable);
  * to get the corresponding css and return it. If such css doesn't exist
  * or can't be pinned, an ERR_PTR value is returned.
  */
-struct cgroup_subsys_state *css_tryget_from_dir(struct dentry *dentry,
-                                                struct cgroup_subsys *ss)
+struct cgroup_subsys_state *css_tryget_online_from_dir(struct dentry *dentry,
+                                                       struct cgroup_subsys *ss)
 {
         struct kernfs_node *kn = kernfs_node_from_dentry(dentry);
         struct cgroup_subsys_state *css = NULL;
|
|||
if (cgrp)
|
||||
css = cgroup_css(cgrp, ss);
|
||||
|
||||
if (!css || !css_tryget(css))
|
||||
if (!css || !css_tryget_online(css))
|
||||
css = ERR_PTR(-ENOENT);
|
||||
|
||||
rcu_read_unlock();
|
||||
|
|
@@ -304,7 +304,7 @@ static int freezer_read(struct seq_file *m, void *v)

         /* update states bottom-up */
         css_for_each_descendant_post(pos, css) {
-                if (!css_tryget(pos))
+                if (!css_tryget_online(pos))
                         continue;
                 rcu_read_unlock();

@@ -404,7 +404,7 @@ static void freezer_change_state(struct freezer *freezer, bool freeze)
                 struct freezer *pos_f = css_freezer(pos);
                 struct freezer *parent = parent_freezer(pos_f);

-                if (!css_tryget(pos))
+                if (!css_tryget_online(pos))
                         continue;
                 rcu_read_unlock();

@@ -872,7 +872,7 @@ static void update_tasks_cpumask_hier(struct cpuset *root_cs, bool update_root)
                                 continue;
                         }
                 }
-                if (!css_tryget(&cp->css))
+                if (!css_tryget_online(&cp->css))
                         continue;
                 rcu_read_unlock();

@@ -1108,7 +1108,7 @@ static void update_tasks_nodemask_hier(struct cpuset *root_cs, bool update_root)
                                 continue;
                         }
                 }
-                if (!css_tryget(&cp->css))
+                if (!css_tryget_online(&cp->css))
                         continue;
                 rcu_read_unlock();

@@ -2153,7 +2153,7 @@ static void cpuset_hotplug_workfn(struct work_struct *work)

         rcu_read_lock();
         cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) {
-                if (cs == &top_cpuset || !css_tryget(&cs->css))
+                if (cs == &top_cpuset || !css_tryget_online(&cs->css))
                         continue;
                 rcu_read_unlock();

@@ -607,7 +607,8 @@ static inline int perf_cgroup_connect(int fd, struct perf_event *event,
         if (!f.file)
                 return -EBADF;

-        css = css_tryget_from_dir(f.file->f_dentry, &perf_event_cgrp_subsys);
+        css = css_tryget_online_from_dir(f.file->f_dentry,
+                                         &perf_event_cgrp_subsys);
         if (IS_ERR(css)) {
                 ret = PTR_ERR(css);
                 goto out;
@@ -181,7 +181,7 @@ int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
 again:
         rcu_read_lock();
         h_cg = hugetlb_cgroup_from_task(current);
-        if (!css_tryget(&h_cg->css)) {
+        if (!css_tryget_online(&h_cg->css)) {
                 rcu_read_unlock();
                 goto again;
         }
@@ -567,7 +567,8 @@ void sock_update_memcg(struct sock *sk)
                 memcg = mem_cgroup_from_task(current);
                 cg_proto = sk->sk_prot->proto_cgroup(memcg);
                 if (!mem_cgroup_is_root(memcg) &&
-                    memcg_proto_active(cg_proto) && css_tryget(&memcg->css)) {
+                    memcg_proto_active(cg_proto) &&
+                    css_tryget_online(&memcg->css)) {
                         sk->sk_cgrp = cg_proto;
                 }
                 rcu_read_unlock();
@@ -834,7 +835,7 @@ __mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
                  */
                 __mem_cgroup_remove_exceeded(mz->memcg, mz, mctz);
                 if (!res_counter_soft_limit_excess(&mz->memcg->res) ||
-                    !css_tryget(&mz->memcg->css))
+                    !css_tryget_online(&mz->memcg->css))
                         goto retry;
 done:
         return mz;
@@ -1076,7 +1077,7 @@ static struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
                 memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
                 if (unlikely(!memcg))
                         memcg = root_mem_cgroup;
-        } while (!css_tryget(&memcg->css));
+        } while (!css_tryget_online(&memcg->css));
         rcu_read_unlock();
         return memcg;
 }
@@ -1113,7 +1114,8 @@ static struct mem_cgroup *__mem_cgroup_iter_next(struct mem_cgroup *root,
          */
         if (next_css) {
                 if ((next_css == &root->css) ||
-                    ((next_css->flags & CSS_ONLINE) && css_tryget(next_css)))
+                    ((next_css->flags & CSS_ONLINE) &&
+                     css_tryget_online(next_css)))
                         return mem_cgroup_from_css(next_css);

                 prev_css = next_css;
@@ -1159,7 +1161,7 @@ mem_cgroup_iter_load(struct mem_cgroup_reclaim_iter *iter,
                  * would be returned all the time.
                  */
                 if (position && position != root &&
-                    !css_tryget(&position->css))
+                    !css_tryget_online(&position->css))
                         position = NULL;
         }
         return position;
@@ -2785,9 +2787,9 @@ static void __mem_cgroup_cancel_local_charge(struct mem_cgroup *memcg,

 /*
  * A helper function to get mem_cgroup from ID. must be called under
- * rcu_read_lock(). The caller is responsible for calling css_tryget if
- * the mem_cgroup is used for charging. (dropping refcnt from swap can be
- * called against removed memcg.)
+ * rcu_read_lock(). The caller is responsible for calling
+ * css_tryget_online() if the mem_cgroup is used for charging. (dropping
+ * refcnt from swap can be called against removed memcg.)
  */
 static struct mem_cgroup *mem_cgroup_lookup(unsigned short id)
 {
@@ -2810,14 +2812,14 @@ struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
         lock_page_cgroup(pc);
         if (PageCgroupUsed(pc)) {
                 memcg = pc->mem_cgroup;
-                if (memcg && !css_tryget(&memcg->css))
+                if (memcg && !css_tryget_online(&memcg->css))
                         memcg = NULL;
         } else if (PageSwapCache(page)) {
                 ent.val = page_private(page);
                 id = lookup_swap_cgroup_id(ent);
                 rcu_read_lock();
                 memcg = mem_cgroup_lookup(id);
-                if (memcg && !css_tryget(&memcg->css))
+                if (memcg && !css_tryget_online(&memcg->css))
                         memcg = NULL;
                 rcu_read_unlock();
         }
@@ -3473,7 +3475,7 @@ struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep,
         }

         /* The corresponding put will be done in the workqueue. */
-        if (!css_tryget(&memcg->css))
+        if (!css_tryget_online(&memcg->css))
                 goto out;
         rcu_read_unlock();

@@ -4246,8 +4248,8 @@ void mem_cgroup_uncharge_swap(swp_entry_t ent)
         memcg = mem_cgroup_lookup(id);
         if (memcg) {
                 /*
-                 * We uncharge this because swap is freed.
-                 * This memcg can be obsolete one. We avoid calling css_tryget
+                 * We uncharge this because swap is freed. This memcg can
+                 * be obsolete one. We avoid calling css_tryget_online().
                  */
                 if (!mem_cgroup_is_root(memcg))
                         res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
@@ -5840,10 +5842,10 @@ static void kmem_cgroup_css_offline(struct mem_cgroup *memcg)
          * which is then paired with css_put during uncharge resp. here.
          *
          * Although this might sound strange as this path is called from
-         * css_offline() when the referencemight have dropped down to 0
-         * and shouldn't be incremented anymore (css_tryget would fail)
-         * we do not have other options because of the kmem allocations
-         * lifetime.
+         * css_offline() when the referencemight have dropped down to 0 and
+         * shouldn't be incremented anymore (css_tryget_online() would
+         * fail) we do not have other options because of the kmem
+         * allocations lifetime.
          */
         css_get(&memcg->css);

@@ -6051,8 +6053,8 @@ static int memcg_write_event_control(struct cgroup_subsys_state *css,
          * automatically removed on cgroup destruction but the removal is
          * asynchronous, so take an extra ref on @css.
          */
-        cfile_css = css_tryget_from_dir(cfile.file->f_dentry->d_parent,
-                                        &memory_cgrp_subsys);
+        cfile_css = css_tryget_online_from_dir(cfile.file->f_dentry->d_parent,
+                                               &memory_cgrp_subsys);
         ret = -EINVAL;
         if (IS_ERR(cfile_css))
                 goto out_put_cfile;
@@ -6496,7 +6498,7 @@ static void mem_cgroup_css_free(struct cgroup_subsys_state *css)
         /*
          * XXX: css_offline() would be where we should reparent all
          * memory to prepare the cgroup for destruction. However,
-         * memcg does not do css_tryget() and res_counter charging
+         * memcg does not do css_tryget_online() and res_counter charging
          * under the same RCU lock region, which means that charging
          * could race with offlining. Offlining only happens to
          * cgroups with no tasks in them but charges can show up
@@ -6510,9 +6512,9 @@ static void mem_cgroup_css_free(struct cgroup_subsys_state *css)
          * lookup_swap_cgroup_id()
          * rcu_read_lock()
          * mem_cgroup_lookup()
-         * css_tryget()
+         * css_tryget_online()
          * rcu_read_unlock()
-         *                        disable css_tryget()
+         *                        disable css_tryget_online()
          *                        call_rcu()
          *                          offline_css()
          *                            reparent_charges()