slab: Replace synchronize_sched() with synchronize_rcu()
Now that synchronize_rcu() waits for preempt-disable regions of code as well as RCU read-side critical sections, synchronize_sched() can be replaced by synchronize_rcu(). This commit therefore makes this change.

Signed-off-by: Paul E. McKenney <paulmck@linux.ibm.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: <linux-mm@kvack.org>
commit 6564a25e6c
parent 36bd1a8e91
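As the commit message notes, the consolidated RCU implementation treats a preempt-disabled region as an RCU read-side critical section, which is why a single synchronize_rcu() can stand in for synchronize_sched(). Below is a minimal sketch of that guarantee; the names (struct foo, global_foo, foo_lock, reader_foo(), update_foo()) are hypothetical and not part of this patch.

#include <linux/mutex.h>
#include <linux/preempt.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct foo {
	int val;
};

static struct foo __rcu *global_foo;
static DEFINE_MUTEX(foo_lock);

/* Reader: a preempt-disabled region is now itself an RCU read-side
 * critical section, so no explicit rcu_read_lock() is needed. */
static int reader_foo(void)
{
	struct foo *p;
	int val = 0;

	preempt_disable();
	p = rcu_dereference_sched(global_foo);
	if (p)
		val = p->val;
	preempt_enable();
	return val;
}

/* Updater: publish the new object, wait for all readers (including
 * preempt-disabled ones), then free the old object. */
static void update_foo(struct foo *newp)
{
	struct foo *old;

	mutex_lock(&foo_lock);
	old = rcu_dereference_protected(global_foo,
					lockdep_is_held(&foo_lock));
	rcu_assign_pointer(global_foo, newp);
	mutex_unlock(&foo_lock);

	synchronize_rcu();	/* would have been synchronize_sched() */
	kfree(old);
}

Before the flavor consolidation, a reader like the one above required the sched-flavor primitives on the update side; afterwards the single RCU flavor covers it, which is all the hunks below rely on.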
@@ -962,10 +962,10 @@ static int setup_kmem_cache_node(struct kmem_cache *cachep,
 	 * To protect lockless access to n->shared during irq disabled context.
 	 * If n->shared isn't NULL in irq disabled context, accessing to it is
 	 * guaranteed to be valid until irq is re-enabled, because it will be
-	 * freed after synchronize_sched().
+	 * freed after synchronize_rcu().
 	 */
 	if (old_shared && force_change)
-		synchronize_sched();
+		synchronize_rcu();
 
 fail:
 	kfree(old_shared);
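The hunk above depends on the same property for irq-disabled readers: n->shared may be read with interrupts disabled, and the old array is freed only after a grace period. A hedged sketch of that shape follows; struct shared_cache, shared_ptr, reader_path() and swap_shared() are illustrative stand-ins, not the real slab structures.

#include <linux/irqflags.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct shared_cache {
	unsigned int avail;
};

static struct shared_cache __rcu *shared_ptr;	/* stand-in for n->shared */

/* Reader: with irqs disabled, the pointer read here stays valid until
 * irqs are re-enabled, because the updater waits for a grace period
 * before freeing it. */
static unsigned int reader_path(void)
{
	struct shared_cache *sc;
	unsigned int avail = 0;
	unsigned long flags;

	local_irq_save(flags);
	sc = rcu_dereference_sched(shared_ptr);
	if (sc)
		avail = sc->avail;
	local_irq_restore(flags);
	return avail;
}

/* Updater, mirroring the setup_kmem_cache_node() hunk: install the new
 * cache, then synchronize_rcu() before kfree() of the old one.
 * Updaters are assumed to be serialized by the caller. */
static void swap_shared(struct shared_cache *new_sc)
{
	struct shared_cache *old_sc;

	old_sc = rcu_dereference_protected(shared_ptr, 1);
	rcu_assign_pointer(shared_ptr, new_sc);
	if (old_sc) {
		synchronize_rcu();	/* was synchronize_sched() */
		kfree(old_sc);
	}
}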
@@ -724,7 +724,7 @@ void slab_deactivate_memcg_cache_rcu_sched(struct kmem_cache *s,
 	css_get(&s->memcg_params.memcg->css);
 
 	s->memcg_params.deact_fn = deact_fn;
-	call_rcu_sched(&s->memcg_params.deact_rcu_head, kmemcg_deactivate_rcufn);
+	call_rcu(&s->memcg_params.deact_rcu_head, kmemcg_deactivate_rcufn);
 }
 
 void memcg_deactivate_kmem_caches(struct mem_cgroup *memcg)
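slab_deactivate_memcg_cache_rcu_sched() queues its deactivation work through an rcu_head embedded in the cache, so the callback only runs after a grace period; with the flavors consolidated, plain call_rcu() gives the same ordering that call_rcu_sched() used to. A small sketch of that embedded-rcu_head pattern, with hypothetical names (struct deferred_item, deferred_item_free(), retire_item()):

#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct deferred_item {
	int payload;
	struct rcu_head rcu;	/* embedded, like memcg_params.deact_rcu_head */
};

/* Invoked after a full grace period, i.e. once no reader that could
 * have seen the item before retire_item() is still running. */
static void deferred_item_free(struct rcu_head *head)
{
	struct deferred_item *item =
		container_of(head, struct deferred_item, rcu);

	kfree(item);
}

static void retire_item(struct deferred_item *item)
{
	/* previously: call_rcu_sched(&item->rcu, deferred_item_free); */
	call_rcu(&item->rcu, deferred_item_free);
}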
@@ -839,11 +839,11 @@ static void flush_memcg_workqueue(struct kmem_cache *s)
 	mutex_unlock(&slab_mutex);
 
 	/*
-	 * SLUB deactivates the kmem_caches through call_rcu_sched. Make
+	 * SLUB deactivates the kmem_caches through call_rcu. Make
 	 * sure all registered rcu callbacks have been invoked.
 	 */
 	if (IS_ENABLED(CONFIG_SLUB))
-		rcu_barrier_sched();
+		rcu_barrier();
 
 	/*
 	 * SLAB and SLUB create memcg kmem_caches through workqueue and SLUB
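The flush_memcg_workqueue() hunk pairs with the call_rcu() change above: rcu_barrier() does not wait for a grace period as such, it waits until every callback already queued with call_rcu() has been invoked. Continuing the hypothetical retire_item() sketch, a teardown path would look roughly like this:

#include <linux/rcupdate.h>

/* Teardown: make sure every deferred_item_free() callback queued by
 * retire_item() has finished before the surrounding object (module,
 * cache, ...) goes away. */
static void retire_teardown(void)
{
	rcu_barrier();		/* was rcu_barrier_sched() */
}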