stop_machine: Unexport __stop_machine()
The only caller outside of stop_machine.c is _cpu_down(), which can use
stop_machine() instead: get_online_cpus() is fine under cpu_hotplug_begin().

Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Tejun Heo <tj@kernel.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: dave@stgolabs.net
Cc: der.herr@hofr.at
Cc: paulmck@linux.vnet.ibm.com
Cc: riel@redhat.com
Cc: viro@ZenIV.linux.org.uk
Link: http://lkml.kernel.org/r/20150630012951.GA23934@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit 7eeb088e72 (parent b377c2a089)
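The commit message's one-line justification is easy to miss: stop_machine() is just __stop_machine() bracketed by get_online_cpus()/put_online_cpus(), and _cpu_down() already runs under cpu_hotplug_begin(), where get_online_cpus() short-circuits for the active writer. A simplified sketch of the 2015-era helpers (paraphrased from kernel/stop_machine.c and kernel/cpu.c of that period, not quoted verbatim):

/*
 * Sketch, paraphrased from ~v4.1-era sources; details trimmed.
 * stop_machine() only adds hotplug pinning around __stop_machine().
 */
int stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus)
{
	int ret;

	get_online_cpus();	/* no CPUs may come or go during the stop */
	ret = __stop_machine(fn, data, cpus);
	put_online_cpus();
	return ret;
}

/*
 * Why calling this from _cpu_down() cannot deadlock: the hotplug
 * writer records itself in cpu_hotplug.active_writer, so its own
 * get_online_cpus() returns immediately instead of taking the
 * reader side of the hotplug lock.
 */
void get_online_cpus(void)
{
	might_sleep();
	if (cpu_hotplug.active_writer == current)
		return;		/* nested under cpu_hotplug_begin() */
	mutex_lock(&cpu_hotplug.lock);
	atomic_inc(&cpu_hotplug.refcount);
	mutex_unlock(&cpu_hotplug.lock);
}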
include/linux/stop_machine.h
@@ -114,23 +114,11 @@ static inline int try_stop_cpus(const struct cpumask *cpumask,
  * grabbing every spinlock in the kernel. */
 int stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus);
-
-/**
- * __stop_machine: freeze the machine on all CPUs and run this function
- * @fn: the function to run
- * @data: the data ptr for the @fn
- * @cpus: the cpus to run the @fn() on (NULL = any online cpu)
- *
- * Description: This is a special version of the above, which assumes cpus
- * won't come or go while it's being called. Used by hotplug cpu.
- */
-int __stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus);
-
 int stop_machine_from_inactive_cpu(int (*fn)(void *), void *data,
 				   const struct cpumask *cpus);
 
 #else /* CONFIG_STOP_MACHINE && CONFIG_SMP */
 
-static inline int __stop_machine(int (*fn)(void *), void *data,
+static inline int stop_machine(int (*fn)(void *), void *data,
 				const struct cpumask *cpus)
 {
 	unsigned long flags;
@@ -141,16 +129,10 @@ static inline int __stop_machine(int (*fn)(void *), void *data,
 	return ret;
 }
 
-static inline int stop_machine(int (*fn)(void *), void *data,
-			       const struct cpumask *cpus)
-{
-	return __stop_machine(fn, data, cpus);
-}
-
 static inline int stop_machine_from_inactive_cpu(int (*fn)(void *), void *data,
 						 const struct cpumask *cpus)
 {
-	return __stop_machine(fn, data, cpus);
+	return stop_machine(fn, data, cpus);
 }
 
 #endif /* CONFIG_STOP_MACHINE && CONFIG_SMP */
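After this change, stop_machine() is the only entry point in both the SMP and the UP (!CONFIG_SMP) configurations. A hypothetical caller looks the same either way (callback and function names below are invented for illustration):

/* Hypothetical callback: runs with every other online CPU spinning
 * with interrupts disabled, so it can rewrite otherwise-racy state. */
static int frobnicate_cb(void *arg)
{
	*(int *)arg = 42;	/* stand-in for e.g. patching kernel text */
	return 0;
}

static int frobnicate(void)
{
	int val;

	/* NULL cpumask: run the callback on any one online CPU. */
	return stop_machine(frobnicate_cb, &val, NULL);
}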
kernel/cpu.c
@@ -395,7 +395,7 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
 	 * So now all preempt/rcu users must observe !cpu_active().
 	 */
 
-	err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
+	err = stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
 	if (err) {
 		/* CPU didn't die: tell everyone. Can't complain. */
 		cpu_notify_nofail(CPU_DOWN_FAILED | mod, hcpu);
kernel/stop_machine.c
@@ -513,7 +513,7 @@ early_initcall(cpu_stop_init);
 
 #ifdef CONFIG_STOP_MACHINE
 
-int __stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus)
+static int __stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus)
 {
 	struct multi_stop_data msdata = {
 		.fn = fn,
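For context, the body that this hunk truncates continues roughly as below (reconstructed from memory of that era's kernel/stop_machine.c; treat it as a sketch, not the verbatim source): the function fills in a multi_stop_data and hands multi_cpu_stop() to the per-CPU stopper threads.

static int __stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus)
{
	struct multi_stop_data msdata = {
		.fn = fn,
		.data = data,
		.num_threads = num_online_cpus(),
		.active_cpus = cpus,
	};

	if (!stop_machine_initialized) {
		/* Early boot, before the stopper threads exist: there is
		 * only one CPU, so just run fn() with interrupts off. */
		unsigned long flags;
		int ret;

		WARN_ON_ONCE(msdata.num_threads != 1);
		local_irq_save(flags);
		hard_irq_disable();
		ret = (*fn)(data);
		local_irq_restore(flags);
		return ret;
	}

	/* Set the initial state and stop all online CPUs. */
	set_state(&msdata, MULTI_STOP_PREPARE);
	return stop_cpus(cpu_online_mask, multi_cpu_stop, &msdata);
}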