x86/smp: Move smp_function_call implementations into IPI code

Move it where it belongs. That allows keeping all the shorthand logic in
one place.

No functional change.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20190722105220.677835995@linutronix.de
parent 22ca7ee933
commit d0a7166bc7
arch/x86/include/asm/smp.h:

@@ -143,6 +143,7 @@ void play_dead_common(void);
 void wbinvd_on_cpu(int cpu);
 int wbinvd_on_all_cpus(void);
 
+void native_smp_send_reschedule(int cpu);
 void native_send_call_func_ipi(const struct cpumask *mask);
 void native_send_call_func_single_ipi(int cpu);
 void x86_idle_thread_init(unsigned int cpu, struct task_struct *idle);
arch/x86/kernel/apic/ipi.c:

@@ -62,6 +62,46 @@ void apic_send_IPI_allbutself(unsigned int vector)
 		apic->send_IPI_mask_allbutself(cpu_online_mask, vector);
 }
 
+/*
+ * Send a 'reschedule' IPI to another CPU. It goes straight through and
+ * wastes no time serializing anything. Worst case is that we lose a
+ * reschedule ...
+ */
+void native_smp_send_reschedule(int cpu)
+{
+	if (unlikely(cpu_is_offline(cpu))) {
+		WARN(1, "sched: Unexpected reschedule of offline CPU#%d!\n", cpu);
+		return;
+	}
+	apic->send_IPI(cpu, RESCHEDULE_VECTOR);
+}
+
+void native_send_call_func_single_ipi(int cpu)
+{
+	apic->send_IPI(cpu, CALL_FUNCTION_SINGLE_VECTOR);
+}
+
+void native_send_call_func_ipi(const struct cpumask *mask)
+{
+	cpumask_var_t allbutself;
+
+	if (!alloc_cpumask_var(&allbutself, GFP_ATOMIC)) {
+		apic->send_IPI_mask(mask, CALL_FUNCTION_VECTOR);
+		return;
+	}
+
+	cpumask_copy(allbutself, cpu_online_mask);
+	__cpumask_clear_cpu(smp_processor_id(), allbutself);
+
+	if (cpumask_equal(mask, allbutself) &&
+	    cpumask_equal(cpu_online_mask, cpu_callout_mask))
+		apic->send_IPI_allbutself(CALL_FUNCTION_VECTOR);
+	else
+		apic->send_IPI_mask(mask, CALL_FUNCTION_VECTOR);
+
+	free_cpumask_var(allbutself);
+}
+
 #endif /* CONFIG_SMP */
 
 static inline int __prepare_ICR2(unsigned int mask)
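Aside from straight code movement, the one piece of logic worth calling out is the shorthand decision in native_send_call_func_ipi() above: the requested mask is compared against an "all online CPUs except the sender" mask, and the cheaper send_IPI_allbutself() path is taken only when they match and the online and callout masks agree. Below is a minimal stand-alone C sketch of that decision, with cpumasks modeled as 64-bit bitmaps; all names here are hypothetical illustrations, not kernel API.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Model a cpumask as a 64-bit bitmap, one bit per CPU (illustration only). */
typedef uint64_t cpumask_t;

/*
 * Mirror of the condition in native_send_call_func_ipi(): the shorthand
 * is usable only if the target mask is exactly "every online CPU except
 * the sender" and every online CPU has been called out (fully booted).
 */
static bool can_use_allbutself(cpumask_t target, cpumask_t online,
			       cpumask_t callout, int self)
{
	cpumask_t allbutself = online & ~((cpumask_t)1 << self);

	return target == allbutself && online == callout;
}

int main(void)
{
	cpumask_t online  = 0xF;  /* CPUs 0-3 online */
	cpumask_t callout = 0xF;  /* all four fully booted */
	int self = 0;             /* sending CPU */

	/* Target CPUs 1-3, i.e. everyone but the sender: shorthand path. */
	printf("mask 0xE -> %s\n",
	       can_use_allbutself(0xE, online, callout, self)
	       ? "send_IPI_allbutself" : "send_IPI_mask");

	/* Target CPUs 1-2 only: must take the targeted mask path. */
	printf("mask 0x6 -> %s\n",
	       can_use_allbutself(0x6, online, callout, self)
	       ? "send_IPI_allbutself" : "send_IPI_mask");
	return 0;
}

Note how the cpu_online_mask/cpu_callout_mask comparison keeps the shorthand conservative during CPU bringup: whenever the two masks disagree, the code falls back to the explicitly targeted send_IPI_mask() path.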
arch/x86/kernel/smp.c:

@@ -115,46 +115,6 @@
 static atomic_t stopping_cpu = ATOMIC_INIT(-1);
 static bool smp_no_nmi_ipi = false;
 
-/*
- * this function sends a 'reschedule' IPI to another CPU.
- * it goes straight through and wastes no time serializing
- * anything. Worst case is that we lose a reschedule ...
- */
-static void native_smp_send_reschedule(int cpu)
-{
-	if (unlikely(cpu_is_offline(cpu))) {
-		WARN(1, "sched: Unexpected reschedule of offline CPU#%d!\n", cpu);
-		return;
-	}
-	apic->send_IPI(cpu, RESCHEDULE_VECTOR);
-}
-
-void native_send_call_func_single_ipi(int cpu)
-{
-	apic->send_IPI(cpu, CALL_FUNCTION_SINGLE_VECTOR);
-}
-
-void native_send_call_func_ipi(const struct cpumask *mask)
-{
-	cpumask_var_t allbutself;
-
-	if (!alloc_cpumask_var(&allbutself, GFP_ATOMIC)) {
-		apic->send_IPI_mask(mask, CALL_FUNCTION_VECTOR);
-		return;
-	}
-
-	cpumask_copy(allbutself, cpu_online_mask);
-	__cpumask_clear_cpu(smp_processor_id(), allbutself);
-
-	if (cpumask_equal(mask, allbutself) &&
-	    cpumask_equal(cpu_online_mask, cpu_callout_mask))
-		apic->send_IPI_allbutself(CALL_FUNCTION_VECTOR);
-	else
-		apic->send_IPI_mask(mask, CALL_FUNCTION_VECTOR);
-
-	free_cpumask_var(allbutself);
-}
-
 static int smp_stop_nmi_callback(unsigned int val, struct pt_regs *regs)
 {
 	/* We are registered on stopping cpu too, avoid spurious NMI */