smp: Add task_struct argument to __cpu_up()
Preparatory patch to make the idle thread allocation for secondary
cpus generic.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Srivatsa S. Bhat <srivatsa.bhat@linux.vnet.ibm.com>
Cc: Matt Turner <mattst88@gmail.com>
Cc: Russell King <linux@arm.linux.org.uk>
Cc: Mike Frysinger <vapier@gentoo.org>
Cc: Jesper Nilsson <jesper.nilsson@axis.com>
Cc: Richard Kuo <rkuo@codeaurora.org>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Hirokazu Takata <takata@linux-m32r.org>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: David Howells <dhowells@redhat.com>
Cc: James E.J. Bottomley <jejb@parisc-linux.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: David S. Miller <davem@davemloft.net>
Cc: Chris Metcalf <cmetcalf@tilera.com>
Cc: Richard Weinberger <richard@nod.at>
Cc: x86@kernel.org
Link: http://lkml.kernel.org/r/20120420124556.964170564@linutronix.de
commit 8239c25f47
parent bda3bdc9af
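The change itself is mechanical: every architecture's __cpu_up() gains a
struct task_struct *tidle parameter, and the generic hotplug code in
kernel/cpu.c passes NULL for now. For orientation, the sketch below shows
roughly how the generic path could hand a pre-allocated idle task to the
architecture once the allocation is made generic. It is an illustration
only, not part of this patch; the idle_thread_get() helper is an assumption
here and is not provided by this commit.

#include <linux/err.h>
#include <linux/sched.h>
#include <linux/smp.h>

/*
 * Sketch only: bring up a secondary CPU using an idle task that the
 * generic code (not the architecture) has already allocated.
 * idle_thread_get() is assumed to exist for the sake of illustration.
 */
static int cpu_up_with_idle(unsigned int cpu)
{
	struct task_struct *idle = idle_thread_get(cpu);	/* assumed helper */

	if (IS_ERR(idle))
		return PTR_ERR(idle);

	/* The architecture receives the idle task instead of creating it. */
	return __cpu_up(cpu, idle);
}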
@@ -487,7 +487,7 @@ smp_prepare_boot_cpu(void)
 }
 
 int __cpuinit
-__cpu_up(unsigned int cpu)
+__cpu_up(unsigned int cpu, struct task_struct *tidle)
 {
         smp_boot_one_cpu(cpu);
 
@@ -60,7 +60,7 @@ enum ipi_msg_type {
 
 static DECLARE_COMPLETION(cpu_running);
 
-int __cpuinit __cpu_up(unsigned int cpu)
+int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle)
 {
         struct cpuinfo_arm *ci = &per_cpu(cpu_data, cpu);
         struct task_struct *idle = ci->idle;
@@ -340,7 +340,7 @@ void smp_send_stop(void)
         return;
 }
 
-int __cpuinit __cpu_up(unsigned int cpu)
+int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle)
 {
         int ret;
         struct blackfin_cpudata *ci = &per_cpu(cpu_data, cpu);
@@ -207,7 +207,7 @@ int setup_profiling_timer(unsigned int multiplier)
  */
 unsigned long cache_decay_ticks = 1;
 
-int __cpuinit __cpu_up(unsigned int cpu)
+int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle)
 {
         smp_boot_one_cpu(cpu);
         return cpu_online(cpu) ? 0 : -ENOSYS;
@@ -196,7 +196,7 @@ void __cpuinit start_secondary(void)
  * maintains control until "cpu_online(cpu)" is set.
  */
 
-int __cpuinit __cpu_up(unsigned int cpu)
+int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle)
 {
         struct task_struct *idle;
         struct thread_info *thread;
@@ -793,7 +793,7 @@ set_cpu_sibling_map(int cpu)
 }
 
 int __cpuinit
-__cpu_up (unsigned int cpu)
+__cpu_up(unsigned int cpu, struct task_struct *tidle)
 {
         int ret;
         int sapicid;
@@ -343,7 +343,7 @@ static void __init do_boot_cpu(int phys_id)
         }
 }
 
-int __cpuinit __cpu_up(unsigned int cpu_id)
+int __cpuinit __cpu_up(unsigned int cpu_id, struct task_struct *tidle)
 {
         int timeout;
 
@@ -209,7 +209,7 @@ static void __cpuinit do_fork_idle(struct work_struct *work)
         complete(&c_idle->done);
 }
 
-int __cpuinit __cpu_up(unsigned int cpu)
+int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle)
 {
         struct task_struct *idle;
 
@@ -921,7 +921,7 @@ void initialize_secondary(void)
  * __cpu_up - Set smp_commenced_mask for the nominated CPU
  * @cpu: The target CPU.
  */
-int __devinit __cpu_up(unsigned int cpu)
+int __devinit __cpu_up(unsigned int cpu, struct task_struct *tidle)
 {
         int timeout;
 
@@ -449,7 +449,7 @@ void smp_cpus_done(unsigned int cpu_max)
 }
 
 
-int __cpuinit __cpu_up(unsigned int cpu)
+int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle)
 {
         if (cpu != 0 && cpu < parisc_max_cpus)
                 smp_boot_one_cpu(cpu);
@@ -482,7 +482,7 @@ static int __cpuinit create_idle(unsigned int cpu)
         return 0;
 }
 
-int __cpuinit __cpu_up(unsigned int cpu)
+int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle)
 {
         int rc, c;
 
@@ -16,7 +16,7 @@
 extern struct mutex smp_cpu_state_mutex;
 extern struct save_area *zfcpdump_save_areas[NR_CPUS + 1];
 
-extern int __cpu_up(unsigned int cpu);
+extern int __cpu_up(unsigned int cpu, struct task_struct *tidle);
 
 extern void arch_send_call_function_single_ipi(int cpu);
 extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
@@ -738,7 +738,7 @@ static void __cpuinit smp_fork_idle(struct work_struct *work)
 }
 
 /* Upping and downing of CPUs */
-int __cpuinit __cpu_up(unsigned int cpu)
+int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle)
 {
         struct create_idle c_idle;
         struct pcpu *pcpu;
@@ -220,7 +220,7 @@ extern struct {
         void *thread_info;
 } stack_start;
 
-int __cpuinit __cpu_up(unsigned int cpu)
+int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle)
 {
         struct task_struct *tsk;
         unsigned long timeout;
@@ -411,7 +411,7 @@ void __init smp_prepare_boot_cpu(void)
         set_cpu_possible(cpuid, true);
 }
 
-int __cpuinit __cpu_up(unsigned int cpu)
+int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle)
 {
         extern int __cpuinit smp4m_boot_one_cpu(int);
         extern int __cpuinit smp4d_boot_one_cpu(int);
@@ -1227,7 +1227,7 @@ void __devinit smp_fill_in_sib_core_maps(void)
         }
 }
 
-int __cpuinit __cpu_up(unsigned int cpu)
+int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle)
 {
         int ret = smp_boot_one_cpu(cpu);
 
@@ -222,7 +222,7 @@ void __cpuinit online_secondary(void)
         cpu_idle();
 }
 
-int __cpuinit __cpu_up(unsigned int cpu)
+int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle)
 {
         /* Wait 5s total for all CPUs for them to come online */
         static int timeout;
@@ -140,7 +140,7 @@ void smp_prepare_boot_cpu(void)
         set_cpu_online(smp_processor_id(), true);
 }
 
-int __cpu_up(unsigned int cpu)
+int __cpu_up(unsigned int cpu, struct task_struct *tidle)
 {
         cpu_set(cpu, smp_commenced_mask);
         while (!cpu_online(cpu))
@@ -62,6 +62,8 @@ DECLARE_EARLY_PER_CPU(int, x86_cpu_to_logical_apicid);
 /* Static state in head.S used to set up a CPU */
 extern unsigned long stack_start; /* Initial stack pointer address */
 
+struct task_struct;
+
 struct smp_ops {
         void (*smp_prepare_boot_cpu)(void);
         void (*smp_prepare_cpus)(unsigned max_cpus);
@@ -113,7 +115,7 @@ static inline void smp_cpus_done(unsigned int max_cpus)
         smp_ops.smp_cpus_done(max_cpus);
 }
 
-static inline int __cpu_up(unsigned int cpu)
+static inline int __cpu_up(unsigned int cpu, struct task_struct *tidle)
 {
         return smp_ops.cpu_up(cpu);
 }
@@ -61,7 +61,7 @@ extern void smp_prepare_cpus(unsigned int max_cpus);
 /*
  * Bring a CPU up
  */
-extern int __cpu_up(unsigned int cpunum);
+extern int __cpu_up(unsigned int cpunum, struct task_struct *tidle);
 
 /*
  * Final polishing of CPUs
@@ -309,7 +309,7 @@ static int __cpuinit _cpu_up(unsigned int cpu, int tasks_frozen)
         }
 
         /* Arch-specific enabling code. */
-        ret = __cpu_up(cpu);
+        ret = __cpu_up(cpu, NULL);
         if (ret != 0)
                 goto out_notify;
         BUG_ON(!cpu_online(cpu));