call cpu_chain with CPU_DOWN_FAILED if CPU_DOWN_PREPARE failed

This makes CPU hotplug symmetrical: if CPU_UP_PREPARE fails we get
CPU_UP_CANCELED, so we can undo whatever happened on PREPARE.  The same
should happen for CPU_DOWN_PREPARE.

[akpm@linux-foundation.org: fix for reduce-size-of-task_struct-on-64-bit-machines]
Cc: Srivatsa Vaddagiri <vatsa@in.ibm.com>
Cc: Gautham Shenoy <ego@in.ibm.com>
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Oleg Nesterov <oleg@tv-sign.ru>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
Heiko Carstens 2007-05-09 02:34:04 -07:00 committed by Linus Torvalds
parent 5be9361cdf
commit e7407dcc69

View File

@ -97,7 +97,7 @@ static inline void check_for_tasks(int cpu)
(!cputime_eq(p->utime, cputime_zero) || (!cputime_eq(p->utime, cputime_zero) ||
!cputime_eq(p->stime, cputime_zero))) !cputime_eq(p->stime, cputime_zero)))
printk(KERN_WARNING "Task %s (pid = %d) is on cpu %d\ printk(KERN_WARNING "Task %s (pid = %d) is on cpu %d\
(state = %ld, flags = %lx) \n", (state = %ld, flags = %x) \n",
p->comm, p->pid, cpu, p->state, p->flags); p->comm, p->pid, cpu, p->state, p->flags);
} }
write_unlock_irq(&tasklist_lock); write_unlock_irq(&tasklist_lock);
@ -122,9 +122,10 @@ static int take_cpu_down(void *unused)
/* Requires cpu_add_remove_lock to be held */ /* Requires cpu_add_remove_lock to be held */
static int _cpu_down(unsigned int cpu) static int _cpu_down(unsigned int cpu)
{ {
int err; int err, nr_calls = 0;
struct task_struct *p; struct task_struct *p;
cpumask_t old_allowed, tmp; cpumask_t old_allowed, tmp;
void *hcpu = (void *)(long)cpu;
if (num_online_cpus() == 1) if (num_online_cpus() == 1)
return -EBUSY; return -EBUSY;
@ -132,11 +133,12 @@ static int _cpu_down(unsigned int cpu)
if (!cpu_online(cpu)) if (!cpu_online(cpu))
return -EINVAL; return -EINVAL;
raw_notifier_call_chain(&cpu_chain, CPU_LOCK_ACQUIRE, raw_notifier_call_chain(&cpu_chain, CPU_LOCK_ACQUIRE, hcpu);
(void *)(long)cpu); err = __raw_notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE,
err = raw_notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE, hcpu, -1, &nr_calls);
(void *)(long)cpu);
if (err == NOTIFY_BAD) { if (err == NOTIFY_BAD) {
__raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED, hcpu,
nr_calls, NULL);
printk("%s: attempt to take down CPU %u failed\n", printk("%s: attempt to take down CPU %u failed\n",
__FUNCTION__, cpu); __FUNCTION__, cpu);
err = -EINVAL; err = -EINVAL;
@ -156,7 +158,7 @@ static int _cpu_down(unsigned int cpu)
if (IS_ERR(p) || cpu_online(cpu)) { if (IS_ERR(p) || cpu_online(cpu)) {
/* CPU didn't die: tell everyone. Can't complain. */ /* CPU didn't die: tell everyone. Can't complain. */
if (raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED, if (raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED,
(void *)(long)cpu) == NOTIFY_BAD) hcpu) == NOTIFY_BAD)
BUG(); BUG();
if (IS_ERR(p)) { if (IS_ERR(p)) {
@ -178,8 +180,7 @@ static int _cpu_down(unsigned int cpu)
put_cpu(); put_cpu();
/* CPU is completely dead: tell everyone. Too late to complain. */ /* CPU is completely dead: tell everyone. Too late to complain. */
if (raw_notifier_call_chain(&cpu_chain, CPU_DEAD, if (raw_notifier_call_chain(&cpu_chain, CPU_DEAD, hcpu) == NOTIFY_BAD)
(void *)(long)cpu) == NOTIFY_BAD)
BUG(); BUG();
check_for_tasks(cpu); check_for_tasks(cpu);