forked from luck/tmp_suning_uos_patched
Merge branch 'irq-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull irq fix from Thomas Gleixner: "A single fix for a cpu hotplug race vs. interrupt descriptors: Prevent irq setup/teardown across the cpu starting/dying parts of cpu hotplug so that the starting/dying cpu has a stable view of the descriptor space. This has been an issue for all architectures in the cpu dying phase, where interrupts are migrated away from the dying cpu. In the starting phase it's mostly an x86 issue vs. the vector space update" * 'irq-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: hotplug: Prevent alloc/free of irq descriptors during cpu up/down
This commit is contained in:
commit
c4bc680cf7
@ -87,7 +87,12 @@ struct irq_desc {
|
||||
const char *name;
|
||||
} ____cacheline_internodealigned_in_smp;
|
||||
|
||||
#ifndef CONFIG_SPARSE_IRQ
|
||||
#ifdef CONFIG_SPARSE_IRQ
|
||||
extern void irq_lock_sparse(void);
|
||||
extern void irq_unlock_sparse(void);
|
||||
#else
|
||||
static inline void irq_lock_sparse(void) { }
|
||||
static inline void irq_unlock_sparse(void) { }
|
||||
extern struct irq_desc irq_desc[NR_IRQS];
|
||||
#endif
|
||||
|
||||
|
22
kernel/cpu.c
22
kernel/cpu.c
@ -21,6 +21,7 @@
|
||||
#include <linux/suspend.h>
|
||||
#include <linux/lockdep.h>
|
||||
#include <linux/tick.h>
|
||||
#include <linux/irq.h>
|
||||
#include <trace/events/power.h>
|
||||
|
||||
#include "smpboot.h"
|
||||
@ -391,14 +392,20 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
|
||||
|
||||
smpboot_park_threads(cpu);
|
||||
|
||||
/*
|
||||
* Prevent irq alloc/free while the dying cpu reorganizes the
|
||||
* interrupt affinities.
|
||||
*/
|
||||
irq_lock_sparse();
|
||||
|
||||
/*
|
||||
* So now all preempt/rcu users must observe !cpu_active().
|
||||
*/
|
||||
|
||||
err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
|
||||
if (err) {
|
||||
/* CPU didn't die: tell everyone. Can't complain. */
|
||||
cpu_notify_nofail(CPU_DOWN_FAILED | mod, hcpu);
|
||||
irq_unlock_sparse();
|
||||
goto out_release;
|
||||
}
|
||||
BUG_ON(cpu_online(cpu));
|
||||
@ -415,6 +422,9 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
|
||||
smp_mb(); /* Read from cpu_dead_idle before __cpu_die(). */
|
||||
per_cpu(cpu_dead_idle, cpu) = false;
|
||||
|
||||
/* Interrupts are moved away from the dying cpu, reenable alloc/free */
|
||||
irq_unlock_sparse();
|
||||
|
||||
hotplug_cpu__broadcast_tick_pull(cpu);
|
||||
/* This actually kills the CPU. */
|
||||
__cpu_die(cpu);
|
||||
@ -517,8 +527,18 @@ static int _cpu_up(unsigned int cpu, int tasks_frozen)
|
||||
goto out_notify;
|
||||
}
|
||||
|
||||
/*
|
||||
* Some architectures have to walk the irq descriptors to
|
||||
* setup the vector space for the cpu which comes online.
|
||||
* Prevent irq alloc/free across the bringup.
|
||||
*/
|
||||
irq_lock_sparse();
|
||||
|
||||
/* Arch-specific enabling code. */
|
||||
ret = __cpu_up(cpu, idle);
|
||||
|
||||
irq_unlock_sparse();
|
||||
|
||||
if (ret != 0)
|
||||
goto out_notify;
|
||||
BUG_ON(!cpu_online(cpu));
|
||||
|
@ -76,12 +76,8 @@ extern void unmask_threaded_irq(struct irq_desc *desc);
|
||||
|
||||
#ifdef CONFIG_SPARSE_IRQ
|
||||
static inline void irq_mark_irq(unsigned int irq) { }
|
||||
extern void irq_lock_sparse(void);
|
||||
extern void irq_unlock_sparse(void);
|
||||
#else
|
||||
extern void irq_mark_irq(unsigned int irq);
|
||||
static inline void irq_lock_sparse(void) { }
|
||||
static inline void irq_unlock_sparse(void) { }
|
||||
#endif
|
||||
|
||||
extern void init_kstat_irqs(struct irq_desc *desc, int node, int nr);
|
||||
|
Loading…
Reference in New Issue
Block a user