powerpc/irq: Don't switch to irq stack from softirq stack
irq_exit() is now called on the irq stack, which can trigger a switch to
the softirq stack from the irq stack. If an interrupt happens at that
point, we will not properly detect the re-entrancy and clobber the
original return context on the irq stack. This fixes it.

The side effect is to prevent all nesting from softirq stack to irq
stack, even in the "safe" case, but it's simpler that way and matches
what x86_64 does.

Reported-by: Cédric Le Goater <clg@fr.ibm.com>
Tested-by: Cédric Le Goater <clg@fr.ibm.com>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent fd848319e7
commit 8b5ede69d2
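
The one-line logic change is easiest to see in isolation. Below is a
minimal userland sketch (illustrative only; the thread_info stand-ins
and the already_there_* helpers are invented for this example, not
kernel code) of the re-entrancy check before and after the patch: an
interrupt taken while running on the softirq stack is not recognized as
nested by the old check, so do_IRQ would restart at the top of the
hardirq stack while the outer handler's frames are still live there.

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for the per-CPU thread_info of each stack. */
struct thread_info { const char *name; };

static struct thread_info hard_ti = { "hardirq stack" };
static struct thread_info soft_ti = { "softirq stack" };

/* Old check: only the hardirq stack counts as "already there". */
static bool already_there_old(const struct thread_info *curtp)
{
	return curtp == &hard_ti;
}

/* Patched check: the softirq stack counts too. */
static bool already_there_new(const struct thread_info *curtp)
{
	return curtp == &hard_ti || curtp == &soft_ti;
}

int main(void)
{
	/* The failing case: an interrupt arrives while softirqs run on
	 * their own stack, with live frames still on the hardirq stack
	 * underneath. */
	const struct thread_info *curtp = &soft_ti;

	printf("old check on %s: %s\n", curtp->name,
	       already_there_old(curtp)
		       ? "handle in place (safe)"
		       : "switch to hardirq stack top (clobbers live frames)");
	printf("new check on %s: %s\n", curtp->name,
	       already_there_new(curtp)
		       ? "handle in place (safe)"
		       : "switch to hardirq stack top (clobbers live frames)");
	return 0;
}

Running this prints the clobbering branch for the old check and the
safe, handle-in-place branch for the new one.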
@@ -495,14 +495,15 @@ void __do_irq(struct pt_regs *regs)
 void do_IRQ(struct pt_regs *regs)
 {
 	struct pt_regs *old_regs = set_irq_regs(regs);
-	struct thread_info *curtp, *irqtp;
+	struct thread_info *curtp, *irqtp, *sirqtp;
 
 	/* Switch to the irq stack to handle this */
 	curtp = current_thread_info();
 	irqtp = hardirq_ctx[raw_smp_processor_id()];
+	sirqtp = softirq_ctx[raw_smp_processor_id()];
 
 	/* Already there ? */
-	if (unlikely(curtp == irqtp)) {
+	if (unlikely(curtp == irqtp || curtp == sirqtp)) {
 		__do_irq(regs);
 		set_irq_regs(old_regs);
 		return;
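
When neither test matches, do_IRQ goes on to borrow the hardirq stack.
For context, the remainder of the function in kernels of this vintage
looks roughly like the sketch below (approximate, reconstructed for
illustration rather than quoted from the tree; call_do_irq is the
assembly helper that actually moves the stack pointer onto the irq
stack). It shows why an undetected nested entry clobbers the original
return context: a nested call_do_irq would reset the stack pointer to
the same stack top the outer invocation is still using.

	/* Prepare the thread_info in the irq stack */
	irqtp->task = curtp->task;
	irqtp->flags = 0;

	/* Copy the preempt_count */
	irqtp->preempt_count = curtp->preempt_count;

	/* Switch the stack pointer to the top of the hardirq stack and
	 * run __do_irq() there. A nested, undetected entry would reset
	 * the stack pointer to this same top while the outer
	 * invocation's frames, including its return linkage, are still
	 * live: the clobbering described above. */
	call_do_irq(regs, irqtp);

	irqtp->task = NULL;

	/* Copy back updates softirq processing made to the flags */
	if (irqtp->flags)
		set_bits(irqtp->flags, &curtp->flags);

	set_irq_regs(old_regs);
}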