Merge tag 'x86-urgent-2020-08-30' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 fixes from Thomas Gleixner:
 "Three interrupt related fixes for X86:

   - Move disabling of the local APIC after invoking fixup_irqs() to
     ensure that incoming interrupts are noted in the IRR and not
     ignored.

   - Unbreak affinity setting. The rework of the entry code reused the
     regular exception entry code for device interrupts. The vector
     number is pushed into the errorcode slot on the stack which is
     then lifted into an argument and set to -1, because that's
     regs->orig_ax, which is used in quite a few places to check
     whether the entry came from a syscall.

     But it was overlooked that orig_ax is used in the affinity cleanup
     code to validate whether the interrupt has arrived on the new
     target. It turned out that this vector check is pointless because
     interrupts are never moved from one vector to another on the same
     CPU. That check is a historical leftover from the time when x86
     supported multi-CPU affinities, but it is no longer needed with
     the now strict single-CPU affinity. Famous last words ...

   - Add a missing check for an empty cpumask into the matrix
     allocator. The affinity change added a warning to catch the case
     where an interrupt is moved on the same CPU to a different vector.
     This triggers because a condition with an empty cpumask returns
     an assignment from the allocator, as the allocator uses
     for_each_cpu() without checking whether the cpumask is empty. The
     historically inconsistent for_each_cpu() behaviour of ignoring
     the cpumask and unconditionally claiming that CPU0 is in the mask
     struck again. Sigh.

  plus a new entry into the MAINTAINERS file for the HPE/UV platform"

* tag 'x86-urgent-2020-08-30' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  genirq/matrix: Deal with the sillyness of for_each_cpu() on UP
  x86/irq: Unbreak interrupt affinity setting
  x86/hotplug: Silence APIC only after all interrupts are migrated
  MAINTAINERS: Add entry for HPE Superdome Flex (UV) maintainers
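To see why the old affinity cleanup check could never match again: the legacy IDT stubs pushed the bitwise-inverted vector number into the error-code slot (hence the removed ~get_irq_regs()->orig_ax in the vector.c hunk below), while the reworked entry code forces orig_ax to -1, whose inversion is always 0. A minimal user-space sketch of that arithmetic (the vector value is a made-up example, not kernel code):

#include <stdio.h>

int main(void)
{
	/* Old entry code: the stub pushed ~vector into the orig_ax slot. */
	long orig_ax = ~0x31L;			/* hypothetical device vector 0x31 */
	printf("recovered: 0x%lx\n", ~orig_ax);	/* 0x31 - the check can match */

	/* Reworked entry code: orig_ax is unconditionally set to -1. */
	orig_ax = -1;
	printf("recovered: 0x%lx\n", ~orig_ax);	/* 0x0 - never a valid vector */
	return 0;
}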
commit dcc5c6f013
MAINTAINERS

@@ -18875,6 +18875,15 @@ S:	Maintained
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git x86/core
 F:	arch/x86/platform
 
+X86 PLATFORM UV HPE SUPERDOME FLEX
+M:	Steve Wahl <steve.wahl@hpe.com>
+R:	Dimitri Sivanich <dimitri.sivanich@hpe.com>
+R:	Russ Anderson <russ.anderson@hpe.com>
+S:	Supported
+F:	arch/x86/include/asm/uv/
+F:	arch/x86/kernel/apic/x2apic_uv_x.c
+F:	arch/x86/platform/uv/
+
 X86 VDSO
 M:	Andy Lutomirski <luto@kernel.org>
 L:	linux-kernel@vger.kernel.org
arch/x86/kernel/apic/vector.c

@@ -161,6 +161,7 @@ static void apic_update_vector(struct irq_data *irqd, unsigned int newvec,
 		apicd->move_in_progress = true;
 		apicd->prev_vector = apicd->vector;
 		apicd->prev_cpu = apicd->cpu;
+		WARN_ON_ONCE(apicd->cpu == newcpu);
 	} else {
 		irq_matrix_free(vector_matrix, apicd->cpu, apicd->vector,
 				managed);
@@ -910,7 +911,7 @@ void send_cleanup_vector(struct irq_cfg *cfg)
 		__send_cleanup_vector(apicd);
 }
 
-static void __irq_complete_move(struct irq_cfg *cfg, unsigned vector)
+void irq_complete_move(struct irq_cfg *cfg)
 {
 	struct apic_chip_data *apicd;
 
@@ -918,15 +919,16 @@ static void __irq_complete_move(struct irq_cfg *cfg, unsigned vector)
 	if (likely(!apicd->move_in_progress))
 		return;
 
-	if (vector == apicd->vector && apicd->cpu == smp_processor_id())
+	/*
+	 * If the interrupt arrived on the new target CPU, cleanup the
+	 * vector on the old target CPU. A vector check is not required
+	 * because an interrupt can never move from one vector to another
+	 * on the same CPU.
+	 */
+	if (apicd->cpu == smp_processor_id())
 		__send_cleanup_vector(apicd);
 }
 
-void irq_complete_move(struct irq_cfg *cfg)
-{
-	__irq_complete_move(cfg, ~get_irq_regs()->orig_ax);
-}
-
 /*
  * Called from fixup_irqs() with @desc->lock held and interrupts disabled.
  */
arch/x86/kernel/smpboot.c

@@ -1594,14 +1594,28 @@ int native_cpu_disable(void)
 	if (ret)
 		return ret;
 
-	/*
-	 * Disable the local APIC. Otherwise IPI broadcasts will reach
-	 * it. It still responds normally to INIT, NMI, SMI, and SIPI
-	 * messages.
-	 */
-	apic_soft_disable();
 	cpu_disable_common();
 
+	/*
+	 * Disable the local APIC. Otherwise IPI broadcasts will reach
+	 * it. It still responds normally to INIT, NMI, SMI, and SIPI
+	 * messages.
+	 *
+	 * Disabling the APIC must happen after cpu_disable_common()
+	 * which invokes fixup_irqs().
+	 *
+	 * Disabling the APIC preserves already set bits in IRR, but
+	 * an interrupt arriving after disabling the local APIC does not
+	 * set the corresponding IRR bit.
+	 *
+	 * fixup_irqs() scans IRR for set bits so it can raise a not
+	 * yet handled interrupt on the new destination CPU via an IPI
+	 * but obviously it can't do so for IRR bits which are not set.
+	 * IOW, interrupts arriving after disabling the local APIC will
+	 * be lost.
+	 */
+	apic_soft_disable();
+
 	return 0;
 }
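The ordering rationale in the new comment reduces to two facts: an interrupt only latches a bit into IRR while the local APIC is enabled, and fixup_irqs() can only re-raise interrupts whose IRR bits it actually finds. A toy user-space model of the broken and fixed orderings; every name here is a hypothetical stand-in, not a kernel API:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins; the real logic lives in the hunk above. */
static bool apic_enabled = true;
static bool irr_bit_set;

/* A device interrupt only latches into IRR while the APIC is enabled. */
static void device_interrupt(void)
{
	if (apic_enabled)
		irr_bit_set = true;
}

/* fixup_irqs() can only retrigger interrupts whose IRR bits it sees. */
static void fixup_irqs_model(void)
{
	printf(irr_bit_set ? "retriggered on new CPU\n" : "interrupt lost\n");
}

int main(void)
{
	/* Broken order: silence the APIC before migrating interrupts. */
	apic_enabled = false;		/* apic_soft_disable() first */
	device_interrupt();		/* arrives during the hotunplug */
	fixup_irqs_model();		/* -> "interrupt lost" */

	/* Fixed order: migrate first, silence the APIC afterwards. */
	apic_enabled = true;
	irr_bit_set = false;
	device_interrupt();		/* latches into IRR */
	fixup_irqs_model();		/* -> "retriggered on new CPU" */
	apic_enabled = false;		/* apic_soft_disable() is now safe */
	return 0;
}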
kernel/irq/matrix.c

@@ -380,6 +380,13 @@ int irq_matrix_alloc(struct irq_matrix *m, const struct cpumask *msk,
 	unsigned int cpu, bit;
 	struct cpumap *cm;
 
+	/*
+	 * Not required in theory, but matrix_find_best_cpu() uses
+	 * for_each_cpu() which ignores the cpumask on UP.
+	 */
+	if (cpumask_empty(msk))
+		return -EINVAL;
+
 	cpu = matrix_find_best_cpu(m, msk);
 	if (cpu == UINT_MAX)
 		return -ENOSPC;
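For reference, the quirk the new check guards against: on UP builds, for_each_cpu() historically iterated once over CPU0 without consulting the mask at all. A small user-space reproduction; the macro below imitates that old UP stub and is a sketch, not the kernel's current definition:

#include <stdio.h>

/*
 * Imitation of the historical UP stub of for_each_cpu(): it claims
 * CPU0 unconditionally and never looks at the mask.
 */
#define for_each_cpu_up(cpu, mask) \
	for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)(mask))

int main(void)
{
	unsigned long empty_mask = 0;	/* no CPU set */
	unsigned int cpu;

	for_each_cpu_up(cpu, &empty_mask)
		printf("claims CPU%u despite the empty mask\n", cpu);
	return 0;
}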