Merge branch 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 fixes from Ingo Molnar:
 "Various low level fixes: fix more fallout from the FPU rework and the
  asm entry code rework, plus an MSI rework fix, and an idle-tracing fix"

* 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/fpu/math-emu: Fix crash in fork()
  x86/fpu/math-emu: Fix math-emu boot crash
  x86/idle: Restore trace_cpu_idle to mwait_idle() calls
  x86/irq: Build correct vector mapping for multiple MSI interrupts
  Revert "sched/x86_64: Don't save flags on context switch"
commit d0b89bd548

arch/x86/include/asm/switch_to.h

@@ -79,12 +79,12 @@ do { \
 #else /* CONFIG_X86_32 */
 
 /* frame pointer must be last for get_wchan */
-#define SAVE_CONTEXT    "pushq %%rbp ; movq %%rsi,%%rbp\n\t"
-#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp\t"
+#define SAVE_CONTEXT    "pushf ; pushq %%rbp ; movq %%rsi,%%rbp\n\t"
+#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp ; popf\t"
 
 #define __EXTRA_CLOBBER  \
 	, "rcx", "rbx", "rdx", "r8", "r9", "r10", "r11", \
-	  "r12", "r13", "r14", "r15", "flags"
+	  "r12", "r13", "r14", "r15"
 
 #ifdef CONFIG_CC_STACKPROTECTOR
 #define __switch_canary						  \
@@ -100,11 +100,7 @@ do { \
 #define __switch_canary_iparam
 #endif	/* CC_STACKPROTECTOR */
 
-/*
- * There is no need to save or restore flags, because flags are always
- * clean in kernel mode, with the possible exception of IOPL.  Kernel IOPL
- * has no effect.
- */
+/* Save restore flags to clear handle leaking NT */
 #define switch_to(prev, next, last) \
 	asm volatile(SAVE_CONTEXT				  \
 	     "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */	  \
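The revert reinstates an explicit pushf/popf pair around the context switch, so whatever is in EFLAGS when a task is scheduled out (notably a stray NT bit set by userspace) is restored to that same task and never leaks into the next one. Below is a minimal user-space sketch of that mechanism, not the kernel macro itself: it assumes x86-64 with GCC/Clang inline asm and uses DF (toggled via std/cld) as an easy-to-observe stand-in for a leaking flag.

/* flags_roundtrip.c - hedged sketch: pushf/popf preserves EFLAGS across a
 * simulated "context switch".  User-space only; DF stands in for NT. */
#include <stdio.h>

int main(void)
{
	unsigned long during, after;

	asm volatile (
		"std\n\t"		/* deliberately dirty a flag (DF)      */
		"pushf\n\t"		/* SAVE_CONTEXT-style flags save       */
		"cld\n\t"		/* the "switch" clobbers EFLAGS        */
		"pushf\n\tpopq %0\n\t"	/* sample EFLAGS while clobbered       */
		"popf\n\t"		/* RESTORE_CONTEXT-style flags restore */
		"pushf\n\tpopq %1\n\t"	/* sample EFLAGS after the restore     */
		"cld"			/* leave DF clean before calling libc  */
		: "=r" (during), "=r" (after)
		:
		: "cc", "memory");

	/* DF is bit 10 of EFLAGS: prints 0 while "switched", 1 after popf. */
	printf("DF while clobbered: %lu, after popf: %lu\n",
	       (during >> 10) & 1, (after >> 10) & 1);
	return 0;
}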

arch/x86/kernel/apic/vector.c

@@ -322,7 +322,7 @@ static int x86_vector_alloc_irqs(struct irq_domain *domain, unsigned int virq,
 		irq_data->chip = &lapic_controller;
 		irq_data->chip_data = data;
 		irq_data->hwirq = virq + i;
-		err = assign_irq_vector_policy(virq, irq_data->node, data,
+		err = assign_irq_vector_policy(virq + i, irq_data->node, data,
					       info);
 		if (err)
 			goto error;
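For multi-MSI, a device gets a block of consecutive virqs, and the loop above must assign a vector to each of virq, virq+1, ...; passing plain virq meant only the first sub-interrupt ever got a mapping. The following toy model illustrates that per-iteration offset only; it is not the kernel code, and assign_vector, vector_of, alloc_msi_block and the constants are invented for the sketch.

/* msi_map_sketch.c - toy: each virq in a contiguous MSI block needs its
 * own vector assignment. */
#include <stdio.h>

#define MAX_IRQS     32
#define FIRST_VECTOR 0x20

static int vector_of[MAX_IRQS];		/* virq -> vector, 0 = unmapped */
static int next_vector = FIRST_VECTOR;

static int assign_vector(unsigned int virq)
{
	if (virq >= MAX_IRQS)
		return -1;
	vector_of[virq] = next_vector++;
	return 0;
}

static int alloc_msi_block(unsigned int virq, unsigned int nr_irqs)
{
	for (unsigned int i = 0; i < nr_irqs; i++) {
		/* The bug was the equivalent of assign_vector(virq): only
		 * the first sub-interrupt was ever mapped. */
		if (assign_vector(virq + i))
			return -1;
	}
	return 0;
}

int main(void)
{
	alloc_msi_block(4, 3);		/* a device requesting 3 MSI vectors */

	for (unsigned int v = 4; v < 7; v++)
		printf("virq %u -> vector 0x%x\n", v, vector_of[v]);
	return 0;
}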

arch/x86/kernel/fpu/core.c

@@ -270,7 +270,7 @@ int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
 	dst_fpu->fpregs_active = 0;
 	dst_fpu->last_cpu = -1;
 
-	if (src_fpu->fpstate_active)
+	if (src_fpu->fpstate_active && cpu_has_fpu)
 		fpu_copy(dst_fpu, src_fpu);
 
 	return 0;
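The added cpu_has_fpu check matters because, in the kernel, copying the parent's FPU register image at fork() executes FPU instructions, which crash on a CPU that has no FPU and relies on math emulation; the emulated (soft) state is ordinary memory and is inherited with the rest of the task structure. A toy model of that condition follows; it is not the kernel implementation and every name in it is invented.

/* fpu_copy_sketch.c - toy: only attempt the "hardware" copy when the
 * source has live FPU state and the CPU actually has an FPU. */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct toy_fpu {
	bool fpstate_active;
	unsigned char hw_state[128];	/* stand-in for the register image */
};

static bool cpu_has_fpu;		/* false: pretend we booted FPU-less */

static void fpu_copy(struct toy_fpu *dst, const struct toy_fpu *src)
{
	/* In the kernel this path runs FPU instructions; here it's a memcpy. */
	memcpy(dst->hw_state, src->hw_state, sizeof(dst->hw_state));
}

static int fpu__copy(struct toy_fpu *dst, struct toy_fpu *src)
{
	memset(dst, 0, sizeof(*dst));
	dst->fpstate_active = src->fpstate_active;

	/* The fix: skip the hardware copy when there is no FPU at all. */
	if (src->fpstate_active && cpu_has_fpu)
		fpu_copy(dst, src);

	return 0;
}

int main(void)
{
	struct toy_fpu parent = { .fpstate_active = true }, child;

	fpu__copy(&child, &parent);
	printf("hardware copy performed: %s\n",
	       cpu_has_fpu && parent.fpstate_active ? "yes" : "no");
	return 0;
}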

arch/x86/kernel/fpu/init.c

@@ -40,7 +40,12 @@ static void fpu__init_cpu_generic(void)
 		write_cr0(cr0);
 
 	/* Flush out any pending x87 state: */
-	asm volatile ("fninit");
+#ifdef CONFIG_MATH_EMULATION
+	if (!cpu_has_fpu)
+		fpstate_init_soft(&current->thread.fpu.state.soft);
+	else
+#endif
+		asm volatile ("fninit");
 }
 
 /*

arch/x86/kernel/process.c

@@ -408,6 +408,7 @@ static int prefer_mwait_c1_over_halt(const struct cpuinfo_x86 *c)
 static void mwait_idle(void)
 {
 	if (!current_set_polling_and_test()) {
+		trace_cpu_idle_rcuidle(1, smp_processor_id());
 		if (this_cpu_has(X86_BUG_CLFLUSH_MONITOR)) {
 			smp_mb(); /* quirk */
 			clflush((void *)&current_thread_info()->flags);
@@ -419,6 +420,7 @@ static void mwait_idle(void)
 			__sti_mwait(0, 0);
 		else
 			local_irq_enable();
+		trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
 	} else {
 		local_irq_enable();
 	}