KVM: VMX: Flatten __vmx_vcpu_run()

commit 8bd200d23ec42d66ccd517a72dd0b9cc6132d2fd upstream.

Move the vmx_vm{enter,exit}() functionality into __vmx_vcpu_run().  This
will make it easier to do the spec_ctrl handling before the first RET.

Signed-off-by: Josh Poimboeuf <jpoimboe@kernel.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Borislav Petkov <bp@suse.de>
[cascardo: remove ENDBR]
Signed-off-by: Thadeu Lima de Souza Cascardo <cascardo@canonical.com>
Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

parent df93717a32
commit 07401c2311
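
For orientation, here is a simplified before/after sketch of the control flow. This is an illustration only, not the patched vmenter.S: the prologue, register save/restore and most of the VM-Fail handling are elided into comments, and the skeletons reuse the real symbol names purely for readability. Before the patch, the first RET executed after a VM-Exit is the one at the end of the separate vmx_vmexit function; after the patch, everything between VM-Exit and __vmx_vcpu_run's own final RET stays inside one function, which is what allows the RSB stuffing (and, in follow-up patches, the SPEC_CTRL handling) to run before any return instruction.

/*
 * Before (sketch): VM-Enter goes through a CALL, and the VM-Exit lands in
 * the separate vmx_vmexit(), whose RET is the first return executed on the
 * way back into __vmx_vcpu_run().
 */
SYM_FUNC_START(__vmx_vcpu_run)
	/* ... save host state, load guest GPRs ... */
	call vmx_vmenter		/* VMLAUNCH/VMRESUME happen in the callee */
	/* resumed here via the RET at the end of vmx_vmexit */
	/* ... save guest GPRs, clear registers ... */
	RET
SYM_FUNC_END(__vmx_vcpu_run)

/*
 * After (sketch): VMLAUNCH/VMRESUME and the VM-Exit landing pad are inlined,
 * so nothing RETs between VM-Exit and the single RET at the end.
 */
SYM_FUNC_START(__vmx_vcpu_run)
	/* ... save host state, load guest GPRs ... */
	je .Lvmlaunch			/* ZF from the earlier 'launched' test */
.Lvmresume:
	vmresume
	jmp .Lvmfail			/* fall-through means VM-Fail */
.Lvmlaunch:
	vmlaunch
	jmp .Lvmfail
SYM_INNER_LABEL(vmx_vmexit, SYM_L_GLOBAL)	/* VMCS.HOST_RIP points here */
	/* ... save guest GPRs, FILL_RETURN_BUFFER, clear registers ... */
	RET				/* the only RET executed after VM-Exit */
.Lvmfail:
	mov $1, %eax			/* VM-Fail: return 1 (register clearing elided) */
	RET
SYM_FUNC_END(__vmx_vcpu_run)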
--- a/arch/x86/kvm/vmx/vmenter.S
+++ b/arch/x86/kvm/vmx/vmenter.S
@@ -30,68 +30,6 @@
 
 	.section .noinstr.text, "ax"
 
-/**
- * vmx_vmenter - VM-Enter the current loaded VMCS
- *
- * %RFLAGS.ZF: !VMCS.LAUNCHED, i.e. controls VMLAUNCH vs. VMRESUME
- *
- * Returns:
- *	%RFLAGS.CF is set on VM-Fail Invalid
- *	%RFLAGS.ZF is set on VM-Fail Valid
- *	%RFLAGS.{CF,ZF} are cleared on VM-Success, i.e. VM-Exit
- *
- * Note that VMRESUME/VMLAUNCH fall-through and return directly if
- * they VM-Fail, whereas a successful VM-Enter + VM-Exit will jump
- * to vmx_vmexit.
- */
-SYM_FUNC_START_LOCAL(vmx_vmenter)
-	/* EFLAGS.ZF is set if VMCS.LAUNCHED == 0 */
-	je 2f
-
-1:	vmresume
-	RET
-
-2:	vmlaunch
-	RET
-
-3:	cmpb $0, kvm_rebooting
-	je 4f
-	RET
-4:	ud2
-
-	_ASM_EXTABLE(1b, 3b)
-	_ASM_EXTABLE(2b, 3b)
-
-SYM_FUNC_END(vmx_vmenter)
-
-/**
- * vmx_vmexit - Handle a VMX VM-Exit
- *
- * Returns:
- *	%RFLAGS.{CF,ZF} are cleared on VM-Success, i.e. VM-Exit
- *
- * This is vmx_vmenter's partner in crime.  On a VM-Exit, control will jump
- * here after hardware loads the host's state, i.e. this is the destination
- * referred to by VMCS.HOST_RIP.
- */
-SYM_FUNC_START(vmx_vmexit)
-#ifdef CONFIG_RETPOLINE
-	ALTERNATIVE "jmp .Lvmexit_skip_rsb", "", X86_FEATURE_RETPOLINE
-	/* Preserve guest's RAX, it's used to stuff the RSB. */
-	push %_ASM_AX
-
-	/* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
-	FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
-
-	/* Clear RFLAGS.CF and RFLAGS.ZF to preserve VM-Exit, i.e. !VM-Fail. */
-	or $1, %_ASM_AX
-
-	pop %_ASM_AX
-.Lvmexit_skip_rsb:
-#endif
-	RET
-SYM_FUNC_END(vmx_vmexit)
-
 /**
  * __vmx_vcpu_run - Run a vCPU via a transition to VMX guest mode
  * @vmx:	struct vcpu_vmx * (forwarded to vmx_update_host_rsp)
@@ -124,8 +62,7 @@ SYM_FUNC_START(__vmx_vcpu_run)
 	/* Copy @launched to BL, _ASM_ARG3 is volatile. */
 	mov %_ASM_ARG3B, %bl
 
-	/* Adjust RSP to account for the CALL to vmx_vmenter(). */
-	lea -WORD_SIZE(%_ASM_SP), %_ASM_ARG2
+	lea (%_ASM_SP), %_ASM_ARG2
 	call vmx_update_host_rsp
 
 	/* Load @regs to RAX. */
@@ -154,11 +91,36 @@ SYM_FUNC_START(__vmx_vcpu_run)
 	/* Load guest RAX.  This kills the @regs pointer! */
 	mov VCPU_RAX(%_ASM_AX), %_ASM_AX
 
-	/* Enter guest mode */
-	call vmx_vmenter
+	/* Check EFLAGS.ZF from 'testb' above */
+	je .Lvmlaunch
 
-	/* Jump on VM-Fail. */
-	jbe 2f
+	/*
+	 * After a successful VMRESUME/VMLAUNCH, control flow "magically"
+	 * resumes below at 'vmx_vmexit' due to the VMCS HOST_RIP setting.
+	 * So this isn't a typical function and objtool needs to be told to
+	 * save the unwind state here and restore it below.
+	 */
+	UNWIND_HINT_SAVE
+
+	/*
+	 * If VMRESUME/VMLAUNCH and corresponding vmexit succeed, execution resumes at
+	 * the 'vmx_vmexit' label below.
+	 */
+.Lvmresume:
+	vmresume
+	jmp .Lvmfail
+
+.Lvmlaunch:
+	vmlaunch
+	jmp .Lvmfail
+
+	_ASM_EXTABLE(.Lvmresume, .Lfixup)
+	_ASM_EXTABLE(.Lvmlaunch, .Lfixup)
+
+SYM_INNER_LABEL(vmx_vmexit, SYM_L_GLOBAL)
+
+	/* Restore unwind state from before the VMRESUME/VMLAUNCH. */
+	UNWIND_HINT_RESTORE
 
 	/* Temporarily save guest's RAX. */
 	push %_ASM_AX
@@ -185,9 +147,13 @@ SYM_FUNC_START(__vmx_vcpu_run)
 	mov %r15, VCPU_R15(%_ASM_AX)
 #endif
 
+	/* IMPORTANT: RSB must be stuffed before the first return. */
+	FILL_RETURN_BUFFER %_ASM_BX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
+
 	/* Clear RAX to indicate VM-Exit (as opposed to VM-Fail). */
 	xor %eax, %eax
 
+.Lclear_regs:
 	/*
 	 * Clear all general purpose registers except RSP and RAX to prevent
 	 * speculative use of the guest's values, even those that are reloaded
@@ -197,7 +163,7 @@ SYM_FUNC_START(__vmx_vcpu_run)
 	 * free.  RSP and RAX are exempt as RSP is restored by hardware during
 	 * VM-Exit and RAX is explicitly loaded with 0 or 1 to return VM-Fail.
 	 */
-1:	xor %ecx, %ecx
+	xor %ecx, %ecx
 	xor %edx, %edx
 	xor %ebx, %ebx
 	xor %ebp, %ebp
@@ -216,8 +182,8 @@ SYM_FUNC_START(__vmx_vcpu_run)
 
 	/* "POP" @regs. */
 	add $WORD_SIZE, %_ASM_SP
-	pop %_ASM_BX
 
+	pop %_ASM_BX
 #ifdef CONFIG_X86_64
 	pop %r12
 	pop %r13
@@ -230,9 +196,15 @@ SYM_FUNC_START(__vmx_vcpu_run)
 	pop %_ASM_BP
 	RET
 
-	/* VM-Fail.  Out-of-line to avoid a taken Jcc after VM-Exit. */
-2:	mov $1, %eax
-	jmp 1b
+.Lfixup:
+	cmpb $0, kvm_rebooting
+	jne .Lvmfail
+	ud2
+.Lvmfail:
+	/* VM-Fail: set return value to 1 */
+	mov $1, %eax
+	jmp .Lclear_regs
+
 SYM_FUNC_END(__vmx_vcpu_run)
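
A note on the new failure handling introduced above. VMLAUNCH/VMRESUME can fail in two ways: they can fall through with a VM-Fail status, or they can fault outright (for example once VMX has been disabled because the host is rebooting). The annotated sketch below is not a verbatim copy of the patched file; the labels are placed next to each other here only for readability, and the comments are explanatory additions.

.Lvmresume:
	vmresume
	jmp .Lvmfail			/* fall-through: VM-Fail, VMCS refused */

.Lvmlaunch:
	vmlaunch
	jmp .Lvmfail			/* fall-through: VM-Fail, VMCS refused */

	/* A fault on either instruction above is redirected to .Lfixup. */
	_ASM_EXTABLE(.Lvmresume, .Lfixup)
	_ASM_EXTABLE(.Lvmlaunch, .Lfixup)

.Lfixup:
	cmpb $0, kvm_rebooting		/* expected fault during a reboot?      */
	jne .Lvmfail			/* yes: treat it as an ordinary VM-Fail */
	ud2				/* no: unexpected fault, crash loudly   */
.Lvmfail:
	mov $1, %eax			/* tell the caller: VM-Fail             */
	jmp .Lclear_regs		/* scrub GPRs, then the common epilogue */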