KVM: X86: Dynamically allocate user_fpu
After reverting commit 240c35a378 ("kvm: x86: Use task structs fpu field
for user"), struct kvm_vcpu is 19456 bytes on my server; PAGE_ALLOC_COSTLY_ORDER (3)
is the order at which allocations are deemed costly to service. In serverless
scenarios, one host can serve hundreds or thousands of Firecracker/Kata Containers
instances; however, new instances fail to launch once memory becomes too
fragmented to allocate the kvm_vcpu struct on the host. This has been observed
in some cloud providers' production environments.

This patch dynamically allocates user_fpu; kvm_vcpu is now 15168 bytes on my
Skylake server.
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Radim Krčmář <rkrcmar@redhat.com>
Signed-off-by: Wanpeng Li <wanpengli@tencent.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
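
[Editor's note, not part of the patch: a minimal userspace sketch of the size
arithmetic behind the commit message. It maps the two struct sizes quoted above
to the buddy-allocator order a contiguous allocation would need, assuming a
4 KiB page size; order_for() is only an illustrative stand-in for the kernel's
get_order(), and the real kvm_vcpu allocation goes through a kmem_cache, whose
slab order may differ.]

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_ALLOC_COSTLY_ORDER 3 /* orders above this are costly to service */

/* Illustrative stand-in for the kernel's get_order(): the smallest order
 * such that (PAGE_SIZE << order) covers the requested size. */
static unsigned int order_for(unsigned long size)
{
	unsigned int order = 0;

	while ((PAGE_SIZE << order) < size)
		order++;
	return order;
}

int main(void)
{
	/* sizeof(struct kvm_vcpu) before and after, per the commit message. */
	printf("before: 19456 bytes -> order %u\n", order_for(19456)); /* 3 */
	printf("after:  15168 bytes -> order %u\n", order_for(15168)); /* 2 */
	return 0;
}

[The point illustrated: moving user_fpu out of line drops a contiguous
kvm_vcpu allocation from an order-3 block (32 KiB) to an order-2 block
(16 KiB), which is easier to satisfy on a fragmented host.]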
parent e751732486
commit d9a710e5fc
@@ -616,7 +616,7 @@ struct kvm_vcpu_arch {
 	 * "guest_fpu" state here contains the guest FPU context, with the
 	 * host PRKU bits.
 	 */
-	struct fpu user_fpu;
+	struct fpu *user_fpu;
 	struct fpu *guest_fpu;
 
 	u64 xcr0;
@@ -2143,12 +2143,20 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
 		goto out;
 	}
 
+	svm->vcpu.arch.user_fpu = kmem_cache_zalloc(x86_fpu_cache,
+						    GFP_KERNEL_ACCOUNT);
+	if (!svm->vcpu.arch.user_fpu) {
+		printk(KERN_ERR "kvm: failed to allocate kvm userspace's fpu\n");
+		err = -ENOMEM;
+		goto free_partial_svm;
+	}
+
 	svm->vcpu.arch.guest_fpu = kmem_cache_zalloc(x86_fpu_cache,
 						    GFP_KERNEL_ACCOUNT);
 	if (!svm->vcpu.arch.guest_fpu) {
 		printk(KERN_ERR "kvm: failed to allocate vcpu's fpu\n");
 		err = -ENOMEM;
-		goto free_partial_svm;
+		goto free_user_fpu;
 	}
 
 	err = kvm_vcpu_init(&svm->vcpu, kvm, id);
@@ -2211,6 +2219,8 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
 	kvm_vcpu_uninit(&svm->vcpu);
 free_svm:
 	kmem_cache_free(x86_fpu_cache, svm->vcpu.arch.guest_fpu);
+free_user_fpu:
+	kmem_cache_free(x86_fpu_cache, svm->vcpu.arch.user_fpu);
 free_partial_svm:
 	kmem_cache_free(kvm_vcpu_cache, svm);
 out:
@@ -2241,6 +2251,7 @@ static void svm_free_vcpu(struct kvm_vcpu *vcpu)
 	__free_page(virt_to_page(svm->nested.hsave));
 	__free_pages(virt_to_page(svm->nested.msrpm), MSRPM_ALLOC_ORDER);
 	kvm_vcpu_uninit(vcpu);
+	kmem_cache_free(x86_fpu_cache, svm->vcpu.arch.user_fpu);
 	kmem_cache_free(x86_fpu_cache, svm->vcpu.arch.guest_fpu);
 	kmem_cache_free(kvm_vcpu_cache, svm);
 }
@@ -6598,6 +6598,7 @@ static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
 	free_loaded_vmcs(vmx->loaded_vmcs);
 	kfree(vmx->guest_msrs);
 	kvm_vcpu_uninit(vcpu);
+	kmem_cache_free(x86_fpu_cache, vmx->vcpu.arch.user_fpu);
 	kmem_cache_free(x86_fpu_cache, vmx->vcpu.arch.guest_fpu);
 	kmem_cache_free(kvm_vcpu_cache, vmx);
 }
@@ -6613,12 +6614,20 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
 	if (!vmx)
 		return ERR_PTR(-ENOMEM);
 
+	vmx->vcpu.arch.user_fpu = kmem_cache_zalloc(x86_fpu_cache,
+			GFP_KERNEL_ACCOUNT);
+	if (!vmx->vcpu.arch.user_fpu) {
+		printk(KERN_ERR "kvm: failed to allocate kvm userspace's fpu\n");
+		err = -ENOMEM;
+		goto free_partial_vcpu;
+	}
+
 	vmx->vcpu.arch.guest_fpu = kmem_cache_zalloc(x86_fpu_cache,
 			GFP_KERNEL_ACCOUNT);
 	if (!vmx->vcpu.arch.guest_fpu) {
 		printk(KERN_ERR "kvm: failed to allocate vcpu's fpu\n");
 		err = -ENOMEM;
-		goto free_partial_vcpu;
+		goto free_user_fpu;
 	}
 
 	vmx->vpid = allocate_vpid();
@@ -6721,6 +6730,8 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
 free_vcpu:
 	free_vpid(vmx->vpid);
 	kmem_cache_free(x86_fpu_cache, vmx->vcpu.arch.guest_fpu);
+free_user_fpu:
+	kmem_cache_free(x86_fpu_cache, vmx->vcpu.arch.user_fpu);
 free_partial_vcpu:
 	kmem_cache_free(kvm_vcpu_cache, vmx);
 	return ERR_PTR(err);
@@ -8273,7 +8273,7 @@ static void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
 {
 	fpregs_lock();
 
-	copy_fpregs_to_fpstate(&vcpu->arch.user_fpu);
+	copy_fpregs_to_fpstate(vcpu->arch.user_fpu);
 	/* PKRU is separately restored in kvm_x86_ops->run. */
 	__copy_kernel_to_fpregs(&vcpu->arch.guest_fpu->state,
 				~XFEATURE_MASK_PKRU);
@@ -8290,7 +8290,7 @@ static void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
 	fpregs_lock();
 
 	copy_fpregs_to_fpstate(vcpu->arch.guest_fpu);
-	copy_kernel_to_fpregs(&vcpu->arch.user_fpu.state);
+	copy_kernel_to_fpregs(&vcpu->arch.user_fpu->state);
 
 	fpregs_mark_activate();
 	fpregs_unlock();