forked from luck/tmp_suning_uos_patched
KVM: MMU: move mmu pages calculated out of mmu lock
kvm_mmu_calculate_mmu_pages needs to walk all memslots and it is protected by kvm->slots_lock, so move it out of the mmu spinlock. Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com> Signed-off-by: Avi Kivity <avi@redhat.com>
This commit is contained in:
parent
1b7fd45c32
commit
48c0e4e906
@@ -6105,7 +6105,7 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
|
||||
int user_alloc)
|
||||
{
|
||||
|
||||
int npages = mem->memory_size >> PAGE_SHIFT;
|
||||
int nr_mmu_pages = 0, npages = mem->memory_size >> PAGE_SHIFT;
|
||||
|
||||
if (!user_alloc && !old.user_alloc && old.rmap && !npages) {
|
||||
int ret;
|
||||
@@ -6120,12 +6120,12 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
|
||||
"failed to munmap memory\n");
|
||||
}
|
||||
|
||||
spin_lock(&kvm->mmu_lock);
|
||||
if (!kvm->arch.n_requested_mmu_pages) {
|
||||
unsigned int nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm);
|
||||
kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
|
||||
}
|
||||
if (!kvm->arch.n_requested_mmu_pages)
|
||||
nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm);
|
||||
|
||||
spin_lock(&kvm->mmu_lock);
|
||||
if (nr_mmu_pages)
|
||||
kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
|
||||
kvm_mmu_slot_remove_write_access(kvm, mem->slot);
|
||||
spin_unlock(&kvm->mmu_lock);
|
||||
}
|
||||
|
Loading…
Reference in New Issue
Block a user