KVM: leverage change to adjust slots->used_slots in update_memslots()

update_memslots() is only called by __kvm_set_memory_region(), in which
"change" is calculated and indicates how to adjust slots->used_slots:

  * increase by one if it is KVM_MR_CREATE
  * decrease by one if it is KVM_MR_DELETE
  * leave it unchanged otherwise

This patch adjusts slots->used_slots in update_memslots() based on the
"change" value instead of re-calculating it there.
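
For readability, here is the old and new accounting side by side,
condensed from the hunk below (the comment on the default case is added
here for illustration and is not part of the applied diff):

  Before: the adjustment is inferred from the old and new slot contents.

	if (!new->npages) {
		WARN_ON(!mslots[i].npages);
		if (mslots[i].npages)
			slots->used_slots--;
	} else {
		if (!mslots[i].npages)
			slots->used_slots++;
	}

  After: the adjustment is driven by the caller-provided "change".

	switch (change) {
	case KVM_MR_CREATE:
		slots->used_slots++;
		WARN_ON(mslots[i].npages || !new->npages);
		break;
	case KVM_MR_DELETE:
		slots->used_slots--;
		WARN_ON(new->npages || !mslots[i].npages);
		break;
	default:	/* KVM_MR_MOVE, KVM_MR_FLAGS_ONLY: count unchanged */
		break;
	}

The WARN_ON()s keep the old sanity checks: a KVM_MR_CREATE must turn an
empty slot into a populated one, and a KVM_MR_DELETE the reverse.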

Signed-off-by: Wei Yang <richard.weiyang@gmail.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>

Commit:       31fc4f95dd (parent 3c6e099fa1)
Author:       Wei Yang, 2018-08-22 21:57:11 +08:00
Committed by: Paolo Bonzini

@@ -805,20 +805,25 @@ static int kvm_create_dirty_bitmap(struct kvm_memory_slot *memslot)
  * sorted array and known changed memslot position.
  */
 static void update_memslots(struct kvm_memslots *slots,
-			    struct kvm_memory_slot *new)
+			    struct kvm_memory_slot *new,
+			    enum kvm_mr_change change)
 {
 	int id = new->id;
 	int i = slots->id_to_index[id];
 	struct kvm_memory_slot *mslots = slots->memslots;
 
 	WARN_ON(mslots[i].id != id);
-	if (!new->npages) {
-		WARN_ON(!mslots[i].npages);
-		if (mslots[i].npages)
-			slots->used_slots--;
-	} else {
-		if (!mslots[i].npages)
-			slots->used_slots++;
+	switch (change) {
+	case KVM_MR_CREATE:
+		slots->used_slots++;
+		WARN_ON(mslots[i].npages || !new->npages);
+		break;
+	case KVM_MR_DELETE:
+		slots->used_slots--;
+		WARN_ON(new->npages || !mslots[i].npages);
+		break;
+	default:
+		break;
 	}
 
 	while (i < KVM_MEM_SLOTS_NUM - 1 &&
@@ -1054,7 +1059,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
 		memset(&new.arch, 0, sizeof(new.arch));
 	}
 
-	update_memslots(slots, &new);
+	update_memslots(slots, &new, change);
 	old_memslots = install_new_memslots(kvm, as_id, slots);
 
 	kvm_arch_commit_memory_region(kvm, mem, &old, &new, change);