Merge branch 'kvm-updates/2.6.37' of git://git.kernel.org/pub/scm/virt/kvm/kvm

* 'kvm-updates/2.6.37' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  KVM: PPC: BookE: Load the lower half of MSR
  KVM: PPC: BookE: fix sleep with interrupts disabled
  KVM: PPC: e500: Call kvm_vcpu_uninit() before kvmppc_e500_tlb_uninit().
  PPC: KVM: Book E doesn't have __end_interrupts.
  KVM: x86: Issue smp_call_function_many with preemption disabled
  KVM: x86: fix information leak to userland
  KVM: PPC: fix information leak to userland
  KVM: MMU: fix rmap_remove on non present sptes
  KVM: Write protect memory after slot swap
Linus Torvalds 2010-11-05 17:49:22 -07:00
commit d4285bd6be
7 changed files with 21 additions and 13 deletions

arch/powerpc/kernel/kvm.c

@@ -127,7 +127,7 @@ static void kvm_patch_ins_nop(u32 *inst)
 
 static void kvm_patch_ins_b(u32 *inst, int addr)
 {
-#ifdef CONFIG_RELOCATABLE
+#if defined(CONFIG_RELOCATABLE) && defined(CONFIG_PPC_BOOK3S)
 	/* On relocatable kernels interrupts handlers and our code
 	   can be in different regions, so we don't patch them */
 

arch/powerpc/kvm/booke_interrupts.S

@@ -416,7 +416,7 @@ lightweight_exit:
 	lwz	r3, VCPU_PC(r4)
 	mtsrr0	r3
 	lwz	r3, VCPU_SHARED(r4)
-	lwz	r3, VCPU_SHARED_MSR(r3)
+	lwz	r3, (VCPU_SHARED_MSR + 4)(r3)
 	oris	r3, r3, KVMPPC_MSR_MASK@h
 	ori	r3, r3, KVMPPC_MSR_MASK@l
 	mtsrr1	r3
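
Note on the hunk above ("KVM: PPC: BookE: Load the lower half of MSR"): the shared-page msr field is 64 bits wide, but SRR1 on 32-bit Book E takes a 32-bit MSR value, and on a big-endian target the low word of a u64 sits 4 bytes past the field's offset, hence the "+ 4". A standalone C illustration of the layout (not kernel code):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		/* view the same 64-bit value as two 32-bit words */
		union { uint64_t msr; uint32_t w[2]; } u =
			{ .msr = 0x1122334455667788ULL };

		/* On big-endian (as on 32-bit Book E) w[0] is the HIGH word;
		 * the low word lives at byte offset +4, i.e. w[1]. On a
		 * little-endian host the order is reversed. */
		printf("offset +0: %08x, offset +4: %08x\n", u.w[0], u.w[1]);
		return 0;
	}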

arch/powerpc/kvm/e500.c

@@ -138,8 +138,8 @@ void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
 	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
 
 	free_page((unsigned long)vcpu->arch.shared);
-	kvmppc_e500_tlb_uninit(vcpu_e500);
 	kvm_vcpu_uninit(vcpu);
+	kvmppc_e500_tlb_uninit(vcpu_e500);
 	kmem_cache_free(kvm_vcpu_cache, vcpu_e500);
 }
 
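
The reordering above ("Call kvm_vcpu_uninit() before kvmppc_e500_tlb_uninit()") follows the usual teardown rule: run the step that may still reach some state before that state is freed. A minimal standalone sketch of the rule, with hypothetical names (the precise e500 dependency is internal to that code):

	#include <stdio.h>
	#include <stdlib.h>

	struct tlb  { int entries; };
	struct vcpu { struct tlb *tlb; };

	/* hypothetical teardown step that can still inspect the TLB */
	static void vcpu_uninit(struct vcpu *v)
	{
		printf("tearing down vcpu, %d TLB entries left\n", v->tlb->entries);
	}

	int main(void)
	{
		struct vcpu v = { .tlb = malloc(sizeof(*v.tlb)) };

		v.tlb->entries = 16;
		vcpu_uninit(&v);	/* first: may still use v.tlb */
		free(v.tlb);		/* only then release the TLB state */
		return 0;
	}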

arch/powerpc/kvm/powerpc.c

@@ -617,6 +617,7 @@ long kvm_arch_vm_ioctl(struct file *filp,
 	switch (ioctl) {
 	case KVM_PPC_GET_PVINFO: {
 		struct kvm_ppc_pvinfo pvinfo;
+		memset(&pvinfo, 0, sizeof(pvinfo));
 		r = kvm_vm_ioctl_get_pvinfo(&pvinfo);
 		if (copy_to_user(argp, &pvinfo, sizeof(pvinfo))) {
 			r = -EFAULT;
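
This hunk and the pad/reserved zeroing in x86.c below are the two "fix information leak to userland" commits, and they share one pattern: a struct built on the kernel stack and copied wholesale to userspace discloses stale stack bytes through any field or padding byte the code never writes, so the struct is zeroed first. A userspace-shaped illustration with a hypothetical struct:

	#include <stdio.h>
	#include <string.h>

	/* 'token' is typically followed by 3 padding bytes, and 'reserved'
	 * may never be written explicitly -- both leak if left as-is */
	struct info {
		unsigned int flags;
		unsigned char token;
		unsigned int reserved[4];
	};

	static void fill(struct info *p)
	{
		memset(p, 0, sizeof(*p));	/* clears fields AND padding */
		p->flags = 1;
		p->token = 7;
	}

	int main(void)
	{
		struct info i;

		fill(&i);
		/* without the memset, the padding after 'token' would hold
		 * whatever happened to be on the stack before */
		printf("flags=%u token=%u\n", i.flags, i.token);
		return 0;
	}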

arch/powerpc/kvm/timing.c

@@ -35,7 +35,6 @@ void kvmppc_init_timing_stats(struct kvm_vcpu *vcpu)
 	int i;
 
 	/* pause guest execution to avoid concurrent updates */
-	local_irq_disable();
 	mutex_lock(&vcpu->mutex);
 
 	vcpu->arch.last_exit_type = 0xDEAD;
@@ -51,7 +50,6 @@ void kvmppc_init_timing_stats(struct kvm_vcpu *vcpu)
 	vcpu->arch.timing_last_enter.tv64 = 0;
 
 	mutex_unlock(&vcpu->mutex);
-	local_irq_enable();
 }
 
 static void add_exit_timing(struct kvm_vcpu *vcpu, u64 duration, int type)
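
These two hunks are the "fix sleep with interrupts disabled" change: mutex_lock() may sleep, and sleeping with local interrupts disabled is invalid, so the irq bracket around a mutex-protected section has to go; the mutex by itself already serializes the stats update. The shape of the bug, as a kernel-style fragment (illustrative, not compiled here):

	local_irq_disable();
	mutex_lock(&vcpu->mutex);	/* BUG: mutex_lock() may sleep, and the
					 * scheduler cannot run with IRQs off */
	/* ... update stats ... */
	mutex_unlock(&vcpu->mutex);
	local_irq_enable();

	/* fixed: rely on the sleeping lock alone */
	mutex_lock(&vcpu->mutex);
	/* ... update stats ... */
	mutex_unlock(&vcpu->mutex);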

arch/x86/kvm/mmu.c

@@ -720,7 +720,7 @@ static void rmap_remove(struct kvm *kvm, u64 *spte)
 	}
 }
 
-static void set_spte_track_bits(u64 *sptep, u64 new_spte)
+static int set_spte_track_bits(u64 *sptep, u64 new_spte)
 {
 	pfn_t pfn;
 	u64 old_spte = *sptep;
@@ -731,19 +731,20 @@ static void set_spte_track_bits(u64 *sptep, u64 new_spte)
 		old_spte = __xchg_spte(sptep, new_spte);
 
 	if (!is_rmap_spte(old_spte))
-		return;
+		return 0;
 
 	pfn = spte_to_pfn(old_spte);
 	if (!shadow_accessed_mask || old_spte & shadow_accessed_mask)
 		kvm_set_pfn_accessed(pfn);
 	if (!shadow_dirty_mask || (old_spte & shadow_dirty_mask))
 		kvm_set_pfn_dirty(pfn);
+	return 1;
 }
 
 static void drop_spte(struct kvm *kvm, u64 *sptep, u64 new_spte)
 {
-	set_spte_track_bits(sptep, new_spte);
-	rmap_remove(kvm, sptep);
+	if (set_spte_track_bits(sptep, new_spte))
+		rmap_remove(kvm, sptep);
 }
 
 static u64 *rmap_next(struct kvm *kvm, unsigned long *rmapp, u64 *spte)
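
The two hunks above are "KVM: MMU: fix rmap_remove on non present sptes": set_spte_track_bits() now reports whether the spte it cleared was actually present, and drop_spte() only touches the rmap when it was, so rmap_remove() is never asked to unlink an spte that was never linked. The same pattern in a standalone sketch (hypothetical names, not the kernel code):

	#include <stdbool.h>
	#include <stdint.h>

	#define PRESENT (1ULL << 0)

	static unsigned long rmap_count;

	/* exchange the entry, reporting whether the old value was live */
	static bool clear_entry(uint64_t *slot, uint64_t newval)
	{
		uint64_t old = *slot;

		*slot = newval;
		return old & PRESENT;
	}

	static void drop_entry(uint64_t *slot)
	{
		if (clear_entry(slot, 0))	/* bookkeeping only if live */
			rmap_count--;
	}

	int main(void)
	{
		uint64_t e = PRESENT;

		rmap_count = 1;
		drop_entry(&e);		/* live entry: count drops to 0 */
		drop_entry(&e);		/* non-present: no double removal */
		return (int)rmap_count;
	}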

arch/x86/kvm/x86.c

@@ -2560,6 +2560,7 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
 		!kvm_exception_is_soft(vcpu->arch.exception.nr);
 	events->exception.nr = vcpu->arch.exception.nr;
 	events->exception.has_error_code = vcpu->arch.exception.has_error_code;
+	events->exception.pad = 0;
 	events->exception.error_code = vcpu->arch.exception.error_code;
 
 	events->interrupt.injected =
@@ -2573,12 +2574,14 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
 	events->nmi.injected = vcpu->arch.nmi_injected;
 	events->nmi.pending = vcpu->arch.nmi_pending;
 	events->nmi.masked = kvm_x86_ops->get_nmi_mask(vcpu);
+	events->nmi.pad = 0;
 
 	events->sipi_vector = vcpu->arch.sipi_vector;
 
 	events->flags = (KVM_VCPUEVENT_VALID_NMI_PENDING
 			 | KVM_VCPUEVENT_VALID_SIPI_VECTOR
 			 | KVM_VCPUEVENT_VALID_SHADOW);
+	memset(&events->reserved, 0, sizeof(events->reserved));
 }
 
 static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
@@ -2623,6 +2626,7 @@ static void kvm_vcpu_ioctl_x86_get_debugregs(struct kvm_vcpu *vcpu,
 	dbgregs->dr6 = vcpu->arch.dr6;
 	dbgregs->dr7 = vcpu->arch.dr7;
 	dbgregs->flags = 0;
+	memset(&dbgregs->reserved, 0, sizeof(dbgregs->reserved));
 }
 
 static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
@@ -3106,6 +3110,7 @@ static int kvm_vm_ioctl_get_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
 		sizeof(ps->channels));
 	ps->flags = kvm->arch.vpit->pit_state.flags;
 	mutex_unlock(&kvm->arch.vpit->pit_state.lock);
+	memset(&ps->reserved, 0, sizeof(ps->reserved));
 	return r;
 }
 
@@ -3169,10 +3174,6 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
 		struct kvm_memslots *slots, *old_slots;
 		unsigned long *dirty_bitmap;
 
-		spin_lock(&kvm->mmu_lock);
-		kvm_mmu_slot_remove_write_access(kvm, log->slot);
-		spin_unlock(&kvm->mmu_lock);
-
 		r = -ENOMEM;
 		dirty_bitmap = vmalloc(n);
 		if (!dirty_bitmap)
@@ -3194,6 +3195,10 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
 		dirty_bitmap = old_slots->memslots[log->slot].dirty_bitmap;
 		kfree(old_slots);
 
+		spin_lock(&kvm->mmu_lock);
+		kvm_mmu_slot_remove_write_access(kvm, log->slot);
+		spin_unlock(&kvm->mmu_lock);
+
 		r = -EFAULT;
 		if (copy_to_user(log->dirty_bitmap, dirty_bitmap, n)) {
 			vfree(dirty_bitmap);
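
These two hunks implement "KVM: Write protect memory after slot swap". With the old order, a guest write landing after kvm_mmu_slot_remove_write_access() but before the bitmap swap re-marks the page writable while setting a bit only in the bitmap about to be retired; after the swap the page stays writable with a clean bit in the fresh bitmap, so later writes go unlogged. Protecting after the swap makes every subsequent write fault against the new bitmap. A simplified ordering sketch, as comments with hypothetical shorthand (not the kernel code):

	/* old, racy order:
	 *	write_protect_all();		(1) pages read-only
	 *	-- guest write here: dirties OLD bitmap, page writable again --
	 *	install_empty_bitmap();		(2) page writable yet clean in NEW bitmap
	 *
	 * fixed order:
	 *	install_empty_bitmap();		(1)
	 *	write_protect_all();		(2) later writes fault, mark NEW bitmap
	 */
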
@@ -3486,6 +3491,7 @@ long kvm_arch_vm_ioctl(struct file *filp,
 		user_ns.clock = kvm->arch.kvmclock_offset + now_ns;
 		local_irq_enable();
 		user_ns.flags = 0;
+		memset(&user_ns.pad, 0, sizeof(user_ns.pad));
 
 		r = -EFAULT;
 		if (copy_to_user(argp, &user_ns, sizeof(user_ns)))
@@ -3972,8 +3978,10 @@ int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu)
 		return X86EMUL_CONTINUE;
 
 	if (kvm_x86_ops->has_wbinvd_exit()) {
+		preempt_disable();
 		smp_call_function_many(vcpu->arch.wbinvd_dirty_mask,
 				wbinvd_ipi, NULL, 1);
+		preempt_enable();
 		cpumask_clear(vcpu->arch.wbinvd_dirty_mask);
 	}
 	wbinvd();
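
The final hunk is "Issue smp_call_function_many with preemption disabled": smp_call_function_many() uses per-CPU state and excludes the calling CPU from the target mask, so the caller must stay on one CPU for the duration; calling it while preemptible is a bug (and preemption debugging builds can flag it). Hence the standard bracket, shown as a kernel-style fragment with a placeholder mask:

	preempt_disable();
	smp_call_function_many(mask, func, info, /* wait = */ 1);
	preempt_enable();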