KVM: PPC: Rename deliver_interrupts to prepare_to_enter
This function also updates paravirt int_pending, so rename it to make it
more obvious that this is a collection of checks run prior to
(re)entering a guest.

Signed-off-by: Scott Wood <scottwood@freescale.com>
Signed-off-by: Alexander Graf <agraf@suse.de>
Signed-off-by: Avi Kivity <avi@redhat.com>
commit 7e28e60ef9
parent 1d1ef22208
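To make the new name concrete: the renamed hook does more than deliver
interrupts; it also mirrors the pending-exception state into the paravirt
int_pending field that the guest polls. The self-contained C sketch below
models that pattern. The struct and the deliver_one() helper are
illustrative stand-ins, not kernel code; only the final int_pending update
follows the shape of the renamed function in the diff below.

#include <stdbool.h>

/* Illustrative stand-in for the relevant vcpu state (not kernel code). */
struct vcpu_model {
	unsigned long pending_exceptions; /* bitmap of queued exceptions */
	int shared_int_pending;           /* paravirt flag the guest polls */
};

/* Stand-in for per-core delivery: "inject" one pending exception. */
static bool deliver_one(struct vcpu_model *vcpu)
{
	if (!vcpu->pending_exceptions)
		return false;
	/* Clear the lowest-numbered pending exception, as if injected. */
	vcpu->pending_exceptions &= vcpu->pending_exceptions - 1;
	return true;
}

/* Model of the renamed hook: checks run before (re)entering the guest. */
void prepare_to_enter(struct vcpu_model *vcpu)
{
	unsigned long old_pending = vcpu->pending_exceptions;

	while (deliver_one(vcpu))
		;

	/* Tell the guest about our interrupt status: this paravirt update
	 * is why "deliver_interrupts" undersold what the function does. */
	if (vcpu->pending_exceptions)
		vcpu->shared_int_pending = 1;
	else if (old_pending)
		vcpu->shared_int_pending = 0;
}

The point of the sketch is the tail: even when nothing is left to deliver,
guest-visible state may still be written, which is why "prepare to enter"
describes the hook better than "deliver interrupts".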
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -94,7 +94,7 @@ extern int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu,
 extern void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
 extern void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu);
 
-extern void kvmppc_core_deliver_interrupts(struct kvm_vcpu *vcpu);
+extern void kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu);
 extern int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu);
 extern void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags);
 extern void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu);
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -258,7 +258,7 @@ static bool clear_irqprio(struct kvm_vcpu *vcpu, unsigned int priority)
 	return true;
 }
 
-void kvmppc_core_deliver_interrupts(struct kvm_vcpu *vcpu)
+void kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
 {
 	unsigned long *pending = &vcpu->arch.pending_exceptions;
 	unsigned long old_pending = vcpu->arch.pending_exceptions;
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -482,7 +482,7 @@ static void kvmppc_set_timer(struct kvm_vcpu *vcpu)
 	if (now > vcpu->arch.dec_expires) {
 		/* decrementer has already gone negative */
 		kvmppc_core_queue_dec(vcpu);
-		kvmppc_core_deliver_interrupts(vcpu);
+		kvmppc_core_prepare_to_enter(vcpu);
 		return;
 	}
 	dec_nsec = (vcpu->arch.dec_expires - now) * NSEC_PER_SEC
@@ -797,7 +797,7 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 
 		list_for_each_entry_safe(v, vn, &vc->runnable_threads,
					 arch.run_list) {
-			kvmppc_core_deliver_interrupts(v);
+			kvmppc_core_prepare_to_enter(v);
 			if (signal_pending(v->arch.run_task)) {
 				kvmppc_remove_runnable(vc, v);
 				v->stat.signal_exits++;
@@ -857,7 +857,7 @@ int kvmppc_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu)
 		if (run->exit_reason == KVM_EXIT_PAPR_HCALL &&
 		    !(vcpu->arch.shregs.msr & MSR_PR)) {
 			r = kvmppc_pseries_do_hcall(vcpu);
-			kvmppc_core_deliver_interrupts(vcpu);
+			kvmppc_core_prepare_to_enter(vcpu);
 		}
 	} while (r == RESUME_GUEST);
 	return r;
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -764,7 +764,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
			/* In case an interrupt came in that was triggered
			 * from userspace (like DEC), we need to check what
			 * to inject now! */
-			kvmppc_core_deliver_interrupts(vcpu);
+			kvmppc_core_prepare_to_enter(vcpu);
 		}
 	}
 
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -289,7 +289,7 @@ static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
 }
 
 /* Check pending exceptions and deliver one, if possible. */
-void kvmppc_core_deliver_interrupts(struct kvm_vcpu *vcpu)
+void kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
 {
 	unsigned long *pending = &vcpu->arch.pending_exceptions;
 	unsigned long old_pending = vcpu->arch.pending_exceptions;
@@ -611,7 +611,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 
 	local_irq_disable();
 
-	kvmppc_core_deliver_interrupts(vcpu);
+	kvmppc_core_prepare_to_enter(vcpu);
 
 	if (!(r & RESUME_HOST)) {
 		/* To avoid clobbering exit_reason, only check for signals if
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -559,7 +559,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 		vcpu->arch.hcall_needed = 0;
 	}
 
-	kvmppc_core_deliver_interrupts(vcpu);
+	kvmppc_core_prepare_to_enter(vcpu);
 
 	r = kvmppc_vcpu_run(run, vcpu);
 