KVM: PPC: Convert u64 -> ulong
There are some pieces in the code that I overlooked that still use u64s
instead of longs. This slows down 32 bit hosts unnecessarily, so let's just
move them to ulong.

Signed-off-by: Alexander Graf <agraf@suse.de>
Signed-off-by: Avi Kivity <avi@redhat.com>
commit af7b4d104b
parent 4f84139037
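Why the type matters, in brief: on a 32-bit powerpc host, unsigned long is 32 bits wide, so address checks compile to single-register operations, while u64 arguments travel in register pairs and need paired compares. A minimal sketch (not part of the patch; the helper name is hypothetical) of the kind of effective-address check the MMU code performs:

#include <linux/types.h>	/* u64; ulong is typedef'd here as well */

/*
 * Illustration only: with ulong parameters this is one 32-bit AND and one
 * 32-bit compare on ppc32; with u64 parameters the same check would need
 * 64-bit arithmetic split across register pairs, even though the effective
 * addresses involved never exceed the host word size.
 */
static inline bool ea_matches(ulong ea, ulong base, ulong mask)
{
	return (ea & mask) == (base & mask);
}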
@@ -107,9 +107,9 @@ struct kvmppc_vcpu_book3s {
 #define VSID_BAT	0x7fffffffffb00000ULL
 #define VSID_PR	0x8000000000000000ULL
 
-extern void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, u64 ea, u64 ea_mask);
+extern void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong ea, ulong ea_mask);
 extern void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 vp, u64 vp_mask);
-extern void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, u64 pa_start, u64 pa_end);
+extern void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end);
 extern void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 new_msr);
 extern void kvmppc_mmu_book3s_64_init(struct kvm_vcpu *vcpu);
 extern void kvmppc_mmu_book3s_32_init(struct kvm_vcpu *vcpu);
@@ -124,9 +124,9 @@ struct kvm_arch {
 };
 
 struct kvmppc_pte {
-	u64 eaddr;
+	ulong eaddr;
 	u64 vpage;
-	u64 raddr;
+	ulong raddr;
 	bool may_read : 1;
 	bool may_write : 1;
 	bool may_execute : 1;
@@ -145,7 +145,7 @@ struct kvmppc_mmu {
 	int  (*xlate)(struct kvm_vcpu *vcpu, gva_t eaddr, struct kvmppc_pte *pte, bool data);
 	void (*reset_msr)(struct kvm_vcpu *vcpu);
 	void (*tlbie)(struct kvm_vcpu *vcpu, ulong addr, bool large);
-	int  (*esid_to_vsid)(struct kvm_vcpu *vcpu, u64 esid, u64 *vsid);
+	int  (*esid_to_vsid)(struct kvm_vcpu *vcpu, ulong esid, u64 *vsid);
 	u64  (*ea_to_vp)(struct kvm_vcpu *vcpu, gva_t eaddr, bool data);
 	bool (*is_dcbz32)(struct kvm_vcpu *vcpu);
 };
@@ -813,12 +813,12 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
			 * so we can't use the NX bit inside the guest. Let's cross our fingers,
			 * that no guest that needs the dcbz hack does NX.
			 */
-			kvmppc_mmu_pte_flush(vcpu, kvmppc_get_pc(vcpu), ~0xFFFULL);
+			kvmppc_mmu_pte_flush(vcpu, kvmppc_get_pc(vcpu), ~0xFFFUL);
			r = RESUME_GUEST;
		} else {
			vcpu->arch.msr |= to_svcpu(vcpu)->shadow_srr1 & 0x58000000;
			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
-			kvmppc_mmu_pte_flush(vcpu, kvmppc_get_pc(vcpu), ~0xFFFULL);
+			kvmppc_mmu_pte_flush(vcpu, kvmppc_get_pc(vcpu), ~0xFFFUL);
			r = RESUME_GUEST;
		}
		break;
@@ -844,7 +844,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
			vcpu->arch.dear = dar;
			to_book3s(vcpu)->dsisr = to_svcpu(vcpu)->fault_dsisr;
			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
-			kvmppc_mmu_pte_flush(vcpu, vcpu->arch.dear, ~0xFFFULL);
+			kvmppc_mmu_pte_flush(vcpu, vcpu->arch.dear, ~0xFFFUL);
			r = RESUME_GUEST;
		}
		break;
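Aside on the constant suffix above (not from the patch; the macro names are hypothetical): kvmppc_mmu_pte_flush() is being passed a page-granular mask, and the suffix now tracks the ulong parameter type. Assuming a 32-bit host where unsigned long is 32 bits:

/* Illustration only */
#define EA_FLUSH_MASK_UL	(~0xFFFUL)	/* 0xFFFFF000: one 32-bit word        */
#define EA_FLUSH_MASK_ULL	(~0xFFFULL)	/* 0xFFFFFFFFFFFFF000: always 64 bits */

Both clear the low 12 bits, i.e. they flush every shadow PTE in the 4k page containing the faulting address, but the UL spelling matches the new ulong argument and avoids needless 64-bit arithmetic on ppc32.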
@@ -60,7 +60,7 @@ static inline bool check_debug_ip(struct kvm_vcpu *vcpu)
 
 static int kvmppc_mmu_book3s_32_xlate_bat(struct kvm_vcpu *vcpu, gva_t eaddr,
					  struct kvmppc_pte *pte, bool data);
-static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, u64 esid,
+static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
					     u64 *vsid);
 
 static struct kvmppc_sr *find_sr(struct kvmppc_vcpu_book3s *vcpu_book3s, gva_t eaddr)
@@ -183,7 +183,7 @@ static int kvmppc_mmu_book3s_32_xlate_pte(struct kvm_vcpu *vcpu, gva_t eaddr,
	struct kvmppc_sr *sre;
	hva_t ptegp;
	u32 pteg[16];
-	u64 ptem = 0;
+	u32 ptem = 0;
	int i;
	int found = 0;
 
@@ -327,7 +327,7 @@ static void kvmppc_mmu_book3s_32_tlbie(struct kvm_vcpu *vcpu, ulong ea, bool lar
	kvmppc_mmu_pte_flush(vcpu, ea, 0x0FFFF000);
 }
 
-static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, u64 esid,
+static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
					     u64 *vsid)
 {
	/* In case we only have one of MSR_IR or MSR_DR set, let's put
@@ -77,11 +77,9 @@ static void invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
	kvm_release_pfn_clean(pte->pfn);
 }
 
-void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, u64 _guest_ea, u64 _ea_mask)
+void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong guest_ea, ulong ea_mask)
 {
	int i;
-	u32 guest_ea = _guest_ea;
-	u32 ea_mask = _ea_mask;
 
	dprintk_mmu("KVM: Flushing %d Shadow PTEs: 0x%x & 0x%x\n",
		    vcpu->arch.hpte_cache_offset, guest_ea, ea_mask);
@@ -127,7 +125,7 @@ void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 guest_vp, u64 vp_mask)
	}
 }
 
-void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, u64 pa_start, u64 pa_end)
+void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end)
 {
	int i;
 
@@ -265,7 +263,7 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
	/* Get host physical address for gpa */
	hpaddr = gfn_to_pfn(vcpu->kvm, orig_pte->raddr >> PAGE_SHIFT);
	if (kvm_is_error_hva(hpaddr)) {
-		printk(KERN_INFO "Couldn't get guest page for gfn %llx!\n",
+		printk(KERN_INFO "Couldn't get guest page for gfn %lx!\n",
			orig_pte->eaddr);
		return -EINVAL;
	}
@@ -232,7 +232,7 @@ static int kvmppc_mmu_book3s_64_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
		}
 
		dprintk("KVM MMU: Translated 0x%lx [0x%llx] -> 0x%llx "
-			"-> 0x%llx\n",
+			"-> 0x%lx\n",
			eaddr, avpn, gpte->vpage, gpte->raddr);
		found = true;
		break;
@@ -439,7 +439,7 @@ static void kvmppc_mmu_book3s_64_tlbie(struct kvm_vcpu *vcpu, ulong va,
	kvmppc_mmu_pte_vflush(vcpu, va >> 12, mask);
 }
 
-static int kvmppc_mmu_book3s_64_esid_to_vsid(struct kvm_vcpu *vcpu, u64 esid,
+static int kvmppc_mmu_book3s_64_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
					     u64 *vsid)
 {
	switch (vcpu->arch.msr & (MSR_DR|MSR_IR)) {
@@ -62,7 +62,7 @@ static void invalidate_pte(struct hpte_cache *pte)
	kvm_release_pfn_clean(pte->pfn);
 }
 
-void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, u64 guest_ea, u64 ea_mask)
+void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong guest_ea, ulong ea_mask)
 {
	int i;
 
@@ -110,7 +110,7 @@ void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 guest_vp, u64 vp_mask)
	}
 }
 
-void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, u64 pa_start, u64 pa_end)
+void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end)
 {
	int i;
 
@@ -216,7 +216,7 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
	/* Get host physical address for gpa */
	hpaddr = gfn_to_pfn(vcpu->kvm, orig_pte->raddr >> PAGE_SHIFT);
	if (kvm_is_error_hva(hpaddr)) {
-		printk(KERN_INFO "Couldn't get guest page for gfn %llx!\n", orig_pte->eaddr);
+		printk(KERN_INFO "Couldn't get guest page for gfn %lx!\n", orig_pte->eaddr);
		return -EINVAL;
	}
	hpaddr <<= PAGE_SHIFT;