KVM: MMU: do not write-protect large mappings
There is not much point in write protecting large mappings. This can only happen when a page is shadowed during the window between is_largepage_backed and mmu_lock acquisition. Zap the entry instead, so the next pagefault will find a shadowed page via is_largepage_backed and fall back to 4k translations.

Simplifies out of sync shadow.

Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
commit 38187c830c
parent a378b4e64c
@@ -1180,11 +1180,16 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
 	    || (write_fault && !is_write_protection(vcpu) && !user_fault)) {
 		struct kvm_mmu_page *shadow;
 
+		if (largepage && has_wrprotected_page(vcpu->kvm, gfn)) {
+			ret = 1;
+			spte = shadow_trap_nonpresent_pte;
+			goto set_pte;
+		}
+
 		spte |= PT_WRITABLE_MASK;
 
 		shadow = kvm_mmu_lookup_page(vcpu->kvm, gfn);
-		if (shadow ||
-		   (largepage && has_wrprotected_page(vcpu->kvm, gfn))) {
+		if (shadow) {
 			pgprintk("%s: found shadow page for %lx, marking ro\n",
 				 __func__, gfn);
 			ret = 1;
@@ -1197,6 +1202,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
 	if (pte_access & ACC_WRITE_MASK)
 		mark_page_dirty(vcpu->kvm, gfn);
 
+set_pte:
 	set_shadow_pte(shadow_pte, spte);
 	return ret;
 }
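For readers skimming the diff, the decision the patch leaves in set_spte() reduces to three outcomes. The userspace sketch below models that decision under stated assumptions: the enum, the decide() helper, and the boolean inputs are illustrative names invented here, not kernel code, and the kernel's real spte handling carries far more state.

/*
 * Minimal userspace sketch of the post-patch write-protect decision
 * in set_spte(). All names and outcomes here are illustrative
 * assumptions modelling the patch's logic, not actual kernel code.
 */
#include <stdbool.h>
#include <stdio.h>

enum spte_outcome {
	MAP_WRITABLE,	/* install a writable spte */
	MAP_READONLY,	/* write-protect: the gfn holds a shadow page table */
	ZAP_ENTRY,	/* leave nonpresent; the refault falls back to 4k */
};

static enum spte_outcome decide(bool largepage, bool wrprotected_4k_page,
				bool gfn_is_shadowed)
{
	/*
	 * Post-patch rule: never write-protect a large mapping. If any
	 * 4k page under the large range is already shadowed, zap the
	 * entry so the next page fault retries with 4k translations.
	 */
	if (largepage && wrprotected_4k_page)
		return ZAP_ENTRY;
	if (gfn_is_shadowed)
		return MAP_READONLY;	/* 4k case keeps the old behaviour */
	return MAP_WRITABLE;
}

int main(void)
{
	/* Large mapping over a range containing a shadowed page: zap. */
	printf("%d\n", decide(true, true, false));	/* 2 == ZAP_ENTRY */
	/* Plain 4k mapping of a shadowed gfn: write-protect as before. */
	printf("%d\n", decide(false, false, true));	/* 1 == MAP_READONLY */
	return 0;
}

The point of the restructuring is visible in the sketch: the large-page check is hoisted above the kvm_mmu_lookup_page() path, so the read-only branch now handles only the genuine 4k shadow-page case, which is what simplifies the out of sync shadow code.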
|