KVM: MMU: When updating the dirty bit, inform the mmu about it
Since the mmu uses different shadow pages for dirty large pages and clean large pages, this allows the mmu to drop ptes that are now invalid.

Signed-off-by: Avi Kivity <avi@qumranet.com>
commit c4fcc27246
parent 5df34a86f9
@@ -74,9 +74,14 @@ static void FNAME(update_dirty_bit)(struct kvm_vcpu *vcpu,
 				    pt_element_t *ptep,
 				    gfn_t table_gfn)
 {
+	gpa_t pte_gpa;
+
 	if (write_fault && !is_dirty_pte(*ptep)) {
 		mark_page_dirty(vcpu->kvm, table_gfn);
 		*ptep |= PT_DIRTY_MASK;
+		pte_gpa = ((gpa_t)table_gfn << PAGE_SHIFT);
+		pte_gpa += offset_in_page(ptep);
+		kvm_mmu_pte_write(vcpu, pte_gpa, (u8 *)ptep, sizeof(*ptep));
 	}
 }
 
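For readability, here is a sketch of how FNAME(update_dirty_bit) reads once this hunk is applied. It is reconstructed only from the context and added lines above; the FNAME() macro, pt_element_t, and the mark_page_dirty()/kvm_mmu_pte_write() helpers are assumed to come from the surrounding KVM MMU code, and the write_fault parameter (used in the body but outside the visible hunk context) is assumed to be part of the signature.

/* Sketch of the post-patch function, reconstructed from the hunk above.
 * The signature and helpers are assumptions based on the visible context. */
static void FNAME(update_dirty_bit)(struct kvm_vcpu *vcpu,
				    int write_fault,
				    pt_element_t *ptep,
				    gfn_t table_gfn)
{
	gpa_t pte_gpa;

	if (write_fault && !is_dirty_pte(*ptep)) {
		/* The guest page table page itself is being modified. */
		mark_page_dirty(vcpu->kvm, table_gfn);
		*ptep |= PT_DIRTY_MASK;
		/* New in this patch: compute the guest-physical address of
		 * the pte being dirtied... */
		pte_gpa = ((gpa_t)table_gfn << PAGE_SHIFT);
		pte_gpa += offset_in_page(ptep);
		/* ...and inform the shadow mmu of the write, so shadow ptes
		 * built for the clean large page can be dropped. */
		kvm_mmu_pte_write(vcpu, pte_gpa, (u8 *)ptep, sizeof(*ptep));
	}
}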