KVM: MMU: Store nx bit for large page shadows
We need to distinguish between large page shadows which have the nx bit set and those which don't. The problem shows up when booting a newer SMP Linux kernel, where the trampoline page (which runs in real mode and therefore uses the same shadow pages as large pages) shares a mapping with a kernel data page that is mapped with nx set, causing kvm to spin on that page.

Signed-off-by: Avi Kivity <avi@qumranet.com>
This commit is contained in:
parent
2cb7e71422
commit
d55e2cb201
@@ -121,7 +121,7 @@ struct kvm_pte_chain {
  * bits 4:7 - page table level for this shadow (1-4)
  * bits 8:9 - page table quadrant for 2-level guests
  * bit  16 - "metaphysical" - gfn is not a real page (huge page/real mode)
- * bits 17:18 - "access" - the user and writable bits of a huge page pde
+ * bits 17:19 - "access" - the user, writable, and nx bits of a huge page pde
  */
 union kvm_mmu_page_role {
 	unsigned word;
@@ -131,7 +131,7 @@ union kvm_mmu_page_role {
 		unsigned quadrant : 2;
 		unsigned pad_for_nice_hex_output : 6;
 		unsigned metaphysical : 1;
-		unsigned hugepage_access : 2;
+		unsigned hugepage_access : 3;
 	};
 };
@@ -366,6 +366,8 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
 			metaphysical = 1;
 			hugepage_access = *guest_ent;
 			hugepage_access &= PT_USER_MASK | PT_WRITABLE_MASK;
+			if (*guest_ent & PT64_NX_MASK)
+				hugepage_access |= (1 << 2);
 			hugepage_access >>= PT_WRITABLE_SHIFT;
 			table_gfn = (*guest_ent & PT_BASE_ADDR_MASK)
 				    >> PAGE_SHIFT;
Loading…
Reference in New Issue
Block a user