KVM/arm64 fixes for 5.10, take #1
- Force PTE mapping on device pages provided via VFIO
- Fix detection of cacheable mapping at S2
- Fallback to PMD/PTE mappings for composite huge pages
- Fix accounting of Stage-2 PGD allocation
- Fix AArch32 handling of some of the debug registers
- Simplify host HYP entry
- Fix stray pointer conversion on nVHE TLB invalidation
- Fix initialization of the nVHE code
- Simplify handling of capabilities exposed to HYP
- Nuke VCPUs caught using a forbidden AArch32 EL0

-----BEGIN PGP SIGNATURE-----

iQJDBAABCgAtFiEEn9UcU+C1Yxj9lZw9I9DQutE9ekMFAl+cO3oPHG1hekBrZXJu
ZWwub3JnAAoJECPQ0LrRPXpDJdoP/jiKYR8iVkq/RmIsQl383KwQiJGTMi0iL2Zw
/tHnf8bKowAPyG8bqyXMJqlWOb7tcp6U3m+WhENAZHWH02r2M921q0DGVW5p48ou
Ek4zJnFF1iL5ryOBgROKK1nymUZOi3W1a1SsD6ZPImQsKsjNGbqKgWsGs8i9ft0P
vkNZwlqebzJp+OR3agJemc8dkXcGlcRHk7fffdMcU8jsF5RJ9zC0XU0+scKryxhV
o8PzKSlwCeisyL+Vz+s7POzoD3Rt+P+qjblz5NWqy/NHuLh+V9hzUSDOjWbZb70f
Er29vGv7Yjb4nKK2KUzNqirSfXsRylfsjGr+YibP6uKEUMuUm/V41DqzT7nMalIm
cOBGtPk6W9wOL8JNDmlyVGCfATI+5RrErQ8nFClrPu3qw4Hv4pb1Ad5OgAhNE0u1
PfUyBBtQKNAjTdVCRfSuFL4d2yegy1rrpCmYWrvdQjLlXemwgYgKnSQN98cZHgjA
foCAP5gJpAWGualyhKJx2CkY/5deeWKS39ISiNgHo5eRvKsGEnMN7j9UX77VbhRr
PkwCmeUJ3kjzaAfmtcBN/iLjwQbWypidjX2Vbfl5WoVdLuiYXFZIvsdaqRHGl56F
5zhYxM8DKODNEJKMl7a89oEFGKy8x1PQ0kqer9a6GBWkNDrQMOSL4+FkxCyM2m9g
RoHtmdy0
=gVaX
-----END PGP SIGNATURE-----

Merge tag 'kvmarm-fixes-5.10-1' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm into HEAD
This commit is contained in: commit 699116c45e
@@ -375,6 +375,23 @@ cpucap_multi_entry_cap_matches(const struct arm64_cpu_capabilities *entry,
 	return false;
 }
 
+static __always_inline bool is_vhe_hyp_code(void)
+{
+	/* Only defined for code run in VHE hyp context */
+	return __is_defined(__KVM_VHE_HYPERVISOR__);
+}
+
+static __always_inline bool is_nvhe_hyp_code(void)
+{
+	/* Only defined for code run in NVHE hyp context */
+	return __is_defined(__KVM_NVHE_HYPERVISOR__);
+}
+
+static __always_inline bool is_hyp_code(void)
+{
+	return is_vhe_hyp_code() || is_nvhe_hyp_code();
+}
+
 extern DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
 extern struct static_key_false cpu_hwcap_keys[ARM64_NCAPS];
 extern struct static_key_false arm64_const_caps_ready;
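The new helpers lean on __is_defined(), which turns "is this macro defined (to 1)?" into a compile-time 0/1 without #ifdef blocks. A minimal standalone sketch of that trick, mirroring the machinery in include/linux/kconfig.h (the MY_HYP_FLAG demo macro is made up for illustration):

#include <stdio.h>

#define __ARG_PLACEHOLDER_1 0,
#define __take_second_arg(__ignored, val, ...) val
#define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)
#define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val)
#define __is_defined(x) ___is_defined(x)

#define MY_HYP_FLAG 1	/* comment this out and the result flips to 0 */

int main(void)
{
	/* If MY_HYP_FLAG expands to 1, the placeholder pastes to a
	 * leading "0," and the second argument (1) is taken; otherwise
	 * the pasted token is junk and the default 0 is taken. */
	printf("%d\n", __is_defined(MY_HYP_FLAG));
	return 0;
}

Because the result is an integer constant expression, branches on is_vhe_hyp_code()/is_nvhe_hyp_code() fold away entirely in each compilation unit.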
@@ -427,22 +444,6 @@ static __always_inline bool __cpus_have_const_cap(int num)
 	return static_branch_unlikely(&cpu_hwcap_keys[num]);
 }
 
-/*
- * Test for a capability, possibly with a runtime check.
- *
- * Before capabilities are finalized, this behaves as cpus_have_cap().
- * After capabilities are finalized, this is patched to avoid a runtime check.
- *
- * @num must be a compile-time constant.
- */
-static __always_inline bool cpus_have_const_cap(int num)
-{
-	if (system_capabilities_finalized())
-		return __cpus_have_const_cap(num);
-	else
-		return cpus_have_cap(num);
-}
-
 /*
  * Test for a capability without a runtime check.
  *
@@ -459,6 +460,27 @@ static __always_inline bool cpus_have_final_cap(int num)
 	BUG();
 }
 
+/*
+ * Test for a capability, possibly with a runtime check for non-hyp code.
+ *
+ * For hyp code, this behaves the same as cpus_have_final_cap().
+ *
+ * For non-hyp code:
+ * Before capabilities are finalized, this behaves as cpus_have_cap().
+ * After capabilities are finalized, this is patched to avoid a runtime check.
+ *
+ * @num must be a compile-time constant.
+ */
+static __always_inline bool cpus_have_const_cap(int num)
+{
+	if (is_hyp_code())
+		return cpus_have_final_cap(num);
+	else if (system_capabilities_finalized())
+		return __cpus_have_const_cap(num);
+	else
+		return cpus_have_cap(num);
+}
+
 static inline void cpus_set_cap(unsigned int num)
 {
 	if (num >= ARM64_NCAPS) {
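Note that the two hunks above are a move, not just a rewrite: cpus_have_const_cap() now calls cpus_have_final_cap(), so its definition has to come after it in the header. Hyp code unconditionally takes the finalized-capability path (capabilities are always final by the time hyp runs), while the runtime fallback is preserved for ordinary kernel code.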
@@ -239,6 +239,7 @@ enum vcpu_sysreg {
 #define cp14_DBGWCR0	(DBGWCR0_EL1 * 2)
 #define cp14_DBGWVR0	(DBGWVR0_EL1 * 2)
 #define cp14_DBGDCCINT	(MDCCINT_EL1 * 2)
+#define cp14_DBGVCR	(DBGVCR32_EL2 * 2)
 
 #define NR_COPRO_REGS	(NR_SYS_REGS * 2)
 
@@ -86,13 +86,12 @@ static inline bool is_kernel_in_hyp_mode(void)
 static __always_inline bool has_vhe(void)
 {
 	/*
-	 * The following macros are defined for code specic to VHE/nVHE.
-	 * If has_vhe() is inlined into those compilation units, it can
-	 * be determined statically. Otherwise fall back to caps.
+	 * Code only run in VHE/NVHE hyp context can assume VHE is present or
+	 * absent. Otherwise fall back to caps.
 	 */
-	if (__is_defined(__KVM_VHE_HYPERVISOR__))
+	if (is_vhe_hyp_code())
 		return true;
-	else if (__is_defined(__KVM_NVHE_HYPERVISOR__))
+	else if (is_nvhe_hyp_code())
 		return false;
 	else
 		return cpus_have_final_cap(ARM64_HAS_VIRT_HOST_EXTN);
@@ -87,7 +87,6 @@ KVM_NVHE_ALIAS(__icache_flags);
 /* Kernel symbols needed for cpus_have_final/const_caps checks. */
 KVM_NVHE_ALIAS(arm64_const_caps_ready);
 KVM_NVHE_ALIAS(cpu_hwcap_keys);
-KVM_NVHE_ALIAS(cpu_hwcaps);
 
 /* Static keys which are set if a vGIC trap should be handled in hyp. */
 KVM_NVHE_ALIAS(vgic_v2_cpuif_trap);
@@ -808,6 +808,25 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
 
 		preempt_enable();
 
+		/*
+		 * The ARMv8 architecture doesn't give the hypervisor
+		 * a mechanism to prevent a guest from dropping to AArch32 EL0
+		 * if implemented by the CPU. If we spot the guest in such
+		 * state and that we decided it wasn't supposed to do so (like
+		 * with the asymmetric AArch32 case), return to userspace with
+		 * a fatal error.
+		 */
+		if (!system_supports_32bit_el0() && vcpu_mode_is_32bit(vcpu)) {
+			/*
+			 * As we have caught the guest red-handed, decide that
+			 * it isn't fit for purpose anymore by making the vcpu
+			 * invalid. The VMM can try and fix it by issuing a
+			 * KVM_ARM_VCPU_INIT if it really wants to.
+			 */
+			vcpu->arch.target = -1;
+			ret = ARM_EXCEPTION_IL;
+		}
+
 		ret = handle_exit(vcpu, ret);
 	}
 
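From userspace, the invalidated vcpu shows up as a failing KVM_RUN, and the VMM can recover via KVM_ARM_VCPU_INIT exactly as the comment suggests. A hypothetical VMM-side sketch (vcpu_fd and the struct kvm_vcpu_init are assumed to be set up elsewhere; whether the failure surfaces as EINVAL on the fail-entry exit or ENOEXEC on re-entering an uninitialized vcpu is an assumption here, not confirmed by this diff):

#include <errno.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int run_vcpu(int vcpu_fd, struct kvm_vcpu_init *init)
{
	for (;;) {
		if (ioctl(vcpu_fd, KVM_RUN, 0) == 0)
			return 0;		/* normal exit, inspect kvm_run */
		if (errno != ENOEXEC && errno != EINVAL)
			return -1;		/* unrelated failure, give up */
		/* vcpu was marked invalid (e.g. caught in forbidden
		 * AArch32 EL0): re-init it and try again */
		if (ioctl(vcpu_fd, KVM_ARM_VCPU_INIT, init) < 0)
			return -1;
	}
}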
@@ -17,8 +17,6 @@ SYM_FUNC_START(__host_exit)
 
 	get_host_ctxt	x0, x1
 
-	ALTERNATIVE(nop, SET_PSTATE_PAN(1), ARM64_HAS_PAN, CONFIG_ARM64_PAN)
-
 	/* Store the host regs x2 and x3 */
 	stp	x2, x3,   [x0, #CPU_XREG_OFFSET(2)]
 
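Dropping the SET_PSTATE_PAN() alternative here is presumably the "Simplify host HYP entry" item from the tag message: nVHE EL2 runs in its own translation regime with no unprivileged (EL0) accesses to guard against, so setting PSTATE.PAN on entry from the host buys nothing.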
@@ -57,16 +57,25 @@ __do_hyp_init:
 	cmp	x0, #HVC_STUB_HCALL_NR
 	b.lo	__kvm_handle_stub_hvc
 
-	/* Set tpidr_el2 for use by HYP to free a register */
-	msr	tpidr_el2, x2
+	// We only actively check bits [24:31], and everything
+	// else has to be zero, which we check at build time.
+#if (KVM_HOST_SMCCC_FUNC(__kvm_hyp_init) & 0xFFFFFFFF00FFFFFF)
+#error Unexpected __KVM_HOST_SMCCC_FUNC___kvm_hyp_init value
+#endif
 
-	mov	x2, #KVM_HOST_SMCCC_FUNC(__kvm_hyp_init)
-	cmp	x0, x2
-	b.eq	1f
+	ror	x0, x0, #24
+	eor	x0, x0, #((KVM_HOST_SMCCC_FUNC(__kvm_hyp_init) >> 24) & 0xF)
+	ror	x0, x0, #4
+	eor	x0, x0, #((KVM_HOST_SMCCC_FUNC(__kvm_hyp_init) >> 28) & 0xF)
+	cbz	x0, 1f
 	mov	x0, #SMCCC_RET_NOT_SUPPORTED
 	eret
 
-1:	phys_to_ttbr x0, x1
+1:
+	/* Set tpidr_el2 for use by HYP to free a register */
+	msr	tpidr_el2, x2
+
+	phys_to_ttbr x0, x1
 alternative_if ARM64_HAS_CNP
 	orr	x0, x0, #TTBR_CNP_BIT
 alternative_else_nop_endif
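The ror/eor dance exists because the full 32-bit function ID is not encodable as an AArch64 logical immediate, while each 4-bit nibble of its top byte is: rotating the interesting bits down to bit 0 and XOR-cancelling them one nibble at a time leaves x0 zero exactly on a match. Unlike the old mov/cmp, it also needs no scratch register, which is why tpidr_el2 can now be written only after the call is recognised, instead of being clobbered up front on every HVC. A standalone C model of the check (the function ID value is a stand-in; only its bits [31:24] may be set, exactly what the build-time #error above enforces for the real constant):

#include <assert.h>
#include <stdint.h>

#define HYP_INIT_FUNC_ID 0xC6000000ULL	/* stand-in, not the real ID */

static uint64_t ror64(uint64_t v, unsigned int n)
{
	return (v >> n) | (v << (64 - n));	/* n must be 1..63 here */
}

/* Mirrors the ror/eor/ror/eor/cbz sequence from the hunk above. */
static int is_hyp_init_call(uint64_t x0)
{
	x0 = ror64(x0, 24);			/* bits [31:24] -> [7:0] */
	x0 ^= (HYP_INIT_FUNC_ID >> 24) & 0xF;	/* cancel bits [27:24] */
	x0 = ror64(x0, 4);			/* bits [7:4] -> [3:0] */
	x0 ^= (HYP_INIT_FUNC_ID >> 28) & 0xF;	/* cancel bits [31:28] */
	return x0 == 0;				/* cbz: zero iff exact match */
}

int main(void)
{
	assert(is_hyp_init_call(HYP_INIT_FUNC_ID));
	assert(!is_hyp_init_call(HYP_INIT_FUNC_ID | 1));
	assert(!is_hyp_init_call(0));
	return 0;
}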
@@ -128,7 +128,6 @@ void __kvm_tlb_flush_local_vmid(struct kvm_s2_mmu *mmu)
 	struct tlb_inv_context cxt;
 
 	/* Switch to requested VMID */
-	mmu = kern_hyp_va(mmu);
 	__tlb_switch_to_guest(mmu, &cxt);
 
 	__tlbi(vmalle1);
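The deleted kern_hyp_va() was a double conversion: by the time this function runs, the nVHE entry path has already translated the mmu pointer to a hyp VA, so converting it again produced a bogus pointer — the "stray pointer conversion on nVHE TLB invalidation" item in the tag message.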
@@ -635,7 +635,7 @@ static void stage2_flush_dcache(void *addr, u64 size)
 
 static bool stage2_pte_cacheable(kvm_pte_t pte)
 {
-	u64 memattr = FIELD_GET(KVM_PTE_LEAF_ATTR_LO_S2_MEMATTR, pte);
+	u64 memattr = pte & KVM_PTE_LEAF_ATTR_LO_S2_MEMATTR;
 
 	return memattr == PAGE_S2_MEMATTR(NORMAL);
 }
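This one-liner is the "Fix detection of cacheable mapping at S2" item: FIELD_GET() shifts the extracted field down to bit 0, while PAGE_S2_MEMATTR(NORMAL) produces the attribute value already in place within the descriptor, so the old comparison could never succeed. A self-contained illustration (the field layout constants are assumptions modelled on the stage-2 descriptor, not copied from the kernel headers):

#include <assert.h>
#include <stdint.h>

#define S2_MEMATTR_MASK   (0xFULL << 2)	/* MemAttr in bits [5:2] */
#define S2_MEMATTR_NORMAL (0xFULL << 2)	/* MemAttr=0b1111, in place */

/* FIELD_GET()-style extraction: mask, then shift down to bit 0. */
static uint64_t field_get(uint64_t mask, uint64_t val)
{
	return (val & mask) >> __builtin_ctzll(mask);
}

int main(void)
{
	uint64_t pte = S2_MEMATTR_NORMAL | 0x3;	/* valid, cacheable leaf */

	/* Broken check: 0xF (shifted down) vs 0x3C (in place) — never equal. */
	assert(field_get(S2_MEMATTR_MASK, pte) != S2_MEMATTR_NORMAL);

	/* Fixed check: keep the field in place on both sides. */
	assert((pte & S2_MEMATTR_MASK) == S2_MEMATTR_NORMAL);
	return 0;
}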
@@ -846,7 +846,7 @@ int kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm *kvm)
 	u32 start_level = VTCR_EL2_TGRAN_SL0_BASE - sl0;
 
 	pgd_sz = kvm_pgd_pages(ia_bits, start_level) * PAGE_SIZE;
-	pgt->pgd = alloc_pages_exact(pgd_sz, GFP_KERNEL | __GFP_ZERO);
+	pgt->pgd = alloc_pages_exact(pgd_sz, GFP_KERNEL_ACCOUNT | __GFP_ZERO);
 	if (!pgt->pgd)
 		return -ENOMEM;
 
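GFP_KERNEL_ACCOUNT is GFP_KERNEL | __GFP_ACCOUNT, so the stage-2 PGD pages are now charged to the caller's kernel memory cgroup like the rest of the VM's page-table allocations — the "Fix accounting of Stage-2 PGD allocation" item.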
@@ -787,14 +787,26 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 		vma_shift = PAGE_SHIFT;
 	}
 
-	if (vma_shift == PUD_SHIFT &&
-	    !fault_supports_stage2_huge_mapping(memslot, hva, PUD_SIZE))
-		vma_shift = PMD_SHIFT;
-
-	if (vma_shift == PMD_SHIFT &&
-	    !fault_supports_stage2_huge_mapping(memslot, hva, PMD_SIZE)) {
-		force_pte = true;
+	switch (vma_shift) {
+	case PUD_SHIFT:
+		if (fault_supports_stage2_huge_mapping(memslot, hva, PUD_SIZE))
+			break;
+		fallthrough;
+	case CONT_PMD_SHIFT:
+		vma_shift = PMD_SHIFT;
+		fallthrough;
+	case PMD_SHIFT:
+		if (fault_supports_stage2_huge_mapping(memslot, hva, PMD_SIZE))
+			break;
+		fallthrough;
+	case CONT_PTE_SHIFT:
 		vma_shift = PAGE_SHIFT;
+		force_pte = true;
+		fallthrough;
+	case PAGE_SHIFT:
+		break;
+	default:
+		WARN_ONCE(1, "Unknown vma_shift %d", vma_shift);
 	}
 
 	vma_pagesize = 1UL << vma_shift;
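The switch encodes a downgrade ladder for the "Fallback to PMD/PTE mappings for composite huge pages" item: each size that cannot be mapped at stage 2 falls through to the next smaller candidate, and the contiguous-hint sizes (CONT_PMD, CONT_PTE), which stage 2 never maps as blocks, downgrade unconditionally. A compact model (shift values assume 4K pages; supports_huge() is a stub standing in for fault_supports_stage2_huge_mapping()):

#include <assert.h>

/* Shift values as on arm64 with 4K pages (assumed for illustration). */
enum { SHIFT_PAGE = 12, SHIFT_CONT_PTE = 16, SHIFT_PMD = 21,
       SHIFT_CONT_PMD = 25, SHIFT_PUD = 30 };

/* Stub: pretend the memslot only lines up for PMD-sized mappings. */
static int supports_huge(unsigned long size)
{
	return size == (1UL << SHIFT_PMD);
}

/* Mirrors the downgrade ladder in the hunk above. */
static int pick_shift(int vma_shift, int *force_pte)
{
	switch (vma_shift) {
	case SHIFT_PUD:
		if (supports_huge(1UL << SHIFT_PUD))
			break;
		/* fallthrough */
	case SHIFT_CONT_PMD:
		vma_shift = SHIFT_PMD;
		/* fallthrough */
	case SHIFT_PMD:
		if (supports_huge(1UL << SHIFT_PMD))
			break;
		/* fallthrough */
	case SHIFT_CONT_PTE:
		vma_shift = SHIFT_PAGE;
		*force_pte = 1;
		/* fallthrough */
	case SHIFT_PAGE:
		break;
	}
	return vma_shift;
}

int main(void)
{
	int force_pte = 0;

	/* A PUD-sized VMA degrades to PMD here, without forcing PTEs... */
	assert(pick_shift(SHIFT_PUD, &force_pte) == SHIFT_PMD && !force_pte);
	/* ...while a CONT_PTE hint always ends up as single pages. */
	assert(pick_shift(SHIFT_CONT_PTE, &force_pte) == SHIFT_PAGE && force_pte);
	return 0;
}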
@@ -839,6 +851,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 
 	if (kvm_is_device_pfn(pfn)) {
 		device = true;
+		force_pte = true;
 	} else if (logging_active && !write_fault) {
 		/*
 		 * Only actually map the page as writable if this was a write
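Together with the switch above, this is the "Force PTE mapping on device pages provided via VFIO" fix: a VFIO-backed VMA may look hugepage-sized, but device memory should not be block-mapped at stage 2, so force_pte overrides whatever vma_shift was chosen and the mapping falls back to page granularity.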
@@ -1897,9 +1897,9 @@ static const struct sys_reg_desc cp14_regs[] = {
 	{ Op1( 0), CRn( 0), CRm( 1), Op2( 0), trap_raz_wi },
 	DBG_BCR_BVR_WCR_WVR(1),
 	/* DBGDCCINT */
-	{ Op1( 0), CRn( 0), CRm( 2), Op2( 0), trap_debug32 },
+	{ Op1( 0), CRn( 0), CRm( 2), Op2( 0), trap_debug32, NULL, cp14_DBGDCCINT },
 	/* DBGDSCRext */
-	{ Op1( 0), CRn( 0), CRm( 2), Op2( 2), trap_debug32 },
+	{ Op1( 0), CRn( 0), CRm( 2), Op2( 2), trap_debug32, NULL, cp14_DBGDSCRext },
 	DBG_BCR_BVR_WCR_WVR(2),
 	/* DBGDTR[RT]Xint */
 	{ Op1( 0), CRn( 0), CRm( 3), Op2( 0), trap_raz_wi },
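The two extra positional initialisers fill the descriptor's reset hook (NULL) and, crucially, its reg field: trap_debug32 indexes the vcpu's 32-bit copro shadow state by r->reg, and with reg left at its default of 0 these entries were hitting slot 0 instead of their own registers — the "Fix AArch32 handling of some of the debug registers" item. The new cp14_DBGVCR define in the kvm_host.h hunk above exists precisely to give DBGVCR such a slot, used in the next hunk.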
@@ -1914,7 +1914,7 @@ static const struct sys_reg_desc cp14_regs[] = {
 	{ Op1( 0), CRn( 0), CRm( 6), Op2( 2), trap_raz_wi },
 	DBG_BCR_BVR_WCR_WVR(6),
 	/* DBGVCR */
-	{ Op1( 0), CRn( 0), CRm( 7), Op2( 0), trap_debug32 },
+	{ Op1( 0), CRn( 0), CRm( 7), Op2( 0), trap_debug32, NULL, cp14_DBGVCR },
 	DBG_BCR_BVR_WCR_WVR(7),
 	DBG_BCR_BVR_WCR_WVR(8),
 	DBG_BCR_BVR_WCR_WVR(9),