arm: KVM: Allow unaligned accesses at HYP
We currently have the HSCTLR.A bit set, trapping unaligned accesses at HYP, but we're not really prepared to deal with it. Since the rest of the kernel is pretty happy about that, let's follow its example and set HSCTLR.A to zero. Modern CPUs don't really care.

Cc: stable@vger.kernel.org
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Christoffer Dall <cdall@linaro.org>
parent 78fd6dcf11
commit 33b5c38852
arch/arm/kvm/init.S
@@ -104,7 +104,6 @@ __do_hyp_init:
 	@ - Write permission implies XN: disabled
 	@ - Instruction cache: enabled
 	@ - Data/Unified cache: enabled
-	@ - Memory alignment checks: enabled
 	@ - MMU: enabled (this code must be run from an identity mapping)
 	mrc	p15, 4, r0, c1, c0, 0	@ HSCR
 	ldr	r2, =HSCTLR_MASK
@@ -112,8 +111,8 @@ __do_hyp_init:
 	mrc	p15, 0, r1, c1, c0, 0	@ SCTLR
 	ldr	r2, =(HSCTLR_EE | HSCTLR_FI | HSCTLR_I | HSCTLR_C)
 	and	r1, r1, r2
-ARM(	ldr	r2, =(HSCTLR_M | HSCTLR_A)			)
-THUMB(	ldr	r2, =(HSCTLR_M | HSCTLR_A | HSCTLR_TE)		)
+ARM(	ldr	r2, =(HSCTLR_M)					)
+THUMB(	ldr	r2, =(HSCTLR_M | HSCTLR_TE)			)
 	orr	r1, r1, r2
 	orr	r0, r0, r1
 	mcr	p15, 4, r0, c1, c0, 0	@ HSCR
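For readers who would rather see the effect in C, here is a minimal sketch of the HSCTLR value the patched sequence computes. It is illustrative only, not kernel code: the bit positions follow the architectural SCTLR/HSCTLR layout, and hsctlr_init_value, the thumb flag, and the hsctlr_mask parameter are invented stand-ins for the mrc/bic/mcr sequence and the kernel's HSCTLR_MASK definition.

#include <stdint.h>

/* Assumed HSCTLR bit positions (ARMv7 SCTLR/HSCTLR layout). */
#define HSCTLR_M   (1u << 0)   /* MMU enable */
#define HSCTLR_A   (1u << 1)   /* alignment checking: no longer ORed in */
#define HSCTLR_C   (1u << 2)   /* data/unified cache enable */
#define HSCTLR_I   (1u << 12)  /* instruction cache enable */
#define HSCTLR_FI  (1u << 21)  /* fast interrupts configuration */
#define HSCTLR_EE  (1u << 25)  /* exception endianness */
#define HSCTLR_TE  (1u << 30)  /* take exceptions in Thumb state */

/* r0 = current HSCTLR, r1 = SCTLR in the assembly; thumb picks the THUMB() variant. */
uint32_t hsctlr_init_value(uint32_t hsctlr, uint32_t sctlr,
                           uint32_t hsctlr_mask, int thumb)
{
	uint32_t v = hsctlr & ~hsctlr_mask;                  /* drop the bits this code sets  */
	uint32_t inherit = sctlr & (HSCTLR_EE | HSCTLR_FI |
	                            HSCTLR_I | HSCTLR_C);    /* and r1, r1, r2                */
	uint32_t force = HSCTLR_M | (thumb ? HSCTLR_TE : 0); /* HSCTLR_A deliberately omitted */

	return v | inherit | force;                          /* orr r1, r1, r2; orr r0, r0, r1 */
}

With HSCTLR_A left clear, unaligned accesses at HYP are handled by the hardware the same way they are in the rest of the kernel, instead of raising alignment faults that HYP isn't prepared to handle.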