powerpc/book3s64/keys/kuap: Reset AMR/IAMR values on kexec
As we kexec across kernels that use AMR/IAMR for different purposes, we need to ensure that new kernels get kexec'd with a reset value of AMR/IAMR. For example, the new kernel may use key 0 for its kernel mapping while the old AMR value prevents access to key 0.

This patch also removes the reset of IAMR and AMOR in kexec_sequence. The AMOR reset is not needed, and the IAMR reset was partial (it was not done on secondary CPUs) and is made redundant by this patch.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20200709032946.881753-19-aneesh.kumar@linux.ibm.com
parent 7cdd3745f2
commit 000a42b35a
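To make the failure mode described above concrete: the AMR carries two disable bits per storage key, with key 0 in the most significant pair, so a stale value inherited from the old kernel can fault every access the new kernel makes through key 0. The following is a minimal standalone sketch of that layout, not kernel code; the macro names, the helper, and the exact bit numbering are assumptions for illustration only (the ISA and the arch/powerpc headers are authoritative).

#include <stdio.h>

/* Illustrative layout: 2 disable bits per key, key 0 in the top pair (assumed). */
#define KEY_BITS	2
#define KEY0_SHIFT	(64 - KEY_BITS)

/* Hypothetical helper: mask of the AMR bits that govern a given key. */
static unsigned long amr_key_mask(unsigned int key)
{
	return 3UL << (KEY0_SHIFT - key * KEY_BITS);
}

int main(void)
{
	/* An old kernel that restricted key 0 for its own use might leave this behind. */
	unsigned long stale_amr = amr_key_mask(0);

	printf("stale AMR blocking key 0  : 0x%016lx\n", stale_amr);
	printf("value handed to new kernel: 0x%016lx\n", 0UL);	/* what reset_sprs() writes */
	return 0;
}

Clearing the register to 0, as the reset_sprs() helper introduced below does, removes any such restriction before the new kernel starts.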
arch/powerpc/include/asm/book3s/64/kexec.h (new file)
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _ASM_POWERPC_BOOK3S_64_KEXEC_H_
+#define _ASM_POWERPC_BOOK3S_64_KEXEC_H_
+
+
+#define reset_sprs reset_sprs
+static inline void reset_sprs(void)
+{
+	if (cpu_has_feature(CPU_FTR_ARCH_206)) {
+		mtspr(SPRN_AMR, 0);
+		mtspr(SPRN_UAMOR, 0);
+	}
+
+	if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
+		mtspr(SPRN_IAMR, 0);
+	}
+
+	/* Do we need isync()? We are going via a kexec reset */
+	isync();
+}
+
+#endif
arch/powerpc/include/asm/kexec.h
@@ -150,6 +150,18 @@ static inline void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *))
 }
 
 #endif /* CONFIG_KEXEC_CORE */
+
+#ifdef CONFIG_PPC_BOOK3S_64
+#include <asm/book3s/64/kexec.h>
+#endif
+
+#ifndef reset_sprs
+#define reset_sprs reset_sprs
+static inline void reset_sprs(void)
+{
+}
+#endif
+
 #endif /* ! __ASSEMBLY__ */
 #endif /* __KERNEL__ */
 #endif /* _ASM_POWERPC_KEXEC_H */
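The #define reset_sprs reset_sprs plus #ifndef reset_sprs pairing above is the usual kernel idiom for arch-overridable inline helpers: defining the macro to its own name lets the generic header detect, at preprocessing time, that an architecture already supplied an implementation and skip the empty stub. Below is a stripped-down, compilable sketch of the idiom; the single-file layout and printf body are illustrative, not from the kernel.

#include <stdio.h>

/* "Arch" half: comment this block out and the generic stub below takes over. */
#define reset_sprs reset_sprs
static inline void reset_sprs(void)
{
	printf("arch override: clearing SPRs\n");
}

/* "Generic" half: compiled only when no arch header defined reset_sprs. */
#ifndef reset_sprs
#define reset_sprs reset_sprs
static inline void reset_sprs(void)
{
	/* nothing to clear on this architecture */
}
#endif

int main(void)
{
	reset_sprs();	/* called from the kexec shutdown paths in the real code */
	return 0;
}

In the real headers the two halves live in book3s/64/kexec.h and asm/kexec.h respectively, exactly as in the hunks above.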
arch/powerpc/kernel/misc_64.S
@@ -413,20 +413,6 @@ _GLOBAL(kexec_sequence)
 	li	r0,0
 	std	r0,16(r1)
 
-BEGIN_FTR_SECTION
-	/*
-	 * This is the best time to turn AMR/IAMR off.
-	 * key 0 is used in radix for supervisor<->user
-	 * protection, but on hash key 0 is reserved
-	 * ideally we want to enter with a clean state.
-	 * NOTE, we rely on r0 being 0 from above.
-	 */
-	mtspr	SPRN_IAMR,r0
-BEGIN_FTR_SECTION_NESTED(42)
-	mtspr	SPRN_AMOR,r0
-END_FTR_SECTION_NESTED_IFSET(CPU_FTR_HVMODE, 42)
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
-
 	/* save regs for local vars on new stack.
 	 * yes, we won't go back, but ...
 	 */
arch/powerpc/kexec/core_64.c
@@ -152,6 +152,8 @@ static void kexec_smp_down(void *arg)
 	if (ppc_md.kexec_cpu_down)
 		ppc_md.kexec_cpu_down(0, 1);
 
+	reset_sprs();
+
 	kexec_smp_wait();
 	/* NOTREACHED */
 }
arch/powerpc/mm/book3s64/pgtable.c
@@ -15,6 +15,7 @@
 #include <asm/powernv.h>
 #include <asm/firmware.h>
 #include <asm/ultravisor.h>
+#include <asm/kexec.h>
 
 #include <mm/mmu_decl.h>
 #include <trace/events/thp.h>
@@ -165,6 +166,8 @@ void mmu_cleanup_all(void)
 		radix__mmu_cleanup_all();
 	else if (mmu_hash_ops.hpte_clear_all)
 		mmu_hash_ops.hpte_clear_all();
+
+	reset_sprs();
 }
 
 #ifdef CONFIG_MEMORY_HOTPLUG