powerpc/32: Warn and return ENOSYS on syscalls from kernel

Since commit b86fb88855 ("powerpc/32: implement fast entry for
syscalls on non BOOKE") and commit 1a4b739bbb ("powerpc/32:
implement fast entry for syscalls on BOOKE"), syscalls from the
kernel are unexpected and can have catastrophic consequences, as
they will destroy the kernel stack.

Test MSR_PR on syscall entry. If the syscall comes from the kernel,
emit a warning and return an ENOSYS error.

Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/8ee3bdbbdfdfc64ca7001e90c43b2aee6f333578.1580470482.git.christophe.leroy@c-s.fr
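
For readers skimming the diff, here is a rough C-level model of the new
check. It is only an illustration, not code from the patch: the real logic
is the `andi. r11, r9, MSR_PR` / `beq- 99f` test and the
`ret_from_kernel_syscall` stub shown in the diff below, and every name in
this sketch is made up for the example.

/*
 * Illustrative sketch only -- not kernel code.  Models what the new
 * SYSCALL_ENTRY check does: if MSR_PR was clear when "sc" was executed,
 * the syscall came from kernel mode, so warn and fail with ENOSYS
 * instead of building a syscall frame on the kernel stack.
 */
#include <stdbool.h>
#include <stdio.h>

#define MSR_PR 0x4000   /* MSR "problem state" bit: set = user mode */
#define ENOSYS 38       /* errno value returned in r3, with CR0[SO] set */

struct sc_result {
	long r3;        /* return value register */
	bool so;        /* CR0[SO]: the powerpc "syscall failed" flag */
};

static struct sc_result syscall_entry(unsigned long srr1)
{
	struct sc_result res = { .r3 = 0, .so = false };

	if (!(srr1 & MSR_PR)) {
		/* ret_from_kernel_syscall: warn once, then fail the call */
		fprintf(stderr, "WARNING: syscall issued from kernel mode\n");
		res.r3 = ENOSYS;
		res.so = true;
		return res;
	}

	/* normal path: switch to the kernel stack and dispatch the syscall */
	return res;
}

int main(void)
{
	struct sc_result r = syscall_entry(0);          /* MSR_PR clear */
	printf("r3=%ld SO=%d\n", r.r3, (int)r.so);      /* expect r3=38 SO=1 */
	return 0;
}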
commit 9e27086292 (parent 030e347430)
Author: Christophe Leroy, 2020-01-31 11:34:54 +0000
Committer: Michael Ellerman
 3 files changed, 40 insertions(+), 8 deletions(-)

--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -575,6 +575,33 @@ syscall_exit_work:
 	bl	do_syscall_trace_leave
 	b	ret_from_except_full
 
+	/*
+	 * System call was called from kernel. We get here with SRR1 in r9.
+	 * Mark the exception as recoverable once we have retrieved SRR0,
+	 * trap a warning and return ENOSYS with CR[SO] set.
+	 */
+	.globl	ret_from_kernel_syscall
+ret_from_kernel_syscall:
+	mfspr	r9, SPRN_SRR0
+	mfspr	r10, SPRN_SRR1
+#if !defined(CONFIG_4xx) && !defined(CONFIG_BOOKE)
+	LOAD_REG_IMMEDIATE(r11, MSR_KERNEL & ~(MSR_IR|MSR_DR))
+	mtmsr	r11
+#endif
+
+0:	trap
+	EMIT_BUG_ENTRY 0b,__FILE__,__LINE__, BUGFLAG_WARNING
+
+	li	r3, ENOSYS
+	crset	so
+#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
+	mtspr	SPRN_NRI, r0
+#endif
+	mtspr	SPRN_SRR0, r9
+	mtspr	SPRN_SRR1, r10
+	SYNC
+	RFI
+
 /*
  * The fork/clone functions need to copy the full register set into
  * the child process. Therefore we need to save all the nonvolatile

--- a/arch/powerpc/kernel/head_32.h
+++ b/arch/powerpc/kernel/head_32.h
@@ -111,14 +111,16 @@
 .macro SYSCALL_ENTRY trapno
 	mfspr	r12,SPRN_SPRG_THREAD
+	mfspr	r9, SPRN_SRR1
 #ifdef CONFIG_VMAP_STACK
-	mfspr	r9, SPRN_SRR0
-	mfspr	r11, SPRN_SRR1
-	stw	r9, SRR0(r12)
-	stw	r11, SRR1(r12)
+	mfspr	r11, SPRN_SRR0
+	stw	r11, SRR0(r12)
+	stw	r9, SRR1(r12)
 #endif
 	mfcr	r10
+	andi.	r11, r9, MSR_PR
 	lwz	r11,TASK_STACK-THREAD(r12)
+	beq-	99f
 	rlwinm	r10,r10,0,4,2	/* Clear SO bit in CR */
 	addi	r11, r11, THREAD_SIZE - INT_FRAME_SIZE
 #ifdef CONFIG_VMAP_STACK
@@ -128,15 +130,14 @@
 #endif
 	tovirt_vmstack r12, r12
 	tophys_novmstack r11, r11
-	mflr	r9
 	stw	r10,_CCR(r11)		/* save registers */
-	stw	r9, _LINK(r11)
+	mflr	r10
+	stw	r10, _LINK(r11)
 #ifdef CONFIG_VMAP_STACK
 	lwz	r10, SRR0(r12)
 	lwz	r9, SRR1(r12)
 #else
 	mfspr	r10,SPRN_SRR0
-	mfspr	r9,SPRN_SRR1
 #endif
 	stw	r1,GPR1(r11)
 	stw	r1,0(r11)
@@ -209,6 +210,7 @@
 	mtspr	SPRN_SRR0,r11
 	SYNC
 	RFI				/* jump to handler, enable MMU */
+99:	b	ret_from_kernel_syscall
 .endm
 
 .macro save_dar_dsisr_on_stack reg1, reg2, sp

--- a/arch/powerpc/kernel/head_booke.h
+++ b/arch/powerpc/kernel/head_booke.h
@@ -104,16 +104,18 @@ FTR_SECTION_ELSE
 #ifdef CONFIG_KVM_BOOKE_HV
 ALT_FTR_SECTION_END_IFSET(CPU_FTR_EMB_HV)
 #endif
+	mfspr	r9, SPRN_SRR1
 	BOOKE_CLEAR_BTB(r11)
+	andi.	r11, r9, MSR_PR
 	lwz	r11, TASK_STACK - THREAD(r10)
 	rlwinm	r12,r12,0,4,2	/* Clear SO bit in CR */
+	beq-	99f
 	ALLOC_STACK_FRAME(r11, THREAD_SIZE - INT_FRAME_SIZE)
 	stw	r12, _CCR(r11)		/* save various registers */
 	mflr	r12
 	stw	r12,_LINK(r11)
 	mfspr	r12,SPRN_SRR0
 	stw	r1, GPR1(r11)
-	mfspr	r9,SPRN_SRR1
 	stw	r1, 0(r11)
 	mr	r1, r11
 	stw	r12,_NIP(r11)
@@ -176,6 +178,7 @@ ALT_FTR_SECTION_END_IFSET(CPU_FTR_EMB_HV)
 	mtspr	SPRN_SRR0,r11
 	SYNC
 	RFI			/* jump to handler, enable MMU */
+99:	b	ret_from_kernel_syscall
 .endm
 
 /* To handle the additional exception priority levels on 40x and Book-E
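
As a postscript (not part of the commit): one conceivable way to exercise
the new ret_from_kernel_syscall path is a throwaway kernel module that
deliberately executes `sc` in kernel mode and then inspects r3 and CR0[SO].
This is only a sketch under the assumption of a 32-bit powerpc build; the
module name, file layout and clobber list are illustrative and not taken
from the kernel tree.

// SPDX-License-Identifier: GPL-2.0
/* Hypothetical test module: with this patch applied, loading it should
 * log a WARNING and print r3 == 38 (ENOSYS) with CR0.SO == 1.  Without
 * the patch, the stray "sc" would instead corrupt the kernel stack.
 * The syscall number is irrelevant; the call is rejected before dispatch.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <asm/unistd.h>

static int __init sc_from_kernel_init(void)
{
	register unsigned long r0 asm("r0") = __NR_getpid;
	register long r3 asm("r3") = 0;
	unsigned long cr;

	asm volatile("sc\n\t"
		     "mfcr %1"
		     : "+r" (r3), "=r" (cr), "+r" (r0)
		     :
		     : "r4", "r5", "r6", "r7", "r8", "r9", "r10", "r11", "r12",
		       "cr0", "ctr", "xer", "memory");

	/* CR0 is the top nibble of CR; SO is its least significant bit */
	pr_info("sc from kernel mode: r3=%ld CR0.SO=%lu\n", r3, (cr >> 28) & 1);
	return 0;
}

static void __exit sc_from_kernel_exit(void) { }

module_init(sc_from_kernel_init);
module_exit(sc_from_kernel_exit);
MODULE_LICENSE("GPL");

Loading such a module is purely a verification aid; the expected output in
dmesg is the warning emitted via the EMIT_BUG_ENTRY trap, followed by the
pr_info line reporting the ENOSYS return.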