For non-SMP, this uses the new random canary value that is stored in the
task struct whenever a new task is forked. Based on the ARM version in
df0698be14 and subject to the same
limitations: the variable GCC expects, __stack_chk_guard, is global,
so this will not work on SMP.
Quoting Nicolas Pitre <nico@fluxnic.net>: "One way to overcome this
GCC limitation would be to locate the __stack_chk_guard variable into
a memory page of its own for each CPU, and then use TLB locking to
have each CPU see its own page at the same virtual address for each of
them."
Signed-off-by: Gregory Fong <gregory.0xf0@gmail.com>
Cc: linux-mips@linux-mips.org
Patchwork: https://patchwork.linux-mips.org/patch/5488/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
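
For context, here is a minimal C sketch of the scheme the message describes,
assuming nothing beyond what is stated above: each task carries its own canary,
and the context switch publishes the incoming task's value through the single
guard variable that the compiler-emitted checks read. The struct task,
guard_word and switch_canary() names are invented for this example; in the
kernel the canary lives in the task struct and the compiler-visible symbol is
the global __stack_chk_guard, which is exactly what the resume() code below
loads and stores.

    /*
     * Illustrative sketch only -- not the kernel's implementation.
     * Non-SMP idea: a single global guard word that the compiler's
     * -fstack-protector prologue/epilogue compares against, refreshed
     * from the per-task canary at every context switch.  (The kernel
     * symbol is __stack_chk_guard; a plain name is used here to keep
     * the example freestanding.)
     */
    #include <stdio.h>

    static unsigned long guard_word;        /* stands in for __stack_chk_guard */

    struct task {                           /* hypothetical, simplified task struct */
            unsigned long stack_canary;     /* random value assigned at fork */
    };

    /* Mirrors what the CONFIG_CC_STACKPROTECTOR block in resume() does:
     * publish the incoming task's canary through the global guard. */
    static void switch_canary(const struct task *next)
    {
            guard_word = next->stack_canary;
    }

    int main(void)
    {
            struct task a = { .stack_canary = 0x12345678UL };
            struct task b = { .stack_canary = 0x9abcdef0UL };

            switch_canary(&a);
            printf("guard is now %#lx\n", guard_word);

            switch_canary(&b);
            printf("guard is now %#lx\n", guard_word);

            return 0;
    }

The stack-protector block in resume() below performs the same store in
assembly: it loads the address of __stack_chk_guard, fetches
TASK_STACK_CANARY(a1) from the next task, and writes it through.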
/*
 * r2300_switch.S: R2300 specific task switching code.
 *
 * Copyright (C) 1994, 1995, 1996, 1999 by Ralf Baechle
 * Copyright (C) 1994, 1995, 1996 by Andreas Busse
 *
 * Multi-cpu abstraction and macros for easier reading:
 *   Copyright (C) 1996 David S. Miller (davem@davemloft.net)
 *
 * Further modifications to make this work:
 *   Copyright (c) 1998-2000 Harald Koerfgen
 */
#include <asm/asm.h>
#include <asm/cachectl.h>
#include <asm/fpregdef.h>
#include <asm/mipsregs.h>
#include <asm/asm-offsets.h>
#include <asm/regdef.h>
#include <asm/stackframe.h>
#include <asm/thread_info.h>

#include <asm/asmmacro.h>

	.set	mips1
	.align	5

/*
 * Offset to the current process status flags, the first 32 bytes of the
 * stack are not used.
 */
#define ST_OFF	(_THREAD_SIZE - 32 - PT_SIZE + PT_STATUS)

/*
 * FPU context is saved iff the process has used its FPU in the current
 * time slice, as indicated by TIF_USEDFPU. In any case, the CU1 bit of the
 * user space STATUS register should be 0, so that a process *always* starts
 * its userland with the FPU disabled after each context switch.
 *
 * The FPU will be re-enabled as soon as the process accesses the FPU again,
 * through the do_cpu() trap.
 */

/*
 * task_struct *resume(task_struct *prev, task_struct *next,
 *		       struct thread_info *next_ti, int usedfpu)
 */
LEAF(resume)
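	/* o32 argument registers: a0 = prev, a1 = next, a2 = next_ti, a3 = usedfpu */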
	mfc0	t1, CP0_STATUS
	sw	t1, THREAD_STATUS(a0)
	cpu_save_nonscratch a0
	sw	ra, THREAD_REG31(a0)

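	/*
	 * Skip saving the FPU context if the outgoing task has not used
	 * the FPU in this time slice (a3 == usedfpu == 0).
	 */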
	beqz	a3, 1f

	PTR_L	t3, TASK_THREAD_INFO(a0)

	/*
	 * clear saved user stack CU1 bit
	 */
	lw	t0, ST_OFF(t3)
	li	t1, ~ST0_CU1
	and	t0, t0, t1
	sw	t0, ST_OFF(t3)

	fpu_save_single a0, t0			# clobbers t0

1:

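	/*
	 * Non-SMP stack protector: make the global __stack_chk_guard,
	 * which the GCC-emitted prologue/epilogue checks read, hold the
	 * incoming task's canary.
	 */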
#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
	PTR_L	t8, __stack_chk_guard
	LONG_L	t9, TASK_STACK_CANARY(a1)
	LONG_S	t9, 0(t8)
#endif

	/*
	 * The order in which the registers are restored takes care of the
	 * race when updating $28 (the thread_info pointer), $29 and
	 * kernelsp without disabling interrupts.
	 */
	move	$28, a2
	cpu_restore_nonscratch a1

	addiu	t1, $28, _THREAD_SIZE - 32
	sw	t1, kernelsp

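	/*
	 * Build the new STATUS value: take everything from the value saved
	 * in THREAD_STATUS(next) except the interrupt mask and enable bits
	 * (mask 0xff01 = IM7..IM0 | IE), which are kept from the current
	 * CP0_STATUS.
	 */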
	mfc0	t1, CP0_STATUS		/* Do we really need this? */
	li	a3, 0xff01
	and	t1, a3
	lw	a2, THREAD_STATUS(a1)
	nor	a3, $0, a3
	and	a2, a3
	or	a2, t1
	mtc0	a2, CP0_STATUS
	move	v0, a0
	jr	ra
	END(resume)

/*
 * Save a thread's fp context.
 */
LEAF(_save_fp)
	fpu_save_single a0, t1			# clobbers t1
	jr	ra
	END(_save_fp)

/*
 * Restore a thread's fp context.
 */
LEAF(_restore_fp)
	fpu_restore_single a0, t1		# clobbers t1
	jr	ra
	END(_restore_fp)

/*
 * Load the FPU with signalling NaNs. The bit pattern we use has the
 * property that it represents a signalling NaN whether it is interpreted
 * as single or as double precision.
 *
 * We initialize fcr31 to rounding to nearest, no exceptions.
 */

#define FPU_DEFAULT	0x00000000

LEAF(_init_fpu)
	mfc0	t0, CP0_STATUS
	li	t1, ST0_CU1
	or	t0, t1
	mtc0	t0, CP0_STATUS

	li	t1, FPU_DEFAULT
	ctc1	t1, fcr31

	li	t0, -1

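	/*
	 * Fill all 32 single-precision registers with the all-ones
	 * pattern described above (a signalling NaN in both single and
	 * double precision).
	 */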
	mtc1	t0, $f0
	mtc1	t0, $f1
	mtc1	t0, $f2
	mtc1	t0, $f3
	mtc1	t0, $f4
	mtc1	t0, $f5
	mtc1	t0, $f6
	mtc1	t0, $f7
	mtc1	t0, $f8
	mtc1	t0, $f9
	mtc1	t0, $f10
	mtc1	t0, $f11
	mtc1	t0, $f12
	mtc1	t0, $f13
	mtc1	t0, $f14
	mtc1	t0, $f15
	mtc1	t0, $f16
	mtc1	t0, $f17
	mtc1	t0, $f18
	mtc1	t0, $f19
	mtc1	t0, $f20
	mtc1	t0, $f21
	mtc1	t0, $f22
	mtc1	t0, $f23
	mtc1	t0, $f24
	mtc1	t0, $f25
	mtc1	t0, $f26
	mtc1	t0, $f27
	mtc1	t0, $f28
	mtc1	t0, $f29
	mtc1	t0, $f30
	mtc1	t0, $f31
	jr	ra
	END(_init_fpu)