commit ec0d7f18ab

Pull fpu state cleanups from Ingo Molnar:
 "This tree streamlines further aspects of FPU handling by eliminating
  the prepare_to_copy() complication and moving that logic to
  arch_dup_task_struct().

  It also fixes the FPU dumps in threaded core dumps, removes an old
  (and now invalid) assumption, plus micro-optimizes the exit path by
  avoiding an FPU save for dead tasks."

Fixed up trivial add-add conflict in arch/sh/kernel/process.c that came
in because we now do the FPU handling in arch_dup_task_struct() rather
than the legacy (and now gone) prepare_to_copy().

* 'x86-fpu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86, fpu: drop the fpu state during thread exit
  x86, xsave: remove thread_has_fpu() bug check in __sanitize_i387_state()
  coredump: ensure the fpu state is flushed for proper multi-threaded core dump
  fork: move the real prepare_to_copy() users to arch_dup_task_struct()
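The last item in that list is what shaped the file below. As a hedged sketch of the change, here is roughly what the generic fork path in kernel/fork.c looks like around this series (simplified and abbreviated from memory, not verbatim kernel code; error handling trimmed):

/*
 * Sketch of the core fork path around this change (simplified from
 * kernel/fork.c; not verbatim).
 */
static struct task_struct *dup_task_struct(struct task_struct *orig)
{
        struct task_struct *tsk;

        tsk = alloc_task_struct_node(tsk_fork_get_node(orig));
        if (!tsk)
                return NULL;

        /*
         * Before this series the generic code called prepare_to_copy(orig)
         * here to flush lazy FPU state.  That hook is gone; each arch now
         * does the flush inside its own arch_dup_task_struct(), as the
         * SuperH version below does with unlazy_fpu().
         */
        if (arch_dup_task_struct(tsk, orig))
                goto free_tsk;

        return tsk;

free_tsk:
        free_task_struct(tsk);
        return NULL;
}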
arch/sh/kernel/process.c · 76 lines · 1.6 KiB · C
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/export.h>
#include <linux/stackprotector.h>
struct kmem_cache *task_xstate_cachep = NULL;
unsigned int xstate_size;

#ifdef CONFIG_CC_STACKPROTECTOR
unsigned long __stack_chk_guard __read_mostly;
EXPORT_SYMBOL(__stack_chk_guard);
#endif
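For context on the block above: with CONFIG_CC_STACKPROTECTOR, GCC's -fstack-protector instruments functions against stack smashing, and the kernel supplies the reference canary in __stack_chk_guard. A conceptual sketch of what the compiler emits per protected function (this is an illustration of the mechanism, not code the kernel contains):

/*
 * Conceptual sketch of compiler-emitted stack-protector code;
 * example() and buf are illustrative, not kernel names.
 */
void example(void)
{
        unsigned long canary = __stack_chk_guard; /* placed near the return address */
        char buf[64];

        /* ... function body; overflowing buf would clobber canary ... */

        if (canary != __stack_chk_guard)
                __stack_chk_fail();               /* canary smashed: abort */
}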
/*
 * this gets called so that we can store lazy state into memory and copy the
 * current task into the new thread.
 */
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
#ifdef CONFIG_SUPERH32
        unlazy_fpu(src, task_pt_regs(src));
#endif
        *dst = *src;

        if (src->thread.xstate) {
                dst->thread.xstate = kmem_cache_alloc(task_xstate_cachep,
                                                      GFP_KERNEL);
                if (!dst->thread.xstate)
                        return -ENOMEM;
                memcpy(dst->thread.xstate, src->thread.xstate, xstate_size);
        }

        return 0;
}
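Note the ordering: unlazy_fpu() must run before the *dst = *src assignment and the memcpy(), otherwise the copy would capture a stale xstate image while the current register values still live in the FPU. A minimal sketch of what the flush is assumed to do (the real unlazy_fpu() lives in arch/sh/include/asm/fpu.h and differs in detail; the predicate below is a hypothetical name):

/*
 * Hypothetical sketch of the lazy-FPU flush this function relies on;
 * task_owns_live_fpu_regs() is an illustrative name, not the real check.
 */
static inline void unlazy_fpu_sketch(struct task_struct *tsk,
                                     struct pt_regs *regs)
{
        if (task_owns_live_fpu_regs(tsk)) {
                save_fpu(tsk);          /* spill live FPU regs into tsk->thread.xstate */
                release_fpu(regs);      /* disable FPU access so a later use refaults */
        }
}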
void free_thread_xstate(struct task_struct *tsk)
{
        if (tsk->thread.xstate) {
                kmem_cache_free(task_xstate_cachep, tsk->thread.xstate);
                tsk->thread.xstate = NULL;
        }
}
void arch_release_task_struct(struct task_struct *tsk)
{
        free_thread_xstate(tsk);
}
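This hook closes the lifecycle opened in arch_dup_task_struct(): the generic task teardown calls it so the per-thread xstate allocation is returned to the cache before the task_struct itself is freed. A simplified sketch of that teardown (abbreviated from the kernel/fork.c free path; not verbatim):

/*
 * Sketch of the generic teardown that reaches this hook
 * (release_task_struct_sketch is an illustrative name).
 */
static void release_task_struct_sketch(struct task_struct *tsk)
{
        arch_release_task_struct(tsk);  /* arch hook: frees tsk->thread.xstate above */
        free_task_struct(tsk);          /* returns the task_struct to its own cache */
}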
void arch_task_cache_init(void)
{
        if (!xstate_size)
                return;

        task_xstate_cachep = kmem_cache_create("task_xstate", xstate_size,
                                               __alignof__(union thread_xstate),
                                               SLAB_PANIC | SLAB_NOTRACK, NULL);
}
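Two things worth noting here: SLAB_PANIC means kmem_cache_create() panics rather than returning NULL on failure, so no NULL check is needed; and the cache can only be sized after xstate_size has been chosen, so init_thread_xstate() (below) must run first. A hypothetical ordering sketch (the real call sites are in the sh boot code; boot_order_sketch is an illustrative name):

/* Hypothetical boot-ordering sketch: size first, then create the cache. */
static void boot_order_sketch(void)
{
        init_thread_xstate();           /* picks hard-FPU, soft-FPU, or 0 */
        arch_task_cache_init();         /* sizes "task_xstate", or skips it */
}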
#ifdef CONFIG_SH_FPU_EMU
# define HAVE_SOFTFP    1
#else
# define HAVE_SOFTFP    0
#endif
void __cpuinit init_thread_xstate(void)
{
        if (boot_cpu_data.flags & CPU_HAS_FPU)
                xstate_size = sizeof(struct sh_fpu_hard_struct);
        else if (HAVE_SOFTFP)
                xstate_size = sizeof(struct sh_fpu_soft_struct);
        else
                xstate_size = 0;
}
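The two sizeof() candidates are the halves of union thread_xstate, whose definition (found elsewhere in the sh headers; reproduced from memory, so treat as illustrative) is roughly:

/* Roughly the union the "task_xstate" cache stores. */
union thread_xstate {
        struct sh_fpu_hard_struct hardfpu;      /* real FPU register image */
        struct sh_fpu_soft_struct softfpu;      /* CONFIG_SH_FPU_EMU soft state */
};

Sizing the cache from the active member rather than sizeof(union thread_xstate) keeps soft-FPU configurations from paying for the larger hard-FPU image, and leaving xstate_size at 0 on FPU-less parts makes arch_task_cache_init() skip cache creation entirely, so thread.xstate simply stays NULL.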