UML was panicking in the case of failures of libc calls which shouldn't happen. This is an overreaction, since a failure from libc doesn't normally mean that kernel data structures are in an unknown state. Instead, the current process should just be killed if there is no way to recover.

The case that prompted this was a failure of PTRACE_SETREGS restoring the same state that was read by PTRACE_GETREGS. It appears that when a process tries to load a bogus value into a segment register, it segfaults (as expected) and the value is actually loaded and is seen by PTRACE_GETREGS (not expected).

This case is fixed by forcing a fatal SIGSEGV on the process so that it immediately dies. fatal_sigsegv was added for this purpose. It was declared noreturn, so in order to persuade gcc that it actually does not return, I added a call to os_dump_core (and declared that noreturn as well) so that I get a core file if somehow the process survives.

All other calls in arch/um/os-Linux/skas/process.c got the same treatment, with failures causing the process to die instead of a kernel panic, with some exceptions. userspace_tramp exits with status 1 if anything goes wrong there; that will cause start_userspace to return an error. copy_context_skas0 and map_stub_pages also now return errors instead of panicking. Callers of these functions were changed to check for errors and do something appropriate, usually returning an error to their own callers. check_skas3_ptrace_faultinfo just exits, since that's too early to do anything else.

save_registers, restore_registers, and init_registers now return status instead of panicking on failure, with their callers doing something appropriate.

There were also duplicate declarations of save_registers and restore_registers in os.h - these are gone.

I noticed and fixed up some whitespace damage.

Signed-off-by: Jeff Dike <jdike@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
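A sketch of what the new fatal_sigsegv might look like, going by the description above; force_sigsegv and do_signal here are assumptions about the surrounding kernel helpers, not code taken from the patch itself:

/*
 * Sketch only, assuming the usual UML kernel-side helpers.
 * os_dump_core is the noreturn core-dumping call the message
 * mentions: it convinces gcc that this function never returns,
 * and leaves a core file if the process somehow survives.
 */
void fatal_sigsegv(void)
{
	force_sigsegv(SIGSEGV, current);	/* queue a fatal SIGSEGV */
	do_signal();				/* deliver it; the process dies here */
	os_dump_core();				/* noreturn - reached only if the above fails */
}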
161 lines
3.7 KiB
C
/*
 * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Licensed under the GPL
 */

#include "linux/mm.h"
#include "linux/sched.h"
#include "asm/pgalloc.h"
#include "asm/pgtable.h"
#include "as-layout.h"
#include "os.h"
#include "skas.h"

extern int __syscall_stub_start;

static int init_stub_pte(struct mm_struct *mm, unsigned long proc,
			 unsigned long kernel)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = pgd_offset(mm, proc);
	pud = pud_alloc(mm, pgd, proc);
	if (!pud)
		goto out;

	pmd = pmd_alloc(mm, pud, proc);
	if (!pmd)
		goto out_pmd;

	pte = pte_alloc_map(mm, pmd, proc);
	if (!pte)
		goto out_pte;

	/*
	 * There's an interaction between the skas0 stub pages, stack
	 * randomization, and the BUG at the end of exit_mmap.  exit_mmap
	 * checks that the number of page tables freed is the same as had
	 * been allocated.  If the stack is on the last page table page,
	 * then the stack pte page will be freed, and if not, it won't.  To
	 * avoid having to know where the stack is, or if the process mapped
	 * something at the top of its address space for some other reason,
	 * we set TASK_SIZE to end at the start of the last page table.
	 * This keeps exit_mmap off the last page, but introduces a leak
	 * of that page.  So, we hang onto it here and free it in
	 * destroy_context.
	 */
	mm->context.last_page_table = pmd_page_vaddr(*pmd);
#ifdef CONFIG_3_LEVEL_PGTABLES
	mm->context.last_pmd = (unsigned long) __va(pud_val(*pud));
#endif

	*pte = mk_pte(virt_to_page(kernel), __pgprot(_PAGE_PRESENT));
	*pte = pte_mkread(*pte);
	return 0;

 out_pte:
	pmd_free(mm, pmd);
 out_pmd:
	pud_free(mm, pud);
 out:
	return -ENOMEM;
}

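/*
 * init_new_context creates the host-side address space for a new mm.
 * With /proc/mm support (proc_mm), new_mm() hands back a file
 * descriptor for the address space; otherwise a host process is set
 * up, either by forking an existing context (copy_context_skas0) or
 * by starting a fresh one (start_userspace).  Both of those now
 * return negative error codes instead of panicking, which is why the
 * pid is checked below.
 */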
int init_new_context(struct task_struct *task, struct mm_struct *mm)
{
	struct mm_context *from_mm = NULL;
	struct mm_context *to_mm = &mm->context;
	unsigned long stack = 0;
	int ret = -ENOMEM;

	if (skas_needs_stub) {
		stack = get_zeroed_page(GFP_KERNEL);
		if (stack == 0)
			goto out;

		/*
		 * This zeros the entry that pgd_alloc didn't, needed since
		 * we are about to reinitialize it, and want mm.nr_ptes to
		 * be accurate.
		 */
		mm->pgd[USER_PTRS_PER_PGD] = __pgd(0);

		ret = init_stub_pte(mm, STUB_CODE,
				    (unsigned long) &__syscall_stub_start);
		if (ret)
			goto out_free;

		ret = init_stub_pte(mm, STUB_DATA, stack);
		if (ret)
			goto out_free;

		mm->nr_ptes--;
	}

	to_mm->id.stack = stack;
	if (current->mm != NULL && current->mm != &init_mm)
		from_mm = &current->mm->context;

	if (proc_mm) {
		ret = new_mm(stack);
		if (ret < 0) {
			printk(KERN_ERR "init_new_context_skas - "
			       "new_mm failed, errno = %d\n", ret);
			goto out_free;
		}
		to_mm->id.u.mm_fd = ret;
	}
	else {
		if (from_mm)
			to_mm->id.u.pid = copy_context_skas0(stack,
							     from_mm->id.u.pid);
		else to_mm->id.u.pid = start_userspace(stack);

		if (to_mm->id.u.pid < 0) {
			ret = to_mm->id.u.pid;
			goto out_free;
		}
	}

	ret = init_new_ldt(to_mm, from_mm);
	if (ret < 0) {
		printk(KERN_ERR "init_new_context_skas - init_ldt"
		       " failed, errno = %d\n", ret);
		goto out_free;
	}

	return 0;

 out_free:
	if (to_mm->id.stack != 0)
		free_page(to_mm->id.stack);
 out:
	return ret;
}

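/*
 * destroy_context tears down what init_new_context set up: the
 * /proc/mm descriptor or the ptraced host process, the stub stack
 * page, and the last page table page that init_stub_pte deliberately
 * kept out of exit_mmap's hands (see the comment in init_stub_pte).
 */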
void destroy_context(struct mm_struct *mm)
{
	struct mm_context *mmu = &mm->context;

	if (proc_mm)
		os_close_file(mmu->id.u.mm_fd);
	else
		os_kill_ptraced_process(mmu->id.u.pid, 1);

	if (!proc_mm || !ptrace_faultinfo) {
		free_page(mmu->id.stack);
		pte_lock_deinit(virt_to_page(mmu->last_page_table));
		pte_free_kernel(mm, (pte_t *) mmu->last_page_table);
		dec_zone_page_state(virt_to_page(mmu->last_page_table), NR_PAGETABLE);
#ifdef CONFIG_3_LEVEL_PGTABLES
		pmd_free(mm, (pmd_t *) mmu->last_pmd);
#endif
	}

	free_ldt(mmu);
}