9786a8f3cb
This patch implements the clone-stub mechanism, which allows skas0 to run with proc_mm==0 even if the C library in UML uses modify_ldt.

Note: there is a bug in the skas3.v7 host patch that prevents UML-skas from running properly on an SMP box. In full skas3 I never really saw problems, but in skas0 they showed up.

More commentary by jdike - What this patch does is make sure that the host parent of each new host process matches the UML parent of the corresponding UML process. This ensures that any changed LDTs are inherited. This is done by having clone actually called by the UML process from its stub, rather than by the kernel.

We have special syscall stubs that are loaded onto the stub code page because that code must be completely self-contained. These stubs are given C interfaces and are used like normal C functions, but there are subtleties. Principally, we have to be careful about stack variables in stub_clone_handler after the clone. The code is written so that there aren't any - everything boils down to a fixed address. If there were any locals, references to them after the clone would be wrong, because the stack just changed.

Signed-off-by: Bodo Stroesser <bstroesser@fujitsu-siemens.com>
Signed-off-by: Jeff Dike <jdike@addtoit.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
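As a rough illustration of the stack discipline described above, here is a minimal sketch of a self-contained clone stub. It is not the stub_clone_handler added by this patch: the fixed address STUB_DATA, the stub_syscall2() helper, the constants, and the i386 int $0x80 convention are assumptions made for the example, the real handler performs more setup after the clone, and the sketch glosses over the care needed to keep the compiler from spilling anything to the stack.

/* Illustrative sketch only - not the stub code added by this patch.
 * STUB_DATA, stub_syscall2() and the constants below are stand-ins.
 */
#define STUB_DATA       0xbffff000UL    /* assumed address of the stub data page */
#define STUB_PAGE_SIZE  4096UL

#define CLONE_PARENT    0x00008000      /* flag values as documented in clone(2) */
#define CLONE_FILES     0x00000400
#define SIGCHLD         17
#define __NR_clone      120             /* i386 syscall number */

/* Raw two-argument syscall via int $0x80.  The stub cannot call into
 * libc, because the stub code page must be completely self-contained.
 */
static inline long stub_syscall2(long nr, long arg1, long arg2)
{
        long ret;

        __asm__ volatile ("int $0x80"
                          : "=a" (ret)
                          : "0" (nr), "b" (arg1), "c" (arg2)
                          : "memory");
        return ret;
}

void stub_clone_handler(void)
{
        /* The clone result goes straight to a fixed address in the stub
         * data page rather than into a stack variable: the child now runs
         * on a different stack, so any reference to the old stack after
         * the clone would be wrong.
         */
        *(long *) STUB_DATA =
                stub_syscall2(__NR_clone,
                              CLONE_PARENT | CLONE_FILES | SIGCHLD,
                              STUB_DATA + STUB_PAGE_SIZE - sizeof(long));
}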
146 lines
3.4 KiB
C
/*
 * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
 * Licensed under the GPL
 */

#include "linux/config.h"
#include "linux/sched.h"
#include "linux/list.h"
#include "linux/spinlock.h"
#include "linux/slab.h"
#include "linux/errno.h"
#include "linux/mm.h"
#include "asm/current.h"
#include "asm/segment.h"
#include "asm/mmu.h"
#include "asm/pgalloc.h"
#include "asm/pgtable.h"
#include "os.h"
#include "skas.h"

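/* __syscall_stub_start marks the page of stub code linked into the UML
 * kernel image; init_new_context_skas maps that page into each new
 * address space at CONFIG_STUB_CODE.
 */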
extern int __syscall_stub_start;
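/* Map the single kernel page 'kernel' read-only and executable at the
 * process virtual address 'proc', hand-building the pgd/pud/pmd/pte
 * entries under mm->page_table_lock.
 */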
static int init_stub_pte(struct mm_struct *mm, unsigned long proc,
                         unsigned long kernel)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        spin_lock(&mm->page_table_lock);
        pgd = pgd_offset(mm, proc);
        pud = pud_alloc(mm, pgd, proc);
        if (!pud)
                goto out;

        pmd = pmd_alloc(mm, pud, proc);
        if (!pmd)
                goto out_pmd;

        pte = pte_alloc_map(mm, pmd, proc);
        if (!pte)
                goto out_pte;

        /* There's an interaction between the skas0 stub pages, stack
         * randomization, and the BUG at the end of exit_mmap.  exit_mmap
         * checks that the number of page tables freed is the same as had
         * been allocated.  If the stack is on the last page table page,
         * then the stack pte page will be freed, and if not, it won't.  To
         * avoid having to know where the stack is, or if the process mapped
         * something at the top of its address space for some other reason,
         * we set TASK_SIZE to end at the start of the last page table.
         * This keeps exit_mmap off the last page, but introduces a leak
         * of that page.  So, we hang onto it here and free it in
         * destroy_context_skas.
         */
        mm->context.skas.last_page_table = pmd_page_kernel(*pmd);

        *pte = mk_pte(virt_to_page(kernel), __pgprot(_PAGE_PRESENT));
        *pte = pte_mkexec(*pte);
        *pte = pte_wrprotect(*pte);
        spin_unlock(&mm->page_table_lock);
        return(0);

 out_pmd:
        pud_free(pud);
 out_pte:
        pmd_free(pmd);
 out:
        spin_unlock(&mm->page_table_lock);
        return(-ENOMEM);
}

int init_new_context_skas(struct task_struct *task, struct mm_struct *mm)
{
        struct mm_struct *cur_mm = current->mm;
        struct mm_id *cur_mm_id = &cur_mm->context.skas.id;
        struct mm_id *mm_id = &mm->context.skas.id;
        unsigned long stack;
        int from, ret;

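        /* proc_mm != 0 means the host has the skas3 /proc/mm interface, so
         * a new address space is just a new /proc/mm file descriptor.  With
         * proc_mm == 0 (skas0), each address space is a separate host
         * process, created below via the clone stub so that the host parent
         * matches the UML parent and changed LDTs are inherited.
         */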
        if(proc_mm){
                if((cur_mm != NULL) && (cur_mm != &init_mm))
                        from = cur_mm->context.skas.id.u.mm_fd;
                else from = -1;

                ret = new_mm(from);
                if(ret < 0){
                        printk("init_new_context_skas - new_mm failed, "
                               "errno = %d\n", ret);
                        return ret;
                }
                mm_id->u.mm_fd = ret;
        }
        else {
                /* This zeros the entry that pgd_alloc didn't, needed since
                 * we are about to reinitialize it, and want mm.nr_ptes to
                 * be accurate.
                 */
                mm->pgd[USER_PTRS_PER_PGD] = __pgd(0);

                ret = init_stub_pte(mm, CONFIG_STUB_CODE,
                                    (unsigned long) &__syscall_stub_start);
                if(ret)
                        goto out;

                ret = -ENOMEM;
                stack = get_zeroed_page(GFP_KERNEL);
                if(stack == 0)
                        goto out;
                mm_id->stack = stack;

                ret = init_stub_pte(mm, CONFIG_STUB_DATA, stack);
                if(ret)
                        goto out_free;

                mm->nr_ptes--;

                if((cur_mm != NULL) && (cur_mm != &init_mm))
                        mm_id->u.pid = copy_context_skas0(stack,
                                                          cur_mm_id->u.pid);
                else mm_id->u.pid = start_userspace(stack);
        }

        return 0;

 out_free:
        free_page(mm_id->stack);
 out:
        return ret;
}

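/* Tear down whatever init_new_context_skas set up: close the /proc/mm
 * descriptor in skas3 mode, or, in skas0 mode, kill the host process and
 * free the stub stack page and the deliberately leaked last page table.
 */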
void destroy_context_skas(struct mm_struct *mm)
{
        struct mmu_context_skas *mmu = &mm->context.skas;

        if(proc_mm)
                os_close_file(mmu->id.u.mm_fd);
        else {
                os_kill_ptraced_process(mmu->id.u.pid, 1);
                free_page(mmu->id.stack);
                free_page(mmu->last_page_table);
        }
}