c34d1b4d16
check_user_page_readable is a problematic variant of follow_page.  It's
used only by oprofile's i386 and arm backtrace code, at interrupt time, to
establish whether a userspace stackframe is currently readable.

This is problematic, because we want to push the page_table_lock down inside
follow_page, and later split it; whereas oprofile is doing a spin_trylock on
it (in the i386 case, forgotten in the arm case), and needs that to pin
perhaps two pages spanned by the stackframe (which might be covered by
different locks when we split).

I think oprofile is going about this in the wrong way: it doesn't need to
know the area is readable (neither i386 nor arm uses read protection of user
pages), it doesn't need to pin the memory, it should simply
__copy_from_user_inatomic, and see if that succeeds or not.  Sorry, but I've
not got around to devising the sparse __user annotations for this.

Then we can eliminate check_user_page_readable, and return to a single
follow_page without the __follow_page variants.

Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
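The fix is mechanical once stated in code: rather than walking page tables to
ask whether an address range is readable, attempt the copy with the fault
handler in atomic mode and test whether it succeeded.  A minimal sketch of
that pattern, assuming the kernel API of this era (access_ok() still takes
VERIFY_READ, and __copy_from_user_inatomic() returns the number of bytes left
uncopied); the helper name frame_readable() is illustrative only, and
dump_backtrace() below inlines the same two steps:

static int frame_readable(const struct frame_head __user *head,
                          struct frame_head *buf, unsigned long n)
{
        /* Cheap range check: reject addresses outside the user segment */
        if (!access_ok(VERIFY_READ, head, n * sizeof(*buf)))
                return 0;
        /* Probe by copying: in atomic context a fault cannot sleep, the
         * copy simply fails, and failure is exactly the answer we need */
        return __copy_from_user_inatomic(buf, head, n * sizeof(*buf)) == 0;
}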
/**
 * @file backtrace.c
 *
 * @remark Copyright 2002 OProfile authors
 * @remark Read the file COPYING
 *
 * @author John Levon
 * @author David Smith
 */

#include <linux/oprofile.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <asm/ptrace.h>
#include <asm/uaccess.h>

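/*
 * An i386 frame built with frame pointers: the saved %ebp links to the
 * caller's frame and is immediately followed by the return address.
 */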
struct frame_head {
        struct frame_head * ebp;
        unsigned long ret;
} __attribute__((packed));

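/*
 * Record one stackframe and return the next frame head, or NULL to end
 * the walk.  Two frame heads are copied so that the head one frame
 * beyond is already known to be readable before we follow it.
 */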
static struct frame_head *
dump_backtrace(struct frame_head * head)
{
        struct frame_head bufhead[2];

        /* Also check accessibility of one struct frame_head beyond */
        if (!access_ok(VERIFY_READ, head, sizeof(bufhead)))
                return NULL;
        if (__copy_from_user_inatomic(bufhead, head, sizeof(bufhead)))
                return NULL;

        oprofile_add_trace(bufhead[0].ret);

        /* frame pointers should strictly progress back up the stack
         * (towards higher addresses) */
        if (head >= bufhead[0].ebp)
                return NULL;

        return bufhead[0].ebp;
}

/*
 * |             | /\ Higher addresses
 * |             |
 * --------------- stack base (address of current_thread_info)
 * | thread info |
 * .             .
 * |    stack    |
 * --------------- saved regs->ebp value if valid (frame_head address)
 * .             .
 * --------------- struct pt_regs stored on stack (struct pt_regs *)
 * |             |
 * .             .
 * |             |
 * --------------- %esp
 * |             |
 * |             | \/ Lower addresses
 *
 * Thus, &pt_regs <-> stack base restricts the valid(ish) ebp values
 */

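/*
 * So a saved ebp is plausible only if it lies strictly between the
 * pt_regs saved on the stack and the top of the THREAD_SIZE-aligned
 * kernel stack; stack_base below rounds &pt_regs up to that top.
 */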
#ifdef CONFIG_FRAME_POINTER
static int valid_kernel_stack(struct frame_head * head, struct pt_regs * regs)
{
        unsigned long headaddr = (unsigned long)head;
        unsigned long stack = (unsigned long)regs;
        unsigned long stack_base = (stack & ~(THREAD_SIZE - 1)) + THREAD_SIZE;

        return headaddr > stack && headaddr < stack_base;
}
#else
/* without fp, it's just junk */
static int valid_kernel_stack(struct frame_head * head, struct pt_regs * regs)
{
        return 0;
}
#endif

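/*
 * Called from oprofile's sample path with the interrupted registers:
 * record at most 'depth' return addresses, bounding a kernel-mode walk
 * with valid_kernel_stack() and stopping a user-mode walk when
 * dump_backtrace() returns NULL.
 */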
void
x86_backtrace(struct pt_regs * const regs, unsigned int depth)
{
        struct frame_head *head;

#ifdef CONFIG_X86_64
        head = (struct frame_head *)regs->rbp;
#else
        head = (struct frame_head *)regs->ebp;
#endif

        if (!user_mode_vm(regs)) {
                while (depth-- && valid_kernel_stack(head, regs))
                        head = dump_backtrace(head);
                return;
        }

        while (depth-- && head)
                head = dump_backtrace(head);
}