Merge branch 'tip/tracing/ftrace' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-2.6-trace into tracing/ftrace

commit 44fc6ee923
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -79,11 +79,11 @@ static unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
  *
  * 1) Put the instruction pointer into the IP buffer
  *    and the new code into the "code" buffer.
- * 2) Set a flag that says we are modifying code
- * 3) Wait for any running NMIs to finish.
- * 4) Write the code
- * 5) clear the flag.
- * 6) Wait for any running NMIs to finish.
+ * 2) Wait for any running NMIs to finish and set a flag that says
+ *    we are modifying code, it is done in an atomic operation.
+ * 3) Write the code
+ * 4) clear the flag.
+ * 5) Wait for any running NMIs to finish.
  *
  * If an NMI is executed, the first thing it does is to call
  * "ftrace_nmi_enter". This will check if the flag is set to write
@@ -95,9 +95,9 @@ static unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
  * are the same as what exists.
  */
 
+#define MOD_CODE_WRITE_FLAG (1 << 31)	/* set when NMI should do the write */
 static atomic_t nmi_running = ATOMIC_INIT(0);
 static int mod_code_status;		/* holds return value of text write */
-static int mod_code_write;		/* set when NMI should do the write */
 static void *mod_code_ip;		/* holds the IP to write to */
 static void *mod_code_newcode;	/* holds the text to write to the IP */
 
@@ -114,6 +114,20 @@ int ftrace_arch_read_dyn_info(char *buf, int size)
 	return r;
 }
 
+static void clear_mod_flag(void)
+{
+	int old = atomic_read(&nmi_running);
+
+	for (;;) {
+		int new = old & ~MOD_CODE_WRITE_FLAG;
+
+		if (old == new)
+			break;
+
+		old = atomic_cmpxchg(&nmi_running, old, new);
+	}
+}
+
 static void ftrace_mod_code(void)
 {
 	/*
@@ -127,27 +141,39 @@ static void ftrace_mod_code(void)
 
 	/* if we fail, then kill any new writers */
 	if (mod_code_status)
-		mod_code_write = 0;
+		clear_mod_flag();
 }
 
 void ftrace_nmi_enter(void)
 {
-	atomic_inc(&nmi_running);
-	/* Must have nmi_running seen before reading write flag */
-	smp_mb();
-	if (mod_code_write) {
+	if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
+		smp_rmb();
 		ftrace_mod_code();
 		atomic_inc(&nmi_update_count);
 	}
+	/* Must have previous changes seen before executions */
+	smp_mb();
 }
 
 void ftrace_nmi_exit(void)
 {
 	/* Finish all executions before clearing nmi_running */
-	smp_wmb();
+	smp_mb();
 	atomic_dec(&nmi_running);
 }
 
+static void wait_for_nmi_and_set_mod_flag(void)
+{
+	if (!atomic_cmpxchg(&nmi_running, 0, MOD_CODE_WRITE_FLAG))
+		return;
+
+	do {
+		cpu_relax();
+	} while (atomic_cmpxchg(&nmi_running, 0, MOD_CODE_WRITE_FLAG));
+
+	nmi_wait_count++;
+}
+
 static void wait_for_nmi(void)
 {
 	if (!atomic_read(&nmi_running))
@@ -167,14 +193,9 @@ do_ftrace_mod_code(unsigned long ip, void *new_code)
 	mod_code_newcode = new_code;
 
 	/* The buffers need to be visible before we let NMIs write them */
-	smp_wmb();
-
-	mod_code_write = 1;
-
-	/* Make sure write bit is visible before we wait on NMIs */
 	smp_mb();
 
-	wait_for_nmi();
+	wait_for_nmi_and_set_mod_flag();
 
 	/* Make sure all running NMIs have finished before we write the code */
 	smp_mb();
@@ -182,13 +203,9 @@ do_ftrace_mod_code(unsigned long ip, void *new_code)
 	ftrace_mod_code();
 
 	/* Make sure the write happens before clearing the bit */
-	smp_wmb();
-
-	mod_code_write = 0;
-
-	/* make sure NMIs see the cleared bit */
 	smp_mb();
 
+	clear_mod_flag();
 	wait_for_nmi();
 
 	return mod_code_status;
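
The x86 change above folds the "an update is in progress" flag into the high bit of the nmi_running reference count, so a single atomic cmpxchg can simultaneously check that no NMI is executing and claim the flag. Below is a minimal user-space sketch of that protocol using C11 atomics in place of the kernel's atomic_t API; the names (MOD_WRITE_FLAG aside, which mirrors the patch) are illustrative, not kernel interfaces:

    #include <stdatomic.h>
    #include <stdio.h>

    #define MOD_WRITE_FLAG (1u << 31)   /* high bit: an update is in progress */

    static atomic_uint nmi_running;     /* low bits: handlers currently inside */

    /* Writer: succeed only when the count is exactly 0, setting the flag atomically. */
    static void wait_and_set_mod_flag(void)
    {
            unsigned int expected = 0;

            while (!atomic_compare_exchange_weak(&nmi_running, &expected,
                                                 MOD_WRITE_FLAG))
                    expected = 0;   /* handler inside (or spurious failure): retry */
    }

    /* Writer: drop the flag without disturbing the handler count. */
    static void clear_mod_flag(void)
    {
            atomic_fetch_and(&nmi_running, ~MOD_WRITE_FLAG);
    }

    /* Handler: one atomic increment both registers us and samples the flag. */
    static void handler_enter(void)
    {
            if ((atomic_fetch_add(&nmi_running, 1) + 1) & MOD_WRITE_FLAG) {
                    /* an update is in progress: do the write ourselves,
                     * as ftrace_nmi_enter() does via ftrace_mod_code() */
            }
    }

    static void handler_exit(void)
    {
            atomic_fetch_sub(&nmi_running, 1);
    }

    int main(void)
    {
            wait_and_set_mod_flag();
            /* ... the code patch would happen here ... */
            clear_mod_flag();

            handler_enter();        /* flag clear: takes the fast path */
            handler_exit();

            puts("flag protocol exercised");
            return 0;
    }

The kernel's clear_mod_flag() loops over atomic_cmpxchg() because its atomic_t API of the day had no atomic AND returning the old value; atomic_fetch_and is used here only for brevity.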
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1201,7 +1201,7 @@ void trace_graph_return(struct ftrace_graph_ret *trace)
  * trace_vbprintk - write binary msg to tracing buffer
  *
  */
-int trace_vbprintk(unsigned long ip, int depth, const char *fmt, va_list args)
+int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
 {
 	static raw_spinlock_t trace_buf_lock =
 		(raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
@@ -1243,7 +1243,6 @@ int trace_vbprintk(unsigned long ip, int depth, const char *fmt, va_list args)
 		goto out_unlock;
 	entry = ring_buffer_event_data(event);
 	entry->ip = ip;
-	entry->depth = depth;
 	entry->fmt = fmt;
 
 	memcpy(entry->buf, trace_buf, sizeof(u32) * len);
@@ -1261,7 +1260,7 @@ int trace_vbprintk(unsigned long ip, int depth, const char *fmt, va_list args)
 }
 EXPORT_SYMBOL_GPL(trace_vbprintk);
 
-int trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args)
+int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
 {
 	static raw_spinlock_t trace_buf_lock = __RAW_SPIN_LOCK_UNLOCKED;
 	static char trace_buf[TRACE_BUF_SIZE];
@@ -1298,7 +1297,6 @@ int trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args)
 		goto out_unlock;
 	entry = ring_buffer_event_data(event);
 	entry->ip = ip;
-	entry->depth = depth;
 
 	memcpy(&entry->buf, trace_buf, len);
 	entry->buf[len] = 0;
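
trace_vbprintk() implements deferred formatting: at trace time it records only the format pointer and the raw 32-bit argument words, and the string is rendered later, when the buffer is read. A much-simplified user-space sketch of that idea, handling only %d and using made-up names (bprintk_record/bprintk_render):

    #include <stdarg.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* A captured event: the format pointer plus raw argument words. */
    struct bevent {
            const char *fmt;    /* must point at a literal that stays valid */
            uint32_t buf[4];
            int n;
    };

    /* Record phase: copy the int arguments, not the formatted text. */
    static void bprintk_record(struct bevent *e, const char *fmt, ...)
    {
            va_list ap;
            const char *p;

            e->fmt = fmt;
            e->n = 0;
            va_start(ap, fmt);
            for (p = fmt; (p = strstr(p, "%d")) != NULL && e->n < 4; p += 2)
                    e->buf[e->n++] = (uint32_t)va_arg(ap, int);
            va_end(ap);
    }

    /* Read phase: formatting happens only when someone looks at the event. */
    static void bprintk_render(const struct bevent *e)
    {
            const char *p;
            int i = 0;

            for (p = e->fmt; *p; p++) {
                    if (p[0] == '%' && p[1] == 'd' && i < e->n) {
                            printf("%d", (int)e->buf[i++]);
                            p++;
                    } else {
                            putchar(*p);
                    }
            }
    }

    int main(void)
    {
            struct bevent e;

            bprintk_record(&e, "x=%d y=%d\n", 4, 2);    /* fast path */
            bprintk_render(&e);                         /* slow path, later */
            return 0;
    }

The real trace_seq_bprintf() parses the full printf grammar; this sketch only shows why storing the fmt pointer plus a u32 array is enough to rebuild the message.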
@@ -1701,38 +1699,6 @@ static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
 	return TRACE_TYPE_HANDLED;
 }
 
-static enum print_line_t print_bprintk_msg_only(struct trace_iterator *iter)
-{
-	struct trace_seq *s = &iter->seq;
-	struct trace_entry *entry = iter->ent;
-	struct bprint_entry *field;
-	int ret;
-
-	trace_assign_type(field, entry);
-
-	ret = trace_seq_bprintf(s, field->fmt, field->buf);
-	if (!ret)
-		return TRACE_TYPE_PARTIAL_LINE;
-
-	return TRACE_TYPE_HANDLED;
-}
-
-static enum print_line_t print_printk_msg_only(struct trace_iterator *iter)
-{
-	struct trace_seq *s = &iter->seq;
-	struct trace_entry *entry = iter->ent;
-	struct print_entry *field;
-	int ret;
-
-	trace_assign_type(field, entry);
-
-	ret = trace_seq_printf(s, "%s", field->buf);
-	if (!ret)
-		return TRACE_TYPE_PARTIAL_LINE;
-
-	return TRACE_TYPE_HANDLED;
-}
-
 static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
 {
 	struct trace_seq *s = &iter->seq;
@@ -1794,12 +1760,12 @@ static enum print_line_t print_trace_line(struct trace_iterator *iter)
 	if (iter->ent->type == TRACE_BPRINT &&
 			trace_flags & TRACE_ITER_PRINTK &&
 			trace_flags & TRACE_ITER_PRINTK_MSGONLY)
-		return print_bprintk_msg_only(iter);
+		return trace_print_bprintk_msg_only(iter);
 
 	if (iter->ent->type == TRACE_PRINT &&
 			trace_flags & TRACE_ITER_PRINTK &&
 			trace_flags & TRACE_ITER_PRINTK_MSGONLY)
-		return print_printk_msg_only(iter);
+		return trace_print_printk_msg_only(iter);
 
 	if (trace_flags & TRACE_ITER_BIN)
 		return print_bin_fmt(iter);
@@ -1948,9 +1914,14 @@ int tracing_open_generic(struct inode *inode, struct file *filp)
 static int tracing_release(struct inode *inode, struct file *file)
 {
 	struct seq_file *m = (struct seq_file *)file->private_data;
-	struct trace_iterator *iter = m->private;
+	struct trace_iterator *iter;
 	int cpu;
 
+	if (!(file->f_mode & FMODE_READ))
+		return 0;
+
+	iter = m->private;
+
 	mutex_lock(&trace_types_lock);
 	for_each_tracing_cpu(cpu) {
 		if (iter->buffer_iter[cpu])
@@ -1976,12 +1947,24 @@ static int tracing_open(struct inode *inode, struct file *file)
 	struct trace_iterator *iter;
 	int ret = 0;
 
-	iter = __tracing_open(inode, file);
-	if (IS_ERR(iter))
-		ret = PTR_ERR(iter);
-	else if (trace_flags & TRACE_ITER_LATENCY_FMT)
-		iter->iter_flags |= TRACE_FILE_LAT_FMT;
+	/* If this file was open for write, then erase contents */
+	if ((file->f_mode & FMODE_WRITE) &&
+	    !(file->f_flags & O_APPEND)) {
+		long cpu = (long) inode->i_private;
 
+		if (cpu == TRACE_PIPE_ALL_CPU)
+			tracing_reset_online_cpus(&global_trace);
+		else
+			tracing_reset(&global_trace, cpu);
+	}
+
+	if (file->f_mode & FMODE_READ) {
+		iter = __tracing_open(inode, file);
+		if (IS_ERR(iter))
+			ret = PTR_ERR(iter);
+		else if (trace_flags & TRACE_ITER_LATENCY_FMT)
+			iter->iter_flags |= TRACE_FILE_LAT_FMT;
+	}
 	return ret;
 }
 
@@ -2056,9 +2039,17 @@ static int show_traces_open(struct inode *inode, struct file *file)
 	return ret;
 }
 
+static ssize_t
+tracing_write_stub(struct file *filp, const char __user *ubuf,
+		   size_t count, loff_t *ppos)
+{
+	return count;
+}
+
 static const struct file_operations tracing_fops = {
 	.open		= tracing_open,
 	.read		= seq_read,
+	.write		= tracing_write_stub,
 	.llseek		= seq_lseek,
 	.release	= tracing_release,
 };
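
With the tracing_open()/tracing_write_stub() changes above, opening the trace file for writing without O_APPEND resets the buffer, so "echo > trace" clears a trace while "echo >> trace" leaves it intact. A small sketch of the two open modes as a user-space program would issue them, assuming debugfs is mounted at /sys/kernel/debug and the caller has permission:

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            /* like "echo > trace": write mode, no O_APPEND -> buffer is reset */
            int fd = open("/sys/kernel/debug/tracing/trace", O_WRONLY);

            if (fd < 0) {
                    perror("open");
                    return 1;
            }
            close(fd);

            /* like "echo >> trace": O_APPEND set -> contents are preserved */
            fd = open("/sys/kernel/debug/tracing/trace", O_WRONLY | O_APPEND);
            if (fd >= 0)
                    close(fd);

            return 0;
    }

Note the kernel keys off O_APPEND rather than O_TRUNC, so any non-append write open clears the buffer; the write itself is swallowed by tracing_write_stub().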
@@ -3154,7 +3145,7 @@ static int mark_printk(const char *fmt, ...)
 	int ret;
 	va_list args;
 	va_start(args, fmt);
-	ret = trace_vprintk(0, -1, fmt, args);
+	ret = trace_vprintk(0, fmt, args);
 	va_end(args);
 	return ret;
 }
@@ -3583,7 +3574,7 @@ static void tracing_init_debugfs_percpu(long cpu)
 		pr_warning("Could not create debugfs 'trace_pipe' entry\n");
 
 	/* per cpu trace */
-	entry = debugfs_create_file("trace", 0444, d_cpu,
+	entry = debugfs_create_file("trace", 0644, d_cpu,
 				(void *) cpu, &tracing_fops);
 	if (!entry)
 		pr_warning("Could not create debugfs 'trace' entry\n");
@@ -3897,7 +3888,7 @@ static __init int tracer_init_debugfs(void)
 	if (!entry)
 		pr_warning("Could not create debugfs 'tracing_cpumask' entry\n");
 
-	entry = debugfs_create_file("trace", 0444, d_tracer,
+	entry = debugfs_create_file("trace", 0644, d_tracer,
 				    (void *) TRACE_PIPE_ALL_CPU, &tracing_fops);
 	if (!entry)
 		pr_warning("Could not create debugfs 'trace' entry\n");
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -123,7 +123,6 @@ struct userstack_entry {
 struct bprint_entry {
 	struct trace_entry ent;
 	unsigned long ip;
-	int depth;
 	const char *fmt;
 	u32 buf[];
 };
@@ -131,7 +130,6 @@ struct bprint_entry {
 struct print_entry {
 	struct trace_entry ent;
 	unsigned long ip;
-	int depth;
 	char buf[];
 };
 
@@ -598,9 +596,9 @@ extern int trace_selftest_startup_branch(struct tracer *trace,
 extern void *head_page(struct trace_array_cpu *data);
 extern long ns2usecs(cycle_t nsec);
 extern int
-trace_vbprintk(unsigned long ip, int depth, const char *fmt, va_list args);
+trace_vbprintk(unsigned long ip, const char *fmt, va_list args);
 extern int
-trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args);
+trace_vprintk(unsigned long ip, const char *fmt, va_list args);
 
 extern unsigned long trace_flags;
 
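
Both entry structs keep their payload in a C99 flexible array member (u32 buf[] / char buf[]), so the header and the text land in a single ring-buffer allocation. A standalone sketch of that allocation pattern; struct msg_entry is a made-up stand-in for print_entry:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* shaped like print_entry: fixed header plus inline payload */
    struct msg_entry {
            unsigned long ip;
            char buf[];     /* flexible array member, contributes no size */
    };

    int main(void)
    {
            const char *text = "hello";
            size_t len = strlen(text);

            /* one allocation covers header and payload, as in the ring buffer */
            struct msg_entry *e = malloc(sizeof(*e) + len + 1);

            if (!e)
                    return 1;
            e->ip = 0x12345678UL;
            memcpy(e->buf, text, len);
            e->buf[len] = '\0';

            printf("%#lx: %s\n", e->ip, e->buf);
            free(e);
            return 0;
    }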
--- a/kernel/trace/trace_event_types.h
+++ b/kernel/trace/trace_event_types.h
@@ -105,7 +105,6 @@ TRACE_EVENT_FORMAT(user_stack, TRACE_USER_STACK, userstack_entry, ignore,
 TRACE_EVENT_FORMAT(bprint, TRACE_BPRINT, bprint_entry, ignore,
 	TRACE_STRUCT(
 		TRACE_FIELD(unsigned long, ip, ip)
-		TRACE_FIELD(unsigned int, depth, depth)
 		TRACE_FIELD(char *, fmt, fmt)
 		TRACE_FIELD_ZERO_CHAR(buf)
 	),
@@ -115,7 +114,6 @@ TRACE_EVENT_FORMAT(bprint, TRACE_BPRINT, bprint_entry, ignore,
 TRACE_EVENT_FORMAT(print, TRACE_PRINT, print_entry, ignore,
 	TRACE_STRUCT(
 		TRACE_FIELD(unsigned long, ip, ip)
-		TRACE_FIELD(unsigned int, depth, depth)
 		TRACE_FIELD_ZERO_CHAR(buf)
 	),
 	TP_RAW_FMT("%08lx (%d) fmt:%p %s")
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -14,6 +14,11 @@
 #include "trace.h"
 #include "trace_output.h"
 
+struct fgraph_data {
+	pid_t		last_pid;
+	int		depth;
+};
+
 #define TRACE_GRAPH_INDENT	2
 
 /* Flag options */
@@ -231,16 +236,16 @@ print_graph_proc(struct trace_seq *s, pid_t pid)
 
 /* If the pid changed since the last trace, output this event */
 static enum print_line_t
-verif_pid(struct trace_seq *s, pid_t pid, int cpu, pid_t *last_pids_cpu)
+verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data)
 {
 	pid_t prev_pid;
 	pid_t *last_pid;
 	int ret;
 
-	if (!last_pids_cpu)
+	if (!data)
 		return TRACE_TYPE_HANDLED;
 
-	last_pid = per_cpu_ptr(last_pids_cpu, cpu);
+	last_pid = &(per_cpu_ptr(data, cpu)->last_pid);
 
 	if (*last_pid == pid)
 		return TRACE_TYPE_HANDLED;
@@ -471,6 +476,7 @@ print_graph_entry_leaf(struct trace_iterator *iter,
 		struct ftrace_graph_ent_entry *entry,
 		struct ftrace_graph_ret_entry *ret_entry, struct trace_seq *s)
 {
+	struct fgraph_data *data = iter->private;
 	struct ftrace_graph_ret *graph_ret;
 	struct ftrace_graph_ent *call;
 	unsigned long long duration;
@@ -481,6 +487,18 @@ print_graph_entry_leaf(struct trace_iterator *iter,
 	call = &entry->graph_ent;
 	duration = graph_ret->rettime - graph_ret->calltime;
 
+	if (data) {
+		int cpu = iter->cpu;
+		int *depth = &(per_cpu_ptr(data, cpu)->depth);
+
+		/*
+		 * Comments display at + 1 to depth. Since
+		 * this is a leaf function, keep the comments
+		 * equal to this depth.
+		 */
+		*depth = call->depth - 1;
+	}
+
 	/* Overhead */
 	ret = print_graph_overhead(duration, s);
 	if (!ret)
@@ -512,12 +530,21 @@ print_graph_entry_leaf(struct trace_iterator *iter,
 }
 
 static enum print_line_t
-print_graph_entry_nested(struct ftrace_graph_ent_entry *entry,
-			struct trace_seq *s, pid_t pid, int cpu)
+print_graph_entry_nested(struct trace_iterator *iter,
+			 struct ftrace_graph_ent_entry *entry,
+			 struct trace_seq *s, int cpu)
 {
-	int i;
-	int ret;
 	struct ftrace_graph_ent *call = &entry->graph_ent;
+	struct fgraph_data *data = iter->private;
+	int ret;
+	int i;
+
+	if (data) {
+		int cpu = iter->cpu;
+		int *depth = &(per_cpu_ptr(data, cpu)->depth);
+
+		*depth = call->depth;
+	}
 
 	/* No overhead */
 	ret = print_graph_overhead(-1, s);
@@ -554,24 +581,24 @@ print_graph_entry_nested(struct ftrace_graph_ent_entry *entry,
 }
 
 static enum print_line_t
-print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
-			struct trace_iterator *iter)
+print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
+		     int type, unsigned long addr)
 {
-	int ret;
-	int cpu = iter->cpu;
-	pid_t *last_entry = iter->private;
+	struct fgraph_data *data = iter->private;
 	struct trace_entry *ent = iter->ent;
-	struct ftrace_graph_ent *call = &field->graph_ent;
-	struct ftrace_graph_ret_entry *leaf_ret;
+	int cpu = iter->cpu;
+	int ret;
 
 	/* Pid */
-	if (verif_pid(s, ent->pid, cpu, last_entry) == TRACE_TYPE_PARTIAL_LINE)
+	if (verif_pid(s, ent->pid, cpu, data) == TRACE_TYPE_PARTIAL_LINE)
 		return TRACE_TYPE_PARTIAL_LINE;
 
-	/* Interrupt */
-	ret = print_graph_irq(iter, call->func, TRACE_GRAPH_ENT, cpu, ent->pid);
-	if (ret == TRACE_TYPE_PARTIAL_LINE)
-		return TRACE_TYPE_PARTIAL_LINE;
+	if (type) {
+		/* Interrupt */
+		ret = print_graph_irq(iter, addr, type, cpu, ent->pid);
+		if (ret == TRACE_TYPE_PARTIAL_LINE)
+			return TRACE_TYPE_PARTIAL_LINE;
+	}
 
 	/* Absolute time */
 	if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME) {
@@ -598,11 +625,25 @@ print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
 			return TRACE_TYPE_PARTIAL_LINE;
 	}
 
+	return 0;
+}
+
+static enum print_line_t
+print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
+			struct trace_iterator *iter)
+{
+	int cpu = iter->cpu;
+	struct ftrace_graph_ent *call = &field->graph_ent;
+	struct ftrace_graph_ret_entry *leaf_ret;
+
+	if (print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func))
+		return TRACE_TYPE_PARTIAL_LINE;
+
 	leaf_ret = get_return_for_leaf(iter, field);
 	if (leaf_ret)
 		return print_graph_entry_leaf(iter, field, leaf_ret, s);
 	else
-		return print_graph_entry_nested(field, s, iter->ent->pid, cpu);
+		return print_graph_entry_nested(iter, field, s, cpu);
 
 }
 
@@ -610,41 +651,28 @@ static enum print_line_t
 print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
 		   struct trace_entry *ent, struct trace_iterator *iter)
 {
-	int i;
-	int ret;
-	int cpu = iter->cpu;
-	pid_t *last_pid = iter->private, pid = ent->pid;
 	unsigned long long duration = trace->rettime - trace->calltime;
+	struct fgraph_data *data = iter->private;
+	pid_t pid = ent->pid;
+	int cpu = iter->cpu;
+	int ret;
+	int i;
+
+	if (data) {
+		int cpu = iter->cpu;
+		int *depth = &(per_cpu_ptr(data, cpu)->depth);
+
+		/*
+		 * Comments display at + 1 to depth. This is the
+		 * return from a function, we now want the comments
+		 * to display at the same level of the bracket.
+		 */
+		*depth = trace->depth - 1;
+	}
 
-	/* Pid */
-	if (verif_pid(s, pid, cpu, last_pid) == TRACE_TYPE_PARTIAL_LINE)
+	if (print_graph_prologue(iter, s, 0, 0))
 		return TRACE_TYPE_PARTIAL_LINE;
 
-	/* Absolute time */
-	if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME) {
-		ret = print_graph_abs_time(iter->ts, s);
-		if (!ret)
-			return TRACE_TYPE_PARTIAL_LINE;
-	}
-
-	/* Cpu */
-	if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) {
-		ret = print_graph_cpu(s, cpu);
-		if (ret == TRACE_TYPE_PARTIAL_LINE)
-			return TRACE_TYPE_PARTIAL_LINE;
-	}
-
-	/* Proc */
-	if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) {
-		ret = print_graph_proc(s, ent->pid);
-		if (ret == TRACE_TYPE_PARTIAL_LINE)
-			return TRACE_TYPE_PARTIAL_LINE;
-
-		ret = trace_seq_printf(s, " | ");
-		if (!ret)
-			return TRACE_TYPE_PARTIAL_LINE;
-	}
-
 	/* Overhead */
 	ret = print_graph_overhead(duration, s);
 	if (!ret)
@@ -684,43 +712,22 @@ print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
 }
 
 static enum print_line_t
-print_graph_comment(struct bprint_entry *trace, struct trace_seq *s,
-		   struct trace_entry *ent, struct trace_iterator *iter)
+print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
+		    struct trace_iterator *iter)
 {
-	int i;
 	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
+	struct fgraph_data *data = iter->private;
+	struct trace_event *event;
+	int depth = 0;
 	int ret;
-	int cpu = iter->cpu;
-	pid_t *last_pid = iter->private;
+	int i;
 
-	/* Pid */
-	if (verif_pid(s, ent->pid, cpu, last_pid) == TRACE_TYPE_PARTIAL_LINE)
+	if (data)
+		depth = per_cpu_ptr(data, iter->cpu)->depth;
+
+	if (print_graph_prologue(iter, s, 0, 0))
 		return TRACE_TYPE_PARTIAL_LINE;
 
-	/* Absolute time */
-	if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME) {
-		ret = print_graph_abs_time(iter->ts, s);
-		if (!ret)
-			return TRACE_TYPE_PARTIAL_LINE;
-	}
-
-	/* Cpu */
-	if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) {
-		ret = print_graph_cpu(s, cpu);
-		if (ret == TRACE_TYPE_PARTIAL_LINE)
-			return TRACE_TYPE_PARTIAL_LINE;
-	}
-
-	/* Proc */
-	if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) {
-		ret = print_graph_proc(s, ent->pid);
-		if (ret == TRACE_TYPE_PARTIAL_LINE)
-			return TRACE_TYPE_PARTIAL_LINE;
-
-		ret = trace_seq_printf(s, " | ");
-		if (!ret)
-			return TRACE_TYPE_PARTIAL_LINE;
-	}
-
 	/* No overhead */
 	ret = print_graph_overhead(-1, s);
 	if (!ret)
@@ -734,8 +741,8 @@ print_graph_comment(struct bprint_entry *trace, struct trace_seq *s,
 	}
 
 	/* Indentation */
-	if (trace->depth > 0)
-		for (i = 0; i < (trace->depth + 1) * TRACE_GRAPH_INDENT; i++) {
+	if (depth > 0)
+		for (i = 0; i < (depth + 1) * TRACE_GRAPH_INDENT; i++) {
 			ret = trace_seq_printf(s, " ");
 			if (!ret)
 				return TRACE_TYPE_PARTIAL_LINE;
@@ -746,9 +753,26 @@ print_graph_comment(struct bprint_entry *trace, struct trace_seq *s,
 	if (!ret)
 		return TRACE_TYPE_PARTIAL_LINE;
 
-	ret = trace_seq_bprintf(s, trace->fmt, trace->buf);
-	if (!ret)
-		return TRACE_TYPE_PARTIAL_LINE;
+	switch (iter->ent->type) {
+	case TRACE_BPRINT:
+		ret = trace_print_bprintk_msg_only(iter);
+		if (ret != TRACE_TYPE_HANDLED)
+			return ret;
+		break;
+	case TRACE_PRINT:
+		ret = trace_print_printk_msg_only(iter);
+		if (ret != TRACE_TYPE_HANDLED)
+			return ret;
+		break;
+	default:
+		event = ftrace_find_event(ent->type);
+		if (!event)
+			return TRACE_TYPE_UNHANDLED;
+
+		ret = event->trace(iter, sym_flags);
+		if (ret != TRACE_TYPE_HANDLED)
+			return ret;
+	}
 
 	/* Strip ending newline */
 	if (s->buffer[s->len - 1] == '\n') {
@@ -767,8 +791,8 @@ print_graph_comment(struct bprint_entry *trace, struct trace_seq *s,
 enum print_line_t
 print_graph_function(struct trace_iterator *iter)
 {
-	struct trace_seq *s = &iter->seq;
 	struct trace_entry *entry = iter->ent;
+	struct trace_seq *s = &iter->seq;
 
 	switch (entry->type) {
 	case TRACE_GRAPH_ENT: {
@@ -781,14 +805,11 @@ print_graph_function(struct trace_iterator *iter)
 		trace_assign_type(field, entry);
 		return print_graph_return(&field->ret, s, entry, iter);
 	}
-	case TRACE_BPRINT: {
-		struct bprint_entry *field;
-		trace_assign_type(field, entry);
-		return print_graph_comment(field, s, entry, iter);
-	}
 	default:
-		return TRACE_TYPE_UNHANDLED;
+		return print_graph_comment(s, entry, iter);
 	}
 
 	return TRACE_TYPE_HANDLED;
 }
 
 static void print_graph_headers(struct seq_file *s)
@@ -820,19 +841,21 @@ static void print_graph_headers(struct seq_file *s)
 
 static void graph_trace_open(struct trace_iterator *iter)
 {
-	/* pid on the last trace processed */
-	pid_t *last_pid = alloc_percpu(pid_t);
+	/* pid and depth on the last trace processed */
+	struct fgraph_data *data = alloc_percpu(struct fgraph_data);
 	int cpu;
 
-	if (!last_pid)
+	if (!data)
 		pr_warning("function graph tracer: not enough memory\n");
 	else
 		for_each_possible_cpu(cpu) {
-			pid_t *pid = per_cpu_ptr(last_pid, cpu);
+			pid_t *pid = &(per_cpu_ptr(data, cpu)->last_pid);
+			int *depth = &(per_cpu_ptr(data, cpu)->depth);
 			*pid = -1;
+			*depth = 0;
 		}
 
-	iter->private = last_pid;
+	iter->private = data;
 }
 
 static void graph_trace_close(struct trace_iterator *iter)
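
The graph tracer's per-CPU state grows here from a bare pid_t into a struct fgraph_data carrying last_pid plus the current call depth, still one instance per CPU. A user-space analogue of the graph_trace_open() initialization, with a plain array standing in for the kernel's alloc_percpu()/per_cpu_ptr() (NR_CPUS and the helper names are illustrative):

    #include <stdlib.h>
    #include <sys/types.h>

    #define NR_CPUS 4       /* stand-in for the possible-CPU count */

    struct fgraph_data {    /* mirrors the struct added above */
            pid_t last_pid;
            int depth;
    };

    /* stand-in for alloc_percpu(struct fgraph_data) */
    static struct fgraph_data *alloc_percpu_data(void)
    {
            return calloc(NR_CPUS, sizeof(struct fgraph_data));
    }

    /* stand-in for per_cpu_ptr(data, cpu) */
    static struct fgraph_data *percpu_ptr(struct fgraph_data *data, int cpu)
    {
            return &data[cpu];
    }

    int main(void)
    {
            struct fgraph_data *data = alloc_percpu_data();
            int cpu;

            if (!data)
                    return 1;
            /* same initialization graph_trace_open() performs */
            for (cpu = 0; cpu < NR_CPUS; cpu++) {
                    percpu_ptr(data, cpu)->last_pid = -1;
                    percpu_ptr(data, cpu)->depth = 0;
            }
            free(data);
            return 0;
    }

Grouping the fields in one struct keeps both values on the same per-CPU cache line and lets later patches extend the state without another allocation.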
--- a/kernel/trace/trace_mmiotrace.c
+++ b/kernel/trace/trace_mmiotrace.c
@@ -359,5 +359,5 @@ void mmio_trace_mapping(struct mmiotrace_map *map)
 
 int mmio_trace_printk(const char *fmt, va_list args)
 {
-	return trace_vprintk(0, -1, fmt, args);
+	return trace_vprintk(0, fmt, args);
 }
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -19,6 +19,38 @@ static struct hlist_head event_hash[EVENT_HASHSIZE] __read_mostly;
 
 static int next_event_type = __TRACE_LAST_TYPE + 1;
 
+enum print_line_t trace_print_bprintk_msg_only(struct trace_iterator *iter)
+{
+	struct trace_seq *s = &iter->seq;
+	struct trace_entry *entry = iter->ent;
+	struct bprint_entry *field;
+	int ret;
+
+	trace_assign_type(field, entry);
+
+	ret = trace_seq_bprintf(s, field->fmt, field->buf);
+	if (!ret)
+		return TRACE_TYPE_PARTIAL_LINE;
+
+	return TRACE_TYPE_HANDLED;
+}
+
+enum print_line_t trace_print_printk_msg_only(struct trace_iterator *iter)
+{
+	struct trace_seq *s = &iter->seq;
+	struct trace_entry *entry = iter->ent;
+	struct print_entry *field;
+	int ret;
+
+	trace_assign_type(field, entry);
+
+	ret = trace_seq_printf(s, "%s", field->buf);
+	if (!ret)
+		return TRACE_TYPE_PARTIAL_LINE;
+
+	return TRACE_TYPE_HANDLED;
+}
+
 /**
  * trace_seq_printf - sequence printing of trace information
  * @s: trace sequence descriptor
--- a/kernel/trace/trace_output.h
+++ b/kernel/trace/trace_output.h
@@ -15,6 +15,11 @@ struct trace_event {
 	trace_print_func	binary;
 };
 
+extern enum print_line_t
+trace_print_bprintk_msg_only(struct trace_iterator *iter);
+extern enum print_line_t
+trace_print_printk_msg_only(struct trace_iterator *iter);
+
 extern int trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
 	__attribute__ ((format (printf, 2, 3)));
 extern int
--- a/kernel/trace/trace_printk.c
+++ b/kernel/trace/trace_printk.c
@@ -112,7 +112,7 @@ int __trace_bprintk(unsigned long ip, const char *fmt, ...)
 		return 0;
 
 	va_start(ap, fmt);
-	ret = trace_vbprintk(ip, task_curr_ret_stack(current), fmt, ap);
+	ret = trace_vbprintk(ip, fmt, ap);
 	va_end(ap);
 	return ret;
 }
@@ -126,7 +126,7 @@ int __ftrace_vbprintk(unsigned long ip, const char *fmt, va_list ap)
 	if (!(trace_flags & TRACE_ITER_PRINTK))
 		return 0;
 
-	return trace_vbprintk(ip, task_curr_ret_stack(current), fmt, ap);
+	return trace_vbprintk(ip, fmt, ap);
 }
 EXPORT_SYMBOL_GPL(__ftrace_vbprintk);
 
@@ -139,7 +139,7 @@ int __trace_printk(unsigned long ip, const char *fmt, ...)
 		return 0;
 
 	va_start(ap, fmt);
-	ret = trace_vprintk(ip, task_curr_ret_stack(current), fmt, ap);
+	ret = trace_vprintk(ip, fmt, ap);
 	va_end(ap);
 	return ret;
 }
@@ -150,7 +150,7 @@ int __ftrace_vprintk(unsigned long ip, const char *fmt, va_list ap)
 	if (!(trace_flags & TRACE_ITER_PRINTK))
 		return 0;
 
-	return trace_vprintk(ip, task_curr_ret_stack(current), fmt, ap);
+	return trace_vprintk(ip, fmt, ap);
 }
 EXPORT_SYMBOL_GPL(__ftrace_vprintk);
 
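
All four wrappers above follow the standard varargs-forwarding idiom: a "..." function captures its arguments with va_start() and hands the va_list to a single core function, so only one place does the real work. A self-contained sketch with illustrative names (my_printk/my_vprintk):

    #include <stdarg.h>
    #include <stdio.h>

    /* the va_list-taking core, like trace_vprintk() after this change */
    static int my_vprintk(unsigned long ip, const char *fmt, va_list ap)
    {
            char buf[128];
            int len = vsnprintf(buf, sizeof(buf), fmt, ap);

            printf("%#lx: %s", ip, buf);
            return len;
    }

    /* the variadic wrapper, like __trace_printk() */
    static int my_printk(unsigned long ip, const char *fmt, ...)
    {
            va_list ap;
            int ret;

            va_start(ap, fmt);
            ret = my_vprintk(ip, fmt, ap);
            va_end(ap);
            return ret;
    }

    int main(void)
    {
            my_printk(0xc0ffee, "answer=%d\n", 42);
            return 0;
    }

Keeping both a "..." entry point and a va_list entry point (as __trace_printk/__ftrace_vprintk do) lets other va_list-based code, such as mmio_trace_printk(), forward into the same core without reformatting.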