perf_counter: hook up the tracepoint events

Impact: new perfcounters feature

Enable usage of tracepoints as perf counter events.

tracepoint event ids can be found in /debug/tracing/events/*/*/id
and (for now) are represented as -65536 + id in the type field.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Orig-LKML-Reference: <20090319194233.744044174@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Author:    Peter Zijlstra <a.p.zijlstra@chello.nl>, 2009-03-19 20:26:17 +01:00
Committer: Ingo Molnar
Commit:    e077df4f43 (parent f160095275)
3 changed files with 51 additions and 0 deletions
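
To make the encoding above concrete: userspace reads the numeric id from the event's debugfs file and biases it by -65536 to obtain the counter type. The sketch below is illustrative only; the event path is an arbitrary example and the counter-open syscall setup that would consume the resulting hw_event.type value is omitted, since it is not part of this diff.

/*
 * Illustrative only: read a tracepoint id from debugfs and compute the
 * perf counter type value described above (type = -65536 + id).  The
 * event path is an arbitrary example; how the value is then handed to
 * the counter-open syscall is not shown here.
 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/debug/tracing/events/sched/sched_switch/id", "r");
	int id, type;

	if (!f)
		return 1;
	if (fscanf(f, "%d", &id) != 1) {
		fclose(f);
		return 1;
	}
	fclose(f);

	type = -65536 + id;		/* PERF_TP_EVENTS_MIN + id */
	printf("tracepoint id %d -> counter type %d\n", id, type);

	return 0;
}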

include/linux/perf_counter.h

@@ -53,6 +53,8 @@ enum hw_event_types {
 	PERF_COUNT_PAGE_FAULTS_MAJ	= -7,
 
 	PERF_SW_EVENTS_MIN		= -8,
+
+	PERF_TP_EVENTS_MIN		= -65536
 };
 
 /*
@@ -222,6 +224,7 @@ struct perf_counter {
 	struct perf_data		*usrdata;
 	struct perf_data		data[2];
 
+	void (*destroy)(struct perf_counter *);
 	struct rcu_head			rcu_head;
 #endif
 };

init/Kconfig

@@ -947,6 +947,11 @@ config PERF_COUNTERS
 
 	  Say Y if unsure.
 
+config EVENT_PROFILE
+	bool "Tracepoint profile sources"
+	depends on PERF_COUNTERS && EVENT_TRACER
+	default y
+
 endmenu
 
 config VM_EVENT_COUNTERS

kernel/perf_counter.c

@@ -1152,6 +1152,9 @@ static void free_counter_rcu(struct rcu_head *head)
 
 static void free_counter(struct perf_counter *counter)
 {
+	if (counter->destroy)
+		counter->destroy(counter);
+
 	call_rcu(&counter->rcu_head, free_counter_rcu);
 }
@@ -1727,6 +1730,45 @@ static const struct hw_perf_counter_ops perf_ops_cpu_migrations = {
 	.read		= cpu_migrations_perf_counter_read,
 };
 
+#ifdef CONFIG_EVENT_PROFILE
+void perf_tpcounter_event(int event_id)
+{
+	perf_swcounter_event(PERF_TP_EVENTS_MIN + event_id, 1, 1,
+			task_pt_regs(current));
+}
+
+extern int ftrace_profile_enable(int);
+extern void ftrace_profile_disable(int);
+
+static void tp_perf_counter_destroy(struct perf_counter *counter)
+{
+	int event_id = counter->hw_event.type - PERF_TP_EVENTS_MIN;
+
+	ftrace_profile_disable(event_id);
+}
+
+static const struct hw_perf_counter_ops *
+tp_perf_counter_init(struct perf_counter *counter)
+{
+	int event_id = counter->hw_event.type - PERF_TP_EVENTS_MIN;
+	int ret;
+
+	ret = ftrace_profile_enable(event_id);
+	if (ret)
+		return NULL;
+
+	counter->destroy = tp_perf_counter_destroy;
+
+	return &perf_ops_generic;
+}
+#else
+static const struct hw_perf_counter_ops *
+tp_perf_counter_init(struct perf_counter *counter)
+{
+	return NULL;
+}
+#endif
+
 static const struct hw_perf_counter_ops *
 sw_perf_counter_init(struct perf_counter *counter)
 {
@@ -1772,6 +1814,7 @@ sw_perf_counter_init(struct perf_counter *counter)
 		hw_ops = &perf_ops_cpu_migrations;
 		break;
 	default:
+		hw_ops = tp_perf_counter_init(counter);
 		break;
 	}
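
Taken together, the hunks above give a tracepoint counter its whole lifecycle: a type that matches no known software event falls through sw_perf_counter_init()'s default case into tp_perf_counter_init(), which decodes the id and calls ftrace_profile_enable() (returning NULL, and thus failing counter creation, if that enable fails); perf_tpcounter_event(), intended to be called when a profiled tracepoint fires (the caller is not part of this diff), re-applies the offset and feeds the generic software counter path; and free_counter() invokes the new ->destroy callback so ftrace_profile_disable() runs when the counter is torn down. A standalone sketch (plain C, not kernel code) of the id/type round trip those helpers rely on:

#include <assert.h>

#define PERF_TP_EVENTS_MIN	(-65536)

int main(void)
{
	int id = 42;					/* example tracepoint id */
	int type = PERF_TP_EVENTS_MIN + id;		/* what userspace passes */
	int decoded = type - PERF_TP_EVENTS_MIN;	/* tp_perf_counter_init() */
	int refired = PERF_TP_EVENTS_MIN + decoded;	/* perf_tpcounter_event() */

	assert(decoded == id && refired == type);
	return 0;
}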