perf: Tighten (and fix) the grouping condition
The fix from 9fc81d8742 ("perf: Fix events installation during moving
group") was incomplete in that it failed to recognise that creating a
group with events for different CPUs is semantically broken -- they
cannot be co-scheduled.

Furthermore, it leads to real breakage where, when we create an event
for CPU Y and then migrate it to form a group on CPU X, the code gets
confused where the counter is programmed -- triggered in practice as
well by me via the perf fuzzer.

Fix this by tightening the rules for creating groups. Only allow
grouping of counters that can be co-scheduled in the same context.
This means for the same task and/or the same cpu.

Fixes: 9fc81d8742 ("perf: Fix events installation during moving group")
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Arnaldo Carvalho de Melo <acme@kernel.org>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Link: http://lkml.kernel.org/r/20150123125834.090683288@infradead.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit c3c87e7704
parent ef454caeb7
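For illustration only (not part of the patch): a minimal userspace sketch of
the scenario the tightened check now rejects. It assumes a machine with at
least two online CPUs and enough privilege for system-wide (per-CPU) events.
A software group leader is pinned to CPU 0 and a hardware sibling is then
requested for CPU 1; that would force a cross-CPU group move, so with this
change the kernel refuses it via the err_context path (the syscall should
fail with EINVAL).

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>
#include <string.h>
#include <stdio.h>
#include <errno.h>

/* No glibc wrapper exists for perf_event_open(); invoke the syscall directly. */
static long sys_perf_event_open(struct perf_event_attr *attr, pid_t pid,
                                int cpu, int group_fd, unsigned long flags)
{
        return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
        struct perf_event_attr attr;
        int leader, sibling;

        /* Pure-software group leader, system-wide, pinned to CPU 0. */
        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_SOFTWARE;
        attr.config = PERF_COUNT_SW_CPU_CLOCK;
        attr.disabled = 1;
        leader = sys_perf_event_open(&attr, -1, 0, -1, 0);
        if (leader < 0) {
                perror("leader");
                return 1;
        }

        /*
         * Hardware sibling requested for CPU 1.  Grouping it would move the
         * group into the hardware context, but a CPU 0 and a CPU 1 counter
         * can never be co-scheduled, so the kernel now rejects the request.
         */
        attr.type = PERF_TYPE_HARDWARE;
        attr.config = PERF_COUNT_HW_CPU_CYCLES;
        sibling = sys_perf_event_open(&attr, -1, 1, leader, 0);
        if (sibling < 0)
                printf("cross-CPU grouping rejected: %s\n", strerror(errno));

        return 0;
}

Requesting the same hardware sibling for CPU 0 instead would still be
accepted, since both counters can then be co-scheduled in CPU 0's context.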
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
@@ -450,11 +450,6 @@ struct perf_event {
 #endif /* CONFIG_PERF_EVENTS */
 };
 
-enum perf_event_context_type {
-        task_context,
-        cpu_context,
-};
-
 /**
  * struct perf_event_context - event context structure
  *
@@ -462,7 +457,6 @@ enum perf_event_context_type {
  */
 struct perf_event_context {
         struct pmu                      *pmu;
-        enum perf_event_context_type    type;
         /*
          * Protect the states of the events in the list,
          * nr_active, and the list:
diff --git a/kernel/events/core.c b/kernel/events/core.c
@@ -6776,7 +6776,6 @@ int perf_pmu_register(struct pmu *pmu, const char *name, int type)
                 __perf_event_init_context(&cpuctx->ctx);
                 lockdep_set_class(&cpuctx->ctx.mutex, &cpuctx_mutex);
                 lockdep_set_class(&cpuctx->ctx.lock, &cpuctx_lock);
-                cpuctx->ctx.type = cpu_context;
                 cpuctx->ctx.pmu = pmu;
 
                 __perf_cpu_hrtimer_init(cpuctx, cpu);
@@ -7420,7 +7419,19 @@ SYSCALL_DEFINE5(perf_event_open,
          * task or CPU context:
          */
         if (move_group) {
-                if (group_leader->ctx->type != ctx->type)
-                        goto err_context;
+                /*
+                 * Make sure we're both on the same task, or both
+                 * per-cpu events.
+                 */
+                if (group_leader->ctx->task != ctx->task)
+                        goto err_context;
+
+                /*
+                 * Make sure we're both events for the same CPU;
+                 * grouping events for different CPUs is broken; since
+                 * you can never concurrently schedule them anyhow.
+                 */
+                if (group_leader->cpu != event->cpu)
+                        goto err_context;
         } else {
                 if (group_leader->ctx != ctx)
                         goto err_context;