perf: Pass last sampling period to perf_sample_data_init()
We always need to pass the last sample period to perf_sample_data_init(), otherwise the event distribution will be wrong. Thus, modify the function interface to take the required period as an argument. So basically a pattern like this:

        perf_sample_data_init(&data, ~0ULL);
        data.period = event->hw.last_period;

will now look like this:

        perf_sample_data_init(&data, ~0ULL, event->hw.last_period);

This avoids an uninitialized data.period and simplifies the code.

Signed-off-by: Robert Richter <robert.richter@amd.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/1333390758-10893-3-git-send-email-robert.richter@amd.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit fd0d000b2c
parent c75841a398
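To see the shape of the change before reading the per-architecture hunks, here is a minimal sketch of an overflow path using the new calling convention. This is illustrative only: the handler name example_pmu_overflow and its surrounding code are hypothetical, not taken from any file touched below.

        /* Hypothetical overflow-handler fragment showing the post-patch API. */
        static void example_pmu_overflow(struct perf_event *event, struct pt_regs *regs)
        {
                struct hw_perf_event *hwc = &event->hw;
                struct perf_sample_data data;

                /* Old style required two steps, and forgetting the second one
                 * left data.period uninitialized:
                 *        perf_sample_data_init(&data, 0);
                 *        data.period = hwc->last_period;
                 * New style seeds the period in the same call: */
                perf_sample_data_init(&data, 0, hwc->last_period);

                if (perf_event_overflow(event, &data, regs))
                        event->pmu->stop(event, 0);     /* throttle on overflow */
        }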
@@ -824,7 +824,6 @@ static void alpha_perf_event_irq_handler(unsigned long la_ptr,
 
         idx = la_ptr;
 
-        perf_sample_data_init(&data, 0);
         for (j = 0; j < cpuc->n_events; j++) {
                 if (cpuc->current_idx[j] == idx)
                         break;
@@ -848,7 +847,7 @@ static void alpha_perf_event_irq_handler(unsigned long la_ptr,
 
         hwc = &event->hw;
         alpha_perf_event_update(event, hwc, idx, alpha_pmu->pmc_max_period[idx]+1);
-        data.period = event->hw.last_period;
+        perf_sample_data_init(&data, 0, hwc->last_period);
 
         if (alpha_perf_event_set_period(event, hwc, idx)) {
                 if (perf_event_overflow(event, &data, regs)) {
@@ -489,8 +489,6 @@ armv6pmu_handle_irq(int irq_num,
          */
         armv6_pmcr_write(pmcr);
 
-        perf_sample_data_init(&data, 0);
-
         cpuc = &__get_cpu_var(cpu_hw_events);
         for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
                 struct perf_event *event = cpuc->events[idx];
@@ -509,7 +507,7 @@ armv6pmu_handle_irq(int irq_num,
 
                 hwc = &event->hw;
                 armpmu_event_update(event, hwc, idx);
-                data.period = event->hw.last_period;
+                perf_sample_data_init(&data, 0, hwc->last_period);
                 if (!armpmu_event_set_period(event, hwc, idx))
                         continue;
 
@@ -1077,8 +1077,6 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
          */
         regs = get_irq_regs();
 
-        perf_sample_data_init(&data, 0);
-
         cpuc = &__get_cpu_var(cpu_hw_events);
         for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
                 struct perf_event *event = cpuc->events[idx];
@@ -1097,7 +1095,7 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
 
                 hwc = &event->hw;
                 armpmu_event_update(event, hwc, idx);
-                data.period = event->hw.last_period;
+                perf_sample_data_init(&data, 0, hwc->last_period);
                 if (!armpmu_event_set_period(event, hwc, idx))
                         continue;
 
@@ -248,8 +248,6 @@ xscale1pmu_handle_irq(int irq_num, void *dev)
 
         regs = get_irq_regs();
 
-        perf_sample_data_init(&data, 0);
-
         cpuc = &__get_cpu_var(cpu_hw_events);
         for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
                 struct perf_event *event = cpuc->events[idx];
@@ -263,7 +261,7 @@ xscale1pmu_handle_irq(int irq_num, void *dev)
 
                 hwc = &event->hw;
                 armpmu_event_update(event, hwc, idx);
-                data.period = event->hw.last_period;
+                perf_sample_data_init(&data, 0, hwc->last_period);
                 if (!armpmu_event_set_period(event, hwc, idx))
                         continue;
 
@@ -588,8 +586,6 @@ xscale2pmu_handle_irq(int irq_num, void *dev)
 
         regs = get_irq_regs();
 
-        perf_sample_data_init(&data, 0);
-
         cpuc = &__get_cpu_var(cpu_hw_events);
         for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
                 struct perf_event *event = cpuc->events[idx];
@@ -603,7 +599,7 @@ xscale2pmu_handle_irq(int irq_num, void *dev)
 
                 hwc = &event->hw;
                 armpmu_event_update(event, hwc, idx);
-                data.period = event->hw.last_period;
+                perf_sample_data_init(&data, 0, hwc->last_period);
                 if (!armpmu_event_set_period(event, hwc, idx))
                         continue;
 
@@ -1325,7 +1325,7 @@ static int mipsxx_pmu_handle_shared_irq(void)
 
         regs = get_irq_regs();
 
-        perf_sample_data_init(&data, 0);
+        perf_sample_data_init(&data, 0, 0);
 
         switch (counters) {
 #define HANDLE_COUNTER(n)        \
@@ -1299,8 +1299,7 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
         if (record) {
                 struct perf_sample_data data;
 
-                perf_sample_data_init(&data, ~0ULL);
-                data.period = event->hw.last_period;
+                perf_sample_data_init(&data, ~0ULL, event->hw.last_period);
 
                 if (event->attr.sample_type & PERF_SAMPLE_ADDR)
                         perf_get_data_addr(regs, &data.addr);
@@ -613,8 +613,7 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
         if (record) {
                 struct perf_sample_data data;
 
-                perf_sample_data_init(&data, 0);
-                data.period = event->hw.last_period;
+                perf_sample_data_init(&data, 0, event->hw.last_period);
 
                 if (perf_event_overflow(event, &data, regs))
                         fsl_emb_pmu_stop(event, 0);
@@ -1296,8 +1296,6 @@ static int __kprobes perf_event_nmi_handler(struct notifier_block *self,
 
         regs = args->regs;
 
-        perf_sample_data_init(&data, 0);
-
         cpuc = &__get_cpu_var(cpu_hw_events);
 
         /* If the PMU has the TOE IRQ enable bits, we need to do a
@@ -1321,7 +1319,7 @@ static int __kprobes perf_event_nmi_handler(struct notifier_block *self,
                 if (val & (1ULL << 31))
                         continue;
 
-                data.period = event->hw.last_period;
+                perf_sample_data_init(&data, 0, hwc->last_period);
                 if (!sparc_perf_event_set_period(event, hwc, idx))
                         continue;
 
@@ -1183,8 +1183,6 @@ int x86_pmu_handle_irq(struct pt_regs *regs)
         int idx, handled = 0;
         u64 val;
 
-        perf_sample_data_init(&data, 0);
-
         cpuc = &__get_cpu_var(cpu_hw_events);
 
         /*
@@ -1219,7 +1217,7 @@ int x86_pmu_handle_irq(struct pt_regs *regs)
                  * event overflow
                  */
                 handled++;
-                data.period = event->hw.last_period;
+                perf_sample_data_init(&data, 0, event->hw.last_period);
 
                 if (!x86_perf_event_set_period(event))
                         continue;
@@ -398,8 +398,7 @@ static int perf_ibs_handle_irq(struct perf_ibs *perf_ibs, struct pt_regs *iregs)
         }
 
         perf_ibs_event_update(perf_ibs, event, config);
-        perf_sample_data_init(&data, 0);
-        data.period = event->hw.last_period;
+        perf_sample_data_init(&data, 0, hwc->last_period);
 
         if (event->attr.sample_type & PERF_SAMPLE_RAW) {
                 ibs_data.caps = ibs_caps;
@@ -1027,8 +1027,6 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
         u64 status;
         int handled;
 
-        perf_sample_data_init(&data, 0);
-
         cpuc = &__get_cpu_var(cpu_hw_events);
 
         /*
@@ -1082,7 +1080,7 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
                 if (!intel_pmu_save_and_restart(event))
                         continue;
 
-                data.period = event->hw.last_period;
+                perf_sample_data_init(&data, 0, event->hw.last_period);
 
                 if (has_branch_stack(event))
                         data.br_stack = &cpuc->lbr_stack;
@@ -316,8 +316,7 @@ int intel_pmu_drain_bts_buffer(void)
 
         ds->bts_index = ds->bts_buffer_base;
 
-        perf_sample_data_init(&data, 0);
-        data.period = event->hw.last_period;
+        perf_sample_data_init(&data, 0, event->hw.last_period);
         regs.ip = 0;
 
         /*
@@ -564,8 +563,7 @@ static void __intel_pmu_pebs_event(struct perf_event *event,
         if (!intel_pmu_save_and_restart(event))
                 return;
 
-        perf_sample_data_init(&data, 0);
-        data.period = event->hw.last_period;
+        perf_sample_data_init(&data, 0, event->hw.last_period);
 
         /*
          * We use the interrupt regs as a base because the PEBS record
@@ -1005,8 +1005,6 @@ static int p4_pmu_handle_irq(struct pt_regs *regs)
         int idx, handled = 0;
         u64 val;
 
-        perf_sample_data_init(&data, 0);
-
         cpuc = &__get_cpu_var(cpu_hw_events);
 
         for (idx = 0; idx < x86_pmu.num_counters; idx++) {
@@ -1034,10 +1032,12 @@ static int p4_pmu_handle_irq(struct pt_regs *regs)
                 handled += overflow;
 
                 /* event overflow for sure */
-                data.period = event->hw.last_period;
+                perf_sample_data_init(&data, 0, hwc->last_period);
 
                 if (!x86_perf_event_set_period(event))
                         continue;
+
+
                 if (perf_event_overflow(event, &data, regs))
                         x86_pmu_stop(event, 0);
         }
@@ -1132,11 +1132,14 @@ struct perf_sample_data {
         struct perf_branch_stack *br_stack;
 };
 
-static inline void perf_sample_data_init(struct perf_sample_data *data, u64 addr)
+static inline void perf_sample_data_init(struct perf_sample_data *data,
+                                         u64 addr, u64 period)
 {
+        /* remaining struct members initialized in perf_prepare_sample() */
         data->addr = addr;
         data->raw = NULL;
         data->br_stack = NULL;
+        data->period = period;
 }
 
 extern void perf_output_sample(struct perf_output_handle *handle,
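For reference, this is how the initializer reads once the hunk above is applied, reconstructed from the diff rather than copied from the tree:

        static inline void perf_sample_data_init(struct perf_sample_data *data,
                                                 u64 addr, u64 period)
        {
                /* remaining struct members initialized in perf_prepare_sample() */
                data->addr = addr;
                data->raw = NULL;
                data->br_stack = NULL;
                data->period = period;
        }

Callers that have no meaningful sampling period, such as the software, tracepoint, and breakpoint event paths below, simply pass 0.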
@@ -4957,7 +4957,7 @@ void __perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
         if (rctx < 0)
                 return;
 
-        perf_sample_data_init(&data, addr);
+        perf_sample_data_init(&data, addr, 0);
 
         do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, &data, regs);
 
@@ -5215,7 +5215,7 @@ void perf_tp_event(u64 addr, u64 count, void *record, int entry_size,
                 .data = record,
         };
 
-        perf_sample_data_init(&data, addr);
+        perf_sample_data_init(&data, addr, 0);
         data.raw = &raw;
 
         hlist_for_each_entry_rcu(event, node, head, hlist_entry) {
@@ -5318,7 +5318,7 @@ void perf_bp_event(struct perf_event *bp, void *data)
         struct perf_sample_data sample;
         struct pt_regs *regs = data;
 
-        perf_sample_data_init(&sample, bp->attr.bp_addr);
+        perf_sample_data_init(&sample, bp->attr.bp_addr, 0);
 
         if (!bp->hw.state && !perf_exclude_event(bp, regs))
                 perf_swevent_event(bp, 1, &sample, regs);
@@ -5344,8 +5344,7 @@ static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
 
         event->pmu->read(event);
 
-        perf_sample_data_init(&data, 0);
-        data.period = event->hw.last_period;
+        perf_sample_data_init(&data, 0, event->hw.last_period);
         regs = get_irq_regs();
 
         if (regs && !perf_exclude_event(event, regs)) {