Merge branch 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull perf updates from Ingo Molnar:
 "Most of the changes are for tooling, the main changes in this cycle were:

   - Improve Intel-PT hardware tracing support, both on the kernel and
     on the tooling side: PTWRITE instruction support, power events for
     C-state tracing, etc. (Adrian Hunter)

   - Add support to measure SMI cost to the x86 architecture, with
     tooling support in 'perf stat' (Kan Liang)

   - Support function filtering in 'perf ftrace', plus related
     improvements (Namhyung Kim)

   - Allow adding and removing fields to the default 'perf script'
     columns, using + or - as field prefixes to do so (Andi Kleen)

   - Allow resolving the DSO name with 'perf script -F brstack{sym,off},dso'
     (Mark Santaniello)

   - Add perf tooling unwind support for PowerPC (Paolo Bonzini)

   - ... and various other improvements as well"

* 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (84 commits)
  perf auxtrace: Add CPU filter support
  perf intel-pt: Do not use TSC packets for calculating CPU cycles to TSC
  perf intel-pt: Update documentation to include new ptwrite and power events
  perf intel-pt: Add example script for power events and PTWRITE
  perf intel-pt: Synthesize new power and "ptwrite" events
  perf intel-pt: Move code in intel_pt_synth_events() to simplify attr setting
  perf intel-pt: Factor out intel_pt_set_event_name()
  perf intel-pt: Tidy messages into called function intel_pt_synth_event()
  perf intel-pt: Tidy Intel PT evsel lookup into separate function
  perf intel-pt: Join needlessly wrapped lines
  perf intel-pt: Remove unused instructions_sample_period
  perf intel-pt: Factor out common code synthesizing event samples
  perf script: Add synthesized Intel PT power and ptwrite events
  perf/x86/intel: Constify the 'lbr_desc[]' array and make a function static
  perf script: Add 'synth' field for synthesized event payloads
  perf auxtrace: Add itrace option to output power events
  perf auxtrace: Add itrace option to output ptwrite events
  tools include: Add byte-swapping macros to kernel.h
  perf script: Add 'synth' event type for synthesized events
  x86/insn: perf tools: Add new ptwrite instruction
  ...
Linus Torvalds 2017-07-03 12:40:46 -07:00
commit 7447d56217
92 changed files with 2530 additions and 631 deletions


@ -1750,6 +1750,8 @@ ssize_t x86_event_sysfs_show(char *page, u64 config, u64 event)
return ret;
}
static struct attribute_group x86_pmu_attr_group;
static int __init init_hw_perf_events(void)
{
struct x86_pmu_quirk *quirk;
@ -1813,6 +1815,14 @@ static int __init init_hw_perf_events(void)
x86_pmu_events_group.attrs = tmp;
}
if (x86_pmu.attrs) {
struct attribute **tmp;
tmp = merge_attr(x86_pmu_attr_group.attrs, x86_pmu.attrs);
if (!WARN_ON(!tmp))
x86_pmu_attr_group.attrs = tmp;
}
pr_info("... version: %d\n", x86_pmu.version);
pr_info("... bit width: %d\n", x86_pmu.cntval_bits);
pr_info("... generic registers: %d\n", x86_pmu.num_counters);


@ -3160,6 +3160,19 @@ static int intel_pmu_cpu_prepare(int cpu)
return -ENOMEM;
}
static void flip_smm_bit(void *data)
{
unsigned long set = *(unsigned long *)data;
if (set > 0) {
msr_set_bit(MSR_IA32_DEBUGCTLMSR,
DEBUGCTLMSR_FREEZE_IN_SMM_BIT);
} else {
msr_clear_bit(MSR_IA32_DEBUGCTLMSR,
DEBUGCTLMSR_FREEZE_IN_SMM_BIT);
}
}
static void intel_pmu_cpu_starting(int cpu)
{
struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
@ -3174,6 +3187,8 @@ static void intel_pmu_cpu_starting(int cpu)
cpuc->lbr_sel = NULL;
flip_smm_bit(&x86_pmu.attr_freeze_on_smi);
if (!cpuc->shared_regs)
return;
@ -3595,6 +3610,52 @@ static struct attribute *hsw_events_attrs[] = {
NULL
};
static ssize_t freeze_on_smi_show(struct device *cdev,
struct device_attribute *attr,
char *buf)
{
return sprintf(buf, "%lu\n", x86_pmu.attr_freeze_on_smi);
}
static DEFINE_MUTEX(freeze_on_smi_mutex);
static ssize_t freeze_on_smi_store(struct device *cdev,
struct device_attribute *attr,
const char *buf, size_t count)
{
unsigned long val;
ssize_t ret;
ret = kstrtoul(buf, 0, &val);
if (ret)
return ret;
if (val > 1)
return -EINVAL;
mutex_lock(&freeze_on_smi_mutex);
if (x86_pmu.attr_freeze_on_smi == val)
goto done;
x86_pmu.attr_freeze_on_smi = val;
get_online_cpus();
on_each_cpu(flip_smm_bit, &val, 1);
put_online_cpus();
done:
mutex_unlock(&freeze_on_smi_mutex);
return count;
}
static DEVICE_ATTR_RW(freeze_on_smi);
static struct attribute *intel_pmu_attrs[] = {
&dev_attr_freeze_on_smi.attr,
NULL,
};
__init int intel_pmu_init(void)
{
union cpuid10_edx edx;
@ -3641,6 +3702,8 @@ __init int intel_pmu_init(void)
x86_pmu.max_pebs_events = min_t(unsigned, MAX_PEBS_EVENTS, x86_pmu.num_counters);
x86_pmu.attrs = intel_pmu_attrs;
/*
* Quirk: v2 perfmon does not report fixed-purpose events, so
* assume at least 3 events, when not running in a hypervisor:


@ -18,7 +18,7 @@ enum {
LBR_FORMAT_MAX_KNOWN = LBR_FORMAT_TIME,
};
static enum {
static const enum {
LBR_EIP_FLAGS = 1,
LBR_TSX = 2,
} lbr_desc[LBR_FORMAT_MAX_KNOWN + 1] = {
@ -287,7 +287,7 @@ inline u64 lbr_from_signext_quirk_wr(u64 val)
/*
* If quirk is needed, ensure sign extension is 61 bits:
*/
u64 lbr_from_signext_quirk_rd(u64 val)
static u64 lbr_from_signext_quirk_rd(u64 val)
{
if (static_branch_unlikely(&lbr_from_quirk_key)) {
/*


@ -562,6 +562,9 @@ struct x86_pmu {
ssize_t (*events_sysfs_show)(char *page, u64 config);
struct attribute **cpu_events;
unsigned long attr_freeze_on_smi;
struct attribute **attrs;
/*
* CPU Hotplug hooks
*/


@ -137,6 +137,8 @@
#define DEBUGCTLMSR_BTS_OFF_OS (1UL << 9)
#define DEBUGCTLMSR_BTS_OFF_USR (1UL << 10)
#define DEBUGCTLMSR_FREEZE_LBRS_ON_PMI (1UL << 11)
#define DEBUGCTLMSR_FREEZE_IN_SMM_BIT 14
#define DEBUGCTLMSR_FREEZE_IN_SMM (1UL << DEBUGCTLMSR_FREEZE_IN_SMM_BIT)
#define MSR_PEBS_FRONTEND 0x000003f7


@ -1009,7 +1009,7 @@ GrpTable: Grp15
1: fxstor | RDGSBASE Ry (F3),(11B)
2: vldmxcsr Md (v1) | WRFSBASE Ry (F3),(11B)
3: vstmxcsr Md (v1) | WRGSBASE Ry (F3),(11B)
4: XSAVE
4: XSAVE | ptwrite Ey (F3),(11B)
5: XRSTOR | lfence (11B)
6: XSAVEOPT | clwb (66) | mfence (11B)
7: clflush | clflushopt (66) | sfence (11B)


@ -925,11 +925,6 @@ static inline int is_cgroup_event(struct perf_event *event)
return 0;
}
static inline u64 perf_cgroup_event_cgrp_time(struct perf_event *event)
{
return 0;
}
static inline void update_cgrp_time_from_event(struct perf_event *event)
{
}
@ -5729,9 +5724,6 @@ static void perf_output_read_one(struct perf_output_handle *handle,
__output_copy(handle, values, n * sizeof(u64));
}
/*
* XXX PERF_FORMAT_GROUP vs inherited events seems difficult.
*/
static void perf_output_read_group(struct perf_output_handle *handle,
struct perf_event *event,
u64 enabled, u64 running)
@ -5776,6 +5768,13 @@ static void perf_output_read_group(struct perf_output_handle *handle,
#define PERF_FORMAT_TOTAL_TIMES (PERF_FORMAT_TOTAL_TIME_ENABLED|\
PERF_FORMAT_TOTAL_TIME_RUNNING)
/*
* XXX PERF_SAMPLE_READ vs inherited events seems difficult.
*
* The problem is that its both hard and excessively expensive to iterate the
* child list, not to mention that its impossible to IPI the children running
* on another CPU, from interrupt/NMI context.
*/
static void perf_output_read(struct perf_output_handle *handle,
struct perf_event *event)
{
@ -9193,7 +9192,7 @@ static int perf_try_init_event(struct pmu *pmu, struct perf_event *event)
static struct pmu *perf_init_event(struct perf_event *event)
{
struct pmu *pmu = NULL;
struct pmu *pmu;
int idx;
int ret;
@ -9462,9 +9461,10 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
local64_set(&hwc->period_left, hwc->sample_period);
/*
* we currently do not support PERF_FORMAT_GROUP on inherited events
* We currently do not support PERF_SAMPLE_READ on inherited events.
* See perf_output_read().
*/
if (attr->inherit && (attr->read_format & PERF_FORMAT_GROUP))
if (attr->inherit && (attr->sample_type & PERF_SAMPLE_READ))
goto err_ns;
if (!has_branch_stack(event))
@ -9477,9 +9477,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
}
pmu = perf_init_event(event);
if (!pmu)
goto err_ns;
else if (IS_ERR(pmu)) {
if (IS_ERR(pmu)) {
err = PTR_ERR(pmu);
goto err_ns;
}
@ -9492,8 +9490,10 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
event->addr_filters_offs = kcalloc(pmu->nr_addr_filters,
sizeof(unsigned long),
GFP_KERNEL);
if (!event->addr_filters_offs)
if (!event->addr_filters_offs) {
err = -ENOMEM;
goto err_per_task;
}
/* force hw sync on the address filters */
event->addr_filters_gen = 1;


@ -19,3 +19,13 @@
/* &a[0] degrades to a pointer: a different type from an array */
#define __must_be_array(a) BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0]))
#define noinline __attribute__((noinline))
#define __packed __attribute__((packed))
#define __noreturn __attribute__((noreturn))
#define __aligned(x) __attribute__((aligned(x)))
#define __printf(a, b) __attribute__((format(printf, a, b)))
#define __scanf(a, b) __attribute__((format(scanf, a, b)))


@ -17,6 +17,10 @@
# define __always_inline inline __attribute__((always_inline))
#endif
#ifndef noinline
#define noinline
#endif
/* Are two types/vars the same type (ignoring qualifiers)? */
#ifndef __same_type
# define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))


@ -5,6 +5,8 @@
#include <stddef.h>
#include <assert.h>
#include <linux/compiler.h>
#include <endian.h>
#include <byteswap.h>
#ifndef UINT_MAX
#define UINT_MAX (~0U)
@ -68,12 +70,33 @@
#endif
#endif
/*
* Both need more care to handle endianness
* (Don't use bitmap_copy_le() for now)
*/
#define cpu_to_le64(x) (x)
#define cpu_to_le32(x) (x)
#if __BYTE_ORDER == __BIG_ENDIAN
#define cpu_to_le16 bswap_16
#define cpu_to_le32 bswap_32
#define cpu_to_le64 bswap_64
#define le16_to_cpu bswap_16
#define le32_to_cpu bswap_32
#define le64_to_cpu bswap_64
#define cpu_to_be16
#define cpu_to_be32
#define cpu_to_be64
#define be16_to_cpu
#define be32_to_cpu
#define be64_to_cpu
#else
#define cpu_to_le16
#define cpu_to_le32
#define cpu_to_le64
#define le16_to_cpu
#define le32_to_cpu
#define le64_to_cpu
#define cpu_to_be16 bswap_16
#define cpu_to_be32 bswap_32
#define cpu_to_be64 bswap_64
#define be16_to_cpu bswap_16
#define be32_to_cpu bswap_32
#define be64_to_cpu bswap_64
#endif
int vscnprintf(char *buf, size_t size, const char *fmt, va_list args);
int scnprintf(char * buf, size_t size, const char * fmt, ...);


@ -387,6 +387,22 @@ int filename__read_str(const char *filename, char **buf, size_t *sizep)
return err;
}
int filename__write_int(const char *filename, int value)
{
int fd = open(filename, O_WRONLY), err = -1;
char buf[64];
if (fd < 0)
return err;
sprintf(buf, "%d", value);
if (write(fd, buf, sizeof(buf)) == sizeof(buf))
err = 0;
close(fd);
return err;
}
int procfs__read_str(const char *entry, char **buf, size_t *sizep)
{
char path[PATH_MAX];
@ -480,3 +496,17 @@ int sysctl__read_int(const char *sysctl, int *value)
return filename__read_int(path, value);
}
int sysfs__write_int(const char *entry, int value)
{
char path[PATH_MAX];
const char *sysfs = sysfs__mountpoint();
if (!sysfs)
return -1;
if (snprintf(path, sizeof(path), "%s/%s", sysfs, entry) >= PATH_MAX)
return -1;
return filename__write_int(path, value);
}


@ -31,6 +31,8 @@ int filename__read_int(const char *filename, int *value);
int filename__read_ull(const char *filename, unsigned long long *value);
int filename__read_str(const char *filename, char **buf, size_t *sizep);
int filename__write_int(const char *filename, int value);
int procfs__read_str(const char *entry, char **buf, size_t *sizep);
int sysctl__read_int(const char *sysctl, int *value);
@ -38,4 +40,6 @@ int sysfs__read_int(const char *entry, int *value);
int sysfs__read_ull(const char *entry, unsigned long long *value);
int sysfs__read_str(const char *entry, char **buf, size_t *sizep);
int sysfs__read_bool(const char *entry, bool *value);
int sysfs__write_int(const char *entry, int value);
#endif /* __API_FS__ */


@ -1009,7 +1009,7 @@ GrpTable: Grp15
1: fxstor | RDGSBASE Ry (F3),(11B)
2: vldmxcsr Md (v1) | WRFSBASE Ry (F3),(11B)
3: vstmxcsr Md (v1) | WRGSBASE Ry (F3),(11B)
4: XSAVE
4: XSAVE | ptwrite Ey (F3),(11B)
5: XRSTOR | lfence (11B)
6: XSAVEOPT | clwb (66) | mfence (11B)
7: clflush | clflushopt (66) | sfence (11B)


@ -108,6 +108,9 @@ approach is available to export the data to a postgresql database. Refer to
script export-to-postgresql.py for more details, and to script
call-graph-from-postgresql.py for an example of using the database.
There is also script intel-pt-events.py which provides an example of how to
unpack the raw data for power events and PTWRITE.
As mentioned above, it is easy to capture too much data. One way to limit the
data captured is to use 'snapshot' mode which is explained further below.
Refer to 'new snapshot option' and 'Intel PT modes of operation' further below.
@ -364,6 +367,42 @@ cyc_thresh Specifies how frequently CYC packets are produced - see cyc
CYC packets are not requested by default.
pt Specifies pass-through which enables the 'branch' config term.
The default config selects 'pt' if it is available, so a user will
never need to specify this term.
branch Enable branch tracing. Branch tracing is enabled by default so to
disable branch tracing use 'branch=0'.
The default config selects 'branch' if it is available.
ptw Enable PTWRITE packets which are produced when a ptwrite instruction
is executed.
Support for this feature is indicated by:
/sys/bus/event_source/devices/intel_pt/caps/ptwrite
which contains "1" if the feature is supported and
"0" otherwise.
fup_on_ptw Enable a FUP packet to follow the PTWRITE packet. The FUP packet
provides the address of the ptwrite instruction. In the absence of
fup_on_ptw, the decoder will use the address of the previous branch
if branch tracing is enabled, otherwise the address will be zero.
Note that fup_on_ptw will work even when branch tracing is disabled.
pwr_evt Enable power events. The power events provide information about
changes to the CPU C-state.
Support for this feature is indicated by:
/sys/bus/event_source/devices/intel_pt/caps/power_event_trace
which contains "1" if the feature is supported and
"0" otherwise.
new snapshot option
-------------------
@ -674,13 +713,15 @@ Having no option is the same as
which, in turn, is the same as
--itrace=ibxe
--itrace=ibxwpe
The letters are:
i synthesize "instructions" events
b synthesize "branches" events
x synthesize "transactions" events
w synthesize "ptwrite" events
p synthesize "power" events
c synthesize branches events (calls only)
r synthesize branches events (returns only)
e synthesize tracing error events
@ -699,7 +740,40 @@ and "r" can be combined to get calls and returns.
'flags' field can be used in perf script to determine whether the event is a
transaction start, commit or abort.
Error events are new. They show where the decoder lost the trace. Error events
Note that "instructions", "branches" and "transactions" events depend on code
flow packets which can be disabled by using the config term "branch=0". Refer
to the config terms section above.
"ptwrite" events record the payload of the ptwrite instruction and whether
"fup_on_ptw" was used. "ptwrite" events depend on PTWRITE packets which are
recorded only if the "ptw" config term was used. Refer to the config terms
section above. perf script "synth" field displays "ptwrite" information like
this: "ip: 0 payload: 0x123456789abcdef0" where "ip" is 1 if "fup_on_ptw" was
used.
"Power" events correspond to power event packets and CBR (core-to-bus ratio)
packets. While CBR packets are always recorded when tracing is enabled, power
event packets are recorded only if the "pwr_evt" config term was used. Refer to
the config terms section above. The power events record information about
C-state changes, whereas CBR is indicative of CPU frequency. perf script
"event,synth" fields display information like this:
cbr: cbr: 22 freq: 2189 MHz (200%)
mwait: hints: 0x60 extensions: 0x1
pwre: hw: 0 cstate: 2 sub-cstate: 0
exstop: ip: 1
pwrx: deepest cstate: 2 last cstate: 2 wake reason: 0x4
Where:
"cbr" includes the frequency and the percentage of maximum non-turbo
"mwait" shows mwait hints and extensions
"pwre" shows C-state transitions (to a C-state deeper than C0) and
whether initiated by hardware
"exstop" indicates execution stopped and whether the IP was recorded
exactly,
"pwrx" indicates return to C0
For more details refer to the Intel 64 and IA-32 Architectures Software
Developer Manuals.
Error events show where the decoder lost the trace. Error events
are quite important. Users must know if what they are seeing is a complete
picture or not.
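As a sketch of examining the synthesized events from such a trace, using the
option letters and fields documented above
	perf script --itrace=wpe -F comm,tid,time,event,synth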


@ -3,13 +3,15 @@
c synthesize branches events (calls only)
r synthesize branches events (returns only)
x synthesize transactions events
w synthesize ptwrite events
p synthesize power events
e synthesize error events
d create a debug log
g synthesize a call chain (use with i or x)
l synthesize last branch entries (use with i or x)
s skip initial number of events
The default is all events i.e. the same as --itrace=ibxe
The default is all events i.e. the same as --itrace=ibxwpe
In addition, the period (default 100000) for instructions events
can be specified in units of:
@ -26,8 +28,8 @@
Also the number of last branch entries (default 64, max. 1024) for
instructions or transactions events can be specified.
It is also possible to skip events generated (instructions, branches, transactions)
at the beginning. This is useful to ignore initialization code.
It is also possible to skip events generated (instructions, branches, transactions,
ptwrite, power) at the beginning. This is useful to ignore initialization code.
--itrace=i0nss1000000


@ -48,6 +48,39 @@ OPTIONS
Ranges of CPUs are specified with -: 0-2.
Default is to trace on all online CPUs.
-T::
--trace-funcs=::
Only trace functions given by the argument. Multiple functions
can be given by using this option more than once. The function
argument also can be a glob pattern. It will be passed to
'set_ftrace_filter' in tracefs.
-N::
--notrace-funcs=::
Do not trace functions given by the argument. Like -T option,
this can be used more than once to specify multiple functions
(or glob patterns). It will be passed to 'set_ftrace_notrace'
in tracefs.
-G::
--graph-funcs=::
Set graph filter on the given function (or a glob pattern).
This is useful for the function_graph tracer only and enables
tracing for functions executed from the given function.
This can be used more than once to specify multiple functions.
It will be passed to 'set_graph_function' in tracefs.
-g::
--nograph-funcs=::
Set graph notrace filter on the given function (or a glob pattern).
Like -G option, this is useful for the function_graph tracer only
and disables tracing for functions executed from the given function.
This can be used more than once to specify multiple functions.
It will be passed to 'set_graph_notrace' in tracefs.
-D::
--graph-depth=::
Set max depth for function graph tracer to follow
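A sketch of combining the new options (the function names and the traced
command are only illustrative):
	perf ftrace -T 'vfs_*' -N vfs_write sleep 1
	perf ftrace -G do_sys_open -D 2 sleep 1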
SEE ALSO
--------


@ -116,8 +116,9 @@ OPTIONS
--fields::
Comma separated list of fields to print. Options are:
comm, tid, pid, time, cpu, event, trace, ip, sym, dso, addr, symoff,
srcline, period, iregs, brstack, brstacksym, flags, bpf-output, brstackinsn,
callindent, insn, insnlen. Field list can be prepended with the type, trace, sw or hw,
srcline, period, iregs, brstack, brstacksym, flags, bpf-output, brstackinsn, brstackoff,
callindent, insn, insnlen, synth.
Field list can be prepended with the type, trace, sw or hw,
to indicate to which event type the field list applies.
e.g., -F sw:comm,tid,time,ip,sym and -F trace:time,cpu,trace
@ -130,6 +131,14 @@ OPTIONS
i.e., the specified fields apply to all event types if the type string
is not given.
In addition to overriding fields, it is also possible to add or remove
fields from the defaults. For example
-F -cpu,+insn
removes the cpu field and adds the insn field. Adding/removing fields
cannot be mixed with normal overriding.
The arguments are processed in the order received. A later usage can
reset a prior request. e.g.:
@ -185,6 +194,9 @@ OPTIONS
instruction bytes and the instruction length of the current
instruction.
The synth field is used by synthesized events which may be created during
Instruction Trace decoding.
Finally, a user may not set fields to none for all event types.
i.e., -F "" is not allowed.
@ -203,6 +215,8 @@ OPTIONS
is printed. This is the full execution path leading to the sample. This is only supported when the
sample was recorded with perf record -b or -j any.
The brstackoff field will print an offset into a specific dso/binary.
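A sketch of combining the new field with dso (assuming the data was recorded
with -b or -j any):
	perf script -F comm,ip,brstackoff,dso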
-k::
--vmlinux=<file>::
vmlinux pathname


@ -239,6 +239,20 @@ taskset.
--no-merge::
Do not merge results from same PMUs.
--smi-cost::
Measure SMI cost if msr/aperf/ and msr/smi/ events are supported.
During the measurement, /sys/devices/cpu/freeze_on_smi will be set to
freeze core counters on SMI.
The aperf counter will not be affected by the setting.
The cost of SMI can be measured by (aperf - unhalted core cycles).
In practice, the percentage of SMI cycles is very useful for
performance-oriented analysis. --metric-only will be applied by default.
The output is SMI cycles%, which equals (aperf - unhalted core cycles) / aperf.
Users who want the actual values can apply --no-metric-only.
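A sketch of a system-wide measurement using this option:
	perf stat -a --smi-cost -- sleep 10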
EXAMPLES
--------


@ -61,7 +61,7 @@ endif
# Disable it on all other architectures in case libdw unwind
# support is detected in system. Add supported architectures
# to the check.
ifneq ($(SRCARCH),$(filter $(SRCARCH),x86 arm))
ifneq ($(SRCARCH),$(filter $(SRCARCH),x86 arm powerpc))
NO_LIBDW_DWARF_UNWIND := 1
endif


@ -17,6 +17,7 @@
#include <api/fs/fs.h>
#include <linux/bitops.h>
#include <linux/compiler.h>
#include <linux/coresight-pmu.h>
#include <linux/kernel.h>
#include <linux/log2.h>
@ -202,19 +203,18 @@ static int cs_etm_recording_options(struct auxtrace_record *itr,
pr_debug2("%s snapshot size: %zu\n", CORESIGHT_ETM_PMU_NAME,
opts->auxtrace_snapshot_size);
if (cs_etm_evsel) {
/*
* To obtain the auxtrace buffer file descriptor, the auxtrace
* event must come first.
*/
perf_evlist__to_front(evlist, cs_etm_evsel);
/*
* In the case of per-cpu mmaps, we need the CPU on the
* AUX event.
*/
if (!cpu_map__empty(cpus))
perf_evsel__set_sample_bit(cs_etm_evsel, CPU);
}
/*
* To obtain the auxtrace buffer file descriptor, the auxtrace
* event must come first.
*/
perf_evlist__to_front(evlist, cs_etm_evsel);
/*
* In the case of per-cpu mmaps, we need the CPU on the
* AUX event.
*/
if (!cpu_map__empty(cpus))
perf_evsel__set_sample_bit(cs_etm_evsel, CPU);
/* Add dummy event to keep tracking */
if (opts->full_auxtrace) {
@ -583,8 +583,7 @@ static FILE *cs_device__open_file(const char *name)
}
static __attribute__((format(printf, 2, 3)))
int cs_device__print_file(const char *name, const char *fmt, ...)
static int __printf(2, 3) cs_device__print_file(const char *name, const char *fmt, ...)
{
va_list args;
FILE *file;


@ -5,4 +5,6 @@ libperf-y += perf_regs.o
libperf-$(CONFIG_DWARF) += dwarf-regs.o
libperf-$(CONFIG_DWARF) += skip-callchain-idx.o
libperf-$(CONFIG_LIBUNWIND) += unwind-libunwind.o
libperf-$(CONFIG_LIBDW_DWARF_UNWIND) += unwind-libdw.o


@ -0,0 +1,73 @@
#include <elfutils/libdwfl.h>
#include "../../util/unwind-libdw.h"
#include "../../util/perf_regs.h"
#include "../../util/event.h"
/* See backends/ppc_initreg.c and backends/ppc_regs.c in elfutils. */
static const int special_regs[3][2] = {
{ 65, PERF_REG_POWERPC_LINK },
{ 101, PERF_REG_POWERPC_XER },
{ 109, PERF_REG_POWERPC_CTR },
};
bool libdw__arch_set_initial_registers(Dwfl_Thread *thread, void *arg)
{
struct unwind_info *ui = arg;
struct regs_dump *user_regs = &ui->sample->user_regs;
Dwarf_Word dwarf_regs[32], dwarf_nip;
size_t i;
#define REG(r) ({ \
Dwarf_Word val = 0; \
perf_reg_value(&val, user_regs, PERF_REG_POWERPC_##r); \
val; \
})
dwarf_regs[0] = REG(R0);
dwarf_regs[1] = REG(R1);
dwarf_regs[2] = REG(R2);
dwarf_regs[3] = REG(R3);
dwarf_regs[4] = REG(R4);
dwarf_regs[5] = REG(R5);
dwarf_regs[6] = REG(R6);
dwarf_regs[7] = REG(R7);
dwarf_regs[8] = REG(R8);
dwarf_regs[9] = REG(R9);
dwarf_regs[10] = REG(R10);
dwarf_regs[11] = REG(R11);
dwarf_regs[12] = REG(R12);
dwarf_regs[13] = REG(R13);
dwarf_regs[14] = REG(R14);
dwarf_regs[15] = REG(R15);
dwarf_regs[16] = REG(R16);
dwarf_regs[17] = REG(R17);
dwarf_regs[18] = REG(R18);
dwarf_regs[19] = REG(R19);
dwarf_regs[20] = REG(R20);
dwarf_regs[21] = REG(R21);
dwarf_regs[22] = REG(R22);
dwarf_regs[23] = REG(R23);
dwarf_regs[24] = REG(R24);
dwarf_regs[25] = REG(R25);
dwarf_regs[26] = REG(R26);
dwarf_regs[27] = REG(R27);
dwarf_regs[28] = REG(R28);
dwarf_regs[29] = REG(R29);
dwarf_regs[30] = REG(R30);
dwarf_regs[31] = REG(R31);
if (!dwfl_thread_state_registers(thread, 0, 32, dwarf_regs))
return false;
dwarf_nip = REG(NIP);
dwfl_thread_state_register_pc(thread, dwarf_nip);
for (i = 0; i < ARRAY_SIZE(special_regs); i++) {
Dwarf_Word val = 0;
perf_reg_value(&val, user_regs, special_regs[i][1]);
if (!dwfl_thread_state_registers(thread,
special_regs[i][0], 1,
&val))
return false;
}
return true;
}


@ -1664,3 +1664,15 @@
"0f c7 1d 78 56 34 12 \txrstors 0x12345678",},
{{0x0f, 0xc7, 0x9c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
"0f c7 9c c8 78 56 34 12 \txrstors 0x12345678(%eax,%ecx,8)",},
{{0xf3, 0x0f, 0xae, 0x20, }, 4, 0, "", "",
"f3 0f ae 20 \tptwritel (%eax)",},
{{0xf3, 0x0f, 0xae, 0x25, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
"f3 0f ae 25 78 56 34 12 \tptwritel 0x12345678",},
{{0xf3, 0x0f, 0xae, 0xa4, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
"f3 0f ae a4 c8 78 56 34 12 \tptwritel 0x12345678(%eax,%ecx,8)",},
{{0xf3, 0x0f, 0xae, 0x20, }, 4, 0, "", "",
"f3 0f ae 20 \tptwritel (%eax)",},
{{0xf3, 0x0f, 0xae, 0x25, 0x78, 0x56, 0x34, 0x12, }, 8, 0, "", "",
"f3 0f ae 25 78 56 34 12 \tptwritel 0x12345678",},
{{0xf3, 0x0f, 0xae, 0xa4, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
"f3 0f ae a4 c8 78 56 34 12 \tptwritel 0x12345678(%eax,%ecx,8)",},


@ -1696,3 +1696,33 @@
"0f c7 9c c8 78 56 34 12 \txrstors 0x12345678(%rax,%rcx,8)",},
{{0x41, 0x0f, 0xc7, 0x9c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
"41 0f c7 9c c8 78 56 34 12 \txrstors 0x12345678(%r8,%rcx,8)",},
{{0xf3, 0x0f, 0xae, 0x20, }, 4, 0, "", "",
"f3 0f ae 20 \tptwritel (%rax)",},
{{0xf3, 0x41, 0x0f, 0xae, 0x20, }, 5, 0, "", "",
"f3 41 0f ae 20 \tptwritel (%r8)",},
{{0xf3, 0x0f, 0xae, 0x24, 0x25, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
"f3 0f ae 24 25 78 56 34 12 \tptwritel 0x12345678",},
{{0xf3, 0x0f, 0xae, 0xa4, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
"f3 0f ae a4 c8 78 56 34 12 \tptwritel 0x12345678(%rax,%rcx,8)",},
{{0xf3, 0x41, 0x0f, 0xae, 0xa4, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 10, 0, "", "",
"f3 41 0f ae a4 c8 78 56 34 12 \tptwritel 0x12345678(%r8,%rcx,8)",},
{{0xf3, 0x0f, 0xae, 0x20, }, 4, 0, "", "",
"f3 0f ae 20 \tptwritel (%rax)",},
{{0xf3, 0x41, 0x0f, 0xae, 0x20, }, 5, 0, "", "",
"f3 41 0f ae 20 \tptwritel (%r8)",},
{{0xf3, 0x0f, 0xae, 0x24, 0x25, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
"f3 0f ae 24 25 78 56 34 12 \tptwritel 0x12345678",},
{{0xf3, 0x0f, 0xae, 0xa4, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "", "",
"f3 0f ae a4 c8 78 56 34 12 \tptwritel 0x12345678(%rax,%rcx,8)",},
{{0xf3, 0x41, 0x0f, 0xae, 0xa4, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 10, 0, "", "",
"f3 41 0f ae a4 c8 78 56 34 12 \tptwritel 0x12345678(%r8,%rcx,8)",},
{{0xf3, 0x48, 0x0f, 0xae, 0x20, }, 5, 0, "", "",
"f3 48 0f ae 20 \tptwriteq (%rax)",},
{{0xf3, 0x49, 0x0f, 0xae, 0x20, }, 5, 0, "", "",
"f3 49 0f ae 20 \tptwriteq (%r8)",},
{{0xf3, 0x48, 0x0f, 0xae, 0x24, 0x25, 0x78, 0x56, 0x34, 0x12, }, 10, 0, "", "",
"f3 48 0f ae 24 25 78 56 34 12 \tptwriteq 0x12345678",},
{{0xf3, 0x48, 0x0f, 0xae, 0xa4, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 10, 0, "", "",
"f3 48 0f ae a4 c8 78 56 34 12 \tptwriteq 0x12345678(%rax,%rcx,8)",},
{{0xf3, 0x49, 0x0f, 0xae, 0xa4, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 10, 0, "", "",
"f3 49 0f ae a4 c8 78 56 34 12 \tptwriteq 0x12345678(%r8,%rcx,8)",},


@ -1343,6 +1343,26 @@ int main(void)
asm volatile("xrstors 0x12345678(%rax,%rcx,8)");
asm volatile("xrstors 0x12345678(%r8,%rcx,8)");
/* ptwrite */
asm volatile("ptwrite (%rax)");
asm volatile("ptwrite (%r8)");
asm volatile("ptwrite (0x12345678)");
asm volatile("ptwrite 0x12345678(%rax,%rcx,8)");
asm volatile("ptwrite 0x12345678(%r8,%rcx,8)");
asm volatile("ptwritel (%rax)");
asm volatile("ptwritel (%r8)");
asm volatile("ptwritel (0x12345678)");
asm volatile("ptwritel 0x12345678(%rax,%rcx,8)");
asm volatile("ptwritel 0x12345678(%r8,%rcx,8)");
asm volatile("ptwriteq (%rax)");
asm volatile("ptwriteq (%r8)");
asm volatile("ptwriteq (0x12345678)");
asm volatile("ptwriteq 0x12345678(%rax,%rcx,8)");
asm volatile("ptwriteq 0x12345678(%r8,%rcx,8)");
#else /* #ifdef __x86_64__ */
/* bound r32, mem (same op code as EVEX prefix) */
@ -2653,6 +2673,16 @@ int main(void)
asm volatile("xrstors (0x12345678)");
asm volatile("xrstors 0x12345678(%eax,%ecx,8)");
/* ptwrite */
asm volatile("ptwrite (%eax)");
asm volatile("ptwrite (0x12345678)");
asm volatile("ptwrite 0x12345678(%eax,%ecx,8)");
asm volatile("ptwritel (%eax)");
asm volatile("ptwritel (0x12345678)");
asm volatile("ptwritel 0x12345678(%eax,%ecx,8)");
#endif /* #ifndef __x86_64__ */
/* Following line is a marker for the awk script - do not change */


@ -35,10 +35,6 @@
#define KiB_MASK(x) (KiB(x) - 1)
#define MiB_MASK(x) (MiB(x) - 1)
#define INTEL_BTS_DFLT_SAMPLE_SIZE KiB(4)
#define INTEL_BTS_MAX_SAMPLE_SIZE KiB(60)
struct intel_bts_snapshot_ref {
void *ref_buf;
size_t ref_offset;


@ -40,10 +40,6 @@
#define KiB_MASK(x) (KiB(x) - 1)
#define MiB_MASK(x) (MiB(x) - 1)
#define INTEL_PT_DEFAULT_SAMPLE_SIZE KiB(4)
#define INTEL_PT_MAX_SAMPLE_SIZE KiB(60)
#define INTEL_PT_PSB_PERIOD_NEAR 256
struct intel_pt_snapshot_ref {
@ -196,6 +192,7 @@ static u64 intel_pt_default_config(struct perf_pmu *intel_pt_pmu)
int psb_cyc, psb_periods, psb_period;
int pos = 0;
u64 config;
char c;
pos += scnprintf(buf + pos, sizeof(buf) - pos, "tsc");
@ -229,6 +226,10 @@ static u64 intel_pt_default_config(struct perf_pmu *intel_pt_pmu)
}
}
if (perf_pmu__scan_file(intel_pt_pmu, "format/pt", "%c", &c) == 1 &&
perf_pmu__scan_file(intel_pt_pmu, "format/branch", "%c", &c) == 1)
pos += scnprintf(buf + pos, sizeof(buf) - pos, ",pt,branch");
pr_debug2("%s default config: %s\n", intel_pt_pmu->name, buf);
intel_pt_parse_terms(&intel_pt_pmu->format, buf, &config);


@ -700,7 +700,7 @@ static inline uint32_t lfsr_32(uint32_t lfsr)
* kernel (KSM, zero page, etc.) cannot optimize away RAM
* accesses:
*/
static inline u64 access_data(u64 *data __attribute__((unused)), u64 val)
static inline u64 access_data(u64 *data, u64 val)
{
if (g->p.data_reads)
val += *data;


@ -1725,10 +1725,10 @@ static int c2c_hists__init_sort(struct perf_hpp_list *hpp_list, char *name)
tok; tok = strtok_r(NULL, ", ", &tmp)) { \
ret = _fn(hpp_list, tok); \
if (ret == -EINVAL) { \
error("Invalid --fields key: `%s'", tok); \
pr_err("Invalid --fields key: `%s'", tok); \
break; \
} else if (ret == -ESRCH) { \
error("Unknown --fields key: `%s'", tok); \
pr_err("Unknown --fields key: `%s'", tok); \
break; \
} \
} \


@ -156,7 +156,7 @@ static int parse_config_arg(char *arg, char **var, char **value)
int cmd_config(int argc, const char **argv)
{
int i, ret = 0;
int i, ret = -1;
struct perf_config_set *set;
char *user_config = mkpath("%s/.perfconfig", getenv("HOME"));
const char *config_filename;
@ -186,10 +186,8 @@ int cmd_config(int argc, const char **argv)
* because of reinitializing with options config file location.
*/
set = perf_config_set__new();
if (!set) {
ret = -1;
if (!set)
goto out_err;
}
switch (actions) {
case ACTION_LIST:
@ -197,41 +195,54 @@ int cmd_config(int argc, const char **argv)
pr_err("Error: takes no arguments\n");
parse_options_usage(config_usage, config_options, "l", 1);
} else {
ret = show_config(set);
if (ret < 0)
if (show_config(set) < 0) {
pr_err("Nothing configured, "
"please check your %s \n", config_filename);
goto out_err;
}
}
break;
default:
if (argc) {
for (i = 0; argv[i]; i++) {
char *var, *value;
char *arg = strdup(argv[i]);
if (!arg) {
pr_err("%s: strdup failed\n", __func__);
ret = -1;
break;
}
if (parse_config_arg(arg, &var, &value) < 0) {
free(arg);
ret = -1;
break;
}
if (value == NULL)
ret = show_spec_config(set, var);
else
ret = set_config(set, config_filename, var, value);
free(arg);
}
} else
if (!argc) {
usage_with_options(config_usage, config_options);
break;
}
for (i = 0; argv[i]; i++) {
char *var, *value;
char *arg = strdup(argv[i]);
if (!arg) {
pr_err("%s: strdup failed\n", __func__);
goto out_err;
}
if (parse_config_arg(arg, &var, &value) < 0) {
free(arg);
goto out_err;
}
if (value == NULL) {
if (show_spec_config(set, var) < 0) {
pr_err("%s is not configured: %s\n",
var, config_filename);
free(arg);
goto out_err;
}
} else {
if (set_config(set, config_filename, var, value) < 0) {
pr_err("Failed to set '%s=%s' on %s\n",
var, value, config_filename);
free(arg);
goto out_err;
}
}
free(arg);
}
}
perf_config_set__delete(set);
ret = 0;
out_err:
perf_config_set__delete(set);
return ret;
}


@ -1302,7 +1302,10 @@ static int diff__config(const char *var, const char *value,
void *cb __maybe_unused)
{
if (!strcmp(var, "diff.order")) {
sort_compute = perf_config_int(var, value);
int ret;
if (perf_config_int(&ret, var, value) < 0)
return -1;
sort_compute = ret;
return 0;
}
if (!strcmp(var, "diff.compute")) {


@ -28,9 +28,19 @@
#define DEFAULT_TRACER "function_graph"
struct perf_ftrace {
struct perf_evlist *evlist;
struct target target;
const char *tracer;
struct perf_evlist *evlist;
struct target target;
const char *tracer;
struct list_head filters;
struct list_head notrace;
struct list_head graph_funcs;
struct list_head nograph_funcs;
int graph_depth;
};
struct filter_entry {
struct list_head list;
char name[];
};
static bool done;
@ -61,6 +71,7 @@ static int __write_tracing_file(const char *name, const char *val, bool append)
int fd, ret = -1;
ssize_t size = strlen(val);
int flags = O_WRONLY;
char errbuf[512];
file = get_tracing_file(name);
if (!file) {
@ -75,14 +86,16 @@ static int __write_tracing_file(const char *name, const char *val, bool append)
fd = open(file, flags);
if (fd < 0) {
pr_debug("cannot open tracing file: %s\n", name);
pr_debug("cannot open tracing file: %s: %s\n",
name, str_error_r(errno, errbuf, sizeof(errbuf)));
goto out;
}
if (write(fd, val, size) == size)
ret = 0;
else
pr_debug("write '%s' to tracing/%s failed\n", val, name);
pr_debug("write '%s' to tracing/%s failed: %s\n",
val, name, str_error_r(errno, errbuf, sizeof(errbuf)));
close(fd);
out:
@ -101,6 +114,7 @@ static int append_tracing_file(const char *name, const char *val)
}
static int reset_tracing_cpu(void);
static void reset_tracing_filters(void);
static int reset_tracing_files(struct perf_ftrace *ftrace __maybe_unused)
{
@ -116,6 +130,10 @@ static int reset_tracing_files(struct perf_ftrace *ftrace __maybe_unused)
if (reset_tracing_cpu() < 0)
return -1;
if (write_tracing_file("max_graph_depth", "0") < 0)
return -1;
reset_tracing_filters();
return 0;
}
@ -181,6 +199,68 @@ static int reset_tracing_cpu(void)
return ret;
}
static int __set_tracing_filter(const char *filter_file, struct list_head *funcs)
{
struct filter_entry *pos;
list_for_each_entry(pos, funcs, list) {
if (append_tracing_file(filter_file, pos->name) < 0)
return -1;
}
return 0;
}
static int set_tracing_filters(struct perf_ftrace *ftrace)
{
int ret;
ret = __set_tracing_filter("set_ftrace_filter", &ftrace->filters);
if (ret < 0)
return ret;
ret = __set_tracing_filter("set_ftrace_notrace", &ftrace->notrace);
if (ret < 0)
return ret;
ret = __set_tracing_filter("set_graph_function", &ftrace->graph_funcs);
if (ret < 0)
return ret;
/* old kernels do not have this filter */
__set_tracing_filter("set_graph_notrace", &ftrace->nograph_funcs);
return ret;
}
static void reset_tracing_filters(void)
{
write_tracing_file("set_ftrace_filter", " ");
write_tracing_file("set_ftrace_notrace", " ");
write_tracing_file("set_graph_function", " ");
write_tracing_file("set_graph_notrace", " ");
}
static int set_tracing_depth(struct perf_ftrace *ftrace)
{
char buf[16];
if (ftrace->graph_depth == 0)
return 0;
if (ftrace->graph_depth < 0) {
pr_err("invalid graph depth: %d\n", ftrace->graph_depth);
return -1;
}
snprintf(buf, sizeof(buf), "%d", ftrace->graph_depth);
if (write_tracing_file("max_graph_depth", buf) < 0)
return -1;
return 0;
}
static int __cmd_ftrace(struct perf_ftrace *ftrace, int argc, const char **argv)
{
char *trace_file;
@ -223,11 +303,23 @@ static int __cmd_ftrace(struct perf_ftrace *ftrace, int argc, const char **argv)
goto out_reset;
}
if (set_tracing_filters(ftrace) < 0) {
pr_err("failed to set tracing filters\n");
goto out_reset;
}
if (set_tracing_depth(ftrace) < 0) {
pr_err("failed to set graph depth\n");
goto out_reset;
}
if (write_tracing_file("current_tracer", ftrace->tracer) < 0) {
pr_err("failed to set current_tracer to %s\n", ftrace->tracer);
goto out_reset;
}
setup_pager();
trace_file = get_tracing_file("trace_pipe");
if (!trace_file) {
pr_err("failed to open trace_pipe\n");
@ -251,8 +343,6 @@ static int __cmd_ftrace(struct perf_ftrace *ftrace, int argc, const char **argv)
goto out_close_fd;
}
setup_pager();
perf_evlist__start_workload(ftrace->evlist);
while (!done) {
@ -307,6 +397,32 @@ static int perf_ftrace_config(const char *var, const char *value, void *cb)
return -1;
}
static int parse_filter_func(const struct option *opt, const char *str,
int unset __maybe_unused)
{
struct list_head *head = opt->value;
struct filter_entry *entry;
entry = malloc(sizeof(*entry) + strlen(str) + 1);
if (entry == NULL)
return -ENOMEM;
strcpy(entry->name, str);
list_add_tail(&entry->list, head);
return 0;
}
static void delete_filter_func(struct list_head *head)
{
struct filter_entry *pos, *tmp;
list_for_each_entry_safe(pos, tmp, head, list) {
list_del(&pos->list);
free(pos);
}
}
int cmd_ftrace(int argc, const char **argv)
{
int ret;
@ -330,9 +446,24 @@ int cmd_ftrace(int argc, const char **argv)
"system-wide collection from all CPUs"),
OPT_STRING('C', "cpu", &ftrace.target.cpu_list, "cpu",
"list of cpus to monitor"),
OPT_CALLBACK('T', "trace-funcs", &ftrace.filters, "func",
"trace given functions only", parse_filter_func),
OPT_CALLBACK('N', "notrace-funcs", &ftrace.notrace, "func",
"do not trace given functions", parse_filter_func),
OPT_CALLBACK('G', "graph-funcs", &ftrace.graph_funcs, "func",
"Set graph filter on given functions", parse_filter_func),
OPT_CALLBACK('g', "nograph-funcs", &ftrace.nograph_funcs, "func",
"Set nograph filter on given functions", parse_filter_func),
OPT_INTEGER('D', "graph-depth", &ftrace.graph_depth,
"Max depth for function graph tracer"),
OPT_END()
};
INIT_LIST_HEAD(&ftrace.filters);
INIT_LIST_HEAD(&ftrace.notrace);
INIT_LIST_HEAD(&ftrace.graph_funcs);
INIT_LIST_HEAD(&ftrace.nograph_funcs);
ret = perf_config(perf_ftrace_config, &ftrace);
if (ret < 0)
return -1;
@ -348,12 +479,14 @@ int cmd_ftrace(int argc, const char **argv)
target__strerror(&ftrace.target, ret, errbuf, 512);
pr_err("%s\n", errbuf);
return -EINVAL;
goto out_delete_filters;
}
ftrace.evlist = perf_evlist__new();
if (ftrace.evlist == NULL)
return -ENOMEM;
if (ftrace.evlist == NULL) {
ret = -ENOMEM;
goto out_delete_filters;
}
ret = perf_evlist__create_maps(ftrace.evlist, &ftrace.target);
if (ret < 0)
@ -364,5 +497,11 @@ int cmd_ftrace(int argc, const char **argv)
out_delete_evlist:
perf_evlist__delete(ftrace.evlist);
out_delete_filters:
delete_filter_func(&ftrace.filters);
delete_filter_func(&ftrace.notrace);
delete_filter_func(&ftrace.graph_funcs);
delete_filter_func(&ftrace.nograph_funcs);
return ret;
}


@ -108,10 +108,14 @@ static int check_emacsclient_version(void)
return ret;
}
static void exec_woman_emacs(const char *path, const char *page)
static void exec_failed(const char *cmd)
{
char sbuf[STRERR_BUFSIZE];
pr_warning("failed to exec '%s': %s", cmd, str_error_r(errno, sbuf, sizeof(sbuf)));
}
static void exec_woman_emacs(const char *path, const char *page)
{
if (!check_emacsclient_version()) {
/* This works only with emacsclient version >= 22. */
char *man_page;
@ -122,8 +126,7 @@ static void exec_woman_emacs(const char *path, const char *page)
execlp(path, "emacsclient", "-e", man_page, NULL);
free(man_page);
}
warning("failed to exec '%s': %s", path,
str_error_r(errno, sbuf, sizeof(sbuf)));
exec_failed(path);
}
}
@ -134,7 +137,6 @@ static void exec_man_konqueror(const char *path, const char *page)
if (display && *display) {
char *man_page;
const char *filename = "kfmclient";
char sbuf[STRERR_BUFSIZE];
/* It's simpler to launch konqueror using kfmclient. */
if (path) {
@ -155,33 +157,27 @@ static void exec_man_konqueror(const char *path, const char *page)
execlp(path, filename, "newTab", man_page, NULL);
free(man_page);
}
warning("failed to exec '%s': %s", path,
str_error_r(errno, sbuf, sizeof(sbuf)));
exec_failed(path);
}
}
static void exec_man_man(const char *path, const char *page)
{
char sbuf[STRERR_BUFSIZE];
if (!path)
path = "man";
execlp(path, "man", page, NULL);
warning("failed to exec '%s': %s", path,
str_error_r(errno, sbuf, sizeof(sbuf)));
exec_failed(path);
}
static void exec_man_cmd(const char *cmd, const char *page)
{
char sbuf[STRERR_BUFSIZE];
char *shell_cmd;
if (asprintf(&shell_cmd, "%s %s", cmd, page) > 0) {
execl("/bin/sh", "sh", "-c", shell_cmd, NULL);
free(shell_cmd);
}
warning("failed to exec '%s': %s", cmd,
str_error_r(errno, sbuf, sizeof(sbuf)));
exec_failed(cmd);
}
static void add_man_viewer(const char *name)
@ -214,6 +210,12 @@ static void do_add_man_viewer_info(const char *name,
man_viewer_info_list = new;
}
static void unsupported_man_viewer(const char *name, const char *var)
{
pr_warning("'%s': path for unsupported man viewer.\n"
"Please consider using 'man.<tool>.%s' instead.", name, var);
}
static int add_man_viewer_path(const char *name,
size_t len,
const char *value)
@ -221,9 +223,7 @@ static int add_man_viewer_path(const char *name,
if (supported_man_viewer(name, len))
do_add_man_viewer_info(name, len, value);
else
warning("'%s': path for unsupported man viewer.\n"
"Please consider using 'man.<tool>.cmd' instead.",
name);
unsupported_man_viewer(name, "cmd");
return 0;
}
@ -233,9 +233,7 @@ static int add_man_viewer_cmd(const char *name,
const char *value)
{
if (supported_man_viewer(name, len))
warning("'%s': cmd for supported man viewer.\n"
"Please consider using 'man.<tool>.path' instead.",
name);
unsupported_man_viewer(name, "path");
else
do_add_man_viewer_info(name, len, value);
@ -247,8 +245,10 @@ static int add_man_viewer_info(const char *var, const char *value)
const char *name = var + 4;
const char *subkey = strrchr(name, '.');
if (!subkey)
return error("Config with no key for man viewer: %s", name);
if (!subkey) {
pr_err("Config with no key for man viewer: %s", name);
return -1;
}
if (!strcmp(subkey, ".path")) {
if (!value)
@ -261,7 +261,7 @@ static int add_man_viewer_info(const char *var, const char *value)
return add_man_viewer_cmd(name, subkey - name, value);
}
warning("'%s': unsupported man viewer sub key.", subkey);
pr_warning("'%s': unsupported man viewer sub key.", subkey);
return 0;
}
@ -332,7 +332,7 @@ static void setup_man_path(void)
setenv("MANPATH", new_path, 1);
free(new_path);
} else {
error("Unable to setup man path");
pr_err("Unable to setup man path");
}
}
@ -349,7 +349,7 @@ static void exec_viewer(const char *name, const char *page)
else if (info)
exec_man_cmd(info, page);
else
warning("'%s': unknown man viewer.", name);
pr_warning("'%s': unknown man viewer.", name);
}
static int show_man_page(const char *perf_cmd)


@ -1715,7 +1715,7 @@ static int setup_slab_sorting(struct list_head *sort_list, const char *arg)
if (!tok)
break;
if (slab_sort_dimension__add(tok, sort_list) < 0) {
error("Unknown slab --sort key: '%s'", tok);
pr_err("Unknown slab --sort key: '%s'", tok);
free(str);
return -1;
}
@ -1741,7 +1741,7 @@ static int setup_page_sorting(struct list_head *sort_list, const char *arg)
if (!tok)
break;
if (page_sort_dimension__add(tok, sort_list) < 0) {
error("Unknown page --sort key: '%s'", tok);
pr_err("Unknown page --sort key: '%s'", tok);
free(str);
return -1;
}


@ -453,7 +453,7 @@ static int record__open(struct record *rec)
}
if (perf_evlist__apply_filters(evlist, &pos)) {
error("failed to set filter \"%s\" on event %s with %d (%s)\n",
pr_err("failed to set filter \"%s\" on event %s with %d (%s)\n",
pos->filter, perf_evsel__name(pos), errno,
str_error_r(errno, msg, sizeof(msg)));
rc = -1;
@ -461,7 +461,7 @@ static int record__open(struct record *rec)
}
if (perf_evlist__apply_drv_configs(evlist, &pos, &err_term)) {
error("failed to set config \"%s\" on event %s with %d (%s)\n",
pr_err("failed to set config \"%s\" on event %s with %d (%s)\n",
err_term->val.drv_cfg, perf_evsel__name(pos), errno,
str_error_r(errno, msg, sizeof(msg)));
rc = -1;


@ -94,10 +94,9 @@ static int report__config(const char *var, const char *value, void *cb)
symbol_conf.cumulate_callchain = perf_config_bool(var, value);
return 0;
}
if (!strcmp(var, "report.queue-size")) {
rep->queue_size = perf_config_u64(var, value);
return 0;
}
if (!strcmp(var, "report.queue-size"))
return perf_config_u64(&rep->queue_size, var, value);
if (!strcmp(var, "report.sort_order")) {
default_sort_order = strdup(value);
return 0;
@ -558,6 +557,7 @@ static int __cmd_report(struct report *rep)
ui__error("failed to set cpu bitmap\n");
return ret;
}
session->itrace_synth_opts->cpu_bitmap = rep->cpu_bitmap;
}
if (rep->show_threads) {


@ -2066,7 +2066,7 @@ static void save_task_callchain(struct perf_sched *sched,
if (thread__resolve_callchain(thread, cursor, evsel, sample,
NULL, NULL, sched->max_stack + 2) != 0) {
if (verbose > 0)
error("Failed to resolve callchain. Skipping\n");
pr_err("Failed to resolve callchain. Skipping\n");
return;
}


@ -85,6 +85,8 @@ enum perf_output_field {
PERF_OUTPUT_INSN = 1U << 21,
PERF_OUTPUT_INSNLEN = 1U << 22,
PERF_OUTPUT_BRSTACKINSN = 1U << 23,
PERF_OUTPUT_BRSTACKOFF = 1U << 24,
PERF_OUTPUT_SYNTH = 1U << 25,
};
struct output_option {
@ -115,6 +117,13 @@ struct output_option {
{.str = "insn", .field = PERF_OUTPUT_INSN},
{.str = "insnlen", .field = PERF_OUTPUT_INSNLEN},
{.str = "brstackinsn", .field = PERF_OUTPUT_BRSTACKINSN},
{.str = "brstackoff", .field = PERF_OUTPUT_BRSTACKOFF},
{.str = "synth", .field = PERF_OUTPUT_SYNTH},
};
enum {
OUTPUT_TYPE_SYNTH = PERF_TYPE_MAX,
OUTPUT_TYPE_MAX
};
/* default set to maintain compatibility with current format */
@ -124,7 +133,7 @@ static struct {
unsigned int print_ip_opts;
u64 fields;
u64 invalid_fields;
} output[PERF_TYPE_MAX] = {
} output[OUTPUT_TYPE_MAX] = {
[PERF_TYPE_HARDWARE] = {
.user_set = false,
@ -182,12 +191,44 @@ static struct {
.invalid_fields = PERF_OUTPUT_TRACE | PERF_OUTPUT_BPF_OUTPUT,
},
[OUTPUT_TYPE_SYNTH] = {
.user_set = false,
.fields = PERF_OUTPUT_COMM | PERF_OUTPUT_TID |
PERF_OUTPUT_CPU | PERF_OUTPUT_TIME |
PERF_OUTPUT_EVNAME | PERF_OUTPUT_IP |
PERF_OUTPUT_SYM | PERF_OUTPUT_DSO |
PERF_OUTPUT_SYNTH,
.invalid_fields = PERF_OUTPUT_TRACE | PERF_OUTPUT_BPF_OUTPUT,
},
};
static inline int output_type(unsigned int type)
{
switch (type) {
case PERF_TYPE_SYNTH:
return OUTPUT_TYPE_SYNTH;
default:
return type;
}
}
static inline unsigned int attr_type(unsigned int type)
{
switch (type) {
case OUTPUT_TYPE_SYNTH:
return PERF_TYPE_SYNTH;
default:
return type;
}
}
static bool output_set_by_user(void)
{
int j;
for (j = 0; j < PERF_TYPE_MAX; ++j) {
for (j = 0; j < OUTPUT_TYPE_MAX; ++j) {
if (output[j].user_set)
return true;
}
@ -208,7 +249,7 @@ static const char *output_field2str(enum perf_output_field field)
return str;
}
#define PRINT_FIELD(x) (output[attr->type].fields & PERF_OUTPUT_##x)
#define PRINT_FIELD(x) (output[output_type(attr->type)].fields & PERF_OUTPUT_##x)
static int perf_evsel__do_check_stype(struct perf_evsel *evsel,
u64 sample_type, const char *sample_msg,
@ -216,7 +257,7 @@ static int perf_evsel__do_check_stype(struct perf_evsel *evsel,
bool allow_user_set)
{
struct perf_event_attr *attr = &evsel->attr;
int type = attr->type;
int type = output_type(attr->type);
const char *evname;
if (attr->sample_type & sample_type)
@ -298,10 +339,10 @@ static int perf_evsel__check_attr(struct perf_evsel *evsel,
"selected.\n");
return -EINVAL;
}
if (PRINT_FIELD(DSO) && !PRINT_FIELD(IP) && !PRINT_FIELD(ADDR)) {
pr_err("Display of DSO requested but neither sample IP nor "
"sample address\nis selected. Hence, no addresses to convert "
"to DSO.\n");
if (PRINT_FIELD(DSO) && !PRINT_FIELD(IP) && !PRINT_FIELD(ADDR) &&
!PRINT_FIELD(BRSTACK) && !PRINT_FIELD(BRSTACKSYM) && !PRINT_FIELD(BRSTACKOFF)) {
pr_err("Display of DSO requested but no address to convert. Select\n"
"sample IP, sample address, brstack, brstacksym, or brstackoff.\n");
return -EINVAL;
}
if (PRINT_FIELD(SRCLINE) && !PRINT_FIELD(IP)) {
@ -346,7 +387,7 @@ static int perf_evsel__check_attr(struct perf_evsel *evsel,
static void set_print_ip_opts(struct perf_event_attr *attr)
{
unsigned int type = attr->type;
unsigned int type = output_type(attr->type);
output[type].print_ip_opts = 0;
if (PRINT_FIELD(IP))
@ -374,16 +415,17 @@ static int perf_session__check_output_opt(struct perf_session *session)
unsigned int j;
struct perf_evsel *evsel;
for (j = 0; j < PERF_TYPE_MAX; ++j) {
evsel = perf_session__find_first_evtype(session, j);
for (j = 0; j < OUTPUT_TYPE_MAX; ++j) {
evsel = perf_session__find_first_evtype(session, attr_type(j));
/*
* even if fields is set to 0 (ie., show nothing) event must
* exist if user explicitly includes it on the command line
*/
if (!evsel && output[j].user_set && !output[j].wildcard_set) {
if (!evsel && output[j].user_set && !output[j].wildcard_set &&
j != OUTPUT_TYPE_SYNTH) {
pr_err("%s events do not exist. "
"Remove corresponding -f option to proceed.\n",
"Remove corresponding -F option to proceed.\n",
event_type(j));
return -1;
}
@ -514,18 +556,43 @@ mispred_str(struct branch_entry *br)
return br->flags.predicted ? 'P' : 'M';
}
static void print_sample_brstack(struct perf_sample *sample)
static void print_sample_brstack(struct perf_sample *sample,
struct thread *thread,
struct perf_event_attr *attr)
{
struct branch_stack *br = sample->branch_stack;
u64 i;
struct addr_location alf, alt;
u64 i, from, to;
if (!(br && br->nr))
return;
for (i = 0; i < br->nr; i++) {
printf(" 0x%"PRIx64"/0x%"PRIx64"/%c/%c/%c/%d ",
br->entries[i].from,
br->entries[i].to,
from = br->entries[i].from;
to = br->entries[i].to;
if (PRINT_FIELD(DSO)) {
memset(&alf, 0, sizeof(alf));
memset(&alt, 0, sizeof(alt));
thread__find_addr_map(thread, sample->cpumode, MAP__FUNCTION, from, &alf);
thread__find_addr_map(thread, sample->cpumode, MAP__FUNCTION, to, &alt);
}
printf("0x%"PRIx64, from);
if (PRINT_FIELD(DSO)) {
printf("(");
map__fprintf_dsoname(alf.map, stdout);
printf(")");
}
printf("/0x%"PRIx64, to);
if (PRINT_FIELD(DSO)) {
printf("(");
map__fprintf_dsoname(alt.map, stdout);
printf(")");
}
printf("/%c/%c/%c/%d ",
mispred_str( br->entries + i),
br->entries[i].flags.in_tx? 'X' : '-',
br->entries[i].flags.abort? 'A' : '-',
@ -534,7 +601,8 @@ static void print_sample_brstack(struct perf_sample *sample)
}
static void print_sample_brstacksym(struct perf_sample *sample,
struct thread *thread)
struct thread *thread,
struct perf_event_attr *attr)
{
struct branch_stack *br = sample->branch_stack;
struct addr_location alf, alt;
@ -559,8 +627,18 @@ static void print_sample_brstacksym(struct perf_sample *sample,
alt.sym = map__find_symbol(alt.map, alt.addr);
symbol__fprintf_symname_offs(alf.sym, &alf, stdout);
if (PRINT_FIELD(DSO)) {
printf("(");
map__fprintf_dsoname(alf.map, stdout);
printf(")");
}
putchar('/');
symbol__fprintf_symname_offs(alt.sym, &alt, stdout);
if (PRINT_FIELD(DSO)) {
printf("(");
map__fprintf_dsoname(alt.map, stdout);
printf(")");
}
printf("/%c/%c/%c/%d ",
mispred_str( br->entries + i),
br->entries[i].flags.in_tx? 'X' : '-',
@ -569,6 +647,51 @@ static void print_sample_brstacksym(struct perf_sample *sample,
}
}
static void print_sample_brstackoff(struct perf_sample *sample,
struct thread *thread,
struct perf_event_attr *attr)
{
struct branch_stack *br = sample->branch_stack;
struct addr_location alf, alt;
u64 i, from, to;
if (!(br && br->nr))
return;
for (i = 0; i < br->nr; i++) {
memset(&alf, 0, sizeof(alf));
memset(&alt, 0, sizeof(alt));
from = br->entries[i].from;
to = br->entries[i].to;
thread__find_addr_map(thread, sample->cpumode, MAP__FUNCTION, from, &alf);
if (alf.map && !alf.map->dso->adjust_symbols)
from = map__map_ip(alf.map, from);
thread__find_addr_map(thread, sample->cpumode, MAP__FUNCTION, to, &alt);
if (alt.map && !alt.map->dso->adjust_symbols)
to = map__map_ip(alt.map, to);
printf("0x%"PRIx64, from);
if (PRINT_FIELD(DSO)) {
printf("(");
map__fprintf_dsoname(alf.map, stdout);
printf(")");
}
printf("/0x%"PRIx64, to);
if (PRINT_FIELD(DSO)) {
printf("(");
map__fprintf_dsoname(alt.map, stdout);
printf(")");
}
printf("/%c/%c/%c/%d ",
mispred_str(br->entries + i),
br->entries[i].flags.in_tx ? 'X' : '-',
br->entries[i].flags.abort ? 'A' : '-',
br->entries[i].flags.cycles);
}
}
#define MAXBB 16384UL
static int grab_bb(u8 *buffer, u64 start, u64 end,
@ -906,6 +1029,7 @@ static void print_sample_bts(struct perf_sample *sample,
struct machine *machine)
{
struct perf_event_attr *attr = &evsel->attr;
unsigned int type = output_type(attr->type);
bool print_srcline_last = false;
if (PRINT_FIELD(CALLINDENT))
@ -913,7 +1037,7 @@ static void print_sample_bts(struct perf_sample *sample,
/* print branch_from information */
if (PRINT_FIELD(IP)) {
unsigned int print_opts = output[attr->type].print_ip_opts;
unsigned int print_opts = output[type].print_ip_opts;
struct callchain_cursor *cursor = NULL;
if (symbol_conf.use_callchain && sample->callchain &&
@ -936,7 +1060,7 @@ static void print_sample_bts(struct perf_sample *sample,
/* print branch_to information */
if (PRINT_FIELD(ADDR) ||
((evsel->attr.sample_type & PERF_SAMPLE_ADDR) &&
!output[attr->type].user_set)) {
!output[type].user_set)) {
printf(" => ");
print_sample_addr(sample, thread, attr);
}
@ -1079,6 +1203,127 @@ static void print_sample_bpf_output(struct perf_sample *sample)
(char *)(sample->raw_data));
}
static void print_sample_spacing(int len, int spacing)
{
if (len > 0 && len < spacing)
printf("%*s", spacing - len, "");
}
static void print_sample_pt_spacing(int len)
{
print_sample_spacing(len, 34);
}
static void print_sample_synth_ptwrite(struct perf_sample *sample)
{
struct perf_synth_intel_ptwrite *data = perf_sample__synth_ptr(sample);
int len;
if (perf_sample__bad_synth_size(sample, *data))
return;
len = printf(" IP: %u payload: %#" PRIx64 " ",
data->ip, le64_to_cpu(data->payload));
print_sample_pt_spacing(len);
}
static void print_sample_synth_mwait(struct perf_sample *sample)
{
struct perf_synth_intel_mwait *data = perf_sample__synth_ptr(sample);
int len;
if (perf_sample__bad_synth_size(sample, *data))
return;
len = printf(" hints: %#x extensions: %#x ",
data->hints, data->extensions);
print_sample_pt_spacing(len);
}
static void print_sample_synth_pwre(struct perf_sample *sample)
{
struct perf_synth_intel_pwre *data = perf_sample__synth_ptr(sample);
int len;
if (perf_sample__bad_synth_size(sample, *data))
return;
len = printf(" hw: %u cstate: %u sub-cstate: %u ",
data->hw, data->cstate, data->subcstate);
print_sample_pt_spacing(len);
}
static void print_sample_synth_exstop(struct perf_sample *sample)
{
struct perf_synth_intel_exstop *data = perf_sample__synth_ptr(sample);
int len;
if (perf_sample__bad_synth_size(sample, *data))
return;
len = printf(" IP: %u ", data->ip);
print_sample_pt_spacing(len);
}
static void print_sample_synth_pwrx(struct perf_sample *sample)
{
struct perf_synth_intel_pwrx *data = perf_sample__synth_ptr(sample);
int len;
if (perf_sample__bad_synth_size(sample, *data))
return;
len = printf(" deepest cstate: %u last cstate: %u wake reason: %#x ",
data->deepest_cstate, data->last_cstate,
data->wake_reason);
print_sample_pt_spacing(len);
}
static void print_sample_synth_cbr(struct perf_sample *sample)
{
struct perf_synth_intel_cbr *data = perf_sample__synth_ptr(sample);
unsigned int percent, freq;
int len;
if (perf_sample__bad_synth_size(sample, *data))
return;
freq = (le32_to_cpu(data->freq) + 500) / 1000;
len = printf(" cbr: %2u freq: %4u MHz ", data->cbr, freq);
if (data->max_nonturbo) {
percent = (5 + (1000 * data->cbr) / data->max_nonturbo) / 10;
len += printf("(%3u%%) ", percent);
}
print_sample_pt_spacing(len);
}
static void print_sample_synth(struct perf_sample *sample,
struct perf_evsel *evsel)
{
switch (evsel->attr.config) {
case PERF_SYNTH_INTEL_PTWRITE:
print_sample_synth_ptwrite(sample);
break;
case PERF_SYNTH_INTEL_MWAIT:
print_sample_synth_mwait(sample);
break;
case PERF_SYNTH_INTEL_PWRE:
print_sample_synth_pwre(sample);
break;
case PERF_SYNTH_INTEL_EXSTOP:
print_sample_synth_exstop(sample);
break;
case PERF_SYNTH_INTEL_PWRX:
print_sample_synth_pwrx(sample);
break;
case PERF_SYNTH_INTEL_CBR:
print_sample_synth_cbr(sample);
break;
default:
break;
}
}
struct perf_script {
struct perf_tool tool;
struct perf_session *session;
@ -1132,8 +1377,9 @@ static void process_event(struct perf_script *script,
{
struct thread *thread = al->thread;
struct perf_event_attr *attr = &evsel->attr;
unsigned int type = output_type(attr->type);
if (output[attr->type].fields == 0)
if (output[type].fields == 0)
return;
print_sample_start(sample, thread, evsel);
@ -1162,6 +1408,10 @@ static void process_event(struct perf_script *script,
if (PRINT_FIELD(TRACE))
event_format__print(evsel->tp_format, sample->cpu,
sample->raw_data, sample->raw_size);
if (attr->type == PERF_TYPE_SYNTH && PRINT_FIELD(SYNTH))
print_sample_synth(sample, evsel);
if (PRINT_FIELD(ADDR))
print_sample_addr(sample, thread, attr);
@ -1180,16 +1430,18 @@ static void process_event(struct perf_script *script,
cursor = &callchain_cursor;
putchar(cursor ? '\n' : ' ');
sample__fprintf_sym(sample, al, 0, output[attr->type].print_ip_opts, cursor, stdout);
sample__fprintf_sym(sample, al, 0, output[type].print_ip_opts, cursor, stdout);
}
if (PRINT_FIELD(IREGS))
print_sample_iregs(sample, attr);
if (PRINT_FIELD(BRSTACK))
print_sample_brstack(sample);
print_sample_brstack(sample, thread, attr);
else if (PRINT_FIELD(BRSTACKSYM))
print_sample_brstacksym(sample, thread);
print_sample_brstacksym(sample, thread, attr);
else if (PRINT_FIELD(BRSTACKOFF))
print_sample_brstackoff(sample, thread, attr);
if (perf_evsel__is_bpf_output(evsel) && PRINT_FIELD(BPF_OUTPUT))
print_sample_bpf_output(sample);
@ -1325,7 +1577,8 @@ static int process_attr(struct perf_tool *tool, union perf_event *event,
evlist = *pevlist;
evsel = perf_evlist__last(*pevlist);
if (evsel->attr.type >= PERF_TYPE_MAX)
if (evsel->attr.type >= PERF_TYPE_MAX &&
evsel->attr.type != PERF_TYPE_SYNTH)
return 0;
evlist__for_each_entry(evlist, pos) {
@ -1727,6 +1980,7 @@ static int parse_output_fields(const struct option *opt __maybe_unused,
int rc = 0;
char *str = strdup(arg);
int type = -1;
enum { DEFAULT, SET, ADD, REMOVE } change = DEFAULT;
if (!str)
return -ENOMEM;
@ -1749,6 +2003,8 @@ static int parse_output_fields(const struct option *opt __maybe_unused,
type = PERF_TYPE_RAW;
else if (!strcmp(str, "break"))
type = PERF_TYPE_BREAKPOINT;
else if (!strcmp(str, "synth"))
type = OUTPUT_TYPE_SYNTH;
else {
fprintf(stderr, "Invalid event type in field string.\n");
rc = -EINVAL;
@ -1772,23 +2028,44 @@ static int parse_output_fields(const struct option *opt __maybe_unused,
goto out;
}
/* Don't override defaults for +- */
if (strchr(str, '+') || strchr(str, '-'))
goto parse;
if (output_set_by_user())
pr_warning("Overriding previous field request for all events.\n");
for (j = 0; j < PERF_TYPE_MAX; ++j) {
for (j = 0; j < OUTPUT_TYPE_MAX; ++j) {
output[j].fields = 0;
output[j].user_set = true;
output[j].wildcard_set = true;
}
}
parse:
for (tok = strtok_r(tok, ",", &strtok_saveptr); tok; tok = strtok_r(NULL, ",", &strtok_saveptr)) {
if (*tok == '+') {
if (change == SET)
goto out_badmix;
change = ADD;
tok++;
} else if (*tok == '-') {
if (change == SET)
goto out_badmix;
change = REMOVE;
tok++;
} else {
if (change != SET && change != DEFAULT)
goto out_badmix;
change = SET;
}
for (i = 0; i < imax; ++i) {
if (strcmp(tok, all_output_options[i].str) == 0)
break;
}
if (i == imax && strcmp(tok, "flags") == 0) {
print_flags = true;
print_flags = change == REMOVE ? false : true;
continue;
}
if (i == imax) {
@ -1801,12 +2078,16 @@ static int parse_output_fields(const struct option *opt __maybe_unused,
/* add user option to all events types for
* which it is valid
*/
for (j = 0; j < PERF_TYPE_MAX; ++j) {
for (j = 0; j < OUTPUT_TYPE_MAX; ++j) {
if (output[j].invalid_fields & all_output_options[i].field) {
pr_warning("\'%s\' not valid for %s events. Ignoring.\n",
all_output_options[i].str, event_type(j));
} else
output[j].fields |= all_output_options[i].field;
} else {
if (change == REMOVE)
output[j].fields &= ~all_output_options[i].field;
else
output[j].fields |= all_output_options[i].field;
}
}
} else {
if (output[type].invalid_fields & all_output_options[i].field) {
@ -1826,7 +2107,11 @@ static int parse_output_fields(const struct option *opt __maybe_unused,
"Events will not be displayed.\n", event_type(type));
}
}
goto out;
out_badmix:
fprintf(stderr, "Cannot mix +-field with overridden fields\n");
rc = -EINVAL;
out:
free(str);
return rc;
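The +/- handling above turns -F from a pure override into an incremental edit of the default columns. As a hedged illustration (field names taken from the option help further down): an invocation along the lines of 'perf script -F +iregs' keeps the default columns and appends the sampled registers, something like 'perf script -F -cpu' drops only the cpu column, and mixing a bare field list with +/- prefixed fields in the same string is rejected through the out_badmix path above.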
@ -2444,10 +2729,11 @@ int cmd_script(int argc, const char **argv)
symbol__config_symfs),
OPT_CALLBACK('F', "fields", NULL, "str",
"comma separated output fields prepend with 'type:'. "
"Valid types: hw,sw,trace,raw. "
"+field to add and -field to remove."
"Valid types: hw,sw,trace,raw,synth. "
"Fields: comm,tid,pid,time,cpu,event,trace,ip,sym,dso,"
"addr,symoff,period,iregs,brstack,brstacksym,flags,"
"bpf-output,callindent,insn,insnlen,brstackinsn",
"bpf-output,callindent,insn,insnlen,brstackinsn,synth",
parse_output_fields),
OPT_BOOLEAN('a', "all-cpus", &system_wide,
"system-wide collection from all CPUs"),
@ -2706,6 +2992,7 @@ int cmd_script(int argc, const char **argv)
err = perf_session__cpu_bitmap(session, cpu_list, cpu_bitmap);
if (err < 0)
goto out_delete;
itrace_synth_opts.cpu_bitmap = cpu_bitmap;
}
if (!no_callchain)

View File

@ -86,6 +86,7 @@
#define DEFAULT_SEPARATOR " "
#define CNTR_NOT_SUPPORTED "<not supported>"
#define CNTR_NOT_COUNTED "<not counted>"
#define FREEZE_ON_SMI_PATH "devices/cpu/freeze_on_smi"
static void print_counters(struct timespec *ts, int argc, const char **argv);
@ -122,6 +123,14 @@ static const char * topdown_attrs[] = {
NULL,
};
static const char *smi_cost_attrs = {
"{"
"msr/aperf/,"
"msr/smi/,"
"cycles"
"}"
};
static struct perf_evlist *evsel_list;
static struct target target = {
@ -137,6 +146,8 @@ static bool null_run = false;
static int detailed_run = 0;
static bool transaction_run;
static bool topdown_run = false;
static bool smi_cost = false;
static bool smi_reset = false;
static bool big_num = true;
static int big_num_opt = -1;
static const char *csv_sep = NULL;
@ -625,14 +636,14 @@ static int __run_perf_stat(int argc, const char **argv)
}
if (perf_evlist__apply_filters(evsel_list, &counter)) {
error("failed to set filter \"%s\" on event %s with %d (%s)\n",
pr_err("failed to set filter \"%s\" on event %s with %d (%s)\n",
counter->filter, perf_evsel__name(counter), errno,
str_error_r(errno, msg, sizeof(msg)));
return -1;
}
if (perf_evlist__apply_drv_configs(evsel_list, &counter, &err_term)) {
error("failed to set config \"%s\" on event %s with %d (%s)\n",
pr_err("failed to set config \"%s\" on event %s with %d (%s)\n",
err_term->val.drv_cfg, perf_evsel__name(counter), errno,
str_error_r(errno, msg, sizeof(msg)));
return -1;
@ -1782,6 +1793,8 @@ static const struct option stat_options[] = {
"Only print computed metrics. No raw values", enable_metric_only),
OPT_BOOLEAN(0, "topdown", &topdown_run,
"measure topdown level 1 statistics"),
OPT_BOOLEAN(0, "smi-cost", &smi_cost,
"measure SMI cost"),
OPT_END()
};
@ -2160,6 +2173,39 @@ static int add_default_attributes(void)
return 0;
}
if (smi_cost) {
int smi;
if (sysfs__read_int(FREEZE_ON_SMI_PATH, &smi) < 0) {
fprintf(stderr, "freeze_on_smi is not supported.\n");
return -1;
}
if (!smi) {
if (sysfs__write_int(FREEZE_ON_SMI_PATH, 1) < 0) {
fprintf(stderr, "Failed to set freeze_on_smi.\n");
return -1;
}
smi_reset = true;
}
if (pmu_have_event("msr", "aperf") &&
pmu_have_event("msr", "smi")) {
if (!force_metric_only)
metric_only = true;
err = parse_events(evsel_list, smi_cost_attrs, NULL);
} else {
fprintf(stderr, "To measure SMI cost, it needs "
"msr/aperf/, msr/smi/ and cpu/cycles/ support\n");
return -1;
}
if (err) {
fprintf(stderr, "Cannot set up SMI cost events\n");
return -1;
}
return 0;
}
if (topdown_run) {
char *str = NULL;
bool warn = false;
@ -2742,6 +2788,9 @@ int cmd_stat(int argc, const char **argv)
perf_stat__exit_aggr_mode();
perf_evlist__free_stats(evsel_list);
out:
if (smi_cost && smi_reset)
sysfs__write_int(FREEZE_ON_SMI_PATH, 0);
perf_evlist__delete(evsel_list);
return status;
}

View File

@ -134,7 +134,7 @@ static int perf_top__parse_source(struct perf_top *top, struct hist_entry *he)
return err;
}
err = symbol__disassemble(sym, map, NULL, 0);
err = symbol__disassemble(sym, map, NULL, 0, NULL);
if (err == 0) {
out_assign:
top->sym_filter_entry = he;
@ -958,7 +958,7 @@ static int __cmd_top(struct perf_top *top)
ret = perf_evlist__apply_drv_configs(evlist, &pos, &err_term);
if (ret) {
error("failed to set config \"%s\" on event %s with %d (%s)\n",
pr_err("failed to set config \"%s\" on event %s with %d (%s)\n",
err_term->val.drv_cfg, perf_evsel__name(pos), errno,
str_error_r(errno, msg, sizeof(msg)));
goto out_delete;

View File

@ -304,7 +304,7 @@ jvmti_close(void *agent)
FILE *fp = agent;
if (!fp) {
warnx("jvmti: incalid fd in close_agent");
warnx("jvmti: invalid fd in close_agent");
return -1;
}

View File

@ -5,8 +5,6 @@
#include <stdint.h>
#include <jvmti.h>
#define __unused __attribute__((unused))
#if defined(__cplusplus)
extern "C" {
#endif

View File

@ -1,3 +1,4 @@
#include <linux/compiler.h>
#include <sys/types.h>
#include <stdio.h>
#include <string.h>
@ -238,7 +239,7 @@ code_generated_cb(jvmtiEnv *jvmti,
}
JNIEXPORT jint JNICALL
Agent_OnLoad(JavaVM *jvm, char *options, void *reserved __unused)
Agent_OnLoad(JavaVM *jvm, char *options, void *reserved __maybe_unused)
{
jvmtiEventCallbacks cb;
jvmtiCapabilities caps1;
@ -313,7 +314,7 @@ Agent_OnLoad(JavaVM *jvm, char *options, void *reserved __unused)
}
JNIEXPORT void JNICALL
Agent_OnUnload(JavaVM *jvm __unused)
Agent_OnUnload(JavaVM *jvm __maybe_unused)
{
int ret;

View File

@ -48,10 +48,6 @@
#include "json.h"
#include "jevents.h"
#ifndef __maybe_unused
#define __maybe_unused __attribute__((unused))
#endif
int verbose;
char *prog;

View File

@ -0,0 +1,13 @@
#!/bin/bash
#
# print Intel PT Power Events and PTWRITE. The intel_pt PMU event needs
# to be specified with appropriate config terms.
#
if ! echo "$@" | grep -q intel_pt ; then
echo "Options must include the Intel PT event e.g. -e intel_pt/pwr_evt,ptw/"
echo "and for power events it probably needs to be system wide i.e. -a option"
echo "For example: -a -e intel_pt/pwr_evt,branch=0/ sleep 1"
exit 1
fi
perf record $@

View File

@ -0,0 +1,3 @@
#!/bin/bash
# description: print Intel PT Power Events and PTWRITE
perf script $@ -s "$PERF_EXEC_PATH"/scripts/python/intel-pt-events.py

View File

@ -0,0 +1,128 @@
# intel-pt-events.py: Print Intel PT Power Events and PTWRITE
# Copyright (c) 2017, Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
# version 2, as published by the Free Software Foundation.
#
# This program is distributed in the hope it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
import os
import sys
import struct
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
# These perf imports are not used at present
#from perf_trace_context import *
#from Core import *
def trace_begin():
print "Intel PT Power Events and PTWRITE"
def trace_end():
print "End"
def trace_unhandled(event_name, context, event_fields_dict):
print ' '.join(['%s=%s'%(k,str(v))for k,v in sorted(event_fields_dict.items())])
def print_ptwrite(raw_buf):
data = struct.unpack_from("<IQ", raw_buf)
flags = data[0]
payload = data[1]
exact_ip = flags & 1
print "IP: %u payload: %#x" % (exact_ip, payload),
def print_cbr(raw_buf):
data = struct.unpack_from("<BBBBII", raw_buf)
cbr = data[0]
f = (data[4] + 500) / 1000
p = ((cbr * 1000 / data[2]) + 5) / 10
print "%3u freq: %4u MHz (%3u%%)" % (cbr, f, p),
def print_mwait(raw_buf):
data = struct.unpack_from("<IQ", raw_buf)
payload = data[1]
hints = payload & 0xff
extensions = (payload >> 32) & 0x3
print "hints: %#x extensions: %#x" % (hints, extensions),
def print_pwre(raw_buf):
data = struct.unpack_from("<IQ", raw_buf)
payload = data[1]
hw = (payload >> 7) & 1
cstate = (payload >> 12) & 0xf
subcstate = (payload >> 8) & 0xf
print "hw: %u cstate: %u sub-cstate: %u" % (hw, cstate, subcstate),
def print_exstop(raw_buf):
data = struct.unpack_from("<I", raw_buf)
flags = data[0]
exact_ip = flags & 1
print "IP: %u" % (exact_ip),
def print_pwrx(raw_buf):
data = struct.unpack_from("<IQ", raw_buf)
payload = data[1]
deepest_cstate = payload & 0xf
last_cstate = (payload >> 4) & 0xf
wake_reason = (payload >> 8) & 0xf
print "deepest cstate: %u last cstate: %u wake reason: %#x" % (deepest_cstate, last_cstate, wake_reason),
def print_common_start(comm, sample, name):
ts = sample["time"]
cpu = sample["cpu"]
pid = sample["pid"]
tid = sample["tid"]
print "%16s %5u/%-5u [%03u] %9u.%09u %7s:" % (comm, pid, tid, cpu, ts / 1000000000, ts %1000000000, name),
def print_common_ip(sample, symbol, dso):
ip = sample["ip"]
print "%16x %s (%s)" % (ip, symbol, dso)
def process_event(param_dict):
event_attr = param_dict["attr"]
sample = param_dict["sample"]
raw_buf = param_dict["raw_buf"]
comm = param_dict["comm"]
name = param_dict["ev_name"]
# Symbol and dso info are not always resolved
if (param_dict.has_key("dso")):
dso = param_dict["dso"]
else:
dso = "[unknown]"
if (param_dict.has_key("symbol")):
symbol = param_dict["symbol"]
else:
symbol = "[unknown]"
if name == "ptwrite":
print_common_start(comm, sample, name)
print_ptwrite(raw_buf)
print_common_ip(sample, symbol, dso)
elif name == "cbr":
print_common_start(comm, sample, name)
print_cbr(raw_buf)
print_common_ip(sample, symbol, dso)
elif name == "mwait":
print_common_start(comm, sample, name)
print_mwait(raw_buf)
print_common_ip(sample, symbol, dso)
elif name == "pwre":
print_common_start(comm, sample, name)
print_pwre(raw_buf)
print_common_ip(sample, symbol, dso)
elif name == "exstop":
print_common_start(comm, sample, name)
print_exstop(raw_buf)
print_common_ip(sample, symbol, dso)
elif name == "pwrx":
print_common_start(comm, sample, name)
print_pwrx(raw_buf)
print_common_ip(sample, symbol, dso)

View File

@ -18,6 +18,7 @@
* permissions. All the event text files are stored there.
*/
#include <debug.h>
#include <errno.h>
#include <inttypes.h>
#include <stdlib.h>
@ -29,14 +30,11 @@
#include <sys/stat.h>
#include <unistd.h>
#include "../perf.h"
#include "util.h"
#include <subcmd/exec-cmd.h>
#include "tests.h"
#define ENV "PERF_TEST_ATTR"
extern int verbose;
static char *dir;
void test_attr__init(void)
@ -138,8 +136,10 @@ void test_attr__open(struct perf_event_attr *attr, pid_t pid, int cpu,
{
int errno_saved = errno;
if (store_event(attr, pid, cpu, fd, group_fd, flags))
die("test attr FAILED");
if (store_event(attr, pid, cpu, fd, group_fd, flags)) {
pr_err("test attr FAILED");
exit(128);
}
errno = errno_saved;
}

View File

@ -16,6 +16,13 @@ class Fail(Exception):
def getMsg(self):
return '\'%s\' - %s' % (self.test.path, self.msg)
class Notest(Exception):
def __init__(self, test, arch):
self.arch = arch
self.test = test
def getMsg(self):
return '[%s] \'%s\'' % (self.arch, self.test.path)
class Unsup(Exception):
def __init__(self, test):
self.test = test
@ -112,6 +119,9 @@ class Event(dict):
# 'command' - perf command name
# 'args' - special command arguments
# 'ret' - expected command return value (0 by default)
# 'arch' - architecture-specific test (optional)
# comma-separated list; a '!' at the beginning
# negates it.
#
# [eventX:base]
# - one or multiple instances in file
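A hedged illustration of the new key (the stanza format follows the existing attr test files; no such test is added in this hunk): putting 'arch = x86_64' in a test's [config] section makes the runner skip it everywhere except x86_64, while a negated list such as 'arch = !s390x,ppc' skips it only on s390x and ppc, matching the two branches of skip_test() below.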
@ -134,6 +144,12 @@ class Test(object):
except:
self.ret = 0
try:
self.arch = parser.get('config', 'arch')
log.warning("test limitation '%s'" % self.arch)
except:
self.arch = ''
self.expect = {}
self.result = {}
log.debug(" loading expected events");
@ -145,6 +161,31 @@ class Test(object):
else:
return True
def skip_test(self, myarch):
# If the architecture is not set, always run the test
if self.arch == '':
# log.warning("test for arch %s is ok" % myarch)
return False
# Allow multiple values in assignment separated by ','
arch_list = self.arch.split(',')
# Handle negated list such as !s390x,ppc
if arch_list[0][0] == '!':
arch_list[0] = arch_list[0][1:]
log.warning("excluded architecture list %s" % arch_list)
for arch_item in arch_list:
# log.warning("test for %s arch is %s" % (arch_item, myarch))
if arch_item == myarch:
return True
return False
for arch_item in arch_list:
# log.warning("test for architecture '%s' current '%s'" % (arch_item, myarch))
if arch_item == myarch:
return False
return True
def load_events(self, path, events):
parser_event = ConfigParser.SafeConfigParser()
parser_event.read(path)
@ -168,6 +209,11 @@ class Test(object):
events[section] = e
def run_cmd(self, tempdir):
junk1, junk2, junk3, junk4, myarch = (os.uname())
if self.skip_test(myarch):
raise Notest(self, myarch)
cmd = "PERF_TEST_ATTR=%s %s %s -o %s/perf.data %s" % (tempdir,
self.perf, self.command, tempdir, self.args)
ret = os.WEXITSTATUS(os.system(cmd))
@ -265,6 +311,8 @@ def run_tests(options):
Test(f, options).run()
except Unsup, obj:
log.warning("unsupp %s" % obj.getMsg())
except Notest, obj:
log.warning("skipped %s" % obj.getMsg())
def setup_log(verbose):
global log

View File

@ -62,8 +62,7 @@ static void __test_function(volatile long *ptr)
}
#endif
__attribute__ ((noinline))
static int test_function(void)
static noinline int test_function(void)
{
__test_function(&the_var);
the_var++;

View File

@ -28,8 +28,7 @@
static int overflows;
__attribute__ ((noinline))
static int test_function(void)
static noinline int test_function(void)
{
return time(NULL);
}

View File

@ -10,6 +10,15 @@
#include <uapi/linux/fs.h>
/*
* If CONFIG_PROFILE_ALL_BRANCHES is selected,
* 'if' is redefined after including kernel headers.
* Recover 'if' for BPF object code.
*/
#ifdef if
# undef if
#endif
#define FMODE_READ 0x1
#define FMODE_WRITE 0x2

View File

@ -76,8 +76,7 @@ static int unwind_entry(struct unwind_entry *entry, void *arg)
return strcmp((const char *) symbol, funcs[idx]);
}
__attribute__ ((noinline))
static int unwind_thread(struct thread *thread)
static noinline int unwind_thread(struct thread *thread)
{
struct perf_sample sample;
unsigned long cnt = 0;
@ -108,8 +107,7 @@ static int unwind_thread(struct thread *thread)
static int global_unwind_retval = -INT_MAX;
__attribute__ ((noinline))
static int compare(void *p1, void *p2)
static noinline int compare(void *p1, void *p2)
{
/* Any possible value should be 'thread' */
struct thread *thread = *(struct thread **)p1;
@ -128,8 +126,7 @@ static int compare(void *p1, void *p2)
return p1 - p2;
}
__attribute__ ((noinline))
static int krava_3(struct thread *thread)
static noinline int krava_3(struct thread *thread)
{
struct thread *array[2] = {thread, thread};
void *fp = &bsearch;
@ -147,14 +144,12 @@ static int krava_3(struct thread *thread)
return global_unwind_retval;
}
__attribute__ ((noinline))
static int krava_2(struct thread *thread)
static noinline int krava_2(struct thread *thread)
{
return krava_3(thread);
}
__attribute__ ((noinline))
static int krava_1(struct thread *thread)
static noinline int krava_1(struct thread *thread)
{
return krava_2(thread);
}

View File

@ -1810,17 +1810,6 @@ static int test_pmu_events(void)
return ret;
}
static void debug_warn(const char *warn, va_list params)
{
char msg[1024];
if (verbose <= 0)
return;
vsnprintf(msg, sizeof(msg), warn, params);
fprintf(stderr, " Warning: %s\n", msg);
}
int test__parse_events(int subtest __maybe_unused)
{
int ret1, ret2 = 0;
@ -1832,8 +1821,6 @@ do { \
ret2 = ret1; \
} while (0)
set_warning_routine(debug_warn);
TEST_EVENTS(test__events);
if (test_pmu())

View File

@ -46,12 +46,15 @@ static struct annotate_browser_opt {
.jump_arrows = true,
};
struct arch;
struct annotate_browser {
struct ui_browser b;
struct rb_root entries;
struct rb_node *curr_hot;
struct disasm_line *selection;
struct disasm_line **offsets;
struct arch *arch;
int nr_events;
u64 start;
int nr_asm_entries;
@ -125,43 +128,57 @@ static void annotate_browser__write(struct ui_browser *browser, void *entry, int
int i, pcnt_width = annotate_browser__pcnt_width(ab);
double percent_max = 0.0;
char bf[256];
bool show_title = false;
for (i = 0; i < ab->nr_events; i++) {
if (bdl->samples[i].percent > percent_max)
percent_max = bdl->samples[i].percent;
}
if ((row == 0) && (dl->offset == -1 || percent_max == 0.0)) {
if (ab->have_cycles) {
if (dl->ipc == 0.0 && dl->cycles == 0)
show_title = true;
} else
show_title = true;
}
if (dl->offset != -1 && percent_max != 0.0) {
if (percent_max != 0.0) {
for (i = 0; i < ab->nr_events; i++) {
ui_browser__set_percent_color(browser,
bdl->samples[i].percent,
current_entry);
if (annotate_browser__opts.show_total_period) {
ui_browser__printf(browser, "%6" PRIu64 " ",
bdl->samples[i].nr);
} else {
ui_browser__printf(browser, "%6.2f ",
bdl->samples[i].percent);
}
for (i = 0; i < ab->nr_events; i++) {
ui_browser__set_percent_color(browser,
bdl->samples[i].percent,
current_entry);
if (annotate_browser__opts.show_total_period) {
ui_browser__printf(browser, "%6" PRIu64 " ",
bdl->samples[i].nr);
} else {
ui_browser__printf(browser, "%6.2f ",
bdl->samples[i].percent);
}
} else {
ui_browser__write_nstring(browser, " ", 7 * ab->nr_events);
}
} else {
ui_browser__set_percent_color(browser, 0, current_entry);
ui_browser__write_nstring(browser, " ", 7 * ab->nr_events);
if (!show_title)
ui_browser__write_nstring(browser, " ", 7 * ab->nr_events);
else
ui_browser__printf(browser, "%*s", 7, "Percent");
}
if (ab->have_cycles) {
if (dl->ipc)
ui_browser__printf(browser, "%*.2f ", IPC_WIDTH - 1, dl->ipc);
else
else if (!show_title)
ui_browser__write_nstring(browser, " ", IPC_WIDTH);
else
ui_browser__printf(browser, "%*s ", IPC_WIDTH - 1, "IPC");
if (dl->cycles)
ui_browser__printf(browser, "%*" PRIu64 " ",
CYCLES_WIDTH - 1, dl->cycles);
else
else if (!show_title)
ui_browser__write_nstring(browser, " ", CYCLES_WIDTH);
else
ui_browser__printf(browser, "%*s ", CYCLES_WIDTH - 1, "Cycle");
}
SLsmg_write_char(' ');
@ -1056,7 +1073,8 @@ int symbol__tui_annotate(struct symbol *sym, struct map *map,
(nr_pcnt - 1);
}
err = symbol__disassemble(sym, map, perf_evsel__env_arch(evsel), sizeof_bdl);
err = symbol__disassemble(sym, map, perf_evsel__env_arch(evsel),
sizeof_bdl, &browser.arch);
if (err) {
char msg[BUFSIZ];
symbol__strerror_disassemble(sym, map, err, msg, sizeof(msg));

View File

@ -168,7 +168,8 @@ static int symbol__gtk_annotate(struct symbol *sym, struct map *map,
if (map->dso->annotate_warned)
return -1;
err = symbol__disassemble(sym, map, perf_evsel__env_arch(evsel), 0);
err = symbol__disassemble(sym, map, perf_evsel__env_arch(evsel),
0, NULL);
if (err) {
char msg[BUFSIZ];
symbol__strerror_disassemble(sym, map, err, msg, sizeof(msg));

View File

@ -1379,7 +1379,9 @@ static const char *annotate__norm_arch(const char *arch_name)
return normalize_arch((char *)arch_name);
}
int symbol__disassemble(struct symbol *sym, struct map *map, const char *arch_name, size_t privsize)
int symbol__disassemble(struct symbol *sym, struct map *map,
const char *arch_name, size_t privsize,
struct arch **parch)
{
struct dso *dso = map->dso;
char command[PATH_MAX * 2];
@ -1405,6 +1407,9 @@ int symbol__disassemble(struct symbol *sym, struct map *map, const char *arch_na
if (arch == NULL)
return -ENOTSUP;
if (parch)
*parch = arch;
if (arch->init) {
err = arch->init(arch);
if (err) {
@ -1901,7 +1906,8 @@ int symbol__tty_annotate(struct symbol *sym, struct map *map,
struct rb_root source_line = RB_ROOT;
u64 len;
if (symbol__disassemble(sym, map, perf_evsel__env_arch(evsel), 0) < 0)
if (symbol__disassemble(sym, map, perf_evsel__env_arch(evsel),
0, NULL) < 0)
return -1;
len = symbol__size(sym);

View File

@ -158,7 +158,9 @@ int hist_entry__inc_addr_samples(struct hist_entry *he, int evidx, u64 addr);
int symbol__alloc_hist(struct symbol *sym);
void symbol__annotate_zero_histograms(struct symbol *sym);
int symbol__disassemble(struct symbol *sym, struct map *map, const char *arch_name, size_t privsize);
int symbol__disassemble(struct symbol *sym, struct map *map,
const char *arch_name, size_t privsize,
struct arch **parch);
enum symbol_disassemble_errno {
SYMBOL_ANNOTATE_ERRNO__SUCCESS = 0,

View File

@ -322,6 +322,13 @@ static int auxtrace_queues__add_event_buffer(struct auxtrace_queues *queues,
return auxtrace_queues__add_buffer(queues, idx, buffer);
}
static bool filter_cpu(struct perf_session *session, int cpu)
{
unsigned long *cpu_bitmap = session->itrace_synth_opts->cpu_bitmap;
return cpu_bitmap && cpu != -1 && !test_bit(cpu, cpu_bitmap);
}
int auxtrace_queues__add_event(struct auxtrace_queues *queues,
struct perf_session *session,
union perf_event *event, off_t data_offset,
@ -331,6 +338,9 @@ int auxtrace_queues__add_event(struct auxtrace_queues *queues,
unsigned int idx;
int err;
if (filter_cpu(session, event->auxtrace.cpu))
return 0;
buffer = zalloc(sizeof(struct auxtrace_buffer));
if (!buffer)
return -ENOMEM;
@ -947,6 +957,8 @@ void itrace_synth_opts__set_default(struct itrace_synth_opts *synth_opts)
synth_opts->instructions = true;
synth_opts->branches = true;
synth_opts->transactions = true;
synth_opts->ptwrites = true;
synth_opts->pwr_events = true;
synth_opts->errors = true;
synth_opts->period_type = PERF_ITRACE_DEFAULT_PERIOD_TYPE;
synth_opts->period = PERF_ITRACE_DEFAULT_PERIOD;
@ -1030,6 +1042,12 @@ int itrace_parse_synth_opts(const struct option *opt, const char *str,
case 'x':
synth_opts->transactions = true;
break;
case 'w':
synth_opts->ptwrites = true;
break;
case 'p':
synth_opts->pwr_events = true;
break;
case 'e':
synth_opts->errors = true;
break;

View File

@ -59,6 +59,8 @@ enum itrace_period_type {
* @instructions: whether to synthesize 'instructions' events
* @branches: whether to synthesize 'branches' events
* @transactions: whether to synthesize events for transactions
* @ptwrites: whether to synthesize events for ptwrites
* @pwr_events: whether to synthesize power events
* @errors: whether to synthesize decoder error events
* @dont_decode: whether to skip decoding entirely
* @log: write a decoding log
@ -72,6 +74,7 @@ enum itrace_period_type {
* @period: 'instructions' events period
* @period_type: 'instructions' events period type
* @initial_skip: skip N events at the beginning.
* @cpu_bitmap: CPUs for which to synthesize events, or NULL for all
*/
struct itrace_synth_opts {
bool set;
@ -79,6 +82,8 @@ struct itrace_synth_opts {
bool instructions;
bool branches;
bool transactions;
bool ptwrites;
bool pwr_events;
bool errors;
bool dont_decode;
bool log;
@ -92,6 +97,7 @@ struct itrace_synth_opts {
unsigned long long period;
enum itrace_period_type period_type;
unsigned long initial_skip;
unsigned long *cpu_bitmap;
};
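A minimal sketch (not part of the diff) of how a tool is expected to wire up the new fields, assuming 'session' and 'cpu_bitmap' were prepared by the caller, much as builtin-script.c does above:

static void example_setup_itrace(struct perf_session *session,
				 unsigned long *cpu_bitmap)
{
	static struct itrace_synth_opts opts;

	/* Defaults now also enable the new ptwrite and power events */
	itrace_synth_opts__set_default(&opts);

	/* NULL means "all CPUs"; otherwise only CPUs set here are decoded */
	opts.cpu_bitmap = cpu_bitmap;

	session->itrace_synth_opts = &opts;
}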
/**

View File

@ -5,6 +5,7 @@
#include <subcmd/pager.h>
#include "../ui/ui.h"
#include <linux/compiler.h>
#include <linux/string.h>
#define CMD_EXEC_PATH "--exec-path"
@ -24,6 +25,6 @@ static inline int is_absolute_path(const char *path)
return path[0] == '/';
}
char *mkpath(const char *fmt, ...) __attribute__((format (printf, 1, 2)));
char *mkpath(const char *fmt, ...) __printf(1, 2);
#endif /* __PERF_CACHE_H */

View File

@ -335,32 +335,42 @@ static int perf_parse_long(const char *value, long *ret)
return 0;
}
static void die_bad_config(const char *name)
static void bad_config(const char *name)
{
if (config_file_name)
die("bad config value for '%s' in %s", name, config_file_name);
die("bad config value for '%s'", name);
pr_warning("bad config value for '%s' in %s, ignoring...\n", name, config_file_name);
else
pr_warning("bad config value for '%s', ignoring...\n", name);
}
u64 perf_config_u64(const char *name, const char *value)
int perf_config_u64(u64 *dest, const char *name, const char *value)
{
long long ret = 0;
if (!perf_parse_llong(value, &ret))
die_bad_config(name);
return (u64) ret;
if (!perf_parse_llong(value, &ret)) {
bad_config(name);
return -1;
}
*dest = ret;
return 0;
}
int perf_config_int(const char *name, const char *value)
int perf_config_int(int *dest, const char *name, const char *value)
{
long ret = 0;
if (!perf_parse_long(value, &ret))
die_bad_config(name);
return ret;
if (!perf_parse_long(value, &ret)) {
bad_config(name);
return -1;
}
*dest = ret;
return 0;
}
static int perf_config_bool_or_int(const char *name, const char *value, int *is_bool)
{
int ret;
*is_bool = 1;
if (!value)
return 1;
@ -371,7 +381,7 @@ static int perf_config_bool_or_int(const char *name, const char *value, int *is_
if (!strcasecmp(value, "false") || !strcasecmp(value, "no") || !strcasecmp(value, "off"))
return 0;
*is_bool = 0;
return perf_config_int(name, value);
return perf_config_int(&ret, name, value) < 0 ? -1 : ret;
}
int perf_config_bool(const char *name, const char *value)
@ -657,8 +667,7 @@ static int perf_config_set__init(struct perf_config_set *set)
user_config = strdup(mkpath("%s/.perfconfig", home));
if (user_config == NULL) {
warning("Not enough memory to process %s/.perfconfig, "
"ignoring it.", home);
pr_warning("Not enough memory to process %s/.perfconfig, ignoring it.", home);
goto out;
}
@ -671,8 +680,7 @@ static int perf_config_set__init(struct perf_config_set *set)
ret = 0;
if (st.st_uid && (st.st_uid != geteuid())) {
warning("File %s not owned by current user or root, "
"ignoring it.", user_config);
pr_warning("File %s not owned by current user or root, ignoring it.", user_config);
goto out_free;
}
@ -795,7 +803,8 @@ void perf_config_set__delete(struct perf_config_set *set)
*/
int config_error_nonbool(const char *var)
{
return error("Missing value for '%s'", var);
pr_err("Missing value for '%s'", var);
return -1;
}
void set_buildid_dir(const char *dir)

View File

@ -27,8 +27,8 @@ extern const char *config_exclusive_filename;
typedef int (*config_fn_t)(const char *, const char *, void *);
int perf_default_config(const char *, const char *, void *);
int perf_config(config_fn_t fn, void *);
int perf_config_int(const char *, const char *);
u64 perf_config_u64(const char *, const char *);
int perf_config_int(int *dest, const char *, const char *);
int perf_config_u64(u64 *dest, const char *, const char *);
int perf_config_bool(const char *, const char *);
int config_error_nonbool(const char *);
const char *perf_etc_perfconfig(void);

View File

@ -1444,10 +1444,8 @@ static int convert__config(const char *var, const char *value, void *cb)
{
struct convert *c = cb;
if (!strcmp(var, "convert.queue-size")) {
c->queue_size = perf_config_u64(var, value);
return 0;
}
if (!strcmp(var, "convert.queue-size"))
return perf_config_u64(&c->queue_size, var, value);
return 0;
}

View File

@ -4,6 +4,7 @@
#include <stdbool.h>
#include <string.h>
#include <linux/compiler.h>
#include "event.h"
#include "../ui/helpline.h"
#include "../ui/progress.h"
@ -40,16 +41,16 @@ extern int debug_data_convert;
#define STRERR_BUFSIZE 128 /* For the buffer size of str_error_r */
int dump_printf(const char *fmt, ...) __attribute__((format(printf, 1, 2)));
int dump_printf(const char *fmt, ...) __printf(1, 2);
void trace_event(union perf_event *event);
int ui__error(const char *format, ...) __attribute__((format(printf, 1, 2)));
int ui__warning(const char *format, ...) __attribute__((format(printf, 1, 2)));
int ui__error(const char *format, ...) __printf(1, 2);
int ui__warning(const char *format, ...) __printf(1, 2);
void pr_stat(const char *fmt, ...);
int eprintf(int level, int var, const char *fmt, ...) __attribute__((format(printf, 3, 4)));
int eprintf_time(int level, int var, u64 t, const char *fmt, ...) __attribute__((format(printf, 4, 5)));
int eprintf(int level, int var, const char *fmt, ...) __printf(3, 4);
int eprintf_time(int level, int var, u64 t, const char *fmt, ...) __printf(4, 5);
int veprintf(int level, int var, const char *fmt, va_list args);
int perf_debug_option(const char *str);

View File

@ -252,6 +252,127 @@ enum auxtrace_error_type {
PERF_AUXTRACE_ERROR_MAX
};
/* Attribute type for custom synthesized events */
#define PERF_TYPE_SYNTH (INT_MAX + 1U)
/* Attribute config for custom synthesized events */
enum perf_synth_id {
PERF_SYNTH_INTEL_PTWRITE,
PERF_SYNTH_INTEL_MWAIT,
PERF_SYNTH_INTEL_PWRE,
PERF_SYNTH_INTEL_EXSTOP,
PERF_SYNTH_INTEL_PWRX,
PERF_SYNTH_INTEL_CBR,
};
/*
* Raw data formats for synthesized events. Note that 4 bytes of padding are
* present to match the 'size' member of PERF_SAMPLE_RAW data, which is always
* 8-byte aligned. That means we must dereference raw_data with an offset of 4.
* Refer to perf_sample__synth_ptr() and perf_synth__raw_data(). It also means
* the structure sizes are 4 bytes bigger than the raw_size; see
* perf_synth__raw_size().
*/
struct perf_synth_intel_ptwrite {
u32 padding;
union {
struct {
u32 ip : 1,
reserved : 31;
};
u32 flags;
};
u64 payload;
};
struct perf_synth_intel_mwait {
u32 padding;
u32 reserved;
union {
struct {
u64 hints : 8,
reserved1 : 24,
extensions : 2,
reserved2 : 30;
};
u64 payload;
};
};
struct perf_synth_intel_pwre {
u32 padding;
u32 reserved;
union {
struct {
u64 reserved1 : 7,
hw : 1,
subcstate : 4,
cstate : 4,
reserved2 : 48;
};
u64 payload;
};
};
struct perf_synth_intel_exstop {
u32 padding;
union {
struct {
u32 ip : 1,
reserved : 31;
};
u32 flags;
};
};
struct perf_synth_intel_pwrx {
u32 padding;
u32 reserved;
union {
struct {
u64 deepest_cstate : 4,
last_cstate : 4,
wake_reason : 4,
reserved1 : 52;
};
u64 payload;
};
};
struct perf_synth_intel_cbr {
u32 padding;
union {
struct {
u32 cbr : 8,
reserved1 : 8,
max_nonturbo : 8,
reserved2 : 8;
};
u32 flags;
};
u32 freq;
u32 reserved3;
};
/*
* raw_data is always 4 bytes from an 8-byte boundary, so subtract 4 to get
* 8-byte alignment.
*/
static inline void *perf_sample__synth_ptr(struct perf_sample *sample)
{
return sample->raw_data - 4;
}
static inline void *perf_synth__raw_data(void *p)
{
return p + 4;
}
#define perf_synth__raw_size(d) (sizeof(d) - 4)
#define perf_sample__bad_synth_size(s, d) ((s)->raw_size < sizeof(d) - 4)
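To make the padding arithmetic concrete, here is a hedged sketch of the producer side; the real synthesizers live in intel-pt.c (not shown in this hunk), and the consumer side is the print_sample_synth_*() helpers added to builtin-script.c earlier in this diff. The helper name and its parameters are illustrative only:

/* Illustrative only: fill a ptwrite record and hand the 8-byte-aligned
 * tail to a struct perf_sample as PERF_SAMPLE_RAW data. */
static void example_synth_ptwrite(struct perf_sample *sample,
				  struct perf_synth_intel_ptwrite *raw,
				  u64 payload, bool exact_ip)
{
	memset(raw, 0, sizeof(*raw));
	raw->ip = exact_ip;
	raw->payload = cpu_to_le64(payload);

	/* raw_size skips the leading padding (sizeof - 4) ... */
	sample->raw_size = perf_synth__raw_size(*raw);
	/* ... and raw_data points 4 bytes in, so that the consumer's
	 * perf_sample__synth_ptr(sample) yields 'raw' again. */
	sample->raw_data = perf_synth__raw_data(raw);
}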
/*
* The kernel collects the number of events it couldn't send in a stretch and
* when possible sends this number in a PERF_RECORD_LOST event. The number of

View File

@ -1,6 +1,7 @@
#ifndef __PERF_EVLIST_H
#define __PERF_EVLIST_H 1
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/refcount.h>
#include <linux/list.h>
@ -34,7 +35,7 @@ struct perf_mmap {
refcount_t refcnt;
u64 prev;
struct auxtrace_mmap auxtrace_mmap;
char event_copy[PERF_SAMPLE_MAX_SIZE] __attribute__((aligned(8)));
char event_copy[PERF_SAMPLE_MAX_SIZE] __aligned(8);
};
static inline size_t

View File

@ -11,13 +11,17 @@
#include <errno.h>
#include <inttypes.h>
#include <linux/bitops.h>
#include <api/fs/fs.h>
#include <api/fs/tracing_path.h>
#include <traceevent/event-parse.h>
#include <linux/hw_breakpoint.h>
#include <linux/perf_event.h>
#include <linux/compiler.h>
#include <linux/err.h>
#include <sys/ioctl.h>
#include <sys/resource.h>
#include <sys/types.h>
#include <dirent.h>
#include "asm/bug.h"
#include "callchain.h"
#include "cgroup.h"
@ -1441,7 +1445,7 @@ int perf_event_attr__fprintf(FILE *fp, struct perf_event_attr *attr,
}
static int __open_attr__fprintf(FILE *fp, const char *name, const char *val,
void *priv __attribute__((unused)))
void *priv __maybe_unused)
{
return fprintf(fp, " %-32s %s\n", name, val);
}
@ -2471,6 +2475,42 @@ bool perf_evsel__fallback(struct perf_evsel *evsel, int err,
return false;
}
static bool find_process(const char *name)
{
size_t len = strlen(name);
DIR *dir;
struct dirent *d;
int ret = -1;
dir = opendir(procfs__mountpoint());
if (!dir)
return false;
/* Walk through the directory. */
while (ret && (d = readdir(dir)) != NULL) {
char path[PATH_MAX];
char *data;
size_t size;
if ((d->d_type != DT_DIR) ||
!strcmp(".", d->d_name) ||
!strcmp("..", d->d_name))
continue;
scnprintf(path, sizeof(path), "%s/%s/comm",
procfs__mountpoint(), d->d_name);
if (filename__read_str(path, &data, &size))
continue;
ret = strncmp(name, data, len);
free(data);
}
closedir(dir);
return ret ? false : true;
}
int perf_evsel__open_strerror(struct perf_evsel *evsel, struct target *target,
int err, char *msg, size_t size)
{

View File

@ -11,6 +11,7 @@
* @remark Copyright 2007 OProfile authors
* @author Philippe Elie
*/
#include <linux/compiler.h>
#include <sys/types.h>
#include <stdio.h>
#include <getopt.h>
@ -125,7 +126,7 @@ struct debug_line_header {
* and filesize, last entry is followed by en empty string.
*/
/* follow the first program statement */
} __attribute__((packed));
} __packed;
/* DWARF 2 spec talk only about one possible compilation unit header while
* binutils can handle two flavours of dwarf 2, 32 and 64 bits, this is not
@ -138,7 +139,7 @@ struct compilation_unit_header {
uhalf version;
uword debug_abbrev_offset;
ubyte pointer_size;
} __attribute__((packed));
} __packed;
#define DW_LNS_num_opcode (DW_LNS_set_isa + 1)

View File

@ -8,6 +8,7 @@
#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>
#include <linux/compiler.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
@ -1274,7 +1275,7 @@ read_event_desc(struct perf_header *ph, int fd)
}
static int __desc_attr__fprintf(FILE *fp, const char *name, const char *val,
void *priv __attribute__((unused)))
void *priv __maybe_unused)
{
return fprintf(fp, ", %s = %s", name, val);
}

View File

@ -12,7 +12,7 @@ static int perf_unknown_cmd_config(const char *var, const char *value,
void *cb __maybe_unused)
{
if (!strcmp(var, "help.autocorrect"))
autocorrect = perf_config_int(var,value);
return perf_config_int(&autocorrect, var,value);
return 0;
}

View File

@ -866,8 +866,6 @@ static void intel_bts_print_info(u64 *arr, int start, int finish)
fprintf(stdout, intel_bts_info_fmts[i], arr[i]);
}
u64 intel_bts_auxtrace_info_priv[INTEL_BTS_AUXTRACE_PRIV_SIZE];
int intel_bts_process_auxtrace_info(union perf_event *event,
struct perf_session *session)
{

View File

@ -64,6 +64,25 @@ enum intel_pt_pkt_state {
INTEL_PT_STATE_FUP_NO_TIP,
};
static inline bool intel_pt_sample_time(enum intel_pt_pkt_state pkt_state)
{
switch (pkt_state) {
case INTEL_PT_STATE_NO_PSB:
case INTEL_PT_STATE_NO_IP:
case INTEL_PT_STATE_ERR_RESYNC:
case INTEL_PT_STATE_IN_SYNC:
case INTEL_PT_STATE_TNT:
return true;
case INTEL_PT_STATE_TIP:
case INTEL_PT_STATE_TIP_PGD:
case INTEL_PT_STATE_FUP:
case INTEL_PT_STATE_FUP_NO_TIP:
return false;
default:
return true;
};
}
#ifdef INTEL_PT_STRICT
#define INTEL_PT_STATE_ERR1 INTEL_PT_STATE_NO_PSB
#define INTEL_PT_STATE_ERR2 INTEL_PT_STATE_NO_PSB
@ -87,11 +106,13 @@ struct intel_pt_decoder {
const unsigned char *buf;
size_t len;
bool return_compression;
bool branch_enable;
bool mtc_insn;
bool pge;
bool have_tma;
bool have_cyc;
bool fixup_last_mtc;
bool have_last_ip;
uint64_t pos;
uint64_t last_ip;
uint64_t ip;
@ -99,6 +120,7 @@ struct intel_pt_decoder {
uint64_t timestamp;
uint64_t tsc_timestamp;
uint64_t ref_timestamp;
uint64_t sample_timestamp;
uint64_t ret_addr;
uint64_t ctc_timestamp;
uint64_t ctc_delta;
@ -119,6 +141,7 @@ struct intel_pt_decoder {
int pkt_len;
int last_packet_type;
unsigned int cbr;
unsigned int cbr_seen;
unsigned int max_non_turbo_ratio;
double max_non_turbo_ratio_fp;
double cbr_cyc_to_tsc;
@ -136,9 +159,18 @@ struct intel_pt_decoder {
bool continuous_period;
bool overflow;
bool set_fup_tx_flags;
bool set_fup_ptw;
bool set_fup_mwait;
bool set_fup_pwre;
bool set_fup_exstop;
unsigned int fup_tx_flags;
unsigned int tx_flags;
uint64_t fup_ptw_payload;
uint64_t fup_mwait_payload;
uint64_t fup_pwre_payload;
uint64_t cbr_payload;
uint64_t timestamp_insn_cnt;
uint64_t sample_insn_cnt;
uint64_t stuck_ip;
int no_progress;
int stuck_ip_prd;
@ -192,6 +224,7 @@ struct intel_pt_decoder *intel_pt_decoder_new(struct intel_pt_params *params)
decoder->pgd_ip = params->pgd_ip;
decoder->data = params->data;
decoder->return_compression = params->return_compression;
decoder->branch_enable = params->branch_enable;
decoder->period = params->period;
decoder->period_type = params->period_type;
@ -398,6 +431,7 @@ static uint64_t intel_pt_calc_ip(const struct intel_pt_pkt *packet,
static inline void intel_pt_set_last_ip(struct intel_pt_decoder *decoder)
{
decoder->last_ip = intel_pt_calc_ip(&decoder->packet, decoder->last_ip);
decoder->have_last_ip = true;
}
static inline void intel_pt_set_ip(struct intel_pt_decoder *decoder)
@ -635,6 +669,8 @@ static int intel_pt_calc_cyc_cb(struct intel_pt_pkt_info *pkt_info)
case INTEL_PT_PAD:
case INTEL_PT_VMCS:
case INTEL_PT_MNT:
case INTEL_PT_PTWRITE:
case INTEL_PT_PTWRITE_IP:
return 0;
case INTEL_PT_MTC:
@ -675,6 +711,12 @@ static int intel_pt_calc_cyc_cb(struct intel_pt_pkt_info *pkt_info)
break;
case INTEL_PT_TSC:
/*
* For now, do not support using TSC packets - refer to
* intel_pt_calc_cyc_to_tsc().
*/
if (data->from_mtc)
return 1;
timestamp = pkt_info->packet.payload |
(data->timestamp & (0xffULL << 56));
if (data->from_mtc && timestamp < data->timestamp &&
@ -733,6 +775,11 @@ static int intel_pt_calc_cyc_cb(struct intel_pt_pkt_info *pkt_info)
case INTEL_PT_TIP_PGD:
case INTEL_PT_TRACESTOP:
case INTEL_PT_EXSTOP:
case INTEL_PT_EXSTOP_IP:
case INTEL_PT_MWAIT:
case INTEL_PT_PWRE:
case INTEL_PT_PWRX:
case INTEL_PT_OVF:
case INTEL_PT_BAD: /* Does not happen */
default:
@ -787,6 +834,14 @@ static void intel_pt_calc_cyc_to_tsc(struct intel_pt_decoder *decoder,
.cbr_cyc_to_tsc = 0,
};
/*
* For now, do not support using TSC packets, for at least these reasons:
* 1) timing might have stopped
* 2) TSC packets within PSB+ can slip against CYC packets
*/
if (!from_mtc)
return;
intel_pt_pkt_lookahead(decoder, intel_pt_calc_cyc_cb, &data);
}
@ -898,6 +953,7 @@ static int intel_pt_walk_insn(struct intel_pt_decoder *decoder,
decoder->tot_insn_cnt += insn_cnt;
decoder->timestamp_insn_cnt += insn_cnt;
decoder->sample_insn_cnt += insn_cnt;
decoder->period_insn_cnt += insn_cnt;
if (err) {
@ -990,6 +1046,57 @@ static int intel_pt_walk_insn(struct intel_pt_decoder *decoder,
return err;
}
static bool intel_pt_fup_event(struct intel_pt_decoder *decoder)
{
bool ret = false;
if (decoder->set_fup_tx_flags) {
decoder->set_fup_tx_flags = false;
decoder->tx_flags = decoder->fup_tx_flags;
decoder->state.type = INTEL_PT_TRANSACTION;
decoder->state.from_ip = decoder->ip;
decoder->state.to_ip = 0;
decoder->state.flags = decoder->fup_tx_flags;
return true;
}
if (decoder->set_fup_ptw) {
decoder->set_fup_ptw = false;
decoder->state.type = INTEL_PT_PTW;
decoder->state.flags |= INTEL_PT_FUP_IP;
decoder->state.from_ip = decoder->ip;
decoder->state.to_ip = 0;
decoder->state.ptw_payload = decoder->fup_ptw_payload;
return true;
}
if (decoder->set_fup_mwait) {
decoder->set_fup_mwait = false;
decoder->state.type = INTEL_PT_MWAIT_OP;
decoder->state.from_ip = decoder->ip;
decoder->state.to_ip = 0;
decoder->state.mwait_payload = decoder->fup_mwait_payload;
ret = true;
}
if (decoder->set_fup_pwre) {
decoder->set_fup_pwre = false;
decoder->state.type |= INTEL_PT_PWR_ENTRY;
decoder->state.type &= ~INTEL_PT_BRANCH;
decoder->state.from_ip = decoder->ip;
decoder->state.to_ip = 0;
decoder->state.pwre_payload = decoder->fup_pwre_payload;
ret = true;
}
if (decoder->set_fup_exstop) {
decoder->set_fup_exstop = false;
decoder->state.type |= INTEL_PT_EX_STOP;
decoder->state.type &= ~INTEL_PT_BRANCH;
decoder->state.flags |= INTEL_PT_FUP_IP;
decoder->state.from_ip = decoder->ip;
decoder->state.to_ip = 0;
ret = true;
}
return ret;
}
static int intel_pt_walk_fup(struct intel_pt_decoder *decoder)
{
struct intel_pt_insn intel_pt_insn;
@ -1003,15 +1110,8 @@ static int intel_pt_walk_fup(struct intel_pt_decoder *decoder)
if (err == INTEL_PT_RETURN)
return 0;
if (err == -EAGAIN) {
if (decoder->set_fup_tx_flags) {
decoder->set_fup_tx_flags = false;
decoder->tx_flags = decoder->fup_tx_flags;
decoder->state.type = INTEL_PT_TRANSACTION;
decoder->state.from_ip = decoder->ip;
decoder->state.to_ip = 0;
decoder->state.flags = decoder->fup_tx_flags;
if (intel_pt_fup_event(decoder))
return 0;
}
return err;
}
decoder->set_fup_tx_flags = false;
@ -1360,7 +1460,9 @@ static void intel_pt_calc_mtc_timestamp(struct intel_pt_decoder *decoder)
static void intel_pt_calc_cbr(struct intel_pt_decoder *decoder)
{
unsigned int cbr = decoder->packet.payload;
unsigned int cbr = decoder->packet.payload & 0xff;
decoder->cbr_payload = decoder->packet.payload;
if (decoder->cbr == cbr)
return;
@ -1417,6 +1519,13 @@ static int intel_pt_walk_psbend(struct intel_pt_decoder *decoder)
case INTEL_PT_TRACESTOP:
case INTEL_PT_BAD:
case INTEL_PT_PSB:
case INTEL_PT_PTWRITE:
case INTEL_PT_PTWRITE_IP:
case INTEL_PT_EXSTOP:
case INTEL_PT_EXSTOP_IP:
case INTEL_PT_MWAIT:
case INTEL_PT_PWRE:
case INTEL_PT_PWRX:
decoder->have_tma = false;
intel_pt_log("ERROR: Unexpected packet\n");
return -EAGAIN;
@ -1446,7 +1555,8 @@ static int intel_pt_walk_psbend(struct intel_pt_decoder *decoder)
case INTEL_PT_FUP:
decoder->pge = true;
intel_pt_set_last_ip(decoder);
if (decoder->packet.count)
intel_pt_set_last_ip(decoder);
break;
case INTEL_PT_MODE_TSX:
@ -1497,6 +1607,13 @@ static int intel_pt_walk_fup_tip(struct intel_pt_decoder *decoder)
case INTEL_PT_MODE_TSX:
case INTEL_PT_BAD:
case INTEL_PT_PSBEND:
case INTEL_PT_PTWRITE:
case INTEL_PT_PTWRITE_IP:
case INTEL_PT_EXSTOP:
case INTEL_PT_EXSTOP_IP:
case INTEL_PT_MWAIT:
case INTEL_PT_PWRE:
case INTEL_PT_PWRX:
intel_pt_log("ERROR: Missing TIP after FUP\n");
decoder->pkt_state = INTEL_PT_STATE_ERR3;
return -ENOENT;
@ -1625,6 +1742,15 @@ static int intel_pt_walk_trace(struct intel_pt_decoder *decoder)
break;
}
intel_pt_set_last_ip(decoder);
if (!decoder->branch_enable) {
decoder->ip = decoder->last_ip;
if (intel_pt_fup_event(decoder))
return 0;
no_tip = false;
break;
}
if (decoder->set_fup_mwait)
no_tip = true;
err = intel_pt_walk_fup(decoder);
if (err != -EAGAIN) {
if (err)
@ -1650,6 +1776,8 @@ static int intel_pt_walk_trace(struct intel_pt_decoder *decoder)
break;
case INTEL_PT_PSB:
decoder->last_ip = 0;
decoder->have_last_ip = true;
intel_pt_clear_stack(&decoder->stack);
err = intel_pt_walk_psbend(decoder);
if (err == -EAGAIN)
@ -1696,6 +1824,16 @@ static int intel_pt_walk_trace(struct intel_pt_decoder *decoder)
case INTEL_PT_CBR:
intel_pt_calc_cbr(decoder);
if (!decoder->branch_enable &&
decoder->cbr != decoder->cbr_seen) {
decoder->cbr_seen = decoder->cbr;
decoder->state.type = INTEL_PT_CBR_CHG;
decoder->state.from_ip = decoder->ip;
decoder->state.to_ip = 0;
decoder->state.cbr_payload =
decoder->packet.payload;
return 0;
}
break;
case INTEL_PT_MODE_EXEC:
@ -1722,6 +1860,71 @@ static int intel_pt_walk_trace(struct intel_pt_decoder *decoder)
case INTEL_PT_PAD:
break;
case INTEL_PT_PTWRITE_IP:
decoder->fup_ptw_payload = decoder->packet.payload;
err = intel_pt_get_next_packet(decoder);
if (err)
return err;
if (decoder->packet.type == INTEL_PT_FUP) {
decoder->set_fup_ptw = true;
no_tip = true;
} else {
intel_pt_log_at("ERROR: Missing FUP after PTWRITE",
decoder->pos);
}
goto next;
case INTEL_PT_PTWRITE:
decoder->state.type = INTEL_PT_PTW;
decoder->state.from_ip = decoder->ip;
decoder->state.to_ip = 0;
decoder->state.ptw_payload = decoder->packet.payload;
return 0;
case INTEL_PT_MWAIT:
decoder->fup_mwait_payload = decoder->packet.payload;
decoder->set_fup_mwait = true;
break;
case INTEL_PT_PWRE:
if (decoder->set_fup_mwait) {
decoder->fup_pwre_payload =
decoder->packet.payload;
decoder->set_fup_pwre = true;
break;
}
decoder->state.type = INTEL_PT_PWR_ENTRY;
decoder->state.from_ip = decoder->ip;
decoder->state.to_ip = 0;
decoder->state.pwrx_payload = decoder->packet.payload;
return 0;
case INTEL_PT_EXSTOP_IP:
err = intel_pt_get_next_packet(decoder);
if (err)
return err;
if (decoder->packet.type == INTEL_PT_FUP) {
decoder->set_fup_exstop = true;
no_tip = true;
} else {
intel_pt_log_at("ERROR: Missing FUP after EXSTOP",
decoder->pos);
}
goto next;
case INTEL_PT_EXSTOP:
decoder->state.type = INTEL_PT_EX_STOP;
decoder->state.from_ip = decoder->ip;
decoder->state.to_ip = 0;
return 0;
case INTEL_PT_PWRX:
decoder->state.type = INTEL_PT_PWR_EXIT;
decoder->state.from_ip = decoder->ip;
decoder->state.to_ip = 0;
decoder->state.pwrx_payload = decoder->packet.payload;
return 0;
default:
return intel_pt_bug(decoder);
}
@ -1730,8 +1933,9 @@ static int intel_pt_walk_trace(struct intel_pt_decoder *decoder)
static inline bool intel_pt_have_ip(struct intel_pt_decoder *decoder)
{
return decoder->last_ip || decoder->packet.count == 0 ||
decoder->packet.count == 3 || decoder->packet.count == 6;
return decoder->packet.count &&
(decoder->have_last_ip || decoder->packet.count == 3 ||
decoder->packet.count == 6);
}
/* Walk PSB+ packets to get in sync. */
@ -1750,6 +1954,13 @@ static int intel_pt_walk_psb(struct intel_pt_decoder *decoder)
__fallthrough;
case INTEL_PT_TIP_PGE:
case INTEL_PT_TIP:
case INTEL_PT_PTWRITE:
case INTEL_PT_PTWRITE_IP:
case INTEL_PT_EXSTOP:
case INTEL_PT_EXSTOP_IP:
case INTEL_PT_MWAIT:
case INTEL_PT_PWRE:
case INTEL_PT_PWRX:
intel_pt_log("ERROR: Unexpected packet\n");
return -ENOENT;
@ -1854,14 +2065,10 @@ static int intel_pt_walk_to_ip(struct intel_pt_decoder *decoder)
break;
case INTEL_PT_FUP:
if (decoder->overflow) {
if (intel_pt_have_ip(decoder))
intel_pt_set_ip(decoder);
if (decoder->ip)
return 0;
}
if (decoder->packet.count)
intel_pt_set_last_ip(decoder);
if (intel_pt_have_ip(decoder))
intel_pt_set_ip(decoder);
if (decoder->ip)
return 0;
break;
case INTEL_PT_MTC:
@ -1910,6 +2117,9 @@ static int intel_pt_walk_to_ip(struct intel_pt_decoder *decoder)
break;
case INTEL_PT_PSB:
decoder->last_ip = 0;
decoder->have_last_ip = true;
intel_pt_clear_stack(&decoder->stack);
err = intel_pt_walk_psb(decoder);
if (err)
return err;
@ -1925,6 +2135,13 @@ static int intel_pt_walk_to_ip(struct intel_pt_decoder *decoder)
case INTEL_PT_VMCS:
case INTEL_PT_MNT:
case INTEL_PT_PAD:
case INTEL_PT_PTWRITE:
case INTEL_PT_PTWRITE_IP:
case INTEL_PT_EXSTOP:
case INTEL_PT_EXSTOP_IP:
case INTEL_PT_MWAIT:
case INTEL_PT_PWRE:
case INTEL_PT_PWRX:
default:
break;
}
@ -1935,6 +2152,19 @@ static int intel_pt_sync_ip(struct intel_pt_decoder *decoder)
{
int err;
decoder->set_fup_tx_flags = false;
decoder->set_fup_ptw = false;
decoder->set_fup_mwait = false;
decoder->set_fup_pwre = false;
decoder->set_fup_exstop = false;
if (!decoder->branch_enable) {
decoder->pkt_state = INTEL_PT_STATE_IN_SYNC;
decoder->overflow = false;
decoder->state.type = 0; /* Do not have a sample */
return 0;
}
intel_pt_log("Scanning for full IP\n");
err = intel_pt_walk_to_ip(decoder);
if (err)
@ -2043,6 +2273,7 @@ static int intel_pt_sync(struct intel_pt_decoder *decoder)
decoder->pge = false;
decoder->continuous_period = false;
decoder->have_last_ip = false;
decoder->last_ip = 0;
decoder->ip = 0;
intel_pt_clear_stack(&decoder->stack);
@ -2051,6 +2282,7 @@ static int intel_pt_sync(struct intel_pt_decoder *decoder)
if (err)
return err;
decoder->have_last_ip = true;
decoder->pkt_state = INTEL_PT_STATE_NO_IP;
err = intel_pt_walk_psb(decoder);
@ -2069,7 +2301,7 @@ static int intel_pt_sync(struct intel_pt_decoder *decoder)
static uint64_t intel_pt_est_timestamp(struct intel_pt_decoder *decoder)
{
uint64_t est = decoder->timestamp_insn_cnt << 1;
uint64_t est = decoder->sample_insn_cnt << 1;
if (!decoder->cbr || !decoder->max_non_turbo_ratio)
goto out;
@ -2077,7 +2309,7 @@ static uint64_t intel_pt_est_timestamp(struct intel_pt_decoder *decoder)
est *= decoder->max_non_turbo_ratio;
est /= decoder->cbr;
out:
return decoder->timestamp + est;
return decoder->sample_timestamp + est;
}
const struct intel_pt_state *intel_pt_decode(struct intel_pt_decoder *decoder)
@ -2093,8 +2325,10 @@ const struct intel_pt_state *intel_pt_decode(struct intel_pt_decoder *decoder)
err = intel_pt_sync(decoder);
break;
case INTEL_PT_STATE_NO_IP:
decoder->have_last_ip = false;
decoder->last_ip = 0;
/* Fall through */
decoder->ip = 0;
__fallthrough;
case INTEL_PT_STATE_ERR_RESYNC:
err = intel_pt_sync_ip(decoder);
break;
@ -2130,15 +2364,29 @@ const struct intel_pt_state *intel_pt_decode(struct intel_pt_decoder *decoder)
}
} while (err == -ENOLINK);
decoder->state.err = err ? intel_pt_ext_err(err) : 0;
decoder->state.timestamp = decoder->timestamp;
if (err) {
decoder->state.err = intel_pt_ext_err(err);
decoder->state.from_ip = decoder->ip;
decoder->sample_timestamp = decoder->timestamp;
decoder->sample_insn_cnt = decoder->timestamp_insn_cnt;
} else {
decoder->state.err = 0;
if (decoder->cbr != decoder->cbr_seen && decoder->state.type) {
decoder->cbr_seen = decoder->cbr;
decoder->state.type |= INTEL_PT_CBR_CHG;
decoder->state.cbr_payload = decoder->cbr_payload;
}
if (intel_pt_sample_time(decoder->pkt_state)) {
decoder->sample_timestamp = decoder->timestamp;
decoder->sample_insn_cnt = decoder->timestamp_insn_cnt;
}
}
decoder->state.timestamp = decoder->sample_timestamp;
decoder->state.est_timestamp = intel_pt_est_timestamp(decoder);
decoder->state.cr3 = decoder->cr3;
decoder->state.tot_insn_cnt = decoder->tot_insn_cnt;
if (err)
decoder->state.from_ip = decoder->ip;
return &decoder->state;
}

View File

@ -25,11 +25,18 @@
#define INTEL_PT_IN_TX (1 << 0)
#define INTEL_PT_ABORT_TX (1 << 1)
#define INTEL_PT_ASYNC (1 << 2)
#define INTEL_PT_FUP_IP (1 << 3)
enum intel_pt_sample_type {
INTEL_PT_BRANCH = 1 << 0,
INTEL_PT_INSTRUCTION = 1 << 1,
INTEL_PT_TRANSACTION = 1 << 2,
INTEL_PT_PTW = 1 << 3,
INTEL_PT_MWAIT_OP = 1 << 4,
INTEL_PT_PWR_ENTRY = 1 << 5,
INTEL_PT_EX_STOP = 1 << 6,
INTEL_PT_PWR_EXIT = 1 << 7,
INTEL_PT_CBR_CHG = 1 << 8,
};
enum intel_pt_period_type {
@ -63,6 +70,11 @@ struct intel_pt_state {
uint64_t timestamp;
uint64_t est_timestamp;
uint64_t trace_nr;
uint64_t ptw_payload;
uint64_t mwait_payload;
uint64_t pwre_payload;
uint64_t pwrx_payload;
uint64_t cbr_payload;
uint32_t flags;
enum intel_pt_insn_op insn_op;
int insn_len;
@ -87,6 +99,7 @@ struct intel_pt_params {
bool (*pgd_ip)(uint64_t ip, void *data);
void *data;
bool return_compression;
bool branch_enable;
uint64_t period;
enum intel_pt_period_type period_type;
unsigned max_non_turbo_ratio;

View File

@ -16,6 +16,7 @@
#ifndef INCLUDE__INTEL_PT_LOG_H__
#define INCLUDE__INTEL_PT_LOG_H__
#include <linux/compiler.h>
#include <stdint.h>
#include <inttypes.h>
@ -34,8 +35,7 @@ void __intel_pt_log_insn(struct intel_pt_insn *intel_pt_insn, uint64_t ip);
void __intel_pt_log_insn_no_data(struct intel_pt_insn *intel_pt_insn,
uint64_t ip);
__attribute__((format(printf, 1, 2)))
void __intel_pt_log(const char *fmt, ...);
void __intel_pt_log(const char *fmt, ...) __printf(1, 2);
#define intel_pt_log(fmt, ...) \
do { \

View File

@ -64,6 +64,13 @@ static const char * const packet_name[] = {
[INTEL_PT_PIP] = "PIP",
[INTEL_PT_OVF] = "OVF",
[INTEL_PT_MNT] = "MNT",
[INTEL_PT_PTWRITE] = "PTWRITE",
[INTEL_PT_PTWRITE_IP] = "PTWRITE",
[INTEL_PT_EXSTOP] = "EXSTOP",
[INTEL_PT_EXSTOP_IP] = "EXSTOP",
[INTEL_PT_MWAIT] = "MWAIT",
[INTEL_PT_PWRE] = "PWRE",
[INTEL_PT_PWRX] = "PWRX",
};
const char *intel_pt_pkt_name(enum intel_pt_pkt_type type)
@ -123,7 +130,7 @@ static int intel_pt_get_cbr(const unsigned char *buf, size_t len,
if (len < 4)
return INTEL_PT_NEED_MORE_BYTES;
packet->type = INTEL_PT_CBR;
packet->payload = buf[2];
packet->payload = le16_to_cpu(*(uint16_t *)(buf + 2));
return 4;
}
@ -217,12 +224,80 @@ static int intel_pt_get_3byte(const unsigned char *buf, size_t len,
}
}
static int intel_pt_get_ptwrite(const unsigned char *buf, size_t len,
struct intel_pt_pkt *packet)
{
packet->count = (buf[1] >> 5) & 0x3;
packet->type = buf[1] & BIT(7) ? INTEL_PT_PTWRITE_IP :
INTEL_PT_PTWRITE;
switch (packet->count) {
case 0:
if (len < 6)
return INTEL_PT_NEED_MORE_BYTES;
packet->payload = le32_to_cpu(*(uint32_t *)(buf + 2));
return 6;
case 1:
if (len < 10)
return INTEL_PT_NEED_MORE_BYTES;
packet->payload = le64_to_cpu(*(uint64_t *)(buf + 2));
return 10;
default:
return INTEL_PT_BAD_PACKET;
}
}
static int intel_pt_get_exstop(struct intel_pt_pkt *packet)
{
packet->type = INTEL_PT_EXSTOP;
return 2;
}
static int intel_pt_get_exstop_ip(struct intel_pt_pkt *packet)
{
packet->type = INTEL_PT_EXSTOP_IP;
return 2;
}
static int intel_pt_get_mwait(const unsigned char *buf, size_t len,
struct intel_pt_pkt *packet)
{
if (len < 10)
return INTEL_PT_NEED_MORE_BYTES;
packet->type = INTEL_PT_MWAIT;
packet->payload = le64_to_cpu(*(uint64_t *)(buf + 2));
return 10;
}
static int intel_pt_get_pwre(const unsigned char *buf, size_t len,
struct intel_pt_pkt *packet)
{
if (len < 4)
return INTEL_PT_NEED_MORE_BYTES;
packet->type = INTEL_PT_PWRE;
memcpy_le64(&packet->payload, buf + 2, 2);
return 4;
}
static int intel_pt_get_pwrx(const unsigned char *buf, size_t len,
struct intel_pt_pkt *packet)
{
if (len < 7)
return INTEL_PT_NEED_MORE_BYTES;
packet->type = INTEL_PT_PWRX;
memcpy_le64(&packet->payload, buf + 2, 5);
return 7;
}
static int intel_pt_get_ext(const unsigned char *buf, size_t len,
struct intel_pt_pkt *packet)
{
if (len < 2)
return INTEL_PT_NEED_MORE_BYTES;
if ((buf[1] & 0x1f) == 0x12)
return intel_pt_get_ptwrite(buf, len, packet);
switch (buf[1]) {
case 0xa3: /* Long TNT */
return intel_pt_get_long_tnt(buf, len, packet);
@ -244,6 +319,16 @@ static int intel_pt_get_ext(const unsigned char *buf, size_t len,
return intel_pt_get_tma(buf, len, packet);
case 0xC3: /* 3-byte header */
return intel_pt_get_3byte(buf, len, packet);
case 0x62: /* EXSTOP no IP */
return intel_pt_get_exstop(packet);
case 0xE2: /* EXSTOP with IP */
return intel_pt_get_exstop_ip(packet);
case 0xC2: /* MWAIT */
return intel_pt_get_mwait(buf, len, packet);
case 0x22: /* PWRE */
return intel_pt_get_pwre(buf, len, packet);
case 0xA2: /* PWRX */
return intel_pt_get_pwrx(buf, len, packet);
default:
return INTEL_PT_BAD_PACKET;
}
@ -522,6 +607,29 @@ int intel_pt_pkt_desc(const struct intel_pt_pkt *packet, char *buf,
ret = snprintf(buf, buf_len, "%s 0x%llx (NR=%d)",
name, payload, nr);
return ret;
case INTEL_PT_PTWRITE:
return snprintf(buf, buf_len, "%s 0x%llx IP:0", name, payload);
case INTEL_PT_PTWRITE_IP:
return snprintf(buf, buf_len, "%s 0x%llx IP:1", name, payload);
case INTEL_PT_EXSTOP:
return snprintf(buf, buf_len, "%s IP:0", name);
case INTEL_PT_EXSTOP_IP:
return snprintf(buf, buf_len, "%s IP:1", name);
case INTEL_PT_MWAIT:
return snprintf(buf, buf_len, "%s 0x%llx Hints 0x%x Extensions 0x%x",
name, payload, (unsigned int)(payload & 0xff),
(unsigned int)((payload >> 32) & 0x3));
case INTEL_PT_PWRE:
return snprintf(buf, buf_len, "%s 0x%llx HW:%u CState:%u Sub-CState:%u",
name, payload, !!(payload & 0x80),
(unsigned int)((payload >> 12) & 0xf),
(unsigned int)((payload >> 8) & 0xf));
case INTEL_PT_PWRX:
return snprintf(buf, buf_len, "%s 0x%llx Last CState:%u Deepest CState:%u Wake Reason 0x%x",
name, payload,
(unsigned int)((payload >> 4) & 0xf),
(unsigned int)(payload & 0xf),
(unsigned int)((payload >> 8) & 0xf));
default:
break;
}
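As decoded above, the PWRE fields can be read directly off the payload: bit 7 is the hardware-initiated flag, bits 15:12 the C-state and bits 11:8 the sub C-state. A worked example with a hypothetical payload of 0x3280, which prints HW:1 CState:3 Sub-CState:2:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t payload = 0x3280;	/* hypothetical PWRE payload */

	printf("PWRE 0x%llx HW:%u CState:%u Sub-CState:%u\n",
	       (unsigned long long)payload,
	       (unsigned int)!!(payload & 0x80),
	       (unsigned int)((payload >> 12) & 0xf),
	       (unsigned int)((payload >> 8) & 0xf));
	return 0;
}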

View File

@ -52,6 +52,13 @@ enum intel_pt_pkt_type {
INTEL_PT_PIP,
INTEL_PT_OVF,
INTEL_PT_MNT,
INTEL_PT_PTWRITE,
INTEL_PT_PTWRITE_IP,
INTEL_PT_EXSTOP,
INTEL_PT_EXSTOP_IP,
INTEL_PT_MWAIT,
INTEL_PT_PWRE,
INTEL_PT_PWRX,
};
struct intel_pt_pkt {

View File

@ -1009,7 +1009,7 @@ GrpTable: Grp15
1: fxstor | RDGSBASE Ry (F3),(11B)
2: vldmxcsr Md (v1) | WRFSBASE Ry (F3),(11B)
3: vstmxcsr Md (v1) | WRGSBASE Ry (F3),(11B)
4: XSAVE
4: XSAVE | ptwrite Ey (F3),(11B)
5: XRSTOR | lfence (11B)
6: XSAVEOPT | clwb (66) | mfence (11B)
7: clflush | clflushopt (66) | sfence (11B)
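The new Grp15 entry is the PTWRITE instruction (F3 REX.W 0F AE /4). Purely as an illustration of what ends up in the trace as a PTWRITE packet, a hypothetical user-space helper could emit a payload with inline assembly along these lines; this assumes a CPU with PTWRITE support and a recording made with the intel_pt ptw config term:

#include <stdint.h>

/* ptwrite %rax: F3 REX.W 0F AE /4 with mod=11, reg=/4, rm=rax -> ModRM 0xe0. */
static inline void ptwrite64(uint64_t val)
{
	asm volatile(".byte 0xf3, 0x48, 0x0f, 0xae, 0xe0" : : "a"(val) : "memory");
}

int main(void)
{
	ptwrite64(0x1234);	/* shows up as a PTWRITE packet with payload 0x1234 */
	return 0;
}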

View File

@ -81,7 +81,6 @@ struct intel_pt {
bool sample_instructions;
u64 instructions_sample_type;
u64 instructions_sample_period;
u64 instructions_id;
bool sample_branches;
@ -93,6 +92,18 @@ struct intel_pt {
u64 transactions_sample_type;
u64 transactions_id;
bool sample_ptwrites;
u64 ptwrites_sample_type;
u64 ptwrites_id;
bool sample_pwr_events;
u64 pwr_events_sample_type;
u64 mwait_id;
u64 pwre_id;
u64 exstop_id;
u64 pwrx_id;
u64 cbr_id;
bool synth_needs_swap;
u64 tsc_bit;
@ -103,6 +114,7 @@ struct intel_pt {
u64 cyc_bit;
u64 noretcomp_bit;
unsigned max_non_turbo_ratio;
unsigned cbr2khz;
unsigned long num_events;
@ -668,6 +680,19 @@ static bool intel_pt_return_compression(struct intel_pt *pt)
return true;
}
static bool intel_pt_branch_enable(struct intel_pt *pt)
{
struct perf_evsel *evsel;
u64 config;
evlist__for_each_entry(pt->session->evlist, evsel) {
if (intel_pt_get_config(pt, &evsel->attr, &config) &&
(config & 1) && !(config & 0x2000))
return false;
}
return true;
}
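Assuming the intel_pt PMU format exported in sysfs, config bit 0 is the "pt" enable term and bit 13 (0x2000) the "branch" (BranchEn) term, so the loop above reports branch tracing as disabled as soon as one Intel PT event was recorded with branch=0. A stand-alone restatement of the per-event test:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Assumption: bit 0 is the intel_pt "pt" term, bit 13 the "branch" term. */
static bool config_has_branch_tracing(uint64_t config)
{
	return !((config & 1) && !(config & 0x2000));
}

int main(void)
{
	printf("%d\n", config_has_branch_tracing(0x2001));	/* pt + branch */
	printf("%d\n", config_has_branch_tracing(0x1001));	/* pt + ptw, no branch */
	return 0;
}

A trace recorded with something like 'perf record -e intel_pt/branch=0,ptw/u' therefore decodes without branch reconstruction.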
static unsigned int intel_pt_mtc_period(struct intel_pt *pt)
{
struct perf_evsel *evsel;
@ -799,6 +824,7 @@ static struct intel_pt_queue *intel_pt_alloc_queue(struct intel_pt *pt,
params.walk_insn = intel_pt_walk_next_insn;
params.data = ptq;
params.return_compression = intel_pt_return_compression(pt);
params.branch_enable = intel_pt_branch_enable(pt);
params.max_non_turbo_ratio = pt->max_non_turbo_ratio;
params.mtc_period = intel_pt_mtc_period(pt);
params.tsc_ctc_ratio_n = pt->tsc_ctc_ratio_n;
@ -1044,6 +1070,36 @@ static void intel_pt_update_last_branch_rb(struct intel_pt_queue *ptq)
bs->nr += 1;
}
static inline bool intel_pt_skip_event(struct intel_pt *pt)
{
return pt->synth_opts.initial_skip &&
pt->num_events++ < pt->synth_opts.initial_skip;
}
static void intel_pt_prep_b_sample(struct intel_pt *pt,
struct intel_pt_queue *ptq,
union perf_event *event,
struct perf_sample *sample)
{
event->sample.header.type = PERF_RECORD_SAMPLE;
event->sample.header.misc = PERF_RECORD_MISC_USER;
event->sample.header.size = sizeof(struct perf_event_header);
if (!pt->timeless_decoding)
sample->time = tsc_to_perf_time(ptq->timestamp, &pt->tc);
sample->cpumode = PERF_RECORD_MISC_USER;
sample->ip = ptq->state->from_ip;
sample->pid = ptq->pid;
sample->tid = ptq->tid;
sample->addr = ptq->state->to_ip;
sample->period = 1;
sample->cpu = ptq->cpu;
sample->flags = ptq->flags;
sample->insn_len = ptq->insn_len;
memcpy(sample->insn, ptq->insn, INTEL_PT_INSN_BUF_SZ);
}
static int intel_pt_inject_event(union perf_event *event,
struct perf_sample *sample, u64 type,
bool swapped)
@ -1052,9 +1108,35 @@ static int intel_pt_inject_event(union perf_event *event,
return perf_event__synthesize_sample(event, type, 0, sample, swapped);
}
static int intel_pt_synth_branch_sample(struct intel_pt_queue *ptq)
static inline int intel_pt_opt_inject(struct intel_pt *pt,
union perf_event *event,
struct perf_sample *sample, u64 type)
{
if (!pt->synth_opts.inject)
return 0;
return intel_pt_inject_event(event, sample, type, pt->synth_needs_swap);
}
static int intel_pt_deliver_synth_b_event(struct intel_pt *pt,
union perf_event *event,
struct perf_sample *sample, u64 type)
{
int ret;
ret = intel_pt_opt_inject(pt, event, sample, type);
if (ret)
return ret;
ret = perf_session__deliver_synth_event(pt->session, event, sample);
if (ret)
pr_err("Intel PT: failed to deliver event, error %d\n", ret);
return ret;
}
static int intel_pt_synth_branch_sample(struct intel_pt_queue *ptq)
{
struct intel_pt *pt = ptq->pt;
union perf_event *event = ptq->event_buf;
struct perf_sample sample = { .ip = 0, };
@ -1066,29 +1148,13 @@ static int intel_pt_synth_branch_sample(struct intel_pt_queue *ptq)
if (pt->branches_filter && !(pt->branches_filter & ptq->flags))
return 0;
if (pt->synth_opts.initial_skip &&
pt->num_events++ < pt->synth_opts.initial_skip)
if (intel_pt_skip_event(pt))
return 0;
event->sample.header.type = PERF_RECORD_SAMPLE;
event->sample.header.misc = PERF_RECORD_MISC_USER;
event->sample.header.size = sizeof(struct perf_event_header);
intel_pt_prep_b_sample(pt, ptq, event, &sample);
if (!pt->timeless_decoding)
sample.time = tsc_to_perf_time(ptq->timestamp, &pt->tc);
sample.cpumode = PERF_RECORD_MISC_USER;
sample.ip = ptq->state->from_ip;
sample.pid = ptq->pid;
sample.tid = ptq->tid;
sample.addr = ptq->state->to_ip;
sample.id = ptq->pt->branches_id;
sample.stream_id = ptq->pt->branches_id;
sample.period = 1;
sample.cpu = ptq->cpu;
sample.flags = ptq->flags;
sample.insn_len = ptq->insn_len;
memcpy(sample.insn, ptq->insn, INTEL_PT_INSN_BUF_SZ);
/*
* perf report cannot handle events without a branch stack when using
@ -1105,144 +1171,251 @@ static int intel_pt_synth_branch_sample(struct intel_pt_queue *ptq)
sample.branch_stack = (struct branch_stack *)&dummy_bs;
}
if (pt->synth_opts.inject) {
ret = intel_pt_inject_event(event, &sample,
pt->branches_sample_type,
pt->synth_needs_swap);
if (ret)
return ret;
return intel_pt_deliver_synth_b_event(pt, event, &sample,
pt->branches_sample_type);
}
static void intel_pt_prep_sample(struct intel_pt *pt,
struct intel_pt_queue *ptq,
union perf_event *event,
struct perf_sample *sample)
{
intel_pt_prep_b_sample(pt, ptq, event, sample);
if (pt->synth_opts.callchain) {
thread_stack__sample(ptq->thread, ptq->chain,
pt->synth_opts.callchain_sz, sample->ip);
sample->callchain = ptq->chain;
}
ret = perf_session__deliver_synth_event(pt->session, event, &sample);
if (ret)
pr_err("Intel Processor Trace: failed to deliver branch event, error %d\n",
ret);
if (pt->synth_opts.last_branch) {
intel_pt_copy_last_branch_rb(ptq);
sample->branch_stack = ptq->last_branch;
}
}
static inline int intel_pt_deliver_synth_event(struct intel_pt *pt,
struct intel_pt_queue *ptq,
union perf_event *event,
struct perf_sample *sample,
u64 type)
{
int ret;
ret = intel_pt_deliver_synth_b_event(pt, event, sample, type);
if (pt->synth_opts.last_branch)
intel_pt_reset_last_branch_rb(ptq);
return ret;
}
static int intel_pt_synth_instruction_sample(struct intel_pt_queue *ptq)
{
int ret;
struct intel_pt *pt = ptq->pt;
union perf_event *event = ptq->event_buf;
struct perf_sample sample = { .ip = 0, };
if (pt->synth_opts.initial_skip &&
pt->num_events++ < pt->synth_opts.initial_skip)
if (intel_pt_skip_event(pt))
return 0;
event->sample.header.type = PERF_RECORD_SAMPLE;
event->sample.header.misc = PERF_RECORD_MISC_USER;
event->sample.header.size = sizeof(struct perf_event_header);
intel_pt_prep_sample(pt, ptq, event, &sample);
if (!pt->timeless_decoding)
sample.time = tsc_to_perf_time(ptq->timestamp, &pt->tc);
sample.cpumode = PERF_RECORD_MISC_USER;
sample.ip = ptq->state->from_ip;
sample.pid = ptq->pid;
sample.tid = ptq->tid;
sample.addr = ptq->state->to_ip;
sample.id = ptq->pt->instructions_id;
sample.stream_id = ptq->pt->instructions_id;
sample.period = ptq->state->tot_insn_cnt - ptq->last_insn_cnt;
sample.cpu = ptq->cpu;
sample.flags = ptq->flags;
sample.insn_len = ptq->insn_len;
memcpy(sample.insn, ptq->insn, INTEL_PT_INSN_BUF_SZ);
ptq->last_insn_cnt = ptq->state->tot_insn_cnt;
if (pt->synth_opts.callchain) {
thread_stack__sample(ptq->thread, ptq->chain,
pt->synth_opts.callchain_sz, sample.ip);
sample.callchain = ptq->chain;
}
if (pt->synth_opts.last_branch) {
intel_pt_copy_last_branch_rb(ptq);
sample.branch_stack = ptq->last_branch;
}
if (pt->synth_opts.inject) {
ret = intel_pt_inject_event(event, &sample,
pt->instructions_sample_type,
pt->synth_needs_swap);
if (ret)
return ret;
}
ret = perf_session__deliver_synth_event(pt->session, event, &sample);
if (ret)
pr_err("Intel Processor Trace: failed to deliver instruction event, error %d\n",
ret);
if (pt->synth_opts.last_branch)
intel_pt_reset_last_branch_rb(ptq);
return ret;
return intel_pt_deliver_synth_event(pt, ptq, event, &sample,
pt->instructions_sample_type);
}
static int intel_pt_synth_transaction_sample(struct intel_pt_queue *ptq)
{
int ret;
struct intel_pt *pt = ptq->pt;
union perf_event *event = ptq->event_buf;
struct perf_sample sample = { .ip = 0, };
if (pt->synth_opts.initial_skip &&
pt->num_events++ < pt->synth_opts.initial_skip)
if (intel_pt_skip_event(pt))
return 0;
event->sample.header.type = PERF_RECORD_SAMPLE;
event->sample.header.misc = PERF_RECORD_MISC_USER;
event->sample.header.size = sizeof(struct perf_event_header);
intel_pt_prep_sample(pt, ptq, event, &sample);
if (!pt->timeless_decoding)
sample.time = tsc_to_perf_time(ptq->timestamp, &pt->tc);
sample.cpumode = PERF_RECORD_MISC_USER;
sample.ip = ptq->state->from_ip;
sample.pid = ptq->pid;
sample.tid = ptq->tid;
sample.addr = ptq->state->to_ip;
sample.id = ptq->pt->transactions_id;
sample.stream_id = ptq->pt->transactions_id;
sample.period = 1;
sample.cpu = ptq->cpu;
sample.flags = ptq->flags;
sample.insn_len = ptq->insn_len;
memcpy(sample.insn, ptq->insn, INTEL_PT_INSN_BUF_SZ);
if (pt->synth_opts.callchain) {
thread_stack__sample(ptq->thread, ptq->chain,
pt->synth_opts.callchain_sz, sample.ip);
sample.callchain = ptq->chain;
}
return intel_pt_deliver_synth_event(pt, ptq, event, &sample,
pt->transactions_sample_type);
}
if (pt->synth_opts.last_branch) {
intel_pt_copy_last_branch_rb(ptq);
sample.branch_stack = ptq->last_branch;
}
static void intel_pt_prep_p_sample(struct intel_pt *pt,
struct intel_pt_queue *ptq,
union perf_event *event,
struct perf_sample *sample)
{
intel_pt_prep_sample(pt, ptq, event, sample);
if (pt->synth_opts.inject) {
ret = intel_pt_inject_event(event, &sample,
pt->transactions_sample_type,
pt->synth_needs_swap);
if (ret)
return ret;
}
/*
* Zero IP is used to mean "trace start" but that is not the case for
* power or PTWRITE events with no IP, so clear the flags.
*/
if (!sample->ip)
sample->flags = 0;
}
ret = perf_session__deliver_synth_event(pt->session, event, &sample);
if (ret)
pr_err("Intel Processor Trace: failed to deliver transaction event, error %d\n",
ret);
static int intel_pt_synth_ptwrite_sample(struct intel_pt_queue *ptq)
{
struct intel_pt *pt = ptq->pt;
union perf_event *event = ptq->event_buf;
struct perf_sample sample = { .ip = 0, };
struct perf_synth_intel_ptwrite raw;
if (pt->synth_opts.last_branch)
intel_pt_reset_last_branch_rb(ptq);
if (intel_pt_skip_event(pt))
return 0;
return ret;
intel_pt_prep_p_sample(pt, ptq, event, &sample);
sample.id = ptq->pt->ptwrites_id;
sample.stream_id = ptq->pt->ptwrites_id;
raw.flags = 0;
raw.ip = !!(ptq->state->flags & INTEL_PT_FUP_IP);
raw.payload = cpu_to_le64(ptq->state->ptw_payload);
sample.raw_size = perf_synth__raw_size(raw);
sample.raw_data = perf_synth__raw_data(&raw);
return intel_pt_deliver_synth_event(pt, ptq, event, &sample,
pt->ptwrites_sample_type);
}
static int intel_pt_synth_cbr_sample(struct intel_pt_queue *ptq)
{
struct intel_pt *pt = ptq->pt;
union perf_event *event = ptq->event_buf;
struct perf_sample sample = { .ip = 0, };
struct perf_synth_intel_cbr raw;
u32 flags;
if (intel_pt_skip_event(pt))
return 0;
intel_pt_prep_p_sample(pt, ptq, event, &sample);
sample.id = ptq->pt->cbr_id;
sample.stream_id = ptq->pt->cbr_id;
flags = (u16)ptq->state->cbr_payload | (pt->max_non_turbo_ratio << 16);
raw.flags = cpu_to_le32(flags);
raw.freq = cpu_to_le32(raw.cbr * pt->cbr2khz);
raw.reserved3 = 0;
sample.raw_size = perf_synth__raw_size(raw);
sample.raw_data = perf_synth__raw_data(&raw);
return intel_pt_deliver_synth_event(pt, ptq, event, &sample,
pt->pwr_events_sample_type);
}
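The frequency attached to a CBR sample is simply the core-to-bus ratio from the packet scaled by cbr2khz, which is computed further down from the TSC frequency and the maximum non-turbo ratio. A worked example with assumed numbers (2.4 GHz TSC, maximum non-turbo ratio 24, CBR value 36):

#include <stdio.h>

int main(void)
{
	/* Assumed machine: 2.4 GHz TSC, maximum non-turbo ratio 24. */
	unsigned long long tsc_freq = 2400000000ULL;
	unsigned int max_non_turbo_ratio = 24;
	unsigned int cbr2khz = tsc_freq / max_non_turbo_ratio / 1000;	/* 100000 kHz per ratio step */
	unsigned int cbr = 36;		/* core-to-bus ratio from the CBR payload */

	printf("core frequency: %u kHz (%.1f GHz)\n",
	       cbr * cbr2khz, cbr * cbr2khz / 1e6);
	return 0;
}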
static int intel_pt_synth_mwait_sample(struct intel_pt_queue *ptq)
{
struct intel_pt *pt = ptq->pt;
union perf_event *event = ptq->event_buf;
struct perf_sample sample = { .ip = 0, };
struct perf_synth_intel_mwait raw;
if (intel_pt_skip_event(pt))
return 0;
intel_pt_prep_p_sample(pt, ptq, event, &sample);
sample.id = ptq->pt->mwait_id;
sample.stream_id = ptq->pt->mwait_id;
raw.reserved = 0;
raw.payload = cpu_to_le64(ptq->state->mwait_payload);
sample.raw_size = perf_synth__raw_size(raw);
sample.raw_data = perf_synth__raw_data(&raw);
return intel_pt_deliver_synth_event(pt, ptq, event, &sample,
pt->pwr_events_sample_type);
}
static int intel_pt_synth_pwre_sample(struct intel_pt_queue *ptq)
{
struct intel_pt *pt = ptq->pt;
union perf_event *event = ptq->event_buf;
struct perf_sample sample = { .ip = 0, };
struct perf_synth_intel_pwre raw;
if (intel_pt_skip_event(pt))
return 0;
intel_pt_prep_p_sample(pt, ptq, event, &sample);
sample.id = ptq->pt->pwre_id;
sample.stream_id = ptq->pt->pwre_id;
raw.reserved = 0;
raw.payload = cpu_to_le64(ptq->state->pwre_payload);
sample.raw_size = perf_synth__raw_size(raw);
sample.raw_data = perf_synth__raw_data(&raw);
return intel_pt_deliver_synth_event(pt, ptq, event, &sample,
pt->pwr_events_sample_type);
}
static int intel_pt_synth_exstop_sample(struct intel_pt_queue *ptq)
{
struct intel_pt *pt = ptq->pt;
union perf_event *event = ptq->event_buf;
struct perf_sample sample = { .ip = 0, };
struct perf_synth_intel_exstop raw;
if (intel_pt_skip_event(pt))
return 0;
intel_pt_prep_p_sample(pt, ptq, event, &sample);
sample.id = ptq->pt->exstop_id;
sample.stream_id = ptq->pt->exstop_id;
raw.flags = 0;
raw.ip = !!(ptq->state->flags & INTEL_PT_FUP_IP);
sample.raw_size = perf_synth__raw_size(raw);
sample.raw_data = perf_synth__raw_data(&raw);
return intel_pt_deliver_synth_event(pt, ptq, event, &sample,
pt->pwr_events_sample_type);
}
static int intel_pt_synth_pwrx_sample(struct intel_pt_queue *ptq)
{
struct intel_pt *pt = ptq->pt;
union perf_event *event = ptq->event_buf;
struct perf_sample sample = { .ip = 0, };
struct perf_synth_intel_pwrx raw;
if (intel_pt_skip_event(pt))
return 0;
intel_pt_prep_p_sample(pt, ptq, event, &sample);
sample.id = ptq->pt->pwrx_id;
sample.stream_id = ptq->pt->pwrx_id;
raw.reserved = 0;
raw.payload = cpu_to_le64(ptq->state->pwrx_payload);
sample.raw_size = perf_synth__raw_size(raw);
sample.raw_data = perf_synth__raw_data(&raw);
return intel_pt_deliver_synth_event(pt, ptq, event, &sample,
pt->pwr_events_sample_type);
}
static int intel_pt_synth_error(struct intel_pt *pt, int code, int cpu,
@ -1296,6 +1469,10 @@ static inline bool intel_pt_is_switch_ip(struct intel_pt_queue *ptq, u64 ip)
PERF_IP_FLAG_INTERRUPT | PERF_IP_FLAG_TX_ABORT));
}
#define INTEL_PT_PWR_EVT (INTEL_PT_MWAIT_OP | INTEL_PT_PWR_ENTRY | \
INTEL_PT_EX_STOP | INTEL_PT_PWR_EXIT | \
INTEL_PT_CBR_CHG)
static int intel_pt_sample(struct intel_pt_queue *ptq)
{
const struct intel_pt_state *state = ptq->state;
@ -1307,24 +1484,52 @@ static int intel_pt_sample(struct intel_pt_queue *ptq)
ptq->have_sample = false;
if (pt->sample_instructions &&
(state->type & INTEL_PT_INSTRUCTION) &&
(!pt->synth_opts.initial_skip ||
pt->num_events++ >= pt->synth_opts.initial_skip)) {
if (pt->sample_pwr_events && (state->type & INTEL_PT_PWR_EVT)) {
if (state->type & INTEL_PT_CBR_CHG) {
err = intel_pt_synth_cbr_sample(ptq);
if (err)
return err;
}
if (state->type & INTEL_PT_MWAIT_OP) {
err = intel_pt_synth_mwait_sample(ptq);
if (err)
return err;
}
if (state->type & INTEL_PT_PWR_ENTRY) {
err = intel_pt_synth_pwre_sample(ptq);
if (err)
return err;
}
if (state->type & INTEL_PT_EX_STOP) {
err = intel_pt_synth_exstop_sample(ptq);
if (err)
return err;
}
if (state->type & INTEL_PT_PWR_EXIT) {
err = intel_pt_synth_pwrx_sample(ptq);
if (err)
return err;
}
}
if (pt->sample_instructions && (state->type & INTEL_PT_INSTRUCTION)) {
err = intel_pt_synth_instruction_sample(ptq);
if (err)
return err;
}
if (pt->sample_transactions &&
(state->type & INTEL_PT_TRANSACTION) &&
(!pt->synth_opts.initial_skip ||
pt->num_events++ >= pt->synth_opts.initial_skip)) {
if (pt->sample_transactions && (state->type & INTEL_PT_TRANSACTION)) {
err = intel_pt_synth_transaction_sample(ptq);
if (err)
return err;
}
if (pt->sample_ptwrites && (state->type & INTEL_PT_PTW)) {
err = intel_pt_synth_ptwrite_sample(ptq);
if (err)
return err;
}
if (!(state->type & INTEL_PT_BRANCH))
return 0;
@ -1925,36 +2130,65 @@ static int intel_pt_event_synth(struct perf_tool *tool,
NULL);
}
static int intel_pt_synth_event(struct perf_session *session,
static int intel_pt_synth_event(struct perf_session *session, const char *name,
struct perf_event_attr *attr, u64 id)
{
struct intel_pt_synth intel_pt_synth;
int err;
pr_debug("Synthesizing '%s' event with id %" PRIu64 " sample type %#" PRIx64 "\n",
name, id, (u64)attr->sample_type);
memset(&intel_pt_synth, 0, sizeof(struct intel_pt_synth));
intel_pt_synth.session = session;
return perf_event__synthesize_attr(&intel_pt_synth.dummy_tool, attr, 1,
&id, intel_pt_event_synth);
err = perf_event__synthesize_attr(&intel_pt_synth.dummy_tool, attr, 1,
&id, intel_pt_event_synth);
if (err)
pr_err("%s: failed to synthesize '%s' event type\n",
__func__, name);
return err;
}
static void intel_pt_set_event_name(struct perf_evlist *evlist, u64 id,
const char *name)
{
struct perf_evsel *evsel;
evlist__for_each_entry(evlist, evsel) {
if (evsel->id && evsel->id[0] == id) {
if (evsel->name)
zfree(&evsel->name);
evsel->name = strdup(name);
break;
}
}
}
static struct perf_evsel *intel_pt_evsel(struct intel_pt *pt,
struct perf_evlist *evlist)
{
struct perf_evsel *evsel;
evlist__for_each_entry(evlist, evsel) {
if (evsel->attr.type == pt->pmu_type && evsel->ids)
return evsel;
}
return NULL;
}
static int intel_pt_synth_events(struct intel_pt *pt,
struct perf_session *session)
{
struct perf_evlist *evlist = session->evlist;
struct perf_evsel *evsel;
struct perf_evsel *evsel = intel_pt_evsel(pt, evlist);
struct perf_event_attr attr;
bool found = false;
u64 id;
int err;
evlist__for_each_entry(evlist, evsel) {
if (evsel->attr.type == pt->pmu_type && evsel->ids) {
found = true;
break;
}
}
if (!found) {
if (!evsel) {
pr_debug("There are no selected events with Intel Processor Trace data\n");
return 0;
}
@ -1983,6 +2217,25 @@ static int intel_pt_synth_events(struct intel_pt *pt,
if (!id)
id = 1;
if (pt->synth_opts.branches) {
attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
attr.sample_period = 1;
attr.sample_type |= PERF_SAMPLE_ADDR;
err = intel_pt_synth_event(session, "branches", &attr, id);
if (err)
return err;
pt->sample_branches = true;
pt->branches_sample_type = attr.sample_type;
pt->branches_id = id;
id += 1;
attr.sample_type &= ~(u64)PERF_SAMPLE_ADDR;
}
if (pt->synth_opts.callchain)
attr.sample_type |= PERF_SAMPLE_CALLCHAIN;
if (pt->synth_opts.last_branch)
attr.sample_type |= PERF_SAMPLE_BRANCH_STACK;
if (pt->synth_opts.instructions) {
attr.config = PERF_COUNT_HW_INSTRUCTIONS;
if (pt->synth_opts.period_type == PERF_ITRACE_PERIOD_NANOSECS)
@ -1990,70 +2243,90 @@ static int intel_pt_synth_events(struct intel_pt *pt,
intel_pt_ns_to_ticks(pt, pt->synth_opts.period);
else
attr.sample_period = pt->synth_opts.period;
pt->instructions_sample_period = attr.sample_period;
if (pt->synth_opts.callchain)
attr.sample_type |= PERF_SAMPLE_CALLCHAIN;
if (pt->synth_opts.last_branch)
attr.sample_type |= PERF_SAMPLE_BRANCH_STACK;
pr_debug("Synthesizing 'instructions' event with id %" PRIu64 " sample type %#" PRIx64 "\n",
id, (u64)attr.sample_type);
err = intel_pt_synth_event(session, &attr, id);
if (err) {
pr_err("%s: failed to synthesize 'instructions' event type\n",
__func__);
err = intel_pt_synth_event(session, "instructions", &attr, id);
if (err)
return err;
}
pt->sample_instructions = true;
pt->instructions_sample_type = attr.sample_type;
pt->instructions_id = id;
id += 1;
}
attr.sample_type &= ~(u64)PERF_SAMPLE_PERIOD;
attr.sample_period = 1;
if (pt->synth_opts.transactions) {
attr.config = PERF_COUNT_HW_INSTRUCTIONS;
attr.sample_period = 1;
if (pt->synth_opts.callchain)
attr.sample_type |= PERF_SAMPLE_CALLCHAIN;
if (pt->synth_opts.last_branch)
attr.sample_type |= PERF_SAMPLE_BRANCH_STACK;
pr_debug("Synthesizing 'transactions' event with id %" PRIu64 " sample type %#" PRIx64 "\n",
id, (u64)attr.sample_type);
err = intel_pt_synth_event(session, &attr, id);
if (err) {
pr_err("%s: failed to synthesize 'transactions' event type\n",
__func__);
err = intel_pt_synth_event(session, "transactions", &attr, id);
if (err)
return err;
}
pt->sample_transactions = true;
pt->transactions_sample_type = attr.sample_type;
pt->transactions_id = id;
intel_pt_set_event_name(evlist, id, "transactions");
id += 1;
evlist__for_each_entry(evlist, evsel) {
if (evsel->id && evsel->id[0] == pt->transactions_id) {
if (evsel->name)
zfree(&evsel->name);
evsel->name = strdup("transactions");
break;
}
}
}
if (pt->synth_opts.branches) {
attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
attr.sample_period = 1;
attr.sample_type |= PERF_SAMPLE_ADDR;
attr.sample_type &= ~(u64)PERF_SAMPLE_CALLCHAIN;
attr.sample_type &= ~(u64)PERF_SAMPLE_BRANCH_STACK;
pr_debug("Synthesizing 'branches' event with id %" PRIu64 " sample type %#" PRIx64 "\n",
id, (u64)attr.sample_type);
err = intel_pt_synth_event(session, &attr, id);
if (err) {
pr_err("%s: failed to synthesize 'branches' event type\n",
__func__);
attr.type = PERF_TYPE_SYNTH;
attr.sample_type |= PERF_SAMPLE_RAW;
if (pt->synth_opts.ptwrites) {
attr.config = PERF_SYNTH_INTEL_PTWRITE;
err = intel_pt_synth_event(session, "ptwrite", &attr, id);
if (err)
return err;
}
pt->sample_branches = true;
pt->branches_sample_type = attr.sample_type;
pt->branches_id = id;
pt->sample_ptwrites = true;
pt->ptwrites_sample_type = attr.sample_type;
pt->ptwrites_id = id;
intel_pt_set_event_name(evlist, id, "ptwrite");
id += 1;
}
if (pt->synth_opts.pwr_events) {
pt->sample_pwr_events = true;
pt->pwr_events_sample_type = attr.sample_type;
attr.config = PERF_SYNTH_INTEL_CBR;
err = intel_pt_synth_event(session, "cbr", &attr, id);
if (err)
return err;
pt->cbr_id = id;
intel_pt_set_event_name(evlist, id, "cbr");
id += 1;
}
if (pt->synth_opts.pwr_events && (evsel->attr.config & 0x10)) {
attr.config = PERF_SYNTH_INTEL_MWAIT;
err = intel_pt_synth_event(session, "mwait", &attr, id);
if (err)
return err;
pt->mwait_id = id;
intel_pt_set_event_name(evlist, id, "mwait");
id += 1;
attr.config = PERF_SYNTH_INTEL_PWRE;
err = intel_pt_synth_event(session, "pwre", &attr, id);
if (err)
return err;
pt->pwre_id = id;
intel_pt_set_event_name(evlist, id, "pwre");
id += 1;
attr.config = PERF_SYNTH_INTEL_EXSTOP;
err = intel_pt_synth_event(session, "exstop", &attr, id);
if (err)
return err;
pt->exstop_id = id;
intel_pt_set_event_name(evlist, id, "exstop");
id += 1;
attr.config = PERF_SYNTH_INTEL_PWRX;
err = intel_pt_synth_event(session, "pwrx", &attr, id);
if (err)
return err;
pt->pwrx_id = id;
intel_pt_set_event_name(evlist, id, "pwrx");
id += 1;
}
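The cbr samples only require the pwr_events itrace option, but mwait/pwre/exstop/pwrx additionally require that the trace was recorded with power-event packets enabled; the '(evsel->attr.config & 0x10)' test corresponds, assuming the intel_pt sysfs format, to the pwr_evt config term (bit 4). A stand-alone restatement of that gate:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Assumption: config bit 4 (0x10) is the intel_pt pwr_evt term, i.e. the
 * trace actually contains MWAIT/PWRE/EXSTOP/PWRX packets. */
static bool trace_has_power_packets(uint64_t intel_pt_config)
{
	return intel_pt_config & 0x10;
}

int main(void)
{
	printf("%d\n", trace_has_power_packets(0x2011));	/* pt + pwr_evt + branch */
	printf("%d\n", trace_has_power_packets(0x2001));	/* pt + branch only */
	return 0;
}

So a recording made with, for example, 'perf record -e intel_pt/pwr_evt/u' and decoded with '--itrace=bep' would produce all five of the synthesized power event types.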
pt->synth_needs_swap = evsel->needs_swap;
@ -2322,6 +2595,7 @@ int intel_pt_process_auxtrace_info(union perf_event *event,
intel_pt_log("TSC frequency %"PRIu64"\n", tsc_freq);
intel_pt_log("Maximum non-turbo ratio %u\n",
pt->max_non_turbo_ratio);
pt->cbr2khz = tsc_freq / pt->max_non_turbo_ratio / 1000;
}
if (pt->synth_opts.calls)

View File

@ -2,6 +2,7 @@
#define __PMU_H
#include <linux/bitmap.h>
#include <linux/compiler.h>
#include <linux/perf_event.h>
#include <stdbool.h>
#include "evsel.h"
@ -83,8 +84,7 @@ void print_pmu_events(const char *event_glob, bool name_only, bool quiet,
bool long_desc, bool details_flag);
bool pmu_have_event(const char *pname, const char *name);
int perf_pmu__scan_file(struct perf_pmu *pmu, const char *name, const char *fmt,
...) __attribute__((format(scanf, 3, 4)));
int perf_pmu__scan_file(struct perf_pmu *pmu, const char *name, const char *fmt, ...) __scanf(3, 4);
int perf_pmu__test(void);

View File

@ -1,6 +1,7 @@
#ifndef _PROBE_EVENT_H
#define _PROBE_EVENT_H
#include <linux/compiler.h>
#include <stdbool.h>
#include "intlist.h"
@ -171,8 +172,7 @@ void arch__fix_tev_from_maps(struct perf_probe_event *pev,
struct symbol *sym);
/* If there is no space to write, returns -E2BIG. */
int e_snprintf(char *str, size_t size, const char *format, ...)
__attribute__((format(printf, 3, 4)));
int e_snprintf(char *str, size_t size, const char *format, ...) __printf(3, 4);
/* Maximum index number of event-name postfix */
#define MAX_EVENT_INDEX 1024

View File

@ -28,6 +28,7 @@
#include <stdbool.h>
#include <errno.h>
#include <linux/bitmap.h>
#include <linux/compiler.h>
#include <linux/time64.h>
#include "../../perf.h"
@ -84,7 +85,7 @@ struct tables {
static struct tables tables_global;
static void handler_call_die(const char *handler_name) NORETURN;
static void handler_call_die(const char *handler_name) __noreturn;
static void handler_call_die(const char *handler_name)
{
PyErr_Print();

View File

@ -2035,7 +2035,7 @@ int perf_session__cpu_bitmap(struct perf_session *session,
if (!(evsel->attr.sample_type & PERF_SAMPLE_CPU)) {
pr_err("File does not contain CPU events. "
"Remove -c option to proceed.\n");
"Remove -C option to proceed.\n");
return -1;
}
}

View File

@ -2532,12 +2532,12 @@ static int setup_sort_list(struct perf_hpp_list *list, char *str,
ret = sort_dimension__add(list, tok, evlist, level);
if (ret == -EINVAL) {
if (!cacheline_size && !strncasecmp(tok, "dcacheline", strlen(tok)))
error("The \"dcacheline\" --sort key needs to know the cacheline size and it couldn't be determined on this system");
pr_err("The \"dcacheline\" --sort key needs to know the cacheline size and it couldn't be determined on this system");
else
error("Invalid --sort key: `%s'", tok);
pr_err("Invalid --sort key: `%s'", tok);
break;
} else if (ret == -ESRCH) {
error("Unknown --sort key: `%s'", tok);
pr_err("Unknown --sort key: `%s'", tok);
break;
}
}
@ -2594,7 +2594,7 @@ static int setup_sort_order(struct perf_evlist *evlist)
return 0;
if (sort_order[1] == '\0') {
error("Invalid --sort key: `+'");
pr_err("Invalid --sort key: `+'");
return -EINVAL;
}
@ -2604,7 +2604,7 @@ static int setup_sort_order(struct perf_evlist *evlist)
*/
if (asprintf(&new_sort_order, "%s,%s",
get_default_sort_order(evlist), sort_order + 1) < 0) {
error("Not enough memory to set up --sort");
pr_err("Not enough memory to set up --sort");
return -ENOMEM;
}
@ -2668,7 +2668,7 @@ static int __setup_sorting(struct perf_evlist *evlist)
str = strdup(sort_keys);
if (str == NULL) {
error("Not enough memory to setup sort keys");
pr_err("Not enough memory to setup sort keys");
return -ENOMEM;
}
@ -2678,7 +2678,7 @@ static int __setup_sorting(struct perf_evlist *evlist)
if (!is_strict_order(field_order)) {
str = setup_overhead(str);
if (str == NULL) {
error("Not enough memory to setup overhead keys");
pr_err("Not enough memory to setup overhead keys");
return -ENOMEM;
}
}
@ -2834,10 +2834,10 @@ static int setup_output_list(struct perf_hpp_list *list, char *str)
tok; tok = strtok_r(NULL, ", ", &tmp)) {
ret = output_field_add(list, tok);
if (ret == -EINVAL) {
error("Invalid --fields key: `%s'", tok);
pr_err("Invalid --fields key: `%s'", tok);
break;
} else if (ret == -ESRCH) {
error("Unknown --fields key: `%s'", tok);
pr_err("Unknown --fields key: `%s'", tok);
break;
}
}
@ -2877,7 +2877,7 @@ static int __setup_output_field(void)
strp = str = strdup(field_order);
if (str == NULL) {
error("Not enough memory to setup output fields");
pr_err("Not enough memory to setup output fields");
return -ENOMEM;
}
@ -2885,7 +2885,7 @@ static int __setup_output_field(void)
strp++;
if (!strlen(strp)) {
error("Invalid --fields key: `+'");
pr_err("Invalid --fields key: `+'");
goto out;
}

View File

@ -44,6 +44,8 @@ static struct stats runtime_topdown_slots_issued[NUM_CTX][MAX_NR_CPUS];
static struct stats runtime_topdown_slots_retired[NUM_CTX][MAX_NR_CPUS];
static struct stats runtime_topdown_fetch_bubbles[NUM_CTX][MAX_NR_CPUS];
static struct stats runtime_topdown_recovery_bubbles[NUM_CTX][MAX_NR_CPUS];
static struct stats runtime_smi_num_stats[NUM_CTX][MAX_NR_CPUS];
static struct stats runtime_aperf_stats[NUM_CTX][MAX_NR_CPUS];
static struct rblist runtime_saved_values;
static bool have_frontend_stalled;
@ -157,6 +159,8 @@ void perf_stat__reset_shadow_stats(void)
memset(runtime_topdown_slots_issued, 0, sizeof(runtime_topdown_slots_issued));
memset(runtime_topdown_fetch_bubbles, 0, sizeof(runtime_topdown_fetch_bubbles));
memset(runtime_topdown_recovery_bubbles, 0, sizeof(runtime_topdown_recovery_bubbles));
memset(runtime_smi_num_stats, 0, sizeof(runtime_smi_num_stats));
memset(runtime_aperf_stats, 0, sizeof(runtime_aperf_stats));
next = rb_first(&runtime_saved_values.entries);
while (next) {
@ -217,6 +221,10 @@ void perf_stat__update_shadow_stats(struct perf_evsel *counter, u64 *count,
update_stats(&runtime_dtlb_cache_stats[ctx][cpu], count[0]);
else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_ITLB))
update_stats(&runtime_itlb_cache_stats[ctx][cpu], count[0]);
else if (perf_stat_evsel__is(counter, SMI_NUM))
update_stats(&runtime_smi_num_stats[ctx][cpu], count[0]);
else if (perf_stat_evsel__is(counter, APERF))
update_stats(&runtime_aperf_stats[ctx][cpu], count[0]);
if (counter->collect_stat) {
struct saved_value *v = saved_value_lookup(counter, cpu, ctx,
@ -592,6 +600,29 @@ static double td_be_bound(int ctx, int cpu)
return sanitize_val(1.0 - sum);
}
static void print_smi_cost(int cpu, struct perf_evsel *evsel,
struct perf_stat_output_ctx *out)
{
double smi_num, aperf, cycles, cost = 0.0;
int ctx = evsel_context(evsel);
const char *color = NULL;
smi_num = avg_stats(&runtime_smi_num_stats[ctx][cpu]);
aperf = avg_stats(&runtime_aperf_stats[ctx][cpu]);
cycles = avg_stats(&runtime_cycles_stats[ctx][cpu]);
if ((cycles == 0) || (aperf == 0))
return;
if (smi_num)
cost = (aperf - cycles) / aperf * 100.00;
if (cost > 10)
color = PERF_COLOR_RED;
out->print_metric(out->ctx, color, "%8.1f%%", "SMI cycles%", cost);
out->print_metric(out->ctx, NULL, "%4.0f", "SMI#", smi_num);
}
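The cost formula works because the cycles counter is configured to freeze while the CPU is in System Management Mode, whereas msr/aperf/ keeps counting, so the gap between the two approximates time lost to SMIs. A worked example with assumed counter values:

#include <stdio.h>

int main(void)
{
	/* Assumed counter values over one measurement interval. */
	double aperf = 1000000.0;	/* msr/aperf/: keeps counting during SMM */
	double cycles = 950000.0;	/* cycles: frozen while in SMM */
	double smi_num = 2.0;		/* msr/smi/: SMIs during the interval */
	double cost = 0.0;

	if (smi_num)
		cost = (aperf - cycles) / aperf * 100.0;
	printf("SMI cycles%%: %.1f%%  SMI#: %.0f\n", cost, smi_num);
	return 0;
}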
void perf_stat__print_shadow_stats(struct perf_evsel *evsel,
double avg, int cpu,
struct perf_stat_output_ctx *out)
@ -825,6 +856,8 @@ void perf_stat__print_shadow_stats(struct perf_evsel *evsel,
}
snprintf(unit_buf, sizeof(unit_buf), "%c/sec", unit);
print_metric(ctxp, NULL, "%8.3f", unit_buf, ratio);
} else if (perf_stat_evsel__is(evsel, SMI_NUM)) {
print_smi_cost(cpu, evsel, out);
} else {
print_metric(ctxp, NULL, NULL, NULL, 0);
}

View File

@ -86,6 +86,8 @@ static const char *id_str[PERF_STAT_EVSEL_ID__MAX] = {
ID(TOPDOWN_SLOTS_RETIRED, topdown-slots-retired),
ID(TOPDOWN_FETCH_BUBBLES, topdown-fetch-bubbles),
ID(TOPDOWN_RECOVERY_BUBBLES, topdown-recovery-bubbles),
ID(SMI_NUM, msr/smi/),
ID(APERF, msr/aperf/),
};
#undef ID

View File

@ -22,6 +22,8 @@ enum perf_stat_evsel_id {
PERF_STAT_EVSEL_ID__TOPDOWN_SLOTS_RETIRED,
PERF_STAT_EVSEL_ID__TOPDOWN_FETCH_BUBBLES,
PERF_STAT_EVSEL_ID__TOPDOWN_RECOVERY_BUBBLES,
PERF_STAT_EVSEL_ID__SMI_NUM,
PERF_STAT_EVSEL_ID__APERF,
PERF_STAT_EVSEL_ID__MAX,
};

View File

@ -42,6 +42,7 @@
#include <stdarg.h>
#include <stddef.h>
#include <string.h>
#include <linux/compiler.h>
#include <sys/types.h>
extern char strbuf_slopbuf[];
@ -85,8 +86,7 @@ static inline int strbuf_addstr(struct strbuf *sb, const char *s) {
return strbuf_add(sb, s, strlen(s));
}
__attribute__((format(printf,2,3)))
int strbuf_addf(struct strbuf *sb, const char *fmt, ...);
int strbuf_addf(struct strbuf *sb, const char *fmt, ...) __printf(2, 3);
/* XXX: if read fails, any partial read is undone */
ssize_t strbuf_read(struct strbuf *, int fd, ssize_t hint);

View File

@ -24,7 +24,7 @@
#include <errno.h>
#include "../perf.h"
#include "util.h"
#include "debug.h"
#include "trace-event.h"
#include "sane_ctype.h"
@ -150,7 +150,7 @@ void parse_ftrace_printk(struct pevent *pevent,
while (line) {
addr_str = strtok_r(line, ":", &fmt);
if (!addr_str) {
warning("printk format with empty entry");
pr_warning("printk format with empty entry");
break;
}
addr = strtoull(addr_str, NULL, 16);

View File

@ -9,75 +9,17 @@
#include "util.h"
#include "debug.h"
static void report(const char *prefix, const char *err, va_list params)
{
char msg[1024];
vsnprintf(msg, sizeof(msg), err, params);
fprintf(stderr, " %s%s\n", prefix, msg);
}
static NORETURN void usage_builtin(const char *err)
static __noreturn void usage_builtin(const char *err)
{
fprintf(stderr, "\n Usage: %s\n", err);
exit(129);
}
static NORETURN void die_builtin(const char *err, va_list params)
{
report(" Fatal: ", err, params);
exit(128);
}
static void error_builtin(const char *err, va_list params)
{
report(" Error: ", err, params);
}
static void warn_builtin(const char *warn, va_list params)
{
report(" Warning: ", warn, params);
}
/* If we are in a dlopen()ed .so write to a global variable would segfault
* (ugh), so keep things static. */
static void (*usage_routine)(const char *err) NORETURN = usage_builtin;
static void (*error_routine)(const char *err, va_list params) = error_builtin;
static void (*warn_routine)(const char *err, va_list params) = warn_builtin;
void set_warning_routine(void (*routine)(const char *err, va_list params))
{
warn_routine = routine;
}
static void (*usage_routine)(const char *err) __noreturn = usage_builtin;
void usage(const char *err)
{
usage_routine(err);
}
void die(const char *err, ...)
{
va_list params;
va_start(params, err);
die_builtin(err, params);
va_end(params);
}
int error(const char *err, ...)
{
va_list params;
va_start(params, err);
error_routine(err, params);
va_end(params);
return -1;
}
void warning(const char *warn, ...)
{
va_list params;
va_start(params, warn);
warn_routine(warn, params);
va_end(params);
}

View File

@ -343,43 +343,6 @@ int perf_event_paranoid(void)
return value;
}
bool find_process(const char *name)
{
size_t len = strlen(name);
DIR *dir;
struct dirent *d;
int ret = -1;
dir = opendir(procfs__mountpoint());
if (!dir)
return false;
/* Walk through the directory. */
while (ret && (d = readdir(dir)) != NULL) {
char path[PATH_MAX];
char *data;
size_t size;
if ((d->d_type != DT_DIR) ||
!strcmp(".", d->d_name) ||
!strcmp("..", d->d_name))
continue;
scnprintf(path, sizeof(path), "%s/%s/comm",
procfs__mountpoint(), d->d_name);
if (filename__read_str(path, &data, &size))
continue;
ret = strncmp(name, data, len);
free(data);
}
closedir(dir);
return ret ? false : true;
}
static int
fetch_ubuntu_kernel_version(unsigned int *puint)
{
@ -387,8 +350,12 @@ fetch_ubuntu_kernel_version(unsigned int *puint)
size_t line_len = 0;
char *ptr, *line = NULL;
int version, patchlevel, sublevel, err;
FILE *vsig = fopen("/proc/version_signature", "r");
FILE *vsig;
if (!puint)
return 0;
vsig = fopen("/proc/version_signature", "r");
if (!vsig) {
pr_debug("Open /proc/version_signature failed: %s\n",
strerror(errno));
@ -418,8 +385,7 @@ fetch_ubuntu_kernel_version(unsigned int *puint)
goto errout;
}
if (puint)
*puint = (version << 16) + (patchlevel << 8) + sublevel;
*puint = (version << 16) + (patchlevel << 8) + sublevel;
err = 0;
errout:
free(line);
@ -446,6 +412,9 @@ fetch_kernel_version(unsigned int *puint, char *str,
str[str_size - 1] = '\0';
}
if (!puint || int_ver_ready)
return 0;
err = sscanf(utsname.release, "%d.%d.%d",
&version, &patchlevel, &sublevel);
@ -455,8 +424,7 @@ fetch_kernel_version(unsigned int *puint, char *str,
return -1;
}
if (puint && !int_ver_ready)
*puint = (version << 16) + (patchlevel << 8) + sublevel;
*puint = (version << 16) + (patchlevel << 8) + sublevel;
return 0;
}
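fetch_kernel_version() packs major.minor.patch into a single integer as (version << 16) + (patchlevel << 8) + sublevel, which the KVER_* macros below unpack. A worked example:

#include <stdio.h>

#define KVER_VERSION(x) (((x) >> 16) & 0xff)

int main(void)
{
	/* 4.13.2 packed the same way fetch_kernel_version() does. */
	unsigned int version = 4, patchlevel = 13, sublevel = 2;
	unsigned int packed = (version << 16) + (patchlevel << 8) + sublevel;

	printf("packed 0x%06x, major %u\n", packed, KVER_VERSION(packed));
	return 0;
}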

View File

@ -1,7 +1,6 @@
#ifndef GIT_COMPAT_UTIL_H
#define GIT_COMPAT_UTIL_H
#define _ALL_SOURCE 1
#define _BSD_SOURCE 1
/* glibc 2.20 deprecates _BSD_SOURCE in favour of _DEFAULT_SOURCE */
#define _DEFAULT_SOURCE 1
@ -11,24 +10,12 @@
#include <stddef.h>
#include <stdlib.h>
#include <stdarg.h>
#include <linux/compiler.h>
#include <linux/types.h>
#ifdef __GNUC__
#define NORETURN __attribute__((__noreturn__))
#else
#define NORETURN
#ifndef __attribute__
#define __attribute__(x)
#endif
#endif
/* General helper functions */
void usage(const char *err) NORETURN;
void die(const char *err, ...) NORETURN __attribute__((format (printf, 1, 2)));
int error(const char *err, ...) __attribute__((format (printf, 1, 2)));
void warning(const char *err, ...) __attribute__((format (printf, 1, 2)));
void set_warning_routine(void (*routine)(const char *err, va_list params));
void usage(const char *err) __noreturn;
void die(const char *err, ...) __noreturn __printf(1, 2);
static inline void *zalloc(size_t size)
{
@ -57,8 +44,6 @@ int hex2u64(const char *ptr, u64 *val);
extern unsigned int page_size;
extern int cacheline_size;
bool find_process(const char *name);
int fetch_kernel_version(unsigned int *puint,
char *str, size_t str_sz);
#define KVER_VERSION(x) (((x) >> 16) & 0xff)