author     Ingo Molnar <mingo@kernel.org>   2013-08-15 09:48:17 +0200
committer  Ingo Molnar <mingo@kernel.org>   2013-08-15 09:48:17 +0200
commit     58cea307432e3376293e6b2be88d1f6e6e99274a (patch)
tree       4f1a04fccc12b6ff58b2a10e6ca9f2fcaac4adbe
parent     0a3d23a2568ed5e73bd4fb532dc672fa9f03b1f1 (diff)
parent     2ae3a312c0ccd8ff615372f00aab1700aac27474 (diff)
Merge tag 'perf-core-for-mingo' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux into perf/core
Pull perf/core improvements and fixes from Arnaldo Carvalho de Melo:

* Allow specifying syscalls in 'perf trace', a la strace.

* Simplify symbol filtering by doing it at machine class level, from Adrian Hunter.

* Add option to 'perf kvm' to print only events that exceed a specified time duration, from David Ahern.

* 'perf sched' improvements, including removing some tracepoints that provide the same information as the PERF_RECORD_{FORK,EXIT} events.

* Improve stack trace printing, from David Ahern.

* Update documentation with the live command, from David Ahern.

* Fix 'perf test' compile failure on do_sort_something, from David Ahern.

* Improve robustness of topology parsing code, from Stephane Eranian.

Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--  tools/perf/Documentation/perf-kvm.txt    |  46
-rw-r--r--  tools/perf/Documentation/perf-trace.txt  |   4
-rw-r--r--  tools/perf/builtin-annotate.c            |   5
-rw-r--r--  tools/perf/builtin-diff.c                |   2
-rw-r--r--  tools/perf/builtin-inject.c              |   2
-rw-r--r--  tools/perf/builtin-kvm.c                 |  25
-rw-r--r--  tools/perf/builtin-mem.c                 |   4
-rw-r--r--  tools/perf/builtin-report.c              |   7
-rw-r--r--  tools/perf/builtin-sched.c               |  94
-rw-r--r--  tools/perf/builtin-script.c              |  35
-rw-r--r--  tools/perf/builtin-top.c                 |   5
-rw-r--r--  tools/perf/builtin-trace.c               |  52
-rw-r--r--  tools/perf/perf.h                        |   3
-rw-r--r--  tools/perf/tests/code-reading.c          |  13
-rw-r--r--  tools/perf/tests/hists_link.c            |   4
-rw-r--r--  tools/perf/util/build-id.c               |   2
-rw-r--r--  tools/perf/util/event.c                  |  20
-rw-r--r--  tools/perf/util/event.h                  |   3
-rw-r--r--  tools/perf/util/header.c                 |  11
-rw-r--r--  tools/perf/util/machine.c                |  28
-rw-r--r--  tools/perf/util/machine.h                |   5
-rw-r--r--  tools/perf/util/session.c                |  40
-rw-r--r--  tools/perf/util/session.h                |   8
-rw-r--r--  tools/perf/util/thread.h                 |   5
-rw-r--r--  tools/perf/util/unwind.c                 |   6

25 files changed, 294 insertions, 135 deletions
diff --git a/tools/perf/Documentation/perf-kvm.txt b/tools/perf/Documentation/perf-kvm.txt
index 326f2cb333cb..ac84db2d2334 100644
--- a/tools/perf/Documentation/perf-kvm.txt
+++ b/tools/perf/Documentation/perf-kvm.txt
@@ -13,6 +13,7 @@ SYNOPSIS
{top|record|report|diff|buildid-list}
'perf kvm' [--host] [--guest] [--guestkallsyms=<path> --guestmodules=<path>
| --guestvmlinux=<path>] {top|record|report|diff|buildid-list|stat}
+'perf kvm stat [record|report|live] [<options>]
DESCRIPTION
-----------
@@ -50,6 +51,10 @@ There are a couple of variants of perf kvm:
'perf kvm stat report' reports statistical data which includes events
handled time, samples, and so on.
+ 'perf kvm stat live' reports statistical data in a live mode (similar to
+ record + report but with statistical data updated live at a given display
+ rate).
+
OPTIONS
-------
-i::
@@ -85,13 +90,50 @@ STAT REPORT OPTIONS
--vcpu=<value>::
analyze events which occures on this vcpu. (default: all vcpus)
---events=<value>::
- events to be analyzed. Possible values: vmexit, mmio, ioport.
+--event=<value>::
+ event to be analyzed. Possible values: vmexit, mmio, ioport.
(default: vmexit)
-k::
--key=<value>::
Sorting key. Possible values: sample (default, sort by samples
number), time (sort by average time).
+-p::
+--pid=::
+ Analyze events only for given process ID(s) (comma separated list).
+
+STAT LIVE OPTIONS
+-----------------
+-d::
+--display::
+ Time in seconds between display updates
+
+-m::
+--mmap-pages=::
+ Number of mmap data pages. Must be a power of two.
+
+-a::
+--all-cpus::
+ System-wide collection from all CPUs.
+
+-p::
+--pid=::
+ Analyze events only for given process ID(s) (comma separated list).
+
+--vcpu=<value>::
+ analyze events which occures on this vcpu. (default: all vcpus)
+
+
+--event=<value>::
+ event to be analyzed. Possible values: vmexit, mmio, ioport.
+ (default: vmexit)
+
+-k::
+--key=<value>::
+ Sorting key. Possible values: sample (default, sort by samples
+ number), time (sort by average time).
+
+--duration=<value>::
+ Show events other than HLT that take longer than duration usecs.
SEE ALSO
--------
diff --git a/tools/perf/Documentation/perf-trace.txt b/tools/perf/Documentation/perf-trace.txt
index 68718ccdd178..3b3552a8959e 100644
--- a/tools/perf/Documentation/perf-trace.txt
+++ b/tools/perf/Documentation/perf-trace.txt
@@ -26,6 +26,10 @@ OPTIONS
--all-cpus::
System-wide collection from all CPUs.
+-e::
+--expr::
+ List of events to show, currently only syscall names.
+
-p::
--pid=::
Record events on existing process ID (comma separated list).
diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c
index db491e9a812b..f988d380c52f 100644
--- a/tools/perf/builtin-annotate.c
+++ b/tools/perf/builtin-annotate.c
@@ -90,8 +90,7 @@ static int process_sample_event(struct perf_tool *tool,
struct perf_annotate *ann = container_of(tool, struct perf_annotate, tool);
struct addr_location al;
- if (perf_event__preprocess_sample(event, machine, &al, sample,
- symbol__annotate_init) < 0) {
+ if (perf_event__preprocess_sample(event, machine, &al, sample) < 0) {
pr_warning("problem processing %d event, skipping it.\n",
event->header.type);
return -1;
@@ -195,6 +194,8 @@ static int __cmd_annotate(struct perf_annotate *ann)
if (session == NULL)
return -ENOMEM;
+ machines__set_symbol_filter(&session->machines, symbol__annotate_init);
+
if (ann->cpu_list) {
ret = perf_session__cpu_bitmap(session, ann->cpu_list,
ann->cpu_bitmap);
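The machines__set_symbol_filter() call above is the core of the simplification: the symbol filter is installed once on the machine(s) instead of being threaded through every perf_event__preprocess_sample() call. A minimal, self-contained sketch of that pattern follows; the struct layouts here are simplified stand-ins, not the real util/machine.h definitions (which keep guests in an rb-tree).

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Simplified stand-ins for perf's machine/machines types. */
typedef bool (*symbol_filter_t)(const char *name);

struct machine {
	const char *name;
	symbol_filter_t symbol_filter;
};

struct machines {
	struct machine host;
	struct machine guests[2];	/* the real code keeps an rb-tree of guests */
	size_t nr_guests;
};

/* Install the filter once; samples later pick it up from their machine. */
static void machines__set_symbol_filter(struct machines *machines,
					symbol_filter_t filter)
{
	size_t i;

	machines->host.symbol_filter = filter;
	for (i = 0; i < machines->nr_guests; i++)
		machines->guests[i].symbol_filter = filter;
}

static bool annotate_filter(const char *name)
{
	return name && name[0] != '_';	/* arbitrary example predicate */
}

int main(void)
{
	struct machines m = { .host = { "host", NULL }, .nr_guests = 0 };

	machines__set_symbol_filter(&m, annotate_filter);
	printf("filter accepts 'main': %d\n", m.host.symbol_filter("main"));
	return 0;
}
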
diff --git a/tools/perf/builtin-diff.c b/tools/perf/builtin-diff.c
index 93de3ac177c5..f28799e94f2a 100644
--- a/tools/perf/builtin-diff.c
+++ b/tools/perf/builtin-diff.c
@@ -319,7 +319,7 @@ static int diff__process_sample_event(struct perf_tool *tool __maybe_unused,
{
struct addr_location al;
- if (perf_event__preprocess_sample(event, machine, &al, sample, NULL) < 0) {
+ if (perf_event__preprocess_sample(event, machine, &al, sample) < 0) {
pr_warning("problem processing %d event, skipping it.\n",
event->header.type);
return -1;
diff --git a/tools/perf/builtin-inject.c b/tools/perf/builtin-inject.c
index f012a98c726c..1d8de2e4a407 100644
--- a/tools/perf/builtin-inject.c
+++ b/tools/perf/builtin-inject.c
@@ -206,7 +206,7 @@ static int perf_event__inject_buildid(struct perf_tool *tool,
}
thread__find_addr_map(thread, machine, cpumode, MAP__FUNCTION,
- event->ip.ip, &al, NULL);
+ event->ip.ip, &al);
if (al.map != NULL) {
if (!al.map->dso->hit) {
diff --git a/tools/perf/builtin-kvm.c b/tools/perf/builtin-kvm.c
index 2ceec817a2fb..fa2f3d79886a 100644
--- a/tools/perf/builtin-kvm.c
+++ b/tools/perf/builtin-kvm.c
@@ -106,6 +106,7 @@ struct perf_kvm_stat {
u64 total_time;
u64 total_count;
u64 lost_events;
+ u64 duration;
const char *pid_str;
struct intlist *pid_list;
@@ -473,7 +474,7 @@ static bool update_kvm_event(struct kvm_event *event, int vcpu_id,
static bool handle_end_event(struct perf_kvm_stat *kvm,
struct vcpu_event_record *vcpu_record,
struct event_key *key,
- u64 timestamp)
+ struct perf_sample *sample)
{
struct kvm_event *event;
u64 time_begin, time_diff;
@@ -510,12 +511,24 @@ static bool handle_end_event(struct perf_kvm_stat *kvm,
vcpu_record->start_time = 0;
/* seems to happen once in a while during live mode */
- if (timestamp < time_begin) {
+ if (sample->time < time_begin) {
pr_debug("End time before begin time; skipping event.\n");
return true;
}
- time_diff = timestamp - time_begin;
+ time_diff = sample->time - time_begin;
+
+ if (kvm->duration && time_diff > kvm->duration) {
+ char decode[32];
+
+ kvm->events_ops->decode_key(kvm, &event->key, decode);
+ if (strcmp(decode, "HLT")) {
+ pr_info("%" PRIu64 " VM %d, vcpu %d: %s event took %" PRIu64 "usec\n",
+ sample->time, sample->pid, vcpu_record->vcpu_id,
+ decode, time_diff/1000);
+ }
+ }
+
return update_kvm_event(event, vcpu, time_diff);
}
@@ -562,7 +575,7 @@ static bool handle_kvm_event(struct perf_kvm_stat *kvm,
return handle_begin_event(kvm, vcpu_record, &key, sample->time);
if (kvm->events_ops->is_end_event(evsel, sample, &key))
- return handle_end_event(kvm, vcpu_record, &key, sample->time);
+ return handle_end_event(kvm, vcpu_record, &key, sample);
return true;
}
@@ -1429,6 +1442,8 @@ static int kvm_events_live(struct perf_kvm_stat *kvm,
OPT_STRING('k', "key", &kvm->sort_key, "sort-key",
"key for sorting: sample(sort by samples number)"
" time (sort by avg time)"),
+ OPT_U64(0, "duration", &kvm->duration,
+ "show events other than HALT that take longer than duration usecs"),
OPT_END()
};
const char * const live_usage[] = {
@@ -1467,6 +1482,8 @@ static int kvm_events_live(struct perf_kvm_stat *kvm,
usage_with_options(live_usage, live_options);
}
+ kvm->duration *= NSEC_PER_USEC; /* convert usec to nsec */
+
/*
* target related setups
*/
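For reference, the new --duration gate boils down to one usec-to-nsec conversion at option-parsing time plus a per-event comparison that skips HLT. A standalone sketch of that logic follows; the function name and parameters are illustrative only, not the tool's internal API.

#include <inttypes.h>
#include <stdio.h>
#include <string.h>

#define NSEC_PER_USEC 1000ULL

/* Illustrative stand-in for the check added to handle_end_event(). */
static void report_if_slow(uint64_t duration_nsec, uint64_t time_diff_nsec,
			   const char *decoded_key, uint64_t ts, int pid, int vcpu)
{
	if (duration_nsec && time_diff_nsec > duration_nsec &&
	    strcmp(decoded_key, "HLT") != 0)
		printf("%" PRIu64 " VM %d, vcpu %d: %s event took %" PRIu64 "usec\n",
		       ts, pid, vcpu, decoded_key, time_diff_nsec / 1000);
}

int main(void)
{
	uint64_t duration = 500 * NSEC_PER_USEC;	/* e.g. --duration=500 */

	report_if_slow(duration, 750000, "MSR_WRITE", 123456789, 4242, 0);	/* reported */
	report_if_slow(duration, 750000, "HLT", 123456790, 4242, 0);		/* suppressed */
	return 0;
}
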
diff --git a/tools/perf/builtin-mem.c b/tools/perf/builtin-mem.c
index a8ff6d264e50..706a1faa9559 100644
--- a/tools/perf/builtin-mem.c
+++ b/tools/perf/builtin-mem.c
@@ -14,7 +14,6 @@ static const char *mem_operation = MEM_OPERATION_LOAD;
struct perf_mem {
struct perf_tool tool;
char const *input_name;
- symbol_filter_t annotate_init;
bool hide_unresolved;
bool dump_raw;
const char *cpu_list;
@@ -69,8 +68,7 @@ dump_raw_samples(struct perf_tool *tool,
struct addr_location al;
const char *fmt;
- if (perf_event__preprocess_sample(event, machine, &al, sample,
- mem->annotate_init) < 0) {
+ if (perf_event__preprocess_sample(event, machine, &al, sample) < 0) {
fprintf(stderr, "problem processing %d event, skipping it.\n",
event->header.type);
return -1;
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index d785d89ed226..958a56a0e39e 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -49,7 +49,6 @@ struct perf_report {
bool mem_mode;
struct perf_read_values show_threads_values;
const char *pretty_printing_style;
- symbol_filter_t annotate_init;
const char *cpu_list;
const char *symbol_filter_str;
float min_percent;
@@ -305,8 +304,7 @@ static int process_sample_event(struct perf_tool *tool,
struct addr_location al;
int ret;
- if (perf_event__preprocess_sample(event, machine, &al, sample,
- rep->annotate_init) < 0) {
+ if (perf_event__preprocess_sample(event, machine, &al, sample) < 0) {
fprintf(stderr, "problem processing %d event, skipping it.\n",
event->header.type);
return -1;
@@ -924,7 +922,8 @@ repeat:
*/
if (use_browser == 1 && sort__has_sym) {
symbol_conf.priv_size = sizeof(struct annotation);
- report.annotate_init = symbol__annotate_init;
+ machines__set_symbol_filter(&session->machines,
+ symbol__annotate_init);
/*
* For searching by name on the "Browse map details".
* providing it only in verbose mode not to bloat too
diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c
index 948183adb6e5..f809cc7fb7d9 100644
--- a/tools/perf/builtin-sched.c
+++ b/tools/perf/builtin-sched.c
@@ -109,8 +109,9 @@ struct trace_sched_handler {
int (*wakeup_event)(struct perf_sched *sched, struct perf_evsel *evsel,
struct perf_sample *sample, struct machine *machine);
- int (*fork_event)(struct perf_sched *sched, struct perf_evsel *evsel,
- struct perf_sample *sample);
+ /* PERF_RECORD_FORK event, not sched_process_fork tracepoint */
+ int (*fork_event)(struct perf_sched *sched, union perf_event *event,
+ struct machine *machine);
int (*migrate_task_event)(struct perf_sched *sched,
struct perf_evsel *evsel,
@@ -717,22 +718,29 @@ static int replay_switch_event(struct perf_sched *sched,
return 0;
}
-static int replay_fork_event(struct perf_sched *sched, struct perf_evsel *evsel,
- struct perf_sample *sample)
+static int replay_fork_event(struct perf_sched *sched,
+ union perf_event *event,
+ struct machine *machine)
{
- const char *parent_comm = perf_evsel__strval(evsel, sample, "parent_comm"),
- *child_comm = perf_evsel__strval(evsel, sample, "child_comm");
- const u32 parent_pid = perf_evsel__intval(evsel, sample, "parent_pid"),
- child_pid = perf_evsel__intval(evsel, sample, "child_pid");
+ struct thread *child, *parent;
+
+ child = machine__findnew_thread(machine, event->fork.tid);
+ parent = machine__findnew_thread(machine, event->fork.ptid);
+
+ if (child == NULL || parent == NULL) {
+ pr_debug("thread does not exist on fork event: child %p, parent %p\n",
+ child, parent);
+ return 0;
+ }
if (verbose) {
- printf("sched_fork event %p\n", evsel);
- printf("... parent: %s/%d\n", parent_comm, parent_pid);
- printf("... child: %s/%d\n", child_comm, child_pid);
+ printf("fork event\n");
+ printf("... parent: %s/%d\n", parent->comm, parent->tid);
+ printf("... child: %s/%d\n", child->comm, child->tid);
}
- register_pid(sched, parent_pid, parent_comm);
- register_pid(sched, child_pid, child_comm);
+ register_pid(sched, parent->tid, parent->comm);
+ register_pid(sched, child->tid, child->comm);
return 0;
}
@@ -824,14 +832,6 @@ static int thread_atoms_insert(struct perf_sched *sched, struct thread *thread)
return 0;
}
-static int latency_fork_event(struct perf_sched *sched __maybe_unused,
- struct perf_evsel *evsel __maybe_unused,
- struct perf_sample *sample __maybe_unused)
-{
- /* should insert the newcomer */
- return 0;
-}
-
static char sched_out_state(u64 prev_state)
{
const char *str = TASK_STATE_TO_CHAR_STR;
@@ -1379,25 +1379,20 @@ static int process_sched_runtime_event(struct perf_tool *tool,
return 0;
}
-static int process_sched_fork_event(struct perf_tool *tool,
- struct perf_evsel *evsel,
- struct perf_sample *sample,
- struct machine *machine __maybe_unused)
+static int perf_sched__process_fork_event(struct perf_tool *tool,
+ union perf_event *event,
+ struct perf_sample *sample,
+ struct machine *machine)
{
struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
- if (sched->tp_handler->fork_event)
- return sched->tp_handler->fork_event(sched, evsel, sample);
+ /* run the fork event through the perf machineruy */
+ perf_event__process_fork(tool, event, sample, machine);
- return 0;
-}
+ /* and then run additional processing needed for this command */
+ if (sched->tp_handler->fork_event)
+ return sched->tp_handler->fork_event(sched, event, machine);
-static int process_sched_exit_event(struct perf_tool *tool __maybe_unused,
- struct perf_evsel *evsel,
- struct perf_sample *sample __maybe_unused,
- struct machine *machine __maybe_unused)
-{
- pr_debug("sched_exit event %p\n", evsel);
return 0;
}
@@ -1425,15 +1420,8 @@ static int perf_sched__process_tracepoint_sample(struct perf_tool *tool __maybe_
struct perf_evsel *evsel,
struct machine *machine)
{
- struct thread *thread = machine__findnew_thread(machine, sample->tid);
int err = 0;
- if (thread == NULL) {
- pr_debug("problem processing %s event, skipping it.\n",
- perf_evsel__name(evsel));
- return -1;
- }
-
evsel->hists.stats.total_period += sample->period;
hists__inc_nr_events(&evsel->hists, PERF_RECORD_SAMPLE);
@@ -1445,7 +1433,7 @@ static int perf_sched__process_tracepoint_sample(struct perf_tool *tool __maybe_
return err;
}
-static int perf_sched__read_events(struct perf_sched *sched, bool destroy,
+static int perf_sched__read_events(struct perf_sched *sched,
struct perf_session **psession)
{
const struct perf_evsel_str_handler handlers[] = {
@@ -1453,8 +1441,6 @@ static int perf_sched__read_events(struct perf_sched *sched, bool destroy,
{ "sched:sched_stat_runtime", process_sched_runtime_event, },
{ "sched:sched_wakeup", process_sched_wakeup_event, },
{ "sched:sched_wakeup_new", process_sched_wakeup_event, },
- { "sched:sched_process_fork", process_sched_fork_event, },
- { "sched:sched_process_exit", process_sched_exit_event, },
{ "sched:sched_migrate_task", process_sched_migrate_task_event, },
};
struct perf_session *session;
@@ -1480,11 +1466,10 @@ static int perf_sched__read_events(struct perf_sched *sched, bool destroy,
sched->nr_lost_chunks = session->stats.nr_events[PERF_RECORD_LOST];
}
- if (destroy)
- perf_session__delete(session);
-
if (psession)
*psession = session;
+ else
+ perf_session__delete(session);
return 0;
@@ -1529,8 +1514,11 @@ static int perf_sched__lat(struct perf_sched *sched)
struct perf_session *session;
setup_pager();
- if (perf_sched__read_events(sched, false, &session))
+
+ /* save session -- references to threads are held in work_list */
+ if (perf_sched__read_events(sched, &session))
return -1;
+
perf_sched__sort_lat(sched);
printf("\n ---------------------------------------------------------------------------------------------------------------\n");
@@ -1565,7 +1553,7 @@ static int perf_sched__map(struct perf_sched *sched)
sched->max_cpu = sysconf(_SC_NPROCESSORS_CONF);
setup_pager();
- if (perf_sched__read_events(sched, true, NULL))
+ if (perf_sched__read_events(sched, NULL))
return -1;
print_bad_events(sched);
return 0;
@@ -1580,7 +1568,7 @@ static int perf_sched__replay(struct perf_sched *sched)
test_calibrations(sched);
- if (perf_sched__read_events(sched, true, NULL))
+ if (perf_sched__read_events(sched, NULL))
return -1;
printf("nr_run_events: %ld\n", sched->nr_run_events);
@@ -1639,7 +1627,6 @@ static int __cmd_record(int argc, const char **argv)
"-e", "sched:sched_stat_sleep",
"-e", "sched:sched_stat_iowait",
"-e", "sched:sched_stat_runtime",
- "-e", "sched:sched_process_exit",
"-e", "sched:sched_process_fork",
"-e", "sched:sched_wakeup",
"-e", "sched:sched_migrate_task",
@@ -1668,7 +1655,7 @@ static struct perf_sched sched = {
.sample = perf_sched__process_tracepoint_sample,
.comm = perf_event__process_comm,
.lost = perf_event__process_lost,
- .fork = perf_event__process_fork,
+ .fork = perf_sched__process_fork_event,
.ordered_samples = true,
},
.cmp_pid = LIST_HEAD_INIT(sched.cmp_pid),
@@ -1730,7 +1717,6 @@ int cmd_sched(int argc, const char **argv, const char *prefix __maybe_unused)
.wakeup_event = latency_wakeup_event,
.switch_event = latency_switch_event,
.runtime_event = latency_runtime_event,
- .fork_event = latency_fork_event,
.migrate_task_event = latency_migrate_task_event,
};
struct trace_sched_handler map_ops = {
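The sched handlers above no longer need the sched_process_fork/exit tracepoints because the parent/child thread ids are already carried by the synthetic PERF_RECORD_FORK record. For orientation, a sketch of that payload is shown below; field names follow tools/perf/util/event.h, but the header struct is simplified so the example stands alone.

#include <stdint.h>
#include <stdio.h>

/* Simplified copy of the generic perf event header. */
struct perf_event_header {
	uint32_t type;
	uint16_t misc;
	uint16_t size;
};

/* Sketch of the PERF_RECORD_FORK payload the new fork handler consumes. */
struct fork_event {
	struct perf_event_header header;
	uint32_t pid, ppid;	/* child and parent process ids */
	uint32_t tid, ptid;	/* child and parent thread ids */
	uint64_t time;
};

int main(void)
{
	struct fork_event ev = { .tid = 1234, .ptid = 1000 };

	/* replay_fork_event() looks both threads up by these two ids. */
	printf("fork: child tid %u, parent tid %u\n", ev.tid, ev.ptid);
	return 0;
}
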
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
index cd616ff5d221..2ad9d5b6fb3c 100644
--- a/tools/perf/builtin-script.c
+++ b/tools/perf/builtin-script.c
@@ -66,6 +66,7 @@ struct output_option {
static struct {
bool user_set;
bool wildcard_set;
+ unsigned int print_ip_opts;
u64 fields;
u64 invalid_fields;
} output[PERF_TYPE_MAX] = {
@@ -235,6 +236,7 @@ static int perf_session__check_output_opt(struct perf_session *session)
{
int j;
struct perf_evsel *evsel;
+ struct perf_event_attr *attr;
for (j = 0; j < PERF_TYPE_MAX; ++j) {
evsel = perf_session__find_first_evtype(session, j);
@@ -253,6 +255,24 @@ static int perf_session__check_output_opt(struct perf_session *session)
if (evsel && output[j].fields &&
perf_evsel__check_attr(evsel, session))
return -1;
+
+ if (evsel == NULL)
+ continue;
+
+ attr = &evsel->attr;
+
+ output[j].print_ip_opts = 0;
+ if (PRINT_FIELD(IP))
+ output[j].print_ip_opts |= PRINT_IP_OPT_IP;
+
+ if (PRINT_FIELD(SYM))
+ output[j].print_ip_opts |= PRINT_IP_OPT_SYM;
+
+ if (PRINT_FIELD(DSO))
+ output[j].print_ip_opts |= PRINT_IP_OPT_DSO;
+
+ if (PRINT_FIELD(SYMOFFSET))
+ output[j].print_ip_opts |= PRINT_IP_OPT_SYMOFFSET;
}
return 0;
@@ -341,10 +361,10 @@ static void print_sample_addr(union perf_event *event,
return;
thread__find_addr_map(thread, machine, cpumode, MAP__FUNCTION,
- sample->addr, &al, NULL);
+ sample->addr, &al);
if (!al.map)
thread__find_addr_map(thread, machine, cpumode, MAP__VARIABLE,
- sample->addr, &al, NULL);
+ sample->addr, &al);
al.cpu = sample->cpu;
al.sym = NULL;
@@ -382,8 +402,8 @@ static void print_sample_bts(union perf_event *event,
else
printf("\n");
perf_evsel__print_ip(evsel, event, sample, machine,
- PRINT_FIELD(SYM), PRINT_FIELD(DSO),
- PRINT_FIELD(SYMOFFSET));
+ output[attr->type].print_ip_opts,
+ PERF_MAX_STACK_DEPTH);
}
printf(" => ");
@@ -423,9 +443,10 @@ static void process_event(union perf_event *event, struct perf_sample *sample,
printf(" ");
else
printf("\n");
+
perf_evsel__print_ip(evsel, event, sample, machine,
- PRINT_FIELD(SYM), PRINT_FIELD(DSO),
- PRINT_FIELD(SYMOFFSET));
+ output[attr->type].print_ip_opts,
+ PERF_MAX_STACK_DEPTH);
}
printf("\n");
@@ -499,7 +520,7 @@ static int process_sample_event(struct perf_tool *tool __maybe_unused,
return 0;
}
- if (perf_event__preprocess_sample(event, machine, &al, sample, 0) < 0) {
+ if (perf_event__preprocess_sample(event, machine, &al, sample) < 0) {
pr_err("problem processing %d event, skipping it.\n",
event->header.type);
return -1;
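The print_ip_opts value computed once per event type in perf_session__check_output_opt() is a plain bitmask, so the per-sample print path tests flags instead of taking a list of booleans. A minimal illustration of building and testing such a mask follows; the flag values are placeholders, not the PRINT_IP_OPT_* definitions from util/session.h.

#include <stdio.h>

/* Placeholder values; the real PRINT_IP_OPT_* flags live in util/session.h. */
enum {
	PRINT_IP_OPT_IP        = 1 << 0,
	PRINT_IP_OPT_SYM       = 1 << 1,
	PRINT_IP_OPT_DSO       = 1 << 2,
	PRINT_IP_OPT_SYMOFFSET = 1 << 3,
};

int main(void)
{
	unsigned int print_ip_opts = 0;

	/* Decided once per event type instead of passing booleans on every call. */
	print_ip_opts |= PRINT_IP_OPT_IP | PRINT_IP_OPT_SYM;

	if (print_ip_opts & PRINT_IP_OPT_SYM)
		printf("would print symbol names\n");
	if (!(print_ip_opts & PRINT_IP_OPT_DSO))
		printf("would skip DSO names\n");
	return 0;
}
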
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index 440c3b371401..e37521fc715a 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -716,8 +716,7 @@ static void perf_event__process_sample(struct perf_tool *tool,
if (event->header.misc & PERF_RECORD_MISC_EXACT_IP)
top->exact_samples++;
- if (perf_event__preprocess_sample(event, machine, &al, sample,
- symbol_filter) < 0 ||
+ if (perf_event__preprocess_sample(event, machine, &al, sample) < 0 ||
al.filtered)
return;
@@ -938,6 +937,8 @@ static int __cmd_top(struct perf_top *top)
if (top->session == NULL)
return -ENOMEM;
+ machines__set_symbol_filter(&top->session->machines, symbol_filter);
+
if (!objdump_path) {
ret = perf_session_env__lookup_objdump(&top->session->header.env);
if (ret)
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index da7ae01c8394..120fdfb3d920 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -5,6 +5,7 @@
#include "util/machine.h"
#include "util/thread.h"
#include "util/parse-options.h"
+#include "util/strlist.h"
#include "util/thread_map.h"
#include <libaudit.h>
@@ -47,6 +48,7 @@ static struct syscall_fmt *syscall_fmt__find(const char *name)
struct syscall {
struct event_format *tp_format;
const char *name;
+ bool filtered;
struct syscall_fmt *fmt;
};
@@ -110,6 +112,7 @@ struct trace {
struct perf_record_opts opts;
struct machine host;
u64 base_time;
+ struct strlist *ev_qualifier;
unsigned long nr_events;
bool sched;
bool multiple_threads;
@@ -226,6 +229,16 @@ static int trace__read_syscall_info(struct trace *trace, int id)
sc = trace->syscalls.table + id;
sc->name = name;
+
+ if (trace->ev_qualifier && !strlist__find(trace->ev_qualifier, name)) {
+ sc->filtered = true;
+ /*
+ * No need to do read tracepoint information since this will be
+ * filtered out.
+ */
+ return 0;
+ }
+
sc->fmt = syscall_fmt__find(sc->name);
snprintf(tp_name, sizeof(tp_name), "sys_enter_%s", sc->name);
@@ -302,11 +315,19 @@ static int trace__sys_enter(struct trace *trace, struct perf_evsel *evsel,
char *msg;
void *args;
size_t printed = 0;
- struct thread *thread = machine__findnew_thread(&trace->host, sample->tid);
+ struct thread *thread;
struct syscall *sc = trace__syscall_info(trace, evsel, sample);
- struct thread_trace *ttrace = thread__trace(thread);
+ struct thread_trace *ttrace;
+
+ if (sc == NULL)
+ return -1;
- if (ttrace == NULL || sc == NULL)
+ if (sc->filtered)
+ return 0;
+
+ thread = machine__findnew_thread(&trace->host, sample->tid);
+ ttrace = thread__trace(thread);
+ if (ttrace == NULL)
return -1;
args = perf_evsel__rawptr(evsel, sample, "args");
@@ -345,11 +366,19 @@ static int trace__sys_exit(struct trace *trace, struct perf_evsel *evsel,
{
int ret;
u64 duration = 0;
- struct thread *thread = machine__findnew_thread(&trace->host, sample->tid);
- struct thread_trace *ttrace = thread__trace(thread);
+ struct thread *thread;
struct syscall *sc = trace__syscall_info(trace, evsel, sample);
+ struct thread_trace *ttrace;
+
+ if (sc == NULL)
+ return -1;
- if (ttrace == NULL || sc == NULL)
+ if (sc->filtered)
+ return 0;
+
+ thread = machine__findnew_thread(&trace->host, sample->tid);
+ ttrace = thread__trace(thread);
+ if (ttrace == NULL)
return -1;
ret = perf_evsel__intval(evsel, sample, "ret");
@@ -634,7 +663,10 @@ int cmd_trace(int argc, const char **argv, const char *prefix __maybe_unused)
.mmap_pages = 1024,
},
};
+ const char *ev_qualifier_str = NULL;
const struct option trace_options[] = {
+ OPT_STRING('e', "expr", &ev_qualifier_str, "expr",
+ "list of events to trace"),
OPT_STRING('p', "pid", &trace.opts.target.pid, "pid",
"trace events on existing process id"),
OPT_STRING(0, "tid", &trace.opts.target.tid, "tid",
@@ -660,6 +692,14 @@ int cmd_trace(int argc, const char **argv, const char *prefix __maybe_unused)
argc = parse_options(argc, argv, trace_options, trace_usage, 0);
+ if (ev_qualifier_str != NULL) {
+ trace.ev_qualifier = strlist__new(true, ev_qualifier_str);
+ if (trace.ev_qualifier == NULL) {
+ puts("Not enough memory to parse event qualifier");
+ return -ENOMEM;
+ }
+ }
+
err = perf_target__validate(&trace.opts.target);
if (err) {
perf_target__strerror(&trace.opts.target, err, bf, sizeof(bf));
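The -e/--expr handling above marks syscalls that are not in the qualifier list as filtered when their table entry is first built, so the sys_enter/sys_exit handlers can return early without looking up the thread. A rough standalone equivalent using a plain name list is sketched below (strlist itself is perf's own helper; the array here is a toy stand-in).

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Toy stand-in for trace->ev_qualifier: a NULL-terminated list of names. */
static const char *ev_qualifier[] = { "open", "close", "read", NULL };

static bool syscall_is_filtered(const char *name)
{
	int i;

	for (i = 0; ev_qualifier[i]; i++)
		if (strcmp(ev_qualifier[i], name) == 0)
			return false;	/* requested: keep tracing it */
	return true;			/* not requested: filter it out */
}

int main(void)
{
	printf("open:  %s\n", syscall_is_filtered("open") ? "filtered" : "traced");
	printf("futex: %s\n", syscall_is_filtered("futex") ? "filtered" : "traced");
	return 0;
}
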
diff --git a/tools/perf/perf.h b/tools/perf/perf.h
index 32bd102c32b6..cf20187eee0a 100644
--- a/tools/perf/perf.h
+++ b/tools/perf/perf.h
@@ -125,6 +125,9 @@
#ifndef NSEC_PER_SEC
# define NSEC_PER_SEC 1000000000ULL
#endif
+#ifndef NSEC_PER_USEC
+# define NSEC_PER_USEC 1000ULL
+#endif
static inline unsigned long long rdclock(void)
{
diff --git a/tools/perf/tests/code-reading.c b/tools/perf/tests/code-reading.c
index 8e0943b966d7..df9afd9cab4c 100644
--- a/tools/perf/tests/code-reading.c
+++ b/tools/perf/tests/code-reading.c
@@ -147,7 +147,7 @@ static int read_object_code(u64 addr, size_t len, u8 cpumode,
pr_debug("Reading object code for memory address: %#"PRIx64"\n", addr);
thread__find_addr_map(thread, machine, cpumode, MAP__FUNCTION, addr,
- &al, NULL);
+ &al);
if (!al.map || !al.map->dso) {
pr_debug("thread__find_addr_map failed\n");
return -1;
@@ -304,15 +304,14 @@ static int comp(const void *a, const void *b)
static void do_sort_something(void)
{
- size_t sz = 40960;
- int buf[sz], i;
+ int buf[40960], i;
- for (i = 0; i < (int)sz; i++)
- buf[i] = sz - i - 1;
+ for (i = 0; i < (int)ARRAY_SIZE(buf); i++)
+ buf[i] = ARRAY_SIZE(buf) - i - 1;
- qsort(buf, sz, sizeof(int), comp);
+ qsort(buf, ARRAY_SIZE(buf), sizeof(int), comp);
- for (i = 0; i < (int)sz; i++) {
+ for (i = 0; i < (int)ARRAY_SIZE(buf); i++) {
if (buf[i] != i) {
pr_debug("qsort failed\n");
break;
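The do_sort_something() fix above swaps a variable-length array for a fixed-size one so the element count is a compile-time constant derived with ARRAY_SIZE(). That macro reduces to the usual sizeof ratio; a minimal sketch is shown here (the tools headers may layer type checking on top of this).

#include <stdio.h>

#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))

int main(void)
{
	static int buf[40960];	/* fixed size: no VLA */

	printf("%zu elements\n", ARRAY_SIZE(buf));	/* 40960, known at compile time */
	return 0;
}
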
diff --git a/tools/perf/tests/hists_link.c b/tools/perf/tests/hists_link.c
index 89085a9615e2..50bfb01183ea 100644
--- a/tools/perf/tests/hists_link.c
+++ b/tools/perf/tests/hists_link.c
@@ -220,7 +220,7 @@ static int add_hist_entries(struct perf_evlist *evlist, struct machine *machine)
};
if (perf_event__preprocess_sample(&event, machine, &al,
- &sample, 0) < 0)
+ &sample) < 0)
goto out;
he = __hists__add_entry(&evsel->hists, &al, NULL, 1, 1);
@@ -244,7 +244,7 @@ static int add_hist_entries(struct perf_evlist *evlist, struct machine *machine)
};
if (perf_event__preprocess_sample(&event, machine, &al,
- &sample, 0) < 0)
+ &sample) < 0)
goto out;
he = __hists__add_entry(&evsel->hists, &al, NULL, 1, 1);
diff --git a/tools/perf/util/build-id.c b/tools/perf/util/build-id.c
index 3a0f5089379c..5295625c0c00 100644
--- a/tools/perf/util/build-id.c
+++ b/tools/perf/util/build-id.c
@@ -33,7 +33,7 @@ int build_id__mark_dso_hit(struct perf_tool *tool __maybe_unused,
}
thread__find_addr_map(thread, machine, cpumode, MAP__FUNCTION,
- event->ip.ip, &al, NULL);
+ event->ip.ip, &al);
if (al.map != NULL)
al.map->dso->hit = 1;
diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
index cc7c0c9c9ea6..49713ae46551 100644
--- a/tools/perf/util/event.c
+++ b/tools/perf/util/event.c
@@ -592,7 +592,7 @@ int perf_event__process(struct perf_tool *tool __maybe_unused,
void thread__find_addr_map(struct thread *self,
struct machine *machine, u8 cpumode,
enum map_type type, u64 addr,
- struct addr_location *al, symbol_filter_t filter)
+ struct addr_location *al)
{
struct map_groups *mg = &self->mg;
bool load_map = false;
@@ -663,19 +663,19 @@ try_again:
* must be done prior to using kernel maps.
*/
if (load_map)
- map__load(al->map, filter);
+ map__load(al->map, machine->symbol_filter);
al->addr = al->map->map_ip(al->map, al->addr);
}
}
void thread__find_addr_location(struct thr