/*
 * builtin-stat.c
 *
 * Builtin stat command: Give a precise performance counter summary
 * overview of any workload, CPU or specific PID.
 *
 * Sample output:

   $ perf stat ./hackbench 10

  Time: 0.118

  Performance counter stats for './hackbench 10':

       1708.761321 task-clock                #   11.037 CPUs utilized
            41,190 context-switches          #    0.024 M/sec
             6,735 CPU-migrations            #    0.004 M/sec
            17,318 page-faults               #    0.010 M/sec
     5,205,202,243 cycles                    #    3.046 GHz
     3,856,436,920 stalled-cycles-frontend   #   74.09% frontend cycles idle
     1,600,790,871 stalled-cycles-backend    #   30.75% backend cycles idle
     2,603,501,247 instructions              #    0.50  insns per cycle
                                             #    1.48  stalled cycles per insn
       484,357,498 branches                  #  283.455 M/sec
         6,388,934 branch-misses             #    1.32% of all branches

        0.154822978 seconds time elapsed

 *
 * Copyright (C) 2008-2011, Red Hat Inc, Ingo Molnar <mingo@redhat.com>
 *
 * Improvements and fixes by:
 *
 *   Arjan van de Ven <arjan@linux.intel.com>
 *   Yanmin Zhang <yanmin.zhang@intel.com>
 *   Wu Fengguang <fengguang.wu@intel.com>
 *   Mike Galbraith <efault@gmx.de>
 *   Paul Mackerras <paulus@samba.org>
 *   Jaswinder Singh Rajput <jaswinder@kernel.org>
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */

#include "perf.h"
#include "builtin.h"
#include "util/cgroup.h"
#include "util/util.h"
#include <subcmd/parse-options.h>
#include "util/parse-events.h"
#include "util/pmu.h"
#include "util/event.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/debug.h"
#include "util/color.h"
#include "util/stat.h"
#include "util/header.h"
#include "util/cpumap.h"
#include "util/thread.h"
#include "util/thread_map.h"
#include "util/counts.h"
#include "util/session.h"

#include <stdlib.h>
#include <sys/prctl.h>
#include <locale.h>

#define DEFAULT_SEPARATOR	" "
#define CNTR_NOT_SUPPORTED	"<not supported>"
#define CNTR_NOT_COUNTED	"<not counted>"

static void print_counters(struct timespec *ts, int argc, const char **argv);

/* Default events used for perf stat -T */
static const char *transaction_attrs = {
        "task-clock,"
        "{"
        "instructions,"
        "cycles,"
        "cpu/cycles-t/,"
        "cpu/tx-start/,"
        "cpu/el-start/,"
        "cpu/cycles-ct/"
        "}"
};

/* More limited version when the CPU does not have all events. */
static const char *transaction_limited_attrs = {
        "task-clock,"
        "{"
        "instructions,"
        "cycles,"
        "cpu/cycles-t/,"
        "cpu/tx-start/"
        "}"
};

static struct perf_evlist *evsel_list;

static struct target target = {
        .uid = UINT_MAX,
};

typedef int (*aggr_get_id_t)(struct cpu_map *m, int cpu);

static int run_count = 1;
static bool no_inherit = false;
static volatile pid_t child_pid = -1;
static bool null_run = false;
static int detailed_run = 0;
static bool transaction_run;
static bool big_num = true;
static int big_num_opt = -1;
static const char *csv_sep = NULL;
static bool csv_output = false;
static bool group = false;
static const char *pre_cmd = NULL;
static const char *post_cmd = NULL;
static bool sync_run = false;
static unsigned int initial_delay = 0;
static unsigned int unit_width = 4; /* strlen("unit") */
static bool forever = false;
static struct timespec ref_time;
static struct cpu_map *aggr_map;
static aggr_get_id_t aggr_get_id;
static bool append_file;
static const char *output_name;
static int output_fd;

struct perf_stat {
        bool record;
        struct perf_data_file file;
        struct perf_session *session;
        u64 bytes_written;
};

static struct perf_stat perf_stat;
#define STAT_RECORD perf_stat.record

static volatile int done = 0;

static struct perf_stat_config stat_config = {
        .aggr_mode = AGGR_GLOBAL,
        .scale = true,
};

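/* Compute r = a - b for two timespecs, borrowing a second when the nanosecond part underflows. */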
static inline void diff_timespec(struct timespec *r, struct timespec *a,
                                 struct timespec *b)
{
        r->tv_sec = a->tv_sec - b->tv_sec;
        if (a->tv_nsec < b->tv_nsec) {
                r->tv_nsec = a->tv_nsec + 1000000000L - b->tv_nsec;
                r->tv_sec--;
        } else {
                r->tv_nsec = a->tv_nsec - b->tv_nsec;
        }
}

static void perf_stat__reset_stats(void)
{
        perf_evlist__reset_stats(evsel_list);
        perf_stat__reset_shadow_stats();
}

static int create_perf_stat_counter(struct perf_evsel *evsel)
{
        struct perf_event_attr *attr = &evsel->attr;

        if (stat_config.scale)
                attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
                                    PERF_FORMAT_TOTAL_TIME_RUNNING;

        attr->inherit = !no_inherit;

        /*
         * Some events get initialized with sample_(period/type) set,
         * like tracepoints. Clear it up for counting.
         */
        attr->sample_period = 0;
        /*
         * Set sample_type to PERF_SAMPLE_IDENTIFIER, which should be
         * harmless and avoids confusing messages from older tools.
         */
        attr->sample_type = PERF_SAMPLE_IDENTIFIER;

        /*
         * Disable all counters initially; they will be enabled either
         * manually by us or by the kernel via enable_on_exec set later.
         */
        if (perf_evsel__is_group_leader(evsel)) {
                attr->disabled = 1;

                /*
                 * When initial_delay is set we enable the tracee's
                 * events manually.
                 */
                if (target__none(&target) && !initial_delay)
                        attr->enable_on_exec = 1;
        }

        if (target__has_cpu(&target))
                return perf_evsel__open_per_cpu(evsel, perf_evsel__cpus(evsel));

        return perf_evsel__open_per_thread(evsel, evsel_list->threads);
}

/*
 * Does the counter have nsecs as a unit?
 */
static inline int nsec_counter(struct perf_evsel *evsel)
{
        if (perf_evsel__match(evsel, SOFTWARE, SW_CPU_CLOCK) ||
            perf_evsel__match(evsel, SOFTWARE, SW_TASK_CLOCK))
                return 1;

        return 0;
}

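/*
 * Writer callback used by the synthesize helpers below: append the
 * synthesized event to the perf.data file opened for 'perf stat record'.
 */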
static int process_synthesized_event(struct perf_tool *tool __maybe_unused,
                                     union perf_event *event,
                                     struct perf_sample *sample __maybe_unused,
                                     struct machine *machine __maybe_unused)
{
        if (perf_data_file__write(&perf_stat.file, event, event->header.size) < 0) {
                pr_err("failed to write perf data, error: %m\n");
                return -1;
        }

        perf_stat.bytes_written += event->header.size;
        return 0;
}

static int write_stat_round_event(u64 time, u64 type)
{
        return perf_event__synthesize_stat_round(NULL, time, type,
                                                 process_synthesized_event,
                                                 NULL);
}

#define WRITE_STAT_ROUND_EVENT(time, interval) \
        write_stat_round_event(time, PERF_STAT_ROUND_TYPE__ ## interval)

#define SID(e, x, y) xyarray__entry(e->sample_id, x, y)

static int
perf_evsel__write_stat_event(struct perf_evsel *counter, u32 cpu, u32 thread,
                             struct perf_counts_values *count)
{
        struct perf_sample_id *sid = SID(counter, cpu, thread);

        return perf_event__synthesize_stat(NULL, cpu, thread, sid->id, count,
                                           process_synthesized_event, NULL);
}

/*
 * Read out the results of a single counter:
 * do not aggregate counts across CPUs in system-wide mode
 */
static int read_counter(struct perf_evsel *counter)
{
        int nthreads = thread_map__nr(evsel_list->threads);
        int ncpus = perf_evsel__nr_cpus(counter);
        int cpu, thread;

        if (!counter->supported)
                return -ENOENT;

        if (counter->system_wide)
                nthreads = 1;

        for (thread = 0; thread < nthreads; thread++) {
                for (cpu = 0; cpu < ncpus; cpu++) {
                        struct perf_counts_values *count;

                        count = perf_counts(counter->counts, cpu, thread);
                        if (perf_evsel__read(counter, cpu, thread, count))
                                return -1;

                        if (STAT_RECORD) {
                                if (perf_evsel__write_stat_event(counter, cpu, thread, count)) {
                                        pr_err("failed to write stat event\n");
                                        return -1;
                                }
                        }
                }
        }

        return 0;
}

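/*
 * Read every counter once, fold the raw values into the stats code, and
 * optionally close the counter fds when this is the final read of the run.
 */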
static void read_counters(bool close_counters)
{
        struct perf_evsel *counter;

        evlist__for_each(evsel_list, counter) {
                if (read_counter(counter))
                        pr_debug("failed to read counter %s\n", counter->name);

                if (perf_stat_process_counter(&stat_config, counter))
                        pr_warning("failed to process counter %s\n", counter->name);

                if (close_counters) {
                        perf_evsel__close_fd(counter, perf_evsel__nr_cpus(counter),
                                             thread_map__nr(evsel_list->threads));
                }
        }
}

static void process_interval(void)
{
        struct timespec ts, rs;

        read_counters(false);

        clock_gettime(CLOCK_MONOTONIC, &ts);
        diff_timespec(&rs, &ts, &ref_time);

        if (STAT_RECORD) {
                if (WRITE_STAT_ROUND_EVENT(rs.tv_sec * NSECS_PER_SEC + rs.tv_nsec, INTERVAL))
                        pr_err("failed to write stat round event\n");
        }

        print_counters(&rs, 0, NULL);
}

static void enable_counters(void)
{
        if (initial_delay)
                usleep(initial_delay * 1000);

        /*
         * We need to enable counters only if:
         * - we don't have a tracee (i.e. we are attaching to an existing
         *   task or cpu)
         * - we have an initial delay configured
         */
        if (!target__none(&target) || initial_delay)
                perf_evlist__enable(evsel_list);
}

static volatile int workload_exec_errno;

/*
 * perf_evlist__prepare_workload will send a SIGUSR1
 * if the fork fails, since we asked by setting its
 * want_signal to true.
 */
static void workload_exec_failed_signal(int signo __maybe_unused, siginfo_t *info,
                                        void *ucontext __maybe_unused)
{
        workload_exec_errno = info->si_value.sival_int;
}

static bool has_unit(struct perf_evsel *counter)
{
        return counter->unit && *counter->unit;
}

static bool has_scale(struct perf_evsel *counter)
{
        return counter->scale != 1;
}

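/*
 * Emit the metadata that 'perf stat report' needs to interpret the recorded
 * counts: event attributes (for pipe output), units, scales, names, cpu and
 * thread maps, and the stat config itself.
 */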
static int perf_stat_synthesize_config(bool is_pipe)
{
        struct perf_evsel *counter;
        int err;

        if (is_pipe) {
                err = perf_event__synthesize_attrs(NULL, perf_stat.session,
                                                   process_synthesized_event);
                if (err < 0) {
                        pr_err("Couldn't synthesize attrs.\n");
                        return err;
                }
        }

        /*
         * Synthesize the other event attributes that are not carried
         * within the attr event: unit, scale, name.
         */
        evlist__for_each(evsel_list, counter) {
                if (!counter->supported)
                        continue;

                /*
                 * Synthesize unit and scale only if they are defined.
                 */
                if (has_unit(counter)) {
                        err = perf_event__synthesize_event_update_unit(NULL, counter, process_synthesized_event);
                        if (err < 0) {
                                pr_err("Couldn't synthesize evsel unit.\n");
                                return err;
                        }
                }

                if (has_scale(counter)) {
                        err = perf_event__synthesize_event_update_scale(NULL, counter, process_synthesized_event);
                        if (err < 0) {
                                pr_err("Couldn't synthesize evsel scale.\n");
                                return err;
                        }
                }

                if (counter->own_cpus) {
                        err = perf_event__synthesize_event_update_cpus(NULL, counter, process_synthesized_event);
                        if (err < 0) {
                                pr_err("Couldn't synthesize evsel cpus.\n");
                                return err;
                        }
                }

                /*
                 * Name is needed only for pipe output,
                 * perf.data carries event names.
                 */
                if (is_pipe) {
                        err = perf_event__synthesize_event_update_name(NULL, counter, process_synthesized_event);
                        if (err < 0) {
                                pr_err("Couldn't synthesize evsel name.\n");
                                return err;
                        }
                }
        }

        err = perf_event__synthesize_thread_map2(NULL, evsel_list->threads,
                                                 process_synthesized_event,
                                                 NULL);
        if (err < 0) {
                pr_err("Couldn't synthesize thread map.\n");
                return err;
        }

        err = perf_event__synthesize_cpu_map(NULL, evsel_list->cpus,
                                             process_synthesized_event, NULL);
        if (err < 0) {
                pr_err("Couldn't synthesize cpu map.\n");
                return err;
        }

        err = perf_event__synthesize_stat_config(NULL, &stat_config,
                                                 process_synthesized_event, NULL);
        if (err < 0) {
                pr_err("Couldn't synthesize config.\n");
                return err;
        }

        return 0;
}

#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))

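/*
 * Register the perf-event fd of each cpu/thread of a counter with the
 * evlist, so the ids carried in synthesized stat events can be mapped back
 * to the right counter.
 */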
static int __store_counter_ids(struct perf_evsel *counter,
                               struct cpu_map *cpus,
                               struct thread_map *threads)
{
        int cpu, thread;

        for (cpu = 0; cpu < cpus->nr; cpu++) {
                for (thread = 0; thread < threads->nr; thread++) {
                        int fd = FD(counter, cpu, thread);

                        if (perf_evlist__id_add_fd(evsel_list, counter,
                                                   cpu, thread, fd) < 0)
                                return -1;
                }
        }

        return 0;
}

static int store_counter_ids(struct perf_evsel *counter)
{
        struct cpu_map *cpus = counter->cpus;
        struct thread_map *threads = counter->threads;

        if (perf_evsel__alloc_id(counter, cpus->nr, threads->nr))
                return -ENOMEM;

        return __store_counter_ids(counter, cpus, threads);
}

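/*
 * One measurement run: open and configure all counters, start (or attach
 * to) the workload, optionally print interval results, and read the final
 * counts when the run ends.
 */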
static int __run_perf_stat(int argc, const char **argv)
{
        int interval = stat_config.interval;
        char msg[512];
        unsigned long long t0, t1;
        struct perf_evsel *counter;
        struct timespec ts;
        size_t l;
        int status = 0;
        const bool forks = (argc > 0);
        bool is_pipe = STAT_RECORD ? perf_stat.file.is_pipe : false;

        if (interval) {
                ts.tv_sec = interval / 1000;
                ts.tv_nsec = (interval % 1000) * 1000000;
        } else {
                ts.tv_sec = 1;
                ts.tv_nsec = 0;
        }

        if (forks) {
                if (perf_evlist__prepare_workload(evsel_list, &target, argv, is_pipe,
                                                  workload_exec_failed_signal) < 0) {
                        perror("failed to prepare workload");
                        return -1;
                }
                child_pid = evsel_list->workload.pid;
        }

        if (group)
                perf_evlist__set_leader(evsel_list);

        evlist__for_each(evsel_list, counter) {
                if (create_perf_stat_counter(counter) < 0) {
                        /*
                         * PPC returns ENXIO for HW counters until 2.6.37
                         * (behavior changed with commit b0a873e).
                         */
                        if (errno == EINVAL || errno == ENOSYS ||
                            errno == ENOENT || errno == EOPNOTSUPP ||
                            errno == ENXIO) {
                                if (verbose)
                                        ui__warning("%s event is not supported by the kernel.\n",
                                                    perf_evsel__name(counter));
                                counter->supported = false;

                                if ((counter->leader != counter) ||
                                    !(counter->leader->nr_members > 1))
                                        continue;
                        }

                        perf_evsel__open_strerror(counter, &target,
                                                  errno, msg, sizeof(msg));
                        ui__error("%s\n", msg);

                        if (child_pid != -1)
                                kill(child_pid, SIGTERM);

                        return -1;
                }
                counter->supported = true;

                l = strlen(counter->unit);
                if (l > unit_width)
                        unit_width = l;

                if (STAT_RECORD && store_counter_ids(counter))
                        return -1;
        }

        if (perf_evlist__apply_filters(evsel_list, &counter)) {
                error("failed to set filter \"%s\" on event %s with %d (%s)\n",
                      counter->filter, perf_evsel__name(counter), errno,
                      strerror_r(errno, msg, sizeof(msg)));
                return -1;
        }

        if (STAT_RECORD) {
                int err, fd = perf_data_file__fd(&perf_stat.file);

                if (is_pipe) {
                        err = perf_header__write_pipe(perf_data_file__fd(&perf_stat.file));
                } else {
                        err = perf_session__write_header(perf_stat.session, evsel_list,
                                                         fd, false);
                }

                if (err < 0)
                        return err;

                err = perf_stat_synthesize_config(is_pipe);
                if (err < 0)
                        return err;
        }

        /*
         * Enable counters and exec the command:
         */
        t0 = rdclock();
        clock_gettime(CLOCK_MONOTONIC, &ref_time);

        if (forks) {
                perf_evlist__start_workload(evsel_list);
                enable_counters();

                if (interval) {
                        while (!waitpid(child_pid, &status, WNOHANG)) {
                                nanosleep(&ts, NULL);
                                process_interval();
                        }
                }
                wait(&status);

                if (workload_exec_errno) {
                        const char *emsg = strerror_r(workload_exec_errno, msg, sizeof(msg));
                        pr_err("Workload failed: %s\n", emsg);
                        return -1;
                }

                if (WIFSIGNALED(status))
                        psignal(WTERMSIG(status), argv[0]);
        } else {
                enable_counters();
                while (!done) {
                        nanosleep(&ts, NULL);
                        if (interval)
                                process_interval();
                }
        }

        t1 = rdclock();

        update_stats(&walltime_nsecs_stats, t1 - t0);

        read_counters(true);

        return WEXITSTATUS(status);
}

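/*
 * Wrap a single measurement run with the optional --pre/--post commands and
 * an optional sync() call (-S).
 */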
static int run_perf_stat(int argc, const char **argv)
{
        int ret;

        if (pre_cmd) {
                ret = system(pre_cmd);
                if (ret)
                        return ret;
        }

        if (sync_run)
                sync();

        ret = __run_perf_stat(argc, argv);
        if (ret)
                return ret;

        if (post_cmd) {
                ret = system(post_cmd);
                if (ret)
                        return ret;
        }

        return ret;
}

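/*
 * Print how long the counter was actually running relative to the time it
 * was enabled; less than 100% means the counter was multiplexed.
 */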
static void print_running(u64 run, u64 ena)
{
        if (csv_output) {
                fprintf(stat_config.output, "%s%" PRIu64 "%s%.2f",
                        csv_sep,
                        run,
                        csv_sep,
                        ena ? 100.0 * run / ena : 100.0);
        } else if (run != ena) {
                fprintf(stat_config.output, "  (%.2f%%)", 100.0 * run / ena);
        }
}

static void print_noise_pct(double total, double avg)
{
        double pct = rel_stddev_stats(total, avg);

        if (csv_output)
                fprintf(stat_config.output, "%s%.2f%%", csv_sep, pct);
        else if (pct)
                fprintf(stat_config.output, "  ( +-%6.2f%% )", pct);
}

static void print_noise(struct perf_evsel *evsel, double avg)
{
        struct perf_stat_evsel *ps;

        if (run_count == 1)
                return;

        ps = evsel->priv;
        print_noise_pct(stddev_stats(&ps->res_stats[0]), avg);
}

static void aggr_printout(struct perf_evsel *evsel, int id, int nr)
{
        switch (stat_config.aggr_mode) {
        case AGGR_CORE:
                fprintf(stat_config.output, "S%d-C%*d%s%*d%s",
                        cpu_map__id_to_socket(id),
                        csv_output ? 0 : -8,
                        cpu_map__id_to_cpu(id),
                        csv_sep,
                        csv_output ? 0 : 4,
                        nr,
                        csv_sep);
                break;
        case AGGR_SOCKET:
                fprintf(stat_config.output, "S%*d%s%*d%s",
                        csv_output ? 0 : -5,
                        id,
                        csv_sep,
                        csv_output ? 0 : 4,
                        nr,
                        csv_sep);
                break;
        case AGGR_NONE:
                fprintf(stat_config.output, "CPU%*d%s",
                        csv_output ? 0 : -4,
                        perf_evsel__cpus(evsel)->map[id], csv_sep);
                break;
        case AGGR_THREAD:
                fprintf(stat_config.output, "%*s-%*d%s",
                        csv_output ? 0 : 16,
                        thread_map__comm(evsel->threads, id),
                        csv_output ? 0 : -8,
                        thread_map__pid(evsel->threads, id),
                        csv_sep);
                break;
        case AGGR_GLOBAL:
        case AGGR_UNSET:
        default:
                break;
        }
}

static void nsec_printout(int id, int nr, struct perf_evsel *evsel, double avg)
{
        FILE *output = stat_config.output;
        double msecs = avg / 1e6;
        const char *fmt_v, *fmt_n;
        char name[25];

        fmt_v = csv_output ? "%.6f%s" : "%18.6f%s";
        fmt_n = csv_output ? "%s" : "%-25s";

        aggr_printout(evsel, id, nr);

        scnprintf(name, sizeof(name), "%s%s",
                  perf_evsel__name(evsel), csv_output ? "" : " (msec)");

        fprintf(output, fmt_v, msecs, csv_sep);

        if (csv_output)
                fprintf(output, "%s%s", evsel->unit, csv_sep);
        else
                fprintf(output, "%-*s%s", unit_width, evsel->unit, csv_sep);

        fprintf(output, fmt_n, name);

        if (evsel->cgrp)
                fprintf(output, "%s%s", csv_sep, evsel->cgrp->name);
}

static void abs_printout(int id, int nr, struct perf_evsel *evsel, double avg)
{
        FILE *output = stat_config.output;
        double sc = evsel->scale;
        const char *fmt;

        if (csv_output) {
                fmt = sc != 1.0 ? "%.2f%s" : "%.0f%s";
        } else {
                if (big_num)
                        fmt = sc != 1.0 ? "%'18.2f%s" : "%'18.0f%s";
                else
                        fmt = sc != 1.0 ? "%18.2f%s" : "%18.0f%s";
        }

        aggr_printout(evsel, id, nr);

        fprintf(output, fmt, avg, csv_sep);

        if (evsel->unit)
                fprintf(output, "%-*s%s",
                        csv_output ? 0 : unit_width,
                        evsel->unit, csv_sep);

        fprintf(output, "%-*s", csv_output ? 0 : 25, perf_evsel__name(evsel));

        if (evsel->cgrp)
                fprintf(output, "%s%s", csv_sep, evsel->cgrp->name);
}

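/*
 * Print a single value: pick nanosecond or absolute formatting and, unless
 * we are in CSV or interval mode, append the shadow stats (IPC, GHz, ...).
 */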
static void printout(int id, int nr, struct perf_evsel *counter, double uval)
{
        int cpu = cpu_map__id_to_cpu(id);

        if (stat_config.aggr_mode == AGGR_GLOBAL)
                cpu = 0;

        if (nsec_counter(counter))
                nsec_printout(id, nr, counter, uval);
        else
                abs_printout(id, nr, counter, uval);

        if (!csv_output && !stat_config.interval)
                perf_stat__print_shadow_stats(stat_config.output, counter,
                                              uval, cpu,
                                              stat_config.aggr_mode);
}

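/*
 * Print one line per aggregation unit (socket or core), summing the counts
 * of all cpus that belong to it.
 */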
static void print_aggr(char *prefix)
{
        FILE *output = stat_config.output;
        struct perf_evsel *counter;
        int cpu, s, s2, id, nr;
        double uval;
        u64 ena, run, val;

        if (!(aggr_map || aggr_get_id))
                return;

        for (s = 0; s < aggr_map->nr; s++) {
                id = aggr_map->map[s];
                evlist__for_each(evsel_list, counter) {
                        val = ena = run = 0;
                        nr = 0;
                        for (cpu = 0; cpu < perf_evsel__nr_cpus(counter); cpu++) {
                                s2 = aggr_get_id(perf_evsel__cpus(counter), cpu);
                                if (s2 != id)
                                        continue;
                                val += perf_counts(counter->counts, cpu, 0)->val;
                                ena += perf_counts(counter->counts, cpu, 0)->ena;
                                run += perf_counts(counter->counts, cpu, 0)->run;
                                nr++;
                        }
                        if (prefix)
                                fprintf(output, "%s", prefix);

                        if (run == 0 || ena == 0) {
                                aggr_printout(counter, id, nr);

                                fprintf(output, "%*s%s",
                                        csv_output ? 0 : 18,
                                        counter->supported ? CNTR_NOT_COUNTED : CNTR_NOT_SUPPORTED,
                                        csv_sep);

                                fprintf(output, "%-*s%s",
                                        csv_output ? 0 : unit_width,
                                        counter->unit, csv_sep);

                                fprintf(output, "%*s",
                                        csv_output ? 0 : -25,
                                        perf_evsel__name(counter));

                                if (counter->cgrp)
                                        fprintf(output, "%s%s",
                                                csv_sep, counter->cgrp->name);

                                print_running(run, ena);
                                fputc('\n', output);
                                continue;
                        }
                        uval = val * counter->scale;
                        printout(id, nr, counter, uval);
                        if (!csv_output)
                                print_noise(counter, 1.0);

                        print_running(run, ena);
                        fputc('\n', output);
                }
        }
}

static void print_aggr_thread(struct perf_evsel *counter, char *prefix)
{
        FILE *output = stat_config.output;
        int nthreads = thread_map__nr(counter->threads);
        int ncpus = cpu_map__nr(counter->cpus);
        int cpu, thread;
        double uval;

        for (thread = 0; thread < nthreads; thread++) {
                u64 ena = 0, run = 0, val = 0;

                for (cpu = 0; cpu < ncpus; cpu++) {
                        val += perf_counts(counter->counts, cpu, thread)->val;
                        ena += perf_counts(counter->counts, cpu, thread)->ena;
                        run += perf_counts(counter->counts, cpu, thread)->run;
                }

                if (prefix)
                        fprintf(output, "%s", prefix);

                uval = val * counter->scale;
                printout(thread, 0, counter, uval);

                if (!csv_output)
                        print_noise(counter, 1.0);

                print_running(run, ena);
                fputc('\n', output);
        }
}

/*
 * Print out the results of a single counter:
 * aggregated counts in system-wide mode
 */
static void print_counter_aggr(struct perf_evsel *counter, char *prefix)
{
        FILE *output = stat_config.output;
        struct perf_stat_evsel *ps = counter->priv;
        double avg = avg_stats(&ps->res_stats[0]);
        int scaled = counter->counts->scaled;
        double uval;
        double avg_enabled, avg_running;

        avg_enabled = avg_stats(&ps->res_stats[1]);
        avg_running = avg_stats(&ps->res_stats[2]);

        if (prefix)
                fprintf(output, "%s", prefix);

        if (scaled == -1 || !counter->supported) {
                fprintf(output, "%*s%s",
                        csv_output ? 0 : 18,
                        counter->supported ? CNTR_NOT_COUNTED : CNTR_NOT_SUPPORTED,
                        csv_sep);
                fprintf(output, "%-*s%s",
                        csv_output ? 0 : unit_width,
                        counter->unit, csv_sep);
                fprintf(output, "%*s",
                        csv_output ? 0 : -25,
                        perf_evsel__name(counter));

                if (counter->cgrp)
                        fprintf(output, "%s%s", csv_sep, counter->cgrp->name);

                print_running(avg_running, avg_enabled);
                fputc('\n', output);
                return;
        }

        uval = avg * counter->scale;
        printout(-1, 0, counter, uval);

        print_noise(counter, avg);

        print_running(avg_running, avg_enabled);
        fprintf(output, "\n");
}

/*
 * Print out the results of a single counter:
 * does not use aggregated count in system-wide
 */
static void print_counter(struct perf_evsel *counter, char *prefix)
{
        FILE *output = stat_config.output;
        u64 ena, run, val;
        double uval;
        int cpu;

        for (cpu = 0; cpu < perf_evsel__nr_cpus(counter); cpu++) {
                val = perf_counts(counter->counts, cpu, 0)->val;
                ena = perf_counts(counter->counts, cpu, 0)->ena;
                run = perf_counts(counter->counts, cpu, 0)->run;

                if (prefix)
                        fprintf(output, "%s", prefix);

                if (run == 0 || ena == 0) {
                        fprintf(output, "CPU%*d%s%*s%s",
                                csv_output ? 0 : -4,
                                perf_evsel__cpus(counter)->map[cpu], csv_sep,
                                csv_output ? 0 : 18,
                                counter->supported ? CNTR_NOT_COUNTED : CNTR_NOT_SUPPORTED,
                                csv_sep);

                        fprintf(output, "%-*s%s",
                                csv_output ? 0 : unit_width,
                                counter->unit, csv_sep);

                        fprintf(output, "%*s",
                                csv_output ? 0 : -25,
                                perf_evsel__name(counter));

                        if (counter->cgrp)
                                fprintf(output, "%s%s",
                                        csv_sep, counter->cgrp->name);

                        print_running(run, ena);
                        fputc('\n', output);
                        continue;
                }

                uval = val * counter->scale;
                printout(cpu, 0, counter, uval);
                if (!csv_output)
                        print_noise(counter, 1.0);
                print_running(run, ena);

                fputc('\n', output);
        }
}

static void print_interval(char *prefix, struct timespec *ts)
{
        FILE *output = stat_config.output;
        static int num_print_interval;

        sprintf(prefix, "%6lu.%09lu%s", ts->tv_sec, ts->tv_nsec, csv_sep);

        if (num_print_interval == 0 && !csv_output) {
                switch (stat_config.aggr_mode) {
                case AGGR_SOCKET:
                        fprintf(output, "# time socket cpus counts %*s events\n", unit_width, "unit");
                        break;
                case AGGR_CORE:
                        fprintf(output, "# time core cpus counts %*s events\n", unit_width, "unit");
                        break;
                case AGGR_NONE:
                        fprintf(output, "# time CPU counts %*s events\n", unit_width, "unit");
                        break;
                case AGGR_THREAD:
                        fprintf(output, "# time comm-pid counts %*s events\n", unit_width, "unit");
                        break;
                case AGGR_GLOBAL:
                default:
                        fprintf(output, "# time counts %*s events\n", unit_width, "unit");
                case AGGR_UNSET:
                        break;
                }
        }

        if (++num_print_interval == 25)
                num_print_interval = 0;
}

static void print_header(int argc, const char **argv)
{
        FILE *output = stat_config.output;
        int i;

        fflush(stdout);

        if (!csv_output) {
                fprintf(output, "\n");
                fprintf(output, " Performance counter stats for ");
                if (target.system_wide)
                        fprintf(output, "\'system wide");
                else if (target.cpu_list)
                        fprintf(output, "\'CPU(s) %s", target.cpu_list);
                else if (!target__has_task(&target)) {
                        fprintf(output, "\'%s", argv[0]);
                        for (i = 1; i < argc; i++)
                                fprintf(output, " %s", argv[i]);
                } else if (target.pid)
                        fprintf(output, "process id \'%s", target.pid);
                else
                        fprintf(output, "thread id \'%s", target.tid);

                fprintf(output, "\'");
                if (run_count > 1)
                        fprintf(output, " (%d runs)", run_count);
                fprintf(output, ":\n\n");
        }
}

static void print_footer(void)
{
        FILE *output = stat_config.output;

        if (!null_run)
                fprintf(output, "\n");
        fprintf(output, " %17.9f seconds time elapsed",
                avg_stats(&walltime_nsecs_stats)/1e9);
        if (run_count > 1) {
                fprintf(output, " ");
                print_noise_pct(stddev_stats(&walltime_nsecs_stats),
                                avg_stats(&walltime_nsecs_stats));
        }
        fprintf(output, "\n\n");
}

static void print_counters(struct timespec *ts, int argc, const char **argv)
{
        int interval = stat_config.interval;
        struct perf_evsel *counter;
        char buf[64], *prefix = NULL;

        /* Do not print anything if we record to the pipe. */
        if (STAT_RECORD && perf_stat.file.is_pipe)
                return;

        if (interval)
                print_interval(prefix = buf, ts);
        else
                print_header(argc, argv);

        switch (stat_config.aggr_mode) {
        case AGGR_CORE:
        case AGGR_SOCKET:
                print_aggr(prefix);
                break;
        case AGGR_THREAD:
                evlist__for_each(evsel_list, counter)
                        print_aggr_thread(counter, prefix);
                break;
        case AGGR_GLOBAL:
                evlist__for_each(evsel_list, counter)
                        print_counter_aggr(counter, prefix);
                break;
        case AGGR_NONE:
                evlist__for_each(evsel_list, counter)
                        print_counter(counter, prefix);
                break;
        case AGGR_UNSET:
        default:
                break;
        }

        if (!interval && !csv_output)
                print_footer();

        fflush(stat_config.output);
}

static volatile int signr = -1;

static void skip_signal(int signo)
{
        if ((child_pid == -1) || stat_config.interval)
                done = 1;

        signr = signo;
        /*
         * Render child_pid harmless so we won't send SIGTERM to a random
         * process in case of a race condition and fast PID recycling.
         */
        child_pid = -1;
}

static void sig_atexit(void)
{
        sigset_t set, oset;

        /*
         * Avoid a race condition with the SIGCHLD handler in skip_signal(),
         * which modifies child_pid; the goal is to avoid sending SIGTERM to
         * a random process.
         */
        sigemptyset(&set);
        sigaddset(&set, SIGCHLD);
        sigprocmask(SIG_BLOCK, &set, &oset);

        if (child_pid != -1)
                kill(child_pid, SIGTERM);

        sigprocmask(SIG_SETMASK, &oset, NULL);

        if (signr == -1)
                return;

        signal(signr, SIG_DFL);
        kill(getpid(), signr);
}

static int stat__set_big_num(const struct option *opt __maybe_unused,
                             const char *s __maybe_unused, int unset)
{
        big_num_opt = unset ? 0 : 1;
        return 0;
}

static const struct option stat_options[] = {
        OPT_BOOLEAN('T', "transaction", &transaction_run,
                    "hardware transaction statistics"),
        OPT_CALLBACK('e', "event", &evsel_list, "event",
                     "event selector. use 'perf list' to list available events",
                     parse_events_option),
        OPT_CALLBACK(0, "filter", &evsel_list, "filter",
                     "event filter", parse_filter),
        OPT_BOOLEAN('i', "no-inherit", &no_inherit,
                    "child tasks do not inherit counters"),
        OPT_STRING('p', "pid", &target.pid, "pid",
                   "stat events on existing process id"),
        OPT_STRING('t', "tid", &target.tid, "tid",
                   "stat events on existing thread id"),
        OPT_BOOLEAN('a', "all-cpus", &target.system_wide,
                    "system-wide collection from all CPUs"),
        OPT_BOOLEAN('g', "group", &group,
                    "put the counters into a counter group"),
        OPT_BOOLEAN('c', "scale", &stat_config.scale, "scale/normalize counters"),
        OPT_INCR('v', "verbose", &verbose,
                 "be more verbose (show counter open errors, etc)"),
        OPT_INTEGER('r', "repeat", &run_count,
                    "repeat command and print average + stddev (max: 100, forever: 0)"),
        OPT_BOOLEAN('n', "null", &null_run,
                    "null run - don't start any counters"),
        OPT_INCR('d', "detailed", &detailed_run,
                 "detailed run - start a lot of events"),
        OPT_BOOLEAN('S', "sync", &sync_run,
                    "call sync() before starting a run"),
        OPT_CALLBACK_NOOPT('B', "big-num", NULL, NULL,
                           "print large numbers with thousands\' separators",
                           stat__set_big_num),
        OPT_STRING('C', "cpu", &target.cpu_list, "cpu",
                   "list of cpus to monitor in system-wide"),
        OPT_SET_UINT('A', "no-aggr", &stat_config.aggr_mode,
                     "disable CPU count aggregation", AGGR_NONE),
        OPT_STRING('x', "field-separator", &csv_sep, "separator",
                   "print counts with custom separator"),
        OPT_CALLBACK('G', "cgroup", &evsel_list, "name",
                     "monitor event in cgroup name only", parse_cgroups),
        OPT_STRING('o', "output", &output_name, "file", "output file name"),
        OPT_BOOLEAN(0, "append", &append_file, "append to the output file"),
        OPT_INTEGER(0, "log-fd", &output_fd,
                    "log output to fd, instead of stderr"),
        OPT_STRING(0, "pre", &pre_cmd, "command",
                   "command to run prior to the measured command"),
        OPT_STRING(0, "post", &post_cmd, "command",
                   "command to run after the measured command"),
        OPT_UINTEGER('I', "interval-print", &stat_config.interval,
                     "print counts at regular interval in ms (>= 10)"),
        OPT_SET_UINT(0, "per-socket", &stat_config.aggr_mode,
                     "aggregate counts per processor socket", AGGR_SOCKET),
        OPT_SET_UINT(0, "per-core", &stat_config.aggr_mode,
                     "aggregate counts per physical processor core", AGGR_CORE),
        OPT_SET_UINT(0, "per-thread", &stat_config.aggr_mode,
                     "aggregate counts per thread", AGGR_THREAD),
        OPT_UINTEGER('D', "delay", &initial_delay,
                     "ms to wait before starting measurement after program start"),
        OPT_END()
};

static int perf_stat__get_socket(struct cpu_map *map, int cpu)
{
        return cpu_map__get_socket(map, cpu, NULL);
}

static int perf_stat__get_core(struct cpu_map *map, int cpu)
{
        return cpu_map__get_core(map, cpu, NULL);
}

static int cpu_map__get_max(struct cpu_map *map)
{
        int i, max = -1;

        for (i = 0; i < map->nr; i++) {
                if (map->map[i] > max)
                        max = map->map[i];
        }

        return max;
}

static struct cpu_map *cpus_aggr_map;

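/*
 * Cache the cpu -> aggregation id (socket/core) lookup in cpus_aggr_map so
 * it is computed only once per cpu.
 */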
static int perf_stat__get_aggr(aggr_get_id_t get_id, struct cpu_map *map, int idx)
{
        int cpu;

        if (idx >= map->nr)
                return -1;

        cpu = map->map[idx];

        if (cpus_aggr_map->map[cpu] == -1)
                cpus_aggr_map->map[cpu] = get_id(map, idx);

        return cpus_aggr_map->map[cpu];
}

static int perf_stat__get_socket_cached(struct cpu_map *map, int idx)
{
        return perf_stat__get_aggr(perf_stat__get_socket, map, idx);
}

static int perf_stat__get_core_cached(struct cpu_map *map, int idx)
{
        return perf_stat__get_aggr(perf_stat__get_core, map, idx);
}

Stephane Eranian86ee6e12013-02-14 13:57:27 +01001278static int perf_stat_init_aggr_mode(void)
1279{
Jiri Olsa1e5a2932015-10-25 15:51:18 +01001280 int nr;
1281
Jiri Olsa421a50f2015-07-21 14:31:22 +02001282 switch (stat_config.aggr_mode) {
Stephane Eranian86ee6e12013-02-14 13:57:27 +01001283 case AGGR_SOCKET:
1284 if (cpu_map__build_socket_map(evsel_list->cpus, &aggr_map)) {
1285 perror("cannot build socket map");
1286 return -1;
1287 }
Jiri Olsa1e5a2932015-10-25 15:51:18 +01001288 aggr_get_id = perf_stat__get_socket_cached;
Stephane Eranian86ee6e12013-02-14 13:57:27 +01001289 break;
Stephane Eranian12c08a92013-02-14 13:57:29 +01001290 case AGGR_CORE:
1291 if (cpu_map__build_core_map(evsel_list->cpus, &aggr_map)) {
1292 perror("cannot build core map");
1293 return -1;
1294 }
Jiri Olsa1e5a2932015-10-25 15:51:18 +01001295 aggr_get_id = perf_stat__get_core_cached;
Stephane Eranian12c08a92013-02-14 13:57:29 +01001296 break;
Stephane Eranian86ee6e12013-02-14 13:57:27 +01001297 case AGGR_NONE:
1298 case AGGR_GLOBAL:
Jiri Olsa32b8af82015-06-26 11:29:27 +02001299 case AGGR_THREAD:
Jiri Olsa208df992015-10-16 12:41:04 +02001300 case AGGR_UNSET:
Stephane Eranian86ee6e12013-02-14 13:57:27 +01001301 default:
1302 break;
1303 }
Jiri Olsa1e5a2932015-10-25 15:51:18 +01001304
1305 /*
1306 * The evsel_list->cpus is the base we operate on,
1307 * taking the highest cpu number to be the size of
1308	 * the aggregation translation cpumap.
1309 */
1310 nr = cpu_map__get_max(evsel_list->cpus);
1311 cpus_aggr_map = cpu_map__empty_new(nr + 1);
1312 return cpus_aggr_map ? 0 : -ENOMEM;
Stephane Eranian86ee6e12013-02-14 13:57:27 +01001313}
1314
Masami Hiramatsu544c2ae2015-12-09 11:11:27 +09001315static void perf_stat__exit_aggr_mode(void)
1316{
1317 cpu_map__put(aggr_map);
1318 cpu_map__put(cpus_aggr_map);
1319 aggr_map = NULL;
1320 cpus_aggr_map = NULL;
1321}
1322
Ingo Molnar2cba3ff2011-05-19 13:30:56 +02001323/*
1324 * Add default attributes, if there were no attributes specified or
1325 * if -d/--detailed, -d -d or -d -d -d is used:
1326 */
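/*
 * For example, "perf stat -d ./workload" appends the detailed_attrs group
 * (below) to the default attributes, "-d -d" additionally appends
 * very_detailed_attrs, and "-d -d -d" also appends the
 * very_very_detailed_attrs prefetch events.
 */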
1327static int add_default_attributes(void)
1328{
Arnaldo Carvalho de Melob070a542012-10-01 15:20:58 -03001329 struct perf_event_attr default_attrs[] = {
1330
1331 { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_TASK_CLOCK },
1332 { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CONTEXT_SWITCHES },
1333 { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CPU_MIGRATIONS },
1334 { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_PAGE_FAULTS },
1335
1336 { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CPU_CYCLES },
1337 { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_STALLED_CYCLES_FRONTEND },
1338 { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_STALLED_CYCLES_BACKEND },
1339 { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_INSTRUCTIONS },
1340 { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
1341 { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_MISSES },
1342
1343};
1344
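/*
 * The PERF_TYPE_HW_CACHE attributes below encode their config as
 * cache_id | (op_id << 8) | (result_id << 16), following the generic
 * hardware cache event encoding in include/uapi/linux/perf_event.h.
 */
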
1345/*
1346 * Detailed stats (-d), covering the L1 and last level data caches:
1347 */
1348 struct perf_event_attr detailed_attrs[] = {
1349
1350 { .type = PERF_TYPE_HW_CACHE,
1351 .config =
1352 PERF_COUNT_HW_CACHE_L1D << 0 |
1353 (PERF_COUNT_HW_CACHE_OP_READ << 8) |
1354 (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) },
1355
1356 { .type = PERF_TYPE_HW_CACHE,
1357 .config =
1358 PERF_COUNT_HW_CACHE_L1D << 0 |
1359 (PERF_COUNT_HW_CACHE_OP_READ << 8) |
1360 (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) },
1361
1362 { .type = PERF_TYPE_HW_CACHE,
1363 .config =
1364 PERF_COUNT_HW_CACHE_LL << 0 |
1365 (PERF_COUNT_HW_CACHE_OP_READ << 8) |
1366 (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) },
1367
1368 { .type = PERF_TYPE_HW_CACHE,
1369 .config =
1370 PERF_COUNT_HW_CACHE_LL << 0 |
1371 (PERF_COUNT_HW_CACHE_OP_READ << 8) |
1372 (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) },
1373};
1374
1375/*
1376 * Very detailed stats (-d -d), covering the instruction cache and the TLB caches:
1377 */
1378 struct perf_event_attr very_detailed_attrs[] = {
1379
1380 { .type = PERF_TYPE_HW_CACHE,
1381 .config =
1382 PERF_COUNT_HW_CACHE_L1I << 0 |
1383 (PERF_COUNT_HW_CACHE_OP_READ << 8) |
1384 (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) },
1385
1386 { .type = PERF_TYPE_HW_CACHE,
1387 .config =
1388 PERF_COUNT_HW_CACHE_L1I << 0 |
1389 (PERF_COUNT_HW_CACHE_OP_READ << 8) |
1390 (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) },
1391
1392 { .type = PERF_TYPE_HW_CACHE,
1393 .config =
1394 PERF_COUNT_HW_CACHE_DTLB << 0 |
1395 (PERF_COUNT_HW_CACHE_OP_READ << 8) |
1396 (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) },
1397
1398 { .type = PERF_TYPE_HW_CACHE,
1399 .config =
1400 PERF_COUNT_HW_CACHE_DTLB << 0 |
1401 (PERF_COUNT_HW_CACHE_OP_READ << 8) |
1402 (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) },
1403
1404 { .type = PERF_TYPE_HW_CACHE,
1405 .config =
1406 PERF_COUNT_HW_CACHE_ITLB << 0 |
1407 (PERF_COUNT_HW_CACHE_OP_READ << 8) |
1408 (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) },
1409
1410 { .type = PERF_TYPE_HW_CACHE,
1411 .config =
1412 PERF_COUNT_HW_CACHE_ITLB << 0 |
1413 (PERF_COUNT_HW_CACHE_OP_READ << 8) |
1414 (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) },
1415
1416};
1417
1418/*
1419 * Very, very detailed stats (-d -d -d), adding prefetch events:
1420 */
1421 struct perf_event_attr very_very_detailed_attrs[] = {
1422
1423 { .type = PERF_TYPE_HW_CACHE,
1424 .config =
1425 PERF_COUNT_HW_CACHE_L1D << 0 |
1426 (PERF_COUNT_HW_CACHE_OP_PREFETCH << 8) |
1427 (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) },
1428
1429 { .type = PERF_TYPE_HW_CACHE,
1430 .config =
1431 PERF_COUNT_HW_CACHE_L1D << 0 |
1432 (PERF_COUNT_HW_CACHE_OP_PREFETCH << 8) |
1433 (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) },
1434};
1435
Ingo Molnar2cba3ff2011-05-19 13:30:56 +02001436 /* Set attrs if no event is selected and !null_run: */
1437 if (null_run)
1438 return 0;
1439
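	/*
	 * -T/--transaction: use the full transaction event set when the PMU
	 * exposes the TSX-related cycles-ct and el-start events, otherwise
	 * fall back to the limited transaction set.
	 */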
Andi Kleen4cabc3d2013-08-21 16:47:26 -07001440 if (transaction_run) {
1441 int err;
1442 if (pmu_have_event("cpu", "cycles-ct") &&
1443 pmu_have_event("cpu", "el-start"))
Jiri Olsaa4547422015-06-03 16:25:53 +02001444 err = parse_events(evsel_list, transaction_attrs, NULL);
Andi Kleen4cabc3d2013-08-21 16:47:26 -07001445 else
Jiri Olsaa4547422015-06-03 16:25:53 +02001446 err = parse_events(evsel_list, transaction_limited_attrs, NULL);
1447 if (err) {
Andi Kleen4cabc3d2013-08-21 16:47:26 -07001448 fprintf(stderr, "Cannot set up transaction events\n");
1449 return -1;
1450 }
1451 return 0;
1452 }
1453
Ingo Molnar2cba3ff2011-05-19 13:30:56 +02001454 if (!evsel_list->nr_entries) {
Arnaldo Carvalho de Melo79695e12012-05-30 13:53:54 -03001455 if (perf_evlist__add_default_attrs(evsel_list, default_attrs) < 0)
Arnaldo Carvalho de Melo50d08e42011-11-04 09:10:59 -02001456 return -1;
Ingo Molnar2cba3ff2011-05-19 13:30:56 +02001457 }
1458
1459 /* Detailed events get appended to the event list: */
1460
1461 if (detailed_run < 1)
1462 return 0;
1463
1464 /* Append detailed run extra attributes: */
Arnaldo Carvalho de Melo79695e12012-05-30 13:53:54 -03001465 if (perf_evlist__add_default_attrs(evsel_list, detailed_attrs) < 0)
Arnaldo Carvalho de Melo50d08e42011-11-04 09:10:59 -02001466 return -1;
Ingo Molnar2cba3ff2011-05-19 13:30:56 +02001467
1468 if (detailed_run < 2)
1469 return 0;
1470
1471 /* Append very detailed run extra attributes: */
Arnaldo Carvalho de Melo79695e12012-05-30 13:53:54 -03001472 if (perf_evlist__add_default_attrs(evsel_list, very_detailed_attrs) < 0)
Arnaldo Carvalho de Melo50d08e42011-11-04 09:10:59 -02001473 return -1;
Ingo Molnar2cba3ff2011-05-19 13:30:56 +02001474
1475 if (detailed_run < 3)
1476 return 0;
1477
1478 /* Append very, very detailed run extra attributes: */
Arnaldo Carvalho de Melo79695e12012-05-30 13:53:54 -03001479 return perf_evlist__add_default_attrs(evsel_list, very_very_detailed_attrs);
Ingo Molnar2cba3ff2011-05-19 13:30:56 +02001480}
1481
Jiri Olsa4979d0c2015-11-05 15:40:46 +01001482static const char * const record_usage[] = {
1483 "perf stat record [<options>]",
1484 NULL,
1485};
1486
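/*
 * 'perf stat record' writes a perf.data file: advertise all header
 * features except the sampling-only ones (build ids, tracing data,
 * branch stacks, auxtrace), which a counting session does not produce.
 */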
Jiri Olsa3ba78bd2015-11-05 15:40:47 +01001487static void init_features(struct perf_session *session)
1488{
1489 int feat;
1490
1491 for (feat = HEADER_FIRST_FEATURE; feat < HEADER_LAST_FEATURE; feat++)
1492 perf_header__set_feat(&session->header, feat);
1493
1494 perf_header__clear_feat(&session->header, HEADER_BUILD_ID);
1495 perf_header__clear_feat(&session->header, HEADER_TRACING_DATA);
1496 perf_header__clear_feat(&session->header, HEADER_BRANCH_STACK);
1497 perf_header__clear_feat(&session->header, HEADER_AUXTRACE);
1498}
1499
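/*
 * Setup for 'perf stat record': parse the record-specific options, point
 * the data file at -o/--output if given, refuse -r/--repeat, and create
 * the session that the counting path will write its records into.
 * Returns the remaining argc for the workload command.
 */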
Jiri Olsa4979d0c2015-11-05 15:40:46 +01001500static int __cmd_record(int argc, const char **argv)
1501{
1502 struct perf_session *session;
1503 struct perf_data_file *file = &perf_stat.file;
1504
1505 argc = parse_options(argc, argv, stat_options, record_usage,
1506 PARSE_OPT_STOP_AT_NON_OPTION);
1507
1508 if (output_name)
1509 file->path = output_name;
1510
Jiri Olsae9d6db8e82015-11-05 15:40:53 +01001511 if (run_count != 1 || forever) {
1512 pr_err("Cannot use -r option with perf stat record.\n");
1513 return -1;
1514 }
1515
Jiri Olsa4979d0c2015-11-05 15:40:46 +01001516 session = perf_session__new(file, false, NULL);
1517 if (session == NULL) {
1518 pr_err("Perf session creation failed.\n");
1519 return -1;
1520 }
1521
Jiri Olsa3ba78bd2015-11-05 15:40:47 +01001522 init_features(session);
1523
Jiri Olsa4979d0c2015-11-05 15:40:46 +01001524 session->evlist = evsel_list;
1525 perf_stat.session = session;
1526 perf_stat.record = true;
1527 return argc;
1528}
1529
Irina Tirdea1d037ca2012-09-11 01:15:03 +03001530int cmd_stat(int argc, const char **argv, const char *prefix __maybe_unused)
Ingo Molnar52425192009-05-26 09:17:18 +02001531{
Arnaldo Carvalho de Melob070a542012-10-01 15:20:58 -03001532 const char * const stat_usage[] = {
1533 "perf stat [<options>] [<command>]",
1534 NULL
1535 };
Namhyung Kimcc03c542013-11-01 16:33:15 +09001536 int status = -EINVAL, run_idx;
Stephane Eranian4aa90152011-08-15 22:22:33 +02001537 const char *mode;
Jiri Olsa58215222015-07-21 14:31:24 +02001538 FILE *output = stderr;
Jiri Olsaec0d3d12015-07-21 14:31:25 +02001539 unsigned int interval;
Jiri Olsa4979d0c2015-11-05 15:40:46 +01001540 const char * const stat_subcommands[] = { "record" };
Ingo Molnar42202dd2009-06-13 14:57:28 +02001541
Stephane Eranian5af52b52010-05-18 15:00:01 +02001542 setlocale(LC_ALL, "");
1543
Namhyung Kim334fe7a2013-03-11 16:43:12 +09001544 evsel_list = perf_evlist__new();
Arnaldo Carvalho de Melo361c99a2011-01-11 20:56:53 -02001545 if (evsel_list == NULL)
1546 return -ENOMEM;
1547
Jiri Olsa4979d0c2015-11-05 15:40:46 +01001548 argc = parse_options_subcommand(argc, argv, stat_options, stat_subcommands,
1549 (const char **) stat_usage,
1550 PARSE_OPT_STOP_AT_NON_OPTION);
1551
1552 if (argc && !strncmp(argv[0], "rec", 3)) {
1553 argc = __cmd_record(argc, argv);
1554 if (argc < 0)
1555 return -1;
1556 }
Stephane Eraniand7470b62010-12-01 18:49:05 +02001557
Jiri Olsaec0d3d12015-07-21 14:31:25 +02001558 interval = stat_config.interval;
1559
Jiri Olsa4979d0c2015-11-05 15:40:46 +01001560 /*
1561 * For record command the -o is already taken care of.
1562 */
1563 if (!STAT_RECORD && output_name && strcmp(output_name, "-"))
Stephane Eranian4aa90152011-08-15 22:22:33 +02001564 output = NULL;
1565
Jim Cromie56f3bae2011-09-07 17:14:00 -06001566 if (output_name && output_fd) {
1567 fprintf(stderr, "cannot use both --output and --log-fd\n");
Jiri Olsae0547312015-11-05 15:40:45 +01001568 parse_options_usage(stat_usage, stat_options, "o", 1);
1569 parse_options_usage(NULL, stat_options, "log-fd", 0);
Namhyung Kimcc03c542013-11-01 16:33:15 +09001570 goto out;
Jim Cromie56f3bae2011-09-07 17:14:00 -06001571 }
Stephane Eranianfc3e4d02012-05-15 13:11:11 +02001572
1573 if (output_fd < 0) {
1574		fprintf(stderr, "argument to --log-fd must be > 0\n");
Jiri Olsae0547312015-11-05 15:40:45 +01001575 parse_options_usage(stat_usage, stat_options, "log-fd", 0);
Namhyung Kimcc03c542013-11-01 16:33:15 +09001576 goto out;
Stephane Eranianfc3e4d02012-05-15 13:11:11 +02001577 }
1578
Stephane Eranian4aa90152011-08-15 22:22:33 +02001579 if (!output) {
1580 struct timespec tm;
1581 mode = append_file ? "a" : "w";
1582
1583 output = fopen(output_name, mode);
1584 if (!output) {
1585 perror("failed to create output file");
David Ahernfceda7f2012-08-26 12:24:44 -06001586 return -1;
Stephane Eranian4aa90152011-08-15 22:22:33 +02001587 }
1588 clock_gettime(CLOCK_REALTIME, &tm);
1589 fprintf(output, "# started on %s\n", ctime(&tm.tv_sec));
Stephane Eranianfc3e4d02012-05-15 13:11:11 +02001590 } else if (output_fd > 0) {
Jim Cromie56f3bae2011-09-07 17:14:00 -06001591 mode = append_file ? "a" : "w";
1592 output = fdopen(output_fd, mode);
1593 if (!output) {
1594 perror("Failed opening logfd");
1595 return -errno;
1596 }
Stephane Eranian4aa90152011-08-15 22:22:33 +02001597 }
1598
Jiri Olsa58215222015-07-21 14:31:24 +02001599 stat_config.output = output;
1600
Jim Cromied4ffd042011-09-07 17:14:03 -06001601 if (csv_sep) {
Stephane Eraniand7470b62010-12-01 18:49:05 +02001602 csv_output = true;
Jim Cromied4ffd042011-09-07 17:14:03 -06001603 if (!strcmp(csv_sep, "\\t"))
1604 csv_sep = "\t";
1605 } else
Stephane Eraniand7470b62010-12-01 18:49:05 +02001606 csv_sep = DEFAULT_SEPARATOR;
1607
1608 /*
1609 * let the spreadsheet do the pretty-printing
1610 */
1611 if (csv_output) {
Jim Cromie61a9f322011-09-07 17:14:04 -06001612 /* User explicitly passed -B? */
Stephane Eraniand7470b62010-12-01 18:49:05 +02001613 if (big_num_opt == 1) {
1614 fprintf(stderr, "-B option not supported with -x\n");
Jiri Olsae0547312015-11-05 15:40:45 +01001615 parse_options_usage(stat_usage, stat_options, "B", 1);
1616 parse_options_usage(NULL, stat_options, "x", 1);
Namhyung Kimcc03c542013-11-01 16:33:15 +09001617 goto out;
Stephane Eraniand7470b62010-12-01 18:49:05 +02001618 } else /* Nope, so disable big number formatting */
1619 big_num = false;
1620 } else if (big_num_opt == 0) /* User passed --no-big-num */
1621 big_num = false;
1622
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03001623 if (!argc && target__none(&target))
Jiri Olsae0547312015-11-05 15:40:45 +01001624 usage_with_options(stat_usage, stat_options);
David Ahernac3063b2013-09-30 07:37:37 -06001625
Frederik Deweerdta7e191c2013-03-01 13:02:27 -05001626 if (run_count < 0) {
Namhyung Kimcc03c542013-11-01 16:33:15 +09001627 pr_err("Run count must be a positive number\n");
Jiri Olsae0547312015-11-05 15:40:45 +01001628 parse_options_usage(stat_usage, stat_options, "r", 1);
Namhyung Kimcc03c542013-11-01 16:33:15 +09001629 goto out;
Frederik Deweerdta7e191c2013-03-01 13:02:27 -05001630 } else if (run_count == 0) {
1631 forever = true;
1632 run_count = 1;
1633 }
Ingo Molnarddcacfa2009-04-20 15:37:32 +02001634
Jiri Olsa421a50f2015-07-21 14:31:22 +02001635 if ((stat_config.aggr_mode == AGGR_THREAD) && !target__has_task(&target)) {
Jiri Olsa32b8af82015-06-26 11:29:27 +02001636 fprintf(stderr, "The --per-thread option is only available "
1637 "when monitoring via -p -t options.\n");
Jiri Olsae0547312015-11-05 15:40:45 +01001638 parse_options_usage(NULL, stat_options, "p", 1);
1639 parse_options_usage(NULL, stat_options, "t", 1);
Jiri Olsa32b8af82015-06-26 11:29:27 +02001640 goto out;
1641 }
1642
1643 /*
1644 * no_aggr, cgroup are for system-wide only
1645	 * --per-thread is aggregated per thread, we don't mix it with cpu mode
1646 */
Jiri Olsa421a50f2015-07-21 14:31:22 +02001647 if (((stat_config.aggr_mode != AGGR_GLOBAL &&
1648 stat_config.aggr_mode != AGGR_THREAD) || nr_cgroups) &&
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03001649 !target__has_cpu(&target)) {
Stephane Eranian023695d2011-02-14 11:20:01 +02001650 fprintf(stderr, "both cgroup and no-aggregation "
1651			"modes are only available in system-wide mode\n");
1652
Jiri Olsae0547312015-11-05 15:40:45 +01001653 parse_options_usage(stat_usage, stat_options, "G", 1);
1654 parse_options_usage(NULL, stat_options, "A", 1);
1655 parse_options_usage(NULL, stat_options, "a", 1);
Namhyung Kimcc03c542013-11-01 16:33:15 +09001656 goto out;
Stephane Eraniand7e7a452013-02-06 15:46:02 +01001657 }
1658
Ingo Molnar2cba3ff2011-05-19 13:30:56 +02001659 if (add_default_attributes())
1660 goto out;
Ingo Molnarddcacfa2009-04-20 15:37:32 +02001661
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03001662 target__validate(&target);
Arnaldo Carvalho de Melo5c98d4662011-01-03 17:53:33 -02001663
Namhyung Kim77a6f012012-05-07 14:09:04 +09001664 if (perf_evlist__create_maps(evsel_list, &target) < 0) {
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03001665 if (target__has_task(&target)) {
Namhyung Kim77a6f012012-05-07 14:09:04 +09001666			pr_err("Problems finding threads to monitor\n");
Jiri Olsae0547312015-11-05 15:40:45 +01001667 parse_options_usage(stat_usage, stat_options, "p", 1);
1668 parse_options_usage(NULL, stat_options, "t", 1);
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03001669 } else if (target__has_cpu(&target)) {
Namhyung Kim77a6f012012-05-07 14:09:04 +09001670 perror("failed to parse CPUs map");
Jiri Olsae0547312015-11-05 15:40:45 +01001671 parse_options_usage(stat_usage, stat_options, "C", 1);
1672 parse_options_usage(NULL, stat_options, "a", 1);
Namhyung Kimcc03c542013-11-01 16:33:15 +09001673 }
1674 goto out;
Arnaldo Carvalho de Melo60d567e2011-01-03 17:49:48 -02001675 }
Jiri Olsa32b8af82015-06-26 11:29:27 +02001676
1677 /*
1678 * Initialize thread_map with comm names,
1679	 * so we can print them in the output.
1680 */
Jiri Olsa421a50f2015-07-21 14:31:22 +02001681 if (stat_config.aggr_mode == AGGR_THREAD)
Jiri Olsa32b8af82015-06-26 11:29:27 +02001682 thread_map__read_comms(evsel_list->threads);
1683
Stephane Eranian13370a92013-01-29 12:47:44 +01001684 if (interval && interval < 100) {
Kan Liang19afd102015-10-02 05:04:34 -04001685 if (interval < 10) {
1686 pr_err("print interval must be >= 10ms\n");
Jiri Olsae0547312015-11-05 15:40:45 +01001687 parse_options_usage(stat_usage, stat_options, "I", 1);
Kan Liang19afd102015-10-02 05:04:34 -04001688 goto out;
1689 } else
1690 pr_warning("print interval < 100ms. "
1691 "The overhead percentage could be high in some cases. "
1692 "Please proceed with caution.\n");
Stephane Eranian13370a92013-01-29 12:47:44 +01001693 }
Stephane Eranianc45c6ea2010-05-28 12:00:01 +02001694
Arnaldo Carvalho de Melod134ffb2013-03-18 11:24:21 -03001695 if (perf_evlist__alloc_stats(evsel_list, interval))
Arnaldo Carvalho de Melo03ad9742014-01-03 15:56:06 -03001696 goto out;
Zhang, Yanmind6d901c2010-03-18 11:36:05 -03001697
Stephane Eranian86ee6e12013-02-14 13:57:27 +01001698 if (perf_stat_init_aggr_mode())
Arnaldo Carvalho de Melo03ad9742014-01-03 15:56:06 -03001699 goto out;
Stephane Eranian86ee6e12013-02-14 13:57:27 +01001700
Ingo Molnar58d7e992009-05-15 11:03:23 +02001701 /*
1702	 * We don't want to block the signals - that would cause
1703 * child tasks to inherit that and Ctrl-C would not work.
1704 * What we want is for Ctrl-C to work in the exec()-ed
1705 * task, but being ignored by perf stat itself:
1706 */
Peter Zijlstraf7b7c262009-06-10 15:55:59 +02001707 atexit(sig_atexit);
Frederik Deweerdta7e191c2013-03-01 13:02:27 -05001708 if (!forever)
1709 signal(SIGINT, skip_signal);
Stephane Eranian13370a92013-01-29 12:47:44 +01001710 signal(SIGCHLD, skip_signal);
Ingo Molnar58d7e992009-05-15 11:03:23 +02001711 signal(SIGALRM, skip_signal);
1712 signal(SIGABRT, skip_signal);
1713
Ingo Molnar42202dd2009-06-13 14:57:28 +02001714 status = 0;
Frederik Deweerdta7e191c2013-03-01 13:02:27 -05001715 for (run_idx = 0; forever || run_idx < run_count; run_idx++) {
Ingo Molnar42202dd2009-06-13 14:57:28 +02001716 if (run_count != 1 && verbose)
Stephane Eranian4aa90152011-08-15 22:22:33 +02001717 fprintf(output, "[ perf stat: executing run #%d ... ]\n",
1718 run_idx + 1);
Ingo Molnarf9cef0a2011-04-28 18:17:11 +02001719
Ingo Molnar42202dd2009-06-13 14:57:28 +02001720 status = run_perf_stat(argc, argv);
Frederik Deweerdta7e191c2013-03-01 13:02:27 -05001721 if (forever && status != -1) {
Jiri Olsad4f63a42015-06-26 11:29:26 +02001722 print_counters(NULL, argc, argv);
Jiri Olsa254ecbc2015-06-26 11:29:13 +02001723 perf_stat__reset_stats();
Frederik Deweerdta7e191c2013-03-01 13:02:27 -05001724 }
Ingo Molnar42202dd2009-06-13 14:57:28 +02001725 }
1726
Frederik Deweerdta7e191c2013-03-01 13:02:27 -05001727 if (!forever && status != -1 && !interval)
Jiri Olsad4f63a42015-06-26 11:29:26 +02001728 print_counters(NULL, argc, argv);
Arnaldo Carvalho de Melod134ffb2013-03-18 11:24:21 -03001729
Jiri Olsa4979d0c2015-11-05 15:40:46 +01001730 if (STAT_RECORD) {
1731 /*
1732 * We synthesize the kernel mmap record just so that older tools
1733 * don't emit warnings about not being able to resolve symbols
1734	 * due to /proc/sys/kernel/kptr_restrict settings and instead provide
1735 * a saner message about no samples being in the perf.data file.
1736 *
1737 * This also serves to suppress a warning about f_header.data.size == 0
Jiri Olsa8b99b1a2015-11-05 15:40:48 +01001738 * in header.c at the moment 'perf stat record' gets introduced, which
1739 * is not really needed once we start adding the stat specific PERF_RECORD_
1740 * records, but the need to suppress the kptr_restrict messages in older
1741	 * tools remains. -acme
Jiri Olsa4979d0c2015-11-05 15:40:46 +01001742 */
1743 int fd = perf_data_file__fd(&perf_stat.file);
1744 int err = perf_event__synthesize_kernel_mmap((void *)&perf_stat,
1745 process_synthesized_event,
1746 &perf_stat.session->machines.host);
1747 if (err) {
1748 pr_warning("Couldn't synthesize the kernel mmap record, harmless, "
1749			   "older tools may produce warnings about this file.\n");
1750 }
1751
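		/*
		 * Non-interval run: emit a single FINAL stat round event
		 * for the whole measurement.
		 */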
Jiri Olsa7aad0c32015-11-05 15:40:52 +01001752 if (!interval) {
1753 if (WRITE_STAT_ROUND_EVENT(walltime_nsecs_stats.max, FINAL))
1754 pr_err("failed to write stat round event\n");
1755 }
1756
Jiri Olsa664c98d2015-11-05 15:40:50 +01001757 if (!perf_stat.file.is_pipe) {
1758 perf_stat.session->header.data_size += perf_stat.bytes_written;
1759 perf_session__write_header(perf_stat.session, evsel_list, fd, true);
1760 }
Jiri Olsa4979d0c2015-11-05 15:40:46 +01001761
1762 perf_session__delete(perf_stat.session);
1763 }
1764
Masami Hiramatsu544c2ae2015-12-09 11:11:27 +09001765 perf_stat__exit_aggr_mode();
Arnaldo Carvalho de Melod134ffb2013-03-18 11:24:21 -03001766 perf_evlist__free_stats(evsel_list);
Arnaldo Carvalho de Melo0015e2e2011-02-01 16:18:10 -02001767out:
1768 perf_evlist__delete(evsel_list);
Ingo Molnar42202dd2009-06-13 14:57:28 +02001769 return status;
Ingo Molnarddcacfa2009-04-20 15:37:32 +02001770}