blob: 3c7452b39f57649b05d675db3d19395fb765df2d [file] [log] [blame]
Ingo Molnarddcacfa2009-04-20 15:37:32 +02001/*
Ingo Molnarbf9e1872009-06-02 23:37:05 +02002 * builtin-stat.c
3 *
4 * Builtin stat command: Give a precise performance counters summary
5 * overview about any workload, CPU or specific PID.
6 *
7 * Sample output:
Ingo Molnarddcacfa2009-04-20 15:37:32 +02008
Ingo Molnar2cba3ff2011-05-19 13:30:56 +02009 $ perf stat ./hackbench 10
Ingo Molnarddcacfa2009-04-20 15:37:32 +020010
Ingo Molnar2cba3ff2011-05-19 13:30:56 +020011 Time: 0.118
Ingo Molnarddcacfa2009-04-20 15:37:32 +020012
Ingo Molnar2cba3ff2011-05-19 13:30:56 +020013 Performance counter stats for './hackbench 10':
Ingo Molnarddcacfa2009-04-20 15:37:32 +020014
Ingo Molnar2cba3ff2011-05-19 13:30:56 +020015 1708.761321 task-clock # 11.037 CPUs utilized
16 41,190 context-switches # 0.024 M/sec
17 6,735 CPU-migrations # 0.004 M/sec
18 17,318 page-faults # 0.010 M/sec
19 5,205,202,243 cycles # 3.046 GHz
20 3,856,436,920 stalled-cycles-frontend # 74.09% frontend cycles idle
21 1,600,790,871 stalled-cycles-backend # 30.75% backend cycles idle
22 2,603,501,247 instructions # 0.50 insns per cycle
23 # 1.48 stalled cycles per insn
24 484,357,498 branches # 283.455 M/sec
25 6,388,934 branch-misses # 1.32% of all branches
26
27 0.154822978 seconds time elapsed
Ingo Molnarddcacfa2009-04-20 15:37:32 +020028
Ingo Molnar52425192009-05-26 09:17:18 +020029 *
Ingo Molnar2cba3ff2011-05-19 13:30:56 +020030 * Copyright (C) 2008-2011, Red Hat Inc, Ingo Molnar <mingo@redhat.com>
Ingo Molnar52425192009-05-26 09:17:18 +020031 *
32 * Improvements and fixes by:
33 *
34 * Arjan van de Ven <arjan@linux.intel.com>
35 * Yanmin Zhang <yanmin.zhang@intel.com>
36 * Wu Fengguang <fengguang.wu@intel.com>
37 * Mike Galbraith <efault@gmx.de>
38 * Paul Mackerras <paulus@samba.org>
Jaswinder Singh Rajput6e750a8f2009-06-27 03:02:07 +053039 * Jaswinder Singh Rajput <jaswinder@kernel.org>
Ingo Molnar52425192009-05-26 09:17:18 +020040 *
41 * Released under the GPL v2. (and only v2, not any later version)
Ingo Molnarddcacfa2009-04-20 15:37:32 +020042 */
43
Peter Zijlstra1a482f32009-05-23 18:28:58 +020044#include "perf.h"
Ingo Molnar16f762a2009-05-27 09:10:38 +020045#include "builtin.h"
Arnaldo Carvalho de Melof14d5702014-10-17 12:17:40 -030046#include "util/cgroup.h"
Ingo Molnar148be2c2009-04-27 08:02:14 +020047#include "util/util.h"
Josh Poimboeuf4b6ab942015-12-15 09:39:39 -060048#include <subcmd/parse-options.h>
Ingo Molnar52425192009-05-26 09:17:18 +020049#include "util/parse-events.h"
Andi Kleen4cabc3d2013-08-21 16:47:26 -070050#include "util/pmu.h"
Frederic Weisbecker8f28827a2009-08-16 22:05:48 +020051#include "util/event.h"
Arnaldo Carvalho de Melo361c99a2011-01-11 20:56:53 -020052#include "util/evlist.h"
Arnaldo Carvalho de Melo69aad6f2011-01-03 16:39:04 -020053#include "util/evsel.h"
Frederic Weisbecker8f28827a2009-08-16 22:05:48 +020054#include "util/debug.h"
Ingo Molnara5d243d2011-04-27 05:39:24 +020055#include "util/color.h"
Xiao Guangrong0007ece2012-09-17 16:31:14 +080056#include "util/stat.h"
Liming Wang60666c62009-12-31 16:05:50 +080057#include "util/header.h"
Paul Mackerrasa12b51c2010-03-10 20:36:09 +110058#include "util/cpumap.h"
Zhang, Yanmind6d901c2010-03-18 11:36:05 -030059#include "util/thread.h"
Arnaldo Carvalho de Melofd782602011-01-18 15:15:24 -020060#include "util/thread_map.h"
Jiri Olsad8095602015-08-07 12:51:03 +020061#include "util/counts.h"
Andi Kleen44b1e602016-05-30 12:49:42 -030062#include "util/group.h"
Jiri Olsa4979d0c2015-11-05 15:40:46 +010063#include "util/session.h"
Jiri Olsaba6039b62015-11-05 15:40:55 +010064#include "util/tool.h"
Andi Kleen44b1e602016-05-30 12:49:42 -030065#include "util/group.h"
Jiri Olsaba6039b62015-11-05 15:40:55 +010066#include "asm/bug.h"
Ingo Molnarddcacfa2009-04-20 15:37:32 +020067
Andi Kleen44b1e602016-05-30 12:49:42 -030068#include <api/fs/fs.h>
Peter Zijlstra1f16c572012-10-23 13:40:14 +020069#include <stdlib.h>
Ingo Molnarddcacfa2009-04-20 15:37:32 +020070#include <sys/prctl.h>
Stephane Eranian5af52b52010-05-18 15:00:01 +020071#include <locale.h>
Andi Kleene3b03b62016-05-05 16:04:03 -070072#include <math.h>
Peter Zijlstra16c8a102009-05-05 17:50:27 +020073
/* Column separator for CSV output (-x) defaults to a single space. */
#define DEFAULT_SEPARATOR	" "
/* Placeholder strings printed instead of a count value. */
#define CNTR_NOT_SUPPORTED	"<not supported>"
#define CNTR_NOT_COUNTED	"<not counted>"

static void print_counters(struct timespec *ts, int argc, const char **argv);

/* Default events used for perf stat -T */
static const char *transaction_attrs = {
	"task-clock,"
	"{"
	"instructions,"
	"cycles,"
	"cpu/cycles-t/,"
	"cpu/tx-start/,"
	"cpu/el-start/,"
	"cpu/cycles-ct/"
	"}"
};

/* More limited version when the CPU does not have all events. */
static const char * transaction_limited_attrs = {
	"task-clock,"
	"{"
	"instructions,"
	"cycles,"
	"cpu/cycles-t/,"
	"cpu/tx-start/"
	"}"
};

/* Events used for --topdown; NULL-terminated list. */
static const char * topdown_attrs[] = {
	"topdown-total-slots",
	"topdown-slots-retired",
	"topdown-recovery-bubbles",
	"topdown-fetch-bubbles",
	"topdown-slots-issued",
	NULL,
};
112
/* The list of events being counted for this stat session. */
static struct perf_evlist	*evsel_list;

/* What we attach to: pid/tid/cpu/uid; UINT_MAX uid means "unset". */
static struct target target = {
	.uid = UINT_MAX,
};

/* Maps a cpu to its aggregation id (socket, core, ...). */
typedef int (*aggr_get_id_t)(struct cpu_map *m, int cpu);

static int			run_count			=  1;
static bool			no_inherit			= false;
static volatile pid_t		child_pid			= -1;
static bool			null_run			=  false;
static int			detailed_run			=  0;
static bool			transaction_run;
static bool			topdown_run			= false;
static bool			big_num				=  true;
static int			big_num_opt			=  -1;
static const char		*csv_sep			= NULL;
static bool			csv_output			= false;
static bool			group				= false;
static const char		*pre_cmd			= NULL;
static const char		*post_cmd			= NULL;
static bool			sync_run			= false;
static unsigned int		initial_delay			= 0;
static unsigned int		unit_width			= 4; /* strlen("unit") */
static bool			forever				= false;
static bool			metric_only			= false;
static bool			force_metric_only		= false;
/* Wall-clock reference taken right before enabling counters. */
static struct timespec		ref_time;
static struct cpu_map		*aggr_map;
static aggr_get_id_t		aggr_get_id;
static bool			append_file;
static const char		*output_name;
static int			output_fd;

/* State for `perf stat record`: output file, session and bookkeeping. */
struct perf_stat {
	bool			 record;	/* writing a perf.data file? */
	struct perf_data_file	 file;
	struct perf_session	*session;
	u64			 bytes_written;
	struct perf_tool	 tool;
	bool			 maps_allocated;
	struct cpu_map		*cpus;
	struct thread_map	*threads;
	enum aggr_mode		 aggr_mode;
};

static struct perf_stat		perf_stat;
#define STAT_RECORD		perf_stat.record

/* Set from a signal handler to terminate the counting loop. */
static volatile int done = 0;

static struct perf_stat_config stat_config = {
	.aggr_mode	= AGGR_GLOBAL,
	.scale		= true,
};
169
/*
 * Compute r = a - b for two timespecs, assuming a >= b, borrowing a
 * second's worth of nanoseconds when the nsec field would go negative.
 */
static inline void diff_timespec(struct timespec *r, struct timespec *a,
				 struct timespec *b)
{
	long nsec_delta = a->tv_nsec - b->tv_nsec;

	if (nsec_delta < 0) {
		/* Borrow one second from the seconds difference. */
		r->tv_sec = a->tv_sec - b->tv_sec - 1;
		r->tv_nsec = nsec_delta + 1000000000L;
	} else {
		r->tv_sec = a->tv_sec - b->tv_sec;
		r->tv_nsec = nsec_delta;
	}
}
181
/*
 * Clear per-event stats and the shadow (derived-metric) stats, so a
 * fresh run starts from zero.
 */
static void perf_stat__reset_stats(void)
{
	perf_evlist__reset_stats(evsel_list);
	perf_stat__reset_shadow_stats();
}
187
/*
 * Configure @evsel's perf_event_attr for counting and open it, either
 * per-cpu (when a cpu target was given) or per-thread.  Returns the
 * open result (< 0 on failure, errno set by the open path).
 */
static int create_perf_stat_counter(struct perf_evsel *evsel)
{
	struct perf_event_attr *attr = &evsel->attr;

	/* Ask the kernel for enabled/running times so counts can be scaled. */
	if (stat_config.scale)
		attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
				    PERF_FORMAT_TOTAL_TIME_RUNNING;

	attr->inherit = !no_inherit;

	/*
	 * Some events get initialized with sample_(period/type) set,
	 * like tracepoints. Clear it up for counting.
	 */
	attr->sample_period = 0;

	/*
	 * But set sample_type to PERF_SAMPLE_IDENTIFIER, which should be harmless
	 * while avoiding that older tools show confusing messages.
	 *
	 * However for pipe sessions we need to keep it zero,
	 * because script's perf_evsel__check_attr is triggered
	 * by attr->sample_type != 0, and we can't run it on
	 * stat sessions.
	 */
	if (!(STAT_RECORD && perf_stat.file.is_pipe))
		attr->sample_type = PERF_SAMPLE_IDENTIFIER;

	/*
	 * Disabling all counters initially, they will be enabled
	 * either manually by us or by kernel via enable_on_exec
	 * set later.
	 */
	if (perf_evsel__is_group_leader(evsel)) {
		attr->disabled = 1;

		/*
		 * In case of initial_delay we enable tracee
		 * events manually.
		 */
		if (target__none(&target) && !initial_delay)
			attr->enable_on_exec = 1;
	}

	if (target__has_cpu(&target))
		return perf_evsel__open_per_cpu(evsel, perf_evsel__cpus(evsel));

	return perf_evsel__open_per_thread(evsel, evsel_list->threads);
}
237
Ingo Molnarc04f5e52009-05-29 09:10:54 +0200238/*
239 * Does the counter have nsecs as a unit?
240 */
Arnaldo Carvalho de Melodaec78a2011-01-03 16:49:44 -0200241static inline int nsec_counter(struct perf_evsel *evsel)
Ingo Molnarc04f5e52009-05-29 09:10:54 +0200242{
Arnaldo Carvalho de Melodaec78a2011-01-03 16:49:44 -0200243 if (perf_evsel__match(evsel, SOFTWARE, SW_CPU_CLOCK) ||
244 perf_evsel__match(evsel, SOFTWARE, SW_TASK_CLOCK))
Ingo Molnarc04f5e52009-05-29 09:10:54 +0200245 return 1;
246
247 return 0;
248}
249
/*
 * Callback used by all synthesize helpers in record mode: append the
 * synthesized event to the perf.data file and account the bytes.
 * Returns 0 on success, -1 on write failure.
 */
static int process_synthesized_event(struct perf_tool *tool __maybe_unused,
				     union perf_event *event,
				     struct perf_sample *sample __maybe_unused,
				     struct machine *machine __maybe_unused)
{
	if (perf_data_file__write(&perf_stat.file, event, event->header.size) < 0) {
		pr_err("failed to write perf data, error: %m\n");
		return -1;
	}

	perf_stat.bytes_written += event->header.size;
	return 0;
}
263
/*
 * Emit a stat-round event (timestamp @tm, round @type) into the
 * record stream.
 */
static int write_stat_round_event(u64 tm, u64 type)
{
	return perf_event__synthesize_stat_round(NULL, tm, type,
						 process_synthesized_event,
						 NULL);
}

/* Convenience wrapper: expands `interval` to PERF_STAT_ROUND_TYPE__<interval>. */
#define WRITE_STAT_ROUND_EVENT(time, interval) \
	write_stat_round_event(time, PERF_STAT_ROUND_TYPE__ ## interval)
273
/* Look up the sample id entry of (counter, cpu, thread). */
#define SID(e, x, y) xyarray__entry(e->sample_id, x, y)

/*
 * Write one counter value for (cpu, thread) into the record stream,
 * tagged with the counter's sample id.
 */
static int
perf_evsel__write_stat_event(struct perf_evsel *counter, u32 cpu, u32 thread,
			     struct perf_counts_values *count)
{
	struct perf_sample_id *sid = SID(counter, cpu, thread);

	return perf_event__synthesize_stat(NULL, cpu, thread, sid->id, count,
					   process_synthesized_event, NULL);
}
285
/*
 * Read out the results of a single counter:
 * do not aggregate counts across CPUs in system-wide mode
 *
 * Returns 0 on success, -ENOENT for unsupported counters, -1 on a
 * read or (in record mode) write failure.
 */
static int read_counter(struct perf_evsel *counter)
{
	int nthreads = thread_map__nr(evsel_list->threads);
	int ncpus, cpu, thread;

	/* Only iterate real CPUs when counting per-cpu. */
	if (target__has_cpu(&target))
		ncpus = perf_evsel__nr_cpus(counter);
	else
		ncpus = 1;

	if (!counter->supported)
		return -ENOENT;

	/* A system-wide event has one value regardless of the thread map. */
	if (counter->system_wide)
		nthreads = 1;

	for (thread = 0; thread < nthreads; thread++) {
		for (cpu = 0; cpu < ncpus; cpu++) {
			struct perf_counts_values *count;

			count = perf_counts(counter->counts, cpu, thread);
			if (perf_evsel__read(counter, cpu, thread, count))
				return -1;

			/* In record mode, persist each value as a stat event. */
			if (STAT_RECORD) {
				if (perf_evsel__write_stat_event(counter, cpu, thread, count)) {
					pr_err("failed to write stat event\n");
					return -1;
				}
			}

			/* -vv: dump raw val/ena/run per cpu for debugging. */
			if (verbose > 1) {
				fprintf(stat_config.output,
					"%s: %d: %" PRIu64 " %" PRIu64 " %" PRIu64 "\n",
					perf_evsel__name(counter),
					cpu,
					count->val, count->ena, count->run);
			}
		}
	}

	return 0;
}
333
/*
 * Read and post-process every counter in the event list.  Failures
 * are logged but do not stop the loop, so the remaining counters are
 * still read.
 */
static void read_counters(void)
{
	struct perf_evsel *counter;

	evlist__for_each_entry(evsel_list, counter) {
		if (read_counter(counter))
			pr_debug("failed to read counter %s\n", counter->name);

		if (perf_stat_process_counter(&stat_config, counter))
			pr_warning("failed to process counter %s\n", counter->name);
	}
}
346
/*
 * One -I interval tick: snapshot all counters, compute the elapsed
 * time since ref_time and print this interval's results.
 */
static void process_interval(void)
{
	struct timespec ts, rs;

	read_counters();

	clock_gettime(CLOCK_MONOTONIC, &ts);
	diff_timespec(&rs, &ts, &ref_time);

	/* In record mode, also mark the interval with a round event. */
	if (STAT_RECORD) {
		if (WRITE_STAT_ROUND_EVENT(rs.tv_sec * NSECS_PER_SEC + rs.tv_nsec, INTERVAL))
			pr_err("failed to write stat round event\n");
	}

	print_counters(&rs, 0, NULL);
}
363
/*
 * Start counting: honor the optional -D startup delay, then enable
 * the events when the kernel won't do it for us via enable_on_exec.
 */
static void enable_counters(void)
{
	if (initial_delay)
		usleep(initial_delay * 1000);

	/*
	 * We need to enable counters only if:
	 * - we don't have tracee (attaching to task or cpu)
	 * - we have initial delay configured
	 */
	if (!target__none(&target) || initial_delay)
		perf_evlist__enable(evsel_list);
}
377
/* Stop all counters ahead of the final read (see comment below). */
static void disable_counters(void)
{
	/*
	 * If we don't have tracee (attaching to task or cpu), counters may
	 * still be running. To get accurate group ratios, we must stop groups
	 * from counting before reading their constituent counters.
	 */
	if (!target__none(&target))
		perf_evlist__disable(evsel_list);
}
388
/* errno from a failed workload exec, 0 when the workload started fine. */
static volatile int workload_exec_errno;

/*
 * perf_evlist__prepare_workload will send a SIGUSR1
 * if the fork fails, since we asked by setting its
 * want_signal to true.
 */
static void workload_exec_failed_signal(int signo __maybe_unused, siginfo_t *info,
					void *ucontext __maybe_unused)
{
	/* Stash the child's errno; it is checked after wait() in __run_perf_stat(). */
	workload_exec_errno = info->si_value.sival_int;
}
401
Jiri Olsa7b60a7e2015-11-05 15:40:54 +0100402static bool has_unit(struct perf_evsel *counter)
403{
404 return counter->unit && *counter->unit;
405}
406
407static bool has_scale(struct perf_evsel *counter)
408{
409 return counter->scale != 1;
410}
411
Jiri Olsa664c98d2015-11-05 15:40:50 +0100412static int perf_stat_synthesize_config(bool is_pipe)
Jiri Olsa8b99b1a2015-11-05 15:40:48 +0100413{
Jiri Olsa7b60a7e2015-11-05 15:40:54 +0100414 struct perf_evsel *counter;
Jiri Olsa8b99b1a2015-11-05 15:40:48 +0100415 int err;
416
Jiri Olsa664c98d2015-11-05 15:40:50 +0100417 if (is_pipe) {
418 err = perf_event__synthesize_attrs(NULL, perf_stat.session,
419 process_synthesized_event);
420 if (err < 0) {
421 pr_err("Couldn't synthesize attrs.\n");
422 return err;
423 }
424 }
425
Jiri Olsa7b60a7e2015-11-05 15:40:54 +0100426 /*
427 * Synthesize other events stuff not carried within
428 * attr event - unit, scale, name
429 */
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -0300430 evlist__for_each_entry(evsel_list, counter) {
Jiri Olsa7b60a7e2015-11-05 15:40:54 +0100431 if (!counter->supported)
432 continue;
433
434 /*
435 * Synthesize unit and scale only if it's defined.
436 */
437 if (has_unit(counter)) {
438 err = perf_event__synthesize_event_update_unit(NULL, counter, process_synthesized_event);
439 if (err < 0) {
440 pr_err("Couldn't synthesize evsel unit.\n");
441 return err;
442 }
443 }
444
445 if (has_scale(counter)) {
446 err = perf_event__synthesize_event_update_scale(NULL, counter, process_synthesized_event);
447 if (err < 0) {
448 pr_err("Couldn't synthesize evsel scale.\n");
449 return err;
450 }
451 }
452
453 if (counter->own_cpus) {
454 err = perf_event__synthesize_event_update_cpus(NULL, counter, process_synthesized_event);
455 if (err < 0) {
456 pr_err("Couldn't synthesize evsel scale.\n");
457 return err;
458 }
459 }
460
461 /*
462 * Name is needed only for pipe output,
463 * perf.data carries event names.
464 */
465 if (is_pipe) {
466 err = perf_event__synthesize_event_update_name(NULL, counter, process_synthesized_event);
467 if (err < 0) {
468 pr_err("Couldn't synthesize evsel name.\n");
469 return err;
470 }
471 }
472 }
473
Jiri Olsa8b99b1a2015-11-05 15:40:48 +0100474 err = perf_event__synthesize_thread_map2(NULL, evsel_list->threads,
475 process_synthesized_event,
476 NULL);
477 if (err < 0) {
478 pr_err("Couldn't synthesize thread map.\n");
479 return err;
480 }
481
482 err = perf_event__synthesize_cpu_map(NULL, evsel_list->cpus,
483 process_synthesized_event, NULL);
484 if (err < 0) {
485 pr_err("Couldn't synthesize thread map.\n");
486 return err;
487 }
488
489 err = perf_event__synthesize_stat_config(NULL, &stat_config,
490 process_synthesized_event, NULL);
491 if (err < 0) {
492 pr_err("Couldn't synthesize config.\n");
493 return err;
494 }
495
496 return 0;
497}
498
/* Dereference the fd of (evsel, cpu, thread) out of the fd xyarray. */
#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))

/*
 * Register every per-cpu/per-thread fd of @counter with the evlist id
 * index.  Used by the STAT_RECORD path so stat events can be matched
 * back to their evsel.
 */
static int __store_counter_ids(struct perf_evsel *counter,
			       struct cpu_map *cpus,
			       struct thread_map *threads)
{
	int cpu, thread;

	for (cpu = 0; cpu < cpus->nr; cpu++) {
		for (thread = 0; thread < threads->nr; thread++) {
			int fd = FD(counter, cpu, thread);

			if (perf_evlist__id_add_fd(evsel_list, counter,
						   cpu, thread, fd) < 0)
				return -1;
		}
	}

	return 0;
}

/* Allocate @counter's id array, then populate it from its open fds. */
static int store_counter_ids(struct perf_evsel *counter)
{
	struct cpu_map *cpus = counter->cpus;
	struct thread_map *threads = counter->threads;

	if (perf_evsel__alloc_id(counter, cpus->nr, threads->nr))
		return -ENOMEM;

	return __store_counter_ids(counter, cpus, threads);
}
530
/*
 * Core of a single stat run: fork the workload (if any), open all
 * counters (with fallback/skip handling for unsupported events),
 * apply filters, write record-mode headers, enable counting, wait for
 * completion (printing -I intervals along the way), then read the
 * final counts.  Returns the workload's exit status, or a negative
 * error.
 */
static int __run_perf_stat(int argc, const char **argv)
{
	int interval = stat_config.interval;
	char msg[512];
	unsigned long long t0, t1;
	struct perf_evsel *counter;
	struct timespec ts;
	size_t l;
	int status = 0;
	const bool forks = (argc > 0);
	bool is_pipe = STAT_RECORD ? perf_stat.file.is_pipe : false;

	/* Sleep granularity: the -I interval (ms), or 1s by default. */
	if (interval) {
		ts.tv_sec = interval / 1000;
		ts.tv_nsec = (interval % 1000) * 1000000;
	} else {
		ts.tv_sec = 1;
		ts.tv_nsec = 0;
	}

	if (forks) {
		if (perf_evlist__prepare_workload(evsel_list, &target, argv, is_pipe,
						  workload_exec_failed_signal) < 0) {
			perror("failed to prepare workload");
			return -1;
		}
		child_pid = evsel_list->workload.pid;
	}

	if (group)
		perf_evlist__set_leader(evsel_list);

	evlist__for_each_entry(evsel_list, counter) {
try_again:
		if (create_perf_stat_counter(counter) < 0) {
			/*
			 * PPC returns ENXIO for HW counters until 2.6.37
			 * (behavior changed with commit b0a873e).
			 */
			if (errno == EINVAL || errno == ENOSYS ||
			    errno == ENOENT || errno == EOPNOTSUPP ||
			    errno == ENXIO) {
				if (verbose)
					ui__warning("%s event is not supported by the kernel.\n",
						    perf_evsel__name(counter));
				counter->supported = false;

				/*
				 * Skip to the next event unless this one is a
				 * group leader with members, in which case fall
				 * through and report the error.
				 */
				if ((counter->leader != counter) ||
				    !(counter->leader->nr_members > 1))
					continue;
			} else if (perf_evsel__fallback(counter, errno, msg, sizeof(msg))) {
				/* The evsel was adjusted (fallback); retry the open. */
				if (verbose)
					ui__warning("%s\n", msg);
				goto try_again;
			}

			perf_evsel__open_strerror(counter, &target,
						  errno, msg, sizeof(msg));
			ui__error("%s\n", msg);

			if (child_pid != -1)
				kill(child_pid, SIGTERM);

			return -1;
		}
		counter->supported = true;

		/* Track the widest unit string for column alignment. */
		l = strlen(counter->unit);
		if (l > unit_width)
			unit_width = l;

		if (STAT_RECORD && store_counter_ids(counter))
			return -1;
	}

	if (perf_evlist__apply_filters(evsel_list, &counter)) {
		error("failed to set filter \"%s\" on event %s with %d (%s)\n",
			counter->filter, perf_evsel__name(counter), errno,
			str_error_r(errno, msg, sizeof(msg)));
		return -1;
	}

	/* Record mode: emit the file or pipe header plus synthesized config. */
	if (STAT_RECORD) {
		int err, fd = perf_data_file__fd(&perf_stat.file);

		if (is_pipe) {
			err = perf_header__write_pipe(perf_data_file__fd(&perf_stat.file));
		} else {
			err = perf_session__write_header(perf_stat.session, evsel_list,
							 fd, false);
		}

		if (err < 0)
			return err;

		err = perf_stat_synthesize_config(is_pipe);
		if (err < 0)
			return err;
	}

	/*
	 * Enable counters and exec the command:
	 */
	t0 = rdclock();
	clock_gettime(CLOCK_MONOTONIC, &ref_time);

	if (forks) {
		perf_evlist__start_workload(evsel_list);
		enable_counters();

		/* Print interval results until the child exits. */
		if (interval) {
			while (!waitpid(child_pid, &status, WNOHANG)) {
				nanosleep(&ts, NULL);
				process_interval();
			}
		}
		wait(&status);

		if (workload_exec_errno) {
			const char *emsg = str_error_r(workload_exec_errno, msg, sizeof(msg));
			pr_err("Workload failed: %s\n", emsg);
			return -1;
		}

		if (WIFSIGNALED(status))
			psignal(WTERMSIG(status), argv[0]);
	} else {
		/* No workload: count until interrupted (done set by signal). */
		enable_counters();
		while (!done) {
			nanosleep(&ts, NULL);
			if (interval)
				process_interval();
		}
	}

	disable_counters();

	t1 = rdclock();

	update_stats(&walltime_nsecs_stats, t1 - t0);

	/*
	 * Closing a group leader splits the group, and as we only disable
	 * group leaders, results in remaining events becoming enabled. To
	 * avoid arbitrary skew, we must read all counters before closing any
	 * group leaders.
	 */
	read_counters();
	perf_evlist__close(evsel_list);

	return WEXITSTATUS(status);
}
683
Arnaldo Carvalho de Melo41cde472014-01-03 17:34:42 -0300684static int run_perf_stat(int argc, const char **argv)
Peter Zijlstra1f16c572012-10-23 13:40:14 +0200685{
686 int ret;
687
688 if (pre_cmd) {
689 ret = system(pre_cmd);
690 if (ret)
691 return ret;
692 }
693
694 if (sync_run)
695 sync();
696
697 ret = __run_perf_stat(argc, argv);
698 if (ret)
699 return ret;
700
701 if (post_cmd) {
702 ret = system(post_cmd);
703 if (ret)
704 return ret;
705 }
706
707 return ret;
708}
709
/*
 * Print a counter's running/enabled times: raw run value plus the
 * run/ena percentage in CSV mode, or a " (xx.xx%)" suffix otherwise
 * (only when run differs from ena).
 */
static void print_running(u64 run, u64 ena)
{
	if (csv_output) {
		fprintf(stat_config.output, "%s%" PRIu64 "%s%.2f",
					csv_sep,
					run,
					csv_sep,
					ena ? 100.0 * run / ena : 100.0);
	} else if (run != ena) {
		fprintf(stat_config.output, " (%.2f%%)", 100.0 * run / ena);
	}
}
722
/*
 * Print the relative stddev of (total, avg) as a percentage; plain
 * number in CSV mode, " ( +-x.xx% )" otherwise (suppressed when 0).
 */
static void print_noise_pct(double total, double avg)
{
	double pct = rel_stddev_stats(total, avg);

	if (csv_output)
		fprintf(stat_config.output, "%s%.2f%%", csv_sep, pct);
	else if (pct)
		fprintf(stat_config.output, " ( +-%6.2f%% )", pct);
}
732
Arnaldo Carvalho de Melo69aad6f2011-01-03 16:39:04 -0200733static void print_noise(struct perf_evsel *evsel, double avg)
Ingo Molnar42202dd2009-06-13 14:57:28 +0200734{
Jiri Olsa581cc8a2015-10-16 12:41:03 +0200735 struct perf_stat_evsel *ps;
Arnaldo Carvalho de Melo69aad6f2011-01-03 16:39:04 -0200736
Peter Zijlstra849abde2009-09-04 18:23:38 +0200737 if (run_count == 1)
738 return;
739
Arnaldo Carvalho de Melo69aad6f2011-01-03 16:39:04 -0200740 ps = evsel->priv;
Ingo Molnarf99844c2011-04-27 05:35:39 +0200741 print_noise_pct(stddev_stats(&ps->res_stats[0]), avg);
Ingo Molnar42202dd2009-06-13 14:57:28 +0200742}
743
/*
 * Print the aggregation-unit prefix column(s) of one result line:
 * socket/core id, CPU number, or comm-pid, depending on the configured
 * aggregation mode.  AGGR_GLOBAL (and unset) prints nothing.
 * In CSV mode all field widths collapse to 0 and csv_sep separates
 * the columns instead.
 */
static void aggr_printout(struct perf_evsel *evsel, int id, int nr)
{
	switch (stat_config.aggr_mode) {
	case AGGR_CORE:
		/* "S<socket>-C<core>" followed by the number of aggregated CPUs */
		fprintf(stat_config.output, "S%d-C%*d%s%*d%s",
			cpu_map__id_to_socket(id),
			csv_output ? 0 : -8,
			cpu_map__id_to_cpu(id),
			csv_sep,
			csv_output ? 0 : 4,
			nr,
			csv_sep);
		break;
	case AGGR_SOCKET:
		/* "S<socket>" followed by the number of aggregated CPUs */
		fprintf(stat_config.output, "S%*d%s%*d%s",
			csv_output ? 0 : -5,
			id,
			csv_sep,
			csv_output ? 0 : 4,
			nr,
			csv_sep);
		break;
	case AGGR_NONE:
		/* @id is an index into the evsel's cpu map here, not a CPU number */
		fprintf(stat_config.output, "CPU%*d%s",
			csv_output ? 0 : -4,
			perf_evsel__cpus(evsel)->map[id], csv_sep);
		break;
	case AGGR_THREAD:
		/* per-thread mode: "<comm>-<pid>"; @id is a thread map index */
		fprintf(stat_config.output, "%*s-%*d%s",
			csv_output ? 0 : 16,
			thread_map__comm(evsel->threads, id),
			csv_output ? 0 : -8,
			thread_map__pid(evsel->threads, id),
			csv_sep);
		break;
	case AGGR_GLOBAL:
	case AGGR_UNSET:
	default:
		break;
	}
}
Stephane Eraniand7470b62010-12-01 18:49:05 +0200785
/* Shared state for the print_metric_*() / new_line_*() callbacks. */
struct outstate {
	FILE *fh;		/* destination stream (stat_config.output) */
	bool newline;		/* deferred line break pending (std mode) */
	const char *prefix;	/* interval timestamp prefix, or "" */
	int nfields;		/* leading CSV fields to pad on a new line */
	int id, nr;		/* aggregation id and CPU count for the line */
	struct perf_evsel *evsel;	/* event currently being printed */
};
794
/* Column width of the " # <value> <unit>" metric field in standard output */
#define METRIC_LEN 35
796
797static void new_line_std(void *ctx)
798{
799 struct outstate *os = ctx;
800
801 os->newline = true;
802}
803
/*
 * Emit the deferred line break for standard output: newline, the
 * interval prefix, the aggregation columns, then whitespace padding up
 * to the metric column so a continuation metric lines up with the one
 * above it.  (Extra indent for AGGR_NONE matches its wider prefix.)
 */
static void do_new_line_std(struct outstate *os)
{
	fputc('\n', os->fh);
	fputs(os->prefix, os->fh);
	aggr_printout(os->evsel, os->id, os->nr);
	if (stat_config.aggr_mode == AGGR_NONE)
		fprintf(os->fh, " ");
	fprintf(os->fh, " ");
}
813
/*
 * Print one metric in standard (human-readable) format as
 * " # <value> <unit>", padded to METRIC_LEN columns.  A NULL @unit or
 * @fmt means "no metric": print padding only, so columns stay aligned.
 * Honors a line break previously requested through new_line_std().
 */
static void print_metric_std(void *ctx, const char *color, const char *fmt,
			     const char *unit, double val)
{
	struct outstate *os = ctx;
	FILE *out = os->fh;
	int n;
	bool newline = os->newline;

	/* consume the pending break flag before any early return */
	os->newline = false;

	if (unit == NULL || fmt == NULL) {
		fprintf(out, "%-*s", METRIC_LEN, "");
		return;
	}

	if (newline)
		do_new_line_std(os);

	n = fprintf(out, " # ");
	if (color)
		n += color_fprintf(out, color, fmt, val);
	else
		n += fprintf(out, fmt, val);
	/* pad the unit so the whole metric field is METRIC_LEN wide */
	fprintf(out, " %-*s", METRIC_LEN - n - 1, unit);
}
839
Andi Kleen92a61f62016-02-29 14:36:21 -0800840static void new_line_csv(void *ctx)
841{
842 struct outstate *os = ctx;
843 int i;
844
845 fputc('\n', os->fh);
846 if (os->prefix)
847 fprintf(os->fh, "%s%s", os->prefix, csv_sep);
Andi Kleen44d49a62016-02-29 14:36:22 -0800848 aggr_printout(os->evsel, os->id, os->nr);
Andi Kleen92a61f62016-02-29 14:36:21 -0800849 for (i = 0; i < os->nfields; i++)
850 fputs(csv_sep, os->fh);
851}
852
853static void print_metric_csv(void *ctx,
854 const char *color __maybe_unused,
855 const char *fmt, const char *unit, double val)
856{
857 struct outstate *os = ctx;
858 FILE *out = os->fh;
859 char buf[64], *vals, *ends;
860
861 if (unit == NULL || fmt == NULL) {
862 fprintf(out, "%s%s%s%s", csv_sep, csv_sep, csv_sep, csv_sep);
863 return;
864 }
865 snprintf(buf, sizeof(buf), fmt, val);
866 vals = buf;
867 while (isspace(*vals))
868 vals++;
869 ends = vals;
870 while (isdigit(*ends) || *ends == '.')
871 ends++;
872 *ends = 0;
873 while (isspace(*unit))
874 unit++;
875 fprintf(out, "%s%s%s%s", csv_sep, vals, csv_sep, unit);
876}
877
/* Fixed column width for each metric in --metric-only mode */
#define METRIC_ONLY_LEN 20
879
/*
 * Filter out some columns that don't work well in metrics only mode:
 * plain rates, frequencies and CPU utilization are redundant there.
 * Returns false for a NULL unit or any unit matching a filtered
 * substring, true otherwise.
 */
static bool valid_only_metric(const char *unit)
{
	static const char * const filtered[] = {
		"/sec", "hz", "Hz", "CPUs utilized",
	};
	size_t i;

	if (!unit)
		return false;

	for (i = 0; i < sizeof(filtered) / sizeof(filtered[0]); i++) {
		if (strstr(unit, filtered[i]))
			return false;
	}
	return true;
}
893
/*
 * Disambiguate generic "of all ..." units by prefixing the event name,
 * e.g. "branch-misses of all branches".  Returns @unit unchanged when
 * no rewrite is needed, otherwise returns @buf.
 *
 * NOTE(review): the snprintf bound is hard-coded to 1024 — this assumes
 * every caller passes a buffer of at least 1024 bytes (the visible
 * callers all pass char[1024]); confirm before adding new callers.
 */
static const char *fixunit(char *buf, struct perf_evsel *evsel,
			   const char *unit)
{
	if (!strncmp(unit, "of all", 6)) {
		snprintf(buf, 1024, "%s %s", perf_evsel__name(evsel),
			 unit);
		return buf;
	}
	return unit;
}
904
/*
 * Print one metric value for --metric-only mode, padded to a fixed
 * METRIC_ONLY_LEN column (widened if the unit header is longer, so the
 * value lines up under its header).  Metrics filtered out by
 * valid_only_metric() are suppressed entirely.
 */
static void print_metric_only(void *ctx, const char *color, const char *fmt,
			      const char *unit, double val)
{
	struct outstate *os = ctx;
	FILE *out = os->fh;
	int n;
	char buf[1024];
	unsigned mlen = METRIC_ONLY_LEN;

	if (!valid_only_metric(unit))
		return;
	unit = fixunit(buf, os->evsel, unit);
	if (color)
		n = color_fprintf(out, color, fmt, val);
	else
		n = fprintf(out, fmt, val);
	/* clamp so the trailing pad width below cannot go negative */
	if (n > METRIC_ONLY_LEN)
		n = METRIC_ONLY_LEN;
	/* widen the column when the unit header itself is longer */
	if (mlen < strlen(unit))
		mlen = strlen(unit) + 1;
	fprintf(out, "%*s", mlen - n, "");
}
927
928static void print_metric_only_csv(void *ctx, const char *color __maybe_unused,
929 const char *fmt,
930 const char *unit, double val)
931{
932 struct outstate *os = ctx;
933 FILE *out = os->fh;
934 char buf[64], *vals, *ends;
935 char tbuf[1024];
936
937 if (!valid_only_metric(unit))
938 return;
939 unit = fixunit(tbuf, os->evsel, unit);
940 snprintf(buf, sizeof buf, fmt, val);
941 vals = buf;
942 while (isspace(*vals))
943 vals++;
944 ends = vals;
945 while (isdigit(*ends) || *ends == '.')
946 ends++;
947 *ends = 0;
948 fprintf(out, "%s%s", vals, csv_sep);
949}
950
/*
 * Line-break callback for --metric-only mode: intentionally empty,
 * since all metrics of one aggregation unit stay on a single line.
 */
static void new_line_metric(void *ctx __maybe_unused)
{
}
954
955static void print_metric_header(void *ctx, const char *color __maybe_unused,
956 const char *fmt __maybe_unused,
957 const char *unit, double val __maybe_unused)
958{
959 struct outstate *os = ctx;
960 char tbuf[1024];
961
962 if (!valid_only_metric(unit))
963 return;
964 unit = fixunit(tbuf, os->evsel, unit);
965 if (csv_output)
966 fprintf(os->fh, "%s%s", unit, csv_sep);
967 else
968 fprintf(os->fh, "%-*s ", METRIC_ONLY_LEN, unit);
969}
970
/*
 * Print the count column for a nanosecond-based counter (e.g.
 * task-clock) as milliseconds, followed by the unit and the event name
 * (suffixed with " (msec)" in human-readable mode).
 */
static void nsec_printout(int id, int nr, struct perf_evsel *evsel, double avg)
{
	FILE *output = stat_config.output;
	double msecs = avg / 1e6;	/* counter value is in nanoseconds */
	const char *fmt_v, *fmt_n;
	char name[25];

	fmt_v = csv_output ? "%.6f%s" : "%18.6f%s";
	fmt_n = csv_output ? "%s" : "%-25s";

	aggr_printout(evsel, id, nr);

	scnprintf(name, sizeof(name), "%s%s",
		  perf_evsel__name(evsel), csv_output ? "" : " (msec)");

	fprintf(output, fmt_v, msecs, csv_sep);

	if (csv_output)
		fprintf(output, "%s%s", evsel->unit, csv_sep);
	else
		fprintf(output, "%-*s%s", unit_width, evsel->unit, csv_sep);

	fprintf(output, fmt_n, name);

	if (evsel->cgrp)
		fprintf(output, "%s%s", csv_sep, evsel->cgrp->name);
}
998
Andi Kleen44d49a62016-02-29 14:36:22 -0800999static int first_shadow_cpu(struct perf_evsel *evsel, int id)
1000{
1001 int i;
1002
1003 if (!aggr_get_id)
1004 return 0;
1005
1006 if (stat_config.aggr_mode == AGGR_NONE)
1007 return id;
1008
1009 if (stat_config.aggr_mode == AGGR_GLOBAL)
1010 return 0;
1011
1012 for (i = 0; i < perf_evsel__nr_cpus(evsel); i++) {
1013 int cpu2 = perf_evsel__cpus(evsel)->map[i];
1014
1015 if (aggr_get_id(evsel_list->cpus, cpu2) == id)
1016 return cpu2;
1017 }
1018 return 0;
1019}
1020
/*
 * Print the count column for an ordinary (absolute-count) counter,
 * followed by its unit and the event name.  A fractional scale factor
 * selects a two-decimal format; big_num adds thousands separators (')
 * in human-readable mode.
 */
static void abs_printout(int id, int nr, struct perf_evsel *evsel, double avg)
{
	FILE *output = stat_config.output;
	double sc = evsel->scale;
	const char *fmt;

	if (csv_output) {
		fmt = floor(sc) != sc ? "%.2f%s" : "%.0f%s";
	} else {
		if (big_num)
			fmt = floor(sc) != sc ? "%'18.2f%s" : "%'18.0f%s";
		else
			fmt = floor(sc) != sc ? "%18.2f%s" : "%18.0f%s";
	}

	aggr_printout(evsel, id, nr);

	fprintf(output, fmt, avg, csv_sep);

	if (evsel->unit)
		fprintf(output, "%-*s%s",
			csv_output ? 0 : unit_width,
			evsel->unit, csv_sep);

	fprintf(output, "%-*s", csv_output ? 0 : 25, perf_evsel__name(evsel));

	if (evsel->cgrp)
		fprintf(output, "%s%s", csv_sep, evsel->cgrp->name);
}
Jiri Olsa556b1fb2015-06-03 16:25:56 +02001050
Andi Kleenf9483392016-01-30 09:06:50 -08001051static void printout(int id, int nr, struct perf_evsel *counter, double uval,
Andi Kleencb110f42016-01-30 09:06:51 -08001052 char *prefix, u64 run, u64 ena, double noise)
Andi Kleeneedfcb42015-11-02 17:50:21 -08001053{
Andi Kleen140aead2016-01-30 09:06:49 -08001054 struct perf_stat_output_ctx out;
Andi Kleenf9483392016-01-30 09:06:50 -08001055 struct outstate os = {
1056 .fh = stat_config.output,
Andi Kleen44d49a62016-02-29 14:36:22 -08001057 .prefix = prefix ? prefix : "",
1058 .id = id,
1059 .nr = nr,
1060 .evsel = counter,
Andi Kleenf9483392016-01-30 09:06:50 -08001061 };
Andi Kleen140aead2016-01-30 09:06:49 -08001062 print_metric_t pm = print_metric_std;
1063 void (*nl)(void *);
Jiri Olsa556b1fb2015-06-03 16:25:56 +02001064
Andi Kleen54b50912016-03-03 15:57:36 -08001065 if (metric_only) {
1066 nl = new_line_metric;
1067 if (csv_output)
1068 pm = print_metric_only_csv;
1069 else
1070 pm = print_metric_only;
1071 } else
1072 nl = new_line_std;
Andi Kleeneedfcb42015-11-02 17:50:21 -08001073
Andi Kleen54b50912016-03-03 15:57:36 -08001074 if (csv_output && !metric_only) {
Andi Kleen92a61f62016-02-29 14:36:21 -08001075 static int aggr_fields[] = {
1076 [AGGR_GLOBAL] = 0,
1077 [AGGR_THREAD] = 1,
1078 [AGGR_NONE] = 1,
1079 [AGGR_SOCKET] = 2,
1080 [AGGR_CORE] = 2,
1081 };
1082
1083 pm = print_metric_csv;
1084 nl = new_line_csv;
1085 os.nfields = 3;
1086 os.nfields += aggr_fields[stat_config.aggr_mode];
1087 if (counter->cgrp)
1088 os.nfields++;
1089 }
Andi Kleenb002f3b2016-02-17 14:44:00 -08001090 if (run == 0 || ena == 0 || counter->counts->scaled == -1) {
Andi Kleen54b50912016-03-03 15:57:36 -08001091 if (metric_only) {
1092 pm(&os, NULL, "", "", 0);
1093 return;
1094 }
Andi Kleencb110f42016-01-30 09:06:51 -08001095 aggr_printout(counter, id, nr);
1096
1097 fprintf(stat_config.output, "%*s%s",
1098 csv_output ? 0 : 18,
1099 counter->supported ? CNTR_NOT_COUNTED : CNTR_NOT_SUPPORTED,
1100 csv_sep);
1101
1102 fprintf(stat_config.output, "%-*s%s",
1103 csv_output ? 0 : unit_width,
1104 counter->unit, csv_sep);
1105
1106 fprintf(stat_config.output, "%*s",
1107 csv_output ? 0 : -25,
1108 perf_evsel__name(counter));
1109
1110 if (counter->cgrp)
1111 fprintf(stat_config.output, "%s%s",
1112 csv_sep, counter->cgrp->name);
1113
Andi Kleen92a61f62016-02-29 14:36:21 -08001114 if (!csv_output)
1115 pm(&os, NULL, NULL, "", 0);
1116 print_noise(counter, noise);
Andi Kleencb110f42016-01-30 09:06:51 -08001117 print_running(run, ena);
Andi Kleen92a61f62016-02-29 14:36:21 -08001118 if (csv_output)
1119 pm(&os, NULL, NULL, "", 0);
Andi Kleencb110f42016-01-30 09:06:51 -08001120 return;
1121 }
1122
Andi Kleen54b50912016-03-03 15:57:36 -08001123 if (metric_only)
1124 /* nothing */;
1125 else if (nsec_counter(counter))
Andi Kleeneedfcb42015-11-02 17:50:21 -08001126 nsec_printout(id, nr, counter, uval);
1127 else
1128 abs_printout(id, nr, counter, uval);
1129
Andi Kleen140aead2016-01-30 09:06:49 -08001130 out.print_metric = pm;
1131 out.new_line = nl;
1132 out.ctx = &os;
1133
Andi Kleen54b50912016-03-03 15:57:36 -08001134 if (csv_output && !metric_only) {
Andi Kleen92a61f62016-02-29 14:36:21 -08001135 print_noise(counter, noise);
1136 print_running(run, ena);
1137 }
1138
1139 perf_stat__print_shadow_stats(counter, uval,
Andi Kleen44d49a62016-02-29 14:36:22 -08001140 first_shadow_cpu(counter, id),
Andi Kleen140aead2016-01-30 09:06:49 -08001141 &out);
Andi Kleen54b50912016-03-03 15:57:36 -08001142 if (!csv_output && !metric_only) {
Andi Kleen92a61f62016-02-29 14:36:21 -08001143 print_noise(counter, noise);
1144 print_running(run, ena);
1145 }
Jiri Olsa556b1fb2015-06-03 16:25:56 +02001146}
1147
/*
 * Refresh the shadow statistics for every aggregation unit: for each
 * unit, sum each counter's scaled value over the CPUs mapped to that
 * unit and feed it to the shadow-stat engine, stored at the unit's
 * first CPU (see first_shadow_cpu()).
 */
static void aggr_update_shadow(void)
{
	int cpu, s2, id, s;
	u64 val;
	struct perf_evsel *counter;

	for (s = 0; s < aggr_map->nr; s++) {
		id = aggr_map->map[s];
		evlist__for_each_entry(evsel_list, counter) {
			val = 0;
			for (cpu = 0; cpu < perf_evsel__nr_cpus(counter); cpu++) {
				s2 = aggr_get_id(evsel_list->cpus, cpu);
				if (s2 != id)
					continue;
				val += perf_counts(counter->counts, cpu, 0)->val;
			}
			val = val * counter->scale;
			perf_stat__update_shadow_stats(counter, &val,
						       first_shadow_cpu(counter, id));
		}
	}
}
1170
/*
 * Print per-socket / per-core aggregated results: for each aggregation
 * unit, sum every counter's counts over the CPUs belonging to that unit
 * and print one line per counter — or, with --metric-only, a single
 * line per unit.  Requires an aggregation map/resolver to be set up.
 */
static void print_aggr(char *prefix)
{
	FILE *output = stat_config.output;
	struct perf_evsel *counter;
	int cpu, s, s2, id, nr;
	double uval;
	u64 ena, run, val;
	bool first;

	if (!(aggr_map || aggr_get_id))
		return;

	aggr_update_shadow();

	/*
	 * With metric_only everything is on a single line.
	 * Without each counter has its own line.
	 */
	for (s = 0; s < aggr_map->nr; s++) {
		if (prefix && metric_only)
			fprintf(output, "%s", prefix);

		id = aggr_map->map[s];
		first = true;
		evlist__for_each_entry(evsel_list, counter) {
			val = ena = run = 0;
			nr = 0;
			/* accumulate over all CPUs that map to this unit */
			for (cpu = 0; cpu < perf_evsel__nr_cpus(counter); cpu++) {
				s2 = aggr_get_id(perf_evsel__cpus(counter), cpu);
				if (s2 != id)
					continue;
				val += perf_counts(counter->counts, cpu, 0)->val;
				ena += perf_counts(counter->counts, cpu, 0)->ena;
				run += perf_counts(counter->counts, cpu, 0)->run;
				nr++;
			}
			/* metric-only: unit columns printed once per line */
			if (first && metric_only) {
				first = false;
				aggr_printout(counter, id, nr);
			}
			if (prefix && !metric_only)
				fprintf(output, "%s", prefix);

			uval = val * counter->scale;
			printout(id, nr, counter, uval, prefix, run, ena, 1.0);
			if (!metric_only)
				fputc('\n', output);
		}
		if (metric_only)
			fputc('\n', output);
	}
}
1223
Jiri Olsa32b8af82015-06-26 11:29:27 +02001224static void print_aggr_thread(struct perf_evsel *counter, char *prefix)
1225{
Jiri Olsa58215222015-07-21 14:31:24 +02001226 FILE *output = stat_config.output;
Jiri Olsa32b8af82015-06-26 11:29:27 +02001227 int nthreads = thread_map__nr(counter->threads);
1228 int ncpus = cpu_map__nr(counter->cpus);
1229 int cpu, thread;
1230 double uval;
1231
1232 for (thread = 0; thread < nthreads; thread++) {
1233 u64 ena = 0, run = 0, val = 0;
1234
1235 for (cpu = 0; cpu < ncpus; cpu++) {
1236 val += perf_counts(counter->counts, cpu, thread)->val;
1237 ena += perf_counts(counter->counts, cpu, thread)->ena;
1238 run += perf_counts(counter->counts, cpu, thread)->run;
1239 }
1240
1241 if (prefix)
1242 fprintf(output, "%s", prefix);
1243
1244 uval = val * counter->scale;
Andi Kleencb110f42016-01-30 09:06:51 -08001245 printout(thread, 0, counter, uval, prefix, run, ena, 1.0);
Jiri Olsa32b8af82015-06-26 11:29:27 +02001246 fputc('\n', output);
1247 }
1248}
1249
Ingo Molnar42202dd2009-06-13 14:57:28 +02001250/*
1251 * Print out the results of a single counter:
Stephane Eranianf5b4a9c32010-11-16 11:05:01 +02001252 * aggregated counts in system-wide mode
Ingo Molnar42202dd2009-06-13 14:57:28 +02001253 */
Stephane Eranian13370a92013-01-29 12:47:44 +01001254static void print_counter_aggr(struct perf_evsel *counter, char *prefix)
Ingo Molnar42202dd2009-06-13 14:57:28 +02001255{
Jiri Olsa58215222015-07-21 14:31:24 +02001256 FILE *output = stat_config.output;
Jiri Olsa581cc8a2015-10-16 12:41:03 +02001257 struct perf_stat_evsel *ps = counter->priv;
Arnaldo Carvalho de Melo69aad6f2011-01-03 16:39:04 -02001258 double avg = avg_stats(&ps->res_stats[0]);
Stephane Eranian410136f2013-11-12 17:58:49 +01001259 double uval;
Andi Kleend73515c2015-03-11 07:16:27 -07001260 double avg_enabled, avg_running;
1261
1262 avg_enabled = avg_stats(&ps->res_stats[1]);
1263 avg_running = avg_stats(&ps->res_stats[2]);
Ingo Molnar42202dd2009-06-13 14:57:28 +02001264
Andi Kleen54b50912016-03-03 15:57:36 -08001265 if (prefix && !metric_only)
Stephane Eranian13370a92013-01-29 12:47:44 +01001266 fprintf(output, "%s", prefix);
1267
Stephane Eranian410136f2013-11-12 17:58:49 +01001268 uval = avg * counter->scale;
Andi Kleencb110f42016-01-30 09:06:51 -08001269 printout(-1, 0, counter, uval, prefix, avg_running, avg_enabled, avg);
Andi Kleen54b50912016-03-03 15:57:36 -08001270 if (!metric_only)
1271 fprintf(output, "\n");
Ingo Molnar42202dd2009-06-13 14:57:28 +02001272}
1273
Stephane Eranianf5b4a9c32010-11-16 11:05:01 +02001274/*
1275 * Print out the results of a single counter:
1276 * does not use aggregated count in system-wide
1277 */
Stephane Eranian13370a92013-01-29 12:47:44 +01001278static void print_counter(struct perf_evsel *counter, char *prefix)
Stephane Eranianf5b4a9c32010-11-16 11:05:01 +02001279{
Jiri Olsa58215222015-07-21 14:31:24 +02001280 FILE *output = stat_config.output;
Stephane Eranianf5b4a9c32010-11-16 11:05:01 +02001281 u64 ena, run, val;
Stephane Eranian410136f2013-11-12 17:58:49 +01001282 double uval;
Stephane Eranianf5b4a9c32010-11-16 11:05:01 +02001283 int cpu;
1284
Yan, Zheng7ae92e72012-09-10 15:53:50 +08001285 for (cpu = 0; cpu < perf_evsel__nr_cpus(counter); cpu++) {
Jiri Olsaa6fa0032015-06-26 11:29:11 +02001286 val = perf_counts(counter->counts, cpu, 0)->val;
1287 ena = perf_counts(counter->counts, cpu, 0)->ena;
1288 run = perf_counts(counter->counts, cpu, 0)->run;
Stephane Eranian13370a92013-01-29 12:47:44 +01001289
1290 if (prefix)
1291 fprintf(output, "%s", prefix);
1292
Stephane Eranian410136f2013-11-12 17:58:49 +01001293 uval = val * counter->scale;
Andi Kleencb110f42016-01-30 09:06:51 -08001294 printout(cpu, 0, counter, uval, prefix, run, ena, 1.0);
Stephane Eranianf5b4a9c32010-11-16 11:05:01 +02001295
Stephane Eranian4aa90152011-08-15 22:22:33 +02001296 fputc('\n', output);
Stephane Eranianf5b4a9c32010-11-16 11:05:01 +02001297 }
1298}
1299
/*
 * --metric-only combined with -A (no aggregation): print one line per
 * CPU, each line carrying every counter's metrics for that CPU.
 */
static void print_no_aggr_metric(char *prefix)
{
	int cpu;
	int nrcpus = 0;
	struct perf_evsel *counter;
	u64 ena, run, val;
	double uval;

	nrcpus = evsel_list->cpus->nr;
	for (cpu = 0; cpu < nrcpus; cpu++) {
		bool first = true;

		if (prefix)
			fputs(prefix, stat_config.output);
		evlist__for_each_entry(evsel_list, counter) {
			/* the CPU column is printed once, before the first counter */
			if (first) {
				aggr_printout(counter, cpu, 0);
				first = false;
			}
			val = perf_counts(counter->counts, cpu, 0)->val;
			ena = perf_counts(counter->counts, cpu, 0)->ena;
			run = perf_counts(counter->counts, cpu, 0)->run;

			uval = val * counter->scale;
			printout(cpu, 0, counter, uval, prefix, run, ena, 1.0);
		}
		fputc('\n', stat_config.output);
	}
}
1329
/*
 * Width of the aggregation prefix column per mode, used to indent the
 * metric header line so it lines up with the data lines below it.
 */
static int aggr_header_lens[] = {
	[AGGR_CORE] = 18,
	[AGGR_SOCKET] = 12,
	[AGGR_NONE] = 6,
	[AGGR_THREAD] = 24,
	[AGGR_GLOBAL] = 0,
};
1337
Andi Kleenc51fd632016-05-24 12:52:39 -07001338static const char *aggr_header_csv[] = {
1339 [AGGR_CORE] = "core,cpus,",
1340 [AGGR_SOCKET] = "socket,cpus",
1341 [AGGR_NONE] = "cpu,",
1342 [AGGR_THREAD] = "comm-pid,",
1343 [AGGR_GLOBAL] = ""
1344};
1345
Andi Kleen41c8ca22016-05-24 12:52:38 -07001346static void print_metric_headers(const char *prefix, bool no_indent)
Andi Kleen54b50912016-03-03 15:57:36 -08001347{
1348 struct perf_stat_output_ctx out;
1349 struct perf_evsel *counter;
1350 struct outstate os = {
1351 .fh = stat_config.output
1352 };
1353
1354 if (prefix)
1355 fprintf(stat_config.output, "%s", prefix);
1356
Andi Kleen41c8ca22016-05-24 12:52:38 -07001357 if (!csv_output && !no_indent)
Andi Kleen54b50912016-03-03 15:57:36 -08001358 fprintf(stat_config.output, "%*s",
1359 aggr_header_lens[stat_config.aggr_mode], "");
Andi Kleenc51fd632016-05-24 12:52:39 -07001360 if (csv_output) {
1361 if (stat_config.interval)
1362 fputs("time,", stat_config.output);
1363 fputs(aggr_header_csv[stat_config.aggr_mode],
1364 stat_config.output);
1365 }
Andi Kleen54b50912016-03-03 15:57:36 -08001366
1367 /* Print metrics headers only */
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03001368 evlist__for_each_entry(evsel_list, counter) {
Andi Kleen54b50912016-03-03 15:57:36 -08001369 os.evsel = counter;
1370 out.ctx = &os;
1371 out.print_metric = print_metric_header;
1372 out.new_line = new_line_metric;
1373 os.evsel = counter;
1374 perf_stat__print_shadow_stats(counter, 0,
1375 0,
1376 &out);
1377 }
1378 fputc('\n', stat_config.output);
1379}
1380
/*
 * Interval (-I) mode: format the leading timestamp into @prefix for the
 * caller, and print the column header line on the first call (and again
 * every 25 intervals so it stays visible while scrolling).
 */
static void print_interval(char *prefix, struct timespec *ts)
{
	FILE *output = stat_config.output;
	static int num_print_interval;

	sprintf(prefix, "%6lu.%09lu%s", ts->tv_sec, ts->tv_nsec, csv_sep);

	if (num_print_interval == 0 && !csv_output) {
		switch (stat_config.aggr_mode) {
		case AGGR_SOCKET:
			fprintf(output, "# time socket cpus");
			if (!metric_only)
				fprintf(output, " counts %*s events\n", unit_width, "unit");
			break;
		case AGGR_CORE:
			fprintf(output, "# time core cpus");
			if (!metric_only)
				fprintf(output, " counts %*s events\n", unit_width, "unit");
			break;
		case AGGR_NONE:
			fprintf(output, "# time CPU");
			if (!metric_only)
				fprintf(output, " counts %*s events\n", unit_width, "unit");
			break;
		case AGGR_THREAD:
			fprintf(output, "# time comm-pid");
			if (!metric_only)
				fprintf(output, " counts %*s events\n", unit_width, "unit");
			break;
		case AGGR_GLOBAL:
		default:
			fprintf(output, "# time");
			if (!metric_only)
				fprintf(output, " counts %*s events\n", unit_width, "unit");
			/* fall through — benign: AGGR_UNSET only breaks */
		case AGGR_UNSET:
			break;
		}
	}

	if (num_print_interval == 0 && metric_only)
		print_metric_headers(" ", true);
	if (++num_print_interval == 25)
		num_print_interval = 0;
}
1425
/*
 * Print the "Performance counter stats for ..." banner describing what
 * was measured: system-wide, a CPU list, a command line, or a pid/tid
 * target.  Suppressed entirely in CSV mode.
 */
static void print_header(int argc, const char **argv)
{
	FILE *output = stat_config.output;
	int i;

	fflush(stdout);

	if (!csv_output) {
		fprintf(output, "\n");
		fprintf(output, " Performance counter stats for ");
		if (target.system_wide)
			fprintf(output, "\'system wide");
		else if (target.cpu_list)
			fprintf(output, "\'CPU(s) %s", target.cpu_list);
		else if (!target__has_task(&target)) {
			/* a workload: argv may be NULL when reading from a pipe */
			fprintf(output, "\'%s", argv ? argv[0] : "pipe");
			for (i = 1; argv && (i < argc); i++)
				fprintf(output, " %s", argv[i]);
		} else if (target.pid)
			fprintf(output, "process id \'%s", target.pid);
		else
			fprintf(output, "thread id \'%s", target.tid);

		fprintf(output, "\'");
		if (run_count > 1)
			fprintf(output, " (%d runs)", run_count);
		fprintf(output, ":\n\n");
	}
}
1455
/*
 * Print the trailing wall-clock summary line, plus its run-to-run
 * variation when the workload was repeated (-r).
 */
static void print_footer(void)
{
	FILE *output = stat_config.output;

	if (!null_run)
		fprintf(output, "\n");
	fprintf(output, " %17.9f seconds time elapsed",
			avg_stats(&walltime_nsecs_stats)/1e9);
	if (run_count > 1) {
		fprintf(output, " ");
		print_noise_pct(stddev_stats(&walltime_nsecs_stats),
				avg_stats(&walltime_nsecs_stats));
	}
	fprintf(output, "\n\n");
}
1471
/*
 * Top-level result printer, called once at the end of a run or once per
 * interval (@ts non-NULL): prints the header (or interval timestamp
 * prefix), dispatches to the printer matching the aggregation mode, and
 * finishes with the wall-clock footer.
 */
static void print_counters(struct timespec *ts, int argc, const char **argv)
{
	int interval = stat_config.interval;
	struct perf_evsel *counter;
	char buf[64], *prefix = NULL;

	/* Do not print anything if we record to the pipe. */
	if (STAT_RECORD && perf_stat.file.is_pipe)
		return;

	if (interval)
		print_interval(prefix = buf, ts);
	else
		print_header(argc, argv);

	if (metric_only) {
		static int num_print_iv;

		/* interval mode prints its own headers in print_interval() */
		if (num_print_iv == 0 && !interval)
			print_metric_headers(prefix, false);
		if (num_print_iv++ == 25)
			num_print_iv = 0;
		if (stat_config.aggr_mode == AGGR_GLOBAL && prefix)
			fprintf(stat_config.output, "%s", prefix);
	}

	switch (stat_config.aggr_mode) {
	case AGGR_CORE:
	case AGGR_SOCKET:
		print_aggr(prefix);
		break;
	case AGGR_THREAD:
		evlist__for_each_entry(evsel_list, counter)
			print_aggr_thread(counter, prefix);
		break;
	case AGGR_GLOBAL:
		evlist__for_each_entry(evsel_list, counter)
			print_counter_aggr(counter, prefix);
		if (metric_only)
			fputc('\n', stat_config.output);
		break;
	case AGGR_NONE:
		if (metric_only)
			print_no_aggr_metric(prefix);
		else {
			evlist__for_each_entry(evsel_list, counter)
				print_counter(counter, prefix);
		}
		break;
	case AGGR_UNSET:
	default:
		break;
	}

	if (!interval && !csv_output)
		print_footer();

	fflush(stat_config.output);
}
1531
/* Signal deferred for re-raising in sig_atexit(); -1 = none pending. */
static volatile int signr = -1;

/*
 * Signal handler: note the signal for later re-raise and, when there is
 * no child (or we are in interval mode), ask the main loop to stop.
 */
static void skip_signal(int signo)
{
	if ((child_pid == -1) || stat_config.interval)
		done = 1;

	signr = signo;
	/*
	 * render child_pid harmless
	 * won't send SIGTERM to a random
	 * process in case of race condition
	 * and fast PID recycling
	 */
	child_pid = -1;
}
1548
/*
 * atexit handler: terminate a still-running child, then re-raise any
 * deferred signal with its default action so our exit status reflects
 * the signal.  SIGCHLD is blocked around the child_pid check to avoid
 * racing with skip_signal() clearing it.
 */
static void sig_atexit(void)
{
	sigset_t set, oset;

	/*
	 * avoid race condition with SIGCHLD handler
	 * in skip_signal() which is modifying child_pid
	 * goal is to avoid send SIGTERM to a random
	 * process
	 */
	sigemptyset(&set);
	sigaddset(&set, SIGCHLD);
	sigprocmask(SIG_BLOCK, &set, &oset);

	if (child_pid != -1)
		kill(child_pid, SIGTERM);

	sigprocmask(SIG_SETMASK, &oset, NULL);

	if (signr == -1)
		return;

	/* Restore default disposition and re-deliver to ourselves. */
	signal(signr, SIG_DFL);
	kill(getpid(), signr);
}
Ingo Molnarddcacfa2009-04-20 15:37:32 +02001574
Irina Tirdea1d037ca2012-09-11 01:15:03 +03001575static int stat__set_big_num(const struct option *opt __maybe_unused,
1576 const char *s __maybe_unused, int unset)
Stephane Eraniand7470b62010-12-01 18:49:05 +02001577{
1578 big_num_opt = unset ? 0 : 1;
1579 return 0;
1580}
1581
Andi Kleen44b1e602016-05-30 12:49:42 -03001582static int enable_metric_only(const struct option *opt __maybe_unused,
1583 const char *s __maybe_unused, int unset)
1584{
1585 force_metric_only = true;
1586 metric_only = !unset;
1587 return 0;
1588}
1589
/*
 * Command line options for 'perf stat'; also parsed by
 * __cmd_record() for 'perf stat record'.
 */
static const struct option stat_options[] = {
	OPT_BOOLEAN('T', "transaction", &transaction_run,
		    "hardware transaction statistics"),
	OPT_CALLBACK('e', "event", &evsel_list, "event",
		     "event selector. use 'perf list' to list available events",
		     parse_events_option),
	OPT_CALLBACK(0, "filter", &evsel_list, "filter",
		     "event filter", parse_filter),
	OPT_BOOLEAN('i', "no-inherit", &no_inherit,
		    "child tasks do not inherit counters"),
	OPT_STRING('p', "pid", &target.pid, "pid",
		   "stat events on existing process id"),
	OPT_STRING('t', "tid", &target.tid, "tid",
		   "stat events on existing thread id"),
	OPT_BOOLEAN('a', "all-cpus", &target.system_wide,
		    "system-wide collection from all CPUs"),
	OPT_BOOLEAN('g', "group", &group,
		    "put the counters into a counter group"),
	OPT_BOOLEAN('c', "scale", &stat_config.scale, "scale/normalize counters"),
	OPT_INCR('v', "verbose", &verbose,
		 "be more verbose (show counter open errors, etc)"),
	OPT_INTEGER('r', "repeat", &run_count,
		    "repeat command and print average + stddev (max: 100, forever: 0)"),
	OPT_BOOLEAN('n', "null", &null_run,
		    "null run - dont start any counters"),
	OPT_INCR('d', "detailed", &detailed_run,
		 "detailed run - start a lot of events"),
	OPT_BOOLEAN('S', "sync", &sync_run,
		    "call sync() before starting a run"),
	OPT_CALLBACK_NOOPT('B', "big-num", NULL, NULL,
			   "print large numbers with thousands\' separators",
			   stat__set_big_num),
	OPT_STRING('C', "cpu", &target.cpu_list, "cpu",
		   "list of cpus to monitor in system-wide"),
	OPT_SET_UINT('A', "no-aggr", &stat_config.aggr_mode,
		    "disable CPU count aggregation", AGGR_NONE),
	OPT_STRING('x', "field-separator", &csv_sep, "separator",
		   "print counts with custom separator"),
	OPT_CALLBACK('G', "cgroup", &evsel_list, "name",
		     "monitor event in cgroup name only", parse_cgroups),
	OPT_STRING('o', "output", &output_name, "file", "output file name"),
	OPT_BOOLEAN(0, "append", &append_file, "append to the output file"),
	OPT_INTEGER(0, "log-fd", &output_fd,
		    "log output to fd, instead of stderr"),
	OPT_STRING(0, "pre", &pre_cmd, "command",
		   "command to run prior to the measured command"),
	OPT_STRING(0, "post", &post_cmd, "command",
		   "command to run after to the measured command"),
	OPT_UINTEGER('I', "interval-print", &stat_config.interval,
		    "print counts at regular interval in ms (>= 10)"),
	OPT_SET_UINT(0, "per-socket", &stat_config.aggr_mode,
		     "aggregate counts per processor socket", AGGR_SOCKET),
	OPT_SET_UINT(0, "per-core", &stat_config.aggr_mode,
		     "aggregate counts per physical processor core", AGGR_CORE),
	OPT_SET_UINT(0, "per-thread", &stat_config.aggr_mode,
		     "aggregate counts per thread", AGGR_THREAD),
	OPT_UINTEGER('D', "delay", &initial_delay,
		     "ms to wait before starting measurement after program start"),
	OPT_CALLBACK_NOOPT(0, "metric-only", &metric_only, NULL,
			"Only print computed metrics. No raw values", enable_metric_only),
	OPT_BOOLEAN(0, "topdown", &topdown_run,
		    "measure topdown level 1 statistics"),
	OPT_END()
};
1654
/* aggr_get_id_t adapter: socket id for the cpu'th entry of @map. */
static int perf_stat__get_socket(struct cpu_map *map, int cpu)
{
	return cpu_map__get_socket(map, cpu, NULL);
}
1659
/* aggr_get_id_t adapter: core id for the cpu'th entry of @map. */
static int perf_stat__get_core(struct cpu_map *map, int cpu)
{
	return cpu_map__get_core(map, cpu, NULL);
}
1664
Jiri Olsa1e5a2932015-10-25 15:51:18 +01001665static int cpu_map__get_max(struct cpu_map *map)
1666{
1667 int i, max = -1;
1668
1669 for (i = 0; i < map->nr; i++) {
1670 if (map->map[i] > max)
1671 max = map->map[i];
1672 }
1673
1674 return max;
1675}
1676
1677static struct cpu_map *cpus_aggr_map;
1678
1679static int perf_stat__get_aggr(aggr_get_id_t get_id, struct cpu_map *map, int idx)
1680{
1681 int cpu;
1682
1683 if (idx >= map->nr)
1684 return -1;
1685
1686 cpu = map->map[idx];
1687
1688 if (cpus_aggr_map->map[cpu] == -1)
1689 cpus_aggr_map->map[cpu] = get_id(map, idx);
1690
1691 return cpus_aggr_map->map[cpu];
1692}
1693
/* Like perf_stat__get_socket(), but memoized through cpus_aggr_map. */
static int perf_stat__get_socket_cached(struct cpu_map *map, int idx)
{
	return perf_stat__get_aggr(perf_stat__get_socket, map, idx);
}
1698
/* Like perf_stat__get_core(), but memoized through cpus_aggr_map. */
static int perf_stat__get_core_cached(struct cpu_map *map, int idx)
{
	return perf_stat__get_aggr(perf_stat__get_core, map, idx);
}
1703
/*
 * Set up aggregation for a live 'perf stat' run: build the socket/core
 * map for the chosen mode, install the (cached) id lookup, and allocate
 * the per-cpu aggregation cache.  Returns 0 or a negative error.
 */
static int perf_stat_init_aggr_mode(void)
{
	int nr;

	switch (stat_config.aggr_mode) {
	case AGGR_SOCKET:
		if (cpu_map__build_socket_map(evsel_list->cpus, &aggr_map)) {
			perror("cannot build socket map");
			return -1;
		}
		aggr_get_id = perf_stat__get_socket_cached;
		break;
	case AGGR_CORE:
		if (cpu_map__build_core_map(evsel_list->cpus, &aggr_map)) {
			perror("cannot build core map");
			return -1;
		}
		aggr_get_id = perf_stat__get_core_cached;
		break;
	case AGGR_NONE:
	case AGGR_GLOBAL:
	case AGGR_THREAD:
	case AGGR_UNSET:
	default:
		/* These modes need no translation map. */
		break;
	}

	/*
	 * The evsel_list->cpus is the base we operate on,
	 * taking the highest cpu number to be the size of
	 * the aggregation translate cpumap.
	 */
	nr = cpu_map__get_max(evsel_list->cpus);
	cpus_aggr_map = cpu_map__empty_new(nr + 1);
	return cpus_aggr_map ? 0 : -ENOMEM;
}
1740
Masami Hiramatsu544c2ae2015-12-09 11:11:27 +09001741static void perf_stat__exit_aggr_mode(void)
1742{
1743 cpu_map__put(aggr_map);
1744 cpu_map__put(cpus_aggr_map);
1745 aggr_map = NULL;
1746 cpus_aggr_map = NULL;
1747}
1748
Jiri Olsa68d702f2015-11-05 15:40:58 +01001749static inline int perf_env__get_cpu(struct perf_env *env, struct cpu_map *map, int idx)
1750{
1751 int cpu;
1752
1753 if (idx > map->nr)
1754 return -1;
1755
1756 cpu = map->map[idx];
1757
1758 if (cpu >= env->nr_cpus_online)
1759 return -1;
1760
1761 return cpu;
1762}
1763
1764static int perf_env__get_socket(struct cpu_map *map, int idx, void *data)
1765{
1766 struct perf_env *env = data;
1767 int cpu = perf_env__get_cpu(env, map, idx);
1768
1769 return cpu == -1 ? -1 : env->cpu[cpu].socket_id;
1770}
1771
1772static int perf_env__get_core(struct cpu_map *map, int idx, void *data)
1773{
1774 struct perf_env *env = data;
1775 int core = -1, cpu = perf_env__get_cpu(env, map, idx);
1776
1777 if (cpu != -1) {
1778 int socket_id = env->cpu[cpu].socket_id;
1779
1780 /*
1781 * Encode socket in upper 16 bits
1782 * core_id is relative to socket, and
1783 * we need a global id. So we combine
1784 * socket + core id.
1785 */
1786 core = (socket_id << 16) | (env->cpu[cpu].core_id & 0xffff);
1787 }
1788
1789 return core;
1790}
1791
/* Group @cpus by socket id from the perf.data header env into *@sockp. */
static int perf_env__build_socket_map(struct perf_env *env, struct cpu_map *cpus,
				      struct cpu_map **sockp)
{
	return cpu_map__build_map(cpus, sockp, perf_env__get_socket, env);
}
1797
/* Group @cpus by core id from the perf.data header env into *@corep. */
static int perf_env__build_core_map(struct perf_env *env, struct cpu_map *cpus,
				    struct cpu_map **corep)
{
	return cpu_map__build_map(cpus, corep, perf_env__get_core, env);
}
1803
/* aggr_get_id for report mode: socket id from the recorded header env. */
static int perf_stat__get_socket_file(struct cpu_map *map, int idx)
{
	return perf_env__get_socket(map, idx, &perf_stat.session->header.env);
}
1808
/* aggr_get_id for report mode: core id from the recorded header env. */
static int perf_stat__get_core_file(struct cpu_map *map, int idx)
{
	return perf_env__get_core(map, idx, &perf_stat.session->header.env);
}
1813
/*
 * Aggregation setup for 'perf stat report': like
 * perf_stat_init_aggr_mode(), but socket/core topology comes from the
 * perf.data header environment instead of the live system, and no
 * per-cpu cache is needed.
 */
static int perf_stat_init_aggr_mode_file(struct perf_stat *st)
{
	struct perf_env *env = &st->session->header.env;

	switch (stat_config.aggr_mode) {
	case AGGR_SOCKET:
		if (perf_env__build_socket_map(env, evsel_list->cpus, &aggr_map)) {
			perror("cannot build socket map");
			return -1;
		}
		aggr_get_id = perf_stat__get_socket_file;
		break;
	case AGGR_CORE:
		if (perf_env__build_core_map(env, evsel_list->cpus, &aggr_map)) {
			perror("cannot build core map");
			return -1;
		}
		aggr_get_id = perf_stat__get_core_file;
		break;
	case AGGR_NONE:
	case AGGR_GLOBAL:
	case AGGR_THREAD:
	case AGGR_UNSET:
	default:
		break;
	}

	return 0;
}
1843
/*
 * Keep only the topdown events the "cpu" PMU supports (compacting
 * @attr in place, NULL-terminated) and build a parse_events() string
 * from them in *@str: "{a,b,...}" when @use_group, "a,b,..." otherwise,
 * "" when nothing is supported.  Returns 0, or -1 on allocation failure.
 */
static int topdown_filter_events(const char **attr, char **str, bool use_group)
{
	int i;
	int kept = 0;	/* number of supported events retained */
	int len = 0;	/* bytes needed for names + one separator each */
	char *p;

	/* Compact the array in place, dropping unsupported events. */
	for (i = 0; attr[i]; i++) {
		if (!pmu_have_event("cpu", attr[i]))
			continue;
		attr[kept] = attr[i];
		len += strlen(attr[kept]) + 1;
		kept++;
	}
	attr[kept] = NULL;

	/* +1 for the NUL, +2 leaves room for the optional braces. */
	*str = malloc(len + 1 + 2);
	if (!*str)
		return -1;

	p = *str;
	if (kept == 0) {
		*p = 0;
		return 0;
	}

	if (use_group)
		*p++ = '{';
	for (i = 0; i < kept; i++) {
		strcpy(p, attr[i]);
		p += strlen(p);
		*p++ = ',';
	}
	/* Overwrite the trailing comma with '}' + NUL, or just NUL. */
	if (use_group) {
		p[-1] = '}';
		*p = 0;
	} else
		p[-1] = 0;
	return 0;
}
1882
/*
 * Weak default for arches without topdown support: do not group the
 * topdown events and raise no warning (*warn tells the caller whether
 * arch_topdown_group_warn() should be invoked).
 */
__weak bool arch_topdown_check_group(bool *warn)
{
	*warn = false;
	return false;
}
1888
/* Weak default: nothing to warn about on generic arches. */
__weak void arch_topdown_group_warn(void)
{
}
1892
Ingo Molnar2cba3ff2011-05-19 13:30:56 +02001893/*
1894 * Add default attributes, if there were no attributes specified or
1895 * if -d/--detailed, -d -d or -d -d -d is used:
1896 */
1897static int add_default_attributes(void)
1898{
Andi Kleen44b1e602016-05-30 12:49:42 -03001899 int err;
Andi Kleen9dec4472016-02-26 16:27:56 -08001900 struct perf_event_attr default_attrs0[] = {
Arnaldo Carvalho de Melob070a542012-10-01 15:20:58 -03001901
1902 { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_TASK_CLOCK },
1903 { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CONTEXT_SWITCHES },
1904 { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CPU_MIGRATIONS },
1905 { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_PAGE_FAULTS },
1906
1907 { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CPU_CYCLES },
Andi Kleen9dec4472016-02-26 16:27:56 -08001908};
1909 struct perf_event_attr frontend_attrs[] = {
Arnaldo Carvalho de Melob070a542012-10-01 15:20:58 -03001910 { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_STALLED_CYCLES_FRONTEND },
Andi Kleen9dec4472016-02-26 16:27:56 -08001911};
1912 struct perf_event_attr backend_attrs[] = {
Arnaldo Carvalho de Melob070a542012-10-01 15:20:58 -03001913 { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_STALLED_CYCLES_BACKEND },
Andi Kleen9dec4472016-02-26 16:27:56 -08001914};
1915 struct perf_event_attr default_attrs1[] = {
Arnaldo Carvalho de Melob070a542012-10-01 15:20:58 -03001916 { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_INSTRUCTIONS },
1917 { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
1918 { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_MISSES },
1919
1920};
1921
1922/*
1923 * Detailed stats (-d), covering the L1 and last level data caches:
1924 */
1925 struct perf_event_attr detailed_attrs[] = {
1926
1927 { .type = PERF_TYPE_HW_CACHE,
1928 .config =
1929 PERF_COUNT_HW_CACHE_L1D << 0 |
1930 (PERF_COUNT_HW_CACHE_OP_READ << 8) |
1931 (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) },
1932
1933 { .type = PERF_TYPE_HW_CACHE,
1934 .config =
1935 PERF_COUNT_HW_CACHE_L1D << 0 |
1936 (PERF_COUNT_HW_CACHE_OP_READ << 8) |
1937 (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) },
1938
1939 { .type = PERF_TYPE_HW_CACHE,
1940 .config =
1941 PERF_COUNT_HW_CACHE_LL << 0 |
1942 (PERF_COUNT_HW_CACHE_OP_READ << 8) |
1943 (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) },
1944
1945 { .type = PERF_TYPE_HW_CACHE,
1946 .config =
1947 PERF_COUNT_HW_CACHE_LL << 0 |
1948 (PERF_COUNT_HW_CACHE_OP_READ << 8) |
1949 (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) },
1950};
1951
1952/*
1953 * Very detailed stats (-d -d), covering the instruction cache and the TLB caches:
1954 */
1955 struct perf_event_attr very_detailed_attrs[] = {
1956
1957 { .type = PERF_TYPE_HW_CACHE,
1958 .config =
1959 PERF_COUNT_HW_CACHE_L1I << 0 |
1960 (PERF_COUNT_HW_CACHE_OP_READ << 8) |
1961 (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) },
1962
1963 { .type = PERF_TYPE_HW_CACHE,
1964 .config =
1965 PERF_COUNT_HW_CACHE_L1I << 0 |
1966 (PERF_COUNT_HW_CACHE_OP_READ << 8) |
1967 (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) },
1968
1969 { .type = PERF_TYPE_HW_CACHE,
1970 .config =
1971 PERF_COUNT_HW_CACHE_DTLB << 0 |
1972 (PERF_COUNT_HW_CACHE_OP_READ << 8) |
1973 (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) },
1974
1975 { .type = PERF_TYPE_HW_CACHE,
1976 .config =
1977 PERF_COUNT_HW_CACHE_DTLB << 0 |
1978 (PERF_COUNT_HW_CACHE_OP_READ << 8) |
1979 (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) },
1980
1981 { .type = PERF_TYPE_HW_CACHE,
1982 .config =
1983 PERF_COUNT_HW_CACHE_ITLB << 0 |
1984 (PERF_COUNT_HW_CACHE_OP_READ << 8) |
1985 (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) },
1986
1987 { .type = PERF_TYPE_HW_CACHE,
1988 .config =
1989 PERF_COUNT_HW_CACHE_ITLB << 0 |
1990 (PERF_COUNT_HW_CACHE_OP_READ << 8) |
1991 (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) },
1992
1993};
1994
1995/*
1996 * Very, very detailed stats (-d -d -d), adding prefetch events:
1997 */
1998 struct perf_event_attr very_very_detailed_attrs[] = {
1999
2000 { .type = PERF_TYPE_HW_CACHE,
2001 .config =
2002 PERF_COUNT_HW_CACHE_L1D << 0 |
2003 (PERF_COUNT_HW_CACHE_OP_PREFETCH << 8) |
2004 (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) },
2005
2006 { .type = PERF_TYPE_HW_CACHE,
2007 .config =
2008 PERF_COUNT_HW_CACHE_L1D << 0 |
2009 (PERF_COUNT_HW_CACHE_OP_PREFETCH << 8) |
2010 (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) },
2011};
2012
Ingo Molnar2cba3ff2011-05-19 13:30:56 +02002013 /* Set attrs if no event is selected and !null_run: */
2014 if (null_run)
2015 return 0;
2016
Andi Kleen4cabc3d2013-08-21 16:47:26 -07002017 if (transaction_run) {
Andi Kleen4cabc3d2013-08-21 16:47:26 -07002018 if (pmu_have_event("cpu", "cycles-ct") &&
2019 pmu_have_event("cpu", "el-start"))
Jiri Olsaa4547422015-06-03 16:25:53 +02002020 err = parse_events(evsel_list, transaction_attrs, NULL);
Andi Kleen4cabc3d2013-08-21 16:47:26 -07002021 else
Jiri Olsaa4547422015-06-03 16:25:53 +02002022 err = parse_events(evsel_list, transaction_limited_attrs, NULL);
2023 if (err) {
Andi Kleen4cabc3d2013-08-21 16:47:26 -07002024 fprintf(stderr, "Cannot set up transaction events\n");
2025 return -1;
2026 }
2027 return 0;
2028 }
2029
Andi Kleen44b1e602016-05-30 12:49:42 -03002030 if (topdown_run) {
2031 char *str = NULL;
2032 bool warn = false;
2033
2034 if (stat_config.aggr_mode != AGGR_GLOBAL &&
2035 stat_config.aggr_mode != AGGR_CORE) {
2036 pr_err("top down event configuration requires --per-core mode\n");
2037 return -1;
2038 }
2039 stat_config.aggr_mode = AGGR_CORE;
2040 if (nr_cgroups || !target__has_cpu(&target)) {
2041 pr_err("top down event configuration requires system-wide mode (-a)\n");
2042 return -1;
2043 }
2044
2045 if (!force_metric_only)
2046 metric_only = true;
2047 if (topdown_filter_events(topdown_attrs, &str,
2048 arch_topdown_check_group(&warn)) < 0) {
2049 pr_err("Out of memory\n");
2050 return -1;
2051 }
2052 if (topdown_attrs[0] && str) {
2053 if (warn)
2054 arch_topdown_group_warn();
2055 err = parse_events(evsel_list, str, NULL);
2056 if (err) {
2057 fprintf(stderr,
2058 "Cannot set up top down events %s: %d\n",
2059 str, err);
2060 free(str);
2061 return -1;
2062 }
2063 } else {
2064 fprintf(stderr, "System does not support topdown\n");
2065 return -1;
2066 }
2067 free(str);
2068 }
2069
Ingo Molnar2cba3ff2011-05-19 13:30:56 +02002070 if (!evsel_list->nr_entries) {
Namhyung Kima1f3d562016-05-13 15:01:03 +09002071 if (target__has_cpu(&target))
2072 default_attrs0[0].config = PERF_COUNT_SW_CPU_CLOCK;
2073
Andi Kleen9dec4472016-02-26 16:27:56 -08002074 if (perf_evlist__add_default_attrs(evsel_list, default_attrs0) < 0)
2075 return -1;
2076 if (pmu_have_event("cpu", "stalled-cycles-frontend")) {
2077 if (perf_evlist__add_default_attrs(evsel_list,
2078 frontend_attrs) < 0)
2079 return -1;
2080 }
2081 if (pmu_have_event("cpu", "stalled-cycles-backend")) {
2082 if (perf_evlist__add_default_attrs(evsel_list,
2083 backend_attrs) < 0)
2084 return -1;
2085 }
2086 if (perf_evlist__add_default_attrs(evsel_list, default_attrs1) < 0)
Arnaldo Carvalho de Melo50d08e42011-11-04 09:10:59 -02002087 return -1;
Ingo Molnar2cba3ff2011-05-19 13:30:56 +02002088 }
2089
2090 /* Detailed events get appended to the event list: */
2091
2092 if (detailed_run < 1)
2093 return 0;
2094
2095 /* Append detailed run extra attributes: */
Arnaldo Carvalho de Melo79695e12012-05-30 13:53:54 -03002096 if (perf_evlist__add_default_attrs(evsel_list, detailed_attrs) < 0)
Arnaldo Carvalho de Melo50d08e42011-11-04 09:10:59 -02002097 return -1;
Ingo Molnar2cba3ff2011-05-19 13:30:56 +02002098
2099 if (detailed_run < 2)
2100 return 0;
2101
2102 /* Append very detailed run extra attributes: */
Arnaldo Carvalho de Melo79695e12012-05-30 13:53:54 -03002103 if (perf_evlist__add_default_attrs(evsel_list, very_detailed_attrs) < 0)
Arnaldo Carvalho de Melo50d08e42011-11-04 09:10:59 -02002104 return -1;
Ingo Molnar2cba3ff2011-05-19 13:30:56 +02002105
2106 if (detailed_run < 3)
2107 return 0;
2108
2109 /* Append very, very detailed run extra attributes: */
Arnaldo Carvalho de Melo79695e12012-05-30 13:53:54 -03002110 return perf_evlist__add_default_attrs(evsel_list, very_very_detailed_attrs);
Ingo Molnar2cba3ff2011-05-19 13:30:56 +02002111}
2112
/* Usage string shown by parse_options() for 'perf stat record'. */
static const char * const stat_record_usage[] = {
	"perf stat record [<options>]",
	NULL,
};
2117
Jiri Olsa3ba78bd2015-11-05 15:40:47 +01002118static void init_features(struct perf_session *session)
2119{
2120 int feat;
2121
2122 for (feat = HEADER_FIRST_FEATURE; feat < HEADER_LAST_FEATURE; feat++)
2123 perf_header__set_feat(&session->header, feat);
2124
2125 perf_header__clear_feat(&session->header, HEADER_BUILD_ID);
2126 perf_header__clear_feat(&session->header, HEADER_TRACING_DATA);
2127 perf_header__clear_feat(&session->header, HEADER_BRANCH_STACK);
2128 perf_header__clear_feat(&session->header, HEADER_AUXTRACE);
2129}
2130
/*
 * 'perf stat record': parse record-specific options, create the output
 * perf.data session (honouring -o) and hook it into perf_stat so the
 * following stat run is written out.  -r is rejected since a repeated
 * run cannot be recorded.  Returns the remaining argc (the workload
 * command) or -1 on error.
 */
static int __cmd_record(int argc, const char **argv)
{
	struct perf_session *session;
	struct perf_data_file *file = &perf_stat.file;

	argc = parse_options(argc, argv, stat_options, stat_record_usage,
			     PARSE_OPT_STOP_AT_NON_OPTION);

	if (output_name)
		file->path = output_name;

	if (run_count != 1 || forever) {
		pr_err("Cannot use -r option with perf stat record.\n");
		return -1;
	}

	session = perf_session__new(file, false, NULL);
	if (session == NULL) {
		pr_err("Perf session creation failed.\n");
		return -1;
	}

	init_features(session);

	session->evlist   = evsel_list;
	perf_stat.session = session;
	perf_stat.record  = true;
	return argc;
}
2160
/*
 * 'perf stat report' callback for a STAT_ROUND record: aggregate the
 * per-evsel counts of this round, fold the wall time into the stats on
 * the final round, and print the round (with a timestamp when interval
 * mode was recorded and the round carries a time).
 */
static int process_stat_round_event(struct perf_tool *tool __maybe_unused,
				    union perf_event *event,
				    struct perf_session *session)
{
	struct stat_round_event *stat_round = &event->stat_round;
	struct perf_evsel *counter;
	struct timespec tsh, *ts = NULL;
	const char **argv = session->header.env.cmdline_argv;
	int argc = session->header.env.nr_cmdline;

	evlist__for_each_entry(evsel_list, counter)
		perf_stat_process_counter(&stat_config, counter);

	if (stat_round->type == PERF_STAT_ROUND_TYPE__FINAL)
		update_stats(&walltime_nsecs_stats, stat_round->time);

	/* stat_round->time is in nanoseconds; split it for print_interval(). */
	if (stat_config.interval && stat_round->time) {
		tsh.tv_sec = stat_round->time / NSECS_PER_SEC;
		tsh.tv_nsec = stat_round->time % NSECS_PER_SEC;
		ts = &tsh;
	}

	print_counters(ts, argc, argv);
	return 0;
}
2186
/*
 * 'perf stat report' callback for a STAT_CONFIG record: adopt the
 * recorded stat configuration, let a report command line aggregation
 * mode override the recorded one, and set up aggregation.  Task-only
 * data (empty cpu map) gets no aggregation at all.
 */
static
int process_stat_config_event(struct perf_tool *tool __maybe_unused,
			      union perf_event *event,
			      struct perf_session *session __maybe_unused)
{
	struct perf_stat *st = container_of(tool, struct perf_stat, tool);

	perf_event__read_stat_config(&stat_config, &event->stat_config);

	if (cpu_map__empty(st->cpus)) {
		if (st->aggr_mode != AGGR_UNSET)
			pr_warning("warning: processing task data, aggregation mode not set\n");
		return 0;
	}

	if (st->aggr_mode != AGGR_UNSET)
		stat_config.aggr_mode = st->aggr_mode;

	/* Live topology for piped input, recorded header env otherwise. */
	if (perf_stat.file.is_pipe)
		perf_stat_init_aggr_mode();
	else
		perf_stat_init_aggr_mode_file(st);

	return 0;
}
2212
/*
 * Install the cpu and thread maps on the evlist and allocate counter
 * storage — but only once BOTH maps have arrived from the stream.
 * Returning 0 while one map is still missing is deliberate: the other
 * *_map_event handler will call in again.
 */
static int set_maps(struct perf_stat *st)
{
	if (!st->cpus || !st->threads)
		return 0;

	if (WARN_ONCE(st->maps_allocated, "stats double allocation\n"))
		return -EINVAL;

	perf_evlist__set_maps(evsel_list, st->cpus, st->threads);

	if (perf_evlist__alloc_stats(evsel_list, true))
		return -ENOMEM;

	st->maps_allocated = true;
	return 0;
}
2229
2230static
2231int process_thread_map_event(struct perf_tool *tool __maybe_unused,
2232 union perf_event *event,
2233 struct perf_session *session __maybe_unused)
2234{
2235 struct perf_stat *st = container_of(tool, struct perf_stat, tool);
2236
2237 if (st->threads) {
2238 pr_warning("Extra thread map event, ignoring.\n");
2239 return 0;
2240 }
2241
2242 st->threads = thread_map__new_event(&event->thread_map);
2243 if (!st->threads)
2244 return -ENOMEM;
2245
2246 return set_maps(st);
2247}
2248
2249static
2250int process_cpu_map_event(struct perf_tool *tool __maybe_unused,
2251 union perf_event *event,
2252 struct perf_session *session __maybe_unused)
2253{
2254 struct perf_stat *st = container_of(tool, struct perf_stat, tool);
2255 struct cpu_map *cpus;
2256
2257 if (st->cpus) {
2258 pr_warning("Extra cpu map event, ignoring.\n");
2259 return 0;
2260 }
2261
2262 cpus = cpu_map__new_data(&event->cpu_map.data);
2263 if (!cpus)
2264 return -ENOMEM;
2265
2266 st->cpus = cpus;
2267 return set_maps(st);
2268}
2269
/* Usage string shown by parse_options() for 'perf stat report'. */
static const char * const stat_report_usage[] = {
	"perf stat report [<options>]",
	NULL,
};
2274
/*
 * Tool state and callbacks used by 'perf stat report' to consume a
 * perf.data file written by 'perf stat record'.  aggr_mode starts as
 * AGGR_UNSET so a recorded mode is only used when the report command
 * line did not request one (see process_stat_config_event()).
 */
static struct perf_stat perf_stat = {
	.tool = {
		.attr		= perf_event__process_attr,
		.event_update	= perf_event__process_event_update,
		.thread_map	= process_thread_map_event,
		.cpu_map	= process_cpu_map_event,
		.stat_config	= process_stat_config_event,
		.stat		= perf_event__process_stat_event,
		.stat_round	= process_stat_round_event,
	},
	.aggr_mode	= AGGR_UNSET,
};
2287
2288static int __cmd_report(int argc, const char **argv)
2289{
2290 struct perf_session *session;
2291 const struct option options[] = {
2292 OPT_STRING('i', "input", &input_name, "file", "input file name"),
Jiri Olsa89af4e02015-11-05 15:41:02 +01002293 OPT_SET_UINT(0, "per-socket", &perf_stat.aggr_mode,
2294 "aggregate counts per processor socket", AGGR_SOCKET),
2295 OPT_SET_UINT(0, "per-core", &perf_stat.aggr_mode,
2296 "aggregate counts per physical processor core", AGGR_CORE),
2297 OPT_SET_UINT('A', "no-aggr", &perf_stat.aggr_mode,
2298 "disable CPU count aggregation", AGGR_NONE),
Jiri Olsaba6039b62015-11-05 15:40:55 +01002299 OPT_END()
2300 };
2301 struct stat st;
2302 int ret;
2303
Jiri Olsa8a59f3c2016-01-12 10:35:29 +01002304 argc = parse_options(argc, argv, options, stat_report_usage, 0);
Jiri Olsaba6039b62015-11-05 15:40:55 +01002305
2306 if (!input_name || !strlen(input_name)) {
2307 if (!fstat(STDIN_FILENO, &st) && S_ISFIFO(st.st_mode))
2308 input_name = "-";
2309 else
2310 input_name = "perf.data";
2311 }
2312
2313 perf_stat.file.path = input_name;
2314 perf_stat.file.mode = PERF_DATA_MODE_READ;
2315
2316 session = perf_session__new(&perf_stat.file, false, &perf_stat.tool);
2317 if (session == NULL)
2318 return -1;
2319
2320 perf_stat.session = session;
2321 stat_config.output = stderr;
2322 evsel_list = session->evlist;
2323
2324 ret = perf_session__process_events(session);
2325 if (ret)
2326 return ret;
2327
2328 perf_session__delete(session);
2329 return 0;
2330}
2331
Irina Tirdea1d037ca2012-09-11 01:15:03 +03002332int cmd_stat(int argc, const char **argv, const char *prefix __maybe_unused)
Ingo Molnar52425192009-05-26 09:17:18 +02002333{
Arnaldo Carvalho de Melob070a542012-10-01 15:20:58 -03002334 const char * const stat_usage[] = {
2335 "perf stat [<options>] [<command>]",
2336 NULL
2337 };
Namhyung Kimcc03c542013-11-01 16:33:15 +09002338 int status = -EINVAL, run_idx;
Stephane Eranian4aa90152011-08-15 22:22:33 +02002339 const char *mode;
Jiri Olsa58215222015-07-21 14:31:24 +02002340 FILE *output = stderr;
Jiri Olsaec0d3d12015-07-21 14:31:25 +02002341 unsigned int interval;
Jiri Olsaba6039b62015-11-05 15:40:55 +01002342 const char * const stat_subcommands[] = { "record", "report" };
Ingo Molnar42202dd2009-06-13 14:57:28 +02002343
Stephane Eranian5af52b52010-05-18 15:00:01 +02002344 setlocale(LC_ALL, "");
2345
Namhyung Kim334fe7a2013-03-11 16:43:12 +09002346 evsel_list = perf_evlist__new();
Arnaldo Carvalho de Melo361c99a2011-01-11 20:56:53 -02002347 if (evsel_list == NULL)
2348 return -ENOMEM;
2349
Wang Nan1669e502016-02-19 11:43:58 +00002350 parse_events__shrink_config_terms();
Jiri Olsa4979d0c2015-11-05 15:40:46 +01002351 argc = parse_options_subcommand(argc, argv, stat_options, stat_subcommands,
2352 (const char **) stat_usage,
2353 PARSE_OPT_STOP_AT_NON_OPTION);
Andi Kleenfb4605b2016-03-01 10:57:52 -08002354 perf_stat__init_shadow_stats();
Jiri Olsa4979d0c2015-11-05 15:40:46 +01002355
Jiri Olsa6edb78a2015-11-05 15:41:01 +01002356 if (csv_sep) {
2357 csv_output = true;
2358 if (!strcmp(csv_sep, "\\t"))
2359 csv_sep = "\t";
2360 } else
2361 csv_sep = DEFAULT_SEPARATOR;
2362
Jiri Olsa4979d0c2015-11-05 15:40:46 +01002363 if (argc && !strncmp(argv[0], "rec", 3)) {
2364 argc = __cmd_record(argc, argv);
2365 if (argc < 0)
2366 return -1;
Jiri Olsaba6039b62015-11-05 15:40:55 +01002367 } else if (argc && !strncmp(argv[0], "rep", 3))
2368 return __cmd_report(argc, argv);
Stephane Eraniand7470b62010-12-01 18:49:05 +02002369
Jiri Olsaec0d3d12015-07-21 14:31:25 +02002370 interval = stat_config.interval;
2371
Jiri Olsa4979d0c2015-11-05 15:40:46 +01002372 /*
2373 * For record command the -o is already taken care of.
2374 */
2375 if (!STAT_RECORD && output_name && strcmp(output_name, "-"))
Stephane Eranian4aa90152011-08-15 22:22:33 +02002376 output = NULL;
2377
Jim Cromie56f3bae2011-09-07 17:14:00 -06002378 if (output_name && output_fd) {
2379 fprintf(stderr, "cannot use both --output and --log-fd\n");
Jiri Olsae0547312015-11-05 15:40:45 +01002380 parse_options_usage(stat_usage, stat_options, "o", 1);
2381 parse_options_usage(NULL, stat_options, "log-fd", 0);
Namhyung Kimcc03c542013-11-01 16:33:15 +09002382 goto out;
Jim Cromie56f3bae2011-09-07 17:14:00 -06002383 }
Stephane Eranianfc3e4d02012-05-15 13:11:11 +02002384
Andi Kleen54b50912016-03-03 15:57:36 -08002385 if (metric_only && stat_config.aggr_mode == AGGR_THREAD) {
2386 fprintf(stderr, "--metric-only is not supported with --per-thread\n");
2387 goto out;
2388 }
2389
Andi Kleen54b50912016-03-03 15:57:36 -08002390 if (metric_only && run_count > 1) {
2391 fprintf(stderr, "--metric-only is not supported with -r\n");
2392 goto out;
2393 }
2394
Stephane Eranianfc3e4d02012-05-15 13:11:11 +02002395 if (output_fd < 0) {
2396 fprintf(stderr, "argument to --log-fd must be a > 0\n");
Jiri Olsae0547312015-11-05 15:40:45 +01002397 parse_options_usage(stat_usage, stat_options, "log-fd", 0);
Namhyung Kimcc03c542013-11-01 16:33:15 +09002398 goto out;
Stephane Eranianfc3e4d02012-05-15 13:11:11 +02002399 }
2400
Stephane Eranian4aa90152011-08-15 22:22:33 +02002401 if (!output) {
2402 struct timespec tm;
2403 mode = append_file ? "a" : "w";
2404
2405 output = fopen(output_name, mode);
2406 if (!output) {
2407 perror("failed to create output file");
David Ahernfceda7f2012-08-26 12:24:44 -06002408 return -1;
Stephane Eranian4aa90152011-08-15 22:22:33 +02002409 }
2410 clock_gettime(CLOCK_REALTIME, &tm);
2411 fprintf(output, "# started on %s\n", ctime(&tm.tv_sec));
Stephane Eranianfc3e4d02012-05-15 13:11:11 +02002412 } else if (output_fd > 0) {
Jim Cromie56f3bae2011-09-07 17:14:00 -06002413 mode = append_file ? "a" : "w";
2414 output = fdopen(output_fd, mode);
2415 if (!output) {
2416 perror("Failed opening logfd");
2417 return -errno;
2418 }
Stephane Eranian4aa90152011-08-15 22:22:33 +02002419 }
2420
Jiri Olsa58215222015-07-21 14:31:24 +02002421 stat_config.output = output;
2422
Stephane Eraniand7470b62010-12-01 18:49:05 +02002423 /*
2424 * let the spreadsheet do the pretty-printing
2425 */
2426 if (csv_output) {
Jim Cromie61a9f322011-09-07 17:14:04 -06002427 /* User explicitly passed -B? */
Stephane Eraniand7470b62010-12-01 18:49:05 +02002428 if (big_num_opt == 1) {
2429 fprintf(stderr, "-B option not supported with -x\n");
Jiri Olsae0547312015-11-05 15:40:45 +01002430 parse_options_usage(stat_usage, stat_options, "B", 1);
2431 parse_options_usage(NULL, stat_options, "x", 1);
Namhyung Kimcc03c542013-11-01 16:33:15 +09002432 goto out;
Stephane Eraniand7470b62010-12-01 18:49:05 +02002433 } else /* Nope, so disable big number formatting */
2434 big_num = false;
2435 } else if (big_num_opt == 0) /* User passed --no-big-num */
2436 big_num = false;
2437
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03002438 if (!argc && target__none(&target))
Jiri Olsae0547312015-11-05 15:40:45 +01002439 usage_with_options(stat_usage, stat_options);
David Ahernac3063b2013-09-30 07:37:37 -06002440
Frederik Deweerdta7e191c2013-03-01 13:02:27 -05002441 if (run_count < 0) {
Namhyung Kimcc03c542013-11-01 16:33:15 +09002442 pr_err("Run count must be a positive number\n");
Jiri Olsae0547312015-11-05 15:40:45 +01002443 parse_options_usage(stat_usage, stat_options, "r", 1);
Namhyung Kimcc03c542013-11-01 16:33:15 +09002444 goto out;
Frederik Deweerdta7e191c2013-03-01 13:02:27 -05002445 } else if (run_count == 0) {
2446 forever = true;
2447 run_count = 1;
2448 }
Ingo Molnarddcacfa2009-04-20 15:37:32 +02002449
Jiri Olsa421a50f2015-07-21 14:31:22 +02002450 if ((stat_config.aggr_mode == AGGR_THREAD) && !target__has_task(&target)) {
Jiri Olsa32b8af82015-06-26 11:29:27 +02002451 fprintf(stderr, "The --per-thread option is only available "
2452 "when monitoring via -p -t options.\n");
Jiri Olsae0547312015-11-05 15:40:45 +01002453 parse_options_usage(NULL, stat_options, "p", 1);
2454 parse_options_usage(NULL, stat_options, "t", 1);
Jiri Olsa32b8af82015-06-26 11:29:27 +02002455 goto out;
2456 }
2457
2458 /*
2459 * no_aggr, cgroup are for system-wide only
2460 * --per-thread is aggregated per thread, we don't mix it with cpu mode
2461 */
Jiri Olsa421a50f2015-07-21 14:31:22 +02002462 if (((stat_config.aggr_mode != AGGR_GLOBAL &&
2463 stat_config.aggr_mode != AGGR_THREAD) || nr_cgroups) &&
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03002464 !target__has_cpu(&target)) {
Stephane Eranian023695d2011-02-14 11:20:01 +02002465 fprintf(stderr, "both cgroup and no-aggregation "
2466 "modes only available in system-wide mode\n");
2467
Jiri Olsae0547312015-11-05 15:40:45 +01002468 parse_options_usage(stat_usage, stat_options, "G", 1);
2469 parse_options_usage(NULL, stat_options, "A", 1);
2470 parse_options_usage(NULL, stat_options, "a", 1);
Namhyung Kimcc03c542013-11-01 16:33:15 +09002471 goto out;
Stephane Eraniand7e7a452013-02-06 15:46:02 +01002472 }
2473
Ingo Molnar2cba3ff2011-05-19 13:30:56 +02002474 if (add_default_attributes())
2475 goto out;
Ingo Molnarddcacfa2009-04-20 15:37:32 +02002476
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03002477 target__validate(&target);
Arnaldo Carvalho de Melo5c98d4662011-01-03 17:53:33 -02002478
Namhyung Kim77a6f012012-05-07 14:09:04 +09002479 if (perf_evlist__create_maps(evsel_list, &target) < 0) {
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03002480 if (target__has_task(&target)) {
Namhyung Kim77a6f012012-05-07 14:09:04 +09002481 pr_err("Problems finding threads of monitor\n");
Jiri Olsae0547312015-11-05 15:40:45 +01002482 parse_options_usage(stat_usage, stat_options, "p", 1);
2483 parse_options_usage(NULL, stat_options, "t", 1);
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03002484 } else if (target__has_cpu(&target)) {
Namhyung Kim77a6f012012-05-07 14:09:04 +09002485 perror("failed to parse CPUs map");
Jiri Olsae0547312015-11-05 15:40:45 +01002486 parse_options_usage(stat_usage, stat_options, "C", 1);
2487 parse_options_usage(NULL, stat_options, "a", 1);
Namhyung Kimcc03c542013-11-01 16:33:15 +09002488 }
2489 goto out;
Arnaldo Carvalho de Melo60d567e2011-01-03 17:49:48 -02002490 }
Jiri Olsa32b8af82015-06-26 11:29:27 +02002491
2492 /*
2493 * Initialize thread_map with comm names,
2494 * so we could print it out on output.
2495 */
Jiri Olsa421a50f2015-07-21 14:31:22 +02002496 if (stat_config.aggr_mode == AGGR_THREAD)
Jiri Olsa32b8af82015-06-26 11:29:27 +02002497 thread_map__read_comms(evsel_list->threads);
2498
Stephane Eranian13370a92013-01-29 12:47:44 +01002499 if (interval && interval < 100) {
Kan Liang19afd102015-10-02 05:04:34 -04002500 if (interval < 10) {
2501 pr_err("print interval must be >= 10ms\n");
Jiri Olsae0547312015-11-05 15:40:45 +01002502 parse_options_usage(stat_usage, stat_options, "I", 1);
Kan Liang19afd102015-10-02 05:04:34 -04002503 goto out;
2504 } else
2505 pr_warning("print interval < 100ms. "
2506 "The overhead percentage could be high in some cases. "
2507 "Please proceed with caution.\n");
Stephane Eranian13370a92013-01-29 12:47:44 +01002508 }
Stephane Eranianc45c6ea2010-05-28 12:00:01 +02002509
Arnaldo Carvalho de Melod134ffb2013-03-18 11:24:21 -03002510 if (perf_evlist__alloc_stats(evsel_list, interval))
Arnaldo Carvalho de Melo03ad9742014-01-03 15:56:06 -03002511 goto out;
Zhang, Yanmind6d901c2010-03-18 11:36:05 -03002512
Stephane Eranian86ee6e12013-02-14 13:57:27 +01002513 if (perf_stat_init_aggr_mode())
Arnaldo Carvalho de Melo03ad9742014-01-03 15:56:06 -03002514 goto out;
Stephane Eranian86ee6e12013-02-14 13:57:27 +01002515
Ingo Molnar58d7e992009-05-15 11:03:23 +02002516 /*
2517 * We don't want to block the signals - that would cause
2518 * child tasks to inherit that and Ctrl-C would not work.
2519 * What we want is for Ctrl-C to work in the exec()-ed
2520 * task, but being ignored by perf stat itself:
2521 */
Peter Zijlstraf7b7c262009-06-10 15:55:59 +02002522 atexit(sig_atexit);
Frederik Deweerdta7e191c2013-03-01 13:02:27 -05002523 if (!forever)
2524 signal(SIGINT, skip_signal);
Stephane Eranian13370a92013-01-29 12:47:44 +01002525 signal(SIGCHLD, skip_signal);
Ingo Molnar58d7e992009-05-15 11:03:23 +02002526 signal(SIGALRM, skip_signal);
2527 signal(SIGABRT, skip_signal);
2528
Ingo Molnar42202dd2009-06-13 14:57:28 +02002529 status = 0;
Frederik Deweerdta7e191c2013-03-01 13:02:27 -05002530 for (run_idx = 0; forever || run_idx < run_count; run_idx++) {
Ingo Molnar42202dd2009-06-13 14:57:28 +02002531 if (run_count != 1 && verbose)
Stephane Eranian4aa90152011-08-15 22:22:33 +02002532 fprintf(output, "[ perf stat: executing run #%d ... ]\n",
2533 run_idx + 1);
Ingo Molnarf9cef0a2011-04-28 18:17:11 +02002534
Ingo Molnar42202dd2009-06-13 14:57:28 +02002535 status = run_perf_stat(argc, argv);
Frederik Deweerdta7e191c2013-03-01 13:02:27 -05002536 if (forever && status != -1) {
Jiri Olsad4f63a42015-06-26 11:29:26 +02002537 print_counters(NULL, argc, argv);
Jiri Olsa254ecbc2015-06-26 11:29:13 +02002538 perf_stat__reset_stats();
Frederik Deweerdta7e191c2013-03-01 13:02:27 -05002539 }
Ingo Molnar42202dd2009-06-13 14:57:28 +02002540 }
2541
Frederik Deweerdta7e191c2013-03-01 13:02:27 -05002542 if (!forever && status != -1 && !interval)
Jiri Olsad4f63a42015-06-26 11:29:26 +02002543 print_counters(NULL, argc, argv);
Arnaldo Carvalho de Melod134ffb2013-03-18 11:24:21 -03002544
Jiri Olsa4979d0c2015-11-05 15:40:46 +01002545 if (STAT_RECORD) {
2546 /*
2547 * We synthesize the kernel mmap record just so that older tools
2548 * don't emit warnings about not being able to resolve symbols
2549 * due to /proc/sys/kernel/kptr_restrict settings and instead provide
2550 * a saner message about no samples being in the perf.data file.
2551 *
2552 * This also serves to suppress a warning about f_header.data.size == 0
Jiri Olsa8b99b1a2015-11-05 15:40:48 +01002553 * in header.c at the moment 'perf stat record' gets introduced, which
2554 * is not really needed once we start adding the stat specific PERF_RECORD_
2555 * records, but the need to suppress the kptr_restrict messages in older
2556 * tools remain -acme
Jiri Olsa4979d0c2015-11-05 15:40:46 +01002557 */
2558 int fd = perf_data_file__fd(&perf_stat.file);
2559 int err = perf_event__synthesize_kernel_mmap((void *)&perf_stat,
2560 process_synthesized_event,
2561 &perf_stat.session->machines.host);
2562 if (err) {
2563 pr_warning("Couldn't synthesize the kernel mmap record, harmless, "
2564 "older tools may produce warnings about this file\n.");
2565 }
2566
Jiri Olsa7aad0c32015-11-05 15:40:52 +01002567 if (!interval) {
2568 if (WRITE_STAT_ROUND_EVENT(walltime_nsecs_stats.max, FINAL))
2569 pr_err("failed to write stat round event\n");
2570 }
2571
Jiri Olsa664c98d2015-11-05 15:40:50 +01002572 if (!perf_stat.file.is_pipe) {
2573 perf_stat.session->header.data_size += perf_stat.bytes_written;
2574 perf_session__write_header(perf_stat.session, evsel_list, fd, true);
2575 }
Jiri Olsa4979d0c2015-11-05 15:40:46 +01002576
2577 perf_session__delete(perf_stat.session);
2578 }
2579
Masami Hiramatsu544c2ae2015-12-09 11:11:27 +09002580 perf_stat__exit_aggr_mode();
Arnaldo Carvalho de Melod134ffb2013-03-18 11:24:21 -03002581 perf_evlist__free_stats(evsel_list);
Arnaldo Carvalho de Melo0015e2e2011-02-01 16:18:10 -02002582out:
2583 perf_evlist__delete(evsel_list);
Ingo Molnar42202dd2009-06-13 14:57:28 +02002584 return status;
Ingo Molnarddcacfa2009-04-20 15:37:32 +02002585}