/*
 * builtin-record.c
 *
 * Builtin record command: Record the profile of a workload
 * (or a CPU, or a PID) into the perf.data output file - for
 * later analysis via perf report.
 */
#include "builtin.h"

#include "perf.h"

#include "util/build-id.h"
#include "util/util.h"
#include <subcmd/parse-options.h>
#include "util/parse-events.h"

#include "util/callchain.h"
#include "util/cgroup.h"
#include "util/header.h"
#include "util/event.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/debug.h"
#include "util/session.h"
#include "util/tool.h"
#include "util/symbol.h"
#include "util/cpumap.h"
#include "util/thread_map.h"
#include "util/data.h"
#include "util/perf_regs.h"
#include "util/auxtrace.h"
#include "util/tsc.h"
#include "util/parse-branch-options.h"
#include "util/parse-regs-options.h"
#include "util/llvm-utils.h"
#include "util/bpf-loader.h"
#include "asm/bug.h"

#include <unistd.h>
#include <sched.h>
#include <sys/mman.h>

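/*
 * Per-invocation state for 'perf record': the tool callbacks, parsed
 * options, output perf.data file, optional AUX area tracing recorder
 * and the list of events being recorded.
 */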
struct record {
	struct perf_tool	tool;
	struct record_opts	opts;
	u64			bytes_written;
	struct perf_data_file	file;
	struct auxtrace_record	*itr;
	struct perf_evlist	*evlist;
	struct perf_session	*session;
	const char		*progname;
	int			realtime_prio;
	bool			no_buildid;
	bool			no_buildid_set;
	bool			no_buildid_cache;
	bool			no_buildid_cache_set;
	bool			buildid_all;
	bool			timestamp_filename;
	unsigned long long	samples;
};

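/* Append raw bytes to the perf.data output and account for the size written. */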
static int record__write(struct record *rec, void *bf, size_t size)
{
	if (perf_data_file__write(rec->session->file, bf, size) < 0) {
		pr_err("failed to write perf data, error: %m\n");
		return -1;
	}

	rec->bytes_written += size;
	return 0;
}

static int process_synthesized_event(struct perf_tool *tool,
				     union perf_event *event,
				     struct perf_sample *sample __maybe_unused,
				     struct machine *machine __maybe_unused)
{
	struct record *rec = container_of(tool, struct record, tool);
	return record__write(rec, event, event->header.size);
}

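/*
 * Copy the newly produced data out of one mmap ring buffer into the
 * output file, handling the case where the region between 'prev' and
 * 'head' wraps around the end of the buffer (two writes).
 */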
static int record__mmap_read(struct record *rec, int idx)
{
	struct perf_mmap *md = &rec->evlist->mmap[idx];
	u64 head = perf_mmap__read_head(md);
	u64 old = md->prev;
	unsigned char *data = md->base + page_size;
	unsigned long size;
	void *buf;
	int rc = 0;

	if (old == head)
		return 0;

	rec->samples++;

	size = head - old;

	if ((old & md->mask) + size != (head & md->mask)) {
		buf = &data[old & md->mask];
		size = md->mask + 1 - (old & md->mask);
		old += size;

		if (record__write(rec, buf, size) < 0) {
			rc = -1;
			goto out;
		}
	}

	buf = &data[old & md->mask];
	size = head - old;
	old += size;

	if (record__write(rec, buf, size) < 0) {
		rc = -1;
		goto out;
	}

	md->prev = old;
	perf_evlist__mmap_consume(rec->evlist, idx);
out:
	return rc;
}

static volatile int done;
static volatile int signr = -1;
static volatile int child_finished;

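/*
 * AUX area tracing snapshot state: OFF when snapshot mode is not in use,
 * DISABLED while snapshots must not be triggered (setup, teardown, or a
 * snapshot already in flight), ENABLED while the record loop may take one.
 */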
static volatile enum {
	AUXTRACE_SNAPSHOT_OFF = -1,
	AUXTRACE_SNAPSHOT_DISABLED = 0,
	AUXTRACE_SNAPSHOT_ENABLED = 1,
} auxtrace_snapshot_state = AUXTRACE_SNAPSHOT_OFF;

static inline void
auxtrace_snapshot_on(void)
{
	auxtrace_snapshot_state = AUXTRACE_SNAPSHOT_DISABLED;
}

static inline void
auxtrace_snapshot_enable(void)
{
	if (auxtrace_snapshot_state == AUXTRACE_SNAPSHOT_OFF)
		return;
	auxtrace_snapshot_state = AUXTRACE_SNAPSHOT_ENABLED;
}

static inline void
auxtrace_snapshot_disable(void)
{
	if (auxtrace_snapshot_state == AUXTRACE_SNAPSHOT_OFF)
		return;
	auxtrace_snapshot_state = AUXTRACE_SNAPSHOT_DISABLED;
}

static inline bool
auxtrace_snapshot_is_enabled(void)
{
	if (auxtrace_snapshot_state == AUXTRACE_SNAPSHOT_OFF)
		return false;
	return auxtrace_snapshot_state == AUXTRACE_SNAPSHOT_ENABLED;
}

static volatile int auxtrace_snapshot_err;
static volatile int auxtrace_record__snapshot_started;

static void sig_handler(int sig)
{
	if (sig == SIGCHLD)
		child_finished = 1;
	else
		signr = sig;

	done = 1;
}

static void record__sig_exit(void)
{
	if (signr == -1)
		return;

	signal(signr, SIG_DFL);
	raise(signr);
}

#ifdef HAVE_AUXTRACE_SUPPORT

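/*
 * Callback used by auxtrace_mmap__read*(): write the auxtrace event header
 * followed by the AUX data (possibly split into two chunks by the ring
 * buffer wrap), padded out to 8 bytes, and index it when not writing to a
 * pipe.
 */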
static int record__process_auxtrace(struct perf_tool *tool,
				    union perf_event *event, void *data1,
				    size_t len1, void *data2, size_t len2)
{
	struct record *rec = container_of(tool, struct record, tool);
	struct perf_data_file *file = &rec->file;
	size_t padding;
	u8 pad[8] = {0};

	if (!perf_data_file__is_pipe(file)) {
		off_t file_offset;
		int fd = perf_data_file__fd(file);
		int err;

		file_offset = lseek(fd, 0, SEEK_CUR);
		if (file_offset == -1)
			return -1;
		err = auxtrace_index__auxtrace_event(&rec->session->auxtrace_index,
						     event, file_offset);
		if (err)
			return err;
	}

	/* event.auxtrace.size includes padding, see __auxtrace_mmap__read() */
	padding = (len1 + len2) & 7;
	if (padding)
		padding = 8 - padding;

	record__write(rec, event, event->header.size);
	record__write(rec, data1, len1);
	if (len2)
		record__write(rec, data2, len2);
	record__write(rec, &pad, padding);

	return 0;
}

static int record__auxtrace_mmap_read(struct record *rec,
				      struct auxtrace_mmap *mm)
{
	int ret;

	ret = auxtrace_mmap__read(mm, rec->itr, &rec->tool,
				  record__process_auxtrace);
	if (ret < 0)
		return ret;

	if (ret)
		rec->samples++;

	return 0;
}

static int record__auxtrace_mmap_read_snapshot(struct record *rec,
					       struct auxtrace_mmap *mm)
{
	int ret;

	ret = auxtrace_mmap__read_snapshot(mm, rec->itr, &rec->tool,
					   record__process_auxtrace,
					   rec->opts.auxtrace_snapshot_size);
	if (ret < 0)
		return ret;

	if (ret)
		rec->samples++;

	return 0;
}

static int record__auxtrace_read_snapshot_all(struct record *rec)
{
	int i;
	int rc = 0;

	for (i = 0; i < rec->evlist->nr_mmaps; i++) {
		struct auxtrace_mmap *mm =
				&rec->evlist->mmap[i].auxtrace_mmap;

		if (!mm->base)
			continue;

		if (record__auxtrace_mmap_read_snapshot(rec, mm) != 0) {
			rc = -1;
			goto out;
		}
	}
out:
	return rc;
}

static void record__read_auxtrace_snapshot(struct record *rec)
{
	pr_debug("Recording AUX area tracing snapshot\n");
	if (record__auxtrace_read_snapshot_all(rec) < 0) {
		auxtrace_snapshot_err = -1;
	} else {
		auxtrace_snapshot_err = auxtrace_record__snapshot_finish(rec->itr);
		if (!auxtrace_snapshot_err)
			auxtrace_snapshot_enable();
	}
}

#else

static inline
int record__auxtrace_mmap_read(struct record *rec __maybe_unused,
			       struct auxtrace_mmap *mm __maybe_unused)
{
	return 0;
}

static inline
void record__read_auxtrace_snapshot(struct record *rec __maybe_unused)
{
}

static inline
int auxtrace_record__snapshot_start(struct auxtrace_record *itr __maybe_unused)
{
	return 0;
}

#endif

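/*
 * Configure and open all events in the evlist, retrying with
 * perf_evsel__fallback() when an open fails, then apply event filters and
 * mmap the data and AUX area ring buffers.
 */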
static int record__open(struct record *rec)
{
	char msg[512];
	struct perf_evsel *pos;
	struct perf_evlist *evlist = rec->evlist;
	struct perf_session *session = rec->session;
	struct record_opts *opts = &rec->opts;
	int rc = 0;

	perf_evlist__config(evlist, opts, &callchain_param);

	evlist__for_each(evlist, pos) {
try_again:
		if (perf_evsel__open(pos, pos->cpus, pos->threads) < 0) {
			if (perf_evsel__fallback(pos, errno, msg, sizeof(msg))) {
				if (verbose)
					ui__warning("%s\n", msg);
				goto try_again;
			}

			rc = -errno;
			perf_evsel__open_strerror(pos, &opts->target,
						  errno, msg, sizeof(msg));
			ui__error("%s\n", msg);
			goto out;
		}
	}

	if (perf_evlist__apply_filters(evlist, &pos)) {
		error("failed to set filter \"%s\" on event %s with %d (%s)\n",
			pos->filter, perf_evsel__name(pos), errno,
			strerror_r(errno, msg, sizeof(msg)));
		rc = -1;
		goto out;
	}

	if (perf_evlist__mmap_ex(evlist, opts->mmap_pages, false,
				 opts->auxtrace_mmap_pages,
				 opts->auxtrace_snapshot_mode) < 0) {
		if (errno == EPERM) {
			pr_err("Permission error mapping pages.\n"
			       "Consider increasing "
			       "/proc/sys/kernel/perf_event_mlock_kb,\n"
			       "or try again with a smaller value of -m/--mmap_pages.\n"
			       "(current value: %u,%u)\n",
			       opts->mmap_pages, opts->auxtrace_mmap_pages);
			rc = -errno;
		} else {
			pr_err("failed to mmap with %d (%s)\n", errno,
				strerror_r(errno, msg, sizeof(msg)));
			if (errno)
				rc = -errno;
			else
				rc = -EINVAL;
		}
		goto out;
	}

	session->evlist = evlist;
	perf_session__set_id_hdr_size(session);
out:
	return rc;
}

static int process_sample_event(struct perf_tool *tool,
				union perf_event *event,
				struct perf_sample *sample,
				struct perf_evsel *evsel,
				struct machine *machine)
{
	struct record *rec = container_of(tool, struct record, tool);

	rec->samples++;

	return build_id__mark_dso_hit(tool, event, sample, evsel, machine);
}

static int process_buildids(struct record *rec)
{
	struct perf_data_file *file = &rec->file;
	struct perf_session *session = rec->session;

	if (file->size == 0)
		return 0;

	/*
	 * During this process, it'll load kernel map and replace the
	 * dso->long_name to a real pathname it found.  In this case
	 * we prefer the vmlinux path like
	 *   /lib/modules/3.16.4/build/vmlinux
	 *
	 * rather than build-id path (in debug directory).
	 *   $HOME/.debug/.build-id/f0/6e17aa50adf4d00b88925e03775de107611551
	 */
	symbol_conf.ignore_vmlinux_buildid = true;

	/*
	 * If --buildid-all is given, it marks all DSO regardless of hits,
	 * so no need to process samples.
	 */
	if (rec->buildid_all)
		rec->tool.sample = NULL;

	return perf_session__process_events(session);
}

static void perf_event__synthesize_guest_os(struct machine *machine, void *data)
{
	int err;
	struct perf_tool *tool = data;
	/*
	 * As for guest kernel when processing subcommand record&report,
	 * we arrange module mmap prior to guest kernel mmap and trigger
	 * a preload dso because default guest module symbols are loaded
	 * from guest kallsyms instead of /lib/modules/XXX/XXX. This
	 * method is used to avoid symbol missing when the first addr is
	 * in module instead of in guest kernel.
	 */
	err = perf_event__synthesize_modules(tool, process_synthesized_event,
					     machine);
	if (err < 0)
		pr_err("Couldn't record guest kernel [%d]'s reference"
		       " relocation symbol.\n", machine->pid);

	/*
	 * We use _stext for guest kernel because guest kernel's /proc/kallsyms
	 * have no _text sometimes.
	 */
	err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
						 machine);
	if (err < 0)
		pr_err("Couldn't record guest kernel [%d]'s reference"
		       " relocation symbol.\n", machine->pid);
}

static struct perf_event_header finished_round_event = {
	.size = sizeof(struct perf_event_header),
	.type = PERF_RECORD_FINISHED_ROUND,
};

static int record__mmap_read_all(struct record *rec)
{
	u64 bytes_written = rec->bytes_written;
	int i;
	int rc = 0;

	for (i = 0; i < rec->evlist->nr_mmaps; i++) {
		struct auxtrace_mmap *mm = &rec->evlist->mmap[i].auxtrace_mmap;

		if (rec->evlist->mmap[i].base) {
			if (record__mmap_read(rec, i) != 0) {
				rc = -1;
				goto out;
			}
		}

		if (mm->base && !rec->opts.auxtrace_snapshot_mode &&
		    record__auxtrace_mmap_read(rec, mm) != 0) {
			rc = -1;
			goto out;
		}
	}

	/*
	 * Mark the round finished in case we wrote
	 * at least one event.
	 */
	if (bytes_written != rec->bytes_written)
		rc = record__write(rec, &finished_round_event, sizeof(finished_round_event));

out:
	return rc;
}

static void record__init_features(struct record *rec)
{
	struct perf_session *session = rec->session;
	int feat;

	for (feat = HEADER_FIRST_FEATURE; feat < HEADER_LAST_FEATURE; feat++)
		perf_header__set_feat(&session->header, feat);

	if (rec->no_buildid)
		perf_header__clear_feat(&session->header, HEADER_BUILD_ID);

	if (!have_tracepoints(&rec->evlist->entries))
		perf_header__clear_feat(&session->header, HEADER_TRACING_DATA);

	if (!rec->opts.branch_stack)
		perf_header__clear_feat(&session->header, HEADER_BRANCH_STACK);

	if (!rec->opts.full_auxtrace)
		perf_header__clear_feat(&session->header, HEADER_AUXTRACE);

	perf_header__clear_feat(&session->header, HEADER_STAT);
}

static void
record__finish_output(struct record *rec)
{
	struct perf_data_file *file = &rec->file;
	int fd = perf_data_file__fd(file);

	if (file->is_pipe)
		return;

	rec->session->header.data_size += rec->bytes_written;
	file->size = lseek(perf_data_file__fd(file), 0, SEEK_CUR);

	if (!rec->no_buildid) {
		process_buildids(rec);

		if (rec->buildid_all)
			dsos__hit_all(rec->session);
	}
	perf_session__write_header(rec->session, rec->evlist, fd, true);

	return;
}

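/*
 * Finalize the data written so far and switch the output to a new
 * <file>.<timestamp> file, resetting the byte counts unless called at exit.
 */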
static int
record__switch_output(struct record *rec, bool at_exit)
{
	struct perf_data_file *file = &rec->file;
	int fd, err;

	/* Same Size: "2015122520103046"*/
	char timestamp[] = "InvalidTimestamp";

	rec->samples = 0;
	record__finish_output(rec);
	err = fetch_current_timestamp(timestamp, sizeof(timestamp));
	if (err) {
		pr_err("Failed to get current timestamp\n");
		return -EINVAL;
	}

	fd = perf_data_file__switch(file, timestamp,
				    rec->session->header.data_offset,
				    at_exit);
	if (fd >= 0 && !at_exit) {
		rec->bytes_written = 0;
		rec->session->header.data_size = 0;
	}

	if (!quiet)
		fprintf(stderr, "[ perf record: Dump %s.%s ]\n",
			file->path, timestamp);
	return fd;
}

static volatile int workload_exec_errno;

/*
 * perf_evlist__prepare_workload will send a SIGUSR1
 * if the fork fails, since we asked by setting its
 * want_signal to true.
 */
static void workload_exec_failed_signal(int signo __maybe_unused,
					siginfo_t *info,
					void *ucontext __maybe_unused)
{
	workload_exec_errno = info->si_value.sival_int;
	done = 1;
	child_finished = 1;
}

static void snapshot_sig_handler(int sig);

int __weak
perf_event__synth_time_conv(const struct perf_event_mmap_page *pc __maybe_unused,
			    struct perf_tool *tool __maybe_unused,
			    perf_event__handler_t process __maybe_unused,
			    struct machine *machine __maybe_unused)
{
	return 0;
}

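/*
 * Emit the synthetic events that describe the environment being recorded:
 * attrs and tracing data for pipe output, time conversion and auxtrace info,
 * kernel and module mmaps, guest machines and the already existing threads.
 */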
static int record__synthesize(struct record *rec)
{
	struct perf_session *session = rec->session;
	struct machine *machine = &session->machines.host;
	struct perf_data_file *file = &rec->file;
	struct record_opts *opts = &rec->opts;
	struct perf_tool *tool = &rec->tool;
	int fd = perf_data_file__fd(file);
	int err = 0;

	if (file->is_pipe) {
		err = perf_event__synthesize_attrs(tool, session,
						   process_synthesized_event);
		if (err < 0) {
			pr_err("Couldn't synthesize attrs.\n");
			goto out;
		}

		if (have_tracepoints(&rec->evlist->entries)) {
			/*
			 * FIXME err <= 0 here actually means that
			 * there were no tracepoints so its not really
			 * an error, just that we don't need to
			 * synthesize anything.  We really have to
			 * return this more properly and also
			 * propagate errors that now are calling die()
			 */
			err = perf_event__synthesize_tracing_data(tool, fd, rec->evlist,
								  process_synthesized_event);
			if (err <= 0) {
				pr_err("Couldn't record tracing data.\n");
				goto out;
			}
			rec->bytes_written += err;
		}
	}

	err = perf_event__synth_time_conv(rec->evlist->mmap[0].base, tool,
					  process_synthesized_event, machine);
	if (err)
		goto out;

	if (rec->opts.full_auxtrace) {
		err = perf_event__synthesize_auxtrace_info(rec->itr, tool,
					session, process_synthesized_event);
		if (err)
			goto out;
	}

	err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
						 machine);
	WARN_ONCE(err < 0, "Couldn't record kernel reference relocation symbol\n"
			   "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
			   "Check /proc/kallsyms permission or run as root.\n");

	err = perf_event__synthesize_modules(tool, process_synthesized_event,
					     machine);
	WARN_ONCE(err < 0, "Couldn't record kernel module information.\n"
			   "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
			   "Check /proc/modules permission or run as root.\n");

	if (perf_guest) {
		machines__process_guests(&session->machines,
					 perf_event__synthesize_guest_os, tool);
	}

	err = __machine__synthesize_threads(machine, tool, &opts->target, rec->evlist->threads,
					    process_synthesized_event, opts->sample_address,
					    opts->proc_map_timeout);
out:
	return err;
}

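/*
 * The main body of 'perf record': set up signals and the session, open the
 * events, synthesize side-band records, then loop writing mmap data until
 * the workload exits or recording is interrupted.
 */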
static int __cmd_record(struct record *rec, int argc, const char **argv)
{
	int err;
	int status = 0;
	unsigned long waking = 0;
	const bool forks = argc > 0;
	struct machine *machine;
	struct perf_tool *tool = &rec->tool;
	struct record_opts *opts = &rec->opts;
	struct perf_data_file *file = &rec->file;
	struct perf_session *session;
	bool disabled = false, draining = false;
	int fd;

	rec->progname = argv[0];

	atexit(record__sig_exit);
	signal(SIGCHLD, sig_handler);
	signal(SIGINT, sig_handler);
	signal(SIGTERM, sig_handler);

	if (rec->opts.auxtrace_snapshot_mode) {
		signal(SIGUSR2, snapshot_sig_handler);
		auxtrace_snapshot_on();
	} else {
		signal(SIGUSR2, SIG_IGN);
	}

	session = perf_session__new(file, false, tool);
	if (session == NULL) {
		pr_err("Perf session creation failed.\n");
		return -1;
	}

	fd = perf_data_file__fd(file);
	rec->session = session;

	record__init_features(rec);

	if (forks) {
		err = perf_evlist__prepare_workload(rec->evlist, &opts->target,
						    argv, file->is_pipe,
						    workload_exec_failed_signal);
		if (err < 0) {
			pr_err("Couldn't run the workload!\n");
			status = err;
			goto out_delete_session;
		}
	}

	if (record__open(rec) != 0) {
		err = -1;
		goto out_child;
	}

	err = bpf__apply_obj_config();
	if (err) {
		char errbuf[BUFSIZ];

		bpf__strerror_apply_obj_config(err, errbuf, sizeof(errbuf));
		pr_err("ERROR: Apply config to BPF failed: %s\n",
			 errbuf);
		goto out_child;
	}

	/*
	 * Normally perf_session__new would do this, but it doesn't have the
	 * evlist.
	 */
	if (rec->tool.ordered_events && !perf_evlist__sample_id_all(rec->evlist)) {
		pr_warning("WARNING: No sample_id_all support, falling back to unordered processing\n");
		rec->tool.ordered_events = false;
	}

	if (!rec->evlist->nr_groups)
		perf_header__clear_feat(&session->header, HEADER_GROUP_DESC);

	if (file->is_pipe) {
		err = perf_header__write_pipe(fd);
		if (err < 0)
			goto out_child;
	} else {
		err = perf_session__write_header(session, rec->evlist, fd, false);
		if (err < 0)
			goto out_child;
	}

	if (!rec->no_buildid
	    && !perf_header__has_feat(&session->header, HEADER_BUILD_ID)) {
		pr_err("Couldn't generate buildids. "
		       "Use --no-buildid to profile anyway.\n");
		err = -1;
		goto out_child;
	}

	machine = &session->machines.host;

	err = record__synthesize(rec);
	if (err < 0)
		goto out_child;

	if (rec->realtime_prio) {
		struct sched_param param;

		param.sched_priority = rec->realtime_prio;
		if (sched_setscheduler(0, SCHED_FIFO, &param)) {
			pr_err("Could not set realtime priority.\n");
			err = -1;
			goto out_child;
		}
	}

	/*
	 * When perf is starting the traced process, all the events
	 * (apart from group members) have enable_on_exec=1 set,
	 * so don't spoil it by prematurely enabling them.
	 */
	if (!target__none(&opts->target) && !opts->initial_delay)
		perf_evlist__enable(rec->evlist);

	/*
	 * Let the child rip
	 */
	if (forks) {
		union perf_event *event;

		event = malloc(sizeof(event->comm) + machine->id_hdr_size);
		if (event == NULL) {
			err = -ENOMEM;
			goto out_child;
		}

		/*
		 * Some H/W events are generated before COMM event
		 * which is emitted during exec(), so perf script
		 * cannot see a correct process name for those events.
		 * Synthesize COMM event to prevent it.
		 */
		perf_event__synthesize_comm(tool, event,
					    rec->evlist->workload.pid,
					    process_synthesized_event,
					    machine);
		free(event);

		perf_evlist__start_workload(rec->evlist);
	}

	if (opts->initial_delay) {
		usleep(opts->initial_delay * 1000);
		perf_evlist__enable(rec->evlist);
	}

	auxtrace_snapshot_enable();
	for (;;) {
		unsigned long long hits = rec->samples;

		if (record__mmap_read_all(rec) < 0) {
			auxtrace_snapshot_disable();
			err = -1;
			goto out_child;
		}

		if (auxtrace_record__snapshot_started) {
			auxtrace_record__snapshot_started = 0;
			if (!auxtrace_snapshot_err)
				record__read_auxtrace_snapshot(rec);
			if (auxtrace_snapshot_err) {
				pr_err("AUX area tracing snapshot failed\n");
				err = -1;
				goto out_child;
			}
		}

		if (hits == rec->samples) {
			if (done || draining)
				break;
			err = perf_evlist__poll(rec->evlist, -1);
			/*
			 * Propagate error, only if there's any. Ignore positive
			 * number of returned events and interrupt error.
			 */
			if (err > 0 || (err < 0 && errno == EINTR))
				err = 0;
			waking++;

			if (perf_evlist__filter_pollfd(rec->evlist, POLLERR | POLLHUP) == 0)
				draining = true;
		}

		/*
		 * When perf is starting the traced process, at the end events
		 * die with the process and we wait for that. Thus no need to
		 * disable events in this case.
		 */
		if (done && !disabled && !target__none(&opts->target)) {
			auxtrace_snapshot_disable();
			perf_evlist__disable(rec->evlist);
			disabled = true;
		}
	}
	auxtrace_snapshot_disable();

	if (forks && workload_exec_errno) {
		char msg[STRERR_BUFSIZE];
		const char *emsg = strerror_r(workload_exec_errno, msg, sizeof(msg));
		pr_err("Workload failed: %s\n", emsg);
		err = -1;
		goto out_child;
	}

	if (!quiet)
		fprintf(stderr, "[ perf record: Woken up %ld times to write data ]\n", waking);

out_child:
	if (forks) {
		int exit_status;

		if (!child_finished)
			kill(rec->evlist->workload.pid, SIGTERM);

		wait(&exit_status);

		if (err < 0)
			status = err;
		else if (WIFEXITED(exit_status))
			status = WEXITSTATUS(exit_status);
		else if (WIFSIGNALED(exit_status))
			signr = WTERMSIG(exit_status);
	} else
		status = err;

	/* this will be recalculated during process_buildids() */
	rec->samples = 0;

	if (!err) {
		if (!rec->timestamp_filename) {
			record__finish_output(rec);
		} else {
			fd = record__switch_output(rec, true);
			if (fd < 0) {
				status = fd;
				goto out_delete_session;
			}
		}
	}

	if (!err && !quiet) {
		char samples[128];
		const char *postfix = rec->timestamp_filename ?
					".<timestamp>" : "";

		if (rec->samples && !rec->opts.full_auxtrace)
			scnprintf(samples, sizeof(samples),
				  " (%" PRIu64 " samples)", rec->samples);
		else
			samples[0] = '\0';

		fprintf(stderr, "[ perf record: Captured and wrote %.3f MB %s%s%s ]\n",
			perf_data_file__size(file) / 1024.0 / 1024.0,
			file->path, postfix, samples);
	}

out_delete_session:
	perf_session__delete(session);
	return status;
}

static void callchain_debug(void)
{
	static const char *str[CALLCHAIN_MAX] = { "NONE", "FP", "DWARF", "LBR" };

	pr_debug("callchain: type %s\n", str[callchain_param.record_mode]);

	if (callchain_param.record_mode == CALLCHAIN_DWARF)
		pr_debug("callchain: stack dump size %d\n",
			 callchain_param.dump_size);
}

int record_parse_callchain_opt(const struct option *opt,
			       const char *arg,
			       int unset)
{
	int ret;
	struct record_opts *record = (struct record_opts *)opt->value;

	record->callgraph_set = true;
	callchain_param.enabled = !unset;

	/* --no-call-graph */
	if (unset) {
		callchain_param.record_mode = CALLCHAIN_NONE;
		pr_debug("callchain: disabled\n");
		return 0;
	}

	ret = parse_callchain_record_opt(arg, &callchain_param);
	if (!ret) {
		/* Enable data address sampling for DWARF unwind. */
		if (callchain_param.record_mode == CALLCHAIN_DWARF)
			record->sample_address = true;
		callchain_debug();
	}

	return ret;
}

int record_callchain_opt(const struct option *opt,
			 const char *arg __maybe_unused,
			 int unset __maybe_unused)
{
	struct record_opts *record = (struct record_opts *)opt->value;

	record->callgraph_set = true;
	callchain_param.enabled = true;

	if (callchain_param.record_mode == CALLCHAIN_NONE)
		callchain_param.record_mode = CALLCHAIN_FP;

	callchain_debug();
	return 0;
}

static int perf_record_config(const char *var, const char *value, void *cb)
{
	struct record *rec = cb;

	if (!strcmp(var, "record.build-id")) {
		if (!strcmp(value, "cache"))
			rec->no_buildid_cache = false;
		else if (!strcmp(value, "no-cache"))
			rec->no_buildid_cache = true;
		else if (!strcmp(value, "skip"))
			rec->no_buildid = true;
		else
			return -1;
		return 0;
	}
	if (!strcmp(var, "record.call-graph"))
		var = "call-graph.record-mode"; /* fall-through */

	return perf_default_config(var, value, cb);
}

struct clockid_map {
	const char *name;
	int clockid;
};

#define CLOCKID_MAP(n, c)	\
	{ .name = n, .clockid = (c), }

#define CLOCKID_END	{ .name = NULL, }


/*
 * Add the missing ones, we need to build on many distros...
 */
#ifndef CLOCK_MONOTONIC_RAW
#define CLOCK_MONOTONIC_RAW 4
#endif
#ifndef CLOCK_BOOTTIME
#define CLOCK_BOOTTIME 7
#endif
#ifndef CLOCK_TAI
#define CLOCK_TAI 11
#endif

static const struct clockid_map clockids[] = {
	/* available for all events, NMI safe */
	CLOCKID_MAP("monotonic", CLOCK_MONOTONIC),
	CLOCKID_MAP("monotonic_raw", CLOCK_MONOTONIC_RAW),

	/* available for some events */
	CLOCKID_MAP("realtime", CLOCK_REALTIME),
	CLOCKID_MAP("boottime", CLOCK_BOOTTIME),
	CLOCKID_MAP("tai", CLOCK_TAI),

	/* available for the lazy */
	CLOCKID_MAP("mono", CLOCK_MONOTONIC),
	CLOCKID_MAP("raw", CLOCK_MONOTONIC_RAW),
	CLOCKID_MAP("real", CLOCK_REALTIME),
	CLOCKID_MAP("boot", CLOCK_BOOTTIME),

	CLOCKID_END,
};

static int parse_clockid(const struct option *opt, const char *str, int unset)
{
	struct record_opts *opts = (struct record_opts *)opt->value;
	const struct clockid_map *cm;
	const char *ostr = str;

	if (unset) {
		opts->use_clockid = 0;
		return 0;
	}

	/* no arg passed */
	if (!str)
		return 0;

	/* no setting it twice */
	if (opts->use_clockid)
		return -1;

	opts->use_clockid = true;

	/* if its a number, we're done */
	if (sscanf(str, "%d", &opts->clockid) == 1)
		return 0;

	/* allow a "CLOCK_" prefix to the name */
	if (!strncasecmp(str, "CLOCK_", 6))
		str += 6;

	for (cm = clockids; cm->name; cm++) {
		if (!strcasecmp(str, cm->name)) {
			opts->clockid = cm->clockid;
			return 0;
		}
	}

	opts->use_clockid = false;
	ui__warning("unknown clockid %s, check man page\n", ostr);
	return -1;
}

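/*
 * Parse the -m/--mmap-pages argument: "pages" or "pages,auxtrace_pages",
 * where the value after the comma sizes the AUX area tracing mmap.
 */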
static int record__parse_mmap_pages(const struct option *opt,
				    const char *str,
				    int unset __maybe_unused)
{
	struct record_opts *opts = opt->value;
	char *s, *p;
	unsigned int mmap_pages;
	int ret;

	if (!str)
		return -EINVAL;

	s = strdup(str);
	if (!s)
		return -ENOMEM;

	p = strchr(s, ',');
	if (p)
		*p = '\0';

	if (*s) {
		ret = __perf_evlist__parse_mmap_pages(&mmap_pages, s);
		if (ret)
			goto out_free;
		opts->mmap_pages = mmap_pages;
	}

	if (!p) {
		ret = 0;
		goto out_free;
	}

	ret = __perf_evlist__parse_mmap_pages(&mmap_pages, p + 1);
	if (ret)
		goto out_free;

	opts->auxtrace_mmap_pages = mmap_pages;

out_free:
	free(s);
	return ret;
}
1135
Namhyung Kime5b2c202014-10-23 00:15:46 +09001136static const char * const __record_usage[] = {
Mike Galbraith9e0967532009-05-28 16:25:34 +02001137 "perf record [<options>] [<command>]",
1138 "perf record [<options>] -- <command> [<options>]",
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02001139 NULL
1140};
Namhyung Kime5b2c202014-10-23 00:15:46 +09001141const char * const *record_usage = __record_usage;
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02001142
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001143/*
Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -03001144 * XXX Ideally would be local to cmd_record() and passed to a record__new
1145 * because we need to have access to it in record__exit, that is called
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001146 * after cmd_record() exits, but since record_options need to be accessible to
1147 * builtin-script, leave it here.
1148 *
1149 * At least we don't ouch it in all the other functions here directly.
1150 *
1151 * Just say no to tons of global variables, sigh.
1152 */
Arnaldo Carvalho de Melo8c6f45a2013-12-19 14:38:03 -03001153static struct record record = {
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001154 .opts = {
Andi Kleen8affc2b2014-07-31 14:45:04 +08001155 .sample_time = true,
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001156 .mmap_pages = UINT_MAX,
1157 .user_freq = UINT_MAX,
1158 .user_interval = ULLONG_MAX,
Arnaldo Carvalho de Melo447a6012012-05-22 13:14:18 -03001159 .freq = 4000,
Namhyung Kimd1cb9fc2012-05-16 18:45:49 +09001160 .target = {
1161 .uses_mmap = true,
Adrian Hunter3aa59392013-11-15 15:52:29 +02001162 .default_per_cpu = true,
Namhyung Kimd1cb9fc2012-05-16 18:45:49 +09001163 },
Kan Liang9d9cad72015-06-17 09:51:11 -04001164 .proc_map_timeout = 500,
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001165 },
Namhyung Kime3d59112015-01-29 17:06:44 +09001166 .tool = {
1167 .sample = process_sample_event,
1168 .fork = perf_event__process_fork,
Adrian Huntercca84822015-08-19 17:29:21 +03001169 .exit = perf_event__process_exit,
Namhyung Kime3d59112015-01-29 17:06:44 +09001170 .comm = perf_event__process_comm,
1171 .mmap = perf_event__process_mmap,
1172 .mmap2 = perf_event__process_mmap2,
Adrian Huntercca84822015-08-19 17:29:21 +03001173 .ordered_events = true,
Namhyung Kime3d59112015-01-29 17:06:44 +09001174 },
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001175};
Frederic Weisbecker7865e812010-04-14 19:42:07 +02001176
Namhyung Kim76a26542015-10-22 23:28:32 +09001177const char record_callchain_help[] = CALLCHAIN_RECORD_HELP
1178 "\n\t\t\t\tDefault: fp";
Arnaldo Carvalho de Melo61eaa3b2012-10-01 15:20:58 -03001179
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001180/*
1181 * XXX Will stay a global variable till we fix builtin-script.c to stop messing
1182 * with it and switch to use the library functions in perf_evlist that came
Arnaldo Carvalho de Melob4006792013-12-19 14:43:45 -03001183 * from builtin-record.c, i.e. use record_opts,
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001184 * perf_evlist__prepare_workload, etc instead of fork+exec'in 'perf record',
1185 * using pipes, etc.
1186 */
Namhyung Kime5b2c202014-10-23 00:15:46 +09001187struct option __record_options[] = {
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001188 OPT_CALLBACK('e', "event", &record.evlist, "event",
Thomas Gleixner86847b62009-06-06 12:24:17 +02001189 "event selector. use 'perf list' to list available events",
Jiri Olsaf120f9d2011-07-14 11:25:32 +02001190 parse_events_option),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001191 OPT_CALLBACK(0, "filter", &record.evlist, "filter",
Li Zefanc171b552009-10-15 11:22:07 +08001192 "event filter", parse_filter),
Wang Nan4ba1faa2015-07-10 07:36:10 +00001193 OPT_CALLBACK_NOOPT(0, "exclude-perf", &record.evlist,
1194 NULL, "don't record events from perf itself",
1195 exclude_perf),
Namhyung Kimbea03402012-04-26 14:15:15 +09001196 OPT_STRING('p', "pid", &record.opts.target.pid, "pid",
Zhang, Yanmind6d901c2010-03-18 11:36:05 -03001197 "record events on existing process id"),
Namhyung Kimbea03402012-04-26 14:15:15 +09001198 OPT_STRING('t', "tid", &record.opts.target.tid, "tid",
Zhang, Yanmind6d901c2010-03-18 11:36:05 -03001199 "record events on existing thread id"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001200 OPT_INTEGER('r', "realtime", &record.realtime_prio,
Ingo Molnar0e9b20b2009-05-26 09:17:18 +02001201 "collect data with this RT SCHED_FIFO priority"),
Arnaldo Carvalho de Melo509051e2014-01-14 17:52:14 -03001202 OPT_BOOLEAN(0, "no-buffering", &record.opts.no_buffering,
Kirill Smelkovacac03f2011-01-12 17:59:36 +03001203 "collect data without buffering"),
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02001204 OPT_BOOLEAN('R', "raw-samples", &record.opts.raw_samples,
Frederic Weisbeckerdaac07b2009-08-13 10:27:19 +02001205 "collect raw sample records from all opened counters"),
	OPT_BOOLEAN('a', "all-cpus", &record.opts.target.system_wide,
		    "system-wide collection from all CPUs"),
	OPT_STRING('C', "cpu", &record.opts.target.cpu_list, "cpu",
		    "list of cpus to monitor"),
	OPT_U64('c', "count", &record.opts.user_interval, "event period to sample"),
	OPT_STRING('o', "output", &record.file.path, "file",
		    "output file name"),
	OPT_BOOLEAN_SET('i', "no-inherit", &record.opts.no_inherit,
			&record.opts.no_inherit_set,
			"child tasks do not inherit counters"),
	OPT_UINTEGER('F', "freq", &record.opts.user_freq, "profile at this frequency"),
	OPT_CALLBACK('m', "mmap-pages", &record.opts, "pages[,pages]",
		     "number of mmap data pages and AUX area tracing mmap pages",
		     record__parse_mmap_pages),
	OPT_BOOLEAN(0, "group", &record.opts.group,
		    "put the counters into a counter group"),
	OPT_CALLBACK_NOOPT('g', NULL, &record.opts,
			   NULL, "enables call-graph recording",
			   &record_callchain_opt),
	OPT_CALLBACK(0, "call-graph", &record.opts,
		     "record_mode[,record_size]", record_callchain_help,
		     &record_parse_callchain_opt),
	OPT_INCR('v', "verbose", &verbose,
		 "be more verbose (show counter open errors, etc)"),
	OPT_BOOLEAN('q', "quiet", &quiet, "don't print any message"),
	OPT_BOOLEAN('s', "stat", &record.opts.inherit_stat,
		    "per thread counts"),
	OPT_BOOLEAN('d', "data", &record.opts.sample_address, "Record the sample addresses"),
	OPT_BOOLEAN_SET('T', "timestamp", &record.opts.sample_time,
			&record.opts.sample_time_set,
			"Record the sample timestamps"),
	OPT_BOOLEAN('P', "period", &record.opts.period, "Record the sample period"),
	OPT_BOOLEAN('n', "no-samples", &record.opts.no_samples,
		    "don't sample"),
	OPT_BOOLEAN_SET('N', "no-buildid-cache", &record.no_buildid_cache,
			&record.no_buildid_cache_set,
			"do not update the buildid cache"),
	OPT_BOOLEAN_SET('B', "no-buildid", &record.no_buildid,
			&record.no_buildid_set,
			"do not collect buildids in perf.data"),
	OPT_CALLBACK('G', "cgroup", &record.evlist, "name",
		     "monitor event in cgroup name only",
		     parse_cgroups),
	OPT_UINTEGER('D', "delay", &record.opts.initial_delay,
		     "ms to wait before starting measurement after program start"),
	OPT_STRING('u', "uid", &record.opts.target.uid_str, "user",
		   "user to profile"),

	OPT_CALLBACK_NOOPT('b', "branch-any", &record.opts.branch_stack,
			   "branch any", "sample any taken branches",
			   parse_branch_stack),

	OPT_CALLBACK('j', "branch-filter", &record.opts.branch_stack,
		     "branch filter mask", "branch stack filter modes",
		     parse_branch_stack),
	OPT_BOOLEAN('W', "weight", &record.opts.sample_weight,
		    "sample by weight (on special events only)"),
	OPT_BOOLEAN(0, "transaction", &record.opts.sample_transaction,
		    "sample transaction flags (special events only)"),
	OPT_BOOLEAN(0, "per-thread", &record.opts.target.per_thread,
		    "use per-thread mmaps"),
	OPT_CALLBACK_OPTARG('I', "intr-regs", &record.opts.sample_intr_regs, NULL, "any register",
		    "sample selected machine registers on interrupt,"
		    " use -I ? to list register names", parse_regs),
	OPT_BOOLEAN(0, "running-time", &record.opts.running_time,
		    "Record running/enabled time of read (:S) events"),
	OPT_CALLBACK('k', "clockid", &record.opts,
		     "clockid", "clockid to use for events, see clock_gettime()",
		     parse_clockid),
	OPT_STRING_OPTARG('S', "snapshot", &record.opts.auxtrace_snapshot_opts,
			  "opts", "AUX area tracing Snapshot Mode", ""),
	OPT_UINTEGER(0, "proc-map-timeout", &record.opts.proc_map_timeout,
		     "per thread proc mmap processing timeout in ms"),
	OPT_BOOLEAN(0, "switch-events", &record.opts.record_switch_events,
		    "Record context switch events"),
	OPT_BOOLEAN_FLAG(0, "all-kernel", &record.opts.all_kernel,
			 "Configure all used events to run in kernel space.",
			 PARSE_OPT_EXCLUSIVE),
	OPT_BOOLEAN_FLAG(0, "all-user", &record.opts.all_user,
			 "Configure all used events to run in user space.",
			 PARSE_OPT_EXCLUSIVE),
	OPT_STRING(0, "clang-path", &llvm_param.clang_path, "clang path",
		   "clang binary to use for compiling BPF scriptlets"),
	OPT_STRING(0, "clang-opt", &llvm_param.clang_opt, "clang options",
		   "options passed to clang when compiling BPF scriptlets"),
	OPT_STRING(0, "vmlinux", &symbol_conf.vmlinux_name,
		   "file", "vmlinux pathname"),
	OPT_BOOLEAN(0, "buildid-all", &record.buildid_all,
		    "Record build-id of all DSOs regardless of hits"),
	OPT_BOOLEAN(0, "timestamp-filename", &record.timestamp_filename,
		    "append timestamp to output filename"),
	OPT_END()
};

struct option *record_options = __record_options;
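
/*
 * Illustrative command lines exercising some of the options above; these
 * are examples only, and "./my_workload" stands in for whatever command
 * is being profiled:
 *
 *   perf record -a -F 997 -o perf.data -- sleep 10
 *   perf record -C 0,1 -g --switch-events -- ./my_workload
 *   perf record -c 100000 --no-buildid-cache -- ./my_workload
 */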

int cmd_record(int argc, const char **argv, const char *prefix __maybe_unused)
{
	int err;
	struct record *rec = &record;
	char errbuf[BUFSIZ];

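	/*
	 * Options that depend on build-time features stay in the table even
	 * when the feature is missing; set_option_nobuild() marks them so
	 * that using them reports the missing build-time support instead of
	 * silently misbehaving.
	 */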
#ifndef HAVE_LIBBPF_SUPPORT
# define set_nobuild(s, l, c) set_option_nobuild(record_options, s, l, "NO_LIBBPF=1", c)
	set_nobuild('\0', "clang-path", true);
	set_nobuild('\0', "clang-opt", true);
# undef set_nobuild
#endif

#ifndef HAVE_BPF_PROLOGUE
# if !defined (HAVE_DWARF_SUPPORT)
#  define REASON  "NO_DWARF=1"
# elif !defined (HAVE_LIBBPF_SUPPORT)
#  define REASON  "NO_LIBBPF=1"
# else
#  define REASON  "this architecture doesn't support BPF prologue"
# endif
# define set_nobuild(s, l, c) set_option_nobuild(record_options, s, l, REASON, c)
	set_nobuild('\0', "vmlinux", true);
# undef set_nobuild
# undef REASON
#endif

	rec->evlist = perf_evlist__new();
	if (rec->evlist == NULL)
		return -ENOMEM;

	perf_config(perf_record_config, rec);

	argc = parse_options(argc, argv, record_options, record_usage,
			     PARSE_OPT_STOP_AT_NON_OPTION);
	if (!argc && target__none(&rec->opts.target))
		usage_with_options(record_usage, record_options);

	if (nr_cgroups && !rec->opts.target.system_wide) {
		usage_with_options_msg(record_usage, record_options,
			"cgroup monitoring only available in system-wide mode");
	}
	if (rec->opts.record_switch_events &&
	    !perf_can_record_switch_events()) {
		ui__error("kernel does not support recording context switch events\n");
		parse_options_usage(record_usage, record_options, "switch-events", 0);
		return -EINVAL;
	}

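	/*
	 * Set up AUX area tracing (e.g. Intel PT/BTS) where the architecture
	 * supports it; auxtrace_record__init() returning NULL with err == 0
	 * simply means no AUX area tracing is in use.
	 */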
	if (!rec->itr) {
		rec->itr = auxtrace_record__init(rec->evlist, &err);
		if (err)
			return err;
	}

	err = auxtrace_parse_snapshot_options(rec->itr, &rec->opts,
					      rec->opts.auxtrace_snapshot_opts);
	if (err)
		return err;

	err = bpf__setup_stdout(rec->evlist);
	if (err) {
		bpf__strerror_setup_stdout(rec->evlist, err, errbuf, sizeof(errbuf));
		pr_err("ERROR: Setup BPF stdout failed: %s\n", errbuf);
		return err;
	}

	err = -ENOMEM;

	symbol__init(NULL);

	if (symbol_conf.kptr_restrict)
		pr_warning(
"WARNING: Kernel address maps (/proc/{kallsyms,modules}) are restricted,\n"
"check /proc/sys/kernel/kptr_restrict.\n\n"
"Samples in kernel functions may not be resolved if a suitable vmlinux\n"
"file is not found in the buildid cache or in the vmlinux path.\n\n"
"Samples in kernel modules won't be resolved at all.\n\n"
"If some relocation was applied (e.g. kexec) symbols may be misresolved\n"
"even with a suitable vmlinux or kallsyms file.\n\n");

	if (rec->no_buildid_cache || rec->no_buildid)
		disable_buildid_cache();

	if (rec->evlist->nr_entries == 0 &&
	    perf_evlist__add_default(rec->evlist) < 0) {
		pr_err("Not enough memory for event selector list\n");
		goto out_symbol_exit;
	}

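	/*
	 * When attaching to an existing thread and inheritance was not
	 * explicitly configured on the command line, default to not
	 * inheriting counters into child tasks.
	 */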
	if (rec->opts.target.tid && !rec->opts.no_inherit_set)
		rec->opts.no_inherit = true;

	err = target__validate(&rec->opts.target);
	if (err) {
		target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
		ui__warning("%s", errbuf);
	}

	err = target__parse_uid(&rec->opts.target);
	if (err) {
		int saved_errno = errno;

		target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
		ui__error("%s", errbuf);

		err = -saved_errno;
		goto out_symbol_exit;
	}

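	/*
	 * Build the cpu and thread maps described by the target (cpu list,
	 * pid/tid, uid or system-wide) before the events are configured
	 * and opened.
	 */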
	err = -ENOMEM;
	if (perf_evlist__create_maps(rec->evlist, &rec->opts.target) < 0)
		usage_with_options(record_usage, record_options);

	err = auxtrace_record__options(rec->itr, rec->evlist, &rec->opts);
	if (err)
		goto out_symbol_exit;

	/*
	 * Collect build-ids for all DSOs when the file contains AUX area
	 * tracing data: the trace is not decoded at record time, and
	 * decoding it just to find out which DSOs were hit would take
	 * too long.
	 */
	if (rec->opts.full_auxtrace)
		rec->buildid_all = true;

	if (record_opts__config(&rec->opts)) {
		err = -EINVAL;
		goto out_symbol_exit;
	}

	err = __cmd_record(&record, argc, argv);
out_symbol_exit:
	perf_evlist__delete(rec->evlist);
	symbol__exit();
	auxtrace_record__free(rec->itr);
	return err;
}

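/*
 * Signal handler used in AUX area snapshot mode (installed for SIGUSR2
 * elsewhere in this file): ignore the signal unless snapshotting is
 * currently enabled, then disable further triggers and start capturing
 * a snapshot of the AUX area data.
 */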
static void snapshot_sig_handler(int sig __maybe_unused)
{
	if (!auxtrace_snapshot_is_enabled())
		return;
	auxtrace_snapshot_disable();
	auxtrace_snapshot_err = auxtrace_record__snapshot_start(record.itr);
	auxtrace_record__snapshot_started = 1;
}