/*
 * builtin-record.c
 *
 * Builtin record command: Record the profile of a workload
 * (or a CPU, or a PID) into the perf.data output file - for
 * later analysis via perf report.
 */
#include "builtin.h"

#include "perf.h"

#include "util/build-id.h"
#include "util/util.h"
#include "util/parse-options.h"
#include "util/parse-events.h"

#include "util/header.h"
#include "util/event.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/debug.h"
#include "util/session.h"
#include "util/tool.h"
#include "util/symbol.h"
#include "util/cpumap.h"
#include "util/thread_map.h"
#include "util/data.h"

#include <unistd.h>
#include <sched.h>
#include <sys/mman.h>


struct record {
        struct perf_tool        tool;
        struct record_opts      opts;
        u64                     bytes_written;
        struct perf_data_file   file;
        struct perf_evlist      *evlist;
        struct perf_session     *session;
        const char              *progname;
        int                     realtime_prio;
        bool                    no_buildid;
        bool                    no_buildid_cache;
        long                    samples;
};

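/*
 * Append 'size' bytes at 'bf' to the perf.data output file and account
 * them in rec->bytes_written.
 */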
static int record__write(struct record *rec, void *bf, size_t size)
{
        if (perf_data_file__write(rec->session->file, bf, size) < 0) {
                pr_err("failed to write perf data, error: %m\n");
                return -1;
        }

        rec->bytes_written += size;
        return 0;
}

static int process_synthesized_event(struct perf_tool *tool,
                                     union perf_event *event,
                                     struct perf_sample *sample __maybe_unused,
                                     struct machine *machine __maybe_unused)
{
        struct record *rec = container_of(tool, struct record, tool);
        return record__write(rec, event, event->header.size);
}

static int record__mmap_read(struct record *rec, struct perf_mmap *md)
{
        unsigned int head = perf_mmap__read_head(md);
        unsigned int old = md->prev;
        unsigned char *data = md->base + page_size;
        unsigned long size;
        void *buf;
        int rc = 0;

        if (old == head)
                return 0;

        rec->samples++;

        size = head - old;

        if ((old & md->mask) + size != (head & md->mask)) {
                buf = &data[old & md->mask];
                size = md->mask + 1 - (old & md->mask);
                old += size;

                if (record__write(rec, buf, size) < 0) {
                        rc = -1;
                        goto out;
                }
        }

        buf = &data[old & md->mask];
        size = head - old;
        old += size;

        if (record__write(rec, buf, size) < 0) {
                rc = -1;
                goto out;
        }

        md->prev = old;
        perf_mmap__write_tail(md, old);

out:
        return rc;
}

static volatile int done = 0;
static volatile int signr = -1;
static volatile int child_finished = 0;

static void sig_handler(int sig)
{
        if (sig == SIGCHLD)
                child_finished = 1;
        else
                signr = sig;

        done = 1;
}

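/*
 * atexit handler: if we were interrupted by a signal, restore its default
 * action and re-raise it so the exit status reflects the signal.
 */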
static void record__sig_exit(void)
{
        if (signr == -1)
                return;

        signal(signr, SIG_DFL);
        raise(signr);
}

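/*
 * Open the events in rec->evlist on the configured CPUs and threads,
 * retrying with perf_evsel__fallback() when an open fails, then apply the
 * event filters and mmap the ring buffers.
 */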
static int record__open(struct record *rec)
{
        char msg[512];
        struct perf_evsel *pos;
        struct perf_evlist *evlist = rec->evlist;
        struct perf_session *session = rec->session;
        struct record_opts *opts = &rec->opts;
        int rc = 0;

        perf_evlist__config(evlist, opts);

        evlist__for_each(evlist, pos) {
try_again:
                if (perf_evsel__open(pos, evlist->cpus, evlist->threads) < 0) {
                        if (perf_evsel__fallback(pos, errno, msg, sizeof(msg))) {
                                if (verbose)
                                        ui__warning("%s\n", msg);
                                goto try_again;
                        }

                        rc = -errno;
                        perf_evsel__open_strerror(pos, &opts->target,
                                                  errno, msg, sizeof(msg));
                        ui__error("%s\n", msg);
                        goto out;
                }
        }

        if (perf_evlist__apply_filters(evlist)) {
                error("failed to set filter with %d (%s)\n", errno,
                        strerror(errno));
                rc = -1;
                goto out;
        }

        if (perf_evlist__mmap(evlist, opts->mmap_pages, false) < 0) {
                if (errno == EPERM) {
                        pr_err("Permission error mapping pages.\n"
                               "Consider increasing "
                               "/proc/sys/kernel/perf_event_mlock_kb,\n"
                               "or try again with a smaller value of -m/--mmap_pages.\n"
                               "(current value: %u)\n", opts->mmap_pages);
                        rc = -errno;
                } else {
                        pr_err("failed to mmap with %d (%s)\n", errno, strerror(errno));
                        rc = -errno;
                }
                goto out;
        }

        session->evlist = evlist;
        perf_session__set_id_hdr_size(session);
out:
        return rc;
}

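/*
 * Post-process the recorded data: walk the events written so far and mark
 * the DSOs that got hits, so their build-ids can be stored in the header.
 */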
static int process_buildids(struct record *rec)
{
        struct perf_data_file *file = &rec->file;
        struct perf_session *session = rec->session;
        u64 start = session->header.data_offset;

        u64 size = lseek(file->fd, 0, SEEK_CUR);
        if (size == 0)
                return 0;

        return __perf_session__process_events(session, start,
                                              size - start,
                                              size, &build_id__mark_dso_hit_ops);
}

static void perf_event__synthesize_guest_os(struct machine *machine, void *data)
{
        int err;
        struct perf_tool *tool = data;
        /*
         * As for the guest kernel, when processing the record and report
         * subcommands we arrange the module mmaps prior to the guest kernel
         * mmap and trigger a preload of the dso, because by default guest
         * module symbols are loaded from guest kallsyms instead of
         * /lib/modules/XXX/XXX. This avoids missing symbols when the first
         * address falls in a module rather than in the guest kernel.
         */
        err = perf_event__synthesize_modules(tool, process_synthesized_event,
                                             machine);
        if (err < 0)
                pr_err("Couldn't record guest kernel [%d]'s reference"
                       " relocation symbol.\n", machine->pid);

        /*
         * We use _stext for the guest kernel because the guest kernel's
         * /proc/kallsyms sometimes has no _text.
         */
        err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
                                                 machine);
        if (err < 0)
                pr_err("Couldn't record guest kernel [%d]'s reference"
                       " relocation symbol.\n", machine->pid);
}

static struct perf_event_header finished_round_event = {
        .size = sizeof(struct perf_event_header),
        .type = PERF_RECORD_FINISHED_ROUND,
};

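/*
 * Drain all mmap'ed ring buffers and, if anything was written, emit a
 * PERF_RECORD_FINISHED_ROUND event to delimit the round.
 */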
static int record__mmap_read_all(struct record *rec)
{
        u64 bytes_written = rec->bytes_written;
        int i;
        int rc = 0;

        for (i = 0; i < rec->evlist->nr_mmaps; i++) {
                if (rec->evlist->mmap[i].base) {
                        if (record__mmap_read(rec, &rec->evlist->mmap[i]) != 0) {
                                rc = -1;
                                goto out;
                        }
                }
        }

        /*
         * Mark the round finished in case we wrote
         * at least one event.
         */
        if (bytes_written != rec->bytes_written)
                rc = record__write(rec, &finished_round_event, sizeof(finished_round_event));

out:
        return rc;
}

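/*
 * Start with all header features set, then clear the ones that do not apply
 * to this session (build ids, tracing data, branch stacks).
 */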
static void record__init_features(struct record *rec)
{
        struct perf_session *session = rec->session;
        int feat;

        for (feat = HEADER_FIRST_FEATURE; feat < HEADER_LAST_FEATURE; feat++)
                perf_header__set_feat(&session->header, feat);

        if (rec->no_buildid)
                perf_header__clear_feat(&session->header, HEADER_BUILD_ID);

        if (!have_tracepoints(&rec->evlist->entries))
                perf_header__clear_feat(&session->header, HEADER_TRACING_DATA);

        if (!rec->opts.branch_stack)
                perf_header__clear_feat(&session->header, HEADER_BRANCH_STACK);
}

static volatile int workload_exec_errno;

/*
 * perf_evlist__prepare_workload will send a SIGUSR1 if the fork fails,
 * since we asked for it by setting its want_signal to true.
 */
static void workload_exec_failed_signal(int signo __maybe_unused,
                                        siginfo_t *info,
                                        void *ucontext __maybe_unused)
{
        workload_exec_errno = info->si_value.sival_int;
        done = 1;
        child_finished = 1;
}

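/*
 * The main recording loop: set up the session and write the header,
 * synthesize the pre-existing kernel, module and thread events, start the
 * workload if one was given, then keep draining the ring buffers until we
 * are told to stop.
 */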
static int __cmd_record(struct record *rec, int argc, const char **argv)
{
        int err;
        int status = 0;
        unsigned long waking = 0;
        const bool forks = argc > 0;
        struct machine *machine;
        struct perf_tool *tool = &rec->tool;
        struct record_opts *opts = &rec->opts;
        struct perf_data_file *file = &rec->file;
        struct perf_session *session;
        bool disabled = false;

        rec->progname = argv[0];

        atexit(record__sig_exit);
        signal(SIGCHLD, sig_handler);
        signal(SIGINT, sig_handler);
        signal(SIGTERM, sig_handler);

        session = perf_session__new(file, false, NULL);
        if (session == NULL) {
                pr_err("Perf session creation failed.\n");
                return -1;
        }

        rec->session = session;

        record__init_features(rec);

        if (forks) {
                err = perf_evlist__prepare_workload(rec->evlist, &opts->target,
                                                    argv, file->is_pipe,
                                                    workload_exec_failed_signal);
                if (err < 0) {
                        pr_err("Couldn't run the workload!\n");
                        status = err;
                        goto out_delete_session;
                }
        }

        if (record__open(rec) != 0) {
                err = -1;
                goto out_child;
        }

        if (!rec->evlist->nr_groups)
                perf_header__clear_feat(&session->header, HEADER_GROUP_DESC);

        if (file->is_pipe) {
                err = perf_header__write_pipe(file->fd);
                if (err < 0)
                        goto out_child;
        } else {
                err = perf_session__write_header(session, rec->evlist,
                                                 file->fd, false);
                if (err < 0)
                        goto out_child;
        }

        if (!rec->no_buildid
            && !perf_header__has_feat(&session->header, HEADER_BUILD_ID)) {
                pr_err("Couldn't generate buildids. "
                       "Use --no-buildid to profile anyway.\n");
                err = -1;
                goto out_child;
        }

        machine = &session->machines.host;

        if (file->is_pipe) {
                err = perf_event__synthesize_attrs(tool, session,
                                                   process_synthesized_event);
                if (err < 0) {
                        pr_err("Couldn't synthesize attrs.\n");
                        goto out_child;
                }

                if (have_tracepoints(&rec->evlist->entries)) {
                        /*
                         * FIXME err <= 0 here actually means that
                         * there were no tracepoints so it's not really
                         * an error, just that we don't need to
                         * synthesize anything.  We really have to
                         * return this more properly and also
                         * propagate errors that now end up calling die()
                         */
                        err = perf_event__synthesize_tracing_data(tool, file->fd, rec->evlist,
                                                                  process_synthesized_event);
                        if (err <= 0) {
                                pr_err("Couldn't record tracing data.\n");
                                goto out_child;
                        }
                        rec->bytes_written += err;
                }
        }

        err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
                                                 machine);
        if (err < 0)
                pr_err("Couldn't record kernel reference relocation symbol\n"
                       "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
                       "Check /proc/kallsyms permission or run as root.\n");

        err = perf_event__synthesize_modules(tool, process_synthesized_event,
                                             machine);
        if (err < 0)
                pr_err("Couldn't record kernel module information.\n"
                       "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
                       "Check /proc/modules permission or run as root.\n");

        if (perf_guest) {
                machines__process_guests(&session->machines,
                                         perf_event__synthesize_guest_os, tool);
        }

        err = __machine__synthesize_threads(machine, tool, &opts->target, rec->evlist->threads,
                                            process_synthesized_event, opts->sample_address);
        if (err != 0)
                goto out_child;

        if (rec->realtime_prio) {
                struct sched_param param;

                param.sched_priority = rec->realtime_prio;
                if (sched_setscheduler(0, SCHED_FIFO, &param)) {
                        pr_err("Could not set realtime priority.\n");
                        err = -1;
                        goto out_child;
                }
        }

        /*
         * When perf is starting the traced process, all the events
         * (apart from group members) have enable_on_exec=1 set,
         * so don't spoil it by prematurely enabling them.
         */
        if (!target__none(&opts->target) && !opts->initial_delay)
                perf_evlist__enable(rec->evlist);

        /*
         * Let the child rip
         */
        if (forks)
                perf_evlist__start_workload(rec->evlist);

        if (opts->initial_delay) {
                usleep(opts->initial_delay * 1000);
                perf_evlist__enable(rec->evlist);
        }

        for (;;) {
                int hits = rec->samples;

                if (record__mmap_read_all(rec) < 0) {
                        err = -1;
                        goto out_child;
                }

                if (hits == rec->samples) {
                        if (done)
                                break;
                        err = poll(rec->evlist->pollfd, rec->evlist->nr_fds, -1);
                        /*
                         * Propagate error, only if there's any. Ignore positive
                         * number of returned events and interrupt error.
                         */
                        if (err > 0 || (err < 0 && errno == EINTR))
                                err = 0;
                        waking++;
                }

                /*
                 * When perf is starting the traced process, at the end events
                 * die with the process and we wait for that. Thus no need to
                 * disable events in this case.
                 */
                if (done && !disabled && !target__none(&opts->target)) {
                        perf_evlist__disable(rec->evlist);
                        disabled = true;
                }
        }

        if (forks && workload_exec_errno) {
                char msg[512];
                const char *emsg = strerror_r(workload_exec_errno, msg, sizeof(msg));
                pr_err("Workload failed: %s\n", emsg);
                err = -1;
                goto out_child;
        }

        if (!quiet) {
                fprintf(stderr, "[ perf record: Woken up %ld times to write data ]\n", waking);

                /*
                 * Approximate RIP event size: 24 bytes.
                 */
                fprintf(stderr,
                        "[ perf record: Captured and wrote %.3f MB %s (~%" PRIu64 " samples) ]\n",
                        (double)rec->bytes_written / 1024.0 / 1024.0,
                        file->path,
                        rec->bytes_written / 24);
        }

out_child:
        if (forks) {
                int exit_status;

                if (!child_finished)
                        kill(rec->evlist->workload.pid, SIGTERM);

                wait(&exit_status);

                if (err < 0)
                        status = err;
                else if (WIFEXITED(exit_status))
                        status = WEXITSTATUS(exit_status);
                else if (WIFSIGNALED(exit_status))
                        signr = WTERMSIG(exit_status);
        } else
                status = err;

        if (!err && !file->is_pipe) {
                rec->session->header.data_size += rec->bytes_written;

                if (!rec->no_buildid)
                        process_buildids(rec);
                perf_session__write_header(rec->session, rec->evlist,
                                           file->fd, true);
        }

out_delete_session:
        perf_session__delete(session);
        return status;
}

#define BRANCH_OPT(n, m) \
        { .name = n, .mode = (m) }

#define BRANCH_END { .name = NULL }

struct branch_mode {
        const char *name;
        int mode;
};

static const struct branch_mode branch_modes[] = {
        BRANCH_OPT("u", PERF_SAMPLE_BRANCH_USER),
        BRANCH_OPT("k", PERF_SAMPLE_BRANCH_KERNEL),
        BRANCH_OPT("hv", PERF_SAMPLE_BRANCH_HV),
        BRANCH_OPT("any", PERF_SAMPLE_BRANCH_ANY),
        BRANCH_OPT("any_call", PERF_SAMPLE_BRANCH_ANY_CALL),
        BRANCH_OPT("any_ret", PERF_SAMPLE_BRANCH_ANY_RETURN),
        BRANCH_OPT("ind_call", PERF_SAMPLE_BRANCH_IND_CALL),
        BRANCH_OPT("abort_tx", PERF_SAMPLE_BRANCH_ABORT_TX),
        BRANCH_OPT("in_tx", PERF_SAMPLE_BRANCH_IN_TX),
        BRANCH_OPT("no_tx", PERF_SAMPLE_BRANCH_NO_TX),
        BRANCH_OPT("cond", PERF_SAMPLE_BRANCH_COND),
        BRANCH_END
};

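/*
 * Parse the comma separated list of branch filter names given to -b or
 * --branch-filter into a PERF_SAMPLE_BRANCH_* mask, defaulting to "any"
 * when only privilege levels were specified.
 */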
static int
parse_branch_stack(const struct option *opt, const char *str, int unset)
{
#define ONLY_PLM \
        (PERF_SAMPLE_BRANCH_USER        |\
         PERF_SAMPLE_BRANCH_KERNEL      |\
         PERF_SAMPLE_BRANCH_HV)

        uint64_t *mode = (uint64_t *)opt->value;
        const struct branch_mode *br;
        char *s, *os = NULL, *p;
        int ret = -1;

        if (unset)
                return 0;

        /*
         * cannot set it twice, -b + --branch-filter for instance
         */
        if (*mode)
                return -1;

        /* str may be NULL in case no arg is passed to -b */
        if (str) {
                /* because str is read-only */
                s = os = strdup(str);
                if (!s)
                        return -1;

                for (;;) {
                        p = strchr(s, ',');
                        if (p)
                                *p = '\0';

                        for (br = branch_modes; br->name; br++) {
                                if (!strcasecmp(s, br->name))
                                        break;
                        }
                        if (!br->name) {
                                ui__warning("unknown branch filter %s,"
                                            " check man page\n", s);
                                goto error;
                        }

                        *mode |= br->mode;

                        if (!p)
                                break;

                        s = p + 1;
                }
        }
        ret = 0;

        /* default to any branch */
        if ((*mode & ~ONLY_PLM) == 0) {
                *mode = PERF_SAMPLE_BRANCH_ANY;
        }
error:
        free(os);
        return ret;
}

#ifdef HAVE_DWARF_UNWIND_SUPPORT
static int get_stack_size(char *str, unsigned long *_size)
{
        char *endptr;
        unsigned long size;
        unsigned long max_size = round_down(USHRT_MAX, sizeof(u64));

        size = strtoul(str, &endptr, 0);

        do {
                if (*endptr)
                        break;

                size = round_up(size, sizeof(u64));
                if (!size || size > max_size)
                        break;

                *_size = size;
                return 0;

        } while (0);

        pr_err("callchain: Incorrect stack dump size (max %ld): %s\n",
               max_size, str);
        return -1;
}
#endif /* HAVE_DWARF_UNWIND_SUPPORT */

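/*
 * Parse the --call-graph argument: "fp" selects frame pointer based
 * callchains; when DWARF unwind support is built in, "dwarf[,<size>]"
 * selects DWARF mode with an optional stack dump size.
 */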
int record_parse_callchain(const char *arg, struct record_opts *opts)
{
        char *tok, *name, *saveptr = NULL;
        char *buf;
        int ret = -1;

        /* We need buffer that we know we can write to. */
        buf = malloc(strlen(arg) + 1);
        if (!buf)
                return -ENOMEM;

        strcpy(buf, arg);

        tok = strtok_r((char *)buf, ",", &saveptr);
        name = tok ? : (char *)buf;

        do {
                /* Framepointer style */
                if (!strncmp(name, "fp", sizeof("fp"))) {
                        if (!strtok_r(NULL, ",", &saveptr)) {
                                opts->call_graph = CALLCHAIN_FP;
                                ret = 0;
                        } else
                                pr_err("callchain: No more arguments "
                                       "needed for -g fp\n");
                        break;

#ifdef HAVE_DWARF_UNWIND_SUPPORT
                /* Dwarf style */
                } else if (!strncmp(name, "dwarf", sizeof("dwarf"))) {
                        const unsigned long default_stack_dump_size = 8192;

                        ret = 0;
                        opts->call_graph = CALLCHAIN_DWARF;
                        opts->stack_dump_size = default_stack_dump_size;

                        tok = strtok_r(NULL, ",", &saveptr);
                        if (tok) {
                                unsigned long size = 0;

                                ret = get_stack_size(tok, &size);
                                opts->stack_dump_size = size;
                        }
#endif /* HAVE_DWARF_UNWIND_SUPPORT */
                } else {
                        pr_err("callchain: Unknown --call-graph option "
                               "value: %s\n", arg);
                        break;
                }

        } while (0);

        free(buf);
        return ret;
}

static void callchain_debug(struct record_opts *opts)
{
        static const char *str[CALLCHAIN_MAX] = { "NONE", "FP", "DWARF" };

        pr_debug("callchain: type %s\n", str[opts->call_graph]);

        if (opts->call_graph == CALLCHAIN_DWARF)
                pr_debug("callchain: stack dump size %d\n",
                         opts->stack_dump_size);
}

int record_parse_callchain_opt(const struct option *opt,
                               const char *arg,
                               int unset)
{
        struct record_opts *opts = opt->value;
        int ret;

        opts->call_graph_enabled = !unset;

        /* --no-call-graph */
        if (unset) {
                opts->call_graph = CALLCHAIN_NONE;
                pr_debug("callchain: disabled\n");
                return 0;
        }

        ret = record_parse_callchain(arg, opts);
        if (!ret)
                callchain_debug(opts);

        return ret;
}

int record_callchain_opt(const struct option *opt,
                         const char *arg __maybe_unused,
                         int unset __maybe_unused)
{
        struct record_opts *opts = opt->value;

        opts->call_graph_enabled = !unset;

        if (opts->call_graph == CALLCHAIN_NONE)
                opts->call_graph = CALLCHAIN_FP;

        callchain_debug(opts);
        return 0;
}

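/* Handle perf config file entries, currently only record.call-graph. */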
static int perf_record_config(const char *var, const char *value, void *cb)
{
        struct record *rec = cb;

        if (!strcmp(var, "record.call-graph"))
                return record_parse_callchain(value, &rec->opts);

        return perf_default_config(var, value, cb);
}

static const char * const record_usage[] = {
        "perf record [<options>] [<command>]",
        "perf record [<options>] -- <command> [<options>]",
        NULL
};

/*
 * XXX Ideally would be local to cmd_record() and passed to a record__new
 * because we need to have access to it in record__exit, that is called
 * after cmd_record() exits, but since record_options need to be accessible to
 * builtin-script, leave it here.
 *
 * At least we don't ouch it in all the other functions here directly.
 *
 * Just say no to tons of global variables, sigh.
 */
static struct record record = {
        .opts = {
                .sample_time         = true,
                .mmap_pages          = UINT_MAX,
                .user_freq           = UINT_MAX,
                .user_interval       = ULLONG_MAX,
                .freq                = 4000,
                .target              = {
                        .uses_mmap   = true,
                        .default_per_cpu = true,
                },
        },
};

#define CALLCHAIN_HELP "setup and enables call-graph (stack chain/backtrace) recording: "

#ifdef HAVE_DWARF_UNWIND_SUPPORT
const char record_callchain_help[] = CALLCHAIN_HELP "fp dwarf";
#else
const char record_callchain_help[] = CALLCHAIN_HELP "fp";
#endif

/*
 * XXX Will stay a global variable till we fix builtin-script.c to stop messing
 * with it and switch to use the library functions in perf_evlist that came
 * from builtin-record.c, i.e. use record_opts,
 * perf_evlist__prepare_workload, etc instead of fork+exec'in 'perf record',
 * using pipes, etc.
 */
const struct option record_options[] = {
        OPT_CALLBACK('e', "event", &record.evlist, "event",
                     "event selector. use 'perf list' to list available events",
                     parse_events_option),
        OPT_CALLBACK(0, "filter", &record.evlist, "filter",
                     "event filter", parse_filter),
        OPT_STRING('p', "pid", &record.opts.target.pid, "pid",
                   "record events on existing process id"),
        OPT_STRING('t', "tid", &record.opts.target.tid, "tid",
                   "record events on existing thread id"),
        OPT_INTEGER('r', "realtime", &record.realtime_prio,
                    "collect data with this RT SCHED_FIFO priority"),
        OPT_BOOLEAN(0, "no-buffering", &record.opts.no_buffering,
                    "collect data without buffering"),
        OPT_BOOLEAN('R', "raw-samples", &record.opts.raw_samples,
                    "collect raw sample records from all opened counters"),
        OPT_BOOLEAN('a', "all-cpus", &record.opts.target.system_wide,
                    "system-wide collection from all CPUs"),
        OPT_STRING('C', "cpu", &record.opts.target.cpu_list, "cpu",
                   "list of cpus to monitor"),
        OPT_U64('c', "count", &record.opts.user_interval, "event period to sample"),
        OPT_STRING('o', "output", &record.file.path, "file",
                   "output file name"),
        OPT_BOOLEAN_SET('i', "no-inherit", &record.opts.no_inherit,
                        &record.opts.no_inherit_set,
                        "child tasks do not inherit counters"),
        OPT_UINTEGER('F', "freq", &record.opts.user_freq, "profile at this frequency"),
        OPT_CALLBACK('m', "mmap-pages", &record.opts.mmap_pages, "pages",
                     "number of mmap data pages",
                     perf_evlist__parse_mmap_pages),
        OPT_BOOLEAN(0, "group", &record.opts.group,
                    "put the counters into a counter group"),
        OPT_CALLBACK_NOOPT('g', NULL, &record.opts,
                           NULL, "enables call-graph recording",
                           &record_callchain_opt),
        OPT_CALLBACK(0, "call-graph", &record.opts,
                     "mode[,dump_size]", record_callchain_help,
                     &record_parse_callchain_opt),
        OPT_INCR('v', "verbose", &verbose,
                 "be more verbose (show counter open errors, etc)"),
        OPT_BOOLEAN('q', "quiet", &quiet, "don't print any message"),
        OPT_BOOLEAN('s', "stat", &record.opts.inherit_stat,
                    "per thread counts"),
        OPT_BOOLEAN('d', "data", &record.opts.sample_address,
                    "Sample addresses"),
        OPT_BOOLEAN('T', "timestamp", &record.opts.sample_time, "Sample timestamps"),
        OPT_BOOLEAN('P', "period", &record.opts.period, "Sample period"),
        OPT_BOOLEAN('n', "no-samples", &record.opts.no_samples,
                    "don't sample"),
        OPT_BOOLEAN('N', "no-buildid-cache", &record.no_buildid_cache,
                    "do not update the buildid cache"),
        OPT_BOOLEAN('B', "no-buildid", &record.no_buildid,
                    "do not collect buildids in perf.data"),
        OPT_CALLBACK('G', "cgroup", &record.evlist, "name",
                     "monitor event in cgroup name only",
                     parse_cgroups),
        OPT_UINTEGER('D', "delay", &record.opts.initial_delay,
                     "ms to wait before starting measurement after program start"),
        OPT_STRING('u', "uid", &record.opts.target.uid_str, "user",
                   "user to profile"),

        OPT_CALLBACK_NOOPT('b', "branch-any", &record.opts.branch_stack,
                           "branch any", "sample any taken branches",
                           parse_branch_stack),

        OPT_CALLBACK('j', "branch-filter", &record.opts.branch_stack,
                     "branch filter mask", "branch stack filter modes",
                     parse_branch_stack),
        OPT_BOOLEAN('W', "weight", &record.opts.sample_weight,
                    "sample by weight (on special events only)"),
        OPT_BOOLEAN(0, "transaction", &record.opts.sample_transaction,
                    "sample transaction flags (special events only)"),
        OPT_BOOLEAN(0, "per-thread", &record.opts.target.per_thread,
                    "use per-thread mmaps"),
        OPT_END()
};

int cmd_record(int argc, const char **argv, const char *prefix __maybe_unused)
{
        int err = -ENOMEM;
        struct record *rec = &record;
        char errbuf[BUFSIZ];

        rec->evlist = perf_evlist__new();
        if (rec->evlist == NULL)
                return -ENOMEM;

        perf_config(perf_record_config, rec);

        argc = parse_options(argc, argv, record_options, record_usage,
                             PARSE_OPT_STOP_AT_NON_OPTION);
        if (!argc && target__none(&rec->opts.target))
                usage_with_options(record_usage, record_options);

        if (nr_cgroups && !rec->opts.target.system_wide) {
                ui__error("cgroup monitoring only available in"
                          " system-wide mode\n");
                usage_with_options(record_usage, record_options);
        }

        symbol__init(NULL);

        if (symbol_conf.kptr_restrict)
                pr_warning(
"WARNING: Kernel address maps (/proc/{kallsyms,modules}) are restricted,\n"
"check /proc/sys/kernel/kptr_restrict.\n\n"
"Samples in kernel functions may not be resolved if a suitable vmlinux\n"
"file is not found in the buildid cache or in the vmlinux path.\n\n"
"Samples in kernel modules won't be resolved at all.\n\n"
"If some relocation was applied (e.g. kexec) symbols may be misresolved\n"
"even with a suitable vmlinux or kallsyms file.\n\n");

        if (rec->no_buildid_cache || rec->no_buildid)
                disable_buildid_cache();

        if (rec->evlist->nr_entries == 0 &&
            perf_evlist__add_default(rec->evlist) < 0) {
                pr_err("Not enough memory for event selector list\n");
                goto out_symbol_exit;
        }

        if (rec->opts.target.tid && !rec->opts.no_inherit_set)
                rec->opts.no_inherit = true;

        err = target__validate(&rec->opts.target);
        if (err) {
                target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
                ui__warning("%s", errbuf);
        }

        err = target__parse_uid(&rec->opts.target);
        if (err) {
                int saved_errno = errno;

                target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
                ui__error("%s", errbuf);

                err = -saved_errno;
                goto out_symbol_exit;
        }

        err = -ENOMEM;
        if (perf_evlist__create_maps(rec->evlist, &rec->opts.target) < 0)
                usage_with_options(record_usage, record_options);

        if (record_opts__config(&rec->opts)) {
                err = -EINVAL;
                goto out_symbol_exit;
        }

        err = __cmd_record(&record, argc, argv);
out_symbol_exit:
        perf_evlist__delete(rec->evlist);
        symbol__exit();
        return err;
}