#include <linux/kernel.h>
#include <traceevent/event-parse.h>

#include <byteswap.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/mman.h>

#include "evlist.h"
#include "evsel.h"
#include "session.h"
#include "tool.h"
#include "sort.h"
#include "util.h"
#include "cpumap.h"
#include "perf_regs.h"
#include "asm/bug.h"

static int machines__deliver_event(struct machines *machines,
				   struct perf_evlist *evlist,
				   union perf_event *event,
				   struct perf_sample *sample,
				   struct perf_tool *tool, u64 file_offset);

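/*
 * Read the perf.data header and, for non-pipe files, check that all evsels
 * agree on sample_type, sample_id_all and read_format before any events
 * are processed.
 */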
static int perf_session__open(struct perf_session *session)
{
	struct perf_data_file *file = session->file;

	if (perf_session__read_header(session) < 0) {
		pr_err("incompatible file format (rerun with -v to learn more)");
		return -1;
	}

	if (perf_data_file__is_pipe(file))
		return 0;

	if (!perf_evlist__valid_sample_type(session->evlist)) {
		pr_err("non matching sample_type");
		return -1;
	}

	if (!perf_evlist__valid_sample_id_all(session->evlist)) {
		pr_err("non matching sample_id_all");
		return -1;
	}

	if (!perf_evlist__valid_read_format(session->evlist)) {
		pr_err("non matching read_format");
		return -1;
	}

	return 0;
}

void perf_session__set_id_hdr_size(struct perf_session *session)
{
	u16 id_hdr_size = perf_evlist__id_hdr_size(session->evlist);

	machines__set_id_hdr_size(&session->machines, id_hdr_size);
}

int perf_session__create_kernel_maps(struct perf_session *session)
{
	int ret = machine__create_kernel_maps(&session->machines.host);

	if (ret >= 0)
		ret = machines__create_guest_kernel_maps(&session->machines);
	return ret;
}

static void perf_session__destroy_kernel_maps(struct perf_session *session)
{
	machines__destroy_kernel_maps(&session->machines);
}

static bool perf_session__has_comm_exec(struct perf_session *session)
{
	struct perf_evsel *evsel;

	evlist__for_each(session->evlist, evsel) {
		if (evsel->attr.comm_exec)
			return true;
	}

	return false;
}

static void perf_session__set_comm_exec(struct perf_session *session)
{
	bool comm_exec = perf_session__has_comm_exec(session);

	machines__set_comm_exec(&session->machines, comm_exec);
}

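/*
 * Delivery callback used by the ordered_events queue: once events have been
 * time sorted, each one is handed to the normal machines__deliver_event()
 * path.
 */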
static int ordered_events__deliver_event(struct ordered_events *oe,
					 struct ordered_event *event,
					 struct perf_sample *sample)
{
	return machines__deliver_event(oe->machines, oe->evlist, event->event,
				       sample, oe->tool, event->file_offset);
}

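/*
 * Allocate a session, open and validate the data file when one is given,
 * create the kernel maps when the session is used for writing (or has no
 * file), and set up ordered (timestamp sorted) event delivery if the tool
 * can use it.
 */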
struct perf_session *perf_session__new(struct perf_data_file *file,
				       bool repipe, struct perf_tool *tool)
{
	struct perf_session *session = zalloc(sizeof(*session));

	if (!session)
		goto out;

	session->repipe = repipe;
	machines__init(&session->machines);

	if (file) {
		if (perf_data_file__open(file))
			goto out_delete;

		session->file = file;

		if (perf_data_file__is_read(file)) {
			if (perf_session__open(session) < 0)
				goto out_close;

			perf_session__set_id_hdr_size(session);
			perf_session__set_comm_exec(session);
		}
	}

	if (!file || perf_data_file__is_write(file)) {
		/*
		 * In O_RDONLY mode this will be performed when reading the
		 * kernel MMAP event, in perf_event__process_mmap().
		 */
		if (perf_session__create_kernel_maps(session) < 0)
			pr_warning("Cannot read kernel map\n");
	}

	if (tool && tool->ordering_requires_timestamps &&
	    tool->ordered_events && !perf_evlist__sample_id_all(session->evlist)) {
		dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n");
		tool->ordered_events = false;
	} else {
		ordered_events__init(&session->ordered_events, &session->machines,
				     session->evlist, tool, ordered_events__deliver_event);
	}

	return session;

 out_close:
	perf_data_file__close(file);
 out_delete:
	perf_session__delete(session);
 out:
	return NULL;
}

static void perf_session__delete_threads(struct perf_session *session)
{
	machine__delete_threads(&session->machines.host);
}

static void perf_session_env__delete(struct perf_session_env *env)
{
	zfree(&env->hostname);
	zfree(&env->os_release);
	zfree(&env->version);
	zfree(&env->arch);
	zfree(&env->cpu_desc);
	zfree(&env->cpuid);

	zfree(&env->cmdline);
	zfree(&env->sibling_cores);
	zfree(&env->sibling_threads);
	zfree(&env->numa_nodes);
	zfree(&env->pmu_mappings);
}

void perf_session__delete(struct perf_session *session)
{
	perf_session__destroy_kernel_maps(session);
	perf_session__delete_threads(session);
	perf_session_env__delete(&session->header.env);
	machines__exit(&session->machines);
	if (session->file)
		perf_data_file__close(session->file);
	free(session);
}

static int process_event_synth_tracing_data_stub(struct perf_tool *tool
						 __maybe_unused,
						 union perf_event *event
						 __maybe_unused,
						 struct perf_session *session
						 __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_synth_attr_stub(struct perf_tool *tool __maybe_unused,
					 union perf_event *event __maybe_unused,
					 struct perf_evlist **pevlist
					 __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_sample_stub(struct perf_tool *tool __maybe_unused,
				     union perf_event *event __maybe_unused,
				     struct perf_sample *sample __maybe_unused,
				     struct perf_evsel *evsel __maybe_unused,
				     struct machine *machine __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_stub(struct perf_tool *tool __maybe_unused,
			      union perf_event *event __maybe_unused,
			      struct perf_sample *sample __maybe_unused,
			      struct machine *machine __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_finished_round_stub(struct perf_tool *tool __maybe_unused,
				       union perf_event *event __maybe_unused,
				       struct perf_session *perf_session
				       __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_finished_round(struct perf_tool *tool,
				  union perf_event *event,
				  struct perf_session *session);

static int process_id_index_stub(struct perf_tool *tool __maybe_unused,
				 union perf_event *event __maybe_unused,
				 struct perf_session *perf_session
				 __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

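/* Fill in stub handlers for any callbacks the tool did not provide. */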
void perf_tool__fill_defaults(struct perf_tool *tool)
{
	if (tool->sample == NULL)
		tool->sample = process_event_sample_stub;
	if (tool->mmap == NULL)
		tool->mmap = process_event_stub;
	if (tool->mmap2 == NULL)
		tool->mmap2 = process_event_stub;
	if (tool->comm == NULL)
		tool->comm = process_event_stub;
	if (tool->fork == NULL)
		tool->fork = process_event_stub;
	if (tool->exit == NULL)
		tool->exit = process_event_stub;
	if (tool->lost == NULL)
		tool->lost = perf_event__process_lost;
	if (tool->read == NULL)
		tool->read = process_event_sample_stub;
	if (tool->throttle == NULL)
		tool->throttle = process_event_stub;
	if (tool->unthrottle == NULL)
		tool->unthrottle = process_event_stub;
	if (tool->attr == NULL)
		tool->attr = process_event_synth_attr_stub;
	if (tool->tracing_data == NULL)
		tool->tracing_data = process_event_synth_tracing_data_stub;
	if (tool->build_id == NULL)
		tool->build_id = process_finished_round_stub;
	if (tool->finished_round == NULL) {
		if (tool->ordered_events)
			tool->finished_round = process_finished_round;
		else
			tool->finished_round = process_finished_round_stub;
	}
	if (tool->id_index == NULL)
		tool->id_index = process_id_index_stub;
}

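/*
 * Byte-swap the sample_id_all fields that trail the event payload; @data
 * points just past the record-specific data.
 */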
static void swap_sample_id_all(union perf_event *event, void *data)
{
	void *end = (void *) event + event->header.size;
	int size = end - data;

	BUG_ON(size % sizeof(u64));
	mem_bswap_64(data, size);
}

static void perf_event__all64_swap(union perf_event *event,
				   bool sample_id_all __maybe_unused)
{
	struct perf_event_header *hdr = &event->header;
	mem_bswap_64(hdr + 1, event->header.size - sizeof(*hdr));
}

static void perf_event__comm_swap(union perf_event *event, bool sample_id_all)
{
	event->comm.pid = bswap_32(event->comm.pid);
	event->comm.tid = bswap_32(event->comm.tid);

	if (sample_id_all) {
		void *data = &event->comm.comm;

		data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
		swap_sample_id_all(event, data);
	}
}

static void perf_event__mmap_swap(union perf_event *event,
				  bool sample_id_all)
{
	event->mmap.pid = bswap_32(event->mmap.pid);
	event->mmap.tid = bswap_32(event->mmap.tid);
	event->mmap.start = bswap_64(event->mmap.start);
	event->mmap.len = bswap_64(event->mmap.len);
	event->mmap.pgoff = bswap_64(event->mmap.pgoff);

	if (sample_id_all) {
		void *data = &event->mmap.filename;

		data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
		swap_sample_id_all(event, data);
	}
}

static void perf_event__mmap2_swap(union perf_event *event,
				   bool sample_id_all)
{
	event->mmap2.pid = bswap_32(event->mmap2.pid);
	event->mmap2.tid = bswap_32(event->mmap2.tid);
	event->mmap2.start = bswap_64(event->mmap2.start);
	event->mmap2.len = bswap_64(event->mmap2.len);
	event->mmap2.pgoff = bswap_64(event->mmap2.pgoff);
	event->mmap2.maj = bswap_32(event->mmap2.maj);
	event->mmap2.min = bswap_32(event->mmap2.min);
	event->mmap2.ino = bswap_64(event->mmap2.ino);

	if (sample_id_all) {
		void *data = &event->mmap2.filename;

		data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
		swap_sample_id_all(event, data);
	}
}

static void perf_event__task_swap(union perf_event *event, bool sample_id_all)
{
	event->fork.pid = bswap_32(event->fork.pid);
	event->fork.tid = bswap_32(event->fork.tid);
	event->fork.ppid = bswap_32(event->fork.ppid);
	event->fork.ptid = bswap_32(event->fork.ptid);
	event->fork.time = bswap_64(event->fork.time);

	if (sample_id_all)
		swap_sample_id_all(event, &event->fork + 1);
}

static void perf_event__read_swap(union perf_event *event, bool sample_id_all)
{
	event->read.pid = bswap_32(event->read.pid);
	event->read.tid = bswap_32(event->read.tid);
	event->read.value = bswap_64(event->read.value);
	event->read.time_enabled = bswap_64(event->read.time_enabled);
	event->read.time_running = bswap_64(event->read.time_running);
	event->read.id = bswap_64(event->read.id);

	if (sample_id_all)
		swap_sample_id_all(event, &event->read + 1);
}

static void perf_event__throttle_swap(union perf_event *event,
				      bool sample_id_all)
{
	event->throttle.time = bswap_64(event->throttle.time);
	event->throttle.id = bswap_64(event->throttle.id);
	event->throttle.stream_id = bswap_64(event->throttle.stream_id);

	if (sample_id_all)
		swap_sample_id_all(event, &event->throttle + 1);
}

static u8 revbyte(u8 b)
{
	int rev = (b >> 4) | ((b & 0xf) << 4);
	rev = ((rev & 0xcc) >> 2) | ((rev & 0x33) << 2);
	rev = ((rev & 0xaa) >> 1) | ((rev & 0x55) << 1);
	return (u8) rev;
}

/*
 * XXX this is a hack in an attempt to carry the flags bitfield
 * through the endian village. The ABI says:
 *
 * Bit-fields are allocated from right to left (least to most significant)
 * on little-endian implementations and from left to right (most to least
 * significant) on big-endian implementations.
 *
 * The above seems to be byte specific, so we need to reverse each
 * byte of the bitfield. 'Internet' also says this might be implementation
 * specific and we probably need a proper fix and should carry the
 * perf_event_attr bitfield flags in a separate FEAT_ section of the data
 * file. Though this seems to work for now.
 */
static void swap_bitfield(u8 *p, unsigned len)
{
	unsigned i;

	for (i = 0; i < len; i++) {
		*p = revbyte(*p);
		p++;
	}
}

/* exported for swapping attributes in file header */
void perf_event__attr_swap(struct perf_event_attr *attr)
{
	attr->type		= bswap_32(attr->type);
	attr->size		= bswap_32(attr->size);
	attr->config		= bswap_64(attr->config);
	attr->sample_period	= bswap_64(attr->sample_period);
	attr->sample_type	= bswap_64(attr->sample_type);
	attr->read_format	= bswap_64(attr->read_format);
	attr->wakeup_events	= bswap_32(attr->wakeup_events);
	attr->bp_type		= bswap_32(attr->bp_type);
	attr->bp_addr		= bswap_64(attr->bp_addr);
	attr->bp_len		= bswap_64(attr->bp_len);
	attr->branch_sample_type = bswap_64(attr->branch_sample_type);
	attr->sample_regs_user	 = bswap_64(attr->sample_regs_user);
	attr->sample_stack_user  = bswap_32(attr->sample_stack_user);

	swap_bitfield((u8 *) (&attr->read_format + 1), sizeof(u64));
}

static void perf_event__hdr_attr_swap(union perf_event *event,
				      bool sample_id_all __maybe_unused)
{
	size_t size;

	perf_event__attr_swap(&event->attr.attr);

	size = event->header.size;
	size -= (void *)&event->attr.id - (void *)event;
	mem_bswap_64(event->attr.id, size);
}

static void perf_event__event_type_swap(union perf_event *event,
					bool sample_id_all __maybe_unused)
{
	event->event_type.event_type.event_id =
		bswap_64(event->event_type.event_type.event_id);
}

static void perf_event__tracing_data_swap(union perf_event *event,
					  bool sample_id_all __maybe_unused)
{
	event->tracing_data.size = bswap_32(event->tracing_data.size);
}

typedef void (*perf_event__swap_op)(union perf_event *event,
				    bool sample_id_all);

static perf_event__swap_op perf_event__swap_ops[] = {
	[PERF_RECORD_MMAP]		  = perf_event__mmap_swap,
	[PERF_RECORD_MMAP2]		  = perf_event__mmap2_swap,
	[PERF_RECORD_COMM]		  = perf_event__comm_swap,
	[PERF_RECORD_FORK]		  = perf_event__task_swap,
	[PERF_RECORD_EXIT]		  = perf_event__task_swap,
	[PERF_RECORD_LOST]		  = perf_event__all64_swap,
	[PERF_RECORD_READ]		  = perf_event__read_swap,
	[PERF_RECORD_THROTTLE]		  = perf_event__throttle_swap,
	[PERF_RECORD_UNTHROTTLE]	  = perf_event__throttle_swap,
	[PERF_RECORD_SAMPLE]		  = perf_event__all64_swap,
	[PERF_RECORD_HEADER_ATTR]	  = perf_event__hdr_attr_swap,
	[PERF_RECORD_HEADER_EVENT_TYPE]	  = perf_event__event_type_swap,
	[PERF_RECORD_HEADER_TRACING_DATA] = perf_event__tracing_data_swap,
	[PERF_RECORD_HEADER_BUILD_ID]	  = NULL,
	[PERF_RECORD_ID_INDEX]		  = perf_event__all64_swap,
	[PERF_RECORD_HEADER_MAX]	  = NULL,
};

/*
 * When perf record finishes a pass over all buffers, it records this pseudo
 * event.
 * We record the max timestamp t found in pass n.
 * Assuming these timestamps are monotonic across cpus, we know that if
 * a buffer still has events with timestamps below t, they will all be
 * available and then read in pass n + 1.
 * Hence when we start to read pass n + 2, we can safely flush every
 * event with a timestamp below t.
 *
 *    ============ PASS n =================
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          1          |         2
 *          2          |         3
 *          -          |         4  <--- max recorded
 *
 *    ============ PASS n + 1 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          3          |         5
 *          4          |         6
 *          5          |         7 <---- max recorded
 *
 *      Flush every event below timestamp 4
 *
 *    ============ PASS n + 2 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          6          |         8
 *          7          |         9
 *          -          |         10
 *
 *      Flush every event below timestamp 7
 *      etc...
 */
static int process_finished_round(struct perf_tool *tool __maybe_unused,
				  union perf_event *event __maybe_unused,
				  struct perf_session *session)
{
	struct ordered_events *oe = &session->ordered_events;

	return ordered_events__flush(oe, OE_FLUSH__ROUND);
}

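/*
 * Queue an event for timestamp-ordered delivery.  Events older than the last
 * flush are still queued but counted as unordered; if the queue cannot
 * allocate, half of it is flushed to make room before retrying.
 */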
int perf_session__queue_event(struct perf_session *s, union perf_event *event,
			      struct perf_sample *sample, u64 file_offset)
{
	struct ordered_events *oe = &s->ordered_events;
	u64 timestamp = sample->time;
	struct ordered_event *new;

	if (!timestamp || timestamp == ~0ULL)
		return -ETIME;

	if (timestamp < oe->last_flush) {
		pr_oe_time(timestamp, "out of order event\n");
		pr_oe_time(oe->last_flush, "last flush, last_flush_type %d\n",
			   oe->last_flush_type);

		s->evlist->stats.nr_unordered_events++;
	}

	new = ordered_events__new(oe, timestamp, event);
	if (!new) {
		ordered_events__flush(oe, OE_FLUSH__HALF);
		new = ordered_events__new(oe, timestamp, event);
	}

	if (!new)
		return -ENOMEM;

	new->file_offset = file_offset;
	return 0;
}

static void callchain__lbr_callstack_printf(struct perf_sample *sample)
{
	struct ip_callchain *callchain = sample->callchain;
	struct branch_stack *lbr_stack = sample->branch_stack;
	u64 kernel_callchain_nr = callchain->nr;
	unsigned int i;

	for (i = 0; i < kernel_callchain_nr; i++) {
		if (callchain->ips[i] == PERF_CONTEXT_USER)
			break;
	}

	if ((i != kernel_callchain_nr) && lbr_stack->nr) {
		u64 total_nr;
		/*
		 * The LBR callstack can only capture the user call chain:
		 * i is the number of kernel call chain entries and the
		 * extra 1 accounts for PERF_CONTEXT_USER.
		 *
		 * The user call chain is stored in the LBR registers.
		 * LBR entries are register pairs: the caller is stored
		 * in the "from" register, while the callee is stored
		 * in the "to" register.
		 * For example, for a call stack "A"->"B"->"C"->"D",
		 * the LBR registers will record
		 * "C"->"D", "B"->"C", "A"->"B".
		 * So only the first "to" register and all "from"
		 * registers are needed to construct the whole stack.
		 */
		total_nr = i + 1 + lbr_stack->nr + 1;
		kernel_callchain_nr = i + 1;

		printf("... LBR call chain: nr:%" PRIu64 "\n", total_nr);

		for (i = 0; i < kernel_callchain_nr; i++)
			printf("..... %2d: %016" PRIx64 "\n",
			       i, callchain->ips[i]);

		printf("..... %2d: %016" PRIx64 "\n",
		       (int)(kernel_callchain_nr), lbr_stack->entries[0].to);
		for (i = 0; i < lbr_stack->nr; i++)
			printf("..... %2d: %016" PRIx64 "\n",
			       (int)(i + kernel_callchain_nr + 1), lbr_stack->entries[i].from);
	}
}

static void callchain__printf(struct perf_evsel *evsel,
			      struct perf_sample *sample)
{
	unsigned int i;
	struct ip_callchain *callchain = sample->callchain;

	if (has_branch_callstack(evsel))
		callchain__lbr_callstack_printf(sample);

	printf("... FP chain: nr:%" PRIu64 "\n", callchain->nr);

	for (i = 0; i < callchain->nr; i++)
		printf("..... %2d: %016" PRIx64 "\n",
		       i, callchain->ips[i]);
}

static void branch_stack__printf(struct perf_sample *sample)
{
	uint64_t i;

	printf("... branch stack: nr:%" PRIu64 "\n", sample->branch_stack->nr);

	for (i = 0; i < sample->branch_stack->nr; i++)
		printf("..... %2"PRIu64": %016" PRIx64 " -> %016" PRIx64 "\n",
			i, sample->branch_stack->entries[i].from,
			sample->branch_stack->entries[i].to);
}

static void regs_dump__printf(u64 mask, u64 *regs)
{
	unsigned rid, i = 0;

	for_each_set_bit(rid, (unsigned long *) &mask, sizeof(mask) * 8) {
		u64 val = regs[i++];

		printf(".... %-5s 0x%" PRIx64 "\n",
		       perf_reg_name(rid), val);
	}
}

static const char *regs_abi[] = {
	[PERF_SAMPLE_REGS_ABI_NONE] = "none",
	[PERF_SAMPLE_REGS_ABI_32] = "32-bit",
	[PERF_SAMPLE_REGS_ABI_64] = "64-bit",
};

static inline const char *regs_dump_abi(struct regs_dump *d)
{
	if (d->abi > PERF_SAMPLE_REGS_ABI_64)
		return "unknown";

	return regs_abi[d->abi];
}

static void regs__printf(const char *type, struct regs_dump *regs)
{
	u64 mask = regs->mask;

	printf("... %s regs: mask 0x%" PRIx64 " ABI %s\n",
	       type,
	       mask,
	       regs_dump_abi(regs));

	regs_dump__printf(mask, regs->regs);
}

static void regs_user__printf(struct perf_sample *sample)
{
	struct regs_dump *user_regs = &sample->user_regs;

	if (user_regs->regs)
		regs__printf("user", user_regs);
}

static void regs_intr__printf(struct perf_sample *sample)
{
	struct regs_dump *intr_regs = &sample->intr_regs;

	if (intr_regs->regs)
		regs__printf("intr", intr_regs);
}

static void stack_user__printf(struct stack_dump *dump)
{
	printf("... ustack: size %" PRIu64 ", offset 0x%x\n",
	       dump->size, dump->offset);
}

static void perf_evlist__print_tstamp(struct perf_evlist *evlist,
				      union perf_event *event,
				      struct perf_sample *sample)
{
	u64 sample_type = __perf_evlist__combined_sample_type(evlist);

	if (event->header.type != PERF_RECORD_SAMPLE &&
	    !perf_evlist__sample_id_all(evlist)) {
		fputs("-1 -1 ", stdout);
		return;
	}

	if ((sample_type & PERF_SAMPLE_CPU))
		printf("%u ", sample->cpu);

	if (sample_type & PERF_SAMPLE_TIME)
		printf("%" PRIu64 " ", sample->time);
}

static void sample_read__printf(struct perf_sample *sample, u64 read_format)
{
	printf("... sample_read:\n");

	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		printf("...... time enabled %016" PRIx64 "\n",
		       sample->read.time_enabled);

	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		printf("...... time running %016" PRIx64 "\n",
		       sample->read.time_running);

	if (read_format & PERF_FORMAT_GROUP) {
		u64 i;

		printf(".... group nr %" PRIu64 "\n", sample->read.group.nr);

		for (i = 0; i < sample->read.group.nr; i++) {
			struct sample_read_value *value;

			value = &sample->read.group.values[i];
			printf("..... id %016" PRIx64
			       ", value %016" PRIx64 "\n",
			       value->id, value->value);
		}
	} else
		printf("..... id %016" PRIx64 ", value %016" PRIx64 "\n",
		       sample->read.one.id, sample->read.one.value);
}

static void dump_event(struct perf_evlist *evlist, union perf_event *event,
		       u64 file_offset, struct perf_sample *sample)
{
	if (!dump_trace)
		return;

	printf("\n%#" PRIx64 " [%#x]: event: %d\n",
	       file_offset, event->header.size, event->header.type);

	trace_event(event);

	if (sample)
		perf_evlist__print_tstamp(evlist, event, sample);

	printf("%#" PRIx64 " [%#x]: PERF_RECORD_%s", file_offset,
	       event->header.size, perf_event__name(event->header.type));
}

static void dump_sample(struct perf_evsel *evsel, union perf_event *event,
			struct perf_sample *sample)
{
	u64 sample_type;

	if (!dump_trace)
		return;

	printf("(IP, 0x%x): %d/%d: %#" PRIx64 " period: %" PRIu64 " addr: %#" PRIx64 "\n",
	       event->header.misc, sample->pid, sample->tid, sample->ip,
	       sample->period, sample->addr);

	sample_type = evsel->attr.sample_type;

	if (sample_type & PERF_SAMPLE_CALLCHAIN)
		callchain__printf(evsel, sample);

	if ((sample_type & PERF_SAMPLE_BRANCH_STACK) && !has_branch_callstack(evsel))
		branch_stack__printf(sample);

	if (sample_type & PERF_SAMPLE_REGS_USER)
		regs_user__printf(sample);

	if (sample_type & PERF_SAMPLE_REGS_INTR)
		regs_intr__printf(sample);

	if (sample_type & PERF_SAMPLE_STACK_USER)
		stack_user__printf(&sample->user_stack);

	if (sample_type & PERF_SAMPLE_WEIGHT)
		printf("... weight: %" PRIu64 "\n", sample->weight);

	if (sample_type & PERF_SAMPLE_DATA_SRC)
		printf(" . data_src: 0x%"PRIx64"\n", sample->data_src);

	if (sample_type & PERF_SAMPLE_TRANSACTION)
		printf("... transaction: %" PRIx64 "\n", sample->transaction);

	if (sample_type & PERF_SAMPLE_READ)
		sample_read__printf(sample, evsel->attr.read_format);
}

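/*
 * Pick the machine an event belongs to: the guest machine matching the
 * event's pid when the cpumode marks it as a guest sample, otherwise the
 * host machine.
 */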
static struct machine *machines__find_for_cpumode(struct machines *machines,
						  union perf_event *event,
						  struct perf_sample *sample)
{
	const u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
	struct machine *machine;

	if (perf_guest &&
	    ((cpumode == PERF_RECORD_MISC_GUEST_KERNEL) ||
	     (cpumode == PERF_RECORD_MISC_GUEST_USER))) {
		u32 pid;

		if (event->header.type == PERF_RECORD_MMAP
		    || event->header.type == PERF_RECORD_MMAP2)
			pid = event->mmap.pid;
		else
			pid = sample->pid;

		machine = machines__find(machines, pid);
		if (!machine)
			machine = machines__find(machines, DEFAULT_GUEST_KERNEL_ID);
		return machine;
	}

	return &machines->host;
}

static int deliver_sample_value(struct perf_evlist *evlist,
				struct perf_tool *tool,
				union perf_event *event,
				struct perf_sample *sample,
				struct sample_read_value *v,
				struct machine *machine)
{
	struct perf_sample_id *sid = perf_evlist__id2sid(evlist, v->id);

	if (sid) {
		sample->id     = v->id;
		sample->period = v->value - sid->period;
		sid->period    = v->value;
	}

	if (!sid || sid->evsel == NULL) {
		++evlist->stats.nr_unknown_id;
		return 0;
	}

	return tool->sample(tool, event, sample, sid->evsel, machine);
}

static int deliver_sample_group(struct perf_evlist *evlist,
				struct perf_tool *tool,
				union perf_event *event,
				struct perf_sample *sample,
				struct machine *machine)
{
	int ret = -EINVAL;
	u64 i;

	for (i = 0; i < sample->read.group.nr; i++) {
		ret = deliver_sample_value(evlist, tool, event, sample,
					   &sample->read.group.values[i],
					   machine);
		if (ret)
			break;
	}

	return ret;
}

static int
perf_evlist__deliver_sample(struct perf_evlist *evlist,
			    struct perf_tool *tool,
			    union perf_event *event,
			    struct perf_sample *sample,
			    struct perf_evsel *evsel,
			    struct machine *machine)
{
	/* We know evsel != NULL. */
	u64 sample_type = evsel->attr.sample_type;
	u64 read_format = evsel->attr.read_format;

	/* Standard sample delivery. */
	if (!(sample_type & PERF_SAMPLE_READ))
		return tool->sample(tool, event, sample, evsel, machine);

	/* For PERF_SAMPLE_READ we have either single or group mode. */
	if (read_format & PERF_FORMAT_GROUP)
		return deliver_sample_group(evlist, tool, event, sample,
					    machine);
	else
		return deliver_sample_value(evlist, tool, event, sample,
					    &sample->read.one, machine);
}

static int machines__deliver_event(struct machines *machines,
				   struct perf_evlist *evlist,
				   union perf_event *event,
				   struct perf_sample *sample,
				   struct perf_tool *tool, u64 file_offset)
{
	struct perf_evsel *evsel;
	struct machine *machine;

	dump_event(evlist, event, file_offset, sample);

	evsel = perf_evlist__id2evsel(evlist, sample->id);

	machine = machines__find_for_cpumode(machines, event, sample);

	switch (event->header.type) {
	case PERF_RECORD_SAMPLE:
		dump_sample(evsel, event, sample);
		if (evsel == NULL) {
			++evlist->stats.nr_unknown_id;
			return 0;
		}
		if (machine == NULL) {
			++evlist->stats.nr_unprocessable_samples;
			return 0;
		}
		return perf_evlist__deliver_sample(evlist, tool, event, sample, evsel, machine);
	case PERF_RECORD_MMAP:
		return tool->mmap(tool, event, sample, machine);
	case PERF_RECORD_MMAP2:
		return tool->mmap2(tool, event, sample, machine);
	case PERF_RECORD_COMM:
		return tool->comm(tool, event, sample, machine);
	case PERF_RECORD_FORK:
		return tool->fork(tool, event, sample, machine);
	case PERF_RECORD_EXIT:
		return tool->exit(tool, event, sample, machine);
	case PERF_RECORD_LOST:
		if (tool->lost == perf_event__process_lost)
			evlist->stats.total_lost += event->lost.lost;
		return tool->lost(tool, event, sample, machine);
	case PERF_RECORD_READ:
		return tool->read(tool, event, sample, evsel, machine);
	case PERF_RECORD_THROTTLE:
		return tool->throttle(tool, event, sample, machine);
	case PERF_RECORD_UNTHROTTLE:
		return tool->unthrottle(tool, event, sample, machine);
	default:
		++evlist->stats.nr_unknown_events;
		return -1;
	}
}

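/*
 * Synthesized/user records (attr, tracing data, build ids, finished round,
 * id index, ...) are handled immediately and never queued for reordering.
 */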
static s64 perf_session__process_user_event(struct perf_session *session,
					    union perf_event *event,
					    u64 file_offset)
{
	struct perf_tool *tool = session->ordered_events.tool;
	int fd = perf_data_file__fd(session->file);
	int err;

	dump_event(session->evlist, event, file_offset, NULL);

	/* These events are processed right away */
	switch (event->header.type) {
	case PERF_RECORD_HEADER_ATTR:
		err = tool->attr(tool, event, &session->evlist);
		if (err == 0) {
			perf_session__set_id_hdr_size(session);
			perf_session__set_comm_exec(session);
		}
		return err;
	case PERF_RECORD_HEADER_EVENT_TYPE:
		/*
		 * Deprecated, but we need to handle it for the sake
		 * of old data files created in pipe mode.
		 */
		return 0;
	case PERF_RECORD_HEADER_TRACING_DATA:
		/* setup for reading amidst mmap */
		lseek(fd, file_offset, SEEK_SET);
		return tool->tracing_data(tool, event, session);
	case PERF_RECORD_HEADER_BUILD_ID:
		return tool->build_id(tool, event, session);
	case PERF_RECORD_FINISHED_ROUND:
		return tool->finished_round(tool, event, session);
	case PERF_RECORD_ID_INDEX:
		return tool->id_index(tool, event, session);
	default:
		return -EINVAL;
	}
}

int perf_session__deliver_synth_event(struct perf_session *session,
				      union perf_event *event,
				      struct perf_sample *sample)
{
	struct perf_evlist *evlist = session->evlist;
	struct perf_tool *tool = session->ordered_events.tool;

	events_stats__inc(&evlist->stats, event->header.type);

	if (event->header.type >= PERF_RECORD_USER_TYPE_START)
		return perf_session__process_user_event(session, event, 0);

	return machines__deliver_event(&session->machines, evlist, event, sample, tool, 0);
}

static void event_swap(union perf_event *event, bool sample_id_all)
{
	perf_event__swap_op swap;

	swap = perf_event__swap_ops[event->header.type];
	if (swap)
		swap(event, sample_id_all);
}

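/*
 * Read a single event at @file_offset into @buf (or point straight into the
 * single mmap of the file when possible) and optionally parse its sample,
 * without disturbing the session's normal processing position.
 */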
int perf_session__peek_event(struct perf_session *session, off_t file_offset,
			     void *buf, size_t buf_sz,
			     union perf_event **event_ptr,
			     struct perf_sample *sample)
{
	union perf_event *event;
	size_t hdr_sz, rest;
	int fd;

	if (session->one_mmap && !session->header.needs_swap) {
		event = file_offset - session->one_mmap_offset +
			session->one_mmap_addr;
		goto out_parse_sample;
	}

	if (perf_data_file__is_pipe(session->file))
		return -1;

	fd = perf_data_file__fd(session->file);
	hdr_sz = sizeof(struct perf_event_header);

	if (buf_sz < hdr_sz)
		return -1;

	if (lseek(fd, file_offset, SEEK_SET) == (off_t)-1 ||
	    readn(fd, buf, hdr_sz) != (ssize_t)hdr_sz)
		return -1;

	event = (union perf_event *)buf;

	if (session->header.needs_swap)
		perf_event_header__bswap(&event->header);

	if (event->header.size < hdr_sz || event->header.size > buf_sz)
		return -1;

	/* Read the rest of the record into the caller's buffer, after the header. */
	buf += hdr_sz;
	rest = event->header.size - hdr_sz;

	if (readn(fd, buf, rest) != (ssize_t)rest)
		return -1;

	if (session->header.needs_swap)
		event_swap(event, perf_evlist__sample_id_all(session->evlist));

out_parse_sample:

	if (sample && event->header.type < PERF_RECORD_USER_TYPE_START &&
	    perf_evlist__parse_sample(session->evlist, event, sample))
		return -1;

	*event_ptr = event;

	return 0;
}

static s64 perf_session__process_event(struct perf_session *session,
				       union perf_event *event, u64 file_offset)
{
	struct perf_evlist *evlist = session->evlist;
	struct perf_tool *tool = session->ordered_events.tool;
	struct perf_sample sample;
	int ret;

	if (session->header.needs_swap)
		event_swap(event, perf_evlist__sample_id_all(evlist));

	if (event->header.type >= PERF_RECORD_HEADER_MAX)
		return -EINVAL;

	events_stats__inc(&evlist->stats, event->header.type);

	if (event->header.type >= PERF_RECORD_USER_TYPE_START)
		return perf_session__process_user_event(session, event, file_offset);

	/*
	 * For all kernel events we get the sample data
	 */
	ret = perf_evlist__parse_sample(evlist, event, &sample);
	if (ret)
		return ret;

	if (tool->ordered_events) {
		ret = perf_session__queue_event(session, event, &sample, file_offset);
		if (ret != -ETIME)
			return ret;
	}

	return machines__deliver_event(&session->machines, evlist, event,
				       &sample, tool, file_offset);
}

void perf_event_header__bswap(struct perf_event_header *hdr)
{
	hdr->type = bswap_32(hdr->type);
	hdr->misc = bswap_16(hdr->misc);
	hdr->size = bswap_16(hdr->size);
}

struct thread *perf_session__findnew(struct perf_session *session, pid_t pid)
{
	return machine__findnew_thread(&session->machines.host, -1, pid);
}

static struct thread *perf_session__register_idle_thread(struct perf_session *session)
{
	struct thread *thread;

	thread = machine__findnew_thread(&session->machines.host, 0, 0);
	if (thread == NULL || thread__set_comm(thread, "swapper", 0)) {
		pr_err("problem inserting idle task.\n");
		thread = NULL;
	}

	return thread;
}

static void perf_tool__warn_about_errors(const struct perf_tool *tool,
					 const struct events_stats *stats)
{
	if (tool->lost == perf_event__process_lost &&
	    stats->nr_events[PERF_RECORD_LOST] != 0) {
		ui__warning("Processed %d events and lost %d chunks!\n\n"
			    "Check IO/CPU overload!\n\n",
			    stats->nr_events[0],
			    stats->nr_events[PERF_RECORD_LOST]);
	}

	if (stats->nr_unknown_events != 0) {
		ui__warning("Found %u unknown events!\n\n"
			    "Is this an older tool processing a perf.data "
			    "file generated by a more recent tool?\n\n"
			    "If that is not the case, consider "
			    "reporting to linux-kernel@vger.kernel.org.\n\n",
			    stats->nr_unknown_events);
	}

	if (stats->nr_unknown_id != 0) {
		ui__warning("%u samples with id not present in the header\n",
			    stats->nr_unknown_id);
	}

	if (stats->nr_invalid_chains != 0) {
		ui__warning("Found invalid callchains!\n\n"
			    "%u out of %u events were discarded for this reason.\n\n"
			    "Consider reporting to linux-kernel@vger.kernel.org.\n\n",
			    stats->nr_invalid_chains,
			    stats->nr_events[PERF_RECORD_SAMPLE]);
	}

	if (stats->nr_unprocessable_samples != 0) {
		ui__warning("%u unprocessable samples recorded.\n"
			    "Do you have a KVM guest running and not using 'perf kvm'?\n",
			    stats->nr_unprocessable_samples);
	}

	if (stats->nr_unordered_events != 0)
		ui__warning("%u out of order events recorded.\n", stats->nr_unordered_events);
}

volatile int session_done;

static int __perf_session__process_pipe_events(struct perf_session *session)
{
	struct ordered_events *oe = &session->ordered_events;
	struct perf_tool *tool = oe->tool;
	int fd = perf_data_file__fd(session->file);
Stephane Eranian444d2862012-05-15 13:28:12 +02001190 union perf_event *event;
1191 uint32_t size, cur_size = 0;
1192 void *buf = NULL;
Adrian Hunterd5652d82014-07-23 22:19:58 +03001193 s64 skip = 0;
Tom Zanussi8dc58102010-04-01 23:59:15 -05001194 u64 head;
Jiri Olsa727ebd52013-11-28 11:30:14 +01001195 ssize_t err;
Tom Zanussi8dc58102010-04-01 23:59:15 -05001196 void *p;
1197
Arnaldo Carvalho de Melo45694aa2011-11-28 08:30:20 -02001198 perf_tool__fill_defaults(tool);
Tom Zanussi8dc58102010-04-01 23:59:15 -05001199
1200 head = 0;
Stephane Eranian444d2862012-05-15 13:28:12 +02001201 cur_size = sizeof(union perf_event);
1202
1203 buf = malloc(cur_size);
1204 if (!buf)
1205 return -errno;
Tom Zanussi8dc58102010-04-01 23:59:15 -05001206more:
Stephane Eranian444d2862012-05-15 13:28:12 +02001207 event = buf;
Jiri Olsacc9784bd2013-10-15 16:27:34 +02001208 err = readn(fd, event, sizeof(struct perf_event_header));
Tom Zanussi8dc58102010-04-01 23:59:15 -05001209 if (err <= 0) {
1210 if (err == 0)
1211 goto done;
1212
1213 pr_err("failed to read event header\n");
1214 goto out_err;
1215 }
1216
Arnaldo Carvalho de Melo316c7132013-11-05 15:32:36 -03001217 if (session->header.needs_swap)
Stephane Eranian444d2862012-05-15 13:28:12 +02001218 perf_event_header__bswap(&event->header);
Tom Zanussi8dc58102010-04-01 23:59:15 -05001219
Stephane Eranian444d2862012-05-15 13:28:12 +02001220 size = event->header.size;
Adrian Hunter27389d72013-07-04 16:20:27 +03001221 if (size < sizeof(struct perf_event_header)) {
1222 pr_err("bad event header size\n");
1223 goto out_err;
1224 }
Tom Zanussi8dc58102010-04-01 23:59:15 -05001225
Stephane Eranian444d2862012-05-15 13:28:12 +02001226 if (size > cur_size) {
1227 void *new = realloc(buf, size);
1228 if (!new) {
1229 pr_err("failed to allocate memory to read event\n");
1230 goto out_err;
1231 }
1232 buf = new;
1233 cur_size = size;
1234 event = buf;
1235 }
1236 p = event;
Tom Zanussi8dc58102010-04-01 23:59:15 -05001237 p += sizeof(struct perf_event_header);
1238
Tom Zanussi794e43b2010-05-05 00:27:40 -05001239 if (size - sizeof(struct perf_event_header)) {
Jiri Olsacc9784bd2013-10-15 16:27:34 +02001240 err = readn(fd, p, size - sizeof(struct perf_event_header));
Tom Zanussi794e43b2010-05-05 00:27:40 -05001241 if (err <= 0) {
1242 if (err == 0) {
1243 pr_err("unexpected end of event stream\n");
1244 goto done;
1245 }
Tom Zanussi8dc58102010-04-01 23:59:15 -05001246
Tom Zanussi794e43b2010-05-05 00:27:40 -05001247 pr_err("failed to read event data\n");
1248 goto out_err;
1249 }
Tom Zanussi8dc58102010-04-01 23:59:15 -05001250 }
1251
Arnaldo Carvalho de Melob7b61cb2015-03-03 11:58:45 -03001252 if ((skip = perf_session__process_event(session, event, head)) < 0) {
Jiri Olsa9389a462012-04-16 20:42:51 +02001253 pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
Stephane Eranian444d2862012-05-15 13:28:12 +02001254 head, event->header.size, event->header.type);
Jiri Olsa9389a462012-04-16 20:42:51 +02001255 err = -EINVAL;
1256 goto out_err;
Tom Zanussi8dc58102010-04-01 23:59:15 -05001257 }
1258
1259 head += size;
1260
Tom Zanussi8dc58102010-04-01 23:59:15 -05001261 if (skip > 0)
1262 head += skip;
1263
1264 if (!session_done())
1265 goto more;
1266done:
Adrian Hunter8c16b642013-10-18 15:29:02 +03001267 /* do the final flush for ordered samples */
Arnaldo Carvalho de Melob7b61cb2015-03-03 11:58:45 -03001268 err = ordered_events__flush(oe, OE_FLUSH__FINAL);
Tom Zanussi8dc58102010-04-01 23:59:15 -05001269out_err:
Stephane Eranian444d2862012-05-15 13:28:12 +02001270 free(buf);
Arnaldo Carvalho de Meloccda0682015-02-14 14:57:13 -03001271 perf_tool__warn_about_errors(tool, &session->evlist->stats);
Jiri Olsaadc56ed2014-06-10 22:50:03 +02001272 ordered_events__free(&session->ordered_events);
Tom Zanussi8dc58102010-04-01 23:59:15 -05001273 return err;
1274}
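/*
 * Minimal sketch of the grow-as-needed read pattern used by
 * __perf_session__process_pipe_events() above.  It is illustration only and
 * is not called anywhere; the example_read_record() name is made up, while
 * readn() is the helper from util.h that the real code also uses.  It reads
 * the fixed-size header first, enlarges the buffer to the self-described
 * record size, then reads the remaining payload.
 */
static ssize_t __maybe_unused example_read_record(int fd, void **buf, size_t *cur_size)
{
	struct perf_event_header *hdr;
	void *new;
	ssize_t err;

	if (*cur_size < sizeof(*hdr)) {
		new = realloc(*buf, sizeof(*hdr));
		if (new == NULL)
			return -ENOMEM;
		*buf = new;
		*cur_size = sizeof(*hdr);
	}

	err = readn(fd, *buf, sizeof(*hdr));
	if (err <= 0)
		return err;			/* 0 means a clean EOF */

	hdr = *buf;
	if (hdr->size < sizeof(*hdr))
		return -EINVAL;			/* corrupted stream */

	if (hdr->size > *cur_size) {
		new = realloc(*buf, hdr->size);
		if (new == NULL)
			return -ENOMEM;
		*buf = new;
		*cur_size = hdr->size;
		hdr = *buf;
	}

	if (hdr->size > sizeof(*hdr)) {
		err = readn(fd, (char *)*buf + sizeof(*hdr),
			    hdr->size - sizeof(*hdr));
		if (err <= 0)
			return err ? err : -EINVAL;	/* truncated payload */
	}

	return hdr->size;
}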
1275
Frederic Weisbecker998bedc2011-05-23 13:06:28 +02001276static union perf_event *
1277fetch_mmaped_event(struct perf_session *session,
1278 u64 head, size_t mmap_size, char *buf)
1279{
1280 union perf_event *event;
1281
1282 /*
1283 * Ensure we have enough space remaining to read
1284 * the size of the event in the headers.
1285 */
1286 if (head + sizeof(event->header) > mmap_size)
1287 return NULL;
1288
1289 event = (union perf_event *)(buf + head);
1290
1291 if (session->header.needs_swap)
1292 perf_event_header__bswap(&event->header);
1293
Adrian Hunter27389d72013-07-04 16:20:27 +03001294 if (head + event->header.size > mmap_size) {
1295 /* We're not fetching the event so swap back again */
1296 if (session->header.needs_swap)
1297 perf_event_header__bswap(&event->header);
Frederic Weisbecker998bedc2011-05-23 13:06:28 +02001298 return NULL;
Adrian Hunter27389d72013-07-04 16:20:27 +03001299 }
Frederic Weisbecker998bedc2011-05-23 13:06:28 +02001300
1301 return event;
1302}
1303
David Miller35d48dd2012-11-10 14:12:19 -05001304/*
1305 * On 64bit we can mmap the data file in one go. No need for tiny mmap
1306 * slices. On 32bit we use 32MB.
1307 */
1308#if BITS_PER_LONG == 64
1309#define MMAP_SIZE ULLONG_MAX
1310#define NUM_MMAPS 1
1311#else
1312#define MMAP_SIZE (32 * 1024 * 1024ULL)
1313#define NUM_MMAPS 128
1314#endif
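/*
 * Added note, not in the original file: on 64-bit, MMAP_SIZE is effectively
 * unlimited and gets clamped to the file size below, so the whole file is
 * mapped once and session->one_mmap is set.  On 32-bit the file is read
 * through 32MB windows instead, and up to NUM_MMAPS previously mapped
 * windows are kept in the mmaps[] ring, which keeps earlier windows alive
 * while events queued for ordered delivery may still reference them.
 */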
1315
Namhyung Kim4ac30cf2015-01-29 17:06:43 +09001316static int __perf_session__process_events(struct perf_session *session,
1317 u64 data_offset, u64 data_size,
Arnaldo Carvalho de Melob7b61cb2015-03-03 11:58:45 -03001318 u64 file_size)
Arnaldo Carvalho de Melo06aae5902009-12-27 21:36:59 -02001319{
Arnaldo Carvalho de Melofa713a4e2015-03-03 11:48:12 -03001320 struct ordered_events *oe = &session->ordered_events;
Arnaldo Carvalho de Melob7b61cb2015-03-03 11:58:45 -03001321 struct perf_tool *tool = oe->tool;
Jiri Olsacc9784bd2013-10-15 16:27:34 +02001322 int fd = perf_data_file__fd(session->file);
Adrian Hunterd5652d82014-07-23 22:19:58 +03001323 u64 head, page_offset, file_offset, file_pos, size;
Thomas Gleixnerfe174202010-11-30 17:49:49 +00001324 int err, mmap_prot, mmap_flags, map_idx = 0;
Arnaldo Carvalho de Melo0c1fe6b2012-10-06 14:57:10 -03001325 size_t mmap_size;
David Miller35d48dd2012-11-10 14:12:19 -05001326 char *buf, *mmaps[NUM_MMAPS];
Arnaldo Carvalho de Melo8115d602011-01-29 14:01:45 -02001327 union perf_event *event;
Arnaldo Carvalho de Melo4d3001f2013-10-23 15:40:38 -03001328 struct ui_progress prog;
Adrian Hunterd5652d82014-07-23 22:19:58 +03001329 s64 skip;
Thomas Gleixner0331ee02010-11-30 17:49:38 +00001330
Arnaldo Carvalho de Melo45694aa2011-11-28 08:30:20 -02001331 perf_tool__fill_defaults(tool);
Arnaldo Carvalho de Melo06aae5902009-12-27 21:36:59 -02001332
Thomas Gleixner0331ee02010-11-30 17:49:38 +00001333 page_offset = page_size * (data_offset / page_size);
1334 file_offset = page_offset;
1335 head = data_offset - page_offset;
Arnaldo Carvalho de Melo06aae5902009-12-27 21:36:59 -02001336
Namhyung Kimb314e5c2013-09-30 17:19:48 +09001337 if (data_size && (data_offset + data_size < file_size))
Thomas Gleixnerd6513282010-11-30 17:49:44 +00001338 file_size = data_offset + data_size;
1339
Arnaldo Carvalho de Melo4d3001f2013-10-23 15:40:38 -03001340 ui_progress__init(&prog, file_size, "Processing events...");
Thomas Gleixner55b44622010-11-30 17:49:46 +00001341
David Miller35d48dd2012-11-10 14:12:19 -05001342 mmap_size = MMAP_SIZE;
Adrian Hunter919d86d2014-07-14 13:02:51 +03001343 if (mmap_size > file_size) {
Thomas Gleixner55b44622010-11-30 17:49:46 +00001344 mmap_size = file_size;
Adrian Hunter919d86d2014-07-14 13:02:51 +03001345 session->one_mmap = true;
1346 }
Thomas Gleixner55b44622010-11-30 17:49:46 +00001347
Thomas Gleixnerfe174202010-11-30 17:49:49 +00001348 memset(mmaps, 0, sizeof(mmaps));
1349
Arnaldo Carvalho de Meloba215942010-01-14 12:23:10 -02001350 mmap_prot = PROT_READ;
1351 mmap_flags = MAP_SHARED;
1352
Thomas Gleixner0331ee02010-11-30 17:49:38 +00001353 if (session->header.needs_swap) {
Arnaldo Carvalho de Meloba215942010-01-14 12:23:10 -02001354 mmap_prot |= PROT_WRITE;
1355 mmap_flags = MAP_PRIVATE;
1356 }
Arnaldo Carvalho de Melo06aae5902009-12-27 21:36:59 -02001357remap:
Jiri Olsacc9784bd2013-10-15 16:27:34 +02001358 buf = mmap(NULL, mmap_size, mmap_prot, mmap_flags, fd,
Thomas Gleixner55b44622010-11-30 17:49:46 +00001359 file_offset);
Arnaldo Carvalho de Melo06aae5902009-12-27 21:36:59 -02001360 if (buf == MAP_FAILED) {
1361 pr_err("failed to mmap file\n");
1362 err = -errno;
1363 goto out_err;
1364 }
Thomas Gleixnerfe174202010-11-30 17:49:49 +00001365 mmaps[map_idx] = buf;
1366 map_idx = (map_idx + 1) & (ARRAY_SIZE(mmaps) - 1);
Thomas Gleixnerd6513282010-11-30 17:49:44 +00001367 file_pos = file_offset + head;
Adrian Hunter919d86d2014-07-14 13:02:51 +03001368 if (session->one_mmap) {
1369 session->one_mmap_addr = buf;
1370 session->one_mmap_offset = file_offset;
1371 }
Arnaldo Carvalho de Melo06aae5902009-12-27 21:36:59 -02001372
1373more:
Frederic Weisbecker998bedc2011-05-23 13:06:28 +02001374 event = fetch_mmaped_event(session, head, mmap_size, buf);
1375 if (!event) {
Thomas Gleixnerfe174202010-11-30 17:49:49 +00001376 if (mmaps[map_idx]) {
1377 munmap(mmaps[map_idx], mmap_size);
1378 mmaps[map_idx] = NULL;
1379 }
Arnaldo Carvalho de Melo06aae5902009-12-27 21:36:59 -02001380
Thomas Gleixner0331ee02010-11-30 17:49:38 +00001381 page_offset = page_size * (head / page_size);
1382 file_offset += page_offset;
1383 head -= page_offset;
Arnaldo Carvalho de Melo06aae5902009-12-27 21:36:59 -02001384 goto remap;
1385 }
1386
1387 size = event->header.size;
1388
Adrian Hunter27389d72013-07-04 16:20:27 +03001389 if (size < sizeof(struct perf_event_header) ||
Arnaldo Carvalho de Melob7b61cb2015-03-03 11:58:45 -03001390 (skip = perf_session__process_event(session, event, file_pos)) < 0) {
Jiri Olsa9389a462012-04-16 20:42:51 +02001391 pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
1392 file_offset + head, event->header.size,
1393 event->header.type);
1394 err = -EINVAL;
1395 goto out_err;
Arnaldo Carvalho de Melo06aae5902009-12-27 21:36:59 -02001396 }
1397
Adrian Hunter6f917c72014-07-23 22:19:57 +03001398 if (skip)
1399 size += skip;
1400
Arnaldo Carvalho de Melo06aae5902009-12-27 21:36:59 -02001401 head += size;
Thomas Gleixnerd6513282010-11-30 17:49:44 +00001402 file_pos += size;
Arnaldo Carvalho de Melo06aae5902009-12-27 21:36:59 -02001403
Arnaldo Carvalho de Melo4d3001f2013-10-23 15:40:38 -03001404 ui_progress__update(&prog, size);
Thomas Gleixner55b44622010-11-30 17:49:46 +00001405
Arnaldo Carvalho de Melo33e940a2013-09-17 16:34:28 -03001406 if (session_done())
Adrian Hunter8c16b642013-10-18 15:29:02 +03001407 goto out;
Arnaldo Carvalho de Melo33e940a2013-09-17 16:34:28 -03001408
Thomas Gleixnerd6513282010-11-30 17:49:44 +00001409 if (file_pos < file_size)
Arnaldo Carvalho de Melo06aae5902009-12-27 21:36:59 -02001410 goto more;
Thomas Gleixnerd6513282010-11-30 17:49:44 +00001411
Adrian Hunter8c16b642013-10-18 15:29:02 +03001412out:
Frederic Weisbeckerc61e52e2010-04-24 00:04:12 +02001413 /* do the final flush for ordered samples */
Arnaldo Carvalho de Melob7b61cb2015-03-03 11:58:45 -03001414 err = ordered_events__flush(oe, OE_FLUSH__FINAL);
Arnaldo Carvalho de Melo06aae5902009-12-27 21:36:59 -02001415out_err:
Namhyung Kima5580f32012-11-13 22:30:34 +09001416 ui_progress__finish();
Arnaldo Carvalho de Meloccda0682015-02-14 14:57:13 -03001417 perf_tool__warn_about_errors(tool, &session->evlist->stats);
Jiri Olsaadc56ed2014-06-10 22:50:03 +02001418 ordered_events__free(&session->ordered_events);
Adrian Hunter919d86d2014-07-14 13:02:51 +03001419 session->one_mmap = false;
Arnaldo Carvalho de Melo06aae5902009-12-27 21:36:59 -02001420 return err;
1421}
Arnaldo Carvalho de Melo27295592009-12-27 21:37:01 -02001422
Arnaldo Carvalho de Melob7b61cb2015-03-03 11:58:45 -03001423int perf_session__process_events(struct perf_session *session)
Arnaldo Carvalho de Melo6122e4e2010-02-03 16:52:05 -02001424{
Arnaldo Carvalho de Melo316c7132013-11-05 15:32:36 -03001425 u64 size = perf_data_file__size(session->file);
Arnaldo Carvalho de Melo6122e4e2010-02-03 16:52:05 -02001426 int err;
1427
Arnaldo Carvalho de Melo316c7132013-11-05 15:32:36 -03001428 if (perf_session__register_idle_thread(session) == NULL)
Arnaldo Carvalho de Melo6122e4e2010-02-03 16:52:05 -02001429 return -ENOMEM;
1430
Arnaldo Carvalho de Melo316c7132013-11-05 15:32:36 -03001431 if (!perf_data_file__is_pipe(session->file))
1432 err = __perf_session__process_events(session,
1433 session->header.data_offset,
Arnaldo Carvalho de Melob7b61cb2015-03-03 11:58:45 -03001434 session->header.data_size, size);
Tom Zanussi8dc58102010-04-01 23:59:15 -05001435 else
Arnaldo Carvalho de Melob7b61cb2015-03-03 11:58:45 -03001436 err = __perf_session__process_pipe_events(session);
Dave Martin88ca8952010-07-27 11:46:12 -03001437
Arnaldo Carvalho de Melo6122e4e2010-02-03 16:52:05 -02001438 return err;
1439}
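/*
 * Lifecycle sketch, illustration only (not called anywhere): roughly how a
 * builtin tool drives the readers above.  "perf.data", example_tool and the
 * example_process_file() name are placeholders; the perf_session__new()/
 * perf_session__delete() pair and PERF_DATA_MODE_READ are assumed here from
 * the session.h/data.h interfaces of this vintage, as used by the existing
 * builtins.
 */
static int __maybe_unused example_process_file(struct perf_tool *example_tool)
{
	struct perf_data_file file = {
		.path = "perf.data",
		.mode = PERF_DATA_MODE_READ,
	};
	struct perf_session *session = perf_session__new(&file, false, example_tool);
	int err;

	if (session == NULL)
		return -1;

	err = perf_session__process_events(session);

	perf_session__delete(session);
	return err;
}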
1440
Arnaldo Carvalho de Melo7f3be652012-08-01 19:15:52 -03001441bool perf_session__has_traces(struct perf_session *session, const char *msg)
Arnaldo Carvalho de Melo27295592009-12-27 21:37:01 -02001442{
David Ahern93ea01c2013-08-07 22:50:58 -04001443 struct perf_evsel *evsel;
1444
Arnaldo Carvalho de Melo0050f7a2014-01-10 10:37:27 -03001445 evlist__for_each(session->evlist, evsel) {
David Ahern93ea01c2013-08-07 22:50:58 -04001446 if (evsel->attr.type == PERF_TYPE_TRACEPOINT)
1447 return true;
Arnaldo Carvalho de Melo27295592009-12-27 21:37:01 -02001448 }
1449
David Ahern93ea01c2013-08-07 22:50:58 -04001450 pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg);
1451 return false;
Arnaldo Carvalho de Melo27295592009-12-27 21:37:01 -02001452}
Arnaldo Carvalho de Melo56b03f32010-01-05 16:50:31 -02001453
Arnaldo Carvalho de Melo743eb862011-11-28 07:56:39 -02001454int maps__set_kallsyms_ref_reloc_sym(struct map **maps,
1455 const char *symbol_name, u64 addr)
Arnaldo Carvalho de Melo56b03f32010-01-05 16:50:31 -02001456{
1457 char *bracket;
Arnaldo Carvalho de Melo9de89fe2010-02-03 16:52:00 -02001458 enum map_type i;
Zhang, Yanmina1645ce2010-04-19 13:32:50 +08001459 struct ref_reloc_sym *ref;
Arnaldo Carvalho de Melo56b03f32010-01-05 16:50:31 -02001460
Zhang, Yanmina1645ce2010-04-19 13:32:50 +08001461 ref = zalloc(sizeof(struct ref_reloc_sym));
1462 if (ref == NULL)
Arnaldo Carvalho de Melo56b03f32010-01-05 16:50:31 -02001463 return -ENOMEM;
1464
Zhang, Yanmina1645ce2010-04-19 13:32:50 +08001465 ref->name = strdup(symbol_name);
1466 if (ref->name == NULL) {
1467 free(ref);
1468 return -ENOMEM;
1469 }
1470
1471 bracket = strchr(ref->name, ']');
Arnaldo Carvalho de Melo56b03f32010-01-05 16:50:31 -02001472 if (bracket)
1473 *bracket = '\0';
1474
Zhang, Yanmina1645ce2010-04-19 13:32:50 +08001475 ref->addr = addr;
Arnaldo Carvalho de Melo9de89fe2010-02-03 16:52:00 -02001476
1477 for (i = 0; i < MAP__NR_TYPES; ++i) {
Zhang, Yanmina1645ce2010-04-19 13:32:50 +08001478 struct kmap *kmap = map__kmap(maps[i]);
1479 kmap->ref_reloc_sym = ref;
Arnaldo Carvalho de Melo9de89fe2010-02-03 16:52:00 -02001480 }
1481
Arnaldo Carvalho de Melo56b03f32010-01-05 16:50:31 -02001482 return 0;
1483}
Arnaldo Carvalho de Melo1f626bc2010-05-09 19:57:08 -03001484
Arnaldo Carvalho de Melo316c7132013-11-05 15:32:36 -03001485size_t perf_session__fprintf_dsos(struct perf_session *session, FILE *fp)
Arnaldo Carvalho de Melo1f626bc2010-05-09 19:57:08 -03001486{
Arnaldo Carvalho de Melo316c7132013-11-05 15:32:36 -03001487 return machines__fprintf_dsos(&session->machines, fp);
Arnaldo Carvalho de Melo1f626bc2010-05-09 19:57:08 -03001488}
Arnaldo Carvalho de Melof8690972010-05-19 13:41:23 -03001489
Arnaldo Carvalho de Melo316c7132013-11-05 15:32:36 -03001490size_t perf_session__fprintf_dsos_buildid(struct perf_session *session, FILE *fp,
Arnaldo Carvalho de Melo417c2ff2012-12-07 09:53:58 -03001491 bool (skip)(struct dso *dso, int parm), int parm)
Arnaldo Carvalho de Melof8690972010-05-19 13:41:23 -03001492{
Arnaldo Carvalho de Melo316c7132013-11-05 15:32:36 -03001493 return machines__fprintf_dsos_buildid(&session->machines, fp, skip, parm);
Arnaldo Carvalho de Melof8690972010-05-19 13:41:23 -03001494}
Arnaldo Carvalho de Meloe248de32011-03-05 21:40:06 -03001495
1496size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp)
1497{
Arnaldo Carvalho de Meloe248de32011-03-05 21:40:06 -03001498 size_t ret = fprintf(fp, "Aggregated stats:\n");
1499
Arnaldo Carvalho de Melo75be9892015-02-14 14:50:11 -03001500 ret += events_stats__fprintf(&session->evlist->stats, fp);
Arnaldo Carvalho de Meloe248de32011-03-05 21:40:06 -03001501 return ret;
1502}
David Ahernc0230b22011-03-09 22:23:27 -07001503
Arnaldo Carvalho de Melob424eba2011-11-09 13:24:25 -02001504size_t perf_session__fprintf(struct perf_session *session, FILE *fp)
1505{
1506 /*
1507 * FIXME: Here we have to actually print all the machines in this
1508 * session, not just the host...
1509 */
Arnaldo Carvalho de Melo876650e2012-12-18 19:15:48 -03001510 return machine__fprintf(&session->machines.host, fp);
Arnaldo Carvalho de Melob424eba2011-11-09 13:24:25 -02001511}
1512
David Ahern9cbdb702011-04-06 21:54:20 -06001513struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session,
1514 unsigned int type)
1515{
1516 struct perf_evsel *pos;
1517
Arnaldo Carvalho de Melo0050f7a2014-01-10 10:37:27 -03001518 evlist__for_each(session->evlist, pos) {
David Ahern9cbdb702011-04-06 21:54:20 -06001519 if (pos->attr.type == type)
1520 return pos;
1521 }
1522 return NULL;
1523}
1524
Adrian Huntera2cb3cf2013-12-04 16:16:36 +02001525void perf_evsel__print_ip(struct perf_evsel *evsel, struct perf_sample *sample,
Arnaldo Carvalho de Melocc22e572013-12-19 17:20:06 -03001526 struct addr_location *al,
David Ahern307cbb92013-08-07 22:50:53 -04001527 unsigned int print_opts, unsigned int stack_depth)
David Ahernc0230b22011-03-09 22:23:27 -07001528{
David Ahernc0230b22011-03-09 22:23:27 -07001529 struct callchain_cursor_node *node;
David Aherna6ffaf92013-08-07 22:50:51 -04001530 int print_ip = print_opts & PRINT_IP_OPT_IP;
1531 int print_sym = print_opts & PRINT_IP_OPT_SYM;
1532 int print_dso = print_opts & PRINT_IP_OPT_DSO;
1533 int print_symoffset = print_opts & PRINT_IP_OPT_SYMOFFSET;
David Ahernb0b35f02013-08-07 22:50:52 -04001534 int print_oneline = print_opts & PRINT_IP_OPT_ONELINE;
Adrian Huntercc8fae12013-12-06 09:42:57 +02001535 int print_srcline = print_opts & PRINT_IP_OPT_SRCLINE;
David Ahernb0b35f02013-08-07 22:50:52 -04001536 char s = print_oneline ? ' ' : '\t';
David Ahernc0230b22011-03-09 22:23:27 -07001537
David Ahernc0230b22011-03-09 22:23:27 -07001538 if (symbol_conf.use_callchain && sample->callchain) {
Adrian Huntera2cb3cf2013-12-04 16:16:36 +02001539 struct addr_location node_al;
David Ahernc0230b22011-03-09 22:23:27 -07001540
Arnaldo Carvalho de Melocc8b7c22014-10-23 15:26:17 -03001541 if (thread__resolve_callchain(al->thread, evsel,
1542 sample, NULL, NULL,
1543 PERF_MAX_STACK_DEPTH) != 0) {
David Ahernc0230b22011-03-09 22:23:27 -07001544 if (verbose)
1545 error("Failed to resolve callchain. Skipping\n");
1546 return;
1547 }
Namhyung Kim47260642012-05-31 14:43:26 +09001548 callchain_cursor_commit(&callchain_cursor);
David Ahernc0230b22011-03-09 22:23:27 -07001549
Adrian Huntera2cb3cf2013-12-04 16:16:36 +02001550 if (print_symoffset)
1551 node_al = *al;
1552
David Ahern307cbb92013-08-07 22:50:53 -04001553 while (stack_depth) {
Adrian Huntera4eb24a2013-12-06 09:42:56 +02001554 u64 addr = 0;
1555
Namhyung Kim47260642012-05-31 14:43:26 +09001556 node = callchain_cursor_current(&callchain_cursor);
David Ahernc0230b22011-03-09 22:23:27 -07001557 if (!node)
1558 break;
1559
David Ahernd2ff1b12013-11-18 13:32:44 -07001560 if (node->sym && node->sym->ignore)
1561 goto next;
1562
David Aherna6ffaf92013-08-07 22:50:51 -04001563 if (print_ip)
David Ahernb0b35f02013-08-07 22:50:52 -04001564 printf("%c%16" PRIx64, s, node->ip);
David Aherna6ffaf92013-08-07 22:50:51 -04001565
Adrian Huntera4eb24a2013-12-06 09:42:56 +02001566 if (node->map)
1567 addr = node->map->map_ip(node->map, node->ip);
1568
David Ahern787bef12011-05-27 14:28:43 -06001569 if (print_sym) {
Akihiro Nagai547a92e2012-01-30 13:42:57 +09001570 printf(" ");
David Ahern251f4262013-07-28 09:14:34 -06001571 if (print_symoffset) {
Adrian Huntera4eb24a2013-12-06 09:42:56 +02001572 node_al.addr = addr;
Adrian Huntera2cb3cf2013-12-04 16:16:36 +02001573 node_al.map = node->map;
1574 symbol__fprintf_symname_offs(node->sym, &node_al, stdout);
David Ahern251f4262013-07-28 09:14:34 -06001575 } else
1576 symbol__fprintf_symname(node->sym, stdout);
David Ahern610723f2011-05-27 14:28:44 -06001577 }
David Ahern251f4262013-07-28 09:14:34 -06001578
David Ahern610723f2011-05-27 14:28:44 -06001579 if (print_dso) {
Akihiro Nagai547a92e2012-01-30 13:42:57 +09001580 printf(" (");
David Ahern52deff72012-05-29 22:58:26 -06001581 map__fprintf_dsoname(node->map, stdout);
Akihiro Nagai547a92e2012-01-30 13:42:57 +09001582 printf(")");
David Ahern787bef12011-05-27 14:28:43 -06001583 }
David Ahernb0b35f02013-08-07 22:50:52 -04001584
Adrian Huntercc8fae12013-12-06 09:42:57 +02001585 if (print_srcline)
1586 map__fprintf_srcline(node->map, addr, "\n ",
1587 stdout);
1588
David Ahernb0b35f02013-08-07 22:50:52 -04001589 if (!print_oneline)
1590 printf("\n");
David Ahernc0230b22011-03-09 22:23:27 -07001591
David Ahern307cbb92013-08-07 22:50:53 -04001592 stack_depth--;
David Ahernd2ff1b12013-11-18 13:32:44 -07001593next:
1594 callchain_cursor_advance(&callchain_cursor);
David Ahernc0230b22011-03-09 22:23:27 -07001595 }
1596
1597 } else {
Adrian Huntera2cb3cf2013-12-04 16:16:36 +02001598 if (al->sym && al->sym->ignore)
David Ahernd2ff1b12013-11-18 13:32:44 -07001599 return;
1600
David Aherna6ffaf92013-08-07 22:50:51 -04001601 if (print_ip)
1602 printf("%16" PRIx64, sample->ip);
1603
David Ahern787bef12011-05-27 14:28:43 -06001604 if (print_sym) {
Akihiro Nagai547a92e2012-01-30 13:42:57 +09001605 printf(" ");
Akihiro Nagaia978f2a2012-01-30 13:43:15 +09001606 if (print_symoffset)
Adrian Huntera2cb3cf2013-12-04 16:16:36 +02001607 symbol__fprintf_symname_offs(al->sym, al,
Akihiro Nagaia978f2a2012-01-30 13:43:15 +09001608 stdout);
1609 else
Adrian Huntera2cb3cf2013-12-04 16:16:36 +02001610 symbol__fprintf_symname(al->sym, stdout);
David Ahern610723f2011-05-27 14:28:44 -06001611 }
1612
1613 if (print_dso) {
Akihiro Nagai547a92e2012-01-30 13:42:57 +09001614 printf(" (");
Adrian Huntera2cb3cf2013-12-04 16:16:36 +02001615 map__fprintf_dsoname(al->map, stdout);
Akihiro Nagai547a92e2012-01-30 13:42:57 +09001616 printf(")");
David Ahern787bef12011-05-27 14:28:43 -06001617 }
Adrian Huntercc8fae12013-12-06 09:42:57 +02001618
1619 if (print_srcline)
1620 map__fprintf_srcline(al->map, al->addr, "\n ", stdout);
David Ahernc0230b22011-03-09 22:23:27 -07001621 }
1622}
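/*
 * Usage sketch, illustration only (not called anywhere): roughly how
 * 'perf script'-style code composes the print_opts bitmask before calling
 * perf_evsel__print_ip().  The example_print_frame() name is made up; the
 * PRINT_IP_OPT_* flags and PERF_MAX_STACK_DEPTH are the ones already used
 * above.
 */
static void __maybe_unused example_print_frame(struct perf_evsel *evsel,
					       struct perf_sample *sample,
					       struct addr_location *al)
{
	unsigned int print_opts = PRINT_IP_OPT_IP | PRINT_IP_OPT_SYM |
				  PRINT_IP_OPT_DSO | PRINT_IP_OPT_SRCLINE;

	/* Walk at most PERF_MAX_STACK_DEPTH callchain entries. */
	perf_evsel__print_ip(evsel, sample, al, print_opts, PERF_MAX_STACK_DEPTH);
}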
Anton Blanchard5d67be92011-07-04 21:57:50 +10001623
1624int perf_session__cpu_bitmap(struct perf_session *session,
1625 const char *cpu_list, unsigned long *cpu_bitmap)
1626{
Stanislav Fomichev8bac41c2014-01-20 15:39:39 +04001627 int i, err = -1;
Anton Blanchard5d67be92011-07-04 21:57:50 +10001628 struct cpu_map *map;
1629
1630 for (i = 0; i < PERF_TYPE_MAX; ++i) {
1631 struct perf_evsel *evsel;
1632
1633 evsel = perf_session__find_first_evtype(session, i);
1634 if (!evsel)
1635 continue;
1636
1637 if (!(evsel->attr.sample_type & PERF_SAMPLE_CPU)) {
1638 pr_err("File does not contain CPU events. "
1639 "Remove -c option to proceed.\n");
1640 return -1;
1641 }
1642 }
1643
1644 map = cpu_map__new(cpu_list);
David Ahern47fbe532011-11-13 10:45:27 -07001645 if (map == NULL) {
1646 pr_err("Invalid cpu_list\n");
1647 return -1;
1648 }
Anton Blanchard5d67be92011-07-04 21:57:50 +10001649
1650 for (i = 0; i < map->nr; i++) {
1651 int cpu = map->map[i];
1652
1653 if (cpu >= MAX_NR_CPUS) {
1654 pr_err("Requested CPU %d too large. "
1655 "Consider raising MAX_NR_CPUS\n", cpu);
Stanislav Fomichev8bac41c2014-01-20 15:39:39 +04001656 goto out_delete_map;
Anton Blanchard5d67be92011-07-04 21:57:50 +10001657 }
1658
1659 set_bit(cpu, cpu_bitmap);
1660 }
1661
Stanislav Fomichev8bac41c2014-01-20 15:39:39 +04001662 err = 0;
1663
1664out_delete_map:
1665 cpu_map__delete(map);
1666 return err;
Anton Blanchard5d67be92011-07-04 21:57:50 +10001667}
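/*
 * Usage sketch, illustration only: callers such as 'perf report' and
 * 'perf script' parse their --cpu argument into a bitmap up front and then
 * filter samples against it.  DECLARE_BITMAP and test_bit are assumed to
 * come from the tools' bitmap helpers, as in those callers:
 *
 *	DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);
 *
 *	if (cpu_list && perf_session__cpu_bitmap(session, cpu_list, cpu_bitmap) < 0)
 *		return -1;
 *	...
 *	if (cpu_list && !test_bit(sample->cpu, cpu_bitmap))
 *		return 0;	// sample is on a CPU we were not asked about
 */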
Stephane Eranianfbe96f22011-09-30 15:40:40 +02001668
1669void perf_session__fprintf_info(struct perf_session *session, FILE *fp,
1670 bool full)
1671{
1672 struct stat st;
Masanari Iidac5765ec2014-05-15 02:13:38 +09001673 int fd, ret;
Stephane Eranianfbe96f22011-09-30 15:40:40 +02001674
1675 if (session == NULL || fp == NULL)
1676 return;
1677
Masanari Iidac5765ec2014-05-15 02:13:38 +09001678 fd = perf_data_file__fd(session->file);
1679
Jiri Olsacc9784bd2013-10-15 16:27:34 +02001680 ret = fstat(fd, &st);
Stephane Eranianfbe96f22011-09-30 15:40:40 +02001681 if (ret == -1)
1682 return;
1683
1684 fprintf(fp, "# ========\n");
1685 fprintf(fp, "# captured on: %s", ctime(&st.st_ctime));
1686 perf_header__fprintf_info(session, fp, full);
1687 fprintf(fp, "# ========\n#\n");
1688}
Arnaldo Carvalho de Meloda378962012-06-27 13:08:42 -03001689
1690
1691int __perf_session__set_tracepoints_handlers(struct perf_session *session,
1692 const struct perf_evsel_str_handler *assocs,
1693 size_t nr_assocs)
1694{
Arnaldo Carvalho de Meloda378962012-06-27 13:08:42 -03001695 struct perf_evsel *evsel;
Arnaldo Carvalho de Meloda378962012-06-27 13:08:42 -03001696 size_t i;
1697 int err;
1698
1699 for (i = 0; i < nr_assocs; i++) {
Arnaldo Carvalho de Meloccf53ea2013-09-06 15:19:01 -03001700 /*
1701 * If we are adding a handler for an event that is not in the
1702 * session, just ignore it.
1703 */
1704 evsel = perf_evlist__find_tracepoint_by_name(session->evlist, assocs[i].name);
Arnaldo Carvalho de Meloda378962012-06-27 13:08:42 -03001705 if (evsel == NULL)
Arnaldo Carvalho de Meloccf53ea2013-09-06 15:19:01 -03001706 continue;
Arnaldo Carvalho de Meloda378962012-06-27 13:08:42 -03001707
1708 err = -EEXIST;
Arnaldo Carvalho de Melo744a9712013-11-06 10:17:38 -03001709 if (evsel->handler != NULL)
Arnaldo Carvalho de Meloccf53ea2013-09-06 15:19:01 -03001710 goto out;
Arnaldo Carvalho de Melo744a9712013-11-06 10:17:38 -03001711 evsel->handler = assocs[i].handler;
Arnaldo Carvalho de Meloda378962012-06-27 13:08:42 -03001712 }
1713
1714 err = 0;
1715out:
1716 return err;
Arnaldo Carvalho de Meloda378962012-06-27 13:08:42 -03001717}
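/*
 * Usage sketch, illustration only (not called anywhere): how a tool
 * associates tracepoint names with its own handlers.  The tracepoint name,
 * the example_* identifiers and the handler signature are all hypothetical;
 * evsel->handler is stored as a plain pointer, so each tool casts it back to
 * whatever signature its sample callback expects.  The builtin tools
 * normally go through a perf_session__set_tracepoints_handlers() wrapper
 * that supplies ARRAY_SIZE() for the last argument.
 */
static int example_handle_sched_switch(struct perf_evsel *evsel __maybe_unused,
				       struct perf_sample *sample __maybe_unused)
{
	return 0;	/* a real handler would consume the sample here */
}

static int __maybe_unused example_setup_handlers(struct perf_session *session)
{
	const struct perf_evsel_str_handler handlers[] = {
		{ "sched:sched_switch", example_handle_sched_switch },
	};

	return __perf_session__set_tracepoints_handlers(session, handlers,
							ARRAY_SIZE(handlers));
}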
Adrian Hunter3c659ee2014-10-27 15:49:22 +02001718
1719int perf_event__process_id_index(struct perf_tool *tool __maybe_unused,
1720 union perf_event *event,
1721 struct perf_session *session)
1722{
1723 struct perf_evlist *evlist = session->evlist;
1724 struct id_index_event *ie = &event->id_index;
1725 size_t i, nr, max_nr;
1726
1727 max_nr = (ie->header.size - sizeof(struct id_index_event)) /
1728 sizeof(struct id_index_entry);
1729 nr = ie->nr;
1730 if (nr > max_nr)
1731 return -EINVAL;
1732
1733 if (dump_trace)
1734 fprintf(stdout, " nr: %zu\n", nr);
1735
1736 for (i = 0; i < nr; i++) {
1737 struct id_index_entry *e = &ie->entries[i];
1738 struct perf_sample_id *sid;
1739
1740 if (dump_trace) {
1741 fprintf(stdout, " ... id: %"PRIu64, e->id);
1742 fprintf(stdout, " idx: %"PRIu64, e->idx);
1743 fprintf(stdout, " cpu: %"PRId64, e->cpu);
1744 fprintf(stdout, " tid: %"PRId64"\n", e->tid);
1745 }
1746
1747 sid = perf_evlist__id2sid(evlist, e->id);
1748 if (!sid)
1749 return -ENOENT;
1750 sid->idx = e->idx;
1751 sid->cpu = e->cpu;
1752 sid->tid = e->tid;
1753 }
1754 return 0;
1755}
1756
1757int perf_event__synthesize_id_index(struct perf_tool *tool,
1758 perf_event__handler_t process,
1759 struct perf_evlist *evlist,
1760 struct machine *machine)
1761{
1762 union perf_event *ev;
1763 struct perf_evsel *evsel;
1764 size_t nr = 0, i = 0, sz, max_nr, n;
1765 int err;
1766
1767 pr_debug2("Synthesizing id index\n");
1768
1769 max_nr = (UINT16_MAX - sizeof(struct id_index_event)) /
1770 sizeof(struct id_index_entry);
1771
Arnaldo Carvalho de Melocba9b842014-10-29 11:31:54 -02001772 evlist__for_each(evlist, evsel)
Adrian Hunter3c659ee2014-10-27 15:49:22 +02001773 nr += evsel->ids;
1774
1775 n = nr > max_nr ? max_nr : nr;
1776 sz = sizeof(struct id_index_event) + n * sizeof(struct id_index_entry);
1777 ev = zalloc(sz);
1778 if (!ev)
1779 return -ENOMEM;
1780
1781 ev->id_index.header.type = PERF_RECORD_ID_INDEX;
1782 ev->id_index.header.size = sz;
1783 ev->id_index.nr = n;
1784
Arnaldo Carvalho de Melocba9b842014-10-29 11:31:54 -02001785 evlist__for_each(evlist, evsel) {
Adrian Hunter3c659ee2014-10-27 15:49:22 +02001786 u32 j;
1787
1788 for (j = 0; j < evsel->ids; j++) {
1789 struct id_index_entry *e;
1790 struct perf_sample_id *sid;
1791
1792 if (i >= n) {
1793 err = process(tool, ev, NULL, machine);
1794 if (err)
1795 goto out_err;
1796 nr -= n;
1797 i = 0;
1798 }
1799
1800 e = &ev->id_index.entries[i++];
1801
1802 e->id = evsel->id[j];
1803
1804 sid = perf_evlist__id2sid(evlist, e->id);
1805 if (!sid) {
1806 free(ev);
1807 return -ENOENT;
1808 }
1809
1810 e->idx = sid->idx;
1811 e->cpu = sid->cpu;
1812 e->tid = sid->tid;
1813 }
1814 }
1815
1816 sz = sizeof(struct id_index_event) + nr * sizeof(struct id_index_entry);
1817 ev->id_index.header.size = sz;
1818 ev->id_index.nr = nr;
1819
1820 err = process(tool, ev, NULL, machine);
1821out_err:
1822 free(ev);
1823
1824 return err;
1825}
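/*
 * Added note, not in the original file: because an id_index_event carries
 * its size in a 16-bit header field, at most max_nr entries fit in a single
 * record.  When the evlist has more ids than that, the loop above emits a
 * full PERF_RECORD_ID_INDEX via process() each time the buffer fills and
 * then reuses it, so readers must be prepared to see several id index
 * records per file.
 */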