#include "util.h"
#include <sys/types.h>
#include <byteswap.h>
#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <sys/utsname.h>

#include "evlist.h"
#include "evsel.h"
#include "header.h"
#include "../perf.h"
#include "trace-event.h"
#include "session.h"
#include "symbol.h"
#include "debug.h"
#include "cpumap.h"
#include "pmu.h"
#include "vdso.h"
#include "strbuf.h"
#include "build-id.h"
#include "data.h"
#include <api/fs/fs.h>
#include "asm/bug.h"

/*
 * magic2 = "PERFILE2"
 * must be a numerical value to let the endianness
 * determine the memory layout. That way we are able
 * to detect endianness when reading the perf.data file
 * back.
 *
 * we check for legacy (PERFFILE) format.
 */
static const char *__perf_magic1 = "PERFFILE";
static const u64 __perf_magic2    = 0x32454c4946524550ULL;
static const u64 __perf_magic2_sw = 0x50455246494c4532ULL;

#define PERF_MAGIC	__perf_magic2

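/*
 * Illustrative sketch (not part of perf): it shows how a reader could use
 * the two constants above to detect the byte order of a perf.data file.
 * The helper name is hypothetical and the block is compiled out; perf's
 * real check lives in its header-reading path.
 */
#if 0
static int sketch_magic_endian(u64 magic, bool *needs_swap)
{
	if (magic == __perf_magic2) {		/* writer had our byte order */
		*needs_swap = false;
		return 0;
	}
	if (magic == __perf_magic2_sw) {	/* writer had the opposite byte order */
		*needs_swap = true;
		return 0;
	}
	return -1;				/* not a PERFILE2 header */
}
#endif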
const char perf_version_string[] = PERF_VERSION;

struct perf_file_attr {
	struct perf_event_attr	attr;
	struct perf_file_section	ids;
};

void perf_header__set_feat(struct perf_header *header, int feat)
{
	set_bit(feat, header->adds_features);
}

void perf_header__clear_feat(struct perf_header *header, int feat)
{
	clear_bit(feat, header->adds_features);
}

bool perf_header__has_feat(const struct perf_header *header, int feat)
{
	return test_bit(feat, header->adds_features);
}

static int do_write(int fd, const void *buf, size_t size)
{
	while (size) {
		int ret = write(fd, buf, size);

		if (ret < 0)
			return -errno;

		size -= ret;
		buf += ret;
	}

	return 0;
}

int write_padded(int fd, const void *bf, size_t count, size_t count_aligned)
{
	static const char zero_buf[NAME_ALIGN];
	int err = do_write(fd, bf, count);

	if (!err)
		err = do_write(fd, zero_buf, count_aligned - count);

	return err;
}

#define string_size(str) \
	(PERF_ALIGN((strlen(str) + 1), NAME_ALIGN) + sizeof(u32))

static int do_write_string(int fd, const char *str)
{
	u32 len, olen;
	int ret;

	olen = strlen(str) + 1;
	len = PERF_ALIGN(olen, NAME_ALIGN);

	/* write len, incl. \0 */
	ret = do_write(fd, &len, sizeof(len));
	if (ret < 0)
		return ret;

	return write_padded(fd, str, olen, len);
}

static char *do_read_string(int fd, struct perf_header *ph)
{
	ssize_t sz, ret;
	u32 len;
	char *buf;

	sz = readn(fd, &len, sizeof(len));
	if (sz < (ssize_t)sizeof(len))
		return NULL;

	if (ph->needs_swap)
		len = bswap_32(len);

	buf = malloc(len);
	if (!buf)
		return NULL;

	ret = readn(fd, buf, len);
	if (ret == (ssize_t)len) {
		/*
		 * strings are padded by zeroes
		 * thus the actual strlen of buf
		 * may be less than len
		 */
		return buf;
	}

	free(buf);
	return NULL;
}

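/*
 * Illustrative sketch of the on-disk layout produced by do_write_string()
 * and consumed by do_read_string() above, assuming NAME_ALIGN is 64: a u32
 * length (string length + 1 for the NUL, rounded up to NAME_ALIGN) followed
 * by that many bytes, zero padded.  The struct below is hypothetical and
 * compiled out; it only documents the byte layout for the string "perf".
 */
#if 0
struct sketch_string_on_disk {
	u32  len;		/* PERF_ALIGN(strlen("perf") + 1, NAME_ALIGN) == 64 */
	char data[64];		/* "perf\0" followed by 59 bytes of zero padding */
};
#endif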
static int write_tracing_data(int fd, struct perf_header *h __maybe_unused,
			      struct perf_evlist *evlist)
{
	return read_tracing_data(fd, &evlist->entries);
}


static int write_build_id(int fd, struct perf_header *h,
			  struct perf_evlist *evlist __maybe_unused)
{
	struct perf_session *session;
	int err;

	session = container_of(h, struct perf_session, header);

	if (!perf_session__read_build_ids(session, true))
		return -1;

	err = perf_session__write_buildid_table(session, fd);
	if (err < 0) {
		pr_debug("failed to write buildid table\n");
		return err;
	}
	perf_session__cache_build_ids(session);

	return 0;
}

static int write_hostname(int fd, struct perf_header *h __maybe_unused,
			  struct perf_evlist *evlist __maybe_unused)
{
	struct utsname uts;
	int ret;

	ret = uname(&uts);
	if (ret < 0)
		return -1;

	return do_write_string(fd, uts.nodename);
}

static int write_osrelease(int fd, struct perf_header *h __maybe_unused,
			   struct perf_evlist *evlist __maybe_unused)
{
	struct utsname uts;
	int ret;

	ret = uname(&uts);
	if (ret < 0)
		return -1;

	return do_write_string(fd, uts.release);
}

static int write_arch(int fd, struct perf_header *h __maybe_unused,
		      struct perf_evlist *evlist __maybe_unused)
{
	struct utsname uts;
	int ret;

	ret = uname(&uts);
	if (ret < 0)
		return -1;

	return do_write_string(fd, uts.machine);
}

static int write_version(int fd, struct perf_header *h __maybe_unused,
			 struct perf_evlist *evlist __maybe_unused)
{
	return do_write_string(fd, perf_version_string);
}

static int __write_cpudesc(int fd, const char *cpuinfo_proc)
{
	FILE *file;
	char *buf = NULL;
	char *s, *p;
	const char *search = cpuinfo_proc;
	size_t len = 0;
	int ret = -1;

	if (!search)
		return -1;

	file = fopen("/proc/cpuinfo", "r");
	if (!file)
		return -1;

	while (getline(&buf, &len, file) > 0) {
		ret = strncmp(buf, search, strlen(search));
		if (!ret)
			break;
	}

	if (ret) {
		ret = -1;
		goto done;
	}

	s = buf;

	p = strchr(buf, ':');
	if (p && *(p+1) == ' ' && *(p+2))
		s = p + 2;
	p = strchr(s, '\n');
	if (p)
		*p = '\0';

	/* squash extra space characters (branding string) */
	p = s;
	while (*p) {
		if (isspace(*p)) {
			char *r = p + 1;
			char *q = r;
			*p = ' ';
			while (*q && isspace(*q))
				q++;
			if (q != (p+1))
				while ((*r++ = *q++));
		}
		p++;
	}
	ret = do_write_string(fd, s);
done:
	free(buf);
	fclose(file);
	return ret;
}

static int write_cpudesc(int fd, struct perf_header *h __maybe_unused,
			 struct perf_evlist *evlist __maybe_unused)
{
#ifndef CPUINFO_PROC
#define CPUINFO_PROC	{"model name", }
#endif
	const char *cpuinfo_procs[] = CPUINFO_PROC;
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(cpuinfo_procs); i++) {
		int ret;
		ret = __write_cpudesc(fd, cpuinfo_procs[i]);
		if (ret >= 0)
			return ret;
	}
	return -1;
}


static int write_nrcpus(int fd, struct perf_header *h __maybe_unused,
			struct perf_evlist *evlist __maybe_unused)
{
	long nr;
	u32 nrc, nra;
	int ret;

	nr = sysconf(_SC_NPROCESSORS_CONF);
	if (nr < 0)
		return -1;

	nrc = (u32)(nr & UINT_MAX);

	nr = sysconf(_SC_NPROCESSORS_ONLN);
	if (nr < 0)
		return -1;

	nra = (u32)(nr & UINT_MAX);

	ret = do_write(fd, &nrc, sizeof(nrc));
	if (ret < 0)
		return ret;

	return do_write(fd, &nra, sizeof(nra));
}

static int write_event_desc(int fd, struct perf_header *h __maybe_unused,
			    struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	u32 nre, nri, sz;
	int ret;

	nre = evlist->nr_entries;

	/*
	 * write number of events
	 */
	ret = do_write(fd, &nre, sizeof(nre));
	if (ret < 0)
		return ret;

	/*
	 * size of perf_event_attr struct
	 */
	sz = (u32)sizeof(evsel->attr);
	ret = do_write(fd, &sz, sizeof(sz));
	if (ret < 0)
		return ret;

	evlist__for_each_entry(evlist, evsel) {
		ret = do_write(fd, &evsel->attr, sz);
		if (ret < 0)
			return ret;
		/*
		 * write number of unique id per event
		 * there is one id per instance of an event
		 *
		 * copy into an nri to be independent of the
		 * type of ids,
		 */
		nri = evsel->ids;
		ret = do_write(fd, &nri, sizeof(nri));
		if (ret < 0)
			return ret;

		/*
		 * write event string as passed on cmdline
		 */
		ret = do_write_string(fd, perf_evsel__name(evsel));
		if (ret < 0)
			return ret;
		/*
		 * write unique ids for this event
		 */
		ret = do_write(fd, evsel->id, evsel->ids * sizeof(u64));
		if (ret < 0)
			return ret;
	}
	return 0;
}

static int write_cmdline(int fd, struct perf_header *h __maybe_unused,
			 struct perf_evlist *evlist __maybe_unused)
{
	char buf[MAXPATHLEN];
	char proc[32];
	u32 n;
	int i, ret;

	/*
	 * actual path to perf binary
	 */
	sprintf(proc, "/proc/%d/exe", getpid());
	ret = readlink(proc, buf, sizeof(buf));
	if (ret <= 0)
		return -1;

	/* readlink() does not add null termination */
	buf[ret] = '\0';

	/* account for binary path */
	n = perf_env.nr_cmdline + 1;

	ret = do_write(fd, &n, sizeof(n));
	if (ret < 0)
		return ret;

	ret = do_write_string(fd, buf);
	if (ret < 0)
		return ret;

	for (i = 0 ; i < perf_env.nr_cmdline; i++) {
		ret = do_write_string(fd, perf_env.cmdline_argv[i]);
		if (ret < 0)
			return ret;
	}
	return 0;
}

#define CORE_SIB_FMT \
	"/sys/devices/system/cpu/cpu%d/topology/core_siblings_list"
#define THRD_SIB_FMT \
	"/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list"

struct cpu_topo {
	u32 cpu_nr;
	u32 core_sib;
	u32 thread_sib;
	char **core_siblings;
	char **thread_siblings;
};

static int build_cpu_topo(struct cpu_topo *tp, int cpu)
{
	FILE *fp;
	char filename[MAXPATHLEN];
	char *buf = NULL, *p;
	size_t len = 0;
	ssize_t sret;
	u32 i = 0;
	int ret = -1;

	sprintf(filename, CORE_SIB_FMT, cpu);
	fp = fopen(filename, "r");
	if (!fp)
		goto try_threads;

	sret = getline(&buf, &len, fp);
	fclose(fp);
	if (sret <= 0)
		goto try_threads;

	p = strchr(buf, '\n');
	if (p)
		*p = '\0';

	for (i = 0; i < tp->core_sib; i++) {
		if (!strcmp(buf, tp->core_siblings[i]))
			break;
	}
	if (i == tp->core_sib) {
		tp->core_siblings[i] = buf;
		tp->core_sib++;
		buf = NULL;
		len = 0;
	}
	ret = 0;

try_threads:
	sprintf(filename, THRD_SIB_FMT, cpu);
	fp = fopen(filename, "r");
	if (!fp)
		goto done;

	if (getline(&buf, &len, fp) <= 0)
		goto done;

	p = strchr(buf, '\n');
	if (p)
		*p = '\0';

	for (i = 0; i < tp->thread_sib; i++) {
		if (!strcmp(buf, tp->thread_siblings[i]))
			break;
	}
	if (i == tp->thread_sib) {
		tp->thread_siblings[i] = buf;
		tp->thread_sib++;
		buf = NULL;
	}
	ret = 0;
done:
	if (fp)
		fclose(fp);
	free(buf);
	return ret;
}

static void free_cpu_topo(struct cpu_topo *tp)
{
	u32 i;

	if (!tp)
		return;

	for (i = 0 ; i < tp->core_sib; i++)
		zfree(&tp->core_siblings[i]);

	for (i = 0 ; i < tp->thread_sib; i++)
		zfree(&tp->thread_siblings[i]);

	free(tp);
}

static struct cpu_topo *build_cpu_topology(void)
{
	struct cpu_topo *tp = NULL;
	void *addr;
	u32 nr, i;
	size_t sz;
	long ncpus;
	int ret = -1;
	struct cpu_map *map;

	ncpus = sysconf(_SC_NPROCESSORS_CONF);
	if (ncpus < 0)
		return NULL;

	/* build online CPU map */
	map = cpu_map__new(NULL);
	if (map == NULL) {
		pr_debug("failed to get system cpumap\n");
		return NULL;
	}

	nr = (u32)(ncpus & UINT_MAX);

	sz = nr * sizeof(char *);
	addr = calloc(1, sizeof(*tp) + 2 * sz);
	if (!addr)
		goto out_free;

	tp = addr;
	tp->cpu_nr = nr;
	addr += sizeof(*tp);
	tp->core_siblings = addr;
	addr += sz;
	tp->thread_siblings = addr;

	for (i = 0; i < nr; i++) {
		if (!cpu_map__has(map, i))
			continue;

		ret = build_cpu_topo(tp, i);
		if (ret < 0)
			break;
	}

out_free:
	cpu_map__put(map);
	if (ret) {
		free_cpu_topo(tp);
		tp = NULL;
	}
	return tp;
}

static int write_cpu_topology(int fd, struct perf_header *h __maybe_unused,
			      struct perf_evlist *evlist __maybe_unused)
{
	struct cpu_topo *tp;
	u32 i;
	int ret, j;

	tp = build_cpu_topology();
	if (!tp)
		return -1;

	ret = do_write(fd, &tp->core_sib, sizeof(tp->core_sib));
	if (ret < 0)
		goto done;

	for (i = 0; i < tp->core_sib; i++) {
		ret = do_write_string(fd, tp->core_siblings[i]);
		if (ret < 0)
			goto done;
	}
	ret = do_write(fd, &tp->thread_sib, sizeof(tp->thread_sib));
	if (ret < 0)
		goto done;

	for (i = 0; i < tp->thread_sib; i++) {
		ret = do_write_string(fd, tp->thread_siblings[i]);
		if (ret < 0)
			break;
	}

	ret = perf_env__read_cpu_topology_map(&perf_env);
	if (ret < 0)
		goto done;

	for (j = 0; j < perf_env.nr_cpus_avail; j++) {
		ret = do_write(fd, &perf_env.cpu[j].core_id,
			       sizeof(perf_env.cpu[j].core_id));
		if (ret < 0)
			return ret;
		ret = do_write(fd, &perf_env.cpu[j].socket_id,
			       sizeof(perf_env.cpu[j].socket_id));
		if (ret < 0)
			return ret;
	}
done:
	free_cpu_topo(tp);
	return ret;
}



static int write_total_mem(int fd, struct perf_header *h __maybe_unused,
			   struct perf_evlist *evlist __maybe_unused)
{
	char *buf = NULL;
	FILE *fp;
	size_t len = 0;
	int ret = -1, n;
	uint64_t mem;

	fp = fopen("/proc/meminfo", "r");
	if (!fp)
		return -1;

	while (getline(&buf, &len, fp) > 0) {
		ret = strncmp(buf, "MemTotal:", 9);
		if (!ret)
			break;
	}
	if (!ret) {
		n = sscanf(buf, "%*s %"PRIu64, &mem);
		if (n == 1)
			ret = do_write(fd, &mem, sizeof(mem));
	} else
		ret = -1;
	free(buf);
	fclose(fp);
	return ret;
}

static int write_topo_node(int fd, int node)
{
	char str[MAXPATHLEN];
	char field[32];
	char *buf = NULL, *p;
	size_t len = 0;
	FILE *fp;
	u64 mem_total, mem_free, mem;
	int ret = -1;

	sprintf(str, "/sys/devices/system/node/node%d/meminfo", node);
	fp = fopen(str, "r");
	if (!fp)
		return -1;

	while (getline(&buf, &len, fp) > 0) {
		/* skip over invalid lines */
		if (!strchr(buf, ':'))
			continue;
		if (sscanf(buf, "%*s %*d %31s %"PRIu64, field, &mem) != 2)
			goto done;
		if (!strcmp(field, "MemTotal:"))
			mem_total = mem;
		if (!strcmp(field, "MemFree:"))
			mem_free = mem;
	}

	fclose(fp);
	fp = NULL;

	ret = do_write(fd, &mem_total, sizeof(u64));
	if (ret)
		goto done;

	ret = do_write(fd, &mem_free, sizeof(u64));
	if (ret)
		goto done;

	ret = -1;
	sprintf(str, "/sys/devices/system/node/node%d/cpulist", node);

	fp = fopen(str, "r");
	if (!fp)
		goto done;

	if (getline(&buf, &len, fp) <= 0)
		goto done;

	p = strchr(buf, '\n');
	if (p)
		*p = '\0';

	ret = do_write_string(fd, buf);
done:
	free(buf);
	if (fp)
		fclose(fp);
	return ret;
}

static int write_numa_topology(int fd, struct perf_header *h __maybe_unused,
			       struct perf_evlist *evlist __maybe_unused)
{
	char *buf = NULL;
	size_t len = 0;
	FILE *fp;
	struct cpu_map *node_map = NULL;
	char *c;
	u32 nr, i, j;
	int ret = -1;

	fp = fopen("/sys/devices/system/node/online", "r");
	if (!fp)
		return -1;

	if (getline(&buf, &len, fp) <= 0)
		goto done;

	c = strchr(buf, '\n');
	if (c)
		*c = '\0';

	node_map = cpu_map__new(buf);
	if (!node_map)
		goto done;

	nr = (u32)node_map->nr;

	ret = do_write(fd, &nr, sizeof(nr));
	if (ret < 0)
		goto done;

	for (i = 0; i < nr; i++) {
		j = (u32)node_map->map[i];
		ret = do_write(fd, &j, sizeof(j));
		if (ret < 0)
			break;

		ret = write_topo_node(fd, i);
		if (ret < 0)
			break;
	}
done:
	free(buf);
	fclose(fp);
	cpu_map__put(node_map);
	return ret;
}

/*
 * File format:
 *
 * struct pmu_mappings {
 *	u32	pmu_num;
 *	struct pmu_map {
 *		u32	type;
 *		char	name[];
 *	}[pmu_num];
 * };
 */

static int write_pmu_mappings(int fd, struct perf_header *h __maybe_unused,
			      struct perf_evlist *evlist __maybe_unused)
{
	struct perf_pmu *pmu = NULL;
	off_t offset = lseek(fd, 0, SEEK_CUR);
	__u32 pmu_num = 0;
	int ret;

	/* write real pmu_num later */
	ret = do_write(fd, &pmu_num, sizeof(pmu_num));
	if (ret < 0)
		return ret;

	while ((pmu = perf_pmu__scan(pmu))) {
		if (!pmu->name)
			continue;
		pmu_num++;

		ret = do_write(fd, &pmu->type, sizeof(pmu->type));
		if (ret < 0)
			return ret;

		ret = do_write_string(fd, pmu->name);
		if (ret < 0)
			return ret;
	}

	if (pwrite(fd, &pmu_num, sizeof(pmu_num), offset) != sizeof(pmu_num)) {
		/* discard all */
		lseek(fd, offset, SEEK_SET);
		return -1;
	}

	return 0;
}

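/*
 * Illustrative reader for the pmu_mappings layout documented above.  The
 * helper below is hypothetical and compiled out; it is not the parser perf
 * actually uses, it only walks the same u32 count followed by
 * (u32 type, padded string name) pairs that write_pmu_mappings() emits.
 */
#if 0
static int sketch_read_pmu_mappings(int fd, struct perf_header *ph)
{
	u32 pmu_num, type, i;
	char *name;

	if (readn(fd, &pmu_num, sizeof(pmu_num)) != sizeof(pmu_num))
		return -1;
	if (ph->needs_swap)
		pmu_num = bswap_32(pmu_num);

	for (i = 0; i < pmu_num; i++) {
		if (readn(fd, &type, sizeof(type)) != sizeof(type))
			return -1;
		if (ph->needs_swap)
			type = bswap_32(type);

		name = do_read_string(fd, ph);	/* u32 len + zero-padded bytes */
		if (!name)
			return -1;
		free(name);
	}
	return 0;
}
#endif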
/*
 * File format:
 *
 * struct group_descs {
 *	u32	nr_groups;
 *	struct group_desc {
 *		char	name[];
 *		u32	leader_idx;
 *		u32	nr_members;
 *	}[nr_groups];
 * };
 */
static int write_group_desc(int fd, struct perf_header *h __maybe_unused,
			    struct perf_evlist *evlist)
{
	u32 nr_groups = evlist->nr_groups;
	struct perf_evsel *evsel;
	int ret;

	ret = do_write(fd, &nr_groups, sizeof(nr_groups));
	if (ret < 0)
		return ret;

	evlist__for_each_entry(evlist, evsel) {
		if (perf_evsel__is_group_leader(evsel) &&
		    evsel->nr_members > 1) {
			const char *name = evsel->group_name ?: "{anon_group}";
			u32 leader_idx = evsel->idx;
			u32 nr_members = evsel->nr_members;

			ret = do_write_string(fd, name);
			if (ret < 0)
				return ret;

			ret = do_write(fd, &leader_idx, sizeof(leader_idx));
			if (ret < 0)
				return ret;

			ret = do_write(fd, &nr_members, sizeof(nr_members));
			if (ret < 0)
				return ret;
		}
	}
	return 0;
}

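/*
 * Worked example (values illustrative) of what write_group_desc() above
 * emits for a session with one two-event group recorded as
 * '{cycles,instructions}'; only groups with more than one member are
 * written:
 *
 *	u32  nr_groups  = 1
 *	     name       = "{anon_group}"	(via do_write_string)
 *	u32  leader_idx = 0			(evsel->idx of the leader)
 *	u32  nr_members = 2
 */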
/*
 * default get_cpuid(): nothing gets recorded
 * actual implementation must be in arch/$(ARCH)/util/header.c
 */
int __weak get_cpuid(char *buffer __maybe_unused, size_t sz __maybe_unused)
{
	return -1;
}

static int write_cpuid(int fd, struct perf_header *h __maybe_unused,
		       struct perf_evlist *evlist __maybe_unused)
{
	char buffer[64];
	int ret;

	ret = get_cpuid(buffer, sizeof(buffer));
	if (!ret)
		goto write_it;

	return -1;
write_it:
	return do_write_string(fd, buffer);
}

static int write_branch_stack(int fd __maybe_unused,
			      struct perf_header *h __maybe_unused,
			      struct perf_evlist *evlist __maybe_unused)
{
	return 0;
}

static int write_auxtrace(int fd, struct perf_header *h,
			  struct perf_evlist *evlist __maybe_unused)
{
	struct perf_session *session;
	int err;

	session = container_of(h, struct perf_session, header);

	err = auxtrace_index__write(fd, &session->auxtrace_index);
	if (err < 0)
		pr_err("Failed to write auxtrace index\n");
	return err;
}

static int cpu_cache_level__sort(const void *a, const void *b)
{
	struct cpu_cache_level *cache_a = (struct cpu_cache_level *)a;
	struct cpu_cache_level *cache_b = (struct cpu_cache_level *)b;

	return cache_a->level - cache_b->level;
}

static bool cpu_cache_level__cmp(struct cpu_cache_level *a, struct cpu_cache_level *b)
{
	if (a->level != b->level)
		return false;

	if (a->line_size != b->line_size)
		return false;

	if (a->sets != b->sets)
		return false;

	if (a->ways != b->ways)
		return false;

	if (strcmp(a->type, b->type))
		return false;

	if (strcmp(a->size, b->size))
		return false;

	if (strcmp(a->map, b->map))
		return false;

	return true;
}

static int cpu_cache_level__read(struct cpu_cache_level *cache, u32 cpu, u16 level)
{
	char path[PATH_MAX], file[PATH_MAX];
	struct stat st;
	size_t len;

	scnprintf(path, PATH_MAX, "devices/system/cpu/cpu%d/cache/index%d/", cpu, level);
	scnprintf(file, PATH_MAX, "%s/%s", sysfs__mountpoint(), path);

	if (stat(file, &st))
		return 1;

	scnprintf(file, PATH_MAX, "%s/level", path);
	if (sysfs__read_int(file, (int *) &cache->level))
		return -1;

	scnprintf(file, PATH_MAX, "%s/coherency_line_size", path);
	if (sysfs__read_int(file, (int *) &cache->line_size))
		return -1;

	scnprintf(file, PATH_MAX, "%s/number_of_sets", path);
	if (sysfs__read_int(file, (int *) &cache->sets))
		return -1;

	scnprintf(file, PATH_MAX, "%s/ways_of_associativity", path);
	if (sysfs__read_int(file, (int *) &cache->ways))
		return -1;

	scnprintf(file, PATH_MAX, "%s/type", path);
	if (sysfs__read_str(file, &cache->type, &len))
		return -1;

	cache->type[len] = 0;
	cache->type = rtrim(cache->type);

	scnprintf(file, PATH_MAX, "%s/size", path);
	if (sysfs__read_str(file, &cache->size, &len)) {
		free(cache->type);
		return -1;
	}

	cache->size[len] = 0;
	cache->size = rtrim(cache->size);

	scnprintf(file, PATH_MAX, "%s/shared_cpu_list", path);
	if (sysfs__read_str(file, &cache->map, &len)) {
		free(cache->map);
		free(cache->type);
		return -1;
	}

	cache->map[len] = 0;
	cache->map = rtrim(cache->map);
	return 0;
}

static void cpu_cache_level__fprintf(FILE *out, struct cpu_cache_level *c)
{
	fprintf(out, "L%d %-15s %8s [%s]\n", c->level, c->type, c->size, c->map);
}

static int build_caches(struct cpu_cache_level caches[], u32 size, u32 *cntp)
{
	u32 i, cnt = 0;
	long ncpus;
	u32 nr, cpu;
	u16 level;

	ncpus = sysconf(_SC_NPROCESSORS_CONF);
	if (ncpus < 0)
		return -1;

	nr = (u32)(ncpus & UINT_MAX);

	for (cpu = 0; cpu < nr; cpu++) {
		for (level = 0; level < 10; level++) {
			struct cpu_cache_level c;
			int err;

			err = cpu_cache_level__read(&c, cpu, level);
			if (err < 0)
				return err;

			if (err == 1)
				break;

			for (i = 0; i < cnt; i++) {
				if (cpu_cache_level__cmp(&c, &caches[i]))
					break;
			}

			if (i == cnt)
				caches[cnt++] = c;
			else
				cpu_cache_level__free(&c);

			if (WARN_ONCE(cnt == size, "way too many cpu caches.."))
				goto out;
		}
	}
out:
	*cntp = cnt;
	return 0;
}

#define MAX_CACHES 2000

static int write_cache(int fd, struct perf_header *h __maybe_unused,
		       struct perf_evlist *evlist __maybe_unused)
{
	struct cpu_cache_level caches[MAX_CACHES];
	u32 cnt = 0, i, version = 1;
	int ret;

	ret = build_caches(caches, MAX_CACHES, &cnt);
	if (ret)
		goto out;

	qsort(&caches, cnt, sizeof(struct cpu_cache_level), cpu_cache_level__sort);

	ret = do_write(fd, &version, sizeof(u32));
	if (ret < 0)
		goto out;

	ret = do_write(fd, &cnt, sizeof(u32));
	if (ret < 0)
		goto out;

	for (i = 0; i < cnt; i++) {
		struct cpu_cache_level *c = &caches[i];

		#define _W(v)					\
			ret = do_write(fd, &c->v, sizeof(u32));	\
			if (ret < 0)				\
				goto out;

		_W(level)
		_W(line_size)
		_W(sets)
		_W(ways)
		#undef _W

		#define _W(v)						\
			ret = do_write_string(fd, (const char *) c->v);	\
			if (ret < 0)					\
				goto out;

		_W(type)
		_W(size)
		_W(map)
		#undef _W
	}

out:
	for (i = 0; i < cnt; i++)
		cpu_cache_level__free(&caches[i]);
	return ret;
}

static int write_stat(int fd __maybe_unused,
		      struct perf_header *h __maybe_unused,
		      struct perf_evlist *evlist __maybe_unused)
{
	return 0;
}

static void print_hostname(struct perf_header *ph, int fd __maybe_unused,
			   FILE *fp)
{
	fprintf(fp, "# hostname : %s\n", ph->env.hostname);
}

static void print_osrelease(struct perf_header *ph, int fd __maybe_unused,
			    FILE *fp)
{
	fprintf(fp, "# os release : %s\n", ph->env.os_release);
}

static void print_arch(struct perf_header *ph, int fd __maybe_unused, FILE *fp)
{
	fprintf(fp, "# arch : %s\n", ph->env.arch);
}

static void print_cpudesc(struct perf_header *ph, int fd __maybe_unused,
			  FILE *fp)
{
	fprintf(fp, "# cpudesc : %s\n", ph->env.cpu_desc);
}

static void print_nrcpus(struct perf_header *ph, int fd __maybe_unused,
			 FILE *fp)
{
	fprintf(fp, "# nrcpus online : %u\n", ph->env.nr_cpus_online);
	fprintf(fp, "# nrcpus avail : %u\n", ph->env.nr_cpus_avail);
}

static void print_version(struct perf_header *ph, int fd __maybe_unused,
			  FILE *fp)
{
	fprintf(fp, "# perf version : %s\n", ph->env.version);
}

static void print_cmdline(struct perf_header *ph, int fd __maybe_unused,
			  FILE *fp)
{
	int nr, i;

	nr = ph->env.nr_cmdline;

	fprintf(fp, "# cmdline : ");

	for (i = 0; i < nr; i++)
		fprintf(fp, "%s ", ph->env.cmdline_argv[i]);
	fputc('\n', fp);
}

static void print_cpu_topology(struct perf_header *ph, int fd __maybe_unused,
			       FILE *fp)
{
	int nr, i;
	char *str;
	int cpu_nr = ph->env.nr_cpus_online;

	nr = ph->env.nr_sibling_cores;
	str = ph->env.sibling_cores;

	for (i = 0; i < nr; i++) {
		fprintf(fp, "# sibling cores : %s\n", str);
		str += strlen(str) + 1;
	}

	nr = ph->env.nr_sibling_threads;
	str = ph->env.sibling_threads;

	for (i = 0; i < nr; i++) {
		fprintf(fp, "# sibling threads : %s\n", str);
		str += strlen(str) + 1;
	}

	if (ph->env.cpu != NULL) {
		for (i = 0; i < cpu_nr; i++)
			fprintf(fp, "# CPU %d: Core ID %d, Socket ID %d\n", i,
				ph->env.cpu[i].core_id, ph->env.cpu[i].socket_id);
	} else
		fprintf(fp, "# Core ID and Socket ID information is not available\n");
}

static void free_event_desc(struct perf_evsel *events)
{
	struct perf_evsel *evsel;

	if (!events)
		return;

	for (evsel = events; evsel->attr.size; evsel++) {
		zfree(&evsel->name);
		zfree(&evsel->id);
	}

	free(events);
}

static struct perf_evsel *
read_event_desc(struct perf_header *ph, int fd)
{
	struct perf_evsel *evsel, *events = NULL;
	u64 *id;
	void *buf = NULL;
	u32 nre, sz, nr, i, j;
	ssize_t ret;
	size_t msz;

	/* number of events */
	ret = readn(fd, &nre, sizeof(nre));
	if (ret != (ssize_t)sizeof(nre))
		goto error;

	if (ph->needs_swap)
		nre = bswap_32(nre);

	ret = readn(fd, &sz, sizeof(sz));
	if (ret != (ssize_t)sizeof(sz))
		goto error;

	if (ph->needs_swap)
		sz = bswap_32(sz);

	/* buffer to hold on file attr struct */
	buf = malloc(sz);
	if (!buf)
		goto error;

	/* the last event terminates with evsel->attr.size == 0: */
	events = calloc(nre + 1, sizeof(*events));
	if (!events)
		goto error;

	msz = sizeof(evsel->attr);
	if (sz < msz)
		msz = sz;

	for (i = 0, evsel = events; i < nre; evsel++, i++) {
		evsel->idx = i;

		/*
		 * must read entire on-file attr struct to
		 * sync up with layout.
		 */
		ret = readn(fd, buf, sz);
		if (ret != (ssize_t)sz)
			goto error;

		if (ph->needs_swap)
			perf_event__attr_swap(buf);

		memcpy(&evsel->attr, buf, msz);

		ret = readn(fd, &nr, sizeof(nr));
		if (ret != (ssize_t)sizeof(nr))
			goto error;

		if (ph->needs_swap) {
			nr = bswap_32(nr);
			evsel->needs_swap = true;
		}

		evsel->name = do_read_string(fd, ph);

		if (!nr)
			continue;

		id = calloc(nr, sizeof(*id));
		if (!id)
			goto error;
		evsel->ids = nr;
		evsel->id = id;

		for (j = 0 ; j < nr; j++) {
			ret = readn(fd, id, sizeof(*id));
			if (ret != (ssize_t)sizeof(*id))
				goto error;
			if (ph->needs_swap)
				*id = bswap_64(*id);
			id++;
		}
	}
out:
	free(buf);
	return events;
error:
	free_event_desc(events);
	events = NULL;
	goto out;
}

static int __desc_attr__fprintf(FILE *fp, const char *name, const char *val,
				void *priv __attribute__((unused)))
{
	return fprintf(fp, ", %s = %s", name, val);
}

static void print_event_desc(struct perf_header *ph, int fd, FILE *fp)
{
	struct perf_evsel *evsel, *events = read_event_desc(ph, fd);
	u32 j;
	u64 *id;

	if (!events) {
		fprintf(fp, "# event desc: not available or unable to read\n");
		return;
	}

	for (evsel = events; evsel->attr.size; evsel++) {
		fprintf(fp, "# event : name = %s, ", evsel->name);

		if (evsel->ids) {
			fprintf(fp, ", id = {");
			for (j = 0, id = evsel->id; j < evsel->ids; j++, id++) {
				if (j)
					fputc(',', fp);
				fprintf(fp, " %"PRIu64, *id);
			}
			fprintf(fp, " }");
		}

		perf_event_attr__fprintf(fp, &evsel->attr, __desc_attr__fprintf, NULL);

		fputc('\n', fp);
	}

	free_event_desc(events);
}

static void print_total_mem(struct perf_header *ph, int fd __maybe_unused,
			    FILE *fp)
{
	fprintf(fp, "# total memory : %Lu kB\n", ph->env.total_mem);
}

static void print_numa_topology(struct perf_header *ph, int fd __maybe_unused,
				FILE *fp)
{
	int i;
	struct numa_node *n;

	for (i = 0; i < ph->env.nr_numa_nodes; i++) {
		n = &ph->env.numa_nodes[i];

		fprintf(fp, "# node%u meminfo : total = %"PRIu64" kB,"
			    " free = %"PRIu64" kB\n",
			n->node, n->mem_total, n->mem_free);

		fprintf(fp, "# node%u cpu list : ", n->node);
		cpu_map__fprintf(n->map, fp);
	}
}

static void print_cpuid(struct perf_header *ph, int fd __maybe_unused, FILE *fp)
{
	fprintf(fp, "# cpuid : %s\n", ph->env.cpuid);
}

static void print_branch_stack(struct perf_header *ph __maybe_unused,
			       int fd __maybe_unused, FILE *fp)
{
	fprintf(fp, "# contains samples with branch stack\n");
}

static void print_auxtrace(struct perf_header *ph __maybe_unused,
			   int fd __maybe_unused, FILE *fp)
{
	fprintf(fp, "# contains AUX area data (e.g. instruction trace)\n");
}

static void print_stat(struct perf_header *ph __maybe_unused,
		       int fd __maybe_unused, FILE *fp)
{
	fprintf(fp, "# contains stat data\n");
}

static void print_cache(struct perf_header *ph,
			int fd __maybe_unused, FILE *fp)
{
	int i;

	fprintf(fp, "# CPU cache info:\n");
	for (i = 0; i < ph->env.caches_cnt; i++) {
		fprintf(fp, "# ");
		cpu_cache_level__fprintf(fp, &ph->env.caches[i]);
	}
}
1372
Namhyung Kim7e94cfc2012-09-24 17:15:00 +09001373static void print_pmu_mappings(struct perf_header *ph, int fd __maybe_unused,
1374 FILE *fp)
Robert Richter50a96672012-08-16 21:10:24 +02001375{
1376 const char *delimiter = "# pmu mappings: ";
Namhyung Kim7e94cfc2012-09-24 17:15:00 +09001377 char *str, *tmp;
Robert Richter50a96672012-08-16 21:10:24 +02001378 u32 pmu_num;
1379 u32 type;
1380
Namhyung Kim7e94cfc2012-09-24 17:15:00 +09001381 pmu_num = ph->env.nr_pmu_mappings;
Robert Richter50a96672012-08-16 21:10:24 +02001382 if (!pmu_num) {
1383 fprintf(fp, "# pmu mappings: not available\n");
1384 return;
1385 }
1386
Namhyung Kim7e94cfc2012-09-24 17:15:00 +09001387 str = ph->env.pmu_mappings;
Namhyung Kimbe4a2de2012-09-05 14:02:49 +09001388
Namhyung Kim7e94cfc2012-09-24 17:15:00 +09001389 while (pmu_num) {
1390 type = strtoul(str, &tmp, 0);
1391 if (*tmp != ':')
1392 goto error;
1393
1394 str = tmp + 1;
1395 fprintf(fp, "%s%s = %" PRIu32, delimiter, str, type);
1396
Robert Richter50a96672012-08-16 21:10:24 +02001397 delimiter = ", ";
Namhyung Kim7e94cfc2012-09-24 17:15:00 +09001398 str += strlen(str) + 1;
1399 pmu_num--;
Robert Richter50a96672012-08-16 21:10:24 +02001400 }
1401
1402 fprintf(fp, "\n");
1403
1404 if (!pmu_num)
1405 return;
1406error:
1407 fprintf(fp, "# pmu mappings: unable to read\n");
1408}
1409
Namhyung Kima8bb5592013-01-22 18:09:31 +09001410static void print_group_desc(struct perf_header *ph, int fd __maybe_unused,
1411 FILE *fp)
1412{
1413 struct perf_session *session;
1414 struct perf_evsel *evsel;
1415 u32 nr = 0;
1416
1417 session = container_of(ph, struct perf_session, header);
1418
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03001419 evlist__for_each_entry(session->evlist, evsel) {
Namhyung Kima8bb5592013-01-22 18:09:31 +09001420 if (perf_evsel__is_group_leader(evsel) &&
1421 evsel->nr_members > 1) {
1422 fprintf(fp, "# group: %s{%s", evsel->group_name ?: "",
1423 perf_evsel__name(evsel));
1424
1425 nr = evsel->nr_members - 1;
1426 } else if (nr) {
1427 fprintf(fp, ",%s", perf_evsel__name(evsel));
1428
1429 if (--nr == 0)
1430 fprintf(fp, "}\n");
1431 }
1432 }
1433}
1434
Robert Richter08d95bd2012-02-10 15:41:55 +01001435static int __event_process_build_id(struct build_id_event *bev,
1436 char *filename,
1437 struct perf_session *session)
1438{
1439 int err = -1;
Robert Richter08d95bd2012-02-10 15:41:55 +01001440 struct machine *machine;
Wang Nan1f121b02015-06-03 08:52:21 +00001441 u16 cpumode;
Robert Richter08d95bd2012-02-10 15:41:55 +01001442 struct dso *dso;
1443 enum dso_kernel_type dso_type;
1444
1445 machine = perf_session__findnew_machine(session, bev->pid);
1446 if (!machine)
1447 goto out;
1448
Wang Nan1f121b02015-06-03 08:52:21 +00001449 cpumode = bev->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
Robert Richter08d95bd2012-02-10 15:41:55 +01001450
Wang Nan1f121b02015-06-03 08:52:21 +00001451 switch (cpumode) {
Robert Richter08d95bd2012-02-10 15:41:55 +01001452 case PERF_RECORD_MISC_KERNEL:
1453 dso_type = DSO_TYPE_KERNEL;
Robert Richter08d95bd2012-02-10 15:41:55 +01001454 break;
1455 case PERF_RECORD_MISC_GUEST_KERNEL:
1456 dso_type = DSO_TYPE_GUEST_KERNEL;
Robert Richter08d95bd2012-02-10 15:41:55 +01001457 break;
1458 case PERF_RECORD_MISC_USER:
1459 case PERF_RECORD_MISC_GUEST_USER:
1460 dso_type = DSO_TYPE_USER;
Robert Richter08d95bd2012-02-10 15:41:55 +01001461 break;
1462 default:
1463 goto out;
1464 }
1465
Arnaldo Carvalho de Meloaa7cc2a2015-05-29 11:31:12 -03001466 dso = machine__findnew_dso(machine, filename);
Robert Richter08d95bd2012-02-10 15:41:55 +01001467 if (dso != NULL) {
Masami Hiramatsub5d8bbe2016-05-11 22:51:59 +09001468 char sbuild_id[SBUILD_ID_SIZE];
Robert Richter08d95bd2012-02-10 15:41:55 +01001469
1470 dso__set_build_id(dso, &bev->build_id);
1471
Wang Nan1f121b02015-06-03 08:52:21 +00001472 if (!is_kernel_module(filename, cpumode))
Robert Richter08d95bd2012-02-10 15:41:55 +01001473 dso->kernel = dso_type;
1474
1475 build_id__sprintf(dso->build_id, sizeof(dso->build_id),
1476 sbuild_id);
1477 pr_debug("build id event received for %s: %s\n",
1478 dso->long_name, sbuild_id);
Arnaldo Carvalho de Melod3a7c482015-06-02 11:53:26 -03001479 dso__put(dso);
Robert Richter08d95bd2012-02-10 15:41:55 +01001480 }
1481
1482 err = 0;
1483out:
1484 return err;
1485}
1486
1487static int perf_header__read_build_ids_abi_quirk(struct perf_header *header,
1488 int input, u64 offset, u64 size)
1489{
1490 struct perf_session *session = container_of(header, struct perf_session, header);
1491 struct {
1492 struct perf_event_header header;
Irina Tirdea9ac3e482012-09-11 01:15:01 +03001493 u8 build_id[PERF_ALIGN(BUILD_ID_SIZE, sizeof(u64))];
Robert Richter08d95bd2012-02-10 15:41:55 +01001494 char filename[0];
1495 } old_bev;
1496 struct build_id_event bev;
1497 char filename[PATH_MAX];
1498 u64 limit = offset + size;
1499
1500 while (offset < limit) {
1501 ssize_t len;
1502
Namhyung Kim5323f602012-12-17 15:38:54 +09001503 if (readn(input, &old_bev, sizeof(old_bev)) != sizeof(old_bev))
Robert Richter08d95bd2012-02-10 15:41:55 +01001504 return -1;
1505
1506 if (header->needs_swap)
1507 perf_event_header__bswap(&old_bev.header);
1508
1509 len = old_bev.header.size - sizeof(old_bev);
Namhyung Kim5323f602012-12-17 15:38:54 +09001510 if (readn(input, filename, len) != len)
Robert Richter08d95bd2012-02-10 15:41:55 +01001511 return -1;
1512
1513 bev.header = old_bev.header;
1514
1515 /*
1516 * As the pid is the missing value, we need to fill
1517 * it properly. The header.misc value give us nice hint.
1518 */
1519 bev.pid = HOST_KERNEL_ID;
1520 if (bev.header.misc == PERF_RECORD_MISC_GUEST_USER ||
1521 bev.header.misc == PERF_RECORD_MISC_GUEST_KERNEL)
1522 bev.pid = DEFAULT_GUEST_KERNEL_ID;
1523
1524 memcpy(bev.build_id, old_bev.build_id, sizeof(bev.build_id));
1525 __event_process_build_id(&bev, filename, session);
1526
1527 offset += bev.header.size;
1528 }
1529
1530 return 0;
1531}
1532
1533static int perf_header__read_build_ids(struct perf_header *header,
1534 int input, u64 offset, u64 size)
1535{
1536 struct perf_session *session = container_of(header, struct perf_session, header);
1537 struct build_id_event bev;
1538 char filename[PATH_MAX];
1539 u64 limit = offset + size, orig_offset = offset;
1540 int err = -1;
1541
1542 while (offset < limit) {
1543 ssize_t len;
1544
Namhyung Kim5323f602012-12-17 15:38:54 +09001545 if (readn(input, &bev, sizeof(bev)) != sizeof(bev))
Robert Richter08d95bd2012-02-10 15:41:55 +01001546 goto out;
1547
1548 if (header->needs_swap)
1549 perf_event_header__bswap(&bev.header);
1550
1551 len = bev.header.size - sizeof(bev);
Namhyung Kim5323f602012-12-17 15:38:54 +09001552 if (readn(input, filename, len) != len)
Robert Richter08d95bd2012-02-10 15:41:55 +01001553 goto out;
1554 /*
1555 * The a1645ce1 changeset:
1556 *
1557 * "perf: 'perf kvm' tool for monitoring guest performance from host"
1558 *
1559 * Added a field to struct build_id_event that broke the file
1560 * format.
1561 *
1562 * Since the kernel build-id is the first entry, process the
1563 * table using the old format if the well known
1564 * '[kernel.kallsyms]' string for the kernel build-id has the
1565 * first 4 characters chopped off (where the pid_t sits).
1566 */
1567 if (memcmp(filename, "nel.kallsyms]", 13) == 0) {
1568 if (lseek(input, orig_offset, SEEK_SET) == (off_t)-1)
1569 return -1;
1570 return perf_header__read_build_ids_abi_quirk(header, input, offset, size);
1571 }
1572
1573 __event_process_build_id(&bev, filename, session);
1574
1575 offset += bev.header.size;
1576 }
1577 err = 0;
1578out:
1579 return err;
1580}
1581
Namhyung Kim3d7eb862012-09-24 17:15:01 +09001582static int process_tracing_data(struct perf_file_section *section __maybe_unused,
1583 struct perf_header *ph __maybe_unused,
1584 int fd, void *data)
Robert Richterf1c67db2012-02-10 15:41:56 +01001585{
Namhyung Kim3dce2ce2013-03-21 16:18:48 +09001586 ssize_t ret = trace_report(fd, data, false);
1587 return ret < 0 ? -1 : 0;
Robert Richterf1c67db2012-02-10 15:41:56 +01001588}
1589
1590static int process_build_id(struct perf_file_section *section,
Namhyung Kim3d7eb862012-09-24 17:15:01 +09001591 struct perf_header *ph, int fd,
Irina Tirdea1d037ca2012-09-11 01:15:03 +03001592 void *data __maybe_unused)
Robert Richterf1c67db2012-02-10 15:41:56 +01001593{
1594 if (perf_header__read_build_ids(ph, fd, section->offset, section->size))
1595 pr_debug("Failed to read buildids, continuing...\n");
1596 return 0;
1597}
1598
Namhyung Kima1ae5652012-09-24 17:14:59 +09001599static int process_hostname(struct perf_file_section *section __maybe_unused,
Namhyung Kim3d7eb862012-09-24 17:15:01 +09001600 struct perf_header *ph, int fd,
1601 void *data __maybe_unused)
Namhyung Kima1ae5652012-09-24 17:14:59 +09001602{
1603 ph->env.hostname = do_read_string(fd, ph);
1604 return ph->env.hostname ? 0 : -ENOMEM;
1605}
1606
1607static int process_osrelease(struct perf_file_section *section __maybe_unused,
Namhyung Kim3d7eb862012-09-24 17:15:01 +09001608 struct perf_header *ph, int fd,
1609 void *data __maybe_unused)
Namhyung Kima1ae5652012-09-24 17:14:59 +09001610{
1611 ph->env.os_release = do_read_string(fd, ph);
1612 return ph->env.os_release ? 0 : -ENOMEM;
1613}
1614
1615static int process_version(struct perf_file_section *section __maybe_unused,
Namhyung Kim3d7eb862012-09-24 17:15:01 +09001616 struct perf_header *ph, int fd,
1617 void *data __maybe_unused)
Namhyung Kima1ae5652012-09-24 17:14:59 +09001618{
1619 ph->env.version = do_read_string(fd, ph);
1620 return ph->env.version ? 0 : -ENOMEM;
1621}
1622
1623static int process_arch(struct perf_file_section *section __maybe_unused,
Namhyung Kim3d7eb862012-09-24 17:15:01 +09001624 struct perf_header *ph, int fd,
1625 void *data __maybe_unused)
Namhyung Kima1ae5652012-09-24 17:14:59 +09001626{
1627 ph->env.arch = do_read_string(fd, ph);
1628 return ph->env.arch ? 0 : -ENOMEM;
1629}
1630
1631static int process_nrcpus(struct perf_file_section *section __maybe_unused,
Namhyung Kim3d7eb862012-09-24 17:15:01 +09001632 struct perf_header *ph, int fd,
1633 void *data __maybe_unused)
Namhyung Kima1ae5652012-09-24 17:14:59 +09001634{
Jiri Olsa727ebd52013-11-28 11:30:14 +01001635 ssize_t ret;
Namhyung Kima1ae5652012-09-24 17:14:59 +09001636 u32 nr;
1637
Namhyung Kim5323f602012-12-17 15:38:54 +09001638 ret = readn(fd, &nr, sizeof(nr));
Namhyung Kima1ae5652012-09-24 17:14:59 +09001639 if (ret != sizeof(nr))
1640 return -1;
1641
1642 if (ph->needs_swap)
1643 nr = bswap_32(nr);
1644
Arnaldo Carvalho de Melocaa47042015-09-11 12:36:12 -03001645 ph->env.nr_cpus_avail = nr;
Namhyung Kima1ae5652012-09-24 17:14:59 +09001646
Namhyung Kim5323f602012-12-17 15:38:54 +09001647 ret = readn(fd, &nr, sizeof(nr));
Namhyung Kima1ae5652012-09-24 17:14:59 +09001648 if (ret != sizeof(nr))
1649 return -1;
1650
1651 if (ph->needs_swap)
1652 nr = bswap_32(nr);
1653
Arnaldo Carvalho de Melocaa47042015-09-11 12:36:12 -03001654 ph->env.nr_cpus_online = nr;
Namhyung Kima1ae5652012-09-24 17:14:59 +09001655 return 0;
1656}
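/*
 * HEADER_NRCPUS layout, as consumed above: two u32 values, the number of
 * available CPUs followed by the number of online CPUs, each byte-swapped
 * when the file and host endianness differ.
 */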
1657
1658static int process_cpudesc(struct perf_file_section *section __maybe_unused,
Namhyung Kim3d7eb862012-09-24 17:15:01 +09001659 struct perf_header *ph, int fd,
1660 void *data __maybe_unused)
Namhyung Kima1ae5652012-09-24 17:14:59 +09001661{
1662 ph->env.cpu_desc = do_read_string(fd, ph);
1663 return ph->env.cpu_desc ? 0 : -ENOMEM;
1664}
1665
1666static int process_cpuid(struct perf_file_section *section __maybe_unused,
Namhyung Kim3d7eb862012-09-24 17:15:01 +09001667 struct perf_header *ph, int fd,
1668 void *data __maybe_unused)
Namhyung Kima1ae5652012-09-24 17:14:59 +09001669{
1670 ph->env.cpuid = do_read_string(fd, ph);
1671 return ph->env.cpuid ? 0 : -ENOMEM;
1672}
1673
1674static int process_total_mem(struct perf_file_section *section __maybe_unused,
Namhyung Kim3d7eb862012-09-24 17:15:01 +09001675 struct perf_header *ph, int fd,
1676 void *data __maybe_unused)
Namhyung Kima1ae5652012-09-24 17:14:59 +09001677{
1678 uint64_t mem;
Jiri Olsa727ebd52013-11-28 11:30:14 +01001679 ssize_t ret;
Namhyung Kima1ae5652012-09-24 17:14:59 +09001680
Namhyung Kim5323f602012-12-17 15:38:54 +09001681 ret = readn(fd, &mem, sizeof(mem));
Namhyung Kima1ae5652012-09-24 17:14:59 +09001682 if (ret != sizeof(mem))
1683 return -1;
1684
1685 if (ph->needs_swap)
1686 mem = bswap_64(mem);
1687
1688 ph->env.total_mem = mem;
1689 return 0;
1690}
1691
Robert Richter7c2f7af2012-08-16 21:10:23 +02001692static struct perf_evsel *
1693perf_evlist__find_by_index(struct perf_evlist *evlist, int idx)
1694{
1695 struct perf_evsel *evsel;
1696
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03001697 evlist__for_each_entry(evlist, evsel) {
Robert Richter7c2f7af2012-08-16 21:10:23 +02001698 if (evsel->idx == idx)
1699 return evsel;
1700 }
1701
1702 return NULL;
1703}
1704
1705static void
Namhyung Kim3d7eb862012-09-24 17:15:01 +09001706perf_evlist__set_event_name(struct perf_evlist *evlist,
1707 struct perf_evsel *event)
Robert Richter7c2f7af2012-08-16 21:10:23 +02001708{
1709 struct perf_evsel *evsel;
1710
1711 if (!event->name)
1712 return;
1713
1714 evsel = perf_evlist__find_by_index(evlist, event->idx);
1715 if (!evsel)
1716 return;
1717
1718 if (evsel->name)
1719 return;
1720
1721 evsel->name = strdup(event->name);
1722}
1723
1724static int
Irina Tirdea1d037ca2012-09-11 01:15:03 +03001725process_event_desc(struct perf_file_section *section __maybe_unused,
Namhyung Kim3d7eb862012-09-24 17:15:01 +09001726 struct perf_header *header, int fd,
Irina Tirdea1d037ca2012-09-11 01:15:03 +03001727 void *data __maybe_unused)
Robert Richter7c2f7af2012-08-16 21:10:23 +02001728{
Namhyung Kim3d7eb862012-09-24 17:15:01 +09001729 struct perf_session *session;
Robert Richter7c2f7af2012-08-16 21:10:23 +02001730 struct perf_evsel *evsel, *events = read_event_desc(header, fd);
1731
1732 if (!events)
1733 return 0;
1734
Namhyung Kim3d7eb862012-09-24 17:15:01 +09001735 session = container_of(header, struct perf_session, header);
Robert Richter7c2f7af2012-08-16 21:10:23 +02001736 for (evsel = events; evsel->attr.size; evsel++)
1737 perf_evlist__set_event_name(session->evlist, evsel);
1738
1739 free_event_desc(events);
1740
1741 return 0;
1742}
1743
Jiri Olsa768dd3f2015-07-21 14:31:31 +02001744static int process_cmdline(struct perf_file_section *section,
Namhyung Kim3d7eb862012-09-24 17:15:01 +09001745 struct perf_header *ph, int fd,
1746 void *data __maybe_unused)
Namhyung Kima1ae5652012-09-24 17:14:59 +09001747{
Jiri Olsa727ebd52013-11-28 11:30:14 +01001748 ssize_t ret;
Jiri Olsa768dd3f2015-07-21 14:31:31 +02001749 char *str, *cmdline = NULL, **argv = NULL;
1750 u32 nr, i, len = 0;
Namhyung Kima1ae5652012-09-24 17:14:59 +09001751
Namhyung Kim5323f602012-12-17 15:38:54 +09001752 ret = readn(fd, &nr, sizeof(nr));
Namhyung Kima1ae5652012-09-24 17:14:59 +09001753 if (ret != sizeof(nr))
1754 return -1;
1755
1756 if (ph->needs_swap)
1757 nr = bswap_32(nr);
1758
1759 ph->env.nr_cmdline = nr;
Jiri Olsa768dd3f2015-07-21 14:31:31 +02001760
1761 cmdline = zalloc(section->size + nr + 1);
1762 if (!cmdline)
1763 return -1;
1764
1765 argv = zalloc(sizeof(char *) * (nr + 1));
1766 if (!argv)
1767 goto error;
Namhyung Kima1ae5652012-09-24 17:14:59 +09001768
1769 for (i = 0; i < nr; i++) {
1770 str = do_read_string(fd, ph);
1771 if (!str)
1772 goto error;
1773
Jiri Olsa768dd3f2015-07-21 14:31:31 +02001774 argv[i] = cmdline + len;
1775 memcpy(argv[i], str, strlen(str) + 1);
1776 len += strlen(str) + 1;
Namhyung Kima1ae5652012-09-24 17:14:59 +09001777 free(str);
1778 }
Jiri Olsa768dd3f2015-07-21 14:31:31 +02001779 ph->env.cmdline = cmdline;
1780 ph->env.cmdline_argv = (const char **) argv;
Namhyung Kima1ae5652012-09-24 17:14:59 +09001781 return 0;
1782
1783error:
Jiri Olsa768dd3f2015-07-21 14:31:31 +02001784 free(argv);
1785 free(cmdline);
Namhyung Kima1ae5652012-09-24 17:14:59 +09001786 return -1;
1787}
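/*
 * HEADER_CMDLINE layout, as consumed above: a u32 argument count followed by
 * that many NUL-terminated strings.  The reader keeps two views of the same
 * data: env.cmdline, one buffer holding all arguments back to back, and
 * env.cmdline_argv, an argv-style array of pointers into that buffer, so no
 * per-argument allocation survives beyond the temporary do_read_string().
 */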
1788
Kan Liang2bb00d22015-09-01 09:58:12 -04001789static int process_cpu_topology(struct perf_file_section *section,
Namhyung Kim3d7eb862012-09-24 17:15:01 +09001790 struct perf_header *ph, int fd,
1791 void *data __maybe_unused)
Namhyung Kima1ae5652012-09-24 17:14:59 +09001792{
Jiri Olsa727ebd52013-11-28 11:30:14 +01001793 ssize_t ret;
Namhyung Kima1ae5652012-09-24 17:14:59 +09001794 u32 nr, i;
1795 char *str;
1796 struct strbuf sb;
Kan Liang2bb00d22015-09-01 09:58:12 -04001797 int cpu_nr = ph->env.nr_cpus_online;
1798 u64 size = 0;
1799
1800 ph->env.cpu = calloc(cpu_nr, sizeof(*ph->env.cpu));
1801 if (!ph->env.cpu)
1802 return -1;
Namhyung Kima1ae5652012-09-24 17:14:59 +09001803
Namhyung Kim5323f602012-12-17 15:38:54 +09001804 ret = readn(fd, &nr, sizeof(nr));
Namhyung Kima1ae5652012-09-24 17:14:59 +09001805 if (ret != sizeof(nr))
Kan Liang2bb00d22015-09-01 09:58:12 -04001806 goto free_cpu;
Namhyung Kima1ae5652012-09-24 17:14:59 +09001807
1808 if (ph->needs_swap)
1809 nr = bswap_32(nr);
1810
1811 ph->env.nr_sibling_cores = nr;
Kan Liang2bb00d22015-09-01 09:58:12 -04001812 size += sizeof(u32);
Masami Hiramatsu642aada2016-05-10 14:47:35 +09001813 if (strbuf_init(&sb, 128) < 0)
1814 goto free_cpu;
Namhyung Kima1ae5652012-09-24 17:14:59 +09001815
1816 for (i = 0; i < nr; i++) {
1817 str = do_read_string(fd, ph);
1818 if (!str)
1819 goto error;
1820
1821 /* include a NULL character at the end */
Masami Hiramatsu642aada2016-05-10 14:47:35 +09001822 if (strbuf_add(&sb, str, strlen(str) + 1) < 0)
1823 goto error;
Kan Liang2bb00d22015-09-01 09:58:12 -04001824 size += string_size(str);
Namhyung Kima1ae5652012-09-24 17:14:59 +09001825 free(str);
1826 }
1827 ph->env.sibling_cores = strbuf_detach(&sb, NULL);
1828
Namhyung Kim5323f602012-12-17 15:38:54 +09001829 ret = readn(fd, &nr, sizeof(nr));
Namhyung Kima1ae5652012-09-24 17:14:59 +09001830 if (ret != sizeof(nr))
 1831			goto free_cpu;
1832
1833 if (ph->needs_swap)
1834 nr = bswap_32(nr);
1835
1836 ph->env.nr_sibling_threads = nr;
Kan Liang2bb00d22015-09-01 09:58:12 -04001837 size += sizeof(u32);
Namhyung Kima1ae5652012-09-24 17:14:59 +09001838
1839 for (i = 0; i < nr; i++) {
1840 str = do_read_string(fd, ph);
1841 if (!str)
1842 goto error;
1843
1844 /* include a NULL character at the end */
Masami Hiramatsu642aada2016-05-10 14:47:35 +09001845 if (strbuf_add(&sb, str, strlen(str) + 1) < 0)
1846 goto error;
Kan Liang2bb00d22015-09-01 09:58:12 -04001847 size += string_size(str);
Namhyung Kima1ae5652012-09-24 17:14:59 +09001848 free(str);
1849 }
1850 ph->env.sibling_threads = strbuf_detach(&sb, NULL);
Kan Liang2bb00d22015-09-01 09:58:12 -04001851
1852 /*
1853 * The header may be from old perf,
1854 * which doesn't include core id and socket id information.
1855 */
1856 if (section->size <= size) {
1857 zfree(&ph->env.cpu);
1858 return 0;
1859 }
1860
1861 for (i = 0; i < (u32)cpu_nr; i++) {
1862 ret = readn(fd, &nr, sizeof(nr));
1863 if (ret != sizeof(nr))
1864 goto free_cpu;
1865
1866 if (ph->needs_swap)
1867 nr = bswap_32(nr);
1868
Kan Liang2bb00d22015-09-01 09:58:12 -04001869 ph->env.cpu[i].core_id = nr;
1870
1871 ret = readn(fd, &nr, sizeof(nr));
1872 if (ret != sizeof(nr))
1873 goto free_cpu;
1874
1875 if (ph->needs_swap)
1876 nr = bswap_32(nr);
1877
1878 if (nr > (u32)cpu_nr) {
 1879			pr_debug("socket_id number is too big. "
 1880				 "You may need to upgrade the perf tool.\n");
1881 goto free_cpu;
1882 }
1883
1884 ph->env.cpu[i].socket_id = nr;
1885 }
1886
Namhyung Kima1ae5652012-09-24 17:14:59 +09001887 return 0;
1888
1889error:
1890 strbuf_release(&sb);
Kan Liang2bb00d22015-09-01 09:58:12 -04001891free_cpu:
1892 zfree(&ph->env.cpu);
Namhyung Kima1ae5652012-09-24 17:14:59 +09001893 return -1;
1894}
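/*
 * HEADER_CPU_TOPOLOGY layout, as consumed above: a u32 count plus that many
 * sibling-core list strings, then a u32 count plus sibling-thread list
 * strings.  Newer perf appends one (core_id, socket_id) u32 pair per online
 * CPU; their presence is detected by comparing the bytes consumed so far
 * with section->size, so older files just leave env.cpu freed and NULL.
 */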
1895
1896static int process_numa_topology(struct perf_file_section *section __maybe_unused,
Namhyung Kim3d7eb862012-09-24 17:15:01 +09001897 struct perf_header *ph, int fd,
1898 void *data __maybe_unused)
Namhyung Kima1ae5652012-09-24 17:14:59 +09001899{
Jiri Olsac60da222016-07-04 14:16:20 +02001900 struct numa_node *nodes, *n;
Jiri Olsa727ebd52013-11-28 11:30:14 +01001901 ssize_t ret;
Jiri Olsac60da222016-07-04 14:16:20 +02001902 u32 nr, i;
Namhyung Kima1ae5652012-09-24 17:14:59 +09001903 char *str;
Namhyung Kima1ae5652012-09-24 17:14:59 +09001904
1905 /* nr nodes */
Namhyung Kim5323f602012-12-17 15:38:54 +09001906 ret = readn(fd, &nr, sizeof(nr));
Namhyung Kima1ae5652012-09-24 17:14:59 +09001907 if (ret != sizeof(nr))
Masami Hiramatsu642aada2016-05-10 14:47:35 +09001908 return -1;
Namhyung Kima1ae5652012-09-24 17:14:59 +09001909
1910 if (ph->needs_swap)
1911 nr = bswap_32(nr);
1912
Jiri Olsac60da222016-07-04 14:16:20 +02001913 nodes = zalloc(sizeof(*nodes) * nr);
1914 if (!nodes)
1915 return -ENOMEM;
Namhyung Kima1ae5652012-09-24 17:14:59 +09001916
1917 for (i = 0; i < nr; i++) {
Jiri Olsac60da222016-07-04 14:16:20 +02001918 n = &nodes[i];
1919
Namhyung Kima1ae5652012-09-24 17:14:59 +09001920 /* node number */
Jiri Olsac60da222016-07-04 14:16:20 +02001921 ret = readn(fd, &n->node, sizeof(u32));
1922 if (ret != sizeof(n->node))
Namhyung Kima1ae5652012-09-24 17:14:59 +09001923 goto error;
1924
Jiri Olsac60da222016-07-04 14:16:20 +02001925 ret = readn(fd, &n->mem_total, sizeof(u64));
Namhyung Kima1ae5652012-09-24 17:14:59 +09001926 if (ret != sizeof(u64))
1927 goto error;
1928
Jiri Olsac60da222016-07-04 14:16:20 +02001929 ret = readn(fd, &n->mem_free, sizeof(u64));
Namhyung Kima1ae5652012-09-24 17:14:59 +09001930 if (ret != sizeof(u64))
1931 goto error;
1932
1933 if (ph->needs_swap) {
Jiri Olsac60da222016-07-04 14:16:20 +02001934 n->node = bswap_32(n->node);
1935 n->mem_total = bswap_64(n->mem_total);
1936 n->mem_free = bswap_64(n->mem_free);
Namhyung Kima1ae5652012-09-24 17:14:59 +09001937 }
1938
Namhyung Kima1ae5652012-09-24 17:14:59 +09001939 str = do_read_string(fd, ph);
1940 if (!str)
1941 goto error;
1942
Jiri Olsac60da222016-07-04 14:16:20 +02001943 n->map = cpu_map__new(str);
1944 if (!n->map)
Masami Hiramatsu642aada2016-05-10 14:47:35 +09001945 goto error;
Jiri Olsac60da222016-07-04 14:16:20 +02001946
Namhyung Kima1ae5652012-09-24 17:14:59 +09001947 free(str);
1948 }
Jiri Olsaf957a532016-10-10 09:56:32 +02001949 ph->env.nr_numa_nodes = nr;
Jiri Olsac60da222016-07-04 14:16:20 +02001950 ph->env.numa_nodes = nodes;
Namhyung Kima1ae5652012-09-24 17:14:59 +09001951 return 0;
1952
1953error:
Jiri Olsac60da222016-07-04 14:16:20 +02001954 free(nodes);
Namhyung Kima1ae5652012-09-24 17:14:59 +09001955 return -1;
1956}
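/*
 * HEADER_NUMA_TOPOLOGY layout, as consumed above: a u32 node count, then per
 * node a u32 node id, u64 total and free memory sizes, and a CPU list string
 * that is parsed into a cpu_map.
 */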
1957
1958static int process_pmu_mappings(struct perf_file_section *section __maybe_unused,
Namhyung Kim3d7eb862012-09-24 17:15:01 +09001959 struct perf_header *ph, int fd,
1960 void *data __maybe_unused)
Namhyung Kima1ae5652012-09-24 17:14:59 +09001961{
Jiri Olsa727ebd52013-11-28 11:30:14 +01001962 ssize_t ret;
Namhyung Kima1ae5652012-09-24 17:14:59 +09001963 char *name;
1964 u32 pmu_num;
1965 u32 type;
1966 struct strbuf sb;
1967
Namhyung Kim5323f602012-12-17 15:38:54 +09001968 ret = readn(fd, &pmu_num, sizeof(pmu_num));
Namhyung Kima1ae5652012-09-24 17:14:59 +09001969 if (ret != sizeof(pmu_num))
1970 return -1;
1971
1972 if (ph->needs_swap)
1973 pmu_num = bswap_32(pmu_num);
1974
1975 if (!pmu_num) {
1976 pr_debug("pmu mappings not available\n");
1977 return 0;
1978 }
1979
1980 ph->env.nr_pmu_mappings = pmu_num;
Masami Hiramatsu642aada2016-05-10 14:47:35 +09001981 if (strbuf_init(&sb, 128) < 0)
1982 return -1;
Namhyung Kima1ae5652012-09-24 17:14:59 +09001983
1984 while (pmu_num) {
Namhyung Kim5323f602012-12-17 15:38:54 +09001985 if (readn(fd, &type, sizeof(type)) != sizeof(type))
Namhyung Kima1ae5652012-09-24 17:14:59 +09001986 goto error;
1987 if (ph->needs_swap)
1988 type = bswap_32(type);
1989
1990 name = do_read_string(fd, ph);
1991 if (!name)
1992 goto error;
1993
Masami Hiramatsu642aada2016-05-10 14:47:35 +09001994 if (strbuf_addf(&sb, "%u:%s", type, name) < 0)
1995 goto error;
Namhyung Kima1ae5652012-09-24 17:14:59 +09001996 /* include a NULL character at the end */
Masami Hiramatsu642aada2016-05-10 14:47:35 +09001997 if (strbuf_add(&sb, "", 1) < 0)
1998 goto error;
Namhyung Kima1ae5652012-09-24 17:14:59 +09001999
Kan Liange0838e02015-09-10 11:03:05 -03002000 if (!strcmp(name, "msr"))
2001 ph->env.msr_pmu_type = type;
2002
Namhyung Kima1ae5652012-09-24 17:14:59 +09002003 free(name);
2004 pmu_num--;
2005 }
2006 ph->env.pmu_mappings = strbuf_detach(&sb, NULL);
2007 return 0;
2008
2009error:
2010 strbuf_release(&sb);
2011 return -1;
2012}
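/*
 * HEADER_PMU_MAPPINGS layout, as consumed above: a u32 PMU count, then per
 * PMU a u32 perf_event_attr type and the PMU name string.  The pairs are
 * flattened into env.pmu_mappings as NUL-separated "type:name" entries, and
 * the type of the "msr" PMU, if present, is also kept in env.msr_pmu_type.
 */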
2013
Namhyung Kima8bb5592013-01-22 18:09:31 +09002014static int process_group_desc(struct perf_file_section *section __maybe_unused,
2015 struct perf_header *ph, int fd,
2016 void *data __maybe_unused)
2017{
 2018	int ret = -1;
2019 u32 i, nr, nr_groups;
2020 struct perf_session *session;
2021 struct perf_evsel *evsel, *leader = NULL;
2022 struct group_desc {
2023 char *name;
2024 u32 leader_idx;
2025 u32 nr_members;
2026 } *desc;
2027
2028 if (readn(fd, &nr_groups, sizeof(nr_groups)) != sizeof(nr_groups))
2029 return -1;
2030
2031 if (ph->needs_swap)
2032 nr_groups = bswap_32(nr_groups);
2033
2034 ph->env.nr_groups = nr_groups;
2035 if (!nr_groups) {
2036 pr_debug("group desc not available\n");
2037 return 0;
2038 }
2039
2040 desc = calloc(nr_groups, sizeof(*desc));
2041 if (!desc)
2042 return -1;
2043
2044 for (i = 0; i < nr_groups; i++) {
2045 desc[i].name = do_read_string(fd, ph);
2046 if (!desc[i].name)
2047 goto out_free;
2048
2049 if (readn(fd, &desc[i].leader_idx, sizeof(u32)) != sizeof(u32))
2050 goto out_free;
2051
2052 if (readn(fd, &desc[i].nr_members, sizeof(u32)) != sizeof(u32))
2053 goto out_free;
2054
2055 if (ph->needs_swap) {
2056 desc[i].leader_idx = bswap_32(desc[i].leader_idx);
2057 desc[i].nr_members = bswap_32(desc[i].nr_members);
2058 }
2059 }
2060
2061 /*
2062 * Rebuild group relationship based on the group_desc
2063 */
2064 session = container_of(ph, struct perf_session, header);
2065 session->evlist->nr_groups = nr_groups;
2066
2067 i = nr = 0;
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03002068 evlist__for_each_entry(session->evlist, evsel) {
Namhyung Kima8bb5592013-01-22 18:09:31 +09002069 if (evsel->idx == (int) desc[i].leader_idx) {
2070 evsel->leader = evsel;
2071 /* {anon_group} is a dummy name */
Namhyung Kim210e8122013-11-18 11:20:43 +09002072 if (strcmp(desc[i].name, "{anon_group}")) {
Namhyung Kima8bb5592013-01-22 18:09:31 +09002073 evsel->group_name = desc[i].name;
Namhyung Kim210e8122013-11-18 11:20:43 +09002074 desc[i].name = NULL;
2075 }
Namhyung Kima8bb5592013-01-22 18:09:31 +09002076 evsel->nr_members = desc[i].nr_members;
2077
2078 if (i >= nr_groups || nr > 0) {
2079 pr_debug("invalid group desc\n");
2080 goto out_free;
2081 }
2082
2083 leader = evsel;
2084 nr = evsel->nr_members - 1;
2085 i++;
2086 } else if (nr) {
2087 /* This is a group member */
2088 evsel->leader = leader;
2089
2090 nr--;
2091 }
2092 }
2093
2094 if (i != nr_groups || nr != 0) {
2095 pr_debug("invalid group desc\n");
2096 goto out_free;
2097 }
2098
2099 ret = 0;
2100out_free:
Namhyung Kim50a27402013-11-18 11:20:44 +09002101 for (i = 0; i < nr_groups; i++)
Arnaldo Carvalho de Melo74cf2492013-12-27 16:55:14 -03002102 zfree(&desc[i].name);
Namhyung Kima8bb5592013-01-22 18:09:31 +09002103 free(desc);
2104
2105 return ret;
2106}
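/*
 * HEADER_GROUP_DESC layout, as consumed above: a u32 group count, then per
 * group a name string, a u32 leader index and a u32 member count.  Group
 * links are rebuilt by walking the evlist in index order: the evsel whose
 * idx matches the next leader_idx becomes its own leader and the following
 * nr_members - 1 evsels are attached to it.  "{anon_group}" is the dummy
 * name used for unnamed groups and is not copied into group_name.
 */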
2107
Adrian Hunter99fa2982015-04-30 17:37:25 +03002108static int process_auxtrace(struct perf_file_section *section,
2109 struct perf_header *ph, int fd,
2110 void *data __maybe_unused)
2111{
2112 struct perf_session *session;
2113 int err;
2114
2115 session = container_of(ph, struct perf_session, header);
2116
2117 err = auxtrace_index__process(fd, section->size, session,
2118 ph->needs_swap);
2119 if (err < 0)
2120 pr_err("Failed to process auxtrace index\n");
2121 return err;
2122}
2123
Jiri Olsa720e98b2016-02-16 16:01:43 +01002124static int process_cache(struct perf_file_section *section __maybe_unused,
 2125			 struct perf_header *ph, int fd,
2126 void *data __maybe_unused)
2127{
2128 struct cpu_cache_level *caches;
2129 u32 cnt, i, version;
2130
2131 if (readn(fd, &version, sizeof(version)) != sizeof(version))
2132 return -1;
2133
2134 if (ph->needs_swap)
2135 version = bswap_32(version);
2136
2137 if (version != 1)
2138 return -1;
2139
2140 if (readn(fd, &cnt, sizeof(cnt)) != sizeof(cnt))
2141 return -1;
2142
2143 if (ph->needs_swap)
2144 cnt = bswap_32(cnt);
2145
2146 caches = zalloc(sizeof(*caches) * cnt);
2147 if (!caches)
2148 return -1;
2149
2150 for (i = 0; i < cnt; i++) {
2151 struct cpu_cache_level c;
2152
2153 #define _R(v) \
2154 if (readn(fd, &c.v, sizeof(u32)) != sizeof(u32))\
2155 goto out_free_caches; \
2156 if (ph->needs_swap) \
2157 c.v = bswap_32(c.v); \
2158
2159 _R(level)
2160 _R(line_size)
2161 _R(sets)
2162 _R(ways)
2163 #undef _R
2164
2165 #define _R(v) \
2166 c.v = do_read_string(fd, ph); \
2167 if (!c.v) \
2168 goto out_free_caches;
2169
2170 _R(type)
2171 _R(size)
2172 _R(map)
2173 #undef _R
2174
2175 caches[i] = c;
2176 }
2177
2178 ph->env.caches = caches;
2179 ph->env.caches_cnt = cnt;
2180 return 0;
2181out_free_caches:
2182 free(caches);
2183 return -1;
2184}
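/*
 * HEADER_CACHE layout, as consumed above: a u32 version (only version 1 is
 * understood), a u32 cache count, then per cache four u32 values (level,
 * line_size, sets, ways) followed by three strings (type, size, map).
 */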
2185
Stephane Eranianfbe96f22011-09-30 15:40:40 +02002186struct feature_ops {
2187 int (*write)(int fd, struct perf_header *h, struct perf_evlist *evlist);
2188 void (*print)(struct perf_header *h, int fd, FILE *fp);
Robert Richterf1c67db2012-02-10 15:41:56 +01002189 int (*process)(struct perf_file_section *section,
Namhyung Kim3d7eb862012-09-24 17:15:01 +09002190 struct perf_header *h, int fd, void *data);
Stephane Eranianfbe96f22011-09-30 15:40:40 +02002191 const char *name;
2192 bool full_only;
2193};
2194
Robert Richter8cdfa782011-12-07 10:02:56 +01002195#define FEAT_OPA(n, func) \
2196 [n] = { .name = #n, .write = write_##func, .print = print_##func }
Robert Richterf1c67db2012-02-10 15:41:56 +01002197#define FEAT_OPP(n, func) \
2198 [n] = { .name = #n, .write = write_##func, .print = print_##func, \
2199 .process = process_##func }
Robert Richter8cdfa782011-12-07 10:02:56 +01002200#define FEAT_OPF(n, func) \
Robert Richterf1c67db2012-02-10 15:41:56 +01002201 [n] = { .name = #n, .write = write_##func, .print = print_##func, \
Namhyung Kima1ae5652012-09-24 17:14:59 +09002202 .process = process_##func, .full_only = true }
Robert Richter8cdfa782011-12-07 10:02:56 +01002203
2204/* feature_ops not implemented: */
Stephane Eranian2eeaaa02012-05-15 13:28:13 +02002205#define print_tracing_data NULL
2206#define print_build_id NULL
Stephane Eranianfbe96f22011-09-30 15:40:40 +02002207
2208static const struct feature_ops feat_ops[HEADER_LAST_FEATURE] = {
Stephane Eranian2eeaaa02012-05-15 13:28:13 +02002209 FEAT_OPP(HEADER_TRACING_DATA, tracing_data),
Robert Richterf1c67db2012-02-10 15:41:56 +01002210 FEAT_OPP(HEADER_BUILD_ID, build_id),
Namhyung Kima1ae5652012-09-24 17:14:59 +09002211 FEAT_OPP(HEADER_HOSTNAME, hostname),
2212 FEAT_OPP(HEADER_OSRELEASE, osrelease),
2213 FEAT_OPP(HEADER_VERSION, version),
2214 FEAT_OPP(HEADER_ARCH, arch),
2215 FEAT_OPP(HEADER_NRCPUS, nrcpus),
2216 FEAT_OPP(HEADER_CPUDESC, cpudesc),
Namhyung Kim37e9d752012-09-24 17:15:03 +09002217 FEAT_OPP(HEADER_CPUID, cpuid),
Namhyung Kima1ae5652012-09-24 17:14:59 +09002218 FEAT_OPP(HEADER_TOTAL_MEM, total_mem),
Robert Richter7c2f7af2012-08-16 21:10:23 +02002219 FEAT_OPP(HEADER_EVENT_DESC, event_desc),
Namhyung Kima1ae5652012-09-24 17:14:59 +09002220 FEAT_OPP(HEADER_CMDLINE, cmdline),
Robert Richter8cdfa782011-12-07 10:02:56 +01002221 FEAT_OPF(HEADER_CPU_TOPOLOGY, cpu_topology),
2222 FEAT_OPF(HEADER_NUMA_TOPOLOGY, numa_topology),
Stephane Eranian330aa672012-03-08 23:47:46 +01002223 FEAT_OPA(HEADER_BRANCH_STACK, branch_stack),
Namhyung Kima1ae5652012-09-24 17:14:59 +09002224 FEAT_OPP(HEADER_PMU_MAPPINGS, pmu_mappings),
Namhyung Kima8bb5592013-01-22 18:09:31 +09002225 FEAT_OPP(HEADER_GROUP_DESC, group_desc),
Adrian Hunter99fa2982015-04-30 17:37:25 +03002226 FEAT_OPP(HEADER_AUXTRACE, auxtrace),
Jiri Olsaffa517a2015-10-25 15:51:43 +01002227 FEAT_OPA(HEADER_STAT, stat),
Jiri Olsa720e98b2016-02-16 16:01:43 +01002228 FEAT_OPF(HEADER_CACHE, cache),
Stephane Eranianfbe96f22011-09-30 15:40:40 +02002229};
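/*
 * The feat_ops[] table above is indexed by the HEADER_* feature bit and
 * drives writing, printing and reading of each feature section.  As a
 * sketch (HEADER_FOO is a hypothetical name), wiring up a new feature
 * would add a bit to the HEADER_* enum in header.h and one line here:
 *
 *	FEAT_OPP(HEADER_FOO, foo),
 *
 * which expects matching write_foo(), print_foo() and process_foo()
 * helpers; FEAT_OPA omits the process callback, and FEAT_OPF additionally
 * marks the feature as shown only in the full header listing.
 */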
2230
2231struct header_print_data {
2232 FILE *fp;
2233 bool full; /* extended list of headers */
2234};
2235
2236static int perf_file_section__fprintf_info(struct perf_file_section *section,
2237 struct perf_header *ph,
2238 int feat, int fd, void *data)
2239{
2240 struct header_print_data *hd = data;
2241
2242 if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) {
2243 pr_debug("Failed to lseek to %" PRIu64 " offset for feature "
2244 "%d, continuing...\n", section->offset, feat);
2245 return 0;
2246 }
Robert Richterb1e5a9b2011-12-07 10:02:57 +01002247 if (feat >= HEADER_LAST_FEATURE) {
Stephane Eranianfbe96f22011-09-30 15:40:40 +02002248 pr_warning("unknown feature %d\n", feat);
Robert Richterf7a8a132011-12-07 10:02:51 +01002249 return 0;
Stephane Eranianfbe96f22011-09-30 15:40:40 +02002250 }
2251 if (!feat_ops[feat].print)
2252 return 0;
2253
2254 if (!feat_ops[feat].full_only || hd->full)
2255 feat_ops[feat].print(ph, fd, hd->fp);
2256 else
2257 fprintf(hd->fp, "# %s info available, use -I to display\n",
2258 feat_ops[feat].name);
2259
2260 return 0;
2261}
2262
2263int perf_header__fprintf_info(struct perf_session *session, FILE *fp, bool full)
2264{
2265 struct header_print_data hd;
2266 struct perf_header *header = &session->header;
Jiri Olsacc9784bd2013-10-15 16:27:34 +02002267 int fd = perf_data_file__fd(session->file);
Jiri Olsaf45f5612016-10-10 09:03:07 +02002268 struct stat st;
Jiri Olsaaabae162016-10-10 09:35:50 +02002269 int ret, bit;
Jiri Olsaf45f5612016-10-10 09:03:07 +02002270
Stephane Eranianfbe96f22011-09-30 15:40:40 +02002271 hd.fp = fp;
2272 hd.full = full;
2273
Jiri Olsaf45f5612016-10-10 09:03:07 +02002274 ret = fstat(fd, &st);
2275 if (ret == -1)
2276 return -1;
2277
2278 fprintf(fp, "# captured on: %s", ctime(&st.st_ctime));
2279
Stephane Eranianfbe96f22011-09-30 15:40:40 +02002280 perf_header__process_sections(header, fd, &hd,
2281 perf_file_section__fprintf_info);
Jiri Olsaaabae162016-10-10 09:35:50 +02002282
2283 fprintf(fp, "# missing features: ");
2284 for_each_clear_bit(bit, header->adds_features, HEADER_LAST_FEATURE) {
2285 if (bit)
2286 fprintf(fp, "%s ", feat_ops[bit].name);
2287 }
2288
2289 fprintf(fp, "\n");
Stephane Eranianfbe96f22011-09-30 15:40:40 +02002290 return 0;
2291}
2292
Stephane Eranianfbe96f22011-09-30 15:40:40 +02002293static int do_write_feat(int fd, struct perf_header *h, int type,
2294 struct perf_file_section **p,
2295 struct perf_evlist *evlist)
2296{
2297 int err;
2298 int ret = 0;
2299
2300 if (perf_header__has_feat(h, type)) {
Robert Richterb1e5a9b2011-12-07 10:02:57 +01002301 if (!feat_ops[type].write)
2302 return -1;
Stephane Eranianfbe96f22011-09-30 15:40:40 +02002303
2304 (*p)->offset = lseek(fd, 0, SEEK_CUR);
2305
2306 err = feat_ops[type].write(fd, h, evlist);
2307 if (err < 0) {
Jiri Olsa0c2aff42016-10-10 09:38:02 +02002308 pr_debug("failed to write feature %s\n", feat_ops[type].name);
Stephane Eranianfbe96f22011-09-30 15:40:40 +02002309
2310 /* undo anything written */
2311 lseek(fd, (*p)->offset, SEEK_SET);
2312
2313 return -1;
2314 }
2315 (*p)->size = lseek(fd, 0, SEEK_CUR) - (*p)->offset;
2316 (*p)++;
2317 }
2318 return ret;
2319}
2320
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03002321static int perf_header__adds_write(struct perf_header *header,
Arnaldo Carvalho de Melo361c99a2011-01-11 20:56:53 -02002322 struct perf_evlist *evlist, int fd)
Frederic Weisbecker2ba08252009-10-17 17:12:34 +02002323{
Frederic Weisbecker9e827dd2009-11-11 04:51:07 +01002324 int nr_sections;
Stephane Eranianfbe96f22011-09-30 15:40:40 +02002325 struct perf_file_section *feat_sec, *p;
Frederic Weisbecker9e827dd2009-11-11 04:51:07 +01002326 int sec_size;
2327 u64 sec_start;
Robert Richterb1e5a9b2011-12-07 10:02:57 +01002328 int feat;
Stephane Eranianfbe96f22011-09-30 15:40:40 +02002329 int err;
Frederic Weisbecker9e827dd2009-11-11 04:51:07 +01002330
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03002331 nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);
Frederic Weisbecker9e827dd2009-11-11 04:51:07 +01002332 if (!nr_sections)
Arnaldo Carvalho de Melod5eed902009-11-19 14:55:56 -02002333 return 0;
Frederic Weisbecker9e827dd2009-11-11 04:51:07 +01002334
Paul Gortmaker91b98802013-01-30 20:05:49 -05002335 feat_sec = p = calloc(nr_sections, sizeof(*feat_sec));
Arnaldo Carvalho de Melod5eed902009-11-19 14:55:56 -02002336 if (feat_sec == NULL)
2337 return -ENOMEM;
Frederic Weisbecker9e827dd2009-11-11 04:51:07 +01002338
2339 sec_size = sizeof(*feat_sec) * nr_sections;
2340
Jiri Olsa8d541e92013-07-17 19:49:44 +02002341 sec_start = header->feat_offset;
Xiao Guangrongf887f302010-02-04 16:46:42 +08002342 lseek(fd, sec_start + sec_size, SEEK_SET);
Frederic Weisbecker2ba08252009-10-17 17:12:34 +02002343
Robert Richterb1e5a9b2011-12-07 10:02:57 +01002344 for_each_set_bit(feat, header->adds_features, HEADER_FEAT_BITS) {
2345 if (do_write_feat(fd, header, feat, &p, evlist))
2346 perf_header__clear_feat(header, feat);
2347 }
Frederic Weisbecker9e827dd2009-11-11 04:51:07 +01002348
Xiao Guangrongf887f302010-02-04 16:46:42 +08002349 lseek(fd, sec_start, SEEK_SET);
Stephane Eranianfbe96f22011-09-30 15:40:40 +02002350 /*
 2351	 * may write more than needed due to dropped features, but
 2352	 * this is okay, the reader will skip the missing entries
2353 */
Arnaldo Carvalho de Melod5eed902009-11-19 14:55:56 -02002354 err = do_write(fd, feat_sec, sec_size);
2355 if (err < 0)
2356 pr_debug("failed to write feature section\n");
Frederic Weisbecker9e827dd2009-11-11 04:51:07 +01002357 free(feat_sec);
Arnaldo Carvalho de Melod5eed902009-11-19 14:55:56 -02002358 return err;
Frederic Weisbecker9e827dd2009-11-11 04:51:07 +01002359}
Frederic Weisbecker2ba08252009-10-17 17:12:34 +02002360
Tom Zanussi8dc58102010-04-01 23:59:15 -05002361int perf_header__write_pipe(int fd)
2362{
2363 struct perf_pipe_file_header f_header;
2364 int err;
2365
2366 f_header = (struct perf_pipe_file_header){
2367 .magic = PERF_MAGIC,
2368 .size = sizeof(f_header),
2369 };
2370
2371 err = do_write(fd, &f_header, sizeof(f_header));
2372 if (err < 0) {
2373 pr_debug("failed to write perf pipe header\n");
2374 return err;
2375 }
2376
2377 return 0;
2378}
2379
Arnaldo Carvalho de Meloa91e5432011-03-10 11:15:54 -03002380int perf_session__write_header(struct perf_session *session,
2381 struct perf_evlist *evlist,
2382 int fd, bool at_exit)
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02002383{
2384 struct perf_file_header f_header;
2385 struct perf_file_attr f_attr;
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03002386 struct perf_header *header = &session->header;
Jiri Olsa563aecb2013-06-05 13:35:06 +02002387 struct perf_evsel *evsel;
Jiri Olsa944d62b2013-07-17 19:49:43 +02002388 u64 attr_offset;
Arnaldo Carvalho de Meloa91e5432011-03-10 11:15:54 -03002389 int err;
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02002390
2391 lseek(fd, sizeof(f_header), SEEK_SET);
2392
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03002393 evlist__for_each_entry(session->evlist, evsel) {
Robert Richter6606f872012-08-16 21:10:19 +02002394 evsel->id_offset = lseek(fd, 0, SEEK_CUR);
2395 err = do_write(fd, evsel->id, evsel->ids * sizeof(u64));
Arnaldo Carvalho de Melod5eed902009-11-19 14:55:56 -02002396 if (err < 0) {
2397 pr_debug("failed to write perf header\n");
2398 return err;
2399 }
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02002400 }
2401
Jiri Olsa944d62b2013-07-17 19:49:43 +02002402 attr_offset = lseek(fd, 0, SEEK_CUR);
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02002403
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03002404 evlist__for_each_entry(evlist, evsel) {
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02002405 f_attr = (struct perf_file_attr){
Robert Richter6606f872012-08-16 21:10:19 +02002406 .attr = evsel->attr,
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02002407 .ids = {
Robert Richter6606f872012-08-16 21:10:19 +02002408 .offset = evsel->id_offset,
2409 .size = evsel->ids * sizeof(u64),
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02002410 }
2411 };
Arnaldo Carvalho de Melod5eed902009-11-19 14:55:56 -02002412 err = do_write(fd, &f_attr, sizeof(f_attr));
2413 if (err < 0) {
2414 pr_debug("failed to write perf header attribute\n");
2415 return err;
2416 }
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02002417 }
2418
Adrian Hunterd645c442013-12-11 14:36:28 +02002419 if (!header->data_offset)
2420 header->data_offset = lseek(fd, 0, SEEK_CUR);
Jiri Olsa8d541e92013-07-17 19:49:44 +02002421 header->feat_offset = header->data_offset + header->data_size;
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02002422
Arnaldo Carvalho de Melod5eed902009-11-19 14:55:56 -02002423 if (at_exit) {
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03002424 err = perf_header__adds_write(header, evlist, fd);
Arnaldo Carvalho de Melod5eed902009-11-19 14:55:56 -02002425 if (err < 0)
2426 return err;
2427 }
Frederic Weisbecker9e827dd2009-11-11 04:51:07 +01002428
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02002429 f_header = (struct perf_file_header){
2430 .magic = PERF_MAGIC,
2431 .size = sizeof(f_header),
2432 .attr_size = sizeof(f_attr),
2433 .attrs = {
Jiri Olsa944d62b2013-07-17 19:49:43 +02002434 .offset = attr_offset,
Arnaldo Carvalho de Meloa91e5432011-03-10 11:15:54 -03002435 .size = evlist->nr_entries * sizeof(f_attr),
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02002436 },
2437 .data = {
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03002438 .offset = header->data_offset,
2439 .size = header->data_size,
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02002440 },
Jiri Olsa44b3c572013-07-11 17:28:31 +02002441 /* event_types is ignored, store zeros */
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02002442 };
2443
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03002444 memcpy(&f_header.adds_features, &header->adds_features, sizeof(header->adds_features));
Frederic Weisbecker2ba08252009-10-17 17:12:34 +02002445
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02002446 lseek(fd, 0, SEEK_SET);
Arnaldo Carvalho de Melod5eed902009-11-19 14:55:56 -02002447 err = do_write(fd, &f_header, sizeof(f_header));
2448 if (err < 0) {
2449 pr_debug("failed to write perf header\n");
2450 return err;
2451 }
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03002452 lseek(fd, header->data_offset + header->data_size, SEEK_SET);
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02002453
Arnaldo Carvalho de Melod5eed902009-11-19 14:55:56 -02002454 return 0;
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02002455}
2456
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03002457static int perf_header__getbuffer64(struct perf_header *header,
Arnaldo Carvalho de Meloba215942010-01-14 12:23:10 -02002458 int fd, void *buf, size_t size)
2459{
Arnaldo Carvalho de Melo1e7972c2011-01-03 16:50:55 -02002460 if (readn(fd, buf, size) <= 0)
Arnaldo Carvalho de Meloba215942010-01-14 12:23:10 -02002461 return -1;
2462
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03002463 if (header->needs_swap)
Arnaldo Carvalho de Meloba215942010-01-14 12:23:10 -02002464 mem_bswap_64(buf, size);
2465
2466 return 0;
2467}
2468
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03002469int perf_header__process_sections(struct perf_header *header, int fd,
Stephane Eranianfbe96f22011-09-30 15:40:40 +02002470 void *data,
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03002471 int (*process)(struct perf_file_section *section,
Robert Richterb1e5a9b2011-12-07 10:02:57 +01002472 struct perf_header *ph,
2473 int feat, int fd, void *data))
Frederic Weisbecker2ba08252009-10-17 17:12:34 +02002474{
Robert Richterb1e5a9b2011-12-07 10:02:57 +01002475 struct perf_file_section *feat_sec, *sec;
Frederic Weisbecker9e827dd2009-11-11 04:51:07 +01002476 int nr_sections;
2477 int sec_size;
Robert Richterb1e5a9b2011-12-07 10:02:57 +01002478 int feat;
2479 int err;
Frederic Weisbecker9e827dd2009-11-11 04:51:07 +01002480
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03002481 nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);
Frederic Weisbecker9e827dd2009-11-11 04:51:07 +01002482 if (!nr_sections)
Arnaldo Carvalho de Melo37562ea2009-11-16 16:32:43 -02002483 return 0;
Frederic Weisbecker9e827dd2009-11-11 04:51:07 +01002484
Paul Gortmaker91b98802013-01-30 20:05:49 -05002485 feat_sec = sec = calloc(nr_sections, sizeof(*feat_sec));
Frederic Weisbecker9e827dd2009-11-11 04:51:07 +01002486 if (!feat_sec)
Arnaldo Carvalho de Melo37562ea2009-11-16 16:32:43 -02002487 return -1;
Frederic Weisbecker9e827dd2009-11-11 04:51:07 +01002488
2489 sec_size = sizeof(*feat_sec) * nr_sections;
2490
Jiri Olsa8d541e92013-07-17 19:49:44 +02002491 lseek(fd, header->feat_offset, SEEK_SET);
Frederic Weisbecker9e827dd2009-11-11 04:51:07 +01002492
Robert Richterb1e5a9b2011-12-07 10:02:57 +01002493 err = perf_header__getbuffer64(header, fd, feat_sec, sec_size);
2494 if (err < 0)
Arnaldo Carvalho de Melo769885f2009-12-28 22:48:32 -02002495 goto out_free;
Frederic Weisbecker9e827dd2009-11-11 04:51:07 +01002496
Robert Richterb1e5a9b2011-12-07 10:02:57 +01002497 for_each_set_bit(feat, header->adds_features, HEADER_LAST_FEATURE) {
2498 err = process(sec++, header, feat, fd, data);
2499 if (err < 0)
2500 goto out_free;
Frederic Weisbecker4778d2e2009-11-11 04:51:05 +01002501 }
Robert Richterb1e5a9b2011-12-07 10:02:57 +01002502 err = 0;
Arnaldo Carvalho de Melo769885f2009-12-28 22:48:32 -02002503out_free:
Frederic Weisbecker9e827dd2009-11-11 04:51:07 +01002504 free(feat_sec);
Arnaldo Carvalho de Melo37562ea2009-11-16 16:32:43 -02002505 return err;
Arnaldo Carvalho de Melo769885f2009-12-28 22:48:32 -02002506}
Frederic Weisbecker2ba08252009-10-17 17:12:34 +02002507
Stephane Eranian114382a2012-02-09 23:21:08 +01002508static const int attr_file_abi_sizes[] = {
2509 [0] = PERF_ATTR_SIZE_VER0,
2510 [1] = PERF_ATTR_SIZE_VER1,
Jiri Olsa239cc472012-08-07 15:20:42 +02002511 [2] = PERF_ATTR_SIZE_VER2,
Jiri Olsa0f6a3012012-08-07 15:20:45 +02002512 [3] = PERF_ATTR_SIZE_VER3,
Stephane Eranian6a21c0b2014-09-24 13:48:39 +02002513 [4] = PERF_ATTR_SIZE_VER4,
Stephane Eranian114382a2012-02-09 23:21:08 +01002514 0,
2515};
2516
2517/*
2518 * In the legacy file format, the magic number is not used to encode endianness.
2519 * hdr_sz was used to encode endianness. But given that hdr_sz can vary based
 2520 * on ABI revisions, we need to try all combinations of ABI size and byte
 2521 * order to detect the endianness.
2522 */
2523static int try_all_file_abis(uint64_t hdr_sz, struct perf_header *ph)
2524{
2525 uint64_t ref_size, attr_size;
2526 int i;
2527
2528 for (i = 0 ; attr_file_abi_sizes[i]; i++) {
2529 ref_size = attr_file_abi_sizes[i]
2530 + sizeof(struct perf_file_section);
2531 if (hdr_sz != ref_size) {
2532 attr_size = bswap_64(hdr_sz);
2533 if (attr_size != ref_size)
2534 continue;
2535
2536 ph->needs_swap = true;
2537 }
2538 pr_debug("ABI%d perf.data file detected, need_swap=%d\n",
2539 i,
2540 ph->needs_swap);
2541 return 0;
2542 }
2543 /* could not determine endianness */
2544 return -1;
2545}
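/*
 * Worked example, assuming the usual sizes (PERF_ATTR_SIZE_VER0 = 64 and a
 * 16 byte struct perf_file_section): an ABI0 file stores attr_size = 80.
 * Read on a host of the opposite endianness, hdr_sz appears as
 * 0x5000000000000000, the direct comparison fails, bswap_64() gives 80
 * again, so the file is identified as ABI0 with ph->needs_swap set.
 */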
2546
2547#define PERF_PIPE_HDR_VER0 16
2548
2549static const size_t attr_pipe_abi_sizes[] = {
2550 [0] = PERF_PIPE_HDR_VER0,
2551 0,
2552};
2553
2554/*
 2555 * In the legacy pipe format, there is an implicit assumption that the host
 2556 * recording the samples and the host parsing them share the same
 2557 * endianness. This is not always the case: the pipe output can be
 2558 * redirected into a file and analyzed on a different machine with a
 2559 * different endianness and perf_event ABI revision of the perf tool.
2560 */
2561static int try_all_pipe_abis(uint64_t hdr_sz, struct perf_header *ph)
2562{
2563 u64 attr_size;
2564 int i;
2565
2566 for (i = 0 ; attr_pipe_abi_sizes[i]; i++) {
2567 if (hdr_sz != attr_pipe_abi_sizes[i]) {
2568 attr_size = bswap_64(hdr_sz);
2569 if (attr_size != hdr_sz)
2570 continue;
2571
2572 ph->needs_swap = true;
2573 }
2574 pr_debug("Pipe ABI%d perf.data file detected\n", i);
2575 return 0;
2576 }
2577 return -1;
2578}
2579
Feng Tange84ba4e2012-10-30 11:56:07 +08002580bool is_perf_magic(u64 magic)
2581{
2582 if (!memcmp(&magic, __perf_magic1, sizeof(magic))
2583 || magic == __perf_magic2
2584 || magic == __perf_magic2_sw)
2585 return true;
2586
2587 return false;
2588}
2589
Stephane Eranian114382a2012-02-09 23:21:08 +01002590static int check_magic_endian(u64 magic, uint64_t hdr_sz,
2591 bool is_pipe, struct perf_header *ph)
Stephane Eranian73323f52012-02-02 13:54:44 +01002592{
2593 int ret;
2594
2595 /* check for legacy format */
Stephane Eranian114382a2012-02-09 23:21:08 +01002596 ret = memcmp(&magic, __perf_magic1, sizeof(magic));
Stephane Eranian73323f52012-02-02 13:54:44 +01002597 if (ret == 0) {
Jiri Olsa2a08c3e2013-07-17 19:49:47 +02002598 ph->version = PERF_HEADER_VERSION_1;
Stephane Eranian73323f52012-02-02 13:54:44 +01002599 pr_debug("legacy perf.data format\n");
Stephane Eranian114382a2012-02-09 23:21:08 +01002600 if (is_pipe)
2601 return try_all_pipe_abis(hdr_sz, ph);
Stephane Eranian73323f52012-02-02 13:54:44 +01002602
Stephane Eranian114382a2012-02-09 23:21:08 +01002603 return try_all_file_abis(hdr_sz, ph);
Stephane Eranian73323f52012-02-02 13:54:44 +01002604 }
Stephane Eranian114382a2012-02-09 23:21:08 +01002605 /*
2606 * the new magic number serves two purposes:
2607 * - unique number to identify actual perf.data files
2608 * - encode endianness of file
2609 */
Namhyung Kimf7913972015-01-29 17:06:45 +09002610 ph->version = PERF_HEADER_VERSION_2;
Stephane Eranian73323f52012-02-02 13:54:44 +01002611
Stephane Eranian114382a2012-02-09 23:21:08 +01002612 /* check magic number with one endianness */
2613 if (magic == __perf_magic2)
Stephane Eranian73323f52012-02-02 13:54:44 +01002614 return 0;
2615
Stephane Eranian114382a2012-02-09 23:21:08 +01002616 /* check magic number with opposite endianness */
2617 if (magic != __perf_magic2_sw)
Stephane Eranian73323f52012-02-02 13:54:44 +01002618 return -1;
2619
2620 ph->needs_swap = true;
2621
2622 return 0;
2623}
2624
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03002625int perf_file_header__read(struct perf_file_header *header,
Arnaldo Carvalho de Melo37562ea2009-11-16 16:32:43 -02002626 struct perf_header *ph, int fd)
2627{
Jiri Olsa727ebd52013-11-28 11:30:14 +01002628 ssize_t ret;
Stephane Eranian73323f52012-02-02 13:54:44 +01002629
Arnaldo Carvalho de Melo37562ea2009-11-16 16:32:43 -02002630 lseek(fd, 0, SEEK_SET);
Arnaldo Carvalho de Melo37562ea2009-11-16 16:32:43 -02002631
Stephane Eranian73323f52012-02-02 13:54:44 +01002632 ret = readn(fd, header, sizeof(*header));
2633 if (ret <= 0)
Arnaldo Carvalho de Melo37562ea2009-11-16 16:32:43 -02002634 return -1;
2635
Stephane Eranian114382a2012-02-09 23:21:08 +01002636 if (check_magic_endian(header->magic,
2637 header->attr_size, false, ph) < 0) {
2638 pr_debug("magic/endian check failed\n");
Stephane Eranian73323f52012-02-02 13:54:44 +01002639 return -1;
Stephane Eranian114382a2012-02-09 23:21:08 +01002640 }
Arnaldo Carvalho de Meloba215942010-01-14 12:23:10 -02002641
Stephane Eranian73323f52012-02-02 13:54:44 +01002642 if (ph->needs_swap) {
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03002643 mem_bswap_64(header, offsetof(struct perf_file_header,
Stephane Eranian73323f52012-02-02 13:54:44 +01002644 adds_features));
Arnaldo Carvalho de Meloba215942010-01-14 12:23:10 -02002645 }
2646
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03002647 if (header->size != sizeof(*header)) {
Arnaldo Carvalho de Melo37562ea2009-11-16 16:32:43 -02002648 /* Support the previous format */
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03002649 if (header->size == offsetof(typeof(*header), adds_features))
2650 bitmap_zero(header->adds_features, HEADER_FEAT_BITS);
Arnaldo Carvalho de Melo37562ea2009-11-16 16:32:43 -02002651 else
2652 return -1;
David Ahernd327fa42011-10-18 17:34:01 -06002653 } else if (ph->needs_swap) {
David Ahernd327fa42011-10-18 17:34:01 -06002654 /*
2655 * feature bitmap is declared as an array of unsigned longs --
2656 * not good since its size can differ between the host that
2657 * generated the data file and the host analyzing the file.
2658 *
2659 * We need to handle endianness, but we don't know the size of
2660 * the unsigned long where the file was generated. Take a best
 2661		 * guess at determining it: try 64-bit swap first (i.e., file
2662 * created on a 64-bit host), and check if the hostname feature
2663 * bit is set (this feature bit is forced on as of fbe96f2).
 2664		 * If the bit is not set, undo the 64-bit swap and try a 32-bit
2665 * swap. If the hostname bit is still not set (e.g., older data
2666 * file), punt and fallback to the original behavior --
2667 * clearing all feature bits and setting buildid.
2668 */
David Ahern80c01202012-06-08 11:47:51 -03002669 mem_bswap_64(&header->adds_features,
2670 BITS_TO_U64(HEADER_FEAT_BITS));
David Ahernd327fa42011-10-18 17:34:01 -06002671
2672 if (!test_bit(HEADER_HOSTNAME, header->adds_features)) {
David Ahern80c01202012-06-08 11:47:51 -03002673 /* unswap as u64 */
2674 mem_bswap_64(&header->adds_features,
2675 BITS_TO_U64(HEADER_FEAT_BITS));
2676
2677 /* unswap as u32 */
2678 mem_bswap_32(&header->adds_features,
2679 BITS_TO_U32(HEADER_FEAT_BITS));
David Ahernd327fa42011-10-18 17:34:01 -06002680 }
2681
2682 if (!test_bit(HEADER_HOSTNAME, header->adds_features)) {
2683 bitmap_zero(header->adds_features, HEADER_FEAT_BITS);
2684 set_bit(HEADER_BUILD_ID, header->adds_features);
2685 }
Arnaldo Carvalho de Melo37562ea2009-11-16 16:32:43 -02002686 }
2687
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03002688 memcpy(&ph->adds_features, &header->adds_features,
Arnaldo Carvalho de Meloba215942010-01-14 12:23:10 -02002689 sizeof(ph->adds_features));
Arnaldo Carvalho de Melo37562ea2009-11-16 16:32:43 -02002690
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03002691 ph->data_offset = header->data.offset;
2692 ph->data_size = header->data.size;
Jiri Olsa8d541e92013-07-17 19:49:44 +02002693 ph->feat_offset = header->data.offset + header->data.size;
Arnaldo Carvalho de Melo37562ea2009-11-16 16:32:43 -02002694 return 0;
2695}
2696
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03002697static int perf_file_section__process(struct perf_file_section *section,
Arnaldo Carvalho de Meloba215942010-01-14 12:23:10 -02002698 struct perf_header *ph,
Arnaldo Carvalho de Meloda378962012-06-27 13:08:42 -03002699 int feat, int fd, void *data)
Arnaldo Carvalho de Melo37562ea2009-11-16 16:32:43 -02002700{
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03002701 if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) {
Arnaldo Carvalho de Melo9486aa32011-01-22 20:37:02 -02002702 pr_debug("Failed to lseek to %" PRIu64 " offset for feature "
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03002703 "%d, continuing...\n", section->offset, feat);
Arnaldo Carvalho de Melo37562ea2009-11-16 16:32:43 -02002704 return 0;
2705 }
2706
Robert Richterb1e5a9b2011-12-07 10:02:57 +01002707 if (feat >= HEADER_LAST_FEATURE) {
2708 pr_debug("unknown feature %d, continuing...\n", feat);
2709 return 0;
2710 }
2711
Robert Richterf1c67db2012-02-10 15:41:56 +01002712 if (!feat_ops[feat].process)
2713 return 0;
Arnaldo Carvalho de Melo37562ea2009-11-16 16:32:43 -02002714
Namhyung Kim3d7eb862012-09-24 17:15:01 +09002715 return feat_ops[feat].process(section, ph, fd, data);
Arnaldo Carvalho de Melo37562ea2009-11-16 16:32:43 -02002716}
2717
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03002718static int perf_file_header__read_pipe(struct perf_pipe_file_header *header,
Tom Zanussi454c4072010-05-01 01:41:20 -05002719 struct perf_header *ph, int fd,
2720 bool repipe)
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02002721{
Jiri Olsa727ebd52013-11-28 11:30:14 +01002722 ssize_t ret;
Stephane Eranian73323f52012-02-02 13:54:44 +01002723
2724 ret = readn(fd, header, sizeof(*header));
2725 if (ret <= 0)
2726 return -1;
2727
Stephane Eranian114382a2012-02-09 23:21:08 +01002728 if (check_magic_endian(header->magic, header->size, true, ph) < 0) {
2729 pr_debug("endian/magic failed\n");
Tom Zanussi8dc58102010-04-01 23:59:15 -05002730 return -1;
Stephane Eranian114382a2012-02-09 23:21:08 +01002731 }
2732
2733 if (ph->needs_swap)
2734 header->size = bswap_64(header->size);
Tom Zanussi8dc58102010-04-01 23:59:15 -05002735
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03002736 if (repipe && do_write(STDOUT_FILENO, header, sizeof(*header)) < 0)
Tom Zanussi454c4072010-05-01 01:41:20 -05002737 return -1;
2738
Tom Zanussi8dc58102010-04-01 23:59:15 -05002739 return 0;
2740}
2741
Jiri Olsad4339562013-07-17 19:49:41 +02002742static int perf_header__read_pipe(struct perf_session *session)
Tom Zanussi8dc58102010-04-01 23:59:15 -05002743{
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03002744 struct perf_header *header = &session->header;
Tom Zanussi8dc58102010-04-01 23:59:15 -05002745 struct perf_pipe_file_header f_header;
2746
Jiri Olsacc9784bd2013-10-15 16:27:34 +02002747 if (perf_file_header__read_pipe(&f_header, header,
2748 perf_data_file__fd(session->file),
Tom Zanussi454c4072010-05-01 01:41:20 -05002749 session->repipe) < 0) {
Tom Zanussi8dc58102010-04-01 23:59:15 -05002750 pr_debug("incompatible file format\n");
2751 return -EINVAL;
2752 }
2753
Tom Zanussi8dc58102010-04-01 23:59:15 -05002754 return 0;
2755}
2756
Stephane Eranian69996df2012-02-09 23:21:06 +01002757static int read_attr(int fd, struct perf_header *ph,
2758 struct perf_file_attr *f_attr)
2759{
2760 struct perf_event_attr *attr = &f_attr->attr;
2761 size_t sz, left;
2762 size_t our_sz = sizeof(f_attr->attr);
Jiri Olsa727ebd52013-11-28 11:30:14 +01002763 ssize_t ret;
Stephane Eranian69996df2012-02-09 23:21:06 +01002764
2765 memset(f_attr, 0, sizeof(*f_attr));
2766
2767 /* read minimal guaranteed structure */
2768 ret = readn(fd, attr, PERF_ATTR_SIZE_VER0);
2769 if (ret <= 0) {
2770 pr_debug("cannot read %d bytes of header attr\n",
2771 PERF_ATTR_SIZE_VER0);
2772 return -1;
2773 }
2774
2775 /* on file perf_event_attr size */
2776 sz = attr->size;
Stephane Eranian114382a2012-02-09 23:21:08 +01002777
Stephane Eranian69996df2012-02-09 23:21:06 +01002778 if (ph->needs_swap)
2779 sz = bswap_32(sz);
2780
2781 if (sz == 0) {
2782 /* assume ABI0 */
2783 sz = PERF_ATTR_SIZE_VER0;
2784 } else if (sz > our_sz) {
2785 pr_debug("file uses a more recent and unsupported ABI"
2786 " (%zu bytes extra)\n", sz - our_sz);
2787 return -1;
2788 }
2789 /* what we have not yet read and that we know about */
2790 left = sz - PERF_ATTR_SIZE_VER0;
2791 if (left) {
2792 void *ptr = attr;
2793 ptr += PERF_ATTR_SIZE_VER0;
2794
2795 ret = readn(fd, ptr, left);
2796 }
2797 /* read perf_file_section, ids are read in caller */
2798 ret = readn(fd, &f_attr->ids, sizeof(f_attr->ids));
2799
2800 return ret <= 0 ? -1 : 0;
2801}
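/*
 * read_attr() copes with on-file perf_event_attr sizes from other ABI
 * revisions: a zero size is treated as ABI0, a size smaller than the
 * tool's own struct leaves the trailing fields zeroed, and a larger size
 * is rejected since the extra bytes cannot be interpreted.  The trailing
 * perf_file_section describing the sample ids is read here; the ids
 * themselves are read by the caller.
 */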
2802
Namhyung Kim831394b2012-09-06 11:10:46 +09002803static int perf_evsel__prepare_tracepoint_event(struct perf_evsel *evsel,
2804 struct pevent *pevent)
Arnaldo Carvalho de Melocb9dd492012-06-11 19:03:32 -03002805{
Namhyung Kim831394b2012-09-06 11:10:46 +09002806 struct event_format *event;
Arnaldo Carvalho de Melocb9dd492012-06-11 19:03:32 -03002807 char bf[128];
2808
Namhyung Kim831394b2012-09-06 11:10:46 +09002809 /* already prepared */
2810 if (evsel->tp_format)
2811 return 0;
2812
Namhyung Kim3dce2ce2013-03-21 16:18:48 +09002813 if (pevent == NULL) {
2814 pr_debug("broken or missing trace data\n");
2815 return -1;
2816 }
2817
Namhyung Kim831394b2012-09-06 11:10:46 +09002818 event = pevent_find_event(pevent, evsel->attr.config);
Namhyung Kima7619ae2013-04-18 21:24:16 +09002819 if (event == NULL) {
2820 pr_debug("cannot find event format for %d\n", (int)evsel->attr.config);
Arnaldo Carvalho de Melocb9dd492012-06-11 19:03:32 -03002821 return -1;
Namhyung Kima7619ae2013-04-18 21:24:16 +09002822 }
Arnaldo Carvalho de Melocb9dd492012-06-11 19:03:32 -03002823
Namhyung Kim831394b2012-09-06 11:10:46 +09002824 if (!evsel->name) {
2825 snprintf(bf, sizeof(bf), "%s:%s", event->system, event->name);
2826 evsel->name = strdup(bf);
2827 if (evsel->name == NULL)
2828 return -1;
2829 }
Arnaldo Carvalho de Melocb9dd492012-06-11 19:03:32 -03002830
Arnaldo Carvalho de Melofcf65bf2012-08-07 09:58:03 -03002831 evsel->tp_format = event;
Arnaldo Carvalho de Melocb9dd492012-06-11 19:03:32 -03002832 return 0;
2833}
2834
Namhyung Kim831394b2012-09-06 11:10:46 +09002835static int perf_evlist__prepare_tracepoint_events(struct perf_evlist *evlist,
2836 struct pevent *pevent)
Arnaldo Carvalho de Melocb9dd492012-06-11 19:03:32 -03002837{
2838 struct perf_evsel *pos;
2839
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03002840 evlist__for_each_entry(evlist, pos) {
Namhyung Kim831394b2012-09-06 11:10:46 +09002841 if (pos->attr.type == PERF_TYPE_TRACEPOINT &&
2842 perf_evsel__prepare_tracepoint_event(pos, pevent))
Arnaldo Carvalho de Melocb9dd492012-06-11 19:03:32 -03002843 return -1;
2844 }
2845
2846 return 0;
2847}
2848
Jiri Olsad4339562013-07-17 19:49:41 +02002849int perf_session__read_header(struct perf_session *session)
Tom Zanussi8dc58102010-04-01 23:59:15 -05002850{
Jiri Olsacc9784bd2013-10-15 16:27:34 +02002851 struct perf_data_file *file = session->file;
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03002852 struct perf_header *header = &session->header;
Arnaldo Carvalho de Meloba215942010-01-14 12:23:10 -02002853 struct perf_file_header f_header;
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02002854 struct perf_file_attr f_attr;
2855 u64 f_id;
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02002856 int nr_attrs, nr_ids, i, j;
Jiri Olsacc9784bd2013-10-15 16:27:34 +02002857 int fd = perf_data_file__fd(file);
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02002858
Namhyung Kim334fe7a2013-03-11 16:43:12 +09002859 session->evlist = perf_evlist__new();
Arnaldo Carvalho de Meloa91e5432011-03-10 11:15:54 -03002860 if (session->evlist == NULL)
2861 return -ENOMEM;
2862
Kan Liang2c071442015-08-28 05:48:05 -04002863 session->evlist->env = &header->env;
Arnaldo Carvalho de Melo4cde9982015-09-09 12:25:00 -03002864 session->machines.host.env = &header->env;
Jiri Olsacc9784bd2013-10-15 16:27:34 +02002865 if (perf_data_file__is_pipe(file))
Jiri Olsad4339562013-07-17 19:49:41 +02002866 return perf_header__read_pipe(session);
Tom Zanussi8dc58102010-04-01 23:59:15 -05002867
Stephane Eranian69996df2012-02-09 23:21:06 +01002868 if (perf_file_header__read(&f_header, header, fd) < 0)
Arnaldo Carvalho de Melo4dc0a042009-11-19 14:55:55 -02002869 return -EINVAL;
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02002870
Namhyung Kimb314e5c2013-09-30 17:19:48 +09002871 /*
2872 * Sanity check that perf.data was written cleanly; data size is
2873 * initialized to 0 and updated only if the on_exit function is run.
2874 * If data size is still 0 then the file contains only partial
 2875	 * information. Just warn the user and process as much of it as we can.
2876 */
2877 if (f_header.data.size == 0) {
2878 pr_warning("WARNING: The %s file's data size field is 0 which is unexpected.\n"
2879 "Was the 'perf record' command properly terminated?\n",
Jiri Olsacc9784bd2013-10-15 16:27:34 +02002880 file->path);
Namhyung Kimb314e5c2013-09-30 17:19:48 +09002881 }
2882
Stephane Eranian69996df2012-02-09 23:21:06 +01002883 nr_attrs = f_header.attrs.size / f_header.attr_size;
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02002884 lseek(fd, f_header.attrs.offset, SEEK_SET);
2885
2886 for (i = 0; i < nr_attrs; i++) {
Arnaldo Carvalho de Meloa91e5432011-03-10 11:15:54 -03002887 struct perf_evsel *evsel;
Peter Zijlstra1c222bc2009-08-06 20:57:41 +02002888 off_t tmp;
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02002889
Stephane Eranian69996df2012-02-09 23:21:06 +01002890 if (read_attr(fd, header, &f_attr) < 0)
Arnaldo Carvalho de Melo769885f2009-12-28 22:48:32 -02002891 goto out_errno;
Arnaldo Carvalho de Meloba215942010-01-14 12:23:10 -02002892
David Ahern1060ab82015-04-09 16:15:46 -04002893 if (header->needs_swap) {
2894 f_attr.ids.size = bswap_64(f_attr.ids.size);
2895 f_attr.ids.offset = bswap_64(f_attr.ids.offset);
David Aherneda39132011-07-15 12:34:09 -06002896 perf_event__attr_swap(&f_attr.attr);
David Ahern1060ab82015-04-09 16:15:46 -04002897 }
David Aherneda39132011-07-15 12:34:09 -06002898
Peter Zijlstra1c222bc2009-08-06 20:57:41 +02002899 tmp = lseek(fd, 0, SEEK_CUR);
Arnaldo Carvalho de Meloef503832013-11-07 16:41:19 -03002900 evsel = perf_evsel__new(&f_attr.attr);
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02002901
Arnaldo Carvalho de Meloa91e5432011-03-10 11:15:54 -03002902 if (evsel == NULL)
2903 goto out_delete_evlist;
Arnaldo Carvalho de Melo0807d2d2012-09-26 12:48:18 -03002904
2905 evsel->needs_swap = header->needs_swap;
Arnaldo Carvalho de Meloa91e5432011-03-10 11:15:54 -03002906 /*
2907 * Do it before so that if perf_evsel__alloc_id fails, this
2908 * entry gets purged too at perf_evlist__delete().
2909 */
2910 perf_evlist__add(session->evlist, evsel);
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02002911
2912 nr_ids = f_attr.ids.size / sizeof(u64);
Arnaldo Carvalho de Meloa91e5432011-03-10 11:15:54 -03002913 /*
2914 * We don't have the cpu and thread maps on the header, so
2915 * for allocating the perf_sample_id table we fake 1 cpu and
 2916		 * nr_ids threads.
2917 */
2918 if (perf_evsel__alloc_id(evsel, 1, nr_ids))
2919 goto out_delete_evlist;
2920
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02002921 lseek(fd, f_attr.ids.offset, SEEK_SET);
2922
2923 for (j = 0; j < nr_ids; j++) {
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03002924 if (perf_header__getbuffer64(header, fd, &f_id, sizeof(f_id)))
Arnaldo Carvalho de Melo769885f2009-12-28 22:48:32 -02002925 goto out_errno;
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02002926
Arnaldo Carvalho de Meloa91e5432011-03-10 11:15:54 -03002927 perf_evlist__id_add(session->evlist, evsel, 0, j, f_id);
Arnaldo Carvalho de Melo4dc0a042009-11-19 14:55:55 -02002928 }
Arnaldo Carvalho de Melo11deb1f2009-11-17 01:18:09 -02002929
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02002930 lseek(fd, tmp, SEEK_SET);
2931 }
2932
Arnaldo Carvalho de Melod04b35f2011-11-11 22:17:32 -02002933 symbol_conf.nr_events = nr_attrs;
2934
Jiri Olsa29f5ffd2013-12-03 14:09:23 +01002935 perf_header__process_sections(header, fd, &session->tevent,
Stephane Eranianfbe96f22011-09-30 15:40:40 +02002936 perf_file_section__process);
Frederic Weisbecker4778d2e2009-11-11 04:51:05 +01002937
Namhyung Kim831394b2012-09-06 11:10:46 +09002938 if (perf_evlist__prepare_tracepoint_events(session->evlist,
Jiri Olsa29f5ffd2013-12-03 14:09:23 +01002939 session->tevent.pevent))
Arnaldo Carvalho de Melocb9dd492012-06-11 19:03:32 -03002940 goto out_delete_evlist;
2941
Arnaldo Carvalho de Melo4dc0a042009-11-19 14:55:55 -02002942 return 0;
Arnaldo Carvalho de Melo769885f2009-12-28 22:48:32 -02002943out_errno:
2944 return -errno;
Arnaldo Carvalho de Meloa91e5432011-03-10 11:15:54 -03002945
2946out_delete_evlist:
2947 perf_evlist__delete(session->evlist);
2948 session->evlist = NULL;
2949 return -ENOMEM;
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02002950}
Frederic Weisbecker0d3a5c82009-08-16 20:56:37 +02002951
Arnaldo Carvalho de Melo45694aa2011-11-28 08:30:20 -02002952int perf_event__synthesize_attr(struct perf_tool *tool,
Robert Richterf4d83432012-08-16 21:10:17 +02002953 struct perf_event_attr *attr, u32 ids, u64 *id,
Arnaldo Carvalho de Melo743eb862011-11-28 07:56:39 -02002954 perf_event__handler_t process)
Frederic Weisbecker0d3a5c82009-08-16 20:56:37 +02002955{
Arnaldo Carvalho de Melo8115d602011-01-29 14:01:45 -02002956 union perf_event *ev;
Tom Zanussi2c46dbb2010-04-01 23:59:19 -05002957 size_t size;
2958 int err;
2959
2960 size = sizeof(struct perf_event_attr);
Irina Tirdea9ac3e482012-09-11 01:15:01 +03002961 size = PERF_ALIGN(size, sizeof(u64));
Tom Zanussi2c46dbb2010-04-01 23:59:19 -05002962 size += sizeof(struct perf_event_header);
2963 size += ids * sizeof(u64);
2964
2965 ev = malloc(size);
2966
Chris Samuelce47dc52010-11-13 13:35:06 +11002967 if (ev == NULL)
2968 return -ENOMEM;
2969
Tom Zanussi2c46dbb2010-04-01 23:59:19 -05002970 ev->attr.attr = *attr;
2971 memcpy(ev->attr.id, id, ids * sizeof(u64));
2972
2973 ev->attr.header.type = PERF_RECORD_HEADER_ATTR;
Robert Richterf4d83432012-08-16 21:10:17 +02002974 ev->attr.header.size = (u16)size;
Tom Zanussi2c46dbb2010-04-01 23:59:19 -05002975
Robert Richterf4d83432012-08-16 21:10:17 +02002976 if (ev->attr.header.size == size)
2977 err = process(tool, ev, NULL, NULL);
2978 else
2979 err = -E2BIG;
Tom Zanussi2c46dbb2010-04-01 23:59:19 -05002980
2981 free(ev);
2982
2983 return err;
2984}
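/*
 * Standalone sketch (compile separately; not perf code): the record sizing
 * done above for PERF_RECORD_HEADER_ATTR.  The 112-byte
 * sizeof(struct perf_event_attr) and the local alignment macro are
 * assumptions used only for illustration.
 */
#include <stdint.h>
#include <stdio.h>

#define ALIGN_U64(x)	(((x) + sizeof(uint64_t) - 1) & ~(sizeof(uint64_t) - 1))

int main(void)
{
	size_t attr_size = 112;		/* assumed sizeof(struct perf_event_attr) */
	size_t hdr_size = 8;		/* type (u32) + misc (u16) + size (u16) */
	uint32_t ids = 4;		/* one trailing u64 id per mapped counter */
	size_t size;

	size = ALIGN_U64(attr_size) + hdr_size + ids * sizeof(uint64_t);

	/* same guard as the -E2BIG check above: header.size is only a u16 */
	if ((uint16_t)size == size)
		printf("record fits: %zu bytes\n", size);
	else
		printf("too big for header.size, would return -E2BIG\n");
	return 0;
}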
2985
Jiri Olsaa6e52812015-10-25 15:51:37 +01002986static struct event_update_event *
2987event_update_event__new(size_t size, u64 type, u64 id)
2988{
2989 struct event_update_event *ev;
2990
2991 size += sizeof(*ev);
2992 size = PERF_ALIGN(size, sizeof(u64));
2993
2994 ev = zalloc(size);
2995 if (ev) {
2996 ev->header.type = PERF_RECORD_EVENT_UPDATE;
2997 ev->header.size = (u16)size;
2998 ev->type = type;
2999 ev->id = id;
3000 }
3001 return ev;
3002}
3003
3004int
3005perf_event__synthesize_event_update_unit(struct perf_tool *tool,
3006 struct perf_evsel *evsel,
3007 perf_event__handler_t process)
3008{
3009 struct event_update_event *ev;
3010 size_t size = strlen(evsel->unit);
3011 int err;
3012
3013 ev = event_update_event__new(size + 1, PERF_EVENT_UPDATE__UNIT, evsel->id[0]);
3014 if (ev == NULL)
3015 return -ENOMEM;
3016
3017 strncpy(ev->data, evsel->unit, size);
3018 err = process(tool, (union perf_event *)ev, NULL, NULL);
3019 free(ev);
3020 return err;
3021}
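/*
 * Standalone sketch (compile separately; not perf code, field widths are
 * assumptions): the variable-size PERF_RECORD_EVENT_UPDATE layout built by
 * event_update_event__new() above.  Because the buffer is zalloc'ed to the
 * u64-aligned size, the strncpy() of exactly strlen() bytes in the __UNIT
 * and __NAME synthesizers still leaves a NUL-terminated string for the
 * reader to strdup().
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct fake_event_update {
	struct { uint32_t type; uint16_t misc, size; } header;
	uint64_t type;			/* __UNIT, __SCALE, __NAME or __CPUS */
	uint64_t id;			/* first sample id of the evsel */
	char data[];			/* payload starts here */
};

int main(void)
{
	const char *unit = "Joules";
	size_t size = sizeof(struct fake_event_update) + strlen(unit) + 1;
	struct fake_event_update *ev;

	size = (size + 7) & ~(size_t)7;		/* PERF_ALIGN(size, sizeof(u64)) */
	ev = calloc(1, size);			/* zalloc(): trailing bytes stay 0 */
	if (ev == NULL)
		return 1;
	ev->header.size = (uint16_t)size;
	strncpy(ev->data, unit, strlen(unit));	/* NUL comes from the zeroed tail */
	printf("%zu-byte record, unit \"%s\"\n", size, ev->data);
	free(ev);
	return 0;
}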
3022
Jiri Olsadaeecbc2015-10-25 15:51:38 +01003023int
3024perf_event__synthesize_event_update_scale(struct perf_tool *tool,
3025 struct perf_evsel *evsel,
3026 perf_event__handler_t process)
3027{
3028 struct event_update_event *ev;
3029 struct event_update_event_scale *ev_data;
3030 int err;
3031
3032 ev = event_update_event__new(sizeof(*ev_data), PERF_EVENT_UPDATE__SCALE, evsel->id[0]);
3033 if (ev == NULL)
3034 return -ENOMEM;
3035
3036 ev_data = (struct event_update_event_scale *) ev->data;
3037 ev_data->scale = evsel->scale;
3038 err = process(tool, (union perf_event*) ev, NULL, NULL);
3039 free(ev);
3040 return err;
3041}
3042
Jiri Olsa802c9042015-10-25 15:51:39 +01003043int
3044perf_event__synthesize_event_update_name(struct perf_tool *tool,
3045 struct perf_evsel *evsel,
3046 perf_event__handler_t process)
3047{
3048 struct event_update_event *ev;
3049 size_t len = strlen(evsel->name);
3050 int err;
3051
3052 ev = event_update_event__new(len + 1, PERF_EVENT_UPDATE__NAME, evsel->id[0]);
3053 if (ev == NULL)
3054 return -ENOMEM;
3055
3056 strncpy(ev->data, evsel->name, len);
3057 err = process(tool, (union perf_event*) ev, NULL, NULL);
3058 free(ev);
3059 return err;
3060}
Jiri Olsadaeecbc2015-10-25 15:51:38 +01003061
Jiri Olsa86ebb092015-10-25 15:51:40 +01003062int
3063perf_event__synthesize_event_update_cpus(struct perf_tool *tool,
3064 struct perf_evsel *evsel,
3065 perf_event__handler_t process)
3066{
3067 size_t size = sizeof(struct event_update_event);
3068 struct event_update_event *ev;
3069 int max, err;
3070 u16 type;
3071
3072 if (!evsel->own_cpus)
3073 return 0;
3074
3075 ev = cpu_map_data__alloc(evsel->own_cpus, &size, &type, &max);
3076 if (!ev)
3077 return -ENOMEM;
3078
3079 ev->header.type = PERF_RECORD_EVENT_UPDATE;
3080 ev->header.size = (u16)size;
3081 ev->type = PERF_EVENT_UPDATE__CPUS;
3082 ev->id = evsel->id[0];
3083
3084 cpu_map_data__synthesize((struct cpu_map_data *) ev->data,
3085 evsel->own_cpus,
3086 type, max);
3087
3088 err = process(tool, (union perf_event*) ev, NULL, NULL);
3089 free(ev);
3090 return err;
3091}
3092
Jiri Olsac853f932015-10-25 15:51:41 +01003093size_t perf_event__fprintf_event_update(union perf_event *event, FILE *fp)
3094{
3095 struct event_update_event *ev = &event->event_update;
3096 struct event_update_event_scale *ev_scale;
3097 struct event_update_event_cpus *ev_cpus;
3098 struct cpu_map *map;
3099 size_t ret;
3100
3101 ret = fprintf(fp, "\n... id: %" PRIu64 "\n", ev->id);
3102
3103 switch (ev->type) {
3104 case PERF_EVENT_UPDATE__SCALE:
3105 ev_scale = (struct event_update_event_scale *) ev->data;
3106 ret += fprintf(fp, "... scale: %f\n", ev_scale->scale);
3107 break;
3108 case PERF_EVENT_UPDATE__UNIT:
3109 ret += fprintf(fp, "... unit: %s\n", ev->data);
3110 break;
3111 case PERF_EVENT_UPDATE__NAME:
3112 ret += fprintf(fp, "... name: %s\n", ev->data);
3113 break;
3114 case PERF_EVENT_UPDATE__CPUS:
3115 ev_cpus = (struct event_update_event_cpus *) ev->data;
3116 ret += fprintf(fp, "... ");
3117
3118 map = cpu_map__new_data(&ev_cpus->cpus);
3119 if (map)
3120 ret += cpu_map__fprintf(map, fp);
3121 else
3122 ret += fprintf(fp, "failed to get cpus\n");
3123 break;
3124 default:
3125 ret += fprintf(fp, "... unknown type\n");
3126 break;
3127 }
3128
3129 return ret;
3130}
Jiri Olsa86ebb092015-10-25 15:51:40 +01003131
Arnaldo Carvalho de Melo45694aa2011-11-28 08:30:20 -02003132int perf_event__synthesize_attrs(struct perf_tool *tool,
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02003133 struct perf_session *session,
Arnaldo Carvalho de Meloa91e5432011-03-10 11:15:54 -03003134 perf_event__handler_t process)
Tom Zanussi2c46dbb2010-04-01 23:59:19 -05003135{
Robert Richter6606f872012-08-16 21:10:19 +02003136 struct perf_evsel *evsel;
Arnaldo Carvalho de Meloa91e5432011-03-10 11:15:54 -03003137 int err = 0;
Tom Zanussi2c46dbb2010-04-01 23:59:19 -05003138
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03003139 evlist__for_each_entry(session->evlist, evsel) {
Robert Richter6606f872012-08-16 21:10:19 +02003140 err = perf_event__synthesize_attr(tool, &evsel->attr, evsel->ids,
3141 evsel->id, process);
Tom Zanussi2c46dbb2010-04-01 23:59:19 -05003142 if (err) {
3143 pr_debug("failed to create perf header attribute\n");
3144 return err;
3145 }
3146 }
3147
3148 return err;
3149}
3150
Adrian Hunter47c3d102013-07-04 16:20:21 +03003151int perf_event__process_attr(struct perf_tool *tool __maybe_unused,
3152 union perf_event *event,
Arnaldo Carvalho de Melo10d0f082011-11-11 22:45:41 -02003153 struct perf_evlist **pevlist)
Tom Zanussi2c46dbb2010-04-01 23:59:19 -05003154{
Robert Richterf4d83432012-08-16 21:10:17 +02003155 u32 i, ids, n_ids;
Arnaldo Carvalho de Meloa91e5432011-03-10 11:15:54 -03003156 struct perf_evsel *evsel;
Arnaldo Carvalho de Melo10d0f082011-11-11 22:45:41 -02003157 struct perf_evlist *evlist = *pevlist;
Tom Zanussi2c46dbb2010-04-01 23:59:19 -05003158
Arnaldo Carvalho de Melo10d0f082011-11-11 22:45:41 -02003159 if (evlist == NULL) {
Namhyung Kim334fe7a2013-03-11 16:43:12 +09003160 *pevlist = evlist = perf_evlist__new();
Arnaldo Carvalho de Melo10d0f082011-11-11 22:45:41 -02003161 if (evlist == NULL)
Tom Zanussi2c46dbb2010-04-01 23:59:19 -05003162 return -ENOMEM;
Tom Zanussi2c46dbb2010-04-01 23:59:19 -05003163 }
3164
Arnaldo Carvalho de Meloef503832013-11-07 16:41:19 -03003165 evsel = perf_evsel__new(&event->attr.attr);
Arnaldo Carvalho de Meloa91e5432011-03-10 11:15:54 -03003166 if (evsel == NULL)
Tom Zanussi2c46dbb2010-04-01 23:59:19 -05003167 return -ENOMEM;
Tom Zanussi2c46dbb2010-04-01 23:59:19 -05003168
Arnaldo Carvalho de Melo10d0f082011-11-11 22:45:41 -02003169 perf_evlist__add(evlist, evsel);
Arnaldo Carvalho de Meloa91e5432011-03-10 11:15:54 -03003170
Arnaldo Carvalho de Melo8115d602011-01-29 14:01:45 -02003171 ids = event->header.size;
3172 ids -= (void *)&event->attr.id - (void *)event;
Tom Zanussi2c46dbb2010-04-01 23:59:19 -05003173 n_ids = ids / sizeof(u64);
Arnaldo Carvalho de Meloa91e5432011-03-10 11:15:54 -03003174 /*
3175 * We don't have the cpu and thread maps on the header, so
3176 * for allocating the perf_sample_id table we fake 1 cpu and
 3177	 * n_ids threads.
3178 */
3179 if (perf_evsel__alloc_id(evsel, 1, n_ids))
3180 return -ENOMEM;
Tom Zanussi2c46dbb2010-04-01 23:59:19 -05003181
3182 for (i = 0; i < n_ids; i++) {
Arnaldo Carvalho de Melo10d0f082011-11-11 22:45:41 -02003183 perf_evlist__id_add(evlist, evsel, 0, i, event->attr.id[i]);
Tom Zanussi2c46dbb2010-04-01 23:59:19 -05003184 }
3185
Adrian Hunter7e0d6fc2013-07-04 16:20:29 +03003186 symbol_conf.nr_events = evlist->nr_entries;
3187
Tom Zanussi2c46dbb2010-04-01 23:59:19 -05003188 return 0;
3189}
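/*
 * Standalone sketch (compile separately; not perf code): how the number of
 * trailing ids is recovered above from event->header.size.  The struct is a
 * simplified stand-in for the real attr record, with an assumed 112-byte
 * perf_event_attr; the header_size value is just an example.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct fake_attr_event {
	struct { uint32_t type; uint16_t misc, size; } header;
	uint8_t  attr[112];		/* assumed sizeof(struct perf_event_attr) */
	uint64_t id[];			/* trailing sample ids */
};

int main(void)
{
	uint16_t header_size = 160;	/* example value taken off the wire */
	size_t ids = header_size - offsetof(struct fake_attr_event, id);
	size_t n_ids = ids / sizeof(uint64_t);

	printf("%zu trailing ids in a %u-byte record\n", n_ids, header_size);
	return 0;
}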
Tom Zanussicd19a032010-04-01 23:59:20 -05003190
Jiri Olsaffe777252015-10-25 15:51:36 +01003191int perf_event__process_event_update(struct perf_tool *tool __maybe_unused,
3192 union perf_event *event,
3193 struct perf_evlist **pevlist)
3194{
3195 struct event_update_event *ev = &event->event_update;
Jiri Olsadaeecbc2015-10-25 15:51:38 +01003196 struct event_update_event_scale *ev_scale;
Jiri Olsa86ebb092015-10-25 15:51:40 +01003197 struct event_update_event_cpus *ev_cpus;
Jiri Olsaffe777252015-10-25 15:51:36 +01003198 struct perf_evlist *evlist;
3199 struct perf_evsel *evsel;
Jiri Olsa86ebb092015-10-25 15:51:40 +01003200 struct cpu_map *map;
Jiri Olsaffe777252015-10-25 15:51:36 +01003201
3202 if (!pevlist || *pevlist == NULL)
3203 return -EINVAL;
3204
3205 evlist = *pevlist;
3206
3207 evsel = perf_evlist__id2evsel(evlist, ev->id);
3208 if (evsel == NULL)
3209 return -EINVAL;
3210
Jiri Olsaa6e52812015-10-25 15:51:37 +01003211 switch (ev->type) {
3212 case PERF_EVENT_UPDATE__UNIT:
3213 evsel->unit = strdup(ev->data);
Jiri Olsadaeecbc2015-10-25 15:51:38 +01003214 break;
Jiri Olsa802c9042015-10-25 15:51:39 +01003215 case PERF_EVENT_UPDATE__NAME:
3216 evsel->name = strdup(ev->data);
3217 break;
Jiri Olsadaeecbc2015-10-25 15:51:38 +01003218 case PERF_EVENT_UPDATE__SCALE:
3219 ev_scale = (struct event_update_event_scale *) ev->data;
3220 evsel->scale = ev_scale->scale;
Arnaldo Carvalho de Melo8434a2e2017-02-08 21:57:22 -03003221 break;
Jiri Olsa86ebb092015-10-25 15:51:40 +01003222 case PERF_EVENT_UPDATE__CPUS:
3223 ev_cpus = (struct event_update_event_cpus *) ev->data;
3224
3225 map = cpu_map__new_data(&ev_cpus->cpus);
3226 if (map)
3227 evsel->own_cpus = map;
3228 else
3229 pr_err("failed to get event_update cpus\n");
Jiri Olsaa6e52812015-10-25 15:51:37 +01003230 default:
3231 break;
3232 }
3233
Jiri Olsaffe777252015-10-25 15:51:36 +01003234 return 0;
3235}
3236
Arnaldo Carvalho de Melo45694aa2011-11-28 08:30:20 -02003237int perf_event__synthesize_tracing_data(struct perf_tool *tool, int fd,
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02003238 struct perf_evlist *evlist,
Arnaldo Carvalho de Melo743eb862011-11-28 07:56:39 -02003239 perf_event__handler_t process)
Tom Zanussi92155452010-04-01 23:59:21 -05003240{
Arnaldo Carvalho de Melo8115d602011-01-29 14:01:45 -02003241 union perf_event ev;
Jiri Olsa29208e52011-10-20 15:59:43 +02003242 struct tracing_data *tdata;
Tom Zanussi92155452010-04-01 23:59:21 -05003243 ssize_t size = 0, aligned_size = 0, padding;
Irina Tirdea1d037ca2012-09-11 01:15:03 +03003244 int err __maybe_unused = 0;
Tom Zanussi92155452010-04-01 23:59:21 -05003245
Jiri Olsa29208e52011-10-20 15:59:43 +02003246 /*
3247 * We are going to store the size of the data followed
3248 * by the data contents. Since the fd descriptor is a pipe,
3249 * we cannot seek back to store the size of the data once
3250 * we know it. Instead we:
3251 *
3252 * - write the tracing data to the temp file
3253 * - get/write the data size to pipe
3254 * - write the tracing data from the temp file
3255 * to the pipe
3256 */
3257 tdata = tracing_data_get(&evlist->entries, fd, true);
3258 if (!tdata)
3259 return -1;
3260
Tom Zanussi92155452010-04-01 23:59:21 -05003261 memset(&ev, 0, sizeof(ev));
3262
3263 ev.tracing_data.header.type = PERF_RECORD_HEADER_TRACING_DATA;
Jiri Olsa29208e52011-10-20 15:59:43 +02003264 size = tdata->size;
Irina Tirdea9ac3e482012-09-11 01:15:01 +03003265 aligned_size = PERF_ALIGN(size, sizeof(u64));
Tom Zanussi92155452010-04-01 23:59:21 -05003266 padding = aligned_size - size;
3267 ev.tracing_data.header.size = sizeof(ev.tracing_data);
3268 ev.tracing_data.size = aligned_size;
3269
Arnaldo Carvalho de Melo45694aa2011-11-28 08:30:20 -02003270 process(tool, &ev, NULL, NULL);
Tom Zanussi92155452010-04-01 23:59:21 -05003271
Jiri Olsa29208e52011-10-20 15:59:43 +02003272 /*
3273 * The put function will copy all the tracing data
3274 * stored in temp file to the pipe.
3275 */
3276 tracing_data_put(tdata);
3277
Tom Zanussi92155452010-04-01 23:59:21 -05003278 write_padded(fd, NULL, 0, padding);
3279
3280 return aligned_size;
3281}
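/*
 * Standalone sketch (compile separately; not perf code): the padding rule
 * used above when tracing data is streamed through the pipe.  The record
 * advertises the u64-aligned size, the raw payload follows, and the gap is
 * filled with zero bytes, write_padded()-style; the reader side
 * (perf_event__process_tracing_data() below) recomputes and skips the same
 * padding.  The payload size here is an arbitrary example.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	size_t size = 4093;			/* raw tracing data size (example) */
	size_t aligned_size = (size + 7) & ~(size_t)7;
	size_t padding = aligned_size - size;
	unsigned char pad[sizeof(uint64_t)];

	memset(pad, 0, sizeof(pad));		/* these bytes would follow the data */
	printf("payload %zu, advertised %zu, %zu zero pad bytes\n",
	       size, aligned_size, padding);
	return 0;
}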
3282
Adrian Hunter47c3d102013-07-04 16:20:21 +03003283int perf_event__process_tracing_data(struct perf_tool *tool __maybe_unused,
3284 union perf_event *event,
Arnaldo Carvalho de Melo8115d602011-01-29 14:01:45 -02003285 struct perf_session *session)
Tom Zanussi92155452010-04-01 23:59:21 -05003286{
Arnaldo Carvalho de Melo8115d602011-01-29 14:01:45 -02003287 ssize_t size_read, padding, size = event->tracing_data.size;
Jiri Olsacc9784bd2013-10-15 16:27:34 +02003288 int fd = perf_data_file__fd(session->file);
3289 off_t offset = lseek(fd, 0, SEEK_CUR);
Tom Zanussi92155452010-04-01 23:59:21 -05003290 char buf[BUFSIZ];
3291
3292 /* setup for reading amidst mmap */
Jiri Olsacc9784bd2013-10-15 16:27:34 +02003293 lseek(fd, offset + sizeof(struct tracing_data_event),
Tom Zanussi92155452010-04-01 23:59:21 -05003294 SEEK_SET);
3295
Jiri Olsa29f5ffd2013-12-03 14:09:23 +01003296 size_read = trace_report(fd, &session->tevent,
Arnaldo Carvalho de Meloda378962012-06-27 13:08:42 -03003297 session->repipe);
Irina Tirdea9ac3e482012-09-11 01:15:01 +03003298 padding = PERF_ALIGN(size_read, sizeof(u64)) - size_read;
Tom Zanussi92155452010-04-01 23:59:21 -05003299
Jiri Olsacc9784bd2013-10-15 16:27:34 +02003300 if (readn(fd, buf, padding) < 0) {
Arnaldo Carvalho de Melo2caa48a2013-01-24 22:34:33 -03003301 pr_err("%s: reading input file", __func__);
3302 return -1;
3303 }
Tom Zanussi454c4072010-05-01 01:41:20 -05003304 if (session->repipe) {
3305 int retw = write(STDOUT_FILENO, buf, padding);
Arnaldo Carvalho de Melo2caa48a2013-01-24 22:34:33 -03003306		if (retw != padding) {
3307 pr_err("%s: repiping tracing data padding", __func__);
3308 return -1;
3309 }
Tom Zanussi454c4072010-05-01 01:41:20 -05003310 }
Tom Zanussi92155452010-04-01 23:59:21 -05003311
Arnaldo Carvalho de Melo2caa48a2013-01-24 22:34:33 -03003312 if (size_read + padding != size) {
3313 pr_err("%s: tracing data size mismatch", __func__);
3314 return -1;
3315 }
Tom Zanussi92155452010-04-01 23:59:21 -05003316
Namhyung Kim831394b2012-09-06 11:10:46 +09003317 perf_evlist__prepare_tracepoint_events(session->evlist,
Jiri Olsa29f5ffd2013-12-03 14:09:23 +01003318 session->tevent.pevent);
Arnaldo Carvalho de Melo8b6ee4c2012-08-07 23:36:16 -03003319
Tom Zanussi92155452010-04-01 23:59:21 -05003320 return size_read + padding;
3321}
Tom Zanussic7929e42010-04-01 23:59:22 -05003322
Arnaldo Carvalho de Melo45694aa2011-11-28 08:30:20 -02003323int perf_event__synthesize_build_id(struct perf_tool *tool,
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02003324 struct dso *pos, u16 misc,
Arnaldo Carvalho de Melo8115d602011-01-29 14:01:45 -02003325 perf_event__handler_t process,
Arnaldo Carvalho de Melo743eb862011-11-28 07:56:39 -02003326 struct machine *machine)
Tom Zanussic7929e42010-04-01 23:59:22 -05003327{
Arnaldo Carvalho de Melo8115d602011-01-29 14:01:45 -02003328 union perf_event ev;
Tom Zanussic7929e42010-04-01 23:59:22 -05003329 size_t len;
3330 int err = 0;
3331
3332 if (!pos->hit)
3333 return err;
3334
3335 memset(&ev, 0, sizeof(ev));
3336
3337 len = pos->long_name_len + 1;
Irina Tirdea9ac3e482012-09-11 01:15:01 +03003338 len = PERF_ALIGN(len, NAME_ALIGN);
Tom Zanussic7929e42010-04-01 23:59:22 -05003339 memcpy(&ev.build_id.build_id, pos->build_id, sizeof(pos->build_id));
3340 ev.build_id.header.type = PERF_RECORD_HEADER_BUILD_ID;
3341 ev.build_id.header.misc = misc;
Arnaldo Carvalho de Melo23346f22010-04-27 21:17:50 -03003342 ev.build_id.pid = machine->pid;
Tom Zanussic7929e42010-04-01 23:59:22 -05003343 ev.build_id.header.size = sizeof(ev.build_id) + len;
3344 memcpy(&ev.build_id.filename, pos->long_name, pos->long_name_len);
3345
Arnaldo Carvalho de Melo45694aa2011-11-28 08:30:20 -02003346 err = process(tool, &ev, NULL, machine);
Tom Zanussic7929e42010-04-01 23:59:22 -05003347
3348 return err;
3349}
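/*
 * Standalone sketch (compile separately; not perf code): the filename
 * padding used above for PERF_RECORD_HEADER_BUILD_ID.  NAME_ALIGN is
 * assumed to be 64 here, and the DSO path is hypothetical.
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *long_name = "/usr/lib/libfoo.so.1";	/* hypothetical DSO path */
	size_t name_align = 64;					/* assumed NAME_ALIGN */
	size_t len = strlen(long_name) + 1;			/* keep the trailing NUL */

	len = (len + name_align - 1) & ~(name_align - 1);
	printf("\"%s\" padded to a %zu-byte filename field\n", long_name, len);
	return 0;
}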
3350
Irina Tirdea1d037ca2012-09-11 01:15:03 +03003351int perf_event__process_build_id(struct perf_tool *tool __maybe_unused,
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02003352 union perf_event *event,
Arnaldo Carvalho de Melo8115d602011-01-29 14:01:45 -02003353 struct perf_session *session)
Tom Zanussic7929e42010-04-01 23:59:22 -05003354{
Arnaldo Carvalho de Melo8115d602011-01-29 14:01:45 -02003355 __event_process_build_id(&event->build_id,
3356 event->build_id.filename,
Zhang, Yanmina1645ce2010-04-19 13:32:50 +08003357 session);
Tom Zanussic7929e42010-04-01 23:59:22 -05003358 return 0;
3359}