#include <errno.h>
#include <inttypes.h>
#include "util.h"
#include "string2.h"
#include <sys/param.h>
#include <sys/types.h>
#include <byteswap.h>
#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/utsname.h>
#include <unistd.h>

#include "evlist.h"
#include "evsel.h"
#include "header.h"
#include "memswap.h"
#include "../perf.h"
#include "trace-event.h"
#include "session.h"
#include "symbol.h"
#include "debug.h"
#include "cpumap.h"
#include "pmu.h"
#include "vdso.h"
#include "strbuf.h"
#include "build-id.h"
#include "data.h"
#include <api/fs/fs.h>
#include "asm/bug.h"

#include "sane_ctype.h"

/*
 * magic2 = "PERFILE2"
 * must be a numerical value to let the endianness
 * determine the memory layout. That way we are able
 * to detect endianness when reading the perf.data file
 * back.
 *
 * we check for legacy (PERFFILE) format.
 */
static const char *__perf_magic1 = "PERFFILE";
static const u64 __perf_magic2    = 0x32454c4946524550ULL;
static const u64 __perf_magic2_sw = 0x50455246494c4532ULL;

#define PERF_MAGIC	__perf_magic2

const char perf_version_string[] = PERF_VERSION;

struct perf_file_attr {
	struct perf_event_attr	attr;
	struct perf_file_section	ids;
};

void perf_header__set_feat(struct perf_header *header, int feat)
{
	set_bit(feat, header->adds_features);
}

void perf_header__clear_feat(struct perf_header *header, int feat)
{
	clear_bit(feat, header->adds_features);
}

bool perf_header__has_feat(const struct perf_header *header, int feat)
{
	return test_bit(feat, header->adds_features);
}

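/*
 * Write the whole buffer to fd, looping to cope with short writes.
 * Returns 0 on success or -errno on failure.
 */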
static int do_write(int fd, const void *buf, size_t size)
{
	while (size) {
		int ret = write(fd, buf, size);

		if (ret < 0)
			return -errno;

		size -= ret;
		buf += ret;
	}

	return 0;
}

int write_padded(int fd, const void *bf, size_t count, size_t count_aligned)
{
	static const char zero_buf[NAME_ALIGN];
	int err = do_write(fd, bf, count);

	if (!err)
		err = do_write(fd, zero_buf, count_aligned - count);

	return err;
}

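/*
 * On-disk string format: a u32 length (strlen + 1, rounded up to
 * NAME_ALIGN) followed by the NUL-terminated string, zero-padded out
 * to that length.
 */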
#define string_size(str)						\
	(PERF_ALIGN((strlen(str) + 1), NAME_ALIGN) + sizeof(u32))

static int do_write_string(int fd, const char *str)
{
	u32 len, olen;
	int ret;

	olen = strlen(str) + 1;
	len = PERF_ALIGN(olen, NAME_ALIGN);

	/* write len, incl. \0 */
	ret = do_write(fd, &len, sizeof(len));
	if (ret < 0)
		return ret;

	return write_padded(fd, str, olen, len);
}

static char *do_read_string(int fd, struct perf_header *ph)
{
	ssize_t sz, ret;
	u32 len;
	char *buf;

	sz = readn(fd, &len, sizeof(len));
	if (sz < (ssize_t)sizeof(len))
		return NULL;

	if (ph->needs_swap)
		len = bswap_32(len);

	buf = malloc(len);
	if (!buf)
		return NULL;

	ret = readn(fd, buf, len);
	if (ret == (ssize_t)len) {
		/*
		 * strings are padded by zeroes
		 * thus the actual strlen of buf
		 * may be less than len
		 */
		return buf;
	}

	free(buf);
	return NULL;
}

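/*
 * Per-feature writers: each write_*() routine below emits the payload
 * of one optional header feature section and returns 0 on success or a
 * negative error code.
 */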
static int write_tracing_data(int fd, struct perf_header *h __maybe_unused,
			      struct perf_evlist *evlist)
{
	return read_tracing_data(fd, &evlist->entries);
}


static int write_build_id(int fd, struct perf_header *h,
			  struct perf_evlist *evlist __maybe_unused)
{
	struct perf_session *session;
	int err;

	session = container_of(h, struct perf_session, header);

	if (!perf_session__read_build_ids(session, true))
		return -1;

	err = perf_session__write_buildid_table(session, fd);
	if (err < 0) {
		pr_debug("failed to write buildid table\n");
		return err;
	}
	perf_session__cache_build_ids(session);

	return 0;
}

static int write_hostname(int fd, struct perf_header *h __maybe_unused,
			  struct perf_evlist *evlist __maybe_unused)
{
	struct utsname uts;
	int ret;

	ret = uname(&uts);
	if (ret < 0)
		return -1;

	return do_write_string(fd, uts.nodename);
}

static int write_osrelease(int fd, struct perf_header *h __maybe_unused,
			   struct perf_evlist *evlist __maybe_unused)
{
	struct utsname uts;
	int ret;

	ret = uname(&uts);
	if (ret < 0)
		return -1;

	return do_write_string(fd, uts.release);
}

static int write_arch(int fd, struct perf_header *h __maybe_unused,
		      struct perf_evlist *evlist __maybe_unused)
{
	struct utsname uts;
	int ret;

	ret = uname(&uts);
	if (ret < 0)
		return -1;

	return do_write_string(fd, uts.machine);
}

static int write_version(int fd, struct perf_header *h __maybe_unused,
			 struct perf_evlist *evlist __maybe_unused)
{
	return do_write_string(fd, perf_version_string);
}

static int __write_cpudesc(int fd, const char *cpuinfo_proc)
{
	FILE *file;
	char *buf = NULL;
	char *s, *p;
	const char *search = cpuinfo_proc;
	size_t len = 0;
	int ret = -1;

	if (!search)
		return -1;

	file = fopen("/proc/cpuinfo", "r");
	if (!file)
		return -1;

	while (getline(&buf, &len, file) > 0) {
		ret = strncmp(buf, search, strlen(search));
		if (!ret)
			break;
	}

	if (ret) {
		ret = -1;
		goto done;
	}

	s = buf;

	p = strchr(buf, ':');
	if (p && *(p+1) == ' ' && *(p+2))
		s = p + 2;
	p = strchr(s, '\n');
	if (p)
		*p = '\0';

	/* squash extra space characters (branding string) */
	p = s;
	while (*p) {
		if (isspace(*p)) {
			char *r = p + 1;
			char *q = r;
			*p = ' ';
			while (*q && isspace(*q))
				q++;
			if (q != (p+1))
				while ((*r++ = *q++));
		}
		p++;
	}
	ret = do_write_string(fd, s);
done:
	free(buf);
	fclose(file);
	return ret;
}

static int write_cpudesc(int fd, struct perf_header *h __maybe_unused,
			 struct perf_evlist *evlist __maybe_unused)
{
#ifndef CPUINFO_PROC
#define CPUINFO_PROC	{"model name", }
#endif
	const char *cpuinfo_procs[] = CPUINFO_PROC;
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(cpuinfo_procs); i++) {
		int ret;
		ret = __write_cpudesc(fd, cpuinfo_procs[i]);
		if (ret >= 0)
			return ret;
	}
	return -1;
}


static int write_nrcpus(int fd, struct perf_header *h __maybe_unused,
			struct perf_evlist *evlist __maybe_unused)
{
	long nr;
	u32 nrc, nra;
	int ret;

	nrc = cpu__max_present_cpu();

	nr = sysconf(_SC_NPROCESSORS_ONLN);
	if (nr < 0)
		return -1;

	nra = (u32)(nr & UINT_MAX);

	ret = do_write(fd, &nrc, sizeof(nrc));
	if (ret < 0)
		return ret;

	return do_write(fd, &nra, sizeof(nra));
}

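/*
 * HEADER_EVENT_DESC layout: u32 nr_events, u32 attr_size, then for each
 * event its perf_event_attr, a u32 id count, the event name string and
 * that many u64 ids.
 */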
static int write_event_desc(int fd, struct perf_header *h __maybe_unused,
			    struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	u32 nre, nri, sz;
	int ret;

	nre = evlist->nr_entries;

	/*
	 * write number of events
	 */
	ret = do_write(fd, &nre, sizeof(nre));
	if (ret < 0)
		return ret;

	/*
	 * size of perf_event_attr struct
	 */
	sz = (u32)sizeof(evsel->attr);
	ret = do_write(fd, &sz, sizeof(sz));
	if (ret < 0)
		return ret;

	evlist__for_each_entry(evlist, evsel) {
		ret = do_write(fd, &evsel->attr, sz);
		if (ret < 0)
			return ret;
		/*
		 * write number of unique id per event
		 * there is one id per instance of an event
		 *
		 * copy into an nri to be independent of the
		 * type of ids,
		 */
		nri = evsel->ids;
		ret = do_write(fd, &nri, sizeof(nri));
		if (ret < 0)
			return ret;

		/*
		 * write event string as passed on cmdline
		 */
		ret = do_write_string(fd, perf_evsel__name(evsel));
		if (ret < 0)
			return ret;
		/*
		 * write unique ids for this event
		 */
		ret = do_write(fd, evsel->id, evsel->ids * sizeof(u64));
		if (ret < 0)
			return ret;
	}
	return 0;
}

static int write_cmdline(int fd, struct perf_header *h __maybe_unused,
			 struct perf_evlist *evlist __maybe_unused)
{
	char buf[MAXPATHLEN];
	u32 n;
	int i, ret;

	/* actual path to perf binary */
	ret = readlink("/proc/self/exe", buf, sizeof(buf) - 1);
	if (ret <= 0)
		return -1;

	/* readlink() does not add null termination */
	buf[ret] = '\0';

	/* account for binary path */
	n = perf_env.nr_cmdline + 1;

	ret = do_write(fd, &n, sizeof(n));
	if (ret < 0)
		return ret;

	ret = do_write_string(fd, buf);
	if (ret < 0)
		return ret;

	for (i = 0 ; i < perf_env.nr_cmdline; i++) {
		ret = do_write_string(fd, perf_env.cmdline_argv[i]);
		if (ret < 0)
			return ret;
	}
	return 0;
}

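/*
 * The CPU topology is collected from the sysfs core_siblings_list and
 * thread_siblings_list files for each CPU in the online map; duplicate
 * sibling-list strings are stored only once.
 */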
#define CORE_SIB_FMT \
	"/sys/devices/system/cpu/cpu%d/topology/core_siblings_list"
#define THRD_SIB_FMT \
	"/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list"

struct cpu_topo {
	u32 cpu_nr;
	u32 core_sib;
	u32 thread_sib;
	char **core_siblings;
	char **thread_siblings;
};

static int build_cpu_topo(struct cpu_topo *tp, int cpu)
{
	FILE *fp;
	char filename[MAXPATHLEN];
	char *buf = NULL, *p;
	size_t len = 0;
	ssize_t sret;
	u32 i = 0;
	int ret = -1;

	sprintf(filename, CORE_SIB_FMT, cpu);
	fp = fopen(filename, "r");
	if (!fp)
		goto try_threads;

	sret = getline(&buf, &len, fp);
	fclose(fp);
	if (sret <= 0)
		goto try_threads;

	p = strchr(buf, '\n');
	if (p)
		*p = '\0';

	for (i = 0; i < tp->core_sib; i++) {
		if (!strcmp(buf, tp->core_siblings[i]))
			break;
	}
	if (i == tp->core_sib) {
		tp->core_siblings[i] = buf;
		tp->core_sib++;
		buf = NULL;
		len = 0;
	}
	ret = 0;

try_threads:
	sprintf(filename, THRD_SIB_FMT, cpu);
	fp = fopen(filename, "r");
	if (!fp)
		goto done;

	if (getline(&buf, &len, fp) <= 0)
		goto done;

	p = strchr(buf, '\n');
	if (p)
		*p = '\0';

	for (i = 0; i < tp->thread_sib; i++) {
		if (!strcmp(buf, tp->thread_siblings[i]))
			break;
	}
	if (i == tp->thread_sib) {
		tp->thread_siblings[i] = buf;
		tp->thread_sib++;
		buf = NULL;
	}
	ret = 0;
done:
	if (fp)
		fclose(fp);
	free(buf);
	return ret;
}

static void free_cpu_topo(struct cpu_topo *tp)
{
	u32 i;

	if (!tp)
		return;

	for (i = 0 ; i < tp->core_sib; i++)
		zfree(&tp->core_siblings[i]);

	for (i = 0 ; i < tp->thread_sib; i++)
		zfree(&tp->thread_siblings[i]);

	free(tp);
}

static struct cpu_topo *build_cpu_topology(void)
{
	struct cpu_topo *tp = NULL;
	void *addr;
	u32 nr, i;
	size_t sz;
	long ncpus;
	int ret = -1;
	struct cpu_map *map;

	ncpus = cpu__max_present_cpu();

	/* build online CPU map */
	map = cpu_map__new(NULL);
	if (map == NULL) {
		pr_debug("failed to get system cpumap\n");
		return NULL;
	}

	nr = (u32)(ncpus & UINT_MAX);

	sz = nr * sizeof(char *);
	addr = calloc(1, sizeof(*tp) + 2 * sz);
	if (!addr)
		goto out_free;

	tp = addr;
	tp->cpu_nr = nr;
	addr += sizeof(*tp);
	tp->core_siblings = addr;
	addr += sz;
	tp->thread_siblings = addr;

	for (i = 0; i < nr; i++) {
		if (!cpu_map__has(map, i))
			continue;

		ret = build_cpu_topo(tp, i);
		if (ret < 0)
			break;
	}

out_free:
	cpu_map__put(map);
	if (ret) {
		free_cpu_topo(tp);
		tp = NULL;
	}
	return tp;
}

static int write_cpu_topology(int fd, struct perf_header *h __maybe_unused,
			      struct perf_evlist *evlist __maybe_unused)
{
	struct cpu_topo *tp;
	u32 i;
	int ret, j;

	tp = build_cpu_topology();
	if (!tp)
		return -1;

	ret = do_write(fd, &tp->core_sib, sizeof(tp->core_sib));
	if (ret < 0)
		goto done;

	for (i = 0; i < tp->core_sib; i++) {
		ret = do_write_string(fd, tp->core_siblings[i]);
		if (ret < 0)
			goto done;
	}
	ret = do_write(fd, &tp->thread_sib, sizeof(tp->thread_sib));
	if (ret < 0)
		goto done;

	for (i = 0; i < tp->thread_sib; i++) {
		ret = do_write_string(fd, tp->thread_siblings[i]);
		if (ret < 0)
			break;
	}

	ret = perf_env__read_cpu_topology_map(&perf_env);
	if (ret < 0)
		goto done;

	for (j = 0; j < perf_env.nr_cpus_avail; j++) {
		ret = do_write(fd, &perf_env.cpu[j].core_id,
			       sizeof(perf_env.cpu[j].core_id));
		if (ret < 0)
			return ret;
		ret = do_write(fd, &perf_env.cpu[j].socket_id,
			       sizeof(perf_env.cpu[j].socket_id));
		if (ret < 0)
			return ret;
	}
done:
	free_cpu_topo(tp);
	return ret;
}


static int write_total_mem(int fd, struct perf_header *h __maybe_unused,
			   struct perf_evlist *evlist __maybe_unused)
{
	char *buf = NULL;
	FILE *fp;
	size_t len = 0;
	int ret = -1, n;
	uint64_t mem;

	fp = fopen("/proc/meminfo", "r");
	if (!fp)
		return -1;

	while (getline(&buf, &len, fp) > 0) {
		ret = strncmp(buf, "MemTotal:", 9);
		if (!ret)
			break;
	}
	if (!ret) {
		n = sscanf(buf, "%*s %"PRIu64, &mem);
		if (n == 1)
			ret = do_write(fd, &mem, sizeof(mem));
	} else
		ret = -1;
	free(buf);
	fclose(fp);
	return ret;
}

static int write_topo_node(int fd, int node)
{
	char str[MAXPATHLEN];
	char field[32];
	char *buf = NULL, *p;
	size_t len = 0;
	FILE *fp;
	u64 mem_total, mem_free, mem;
	int ret = -1;

	sprintf(str, "/sys/devices/system/node/node%d/meminfo", node);
	fp = fopen(str, "r");
	if (!fp)
		return -1;

	while (getline(&buf, &len, fp) > 0) {
		/* skip over invalid lines */
		if (!strchr(buf, ':'))
			continue;
		if (sscanf(buf, "%*s %*d %31s %"PRIu64, field, &mem) != 2)
			goto done;
		if (!strcmp(field, "MemTotal:"))
			mem_total = mem;
		if (!strcmp(field, "MemFree:"))
			mem_free = mem;
	}

	fclose(fp);
	fp = NULL;

	ret = do_write(fd, &mem_total, sizeof(u64));
	if (ret)
		goto done;

	ret = do_write(fd, &mem_free, sizeof(u64));
	if (ret)
		goto done;

	ret = -1;
	sprintf(str, "/sys/devices/system/node/node%d/cpulist", node);

	fp = fopen(str, "r");
	if (!fp)
		goto done;

	if (getline(&buf, &len, fp) <= 0)
		goto done;

	p = strchr(buf, '\n');
	if (p)
		*p = '\0';

	ret = do_write_string(fd, buf);
done:
	free(buf);
	if (fp)
		fclose(fp);
	return ret;
}

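/*
 * HEADER_NUMA_TOPOLOGY layout: u32 nr_nodes, then for each online node
 * its u32 node id, u64 MemTotal, u64 MemFree and the node's cpulist
 * string (see write_topo_node() above).
 */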
static int write_numa_topology(int fd, struct perf_header *h __maybe_unused,
			       struct perf_evlist *evlist __maybe_unused)
{
	char *buf = NULL;
	size_t len = 0;
	FILE *fp;
	struct cpu_map *node_map = NULL;
	char *c;
	u32 nr, i, j;
	int ret = -1;

	fp = fopen("/sys/devices/system/node/online", "r");
	if (!fp)
		return -1;

	if (getline(&buf, &len, fp) <= 0)
		goto done;

	c = strchr(buf, '\n');
	if (c)
		*c = '\0';

	node_map = cpu_map__new(buf);
	if (!node_map)
		goto done;

	nr = (u32)node_map->nr;

	ret = do_write(fd, &nr, sizeof(nr));
	if (ret < 0)
		goto done;

	for (i = 0; i < nr; i++) {
		j = (u32)node_map->map[i];
		ret = do_write(fd, &j, sizeof(j));
		if (ret < 0)
			break;

		ret = write_topo_node(fd, i);
		if (ret < 0)
			break;
	}
done:
	free(buf);
	fclose(fp);
	cpu_map__put(node_map);
	return ret;
}

/*
 * File format:
 *
 * struct pmu_mappings {
 *	u32	pmu_num;
 *	struct pmu_map {
 *		u32	type;
 *		char	name[];
 *	}[pmu_num];
 * };
 */

static int write_pmu_mappings(int fd, struct perf_header *h __maybe_unused,
			      struct perf_evlist *evlist __maybe_unused)
{
	struct perf_pmu *pmu = NULL;
	off_t offset = lseek(fd, 0, SEEK_CUR);
	__u32 pmu_num = 0;
	int ret;

	/* write real pmu_num later */
	ret = do_write(fd, &pmu_num, sizeof(pmu_num));
	if (ret < 0)
		return ret;

	while ((pmu = perf_pmu__scan(pmu))) {
		if (!pmu->name)
			continue;
		pmu_num++;

		ret = do_write(fd, &pmu->type, sizeof(pmu->type));
		if (ret < 0)
			return ret;

		ret = do_write_string(fd, pmu->name);
		if (ret < 0)
			return ret;
	}

	if (pwrite(fd, &pmu_num, sizeof(pmu_num), offset) != sizeof(pmu_num)) {
		/* discard all */
		lseek(fd, offset, SEEK_SET);
		return -1;
	}

	return 0;
}

/*
 * File format:
 *
 * struct group_descs {
 *	u32	nr_groups;
 *	struct group_desc {
 *		char	name[];
 *		u32	leader_idx;
 *		u32	nr_members;
 *	}[nr_groups];
 * };
 */
static int write_group_desc(int fd, struct perf_header *h __maybe_unused,
			    struct perf_evlist *evlist)
{
	u32 nr_groups = evlist->nr_groups;
	struct perf_evsel *evsel;
	int ret;

	ret = do_write(fd, &nr_groups, sizeof(nr_groups));
	if (ret < 0)
		return ret;

	evlist__for_each_entry(evlist, evsel) {
		if (perf_evsel__is_group_leader(evsel) &&
		    evsel->nr_members > 1) {
			const char *name = evsel->group_name ?: "{anon_group}";
			u32 leader_idx = evsel->idx;
			u32 nr_members = evsel->nr_members;

			ret = do_write_string(fd, name);
			if (ret < 0)
				return ret;

			ret = do_write(fd, &leader_idx, sizeof(leader_idx));
			if (ret < 0)
				return ret;

			ret = do_write(fd, &nr_members, sizeof(nr_members));
			if (ret < 0)
				return ret;
		}
	}
	return 0;
}

/*
 * default get_cpuid(): nothing gets recorded
 * actual implementation must be in arch/$(ARCH)/util/header.c
 */
int __weak get_cpuid(char *buffer __maybe_unused, size_t sz __maybe_unused)
{
	return -1;
}

static int write_cpuid(int fd, struct perf_header *h __maybe_unused,
		       struct perf_evlist *evlist __maybe_unused)
{
	char buffer[64];
	int ret;

	ret = get_cpuid(buffer, sizeof(buffer));
	if (!ret)
		goto write_it;

	return -1;
write_it:
	return do_write_string(fd, buffer);
}

static int write_branch_stack(int fd __maybe_unused,
			      struct perf_header *h __maybe_unused,
			      struct perf_evlist *evlist __maybe_unused)
{
	return 0;
}

static int write_auxtrace(int fd, struct perf_header *h,
			  struct perf_evlist *evlist __maybe_unused)
{
	struct perf_session *session;
	int err;

	session = container_of(h, struct perf_session, header);

	err = auxtrace_index__write(fd, &session->auxtrace_index);
	if (err < 0)
		pr_err("Failed to write auxtrace index\n");
	return err;
}

static int cpu_cache_level__sort(const void *a, const void *b)
{
	struct cpu_cache_level *cache_a = (struct cpu_cache_level *)a;
	struct cpu_cache_level *cache_b = (struct cpu_cache_level *)b;

	return cache_a->level - cache_b->level;
}

static bool cpu_cache_level__cmp(struct cpu_cache_level *a, struct cpu_cache_level *b)
{
	if (a->level != b->level)
		return false;

	if (a->line_size != b->line_size)
		return false;

	if (a->sets != b->sets)
		return false;

	if (a->ways != b->ways)
		return false;

	if (strcmp(a->type, b->type))
		return false;

	if (strcmp(a->size, b->size))
		return false;

	if (strcmp(a->map, b->map))
		return false;

	return true;
}

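/*
 * Read one cache level's attributes for a CPU from
 * /sys/devices/system/cpu/cpuN/cache/indexM/.  Returns 1 if the index
 * does not exist, 0 on success and -1 on error.
 */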
static int cpu_cache_level__read(struct cpu_cache_level *cache, u32 cpu, u16 level)
{
	char path[PATH_MAX], file[PATH_MAX];
	struct stat st;
	size_t len;

	scnprintf(path, PATH_MAX, "devices/system/cpu/cpu%d/cache/index%d/", cpu, level);
	scnprintf(file, PATH_MAX, "%s/%s", sysfs__mountpoint(), path);

	if (stat(file, &st))
		return 1;

	scnprintf(file, PATH_MAX, "%s/level", path);
	if (sysfs__read_int(file, (int *) &cache->level))
		return -1;

	scnprintf(file, PATH_MAX, "%s/coherency_line_size", path);
	if (sysfs__read_int(file, (int *) &cache->line_size))
		return -1;

	scnprintf(file, PATH_MAX, "%s/number_of_sets", path);
	if (sysfs__read_int(file, (int *) &cache->sets))
		return -1;

	scnprintf(file, PATH_MAX, "%s/ways_of_associativity", path);
	if (sysfs__read_int(file, (int *) &cache->ways))
		return -1;

	scnprintf(file, PATH_MAX, "%s/type", path);
	if (sysfs__read_str(file, &cache->type, &len))
		return -1;

	cache->type[len] = 0;
	cache->type = rtrim(cache->type);

	scnprintf(file, PATH_MAX, "%s/size", path);
	if (sysfs__read_str(file, &cache->size, &len)) {
		free(cache->type);
		return -1;
	}

	cache->size[len] = 0;
	cache->size = rtrim(cache->size);

	scnprintf(file, PATH_MAX, "%s/shared_cpu_list", path);
	if (sysfs__read_str(file, &cache->map, &len)) {
		free(cache->map);
		free(cache->type);
		return -1;
	}

	cache->map[len] = 0;
	cache->map = rtrim(cache->map);
	return 0;
}

static void cpu_cache_level__fprintf(FILE *out, struct cpu_cache_level *c)
{
	fprintf(out, "L%d %-15s %8s [%s]\n", c->level, c->type, c->size, c->map);
}

static int build_caches(struct cpu_cache_level caches[], u32 size, u32 *cntp)
{
	u32 i, cnt = 0;
	long ncpus;
	u32 nr, cpu;
	u16 level;

	ncpus = sysconf(_SC_NPROCESSORS_CONF);
	if (ncpus < 0)
		return -1;

	nr = (u32)(ncpus & UINT_MAX);

	for (cpu = 0; cpu < nr; cpu++) {
		for (level = 0; level < 10; level++) {
			struct cpu_cache_level c;
			int err;

			err = cpu_cache_level__read(&c, cpu, level);
			if (err < 0)
				return err;

			if (err == 1)
				break;

			for (i = 0; i < cnt; i++) {
				if (cpu_cache_level__cmp(&c, &caches[i]))
					break;
			}

			if (i == cnt)
				caches[cnt++] = c;
			else
				cpu_cache_level__free(&c);

			if (WARN_ONCE(cnt == size, "way too many cpu caches.."))
				goto out;
		}
	}
 out:
	*cntp = cnt;
	return 0;
}

#define MAX_CACHES 2000

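/*
 * HEADER_CACHE layout: u32 version, u32 entry count, then for each
 * unique cache level four u32 fields (level, line_size, sets, ways)
 * followed by the type, size and map strings.
 */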
static int write_cache(int fd, struct perf_header *h __maybe_unused,
		       struct perf_evlist *evlist __maybe_unused)
{
	struct cpu_cache_level caches[MAX_CACHES];
	u32 cnt = 0, i, version = 1;
	int ret;

	ret = build_caches(caches, MAX_CACHES, &cnt);
	if (ret)
		goto out;

	qsort(&caches, cnt, sizeof(struct cpu_cache_level), cpu_cache_level__sort);

	ret = do_write(fd, &version, sizeof(u32));
	if (ret < 0)
		goto out;

	ret = do_write(fd, &cnt, sizeof(u32));
	if (ret < 0)
		goto out;

	for (i = 0; i < cnt; i++) {
		struct cpu_cache_level *c = &caches[i];

		#define _W(v)					\
			ret = do_write(fd, &c->v, sizeof(u32));	\
			if (ret < 0)				\
				goto out;

		_W(level)
		_W(line_size)
		_W(sets)
		_W(ways)
		#undef _W

		#define _W(v)						\
			ret = do_write_string(fd, (const char *) c->v);	\
			if (ret < 0)					\
				goto out;

		_W(type)
		_W(size)
		_W(map)
		#undef _W
	}

out:
	for (i = 0; i < cnt; i++)
		cpu_cache_level__free(&caches[i]);
	return ret;
}

static int write_stat(int fd __maybe_unused,
		      struct perf_header *h __maybe_unused,
		      struct perf_evlist *evlist __maybe_unused)
{
	return 0;
}

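/*
 * Per-feature printers: each print_*() routine dumps one header feature
 * in human-readable form to the given FILE, using data already parsed
 * into perf_header->env.
 */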
static void print_hostname(struct perf_header *ph, int fd __maybe_unused,
			   FILE *fp)
{
	fprintf(fp, "# hostname : %s\n", ph->env.hostname);
}

static void print_osrelease(struct perf_header *ph, int fd __maybe_unused,
			    FILE *fp)
{
	fprintf(fp, "# os release : %s\n", ph->env.os_release);
}

static void print_arch(struct perf_header *ph, int fd __maybe_unused, FILE *fp)
{
	fprintf(fp, "# arch : %s\n", ph->env.arch);
}

static void print_cpudesc(struct perf_header *ph, int fd __maybe_unused,
			  FILE *fp)
{
	fprintf(fp, "# cpudesc : %s\n", ph->env.cpu_desc);
}

static void print_nrcpus(struct perf_header *ph, int fd __maybe_unused,
			 FILE *fp)
{
	fprintf(fp, "# nrcpus online : %u\n", ph->env.nr_cpus_online);
	fprintf(fp, "# nrcpus avail : %u\n", ph->env.nr_cpus_avail);
}

static void print_version(struct perf_header *ph, int fd __maybe_unused,
			  FILE *fp)
{
	fprintf(fp, "# perf version : %s\n", ph->env.version);
}

static void print_cmdline(struct perf_header *ph, int fd __maybe_unused,
			  FILE *fp)
{
	int nr, i;

	nr = ph->env.nr_cmdline;

	fprintf(fp, "# cmdline : ");

	for (i = 0; i < nr; i++)
		fprintf(fp, "%s ", ph->env.cmdline_argv[i]);
	fputc('\n', fp);
}

static void print_cpu_topology(struct perf_header *ph, int fd __maybe_unused,
			       FILE *fp)
{
	int nr, i;
	char *str;
	int cpu_nr = ph->env.nr_cpus_avail;

	nr = ph->env.nr_sibling_cores;
	str = ph->env.sibling_cores;

	for (i = 0; i < nr; i++) {
		fprintf(fp, "# sibling cores : %s\n", str);
		str += strlen(str) + 1;
	}

	nr = ph->env.nr_sibling_threads;
	str = ph->env.sibling_threads;

	for (i = 0; i < nr; i++) {
		fprintf(fp, "# sibling threads : %s\n", str);
		str += strlen(str) + 1;
	}

	if (ph->env.cpu != NULL) {
		for (i = 0; i < cpu_nr; i++)
			fprintf(fp, "# CPU %d: Core ID %d, Socket ID %d\n", i,
				ph->env.cpu[i].core_id, ph->env.cpu[i].socket_id);
	} else
		fprintf(fp, "# Core ID and Socket ID information is not available\n");
}

static void free_event_desc(struct perf_evsel *events)
{
	struct perf_evsel *evsel;

	if (!events)
		return;

	for (evsel = events; evsel->attr.size; evsel++) {
		zfree(&evsel->name);
		zfree(&evsel->id);
	}

	free(events);
}

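/*
 * Parse the HEADER_EVENT_DESC payload back into a temporary array of
 * perf_evsel entries, terminated by an entry with attr.size == 0.
 * Returns NULL on any read or allocation failure.
 */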
static struct perf_evsel *
read_event_desc(struct perf_header *ph, int fd)
{
	struct perf_evsel *evsel, *events = NULL;
	u64 *id;
	void *buf = NULL;
	u32 nre, sz, nr, i, j;
	ssize_t ret;
	size_t msz;

	/* number of events */
	ret = readn(fd, &nre, sizeof(nre));
	if (ret != (ssize_t)sizeof(nre))
		goto error;

	if (ph->needs_swap)
		nre = bswap_32(nre);

	ret = readn(fd, &sz, sizeof(sz));
	if (ret != (ssize_t)sizeof(sz))
		goto error;

	if (ph->needs_swap)
		sz = bswap_32(sz);

	/* buffer to hold on file attr struct */
	buf = malloc(sz);
	if (!buf)
		goto error;

	/* the last event terminates with evsel->attr.size == 0: */
	events = calloc(nre + 1, sizeof(*events));
	if (!events)
		goto error;

	msz = sizeof(evsel->attr);
	if (sz < msz)
		msz = sz;

	for (i = 0, evsel = events; i < nre; evsel++, i++) {
		evsel->idx = i;

		/*
		 * must read entire on-file attr struct to
		 * sync up with layout.
		 */
		ret = readn(fd, buf, sz);
		if (ret != (ssize_t)sz)
			goto error;

		if (ph->needs_swap)
			perf_event__attr_swap(buf);

		memcpy(&evsel->attr, buf, msz);

		ret = readn(fd, &nr, sizeof(nr));
		if (ret != (ssize_t)sizeof(nr))
			goto error;

		if (ph->needs_swap) {
			nr = bswap_32(nr);
			evsel->needs_swap = true;
		}

		evsel->name = do_read_string(fd, ph);

		if (!nr)
			continue;

		id = calloc(nr, sizeof(*id));
		if (!id)
			goto error;
		evsel->ids = nr;
		evsel->id = id;

		for (j = 0 ; j < nr; j++) {
			ret = readn(fd, id, sizeof(*id));
			if (ret != (ssize_t)sizeof(*id))
				goto error;
			if (ph->needs_swap)
				*id = bswap_64(*id);
			id++;
		}
	}
out:
	free(buf);
	return events;
error:
	free_event_desc(events);
	events = NULL;
	goto out;
}

static int __desc_attr__fprintf(FILE *fp, const char *name, const char *val,
				void *priv __attribute__((unused)))
{
	return fprintf(fp, ", %s = %s", name, val);
}

static void print_event_desc(struct perf_header *ph, int fd, FILE *fp)
{
	struct perf_evsel *evsel, *events = read_event_desc(ph, fd);
	u32 j;
	u64 *id;

	if (!events) {
		fprintf(fp, "# event desc: not available or unable to read\n");
		return;
	}

	for (evsel = events; evsel->attr.size; evsel++) {
		fprintf(fp, "# event : name = %s, ", evsel->name);

		if (evsel->ids) {
			fprintf(fp, ", id = {");
			for (j = 0, id = evsel->id; j < evsel->ids; j++, id++) {
				if (j)
					fputc(',', fp);
				fprintf(fp, " %"PRIu64, *id);
			}
			fprintf(fp, " }");
		}

		perf_event_attr__fprintf(fp, &evsel->attr, __desc_attr__fprintf, NULL);

		fputc('\n', fp);
	}

	free_event_desc(events);
}

static void print_total_mem(struct perf_header *ph, int fd __maybe_unused,
			    FILE *fp)
{
	fprintf(fp, "# total memory : %Lu kB\n", ph->env.total_mem);
}

static void print_numa_topology(struct perf_header *ph, int fd __maybe_unused,
				FILE *fp)
{
	int i;
	struct numa_node *n;

	for (i = 0; i < ph->env.nr_numa_nodes; i++) {
		n = &ph->env.numa_nodes[i];

		fprintf(fp, "# node%u meminfo : total = %"PRIu64" kB,"
			    " free = %"PRIu64" kB\n",
			n->node, n->mem_total, n->mem_free);

		fprintf(fp, "# node%u cpu list : ", n->node);
		cpu_map__fprintf(n->map, fp);
	}
}

static void print_cpuid(struct perf_header *ph, int fd __maybe_unused, FILE *fp)
{
	fprintf(fp, "# cpuid : %s\n", ph->env.cpuid);
}

static void print_branch_stack(struct perf_header *ph __maybe_unused,
			       int fd __maybe_unused, FILE *fp)
{
	fprintf(fp, "# contains samples with branch stack\n");
}

static void print_auxtrace(struct perf_header *ph __maybe_unused,
			   int fd __maybe_unused, FILE *fp)
{
	fprintf(fp, "# contains AUX area data (e.g. instruction trace)\n");
}

static void print_stat(struct perf_header *ph __maybe_unused,
		       int fd __maybe_unused, FILE *fp)
{
	fprintf(fp, "# contains stat data\n");
}

static void print_cache(struct perf_header *ph __maybe_unused,
			int fd __maybe_unused, FILE *fp __maybe_unused)
{
	int i;

	fprintf(fp, "# CPU cache info:\n");
	for (i = 0; i < ph->env.caches_cnt; i++) {
		fprintf(fp, "# ");
		cpu_cache_level__fprintf(fp, &ph->env.caches[i]);
	}
}

static void print_pmu_mappings(struct perf_header *ph, int fd __maybe_unused,
			       FILE *fp)
{
	const char *delimiter = "# pmu mappings: ";
	char *str, *tmp;
	u32 pmu_num;
	u32 type;

	pmu_num = ph->env.nr_pmu_mappings;
	if (!pmu_num) {
		fprintf(fp, "# pmu mappings: not available\n");
		return;
	}

	str = ph->env.pmu_mappings;

	while (pmu_num) {
		type = strtoul(str, &tmp, 0);
		if (*tmp != ':')
			goto error;

		str = tmp + 1;
		fprintf(fp, "%s%s = %" PRIu32, delimiter, str, type);

		delimiter = ", ";
		str += strlen(str) + 1;
		pmu_num--;
	}

	fprintf(fp, "\n");

	if (!pmu_num)
		return;
error:
	fprintf(fp, "# pmu mappings: unable to read\n");
}

static void print_group_desc(struct perf_header *ph, int fd __maybe_unused,
			     FILE *fp)
{
	struct perf_session *session;
	struct perf_evsel *evsel;
	u32 nr = 0;

	session = container_of(ph, struct perf_session, header);

	evlist__for_each_entry(session->evlist, evsel) {
		if (perf_evsel__is_group_leader(evsel) &&
		    evsel->nr_members > 1) {
			fprintf(fp, "# group: %s{%s", evsel->group_name ?: "",
				perf_evsel__name(evsel));

			nr = evsel->nr_members - 1;
		} else if (nr) {
			fprintf(fp, ",%s", perf_evsel__name(evsel));

			if (--nr == 0)
				fprintf(fp, "}\n");
		}
	}
}

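/*
 * Look up (or create) the DSO named by a build-id event in the right
 * machine and stamp it with the build id carried in the event.
 */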
static int __event_process_build_id(struct build_id_event *bev,
				    char *filename,
				    struct perf_session *session)
{
	int err = -1;
	struct machine *machine;
	u16 cpumode;
	struct dso *dso;
	enum dso_kernel_type dso_type;

	machine = perf_session__findnew_machine(session, bev->pid);
	if (!machine)
		goto out;

	cpumode = bev->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;

	switch (cpumode) {
	case PERF_RECORD_MISC_KERNEL:
		dso_type = DSO_TYPE_KERNEL;
		break;
	case PERF_RECORD_MISC_GUEST_KERNEL:
		dso_type = DSO_TYPE_GUEST_KERNEL;
		break;
	case PERF_RECORD_MISC_USER:
	case PERF_RECORD_MISC_GUEST_USER:
		dso_type = DSO_TYPE_USER;
		break;
	default:
		goto out;
	}

	dso = machine__findnew_dso(machine, filename);
	if (dso != NULL) {
		char sbuild_id[SBUILD_ID_SIZE];

		dso__set_build_id(dso, &bev->build_id);

		if (!is_kernel_module(filename, cpumode))
			dso->kernel = dso_type;

		build_id__sprintf(dso->build_id, sizeof(dso->build_id),
				  sbuild_id);
		pr_debug("build id event received for %s: %s\n",
			 dso->long_name, sbuild_id);
		dso__put(dso);
	}

	err = 0;
out:
	return err;
}

static int perf_header__read_build_ids_abi_quirk(struct perf_header *header,
						 int input, u64 offset, u64 size)
{
	struct perf_session *session = container_of(header, struct perf_session, header);
	struct {
		struct perf_event_header	header;
		u8				build_id[PERF_ALIGN(BUILD_ID_SIZE, sizeof(u64))];
		char				filename[0];
	} old_bev;
	struct build_id_event bev;
	char filename[PATH_MAX];
	u64 limit = offset + size;

	while (offset < limit) {
		ssize_t len;

		if (readn(input, &old_bev, sizeof(old_bev)) != sizeof(old_bev))
			return -1;

		if (header->needs_swap)
			perf_event_header__bswap(&old_bev.header);

		len = old_bev.header.size - sizeof(old_bev);
		if (readn(input, filename, len) != len)
			return -1;

		bev.header = old_bev.header;

		/*
		 * As the pid is the missing value, we need to fill
		 * it properly. The header.misc value gives us a nice hint.
		 */
		bev.pid = HOST_KERNEL_ID;
		if (bev.header.misc == PERF_RECORD_MISC_GUEST_USER ||
		    bev.header.misc == PERF_RECORD_MISC_GUEST_KERNEL)
			bev.pid = DEFAULT_GUEST_KERNEL_ID;

		memcpy(bev.build_id, old_bev.build_id, sizeof(bev.build_id));
		__event_process_build_id(&bev, filename, session);

		offset += bev.header.size;
	}

	return 0;
}

static int perf_header__read_build_ids(struct perf_header *header,
				       int input, u64 offset, u64 size)
{
	struct perf_session *session = container_of(header, struct perf_session, header);
	struct build_id_event bev;
	char filename[PATH_MAX];
	u64 limit = offset + size, orig_offset = offset;
	int err = -1;

	while (offset < limit) {
		ssize_t len;

		if (readn(input, &bev, sizeof(bev)) != sizeof(bev))
			goto out;

		if (header->needs_swap)
			perf_event_header__bswap(&bev.header);

		len = bev.header.size - sizeof(bev);
		if (readn(input, filename, len) != len)
			goto out;
		/*
		 * The a1645ce1 changeset:
		 *
		 * "perf: 'perf kvm' tool for monitoring guest performance from host"
		 *
		 * Added a field to struct build_id_event that broke the file
		 * format.
		 *
		 * Since the kernel build-id is the first entry, process the
		 * table using the old format if the well known
		 * '[kernel.kallsyms]' string for the kernel build-id has the
		 * first 4 characters chopped off (where the pid_t sits).
		 */
		if (memcmp(filename, "nel.kallsyms]", 13) == 0) {
			if (lseek(input, orig_offset, SEEK_SET) == (off_t)-1)
				return -1;
			return perf_header__read_build_ids_abi_quirk(header, input, offset, size);
		}

		__event_process_build_id(&bev, filename, session);

		offset += bev.header.size;
	}
	err = 0;
out:
	return err;
}

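/*
 * Per-feature readers: each process_*() handler parses one header
 * feature section and stores the result in the session's perf_env.
 */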
Namhyung Kim3d7eb862012-09-24 17:15:01 +09001582static int process_tracing_data(struct perf_file_section *section __maybe_unused,
1583 struct perf_header *ph __maybe_unused,
1584 int fd, void *data)
Robert Richterf1c67db2012-02-10 15:41:56 +01001585{
Namhyung Kim3dce2ce2013-03-21 16:18:48 +09001586 ssize_t ret = trace_report(fd, data, false);
1587 return ret < 0 ? -1 : 0;
Robert Richterf1c67db2012-02-10 15:41:56 +01001588}
1589
1590static int process_build_id(struct perf_file_section *section,
Namhyung Kim3d7eb862012-09-24 17:15:01 +09001591 struct perf_header *ph, int fd,
Irina Tirdea1d037ca2012-09-11 01:15:03 +03001592 void *data __maybe_unused)
Robert Richterf1c67db2012-02-10 15:41:56 +01001593{
1594 if (perf_header__read_build_ids(ph, fd, section->offset, section->size))
1595 pr_debug("Failed to read buildids, continuing...\n");
1596 return 0;
1597}
1598
Namhyung Kima1ae5652012-09-24 17:14:59 +09001599static int process_hostname(struct perf_file_section *section __maybe_unused,
Namhyung Kim3d7eb862012-09-24 17:15:01 +09001600 struct perf_header *ph, int fd,
1601 void *data __maybe_unused)
Namhyung Kima1ae5652012-09-24 17:14:59 +09001602{
1603 ph->env.hostname = do_read_string(fd, ph);
1604 return ph->env.hostname ? 0 : -ENOMEM;
1605}
1606
1607static int process_osrelease(struct perf_file_section *section __maybe_unused,
Namhyung Kim3d7eb862012-09-24 17:15:01 +09001608 struct perf_header *ph, int fd,
1609 void *data __maybe_unused)
Namhyung Kima1ae5652012-09-24 17:14:59 +09001610{
1611 ph->env.os_release = do_read_string(fd, ph);
1612 return ph->env.os_release ? 0 : -ENOMEM;
1613}
1614
1615static int process_version(struct perf_file_section *section __maybe_unused,
Namhyung Kim3d7eb862012-09-24 17:15:01 +09001616 struct perf_header *ph, int fd,
1617 void *data __maybe_unused)
Namhyung Kima1ae5652012-09-24 17:14:59 +09001618{
1619 ph->env.version = do_read_string(fd, ph);
1620 return ph->env.version ? 0 : -ENOMEM;
1621}
1622
1623static int process_arch(struct perf_file_section *section __maybe_unused,
Namhyung Kim3d7eb862012-09-24 17:15:01 +09001624 struct perf_header *ph, int fd,
1625 void *data __maybe_unused)
Namhyung Kima1ae5652012-09-24 17:14:59 +09001626{
1627 ph->env.arch = do_read_string(fd, ph);
1628 return ph->env.arch ? 0 : -ENOMEM;
1629}
1630
1631static int process_nrcpus(struct perf_file_section *section __maybe_unused,
Namhyung Kim3d7eb862012-09-24 17:15:01 +09001632 struct perf_header *ph, int fd,
1633 void *data __maybe_unused)
Namhyung Kima1ae5652012-09-24 17:14:59 +09001634{
Jiri Olsa727ebd52013-11-28 11:30:14 +01001635 ssize_t ret;
Namhyung Kima1ae5652012-09-24 17:14:59 +09001636 u32 nr;
1637
Namhyung Kim5323f602012-12-17 15:38:54 +09001638 ret = readn(fd, &nr, sizeof(nr));
Namhyung Kima1ae5652012-09-24 17:14:59 +09001639 if (ret != sizeof(nr))
1640 return -1;
1641
1642 if (ph->needs_swap)
1643 nr = bswap_32(nr);
1644
Arnaldo Carvalho de Melocaa47042015-09-11 12:36:12 -03001645 ph->env.nr_cpus_avail = nr;
Namhyung Kima1ae5652012-09-24 17:14:59 +09001646
Namhyung Kim5323f602012-12-17 15:38:54 +09001647 ret = readn(fd, &nr, sizeof(nr));
Namhyung Kima1ae5652012-09-24 17:14:59 +09001648 if (ret != sizeof(nr))
1649 return -1;
1650
1651 if (ph->needs_swap)
1652 nr = bswap_32(nr);
1653
Arnaldo Carvalho de Melocaa47042015-09-11 12:36:12 -03001654 ph->env.nr_cpus_online = nr;
Namhyung Kima1ae5652012-09-24 17:14:59 +09001655 return 0;
1656}
1657
1658static int process_cpudesc(struct perf_file_section *section __maybe_unused,
Namhyung Kim3d7eb862012-09-24 17:15:01 +09001659 struct perf_header *ph, int fd,
1660 void *data __maybe_unused)
Namhyung Kima1ae5652012-09-24 17:14:59 +09001661{
1662 ph->env.cpu_desc = do_read_string(fd, ph);
1663 return ph->env.cpu_desc ? 0 : -ENOMEM;
1664}
1665
1666static int process_cpuid(struct perf_file_section *section __maybe_unused,
Namhyung Kim3d7eb862012-09-24 17:15:01 +09001667 struct perf_header *ph, int fd,
1668 void *data __maybe_unused)
Namhyung Kima1ae5652012-09-24 17:14:59 +09001669{
1670 ph->env.cpuid = do_read_string(fd, ph);
1671 return ph->env.cpuid ? 0 : -ENOMEM;
1672}
1673
1674static int process_total_mem(struct perf_file_section *section __maybe_unused,
Namhyung Kim3d7eb862012-09-24 17:15:01 +09001675 struct perf_header *ph, int fd,
1676 void *data __maybe_unused)
Namhyung Kima1ae5652012-09-24 17:14:59 +09001677{
1678 uint64_t mem;
Jiri Olsa727ebd52013-11-28 11:30:14 +01001679 ssize_t ret;
Namhyung Kima1ae5652012-09-24 17:14:59 +09001680
Namhyung Kim5323f602012-12-17 15:38:54 +09001681 ret = readn(fd, &mem, sizeof(mem));
Namhyung Kima1ae5652012-09-24 17:14:59 +09001682 if (ret != sizeof(mem))
1683 return -1;
1684
1685 if (ph->needs_swap)
1686 mem = bswap_64(mem);
1687
1688 ph->env.total_mem = mem;
1689 return 0;
1690}
1691
Robert Richter7c2f7af2012-08-16 21:10:23 +02001692static struct perf_evsel *
1693perf_evlist__find_by_index(struct perf_evlist *evlist, int idx)
1694{
1695 struct perf_evsel *evsel;
1696
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03001697 evlist__for_each_entry(evlist, evsel) {
Robert Richter7c2f7af2012-08-16 21:10:23 +02001698 if (evsel->idx == idx)
1699 return evsel;
1700 }
1701
1702 return NULL;
1703}
1704
1705static void
Namhyung Kim3d7eb862012-09-24 17:15:01 +09001706perf_evlist__set_event_name(struct perf_evlist *evlist,
1707 struct perf_evsel *event)
Robert Richter7c2f7af2012-08-16 21:10:23 +02001708{
1709 struct perf_evsel *evsel;
1710
1711 if (!event->name)
1712 return;
1713
1714 evsel = perf_evlist__find_by_index(evlist, event->idx);
1715 if (!evsel)
1716 return;
1717
1718 if (evsel->name)
1719 return;
1720
1721 evsel->name = strdup(event->name);
1722}
1723
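/*
 * The EVENT_DESC feature carries the event names written at record time;
 * use them to fill in names for evsels that were read back without one.
 */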
1724static int
Irina Tirdea1d037ca2012-09-11 01:15:03 +03001725process_event_desc(struct perf_file_section *section __maybe_unused,
Namhyung Kim3d7eb862012-09-24 17:15:01 +09001726 struct perf_header *header, int fd,
Irina Tirdea1d037ca2012-09-11 01:15:03 +03001727 void *data __maybe_unused)
Robert Richter7c2f7af2012-08-16 21:10:23 +02001728{
Namhyung Kim3d7eb862012-09-24 17:15:01 +09001729 struct perf_session *session;
Robert Richter7c2f7af2012-08-16 21:10:23 +02001730 struct perf_evsel *evsel, *events = read_event_desc(header, fd);
1731
1732 if (!events)
1733 return 0;
1734
Namhyung Kim3d7eb862012-09-24 17:15:01 +09001735 session = container_of(header, struct perf_session, header);
Robert Richter7c2f7af2012-08-16 21:10:23 +02001736 for (evsel = events; evsel->attr.size; evsel++)
1737 perf_evlist__set_event_name(session->evlist, evsel);
1738
1739 free_event_desc(events);
1740
1741 return 0;
1742}
1743
Jiri Olsa768dd3f2015-07-21 14:31:31 +02001744static int process_cmdline(struct perf_file_section *section,
Namhyung Kim3d7eb862012-09-24 17:15:01 +09001745 struct perf_header *ph, int fd,
1746 void *data __maybe_unused)
Namhyung Kima1ae5652012-09-24 17:14:59 +09001747{
Jiri Olsa727ebd52013-11-28 11:30:14 +01001748 ssize_t ret;
Jiri Olsa768dd3f2015-07-21 14:31:31 +02001749 char *str, *cmdline = NULL, **argv = NULL;
1750 u32 nr, i, len = 0;
Namhyung Kima1ae5652012-09-24 17:14:59 +09001751
Namhyung Kim5323f602012-12-17 15:38:54 +09001752 ret = readn(fd, &nr, sizeof(nr));
Namhyung Kima1ae5652012-09-24 17:14:59 +09001753 if (ret != sizeof(nr))
1754 return -1;
1755
1756 if (ph->needs_swap)
1757 nr = bswap_32(nr);
1758
1759 ph->env.nr_cmdline = nr;
Jiri Olsa768dd3f2015-07-21 14:31:31 +02001760
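	/*
	 * Copy the command line strings back to back into one buffer and let
	 * cmdline_argv[] point at the start of each string within it, so the
	 * whole thing needs only two allocations.
	 */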
1761 cmdline = zalloc(section->size + nr + 1);
1762 if (!cmdline)
1763 return -1;
1764
1765 argv = zalloc(sizeof(char *) * (nr + 1));
1766 if (!argv)
1767 goto error;
Namhyung Kima1ae5652012-09-24 17:14:59 +09001768
1769 for (i = 0; i < nr; i++) {
1770 str = do_read_string(fd, ph);
1771 if (!str)
1772 goto error;
1773
Jiri Olsa768dd3f2015-07-21 14:31:31 +02001774 argv[i] = cmdline + len;
1775 memcpy(argv[i], str, strlen(str) + 1);
1776 len += strlen(str) + 1;
Namhyung Kima1ae5652012-09-24 17:14:59 +09001777 free(str);
1778 }
Jiri Olsa768dd3f2015-07-21 14:31:31 +02001779 ph->env.cmdline = cmdline;
1780 ph->env.cmdline_argv = (const char **) argv;
Namhyung Kima1ae5652012-09-24 17:14:59 +09001781 return 0;
1782
1783error:
Jiri Olsa768dd3f2015-07-21 14:31:31 +02001784 free(argv);
1785 free(cmdline);
Namhyung Kima1ae5652012-09-24 17:14:59 +09001786 return -1;
1787}
1788
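/*
 * CPU_TOPOLOGY layout: a u32 count followed by that many core sibling
 * list strings, a u32 count followed by the thread sibling list strings,
 * and, in newer files, a (core_id, socket_id) u32 pair per available CPU.
 */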
Kan Liang2bb00d22015-09-01 09:58:12 -04001789static int process_cpu_topology(struct perf_file_section *section,
Namhyung Kim3d7eb862012-09-24 17:15:01 +09001790 struct perf_header *ph, int fd,
1791 void *data __maybe_unused)
Namhyung Kima1ae5652012-09-24 17:14:59 +09001792{
Jiri Olsa727ebd52013-11-28 11:30:14 +01001793 ssize_t ret;
Namhyung Kima1ae5652012-09-24 17:14:59 +09001794 u32 nr, i;
1795 char *str;
1796 struct strbuf sb;
Jan Stancekda8a58b2017-02-17 12:10:26 +01001797 int cpu_nr = ph->env.nr_cpus_avail;
Kan Liang2bb00d22015-09-01 09:58:12 -04001798 u64 size = 0;
1799
1800 ph->env.cpu = calloc(cpu_nr, sizeof(*ph->env.cpu));
1801 if (!ph->env.cpu)
1802 return -1;
Namhyung Kima1ae5652012-09-24 17:14:59 +09001803
Namhyung Kim5323f602012-12-17 15:38:54 +09001804 ret = readn(fd, &nr, sizeof(nr));
Namhyung Kima1ae5652012-09-24 17:14:59 +09001805 if (ret != sizeof(nr))
Kan Liang2bb00d22015-09-01 09:58:12 -04001806 goto free_cpu;
Namhyung Kima1ae5652012-09-24 17:14:59 +09001807
1808 if (ph->needs_swap)
1809 nr = bswap_32(nr);
1810
1811 ph->env.nr_sibling_cores = nr;
Kan Liang2bb00d22015-09-01 09:58:12 -04001812 size += sizeof(u32);
Masami Hiramatsu642aada2016-05-10 14:47:35 +09001813 if (strbuf_init(&sb, 128) < 0)
1814 goto free_cpu;
Namhyung Kima1ae5652012-09-24 17:14:59 +09001815
1816 for (i = 0; i < nr; i++) {
1817 str = do_read_string(fd, ph);
1818 if (!str)
1819 goto error;
1820
1821 /* include a NULL character at the end */
Masami Hiramatsu642aada2016-05-10 14:47:35 +09001822 if (strbuf_add(&sb, str, strlen(str) + 1) < 0)
1823 goto error;
Kan Liang2bb00d22015-09-01 09:58:12 -04001824 size += string_size(str);
Namhyung Kima1ae5652012-09-24 17:14:59 +09001825 free(str);
1826 }
1827 ph->env.sibling_cores = strbuf_detach(&sb, NULL);
1828
Namhyung Kim5323f602012-12-17 15:38:54 +09001829 ret = readn(fd, &nr, sizeof(nr));
Namhyung Kima1ae5652012-09-24 17:14:59 +09001830 if (ret != sizeof(nr))
1831			goto free_cpu;
1832
1833 if (ph->needs_swap)
1834 nr = bswap_32(nr);
1835
1836 ph->env.nr_sibling_threads = nr;
Kan Liang2bb00d22015-09-01 09:58:12 -04001837 size += sizeof(u32);
Namhyung Kima1ae5652012-09-24 17:14:59 +09001838
1839 for (i = 0; i < nr; i++) {
1840 str = do_read_string(fd, ph);
1841 if (!str)
1842 goto error;
1843
1844 /* include a NULL character at the end */
Masami Hiramatsu642aada2016-05-10 14:47:35 +09001845 if (strbuf_add(&sb, str, strlen(str) + 1) < 0)
1846 goto error;
Kan Liang2bb00d22015-09-01 09:58:12 -04001847 size += string_size(str);
Namhyung Kima1ae5652012-09-24 17:14:59 +09001848 free(str);
1849 }
1850 ph->env.sibling_threads = strbuf_detach(&sb, NULL);
Kan Liang2bb00d22015-09-01 09:58:12 -04001851
1852 /*
1853 * The header may be from old perf,
1854 * which doesn't include core id and socket id information.
1855 */
1856 if (section->size <= size) {
1857 zfree(&ph->env.cpu);
1858 return 0;
1859 }
1860
1861 for (i = 0; i < (u32)cpu_nr; i++) {
1862 ret = readn(fd, &nr, sizeof(nr));
1863 if (ret != sizeof(nr))
1864 goto free_cpu;
1865
1866 if (ph->needs_swap)
1867 nr = bswap_32(nr);
1868
Kan Liang2bb00d22015-09-01 09:58:12 -04001869 ph->env.cpu[i].core_id = nr;
1870
1871 ret = readn(fd, &nr, sizeof(nr));
1872 if (ret != sizeof(nr))
1873 goto free_cpu;
1874
1875 if (ph->needs_swap)
1876 nr = bswap_32(nr);
1877
Jan Stancekda8a58b2017-02-17 12:10:26 +01001878 if (nr != (u32)-1 && nr > (u32)cpu_nr) {
Kan Liang2bb00d22015-09-01 09:58:12 -04001879			pr_debug("socket_id number is too big. "
1880				 "You may need to upgrade the perf tool.\n");
1881 goto free_cpu;
1882 }
1883
1884 ph->env.cpu[i].socket_id = nr;
1885 }
1886
Namhyung Kima1ae5652012-09-24 17:14:59 +09001887 return 0;
1888
1889error:
1890 strbuf_release(&sb);
Kan Liang2bb00d22015-09-01 09:58:12 -04001891free_cpu:
1892 zfree(&ph->env.cpu);
Namhyung Kima1ae5652012-09-24 17:14:59 +09001893 return -1;
1894}
1895
1896static int process_numa_topology(struct perf_file_section *section __maybe_unused,
Namhyung Kim3d7eb862012-09-24 17:15:01 +09001897 struct perf_header *ph, int fd,
1898 void *data __maybe_unused)
Namhyung Kima1ae5652012-09-24 17:14:59 +09001899{
Jiri Olsac60da222016-07-04 14:16:20 +02001900 struct numa_node *nodes, *n;
Jiri Olsa727ebd52013-11-28 11:30:14 +01001901 ssize_t ret;
Jiri Olsac60da222016-07-04 14:16:20 +02001902 u32 nr, i;
Namhyung Kima1ae5652012-09-24 17:14:59 +09001903 char *str;
Namhyung Kima1ae5652012-09-24 17:14:59 +09001904
1905 /* nr nodes */
Namhyung Kim5323f602012-12-17 15:38:54 +09001906 ret = readn(fd, &nr, sizeof(nr));
Namhyung Kima1ae5652012-09-24 17:14:59 +09001907 if (ret != sizeof(nr))
Masami Hiramatsu642aada2016-05-10 14:47:35 +09001908 return -1;
Namhyung Kima1ae5652012-09-24 17:14:59 +09001909
1910 if (ph->needs_swap)
1911 nr = bswap_32(nr);
1912
Jiri Olsac60da222016-07-04 14:16:20 +02001913 nodes = zalloc(sizeof(*nodes) * nr);
1914 if (!nodes)
1915 return -ENOMEM;
Namhyung Kima1ae5652012-09-24 17:14:59 +09001916
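	/*
	 * Each node entry is a u32 node number, u64 total and free memory,
	 * followed by a string holding the node's cpu list.
	 */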
1917 for (i = 0; i < nr; i++) {
Jiri Olsac60da222016-07-04 14:16:20 +02001918 n = &nodes[i];
1919
Namhyung Kima1ae5652012-09-24 17:14:59 +09001920 /* node number */
Jiri Olsac60da222016-07-04 14:16:20 +02001921 ret = readn(fd, &n->node, sizeof(u32));
1922 if (ret != sizeof(n->node))
Namhyung Kima1ae5652012-09-24 17:14:59 +09001923 goto error;
1924
Jiri Olsac60da222016-07-04 14:16:20 +02001925 ret = readn(fd, &n->mem_total, sizeof(u64));
Namhyung Kima1ae5652012-09-24 17:14:59 +09001926 if (ret != sizeof(u64))
1927 goto error;
1928
Jiri Olsac60da222016-07-04 14:16:20 +02001929 ret = readn(fd, &n->mem_free, sizeof(u64));
Namhyung Kima1ae5652012-09-24 17:14:59 +09001930 if (ret != sizeof(u64))
1931 goto error;
1932
1933 if (ph->needs_swap) {
Jiri Olsac60da222016-07-04 14:16:20 +02001934 n->node = bswap_32(n->node);
1935 n->mem_total = bswap_64(n->mem_total);
1936 n->mem_free = bswap_64(n->mem_free);
Namhyung Kima1ae5652012-09-24 17:14:59 +09001937 }
1938
Namhyung Kima1ae5652012-09-24 17:14:59 +09001939 str = do_read_string(fd, ph);
1940 if (!str)
1941 goto error;
1942
Jiri Olsac60da222016-07-04 14:16:20 +02001943		n->map = cpu_map__new(str);
1944		free(str);
1945		if (!n->map)
1946			goto error;
1948 }
Jiri Olsaf957a532016-10-10 09:56:32 +02001949 ph->env.nr_numa_nodes = nr;
Jiri Olsac60da222016-07-04 14:16:20 +02001950 ph->env.numa_nodes = nodes;
Namhyung Kima1ae5652012-09-24 17:14:59 +09001951 return 0;
1952
1953error:
Jiri Olsac60da222016-07-04 14:16:20 +02001954 free(nodes);
Namhyung Kima1ae5652012-09-24 17:14:59 +09001955 return -1;
1956}
1957
1958static int process_pmu_mappings(struct perf_file_section *section __maybe_unused,
Namhyung Kim3d7eb862012-09-24 17:15:01 +09001959 struct perf_header *ph, int fd,
1960 void *data __maybe_unused)
Namhyung Kima1ae5652012-09-24 17:14:59 +09001961{
Jiri Olsa727ebd52013-11-28 11:30:14 +01001962 ssize_t ret;
Namhyung Kima1ae5652012-09-24 17:14:59 +09001963 char *name;
1964 u32 pmu_num;
1965 u32 type;
1966 struct strbuf sb;
1967
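	/*
	 * The section is a u32 PMU count followed by (u32 type, name string)
	 * pairs; flatten them into NUL separated "type:name" entries.
	 */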
Namhyung Kim5323f602012-12-17 15:38:54 +09001968 ret = readn(fd, &pmu_num, sizeof(pmu_num));
Namhyung Kima1ae5652012-09-24 17:14:59 +09001969 if (ret != sizeof(pmu_num))
1970 return -1;
1971
1972 if (ph->needs_swap)
1973 pmu_num = bswap_32(pmu_num);
1974
1975 if (!pmu_num) {
1976 pr_debug("pmu mappings not available\n");
1977 return 0;
1978 }
1979
1980 ph->env.nr_pmu_mappings = pmu_num;
Masami Hiramatsu642aada2016-05-10 14:47:35 +09001981 if (strbuf_init(&sb, 128) < 0)
1982 return -1;
Namhyung Kima1ae5652012-09-24 17:14:59 +09001983
1984 while (pmu_num) {
Namhyung Kim5323f602012-12-17 15:38:54 +09001985 if (readn(fd, &type, sizeof(type)) != sizeof(type))
Namhyung Kima1ae5652012-09-24 17:14:59 +09001986 goto error;
1987 if (ph->needs_swap)
1988 type = bswap_32(type);
1989
1990 name = do_read_string(fd, ph);
1991 if (!name)
1992 goto error;
1993
Masami Hiramatsu642aada2016-05-10 14:47:35 +09001994 if (strbuf_addf(&sb, "%u:%s", type, name) < 0)
1995 goto error;
Namhyung Kima1ae5652012-09-24 17:14:59 +09001996 /* include a NULL character at the end */
Masami Hiramatsu642aada2016-05-10 14:47:35 +09001997 if (strbuf_add(&sb, "", 1) < 0)
1998 goto error;
Namhyung Kima1ae5652012-09-24 17:14:59 +09001999
Kan Liange0838e02015-09-10 11:03:05 -03002000 if (!strcmp(name, "msr"))
2001 ph->env.msr_pmu_type = type;
2002
Namhyung Kima1ae5652012-09-24 17:14:59 +09002003 free(name);
2004 pmu_num--;
2005 }
2006 ph->env.pmu_mappings = strbuf_detach(&sb, NULL);
2007 return 0;
2008
2009error:
2010 strbuf_release(&sb);
2011 return -1;
2012}
2013
Namhyung Kima8bb5592013-01-22 18:09:31 +09002014static int process_group_desc(struct perf_file_section *section __maybe_unused,
2015 struct perf_header *ph, int fd,
2016 void *data __maybe_unused)
2017{
2018	int ret = -1;
2019 u32 i, nr, nr_groups;
2020 struct perf_session *session;
2021 struct perf_evsel *evsel, *leader = NULL;
2022 struct group_desc {
2023 char *name;
2024 u32 leader_idx;
2025 u32 nr_members;
2026 } *desc;
2027
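	/*
	 * On disk: a u32 group count, then per group a name string, the
	 * leader's evsel index and the number of members; the leader/member
	 * links are rebuilt on the evlist below.
	 */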
2028 if (readn(fd, &nr_groups, sizeof(nr_groups)) != sizeof(nr_groups))
2029 return -1;
2030
2031 if (ph->needs_swap)
2032 nr_groups = bswap_32(nr_groups);
2033
2034 ph->env.nr_groups = nr_groups;
2035 if (!nr_groups) {
2036 pr_debug("group desc not available\n");
2037 return 0;
2038 }
2039
2040 desc = calloc(nr_groups, sizeof(*desc));
2041 if (!desc)
2042 return -1;
2043
2044 for (i = 0; i < nr_groups; i++) {
2045 desc[i].name = do_read_string(fd, ph);
2046 if (!desc[i].name)
2047 goto out_free;
2048
2049 if (readn(fd, &desc[i].leader_idx, sizeof(u32)) != sizeof(u32))
2050 goto out_free;
2051
2052 if (readn(fd, &desc[i].nr_members, sizeof(u32)) != sizeof(u32))
2053 goto out_free;
2054
2055 if (ph->needs_swap) {
2056 desc[i].leader_idx = bswap_32(desc[i].leader_idx);
2057 desc[i].nr_members = bswap_32(desc[i].nr_members);
2058 }
2059 }
2060
2061 /*
2062 * Rebuild group relationship based on the group_desc
2063 */
2064 session = container_of(ph, struct perf_session, header);
2065 session->evlist->nr_groups = nr_groups;
2066
2067 i = nr = 0;
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03002068 evlist__for_each_entry(session->evlist, evsel) {
Namhyung Kima8bb5592013-01-22 18:09:31 +09002069 if (evsel->idx == (int) desc[i].leader_idx) {
2070 evsel->leader = evsel;
2071 /* {anon_group} is a dummy name */
Namhyung Kim210e8122013-11-18 11:20:43 +09002072 if (strcmp(desc[i].name, "{anon_group}")) {
Namhyung Kima8bb5592013-01-22 18:09:31 +09002073 evsel->group_name = desc[i].name;
Namhyung Kim210e8122013-11-18 11:20:43 +09002074 desc[i].name = NULL;
2075 }
Namhyung Kima8bb5592013-01-22 18:09:31 +09002076 evsel->nr_members = desc[i].nr_members;
2077
2078 if (i >= nr_groups || nr > 0) {
2079 pr_debug("invalid group desc\n");
2080 goto out_free;
2081 }
2082
2083 leader = evsel;
2084 nr = evsel->nr_members - 1;
2085 i++;
2086 } else if (nr) {
2087 /* This is a group member */
2088 evsel->leader = leader;
2089
2090 nr--;
2091 }
2092 }
2093
2094 if (i != nr_groups || nr != 0) {
2095 pr_debug("invalid group desc\n");
2096 goto out_free;
2097 }
2098
2099 ret = 0;
2100out_free:
Namhyung Kim50a27402013-11-18 11:20:44 +09002101 for (i = 0; i < nr_groups; i++)
Arnaldo Carvalho de Melo74cf2492013-12-27 16:55:14 -03002102 zfree(&desc[i].name);
Namhyung Kima8bb5592013-01-22 18:09:31 +09002103 free(desc);
2104
2105 return ret;
2106}
2107
Adrian Hunter99fa2982015-04-30 17:37:25 +03002108static int process_auxtrace(struct perf_file_section *section,
2109 struct perf_header *ph, int fd,
2110 void *data __maybe_unused)
2111{
2112 struct perf_session *session;
2113 int err;
2114
2115 session = container_of(ph, struct perf_session, header);
2116
2117 err = auxtrace_index__process(fd, section->size, session,
2118 ph->needs_swap);
2119 if (err < 0)
2120 pr_err("Failed to process auxtrace index\n");
2121 return err;
2122}
2123
Jiri Olsa720e98b2016-02-16 16:01:43 +01002124static int process_cache(struct perf_file_section *section __maybe_unused,
2125			 struct perf_header *ph, int fd,
2126 void *data __maybe_unused)
2127{
2128 struct cpu_cache_level *caches;
2129 u32 cnt, i, version;
2130
2131 if (readn(fd, &version, sizeof(version)) != sizeof(version))
2132 return -1;
2133
2134 if (ph->needs_swap)
2135 version = bswap_32(version);
2136
2137 if (version != 1)
2138 return -1;
2139
2140 if (readn(fd, &cnt, sizeof(cnt)) != sizeof(cnt))
2141 return -1;
2142
2143 if (ph->needs_swap)
2144 cnt = bswap_32(cnt);
2145
2146 caches = zalloc(sizeof(*caches) * cnt);
2147 if (!caches)
2148 return -1;
2149
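	/*
	 * Each cache level entry is four u32 fields (level, line_size, sets,
	 * ways) followed by three strings (type, size, map), read by the
	 * _R() helpers below.
	 */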
2150 for (i = 0; i < cnt; i++) {
2151 struct cpu_cache_level c;
2152
2153 #define _R(v) \
2154 if (readn(fd, &c.v, sizeof(u32)) != sizeof(u32))\
2155 goto out_free_caches; \
2156 if (ph->needs_swap) \
2157 c.v = bswap_32(c.v); \
2158
2159 _R(level)
2160 _R(line_size)
2161 _R(sets)
2162 _R(ways)
2163 #undef _R
2164
2165 #define _R(v) \
2166 c.v = do_read_string(fd, ph); \
2167 if (!c.v) \
2168 goto out_free_caches;
2169
2170 _R(type)
2171 _R(size)
2172 _R(map)
2173 #undef _R
2174
2175 caches[i] = c;
2176 }
2177
2178 ph->env.caches = caches;
2179 ph->env.caches_cnt = cnt;
2180 return 0;
2181out_free_caches:
2182 free(caches);
2183 return -1;
2184}
2185
Stephane Eranianfbe96f22011-09-30 15:40:40 +02002186struct feature_ops {
2187 int (*write)(int fd, struct perf_header *h, struct perf_evlist *evlist);
2188 void (*print)(struct perf_header *h, int fd, FILE *fp);
Robert Richterf1c67db2012-02-10 15:41:56 +01002189 int (*process)(struct perf_file_section *section,
Namhyung Kim3d7eb862012-09-24 17:15:01 +09002190 struct perf_header *h, int fd, void *data);
Stephane Eranianfbe96f22011-09-30 15:40:40 +02002191 const char *name;
2192 bool full_only;
2193};
2194
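/*
 * FEAT_OPA: write/print only, FEAT_OPP: also has a ->process handler,
 * FEAT_OPF: like FEAT_OPP but printed only in the full (-I) header listing.
 */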
Robert Richter8cdfa782011-12-07 10:02:56 +01002195#define FEAT_OPA(n, func) \
2196 [n] = { .name = #n, .write = write_##func, .print = print_##func }
Robert Richterf1c67db2012-02-10 15:41:56 +01002197#define FEAT_OPP(n, func) \
2198 [n] = { .name = #n, .write = write_##func, .print = print_##func, \
2199 .process = process_##func }
Robert Richter8cdfa782011-12-07 10:02:56 +01002200#define FEAT_OPF(n, func) \
Robert Richterf1c67db2012-02-10 15:41:56 +01002201 [n] = { .name = #n, .write = write_##func, .print = print_##func, \
Namhyung Kima1ae5652012-09-24 17:14:59 +09002202 .process = process_##func, .full_only = true }
Robert Richter8cdfa782011-12-07 10:02:56 +01002203
2204/* feature_ops not implemented: */
Stephane Eranian2eeaaa02012-05-15 13:28:13 +02002205#define print_tracing_data NULL
2206#define print_build_id NULL
Stephane Eranianfbe96f22011-09-30 15:40:40 +02002207
2208static const struct feature_ops feat_ops[HEADER_LAST_FEATURE] = {
Stephane Eranian2eeaaa02012-05-15 13:28:13 +02002209 FEAT_OPP(HEADER_TRACING_DATA, tracing_data),
Robert Richterf1c67db2012-02-10 15:41:56 +01002210 FEAT_OPP(HEADER_BUILD_ID, build_id),
Namhyung Kima1ae5652012-09-24 17:14:59 +09002211 FEAT_OPP(HEADER_HOSTNAME, hostname),
2212 FEAT_OPP(HEADER_OSRELEASE, osrelease),
2213 FEAT_OPP(HEADER_VERSION, version),
2214 FEAT_OPP(HEADER_ARCH, arch),
2215 FEAT_OPP(HEADER_NRCPUS, nrcpus),
2216 FEAT_OPP(HEADER_CPUDESC, cpudesc),
Namhyung Kim37e9d752012-09-24 17:15:03 +09002217 FEAT_OPP(HEADER_CPUID, cpuid),
Namhyung Kima1ae5652012-09-24 17:14:59 +09002218 FEAT_OPP(HEADER_TOTAL_MEM, total_mem),
Robert Richter7c2f7af2012-08-16 21:10:23 +02002219 FEAT_OPP(HEADER_EVENT_DESC, event_desc),
Namhyung Kima1ae5652012-09-24 17:14:59 +09002220 FEAT_OPP(HEADER_CMDLINE, cmdline),
Robert Richter8cdfa782011-12-07 10:02:56 +01002221 FEAT_OPF(HEADER_CPU_TOPOLOGY, cpu_topology),
2222 FEAT_OPF(HEADER_NUMA_TOPOLOGY, numa_topology),
Stephane Eranian330aa672012-03-08 23:47:46 +01002223 FEAT_OPA(HEADER_BRANCH_STACK, branch_stack),
Namhyung Kima1ae5652012-09-24 17:14:59 +09002224 FEAT_OPP(HEADER_PMU_MAPPINGS, pmu_mappings),
Namhyung Kima8bb5592013-01-22 18:09:31 +09002225 FEAT_OPP(HEADER_GROUP_DESC, group_desc),
Adrian Hunter99fa2982015-04-30 17:37:25 +03002226 FEAT_OPP(HEADER_AUXTRACE, auxtrace),
Jiri Olsaffa517a2015-10-25 15:51:43 +01002227 FEAT_OPA(HEADER_STAT, stat),
Jiri Olsa720e98b2016-02-16 16:01:43 +01002228 FEAT_OPF(HEADER_CACHE, cache),
Stephane Eranianfbe96f22011-09-30 15:40:40 +02002229};
2230
2231struct header_print_data {
2232 FILE *fp;
2233 bool full; /* extended list of headers */
2234};
2235
2236static int perf_file_section__fprintf_info(struct perf_file_section *section,
2237 struct perf_header *ph,
2238 int feat, int fd, void *data)
2239{
2240 struct header_print_data *hd = data;
2241
2242 if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) {
2243 pr_debug("Failed to lseek to %" PRIu64 " offset for feature "
2244 "%d, continuing...\n", section->offset, feat);
2245 return 0;
2246 }
Robert Richterb1e5a9b2011-12-07 10:02:57 +01002247 if (feat >= HEADER_LAST_FEATURE) {
Stephane Eranianfbe96f22011-09-30 15:40:40 +02002248 pr_warning("unknown feature %d\n", feat);
Robert Richterf7a8a132011-12-07 10:02:51 +01002249 return 0;
Stephane Eranianfbe96f22011-09-30 15:40:40 +02002250 }
2251 if (!feat_ops[feat].print)
2252 return 0;
2253
2254 if (!feat_ops[feat].full_only || hd->full)
2255 feat_ops[feat].print(ph, fd, hd->fp);
2256 else
2257 fprintf(hd->fp, "# %s info available, use -I to display\n",
2258 feat_ops[feat].name);
2259
2260 return 0;
2261}
2262
2263int perf_header__fprintf_info(struct perf_session *session, FILE *fp, bool full)
2264{
2265 struct header_print_data hd;
2266 struct perf_header *header = &session->header;
Jiri Olsacc9784bd2013-10-15 16:27:34 +02002267 int fd = perf_data_file__fd(session->file);
Jiri Olsaf45f5612016-10-10 09:03:07 +02002268 struct stat st;
Jiri Olsaaabae162016-10-10 09:35:50 +02002269 int ret, bit;
Jiri Olsaf45f5612016-10-10 09:03:07 +02002270
Stephane Eranianfbe96f22011-09-30 15:40:40 +02002271 hd.fp = fp;
2272 hd.full = full;
2273
Jiri Olsaf45f5612016-10-10 09:03:07 +02002274 ret = fstat(fd, &st);
2275 if (ret == -1)
2276 return -1;
2277
2278 fprintf(fp, "# captured on: %s", ctime(&st.st_ctime));
2279
Stephane Eranianfbe96f22011-09-30 15:40:40 +02002280 perf_header__process_sections(header, fd, &hd,
2281 perf_file_section__fprintf_info);
Jiri Olsaaabae162016-10-10 09:35:50 +02002282
David Carrillo-Cisnerosc9d1c932017-04-10 13:14:32 -07002283 if (session->file->is_pipe)
2284 return 0;
2285
Jiri Olsaaabae162016-10-10 09:35:50 +02002286 fprintf(fp, "# missing features: ");
2287 for_each_clear_bit(bit, header->adds_features, HEADER_LAST_FEATURE) {
2288 if (bit)
2289 fprintf(fp, "%s ", feat_ops[bit].name);
2290 }
2291
2292 fprintf(fp, "\n");
Stephane Eranianfbe96f22011-09-30 15:40:40 +02002293 return 0;
2294}
2295
Stephane Eranianfbe96f22011-09-30 15:40:40 +02002296static int do_write_feat(int fd, struct perf_header *h, int type,
2297 struct perf_file_section **p,
2298 struct perf_evlist *evlist)
2299{
2300 int err;
2301 int ret = 0;
2302
2303 if (perf_header__has_feat(h, type)) {
Robert Richterb1e5a9b2011-12-07 10:02:57 +01002304 if (!feat_ops[type].write)
2305 return -1;
Stephane Eranianfbe96f22011-09-30 15:40:40 +02002306
2307 (*p)->offset = lseek(fd, 0, SEEK_CUR);
2308
2309 err = feat_ops[type].write(fd, h, evlist);
2310 if (err < 0) {
Jiri Olsa0c2aff42016-10-10 09:38:02 +02002311 pr_debug("failed to write feature %s\n", feat_ops[type].name);
Stephane Eranianfbe96f22011-09-30 15:40:40 +02002312
2313 /* undo anything written */
2314 lseek(fd, (*p)->offset, SEEK_SET);
2315
2316 return -1;
2317 }
2318 (*p)->size = lseek(fd, 0, SEEK_CUR) - (*p)->offset;
2319 (*p)++;
2320 }
2321 return ret;
2322}
2323
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03002324static int perf_header__adds_write(struct perf_header *header,
Arnaldo Carvalho de Melo361c99a2011-01-11 20:56:53 -02002325 struct perf_evlist *evlist, int fd)
Frederic Weisbecker2ba08252009-10-17 17:12:34 +02002326{
Frederic Weisbecker9e827dd2009-11-11 04:51:07 +01002327 int nr_sections;
Stephane Eranianfbe96f22011-09-30 15:40:40 +02002328 struct perf_file_section *feat_sec, *p;
Frederic Weisbecker9e827dd2009-11-11 04:51:07 +01002329 int sec_size;
2330 u64 sec_start;
Robert Richterb1e5a9b2011-12-07 10:02:57 +01002331 int feat;
Stephane Eranianfbe96f22011-09-30 15:40:40 +02002332 int err;
Frederic Weisbecker9e827dd2009-11-11 04:51:07 +01002333
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03002334 nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);
Frederic Weisbecker9e827dd2009-11-11 04:51:07 +01002335 if (!nr_sections)
Arnaldo Carvalho de Melod5eed902009-11-19 14:55:56 -02002336 return 0;
Frederic Weisbecker9e827dd2009-11-11 04:51:07 +01002337
Paul Gortmaker91b98802013-01-30 20:05:49 -05002338 feat_sec = p = calloc(nr_sections, sizeof(*feat_sec));
Arnaldo Carvalho de Melod5eed902009-11-19 14:55:56 -02002339 if (feat_sec == NULL)
2340 return -ENOMEM;
Frederic Weisbecker9e827dd2009-11-11 04:51:07 +01002341
2342 sec_size = sizeof(*feat_sec) * nr_sections;
2343
Jiri Olsa8d541e92013-07-17 19:49:44 +02002344 sec_start = header->feat_offset;
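	/*
	 * Leave room for the feature section table at feat_offset, write each
	 * feature body after it, then seek back and write the table itself.
	 */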
Xiao Guangrongf887f302010-02-04 16:46:42 +08002345 lseek(fd, sec_start + sec_size, SEEK_SET);
Frederic Weisbecker2ba08252009-10-17 17:12:34 +02002346
Robert Richterb1e5a9b2011-12-07 10:02:57 +01002347 for_each_set_bit(feat, header->adds_features, HEADER_FEAT_BITS) {
2348 if (do_write_feat(fd, header, feat, &p, evlist))
2349 perf_header__clear_feat(header, feat);
2350 }
Frederic Weisbecker9e827dd2009-11-11 04:51:07 +01002351
Xiao Guangrongf887f302010-02-04 16:46:42 +08002352 lseek(fd, sec_start, SEEK_SET);
Stephane Eranianfbe96f22011-09-30 15:40:40 +02002353 /*
2354 * may write more than needed due to dropped feature, but
2355	 * this is okay, reader will skip the missing entries
2356 */
Arnaldo Carvalho de Melod5eed902009-11-19 14:55:56 -02002357 err = do_write(fd, feat_sec, sec_size);
2358 if (err < 0)
2359 pr_debug("failed to write feature section\n");
Frederic Weisbecker9e827dd2009-11-11 04:51:07 +01002360 free(feat_sec);
Arnaldo Carvalho de Melod5eed902009-11-19 14:55:56 -02002361 return err;
Frederic Weisbecker9e827dd2009-11-11 04:51:07 +01002362}
Frederic Weisbecker2ba08252009-10-17 17:12:34 +02002363
Tom Zanussi8dc58102010-04-01 23:59:15 -05002364int perf_header__write_pipe(int fd)
2365{
2366 struct perf_pipe_file_header f_header;
2367 int err;
2368
2369 f_header = (struct perf_pipe_file_header){
2370 .magic = PERF_MAGIC,
2371 .size = sizeof(f_header),
2372 };
2373
2374 err = do_write(fd, &f_header, sizeof(f_header));
2375 if (err < 0) {
2376 pr_debug("failed to write perf pipe header\n");
2377 return err;
2378 }
2379
2380 return 0;
2381}
2382
Arnaldo Carvalho de Meloa91e5432011-03-10 11:15:54 -03002383int perf_session__write_header(struct perf_session *session,
2384 struct perf_evlist *evlist,
2385 int fd, bool at_exit)
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02002386{
2387 struct perf_file_header f_header;
2388 struct perf_file_attr f_attr;
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03002389 struct perf_header *header = &session->header;
Jiri Olsa563aecb2013-06-05 13:35:06 +02002390 struct perf_evsel *evsel;
Jiri Olsa944d62b2013-07-17 19:49:43 +02002391 u64 attr_offset;
Arnaldo Carvalho de Meloa91e5432011-03-10 11:15:54 -03002392 int err;
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02002393
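	/*
	 * Layout: file header, per-evsel id arrays, the attribute table, the
	 * sample data and (at exit) the feature sections; the file header is
	 * written last, once all offsets are known.
	 */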
2394 lseek(fd, sizeof(f_header), SEEK_SET);
2395
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03002396 evlist__for_each_entry(session->evlist, evsel) {
Robert Richter6606f872012-08-16 21:10:19 +02002397 evsel->id_offset = lseek(fd, 0, SEEK_CUR);
2398 err = do_write(fd, evsel->id, evsel->ids * sizeof(u64));
Arnaldo Carvalho de Melod5eed902009-11-19 14:55:56 -02002399 if (err < 0) {
2400 pr_debug("failed to write perf header\n");
2401 return err;
2402 }
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02002403 }
2404
Jiri Olsa944d62b2013-07-17 19:49:43 +02002405 attr_offset = lseek(fd, 0, SEEK_CUR);
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02002406
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03002407 evlist__for_each_entry(evlist, evsel) {
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02002408 f_attr = (struct perf_file_attr){
Robert Richter6606f872012-08-16 21:10:19 +02002409 .attr = evsel->attr,
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02002410 .ids = {
Robert Richter6606f872012-08-16 21:10:19 +02002411 .offset = evsel->id_offset,
2412 .size = evsel->ids * sizeof(u64),
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02002413 }
2414 };
Arnaldo Carvalho de Melod5eed902009-11-19 14:55:56 -02002415 err = do_write(fd, &f_attr, sizeof(f_attr));
2416 if (err < 0) {
2417 pr_debug("failed to write perf header attribute\n");
2418 return err;
2419 }
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02002420 }
2421
Adrian Hunterd645c442013-12-11 14:36:28 +02002422 if (!header->data_offset)
2423 header->data_offset = lseek(fd, 0, SEEK_CUR);
Jiri Olsa8d541e92013-07-17 19:49:44 +02002424 header->feat_offset = header->data_offset + header->data_size;
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02002425
Arnaldo Carvalho de Melod5eed902009-11-19 14:55:56 -02002426 if (at_exit) {
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03002427 err = perf_header__adds_write(header, evlist, fd);
Arnaldo Carvalho de Melod5eed902009-11-19 14:55:56 -02002428 if (err < 0)
2429 return err;
2430 }
Frederic Weisbecker9e827dd2009-11-11 04:51:07 +01002431
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02002432 f_header = (struct perf_file_header){
2433 .magic = PERF_MAGIC,
2434 .size = sizeof(f_header),
2435 .attr_size = sizeof(f_attr),
2436 .attrs = {
Jiri Olsa944d62b2013-07-17 19:49:43 +02002437 .offset = attr_offset,
Arnaldo Carvalho de Meloa91e5432011-03-10 11:15:54 -03002438 .size = evlist->nr_entries * sizeof(f_attr),
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02002439 },
2440 .data = {
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03002441 .offset = header->data_offset,
2442 .size = header->data_size,
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02002443 },
Jiri Olsa44b3c572013-07-11 17:28:31 +02002444 /* event_types is ignored, store zeros */
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02002445 };
2446
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03002447 memcpy(&f_header.adds_features, &header->adds_features, sizeof(header->adds_features));
Frederic Weisbecker2ba08252009-10-17 17:12:34 +02002448
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02002449 lseek(fd, 0, SEEK_SET);
Arnaldo Carvalho de Melod5eed902009-11-19 14:55:56 -02002450 err = do_write(fd, &f_header, sizeof(f_header));
2451 if (err < 0) {
2452 pr_debug("failed to write perf header\n");
2453 return err;
2454 }
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03002455 lseek(fd, header->data_offset + header->data_size, SEEK_SET);
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02002456
Arnaldo Carvalho de Melod5eed902009-11-19 14:55:56 -02002457 return 0;
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02002458}
2459
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03002460static int perf_header__getbuffer64(struct perf_header *header,
Arnaldo Carvalho de Meloba215942010-01-14 12:23:10 -02002461 int fd, void *buf, size_t size)
2462{
Arnaldo Carvalho de Melo1e7972c2011-01-03 16:50:55 -02002463 if (readn(fd, buf, size) <= 0)
Arnaldo Carvalho de Meloba215942010-01-14 12:23:10 -02002464 return -1;
2465
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03002466 if (header->needs_swap)
Arnaldo Carvalho de Meloba215942010-01-14 12:23:10 -02002467 mem_bswap_64(buf, size);
2468
2469 return 0;
2470}
2471
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03002472int perf_header__process_sections(struct perf_header *header, int fd,
Stephane Eranianfbe96f22011-09-30 15:40:40 +02002473 void *data,
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03002474 int (*process)(struct perf_file_section *section,
Robert Richterb1e5a9b2011-12-07 10:02:57 +01002475 struct perf_header *ph,
2476 int feat, int fd, void *data))
Frederic Weisbecker2ba08252009-10-17 17:12:34 +02002477{
Robert Richterb1e5a9b2011-12-07 10:02:57 +01002478 struct perf_file_section *feat_sec, *sec;
Frederic Weisbecker9e827dd2009-11-11 04:51:07 +01002479 int nr_sections;
2480 int sec_size;
Robert Richterb1e5a9b2011-12-07 10:02:57 +01002481 int feat;
2482 int err;
Frederic Weisbecker9e827dd2009-11-11 04:51:07 +01002483
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03002484 nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);
Frederic Weisbecker9e827dd2009-11-11 04:51:07 +01002485 if (!nr_sections)
Arnaldo Carvalho de Melo37562ea2009-11-16 16:32:43 -02002486 return 0;
Frederic Weisbecker9e827dd2009-11-11 04:51:07 +01002487
Paul Gortmaker91b98802013-01-30 20:05:49 -05002488 feat_sec = sec = calloc(nr_sections, sizeof(*feat_sec));
Frederic Weisbecker9e827dd2009-11-11 04:51:07 +01002489 if (!feat_sec)
Arnaldo Carvalho de Melo37562ea2009-11-16 16:32:43 -02002490 return -1;
Frederic Weisbecker9e827dd2009-11-11 04:51:07 +01002491
2492 sec_size = sizeof(*feat_sec) * nr_sections;
2493
Jiri Olsa8d541e92013-07-17 19:49:44 +02002494 lseek(fd, header->feat_offset, SEEK_SET);
Frederic Weisbecker9e827dd2009-11-11 04:51:07 +01002495
Robert Richterb1e5a9b2011-12-07 10:02:57 +01002496 err = perf_header__getbuffer64(header, fd, feat_sec, sec_size);
2497 if (err < 0)
Arnaldo Carvalho de Melo769885f2009-12-28 22:48:32 -02002498 goto out_free;
Frederic Weisbecker9e827dd2009-11-11 04:51:07 +01002499
Robert Richterb1e5a9b2011-12-07 10:02:57 +01002500 for_each_set_bit(feat, header->adds_features, HEADER_LAST_FEATURE) {
2501 err = process(sec++, header, feat, fd, data);
2502 if (err < 0)
2503 goto out_free;
Frederic Weisbecker4778d2e2009-11-11 04:51:05 +01002504 }
Robert Richterb1e5a9b2011-12-07 10:02:57 +01002505 err = 0;
Arnaldo Carvalho de Melo769885f2009-12-28 22:48:32 -02002506out_free:
Frederic Weisbecker9e827dd2009-11-11 04:51:07 +01002507 free(feat_sec);
Arnaldo Carvalho de Melo37562ea2009-11-16 16:32:43 -02002508 return err;
Arnaldo Carvalho de Melo769885f2009-12-28 22:48:32 -02002509}
Frederic Weisbecker2ba08252009-10-17 17:12:34 +02002510
Stephane Eranian114382a2012-02-09 23:21:08 +01002511static const int attr_file_abi_sizes[] = {
2512 [0] = PERF_ATTR_SIZE_VER0,
2513 [1] = PERF_ATTR_SIZE_VER1,
Jiri Olsa239cc472012-08-07 15:20:42 +02002514 [2] = PERF_ATTR_SIZE_VER2,
Jiri Olsa0f6a3012012-08-07 15:20:45 +02002515 [3] = PERF_ATTR_SIZE_VER3,
Stephane Eranian6a21c0b2014-09-24 13:48:39 +02002516 [4] = PERF_ATTR_SIZE_VER4,
Stephane Eranian114382a2012-02-09 23:21:08 +01002517 0,
2518};
2519
2520/*
2521 * In the legacy file format, the magic number is not used to encode endianness;
2522 * hdr_sz was used for that. But given that hdr_sz can vary based on ABI
2523 * revisions, we need to try all known sizes in both byte orders to detect
2524 * the endianness.
2525 */
2526static int try_all_file_abis(uint64_t hdr_sz, struct perf_header *ph)
2527{
2528 uint64_t ref_size, attr_size;
2529 int i;
2530
2531 for (i = 0 ; attr_file_abi_sizes[i]; i++) {
2532 ref_size = attr_file_abi_sizes[i]
2533 + sizeof(struct perf_file_section);
2534 if (hdr_sz != ref_size) {
2535 attr_size = bswap_64(hdr_sz);
2536 if (attr_size != ref_size)
2537 continue;
2538
2539 ph->needs_swap = true;
2540 }
2541 pr_debug("ABI%d perf.data file detected, need_swap=%d\n",
2542 i,
2543 ph->needs_swap);
2544 return 0;
2545 }
2546 /* could not determine endianness */
2547 return -1;
2548}
2549
2550#define PERF_PIPE_HDR_VER0 16
2551
2552static const size_t attr_pipe_abi_sizes[] = {
2553 [0] = PERF_PIPE_HDR_VER0,
2554 0,
2555};
2556
2557/*
2558 * In the legacy pipe format, there is an implicit assumption that the
2559 * endianness of the host recording the samples and of the host parsing
2560 * the samples is the same. This is not always the case, since the pipe
2561 * output may be redirected into a file and analyzed on a different machine
2562 * with a possibly different endianness and perf_event ABI revision in the
2563 * perf tool itself.
2563 */
2564static int try_all_pipe_abis(uint64_t hdr_sz, struct perf_header *ph)
2565{
2566 u64 attr_size;
2567 int i;
2568
2569 for (i = 0 ; attr_pipe_abi_sizes[i]; i++) {
2570 if (hdr_sz != attr_pipe_abi_sizes[i]) {
2571 attr_size = bswap_64(hdr_sz);
2572 if (attr_size != hdr_sz)
2573 continue;
2574
2575 ph->needs_swap = true;
2576 }
2577 pr_debug("Pipe ABI%d perf.data file detected\n", i);
2578 return 0;
2579 }
2580 return -1;
2581}
2582
Feng Tange84ba4e2012-10-30 11:56:07 +08002583bool is_perf_magic(u64 magic)
2584{
2585 if (!memcmp(&magic, __perf_magic1, sizeof(magic))
2586 || magic == __perf_magic2
2587 || magic == __perf_magic2_sw)
2588 return true;
2589
2590 return false;
2591}
2592
Stephane Eranian114382a2012-02-09 23:21:08 +01002593static int check_magic_endian(u64 magic, uint64_t hdr_sz,
2594 bool is_pipe, struct perf_header *ph)
Stephane Eranian73323f52012-02-02 13:54:44 +01002595{
2596 int ret;
2597
2598 /* check for legacy format */
Stephane Eranian114382a2012-02-09 23:21:08 +01002599 ret = memcmp(&magic, __perf_magic1, sizeof(magic));
Stephane Eranian73323f52012-02-02 13:54:44 +01002600 if (ret == 0) {
Jiri Olsa2a08c3e2013-07-17 19:49:47 +02002601 ph->version = PERF_HEADER_VERSION_1;
Stephane Eranian73323f52012-02-02 13:54:44 +01002602 pr_debug("legacy perf.data format\n");
Stephane Eranian114382a2012-02-09 23:21:08 +01002603 if (is_pipe)
2604 return try_all_pipe_abis(hdr_sz, ph);
Stephane Eranian73323f52012-02-02 13:54:44 +01002605
Stephane Eranian114382a2012-02-09 23:21:08 +01002606 return try_all_file_abis(hdr_sz, ph);
Stephane Eranian73323f52012-02-02 13:54:44 +01002607 }
Stephane Eranian114382a2012-02-09 23:21:08 +01002608 /*
2609 * the new magic number serves two purposes:
2610 * - unique number to identify actual perf.data files
2611 * - encode endianness of file
2612 */
Namhyung Kimf7913972015-01-29 17:06:45 +09002613 ph->version = PERF_HEADER_VERSION_2;
Stephane Eranian73323f52012-02-02 13:54:44 +01002614
Stephane Eranian114382a2012-02-09 23:21:08 +01002615 /* check magic number with one endianness */
2616 if (magic == __perf_magic2)
Stephane Eranian73323f52012-02-02 13:54:44 +01002617 return 0;
2618
Stephane Eranian114382a2012-02-09 23:21:08 +01002619 /* check magic number with opposite endianness */
2620 if (magic != __perf_magic2_sw)
Stephane Eranian73323f52012-02-02 13:54:44 +01002621 return -1;
2622
2623 ph->needs_swap = true;
2624
2625 return 0;
2626}
2627
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03002628int perf_file_header__read(struct perf_file_header *header,
Arnaldo Carvalho de Melo37562ea2009-11-16 16:32:43 -02002629 struct perf_header *ph, int fd)
2630{
Jiri Olsa727ebd52013-11-28 11:30:14 +01002631 ssize_t ret;
Stephane Eranian73323f52012-02-02 13:54:44 +01002632
Arnaldo Carvalho de Melo37562ea2009-11-16 16:32:43 -02002633 lseek(fd, 0, SEEK_SET);
Arnaldo Carvalho de Melo37562ea2009-11-16 16:32:43 -02002634
Stephane Eranian73323f52012-02-02 13:54:44 +01002635 ret = readn(fd, header, sizeof(*header));
2636 if (ret <= 0)
Arnaldo Carvalho de Melo37562ea2009-11-16 16:32:43 -02002637 return -1;
2638
Stephane Eranian114382a2012-02-09 23:21:08 +01002639 if (check_magic_endian(header->magic,
2640 header->attr_size, false, ph) < 0) {
2641 pr_debug("magic/endian check failed\n");
Stephane Eranian73323f52012-02-02 13:54:44 +01002642 return -1;
Stephane Eranian114382a2012-02-09 23:21:08 +01002643 }
Arnaldo Carvalho de Meloba215942010-01-14 12:23:10 -02002644
Stephane Eranian73323f52012-02-02 13:54:44 +01002645 if (ph->needs_swap) {
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03002646 mem_bswap_64(header, offsetof(struct perf_file_header,
Stephane Eranian73323f52012-02-02 13:54:44 +01002647 adds_features));
Arnaldo Carvalho de Meloba215942010-01-14 12:23:10 -02002648 }
2649
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03002650 if (header->size != sizeof(*header)) {
Arnaldo Carvalho de Melo37562ea2009-11-16 16:32:43 -02002651 /* Support the previous format */
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03002652 if (header->size == offsetof(typeof(*header), adds_features))
2653 bitmap_zero(header->adds_features, HEADER_FEAT_BITS);
Arnaldo Carvalho de Melo37562ea2009-11-16 16:32:43 -02002654 else
2655 return -1;
David Ahernd327fa42011-10-18 17:34:01 -06002656 } else if (ph->needs_swap) {
David Ahernd327fa42011-10-18 17:34:01 -06002657 /*
2658 * feature bitmap is declared as an array of unsigned longs --
2659 * not good since its size can differ between the host that
2660 * generated the data file and the host analyzing the file.
2661 *
2662 * We need to handle endianness, but we don't know the size of
2663 * the unsigned long where the file was generated. Take a best
2664 * guess at determining it: try 64-bit swap first (i.e., file
2665 * created on a 64-bit host), and check if the hostname feature
2666 * bit is set (this feature bit is forced on as of fbe96f2).
2667 * If the bit is not, undo the 64-bit swap and try a 32-bit
2668 * swap. If the hostname bit is still not set (e.g., older data
2669 * file), punt and fall back to the original behavior --
2670 * clearing all feature bits and setting buildid.
2671 */
David Ahern80c01202012-06-08 11:47:51 -03002672 mem_bswap_64(&header->adds_features,
2673 BITS_TO_U64(HEADER_FEAT_BITS));
David Ahernd327fa42011-10-18 17:34:01 -06002674
2675 if (!test_bit(HEADER_HOSTNAME, header->adds_features)) {
David Ahern80c01202012-06-08 11:47:51 -03002676 /* unswap as u64 */
2677 mem_bswap_64(&header->adds_features,
2678 BITS_TO_U64(HEADER_FEAT_BITS));
2679
2680 /* unswap as u32 */
2681 mem_bswap_32(&header->adds_features,
2682 BITS_TO_U32(HEADER_FEAT_BITS));
David Ahernd327fa42011-10-18 17:34:01 -06002683 }
2684
2685 if (!test_bit(HEADER_HOSTNAME, header->adds_features)) {
2686 bitmap_zero(header->adds_features, HEADER_FEAT_BITS);
2687 set_bit(HEADER_BUILD_ID, header->adds_features);
2688 }
Arnaldo Carvalho de Melo37562ea2009-11-16 16:32:43 -02002689 }
2690
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03002691 memcpy(&ph->adds_features, &header->adds_features,
Arnaldo Carvalho de Meloba215942010-01-14 12:23:10 -02002692 sizeof(ph->adds_features));
Arnaldo Carvalho de Melo37562ea2009-11-16 16:32:43 -02002693
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03002694 ph->data_offset = header->data.offset;
2695 ph->data_size = header->data.size;
Jiri Olsa8d541e92013-07-17 19:49:44 +02002696 ph->feat_offset = header->data.offset + header->data.size;
Arnaldo Carvalho de Melo37562ea2009-11-16 16:32:43 -02002697 return 0;
2698}
2699
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03002700static int perf_file_section__process(struct perf_file_section *section,
Arnaldo Carvalho de Meloba215942010-01-14 12:23:10 -02002701 struct perf_header *ph,
Arnaldo Carvalho de Meloda378962012-06-27 13:08:42 -03002702 int feat, int fd, void *data)
Arnaldo Carvalho de Melo37562ea2009-11-16 16:32:43 -02002703{
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03002704 if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) {
Arnaldo Carvalho de Melo9486aa32011-01-22 20:37:02 -02002705 pr_debug("Failed to lseek to %" PRIu64 " offset for feature "
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03002706 "%d, continuing...\n", section->offset, feat);
Arnaldo Carvalho de Melo37562ea2009-11-16 16:32:43 -02002707 return 0;
2708 }
2709
Robert Richterb1e5a9b2011-12-07 10:02:57 +01002710 if (feat >= HEADER_LAST_FEATURE) {
2711 pr_debug("unknown feature %d, continuing...\n", feat);
2712 return 0;
2713 }
2714
Robert Richterf1c67db2012-02-10 15:41:56 +01002715 if (!feat_ops[feat].process)
2716 return 0;
Arnaldo Carvalho de Melo37562ea2009-11-16 16:32:43 -02002717
Namhyung Kim3d7eb862012-09-24 17:15:01 +09002718 return feat_ops[feat].process(section, ph, fd, data);
Arnaldo Carvalho de Melo37562ea2009-11-16 16:32:43 -02002719}
2720
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03002721static int perf_file_header__read_pipe(struct perf_pipe_file_header *header,
Tom Zanussi454c4072010-05-01 01:41:20 -05002722 struct perf_header *ph, int fd,
2723 bool repipe)
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02002724{
Jiri Olsa727ebd52013-11-28 11:30:14 +01002725 ssize_t ret;
Stephane Eranian73323f52012-02-02 13:54:44 +01002726
2727 ret = readn(fd, header, sizeof(*header));
2728 if (ret <= 0)
2729 return -1;
2730
Stephane Eranian114382a2012-02-09 23:21:08 +01002731 if (check_magic_endian(header->magic, header->size, true, ph) < 0) {
2732 pr_debug("endian/magic failed\n");
Tom Zanussi8dc58102010-04-01 23:59:15 -05002733 return -1;
Stephane Eranian114382a2012-02-09 23:21:08 +01002734 }
2735
2736 if (ph->needs_swap)
2737 header->size = bswap_64(header->size);
Tom Zanussi8dc58102010-04-01 23:59:15 -05002738
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03002739 if (repipe && do_write(STDOUT_FILENO, header, sizeof(*header)) < 0)
Tom Zanussi454c4072010-05-01 01:41:20 -05002740 return -1;
2741
Tom Zanussi8dc58102010-04-01 23:59:15 -05002742 return 0;
2743}
2744
Jiri Olsad4339562013-07-17 19:49:41 +02002745static int perf_header__read_pipe(struct perf_session *session)
Tom Zanussi8dc58102010-04-01 23:59:15 -05002746{
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03002747 struct perf_header *header = &session->header;
Tom Zanussi8dc58102010-04-01 23:59:15 -05002748 struct perf_pipe_file_header f_header;
2749
Jiri Olsacc9784bd2013-10-15 16:27:34 +02002750 if (perf_file_header__read_pipe(&f_header, header,
2751 perf_data_file__fd(session->file),
Tom Zanussi454c4072010-05-01 01:41:20 -05002752 session->repipe) < 0) {
Tom Zanussi8dc58102010-04-01 23:59:15 -05002753 pr_debug("incompatible file format\n");
2754 return -EINVAL;
2755 }
2756
Tom Zanussi8dc58102010-04-01 23:59:15 -05002757 return 0;
2758}
2759
Stephane Eranian69996df2012-02-09 23:21:06 +01002760static int read_attr(int fd, struct perf_header *ph,
2761 struct perf_file_attr *f_attr)
2762{
2763 struct perf_event_attr *attr = &f_attr->attr;
2764 size_t sz, left;
2765 size_t our_sz = sizeof(f_attr->attr);
Jiri Olsa727ebd52013-11-28 11:30:14 +01002766 ssize_t ret;
Stephane Eranian69996df2012-02-09 23:21:06 +01002767
2768 memset(f_attr, 0, sizeof(*f_attr));
2769
2770 /* read minimal guaranteed structure */
2771 ret = readn(fd, attr, PERF_ATTR_SIZE_VER0);
2772 if (ret <= 0) {
2773 pr_debug("cannot read %d bytes of header attr\n",
2774 PERF_ATTR_SIZE_VER0);
2775 return -1;
2776 }
2777
2778 /* on file perf_event_attr size */
2779 sz = attr->size;
Stephane Eranian114382a2012-02-09 23:21:08 +01002780
Stephane Eranian69996df2012-02-09 23:21:06 +01002781 if (ph->needs_swap)
2782 sz = bswap_32(sz);
2783
2784 if (sz == 0) {
2785 /* assume ABI0 */
2786 sz = PERF_ATTR_SIZE_VER0;
2787 } else if (sz > our_sz) {
2788 pr_debug("file uses a more recent and unsupported ABI"
2789 " (%zu bytes extra)\n", sz - our_sz);
2790 return -1;
2791 }
2792 /* what we have not yet read and that we know about */
2793 left = sz - PERF_ATTR_SIZE_VER0;
2794 if (left) {
2795 void *ptr = attr;
2796 ptr += PERF_ATTR_SIZE_VER0;
2797
2798 ret = readn(fd, ptr, left);
2799 }
2800 /* read perf_file_section, ids are read in caller */
2801 ret = readn(fd, &f_attr->ids, sizeof(f_attr->ids));
2802
2803 return ret <= 0 ? -1 : 0;
2804}
2805
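/*
 * Bind the evsel to its traceevent format and synthesize a "system:name"
 * event name if none was recorded.
 */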
Namhyung Kim831394b2012-09-06 11:10:46 +09002806static int perf_evsel__prepare_tracepoint_event(struct perf_evsel *evsel,
2807 struct pevent *pevent)
Arnaldo Carvalho de Melocb9dd492012-06-11 19:03:32 -03002808{
Namhyung Kim831394b2012-09-06 11:10:46 +09002809 struct event_format *event;
Arnaldo Carvalho de Melocb9dd492012-06-11 19:03:32 -03002810 char bf[128];
2811
Namhyung Kim831394b2012-09-06 11:10:46 +09002812 /* already prepared */
2813 if (evsel->tp_format)
2814 return 0;
2815
Namhyung Kim3dce2ce2013-03-21 16:18:48 +09002816 if (pevent == NULL) {
2817 pr_debug("broken or missing trace data\n");
2818 return -1;
2819 }
2820
Namhyung Kim831394b2012-09-06 11:10:46 +09002821 event = pevent_find_event(pevent, evsel->attr.config);
Namhyung Kima7619ae2013-04-18 21:24:16 +09002822 if (event == NULL) {
2823 pr_debug("cannot find event format for %d\n", (int)evsel->attr.config);
Arnaldo Carvalho de Melocb9dd492012-06-11 19:03:32 -03002824 return -1;
Namhyung Kima7619ae2013-04-18 21:24:16 +09002825 }
Arnaldo Carvalho de Melocb9dd492012-06-11 19:03:32 -03002826
Namhyung Kim831394b2012-09-06 11:10:46 +09002827 if (!evsel->name) {
2828 snprintf(bf, sizeof(bf), "%s:%s", event->system, event->name);
2829 evsel->name = strdup(bf);
2830 if (evsel->name == NULL)
2831 return -1;
2832 }
Arnaldo Carvalho de Melocb9dd492012-06-11 19:03:32 -03002833
Arnaldo Carvalho de Melofcf65bf2012-08-07 09:58:03 -03002834 evsel->tp_format = event;
Arnaldo Carvalho de Melocb9dd492012-06-11 19:03:32 -03002835 return 0;
2836}
2837
Namhyung Kim831394b2012-09-06 11:10:46 +09002838static int perf_evlist__prepare_tracepoint_events(struct perf_evlist *evlist,
2839 struct pevent *pevent)
Arnaldo Carvalho de Melocb9dd492012-06-11 19:03:32 -03002840{
2841 struct perf_evsel *pos;
2842
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03002843 evlist__for_each_entry(evlist, pos) {
Namhyung Kim831394b2012-09-06 11:10:46 +09002844 if (pos->attr.type == PERF_TYPE_TRACEPOINT &&
2845 perf_evsel__prepare_tracepoint_event(pos, pevent))
Arnaldo Carvalho de Melocb9dd492012-06-11 19:03:32 -03002846 return -1;
2847 }
2848
2849 return 0;
2850}
2851
Jiri Olsad4339562013-07-17 19:49:41 +02002852int perf_session__read_header(struct perf_session *session)
Tom Zanussi8dc58102010-04-01 23:59:15 -05002853{
Jiri Olsacc9784bd2013-10-15 16:27:34 +02002854 struct perf_data_file *file = session->file;
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03002855 struct perf_header *header = &session->header;
Arnaldo Carvalho de Meloba215942010-01-14 12:23:10 -02002856 struct perf_file_header f_header;
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02002857 struct perf_file_attr f_attr;
2858 u64 f_id;
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02002859 int nr_attrs, nr_ids, i, j;
Jiri Olsacc9784bd2013-10-15 16:27:34 +02002860 int fd = perf_data_file__fd(file);
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02002861
Namhyung Kim334fe7a2013-03-11 16:43:12 +09002862 session->evlist = perf_evlist__new();
Arnaldo Carvalho de Meloa91e5432011-03-10 11:15:54 -03002863 if (session->evlist == NULL)
2864 return -ENOMEM;
2865
Kan Liang2c071442015-08-28 05:48:05 -04002866 session->evlist->env = &header->env;
Arnaldo Carvalho de Melo4cde9982015-09-09 12:25:00 -03002867 session->machines.host.env = &header->env;
Jiri Olsacc9784bd2013-10-15 16:27:34 +02002868 if (perf_data_file__is_pipe(file))
Jiri Olsad4339562013-07-17 19:49:41 +02002869 return perf_header__read_pipe(session);
Tom Zanussi8dc58102010-04-01 23:59:15 -05002870
Stephane Eranian69996df2012-02-09 23:21:06 +01002871 if (perf_file_header__read(&f_header, header, fd) < 0)
Arnaldo Carvalho de Melo4dc0a042009-11-19 14:55:55 -02002872 return -EINVAL;
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02002873
Namhyung Kimb314e5c2013-09-30 17:19:48 +09002874 /*
2875 * Sanity check that perf.data was written cleanly; data size is
2876 * initialized to 0 and updated only if the on_exit function is run.
2877 * If data size is still 0 then the file contains only partial
2878 * information. Just warn the user and process as much of it as we can.
2879 */
2880 if (f_header.data.size == 0) {
2881 pr_warning("WARNING: The %s file's data size field is 0 which is unexpected.\n"
2882 "Was the 'perf record' command properly terminated?\n",
Jiri Olsacc9784bd2013-10-15 16:27:34 +02002883 file->path);
Namhyung Kimb314e5c2013-09-30 17:19:48 +09002884 }
2885
Stephane Eranian69996df2012-02-09 23:21:06 +01002886 nr_attrs = f_header.attrs.size / f_header.attr_size;
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02002887 lseek(fd, f_header.attrs.offset, SEEK_SET);
2888
2889 for (i = 0; i < nr_attrs; i++) {
Arnaldo Carvalho de Meloa91e5432011-03-10 11:15:54 -03002890 struct perf_evsel *evsel;
Peter Zijlstra1c222bc2009-08-06 20:57:41 +02002891 off_t tmp;
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02002892
Stephane Eranian69996df2012-02-09 23:21:06 +01002893 if (read_attr(fd, header, &f_attr) < 0)
Arnaldo Carvalho de Melo769885f2009-12-28 22:48:32 -02002894 goto out_errno;
Arnaldo Carvalho de Meloba215942010-01-14 12:23:10 -02002895
David Ahern1060ab82015-04-09 16:15:46 -04002896 if (header->needs_swap) {
2897 f_attr.ids.size = bswap_64(f_attr.ids.size);
2898 f_attr.ids.offset = bswap_64(f_attr.ids.offset);
David Aherneda39132011-07-15 12:34:09 -06002899 perf_event__attr_swap(&f_attr.attr);
David Ahern1060ab82015-04-09 16:15:46 -04002900 }
David Aherneda39132011-07-15 12:34:09 -06002901
Peter Zijlstra1c222bc2009-08-06 20:57:41 +02002902 tmp = lseek(fd, 0, SEEK_CUR);
Arnaldo Carvalho de Meloef503832013-11-07 16:41:19 -03002903 evsel = perf_evsel__new(&f_attr.attr);
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02002904
Arnaldo Carvalho de Meloa91e5432011-03-10 11:15:54 -03002905 if (evsel == NULL)
2906 goto out_delete_evlist;
Arnaldo Carvalho de Melo0807d2d2012-09-26 12:48:18 -03002907
2908 evsel->needs_swap = header->needs_swap;
Arnaldo Carvalho de Meloa91e5432011-03-10 11:15:54 -03002909 /*
2910 * Do it before so that if perf_evsel__alloc_id fails, this
2911 * entry gets purged too at perf_evlist__delete().
2912 */
2913 perf_evlist__add(session->evlist, evsel);
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02002914
2915 nr_ids = f_attr.ids.size / sizeof(u64);
Arnaldo Carvalho de Meloa91e5432011-03-10 11:15:54 -03002916 /*
2917 * We don't have the cpu and thread maps on the header, so
2918 * for allocating the perf_sample_id table we fake 1 cpu and
2919 * hattr->ids threads.
2920 */
2921 if (perf_evsel__alloc_id(evsel, 1, nr_ids))
2922 goto out_delete_evlist;
2923
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02002924 lseek(fd, f_attr.ids.offset, SEEK_SET);
2925
2926 for (j = 0; j < nr_ids; j++) {
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03002927 if (perf_header__getbuffer64(header, fd, &f_id, sizeof(f_id)))
Arnaldo Carvalho de Melo769885f2009-12-28 22:48:32 -02002928 goto out_errno;
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02002929
Arnaldo Carvalho de Meloa91e5432011-03-10 11:15:54 -03002930 perf_evlist__id_add(session->evlist, evsel, 0, j, f_id);
Arnaldo Carvalho de Melo4dc0a042009-11-19 14:55:55 -02002931 }
Arnaldo Carvalho de Melo11deb1f2009-11-17 01:18:09 -02002932
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02002933 lseek(fd, tmp, SEEK_SET);
2934 }
2935
Arnaldo Carvalho de Melod04b35f2011-11-11 22:17:32 -02002936 symbol_conf.nr_events = nr_attrs;
2937
Jiri Olsa29f5ffd2013-12-03 14:09:23 +01002938 perf_header__process_sections(header, fd, &session->tevent,
Stephane Eranianfbe96f22011-09-30 15:40:40 +02002939 perf_file_section__process);
Frederic Weisbecker4778d2e2009-11-11 04:51:05 +01002940
Namhyung Kim831394b2012-09-06 11:10:46 +09002941 if (perf_evlist__prepare_tracepoint_events(session->evlist,
Jiri Olsa29f5ffd2013-12-03 14:09:23 +01002942 session->tevent.pevent))
Arnaldo Carvalho de Melocb9dd492012-06-11 19:03:32 -03002943 goto out_delete_evlist;
2944
Arnaldo Carvalho de Melo4dc0a042009-11-19 14:55:55 -02002945 return 0;
Arnaldo Carvalho de Melo769885f2009-12-28 22:48:32 -02002946out_errno:
2947 return -errno;
Arnaldo Carvalho de Meloa91e5432011-03-10 11:15:54 -03002948
2949out_delete_evlist:
2950 perf_evlist__delete(session->evlist);
2951 session->evlist = NULL;
2952 return -ENOMEM;
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02002953}
Frederic Weisbecker0d3a5c82009-08-16 20:56:37 +02002954
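/*
 * Synthesize a PERF_RECORD_HEADER_ATTR event carrying one perf_event_attr
 * followed by its 'ids' sample ids and hand it to 'process'.  Typically
 * used when writing to a pipe, where the on-disk header is not available.
 */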
Arnaldo Carvalho de Melo45694aa2011-11-28 08:30:20 -02002955int perf_event__synthesize_attr(struct perf_tool *tool,
Robert Richterf4d83432012-08-16 21:10:17 +02002956 struct perf_event_attr *attr, u32 ids, u64 *id,
Arnaldo Carvalho de Melo743eb862011-11-28 07:56:39 -02002957 perf_event__handler_t process)
Frederic Weisbecker0d3a5c82009-08-16 20:56:37 +02002958{
Arnaldo Carvalho de Melo8115d602011-01-29 14:01:45 -02002959 union perf_event *ev;
Tom Zanussi2c46dbb2010-04-01 23:59:19 -05002960 size_t size;
2961 int err;
2962
2963 size = sizeof(struct perf_event_attr);
Irina Tirdea9ac3e482012-09-11 01:15:01 +03002964 size = PERF_ALIGN(size, sizeof(u64));
Tom Zanussi2c46dbb2010-04-01 23:59:19 -05002965 size += sizeof(struct perf_event_header);
2966 size += ids * sizeof(u64);
2967
2968 ev = malloc(size);
2969
Chris Samuelce47dc52010-11-13 13:35:06 +11002970 if (ev == NULL)
2971 return -ENOMEM;
2972
Tom Zanussi2c46dbb2010-04-01 23:59:19 -05002973 ev->attr.attr = *attr;
2974 memcpy(ev->attr.id, id, ids * sizeof(u64));
2975
2976 ev->attr.header.type = PERF_RECORD_HEADER_ATTR;
Robert Richterf4d83432012-08-16 21:10:17 +02002977 ev->attr.header.size = (u16)size;
Tom Zanussi2c46dbb2010-04-01 23:59:19 -05002978
Robert Richterf4d83432012-08-16 21:10:17 +02002979 if (ev->attr.header.size == size)
2980 err = process(tool, ev, NULL, NULL);
2981 else
2982 err = -E2BIG;
Tom Zanussi2c46dbb2010-04-01 23:59:19 -05002983
2984 free(ev);
2985
2986 return err;
2987}
2988
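/*
 * Allocate a zeroed PERF_RECORD_EVENT_UPDATE event with room for 'size'
 * bytes of payload past the fixed part, rounded up to a u64 boundary.
 */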
Jiri Olsaa6e52812015-10-25 15:51:37 +01002989static struct event_update_event *
2990event_update_event__new(size_t size, u64 type, u64 id)
2991{
2992 struct event_update_event *ev;
2993
2994 size += sizeof(*ev);
2995 size = PERF_ALIGN(size, sizeof(u64));
2996
2997 ev = zalloc(size);
2998 if (ev) {
2999 ev->header.type = PERF_RECORD_EVENT_UPDATE;
3000 ev->header.size = (u16)size;
3001 ev->type = type;
3002 ev->id = id;
3003 }
3004 return ev;
3005}
3006
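/* Synthesize an event update carrying the evsel's unit string. */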
3007int
3008perf_event__synthesize_event_update_unit(struct perf_tool *tool,
3009 struct perf_evsel *evsel,
3010 perf_event__handler_t process)
3011{
3012 struct event_update_event *ev;
3013 size_t size = strlen(evsel->unit);
3014 int err;
3015
3016 ev = event_update_event__new(size + 1, PERF_EVENT_UPDATE__UNIT, evsel->id[0]);
3017 if (ev == NULL)
3018 return -ENOMEM;
3019
3020 strncpy(ev->data, evsel->unit, size);
3021 err = process(tool, (union perf_event *)ev, NULL, NULL);
3022 free(ev);
3023 return err;
3024}
3025
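/* Synthesize an event update carrying the evsel's scale factor. */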
Jiri Olsadaeecbc2015-10-25 15:51:38 +01003026int
3027perf_event__synthesize_event_update_scale(struct perf_tool *tool,
3028 struct perf_evsel *evsel,
3029 perf_event__handler_t process)
3030{
3031 struct event_update_event *ev;
3032 struct event_update_event_scale *ev_data;
3033 int err;
3034
3035 ev = event_update_event__new(sizeof(*ev_data), PERF_EVENT_UPDATE__SCALE, evsel->id[0]);
3036 if (ev == NULL)
3037 return -ENOMEM;
3038
3039 ev_data = (struct event_update_event_scale *) ev->data;
3040 ev_data->scale = evsel->scale;
3041 err = process(tool, (union perf_event*) ev, NULL, NULL);
3042 free(ev);
3043 return err;
3044}
3045
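/* Synthesize an event update carrying the evsel's name. */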
Jiri Olsa802c9042015-10-25 15:51:39 +01003046int
3047perf_event__synthesize_event_update_name(struct perf_tool *tool,
3048 struct perf_evsel *evsel,
3049 perf_event__handler_t process)
3050{
3051 struct event_update_event *ev;
3052 size_t len = strlen(evsel->name);
3053 int err;
3054
3055 ev = event_update_event__new(len + 1, PERF_EVENT_UPDATE__NAME, evsel->id[0]);
3056 if (ev == NULL)
3057 return -ENOMEM;
3058
3059 strncpy(ev->data, evsel->name, len);
3060 err = process(tool, (union perf_event*) ev, NULL, NULL);
3061 free(ev);
3062 return err;
3063}
Jiri Olsadaeecbc2015-10-25 15:51:38 +01003064
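/* Synthesize an event update carrying the evsel's own cpu map, if it has one. */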
Jiri Olsa86ebb092015-10-25 15:51:40 +01003065int
3066perf_event__synthesize_event_update_cpus(struct perf_tool *tool,
3067 struct perf_evsel *evsel,
3068 perf_event__handler_t process)
3069{
3070 size_t size = sizeof(struct event_update_event);
3071 struct event_update_event *ev;
3072 int max, err;
3073 u16 type;
3074
3075 if (!evsel->own_cpus)
3076 return 0;
3077
3078 ev = cpu_map_data__alloc(evsel->own_cpus, &size, &type, &max);
3079 if (!ev)
3080 return -ENOMEM;
3081
3082 ev->header.type = PERF_RECORD_EVENT_UPDATE;
3083 ev->header.size = (u16)size;
3084 ev->type = PERF_EVENT_UPDATE__CPUS;
3085 ev->id = evsel->id[0];
3086
3087 cpu_map_data__synthesize((struct cpu_map_data *) ev->data,
3088 evsel->own_cpus,
3089 type, max);
3090
3091 err = process(tool, (union perf_event*) ev, NULL, NULL);
3092 free(ev);
3093 return err;
3094}
3095
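/*
 * Pretty-print a PERF_RECORD_EVENT_UPDATE event to 'fp', returning the
 * number of characters written.
 */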
Jiri Olsac853f932015-10-25 15:51:41 +01003096size_t perf_event__fprintf_event_update(union perf_event *event, FILE *fp)
3097{
3098 struct event_update_event *ev = &event->event_update;
3099 struct event_update_event_scale *ev_scale;
3100 struct event_update_event_cpus *ev_cpus;
3101 struct cpu_map *map;
3102 size_t ret;
3103
3104 ret = fprintf(fp, "\n... id: %" PRIu64 "\n", ev->id);
3105
3106 switch (ev->type) {
3107 case PERF_EVENT_UPDATE__SCALE:
3108 ev_scale = (struct event_update_event_scale *) ev->data;
3109 ret += fprintf(fp, "... scale: %f\n", ev_scale->scale);
3110 break;
3111 case PERF_EVENT_UPDATE__UNIT:
3112 ret += fprintf(fp, "... unit: %s\n", ev->data);
3113 break;
3114 case PERF_EVENT_UPDATE__NAME:
3115 ret += fprintf(fp, "... name: %s\n", ev->data);
3116 break;
3117 case PERF_EVENT_UPDATE__CPUS:
3118 ev_cpus = (struct event_update_event_cpus *) ev->data;
3119 ret += fprintf(fp, "... ");
3120
3121 map = cpu_map__new_data(&ev_cpus->cpus);
3122 if (map)
3123 ret += cpu_map__fprintf(map, fp);
3124 else
3125 ret += fprintf(fp, "failed to get cpus\n");
3126 break;
3127 default:
3128 ret += fprintf(fp, "... unknown type\n");
3129 break;
3130 }
3131
3132 return ret;
3133}
Jiri Olsa86ebb092015-10-25 15:51:40 +01003134
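/*
 * Synthesize one PERF_RECORD_HEADER_ATTR event per evsel in the session's
 * evlist, stopping at the first synthesis error.  A pipe-mode writer would
 * call it along the lines of (sketch, 'process_event' standing in for
 * whatever perf_event__handler_t the caller uses):
 *
 *	err = perf_event__synthesize_attrs(tool, session, process_event);
 */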
Arnaldo Carvalho de Melo45694aa2011-11-28 08:30:20 -02003135int perf_event__synthesize_attrs(struct perf_tool *tool,
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02003136 struct perf_session *session,
Arnaldo Carvalho de Meloa91e5432011-03-10 11:15:54 -03003137 perf_event__handler_t process)
Tom Zanussi2c46dbb2010-04-01 23:59:19 -05003138{
Robert Richter6606f872012-08-16 21:10:19 +02003139 struct perf_evsel *evsel;
Arnaldo Carvalho de Meloa91e5432011-03-10 11:15:54 -03003140 int err = 0;
Tom Zanussi2c46dbb2010-04-01 23:59:19 -05003141
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03003142 evlist__for_each_entry(session->evlist, evsel) {
Robert Richter6606f872012-08-16 21:10:19 +02003143 err = perf_event__synthesize_attr(tool, &evsel->attr, evsel->ids,
3144 evsel->id, process);
Tom Zanussi2c46dbb2010-04-01 23:59:19 -05003145 if (err) {
3146 pr_debug("failed to create perf header attribute\n");
3147 return err;
3148 }
3149 }
3150
3151 return err;
3152}
3153
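/*
 * Handle a PERF_RECORD_HEADER_ATTR event read from a pipe: create the
 * evlist on first use, add an evsel for the attr and register its ids.
 */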
Adrian Hunter47c3d102013-07-04 16:20:21 +03003154int perf_event__process_attr(struct perf_tool *tool __maybe_unused,
3155 union perf_event *event,
Arnaldo Carvalho de Melo10d0f082011-11-11 22:45:41 -02003156 struct perf_evlist **pevlist)
Tom Zanussi2c46dbb2010-04-01 23:59:19 -05003157{
Robert Richterf4d83432012-08-16 21:10:17 +02003158 u32 i, ids, n_ids;
Arnaldo Carvalho de Meloa91e5432011-03-10 11:15:54 -03003159 struct perf_evsel *evsel;
Arnaldo Carvalho de Melo10d0f082011-11-11 22:45:41 -02003160 struct perf_evlist *evlist = *pevlist;
Tom Zanussi2c46dbb2010-04-01 23:59:19 -05003161
Arnaldo Carvalho de Melo10d0f082011-11-11 22:45:41 -02003162 if (evlist == NULL) {
Namhyung Kim334fe7a2013-03-11 16:43:12 +09003163 *pevlist = evlist = perf_evlist__new();
Arnaldo Carvalho de Melo10d0f082011-11-11 22:45:41 -02003164 if (evlist == NULL)
Tom Zanussi2c46dbb2010-04-01 23:59:19 -05003165 return -ENOMEM;
Tom Zanussi2c46dbb2010-04-01 23:59:19 -05003166 }
3167
Arnaldo Carvalho de Meloef503832013-11-07 16:41:19 -03003168 evsel = perf_evsel__new(&event->attr.attr);
Arnaldo Carvalho de Meloa91e5432011-03-10 11:15:54 -03003169 if (evsel == NULL)
Tom Zanussi2c46dbb2010-04-01 23:59:19 -05003170 return -ENOMEM;
Tom Zanussi2c46dbb2010-04-01 23:59:19 -05003171
Arnaldo Carvalho de Melo10d0f082011-11-11 22:45:41 -02003172 perf_evlist__add(evlist, evsel);
Arnaldo Carvalho de Meloa91e5432011-03-10 11:15:54 -03003173
Arnaldo Carvalho de Melo8115d602011-01-29 14:01:45 -02003174 ids = event->header.size;
3175 ids -= (void *)&event->attr.id - (void *)event;
Tom Zanussi2c46dbb2010-04-01 23:59:19 -05003176 n_ids = ids / sizeof(u64);
Arnaldo Carvalho de Meloa91e5432011-03-10 11:15:54 -03003177 /*
3178	 * We don't have the cpu and thread maps in the header, so
3179	 * for allocating the perf_sample_id table we fake 1 cpu and
3180	 * n_ids threads.
3181 */
3182 if (perf_evsel__alloc_id(evsel, 1, n_ids))
3183 return -ENOMEM;
Tom Zanussi2c46dbb2010-04-01 23:59:19 -05003184
3185 for (i = 0; i < n_ids; i++) {
Arnaldo Carvalho de Melo10d0f082011-11-11 22:45:41 -02003186 perf_evlist__id_add(evlist, evsel, 0, i, event->attr.id[i]);
Tom Zanussi2c46dbb2010-04-01 23:59:19 -05003187 }
3188
Adrian Hunter7e0d6fc2013-07-04 16:20:29 +03003189 symbol_conf.nr_events = evlist->nr_entries;
3190
Tom Zanussi2c46dbb2010-04-01 23:59:19 -05003191 return 0;
3192}
Tom Zanussicd19a032010-04-01 23:59:20 -05003193
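/*
 * Handle a PERF_RECORD_EVENT_UPDATE event: look up the evsel by id and
 * update its unit, name, scale or own cpu map according to ev->type.
 */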
Jiri Olsaffe777252015-10-25 15:51:36 +01003194int perf_event__process_event_update(struct perf_tool *tool __maybe_unused,
3195 union perf_event *event,
3196 struct perf_evlist **pevlist)
3197{
3198 struct event_update_event *ev = &event->event_update;
Jiri Olsadaeecbc2015-10-25 15:51:38 +01003199 struct event_update_event_scale *ev_scale;
Jiri Olsa86ebb092015-10-25 15:51:40 +01003200 struct event_update_event_cpus *ev_cpus;
Jiri Olsaffe777252015-10-25 15:51:36 +01003201 struct perf_evlist *evlist;
3202 struct perf_evsel *evsel;
Jiri Olsa86ebb092015-10-25 15:51:40 +01003203 struct cpu_map *map;
Jiri Olsaffe777252015-10-25 15:51:36 +01003204
3205 if (!pevlist || *pevlist == NULL)
3206 return -EINVAL;
3207
3208 evlist = *pevlist;
3209
3210 evsel = perf_evlist__id2evsel(evlist, ev->id);
3211 if (evsel == NULL)
3212 return -EINVAL;
3213
Jiri Olsaa6e52812015-10-25 15:51:37 +01003214 switch (ev->type) {
3215 case PERF_EVENT_UPDATE__UNIT:
3216 evsel->unit = strdup(ev->data);
Jiri Olsadaeecbc2015-10-25 15:51:38 +01003217 break;
Jiri Olsa802c9042015-10-25 15:51:39 +01003218 case PERF_EVENT_UPDATE__NAME:
3219 evsel->name = strdup(ev->data);
3220 break;
Jiri Olsadaeecbc2015-10-25 15:51:38 +01003221 case PERF_EVENT_UPDATE__SCALE:
3222 ev_scale = (struct event_update_event_scale *) ev->data;
3223 evsel->scale = ev_scale->scale;
Arnaldo Carvalho de Melo8434a2e2017-02-08 21:57:22 -03003224 break;
Jiri Olsa86ebb092015-10-25 15:51:40 +01003225 case PERF_EVENT_UPDATE__CPUS:
3226 ev_cpus = (struct event_update_event_cpus *) ev->data;
3227
3228 map = cpu_map__new_data(&ev_cpus->cpus);
3229 if (map)
3230 evsel->own_cpus = map;
3231 else
3232			pr_err("failed to get event_update cpus\n");
		break;
Jiri Olsaa6e52812015-10-25 15:51:37 +01003233 default:
3234 break;
3235 }
3236
Jiri Olsaffe777252015-10-25 15:51:36 +01003237 return 0;
3238}
3239
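/*
 * Synthesize a PERF_RECORD_HEADER_TRACING_DATA event for a pipe: emit the
 * event with the padded data size, then stream the tracing data itself.
 * Returns the number of tracing data bytes written, including padding.
 */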
Arnaldo Carvalho de Melo45694aa2011-11-28 08:30:20 -02003240int perf_event__synthesize_tracing_data(struct perf_tool *tool, int fd,
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02003241 struct perf_evlist *evlist,
Arnaldo Carvalho de Melo743eb862011-11-28 07:56:39 -02003242 perf_event__handler_t process)
Tom Zanussi92155452010-04-01 23:59:21 -05003243{
Arnaldo Carvalho de Melo8115d602011-01-29 14:01:45 -02003244 union perf_event ev;
Jiri Olsa29208e52011-10-20 15:59:43 +02003245 struct tracing_data *tdata;
Tom Zanussi92155452010-04-01 23:59:21 -05003246 ssize_t size = 0, aligned_size = 0, padding;
Irina Tirdea1d037ca2012-09-11 01:15:03 +03003247 int err __maybe_unused = 0;
Tom Zanussi92155452010-04-01 23:59:21 -05003248
Jiri Olsa29208e52011-10-20 15:59:43 +02003249 /*
3250 * We are going to store the size of the data followed
3251 * by the data contents. Since the fd descriptor is a pipe,
3252 * we cannot seek back to store the size of the data once
3253 * we know it. Instead we:
3254 *
3255 * - write the tracing data to the temp file
3256 * - get/write the data size to pipe
3257 * - write the tracing data from the temp file
3258 * to the pipe
3259 */
3260 tdata = tracing_data_get(&evlist->entries, fd, true);
3261 if (!tdata)
3262 return -1;
3263
Tom Zanussi92155452010-04-01 23:59:21 -05003264 memset(&ev, 0, sizeof(ev));
3265
3266 ev.tracing_data.header.type = PERF_RECORD_HEADER_TRACING_DATA;
Jiri Olsa29208e52011-10-20 15:59:43 +02003267 size = tdata->size;
Irina Tirdea9ac3e482012-09-11 01:15:01 +03003268 aligned_size = PERF_ALIGN(size, sizeof(u64));
Tom Zanussi92155452010-04-01 23:59:21 -05003269 padding = aligned_size - size;
3270 ev.tracing_data.header.size = sizeof(ev.tracing_data);
3271 ev.tracing_data.size = aligned_size;
3272
Arnaldo Carvalho de Melo45694aa2011-11-28 08:30:20 -02003273 process(tool, &ev, NULL, NULL);
Tom Zanussi92155452010-04-01 23:59:21 -05003274
Jiri Olsa29208e52011-10-20 15:59:43 +02003275 /*
3276 * The put function will copy all the tracing data
3277 * stored in temp file to the pipe.
3278 */
3279 tracing_data_put(tdata);
3280
Tom Zanussi92155452010-04-01 23:59:21 -05003281 write_padded(fd, NULL, 0, padding);
3282
3283 return aligned_size;
3284}
3285
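/*
 * Read the tracing data that follows a PERF_RECORD_HEADER_TRACING_DATA
 * event, repiping it when requested, and prepare the tracepoint events in
 * the session's evlist.  Returns the number of bytes consumed.
 */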
Adrian Hunter47c3d102013-07-04 16:20:21 +03003286int perf_event__process_tracing_data(struct perf_tool *tool __maybe_unused,
3287 union perf_event *event,
Arnaldo Carvalho de Melo8115d602011-01-29 14:01:45 -02003288 struct perf_session *session)
Tom Zanussi92155452010-04-01 23:59:21 -05003289{
Arnaldo Carvalho de Melo8115d602011-01-29 14:01:45 -02003290 ssize_t size_read, padding, size = event->tracing_data.size;
Jiri Olsacc9784bd2013-10-15 16:27:34 +02003291 int fd = perf_data_file__fd(session->file);
3292 off_t offset = lseek(fd, 0, SEEK_CUR);
Tom Zanussi92155452010-04-01 23:59:21 -05003293 char buf[BUFSIZ];
3294
3295 /* setup for reading amidst mmap */
Jiri Olsacc9784bd2013-10-15 16:27:34 +02003296 lseek(fd, offset + sizeof(struct tracing_data_event),
Tom Zanussi92155452010-04-01 23:59:21 -05003297 SEEK_SET);
3298
Jiri Olsa29f5ffd2013-12-03 14:09:23 +01003299 size_read = trace_report(fd, &session->tevent,
Arnaldo Carvalho de Meloda378962012-06-27 13:08:42 -03003300 session->repipe);
Irina Tirdea9ac3e482012-09-11 01:15:01 +03003301 padding = PERF_ALIGN(size_read, sizeof(u64)) - size_read;
Tom Zanussi92155452010-04-01 23:59:21 -05003302
Jiri Olsacc9784bd2013-10-15 16:27:34 +02003303 if (readn(fd, buf, padding) < 0) {
Arnaldo Carvalho de Melo2caa48a2013-01-24 22:34:33 -03003304		pr_err("%s: failed to read input file\n", __func__);
3305 return -1;
3306 }
Tom Zanussi454c4072010-05-01 01:41:20 -05003307 if (session->repipe) {
3308 int retw = write(STDOUT_FILENO, buf, padding);
Arnaldo Carvalho de Melo2caa48a2013-01-24 22:34:33 -03003309 if (retw <= 0 || retw != padding) {
3310			pr_err("%s: failed to repipe tracing data padding\n", __func__);
3311 return -1;
3312 }
Tom Zanussi454c4072010-05-01 01:41:20 -05003313 }
Tom Zanussi92155452010-04-01 23:59:21 -05003314
Arnaldo Carvalho de Melo2caa48a2013-01-24 22:34:33 -03003315 if (size_read + padding != size) {
3316		pr_err("%s: tracing data size mismatch\n", __func__);
3317 return -1;
3318 }
Tom Zanussi92155452010-04-01 23:59:21 -05003319
Namhyung Kim831394b2012-09-06 11:10:46 +09003320 perf_evlist__prepare_tracepoint_events(session->evlist,
Jiri Olsa29f5ffd2013-12-03 14:09:23 +01003321 session->tevent.pevent);
Arnaldo Carvalho de Melo8b6ee4c2012-08-07 23:36:16 -03003322
Tom Zanussi92155452010-04-01 23:59:21 -05003323 return size_read + padding;
3324}
Tom Zanussic7929e42010-04-01 23:59:22 -05003325
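/*
 * Synthesize a PERF_RECORD_HEADER_BUILD_ID event for a dso that was hit
 * during the session, carrying its build id and long name.
 */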
Arnaldo Carvalho de Melo45694aa2011-11-28 08:30:20 -02003326int perf_event__synthesize_build_id(struct perf_tool *tool,
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02003327 struct dso *pos, u16 misc,
Arnaldo Carvalho de Melo8115d602011-01-29 14:01:45 -02003328 perf_event__handler_t process,
Arnaldo Carvalho de Melo743eb862011-11-28 07:56:39 -02003329 struct machine *machine)
Tom Zanussic7929e42010-04-01 23:59:22 -05003330{
Arnaldo Carvalho de Melo8115d602011-01-29 14:01:45 -02003331 union perf_event ev;
Tom Zanussic7929e42010-04-01 23:59:22 -05003332 size_t len;
3333 int err = 0;
3334
3335 if (!pos->hit)
3336 return err;
3337
3338 memset(&ev, 0, sizeof(ev));
3339
3340 len = pos->long_name_len + 1;
Irina Tirdea9ac3e482012-09-11 01:15:01 +03003341 len = PERF_ALIGN(len, NAME_ALIGN);
Tom Zanussic7929e42010-04-01 23:59:22 -05003342 memcpy(&ev.build_id.build_id, pos->build_id, sizeof(pos->build_id));
3343 ev.build_id.header.type = PERF_RECORD_HEADER_BUILD_ID;
3344 ev.build_id.header.misc = misc;
Arnaldo Carvalho de Melo23346f22010-04-27 21:17:50 -03003345 ev.build_id.pid = machine->pid;
Tom Zanussic7929e42010-04-01 23:59:22 -05003346 ev.build_id.header.size = sizeof(ev.build_id) + len;
3347 memcpy(&ev.build_id.filename, pos->long_name, pos->long_name_len);
3348
Arnaldo Carvalho de Melo45694aa2011-11-28 08:30:20 -02003349 err = process(tool, &ev, NULL, machine);
Tom Zanussic7929e42010-04-01 23:59:22 -05003350
3351 return err;
3352}
3353
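/*
 * Handle a PERF_RECORD_HEADER_BUILD_ID event by passing the embedded
 * build id and filename on to __event_process_build_id().
 */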
Irina Tirdea1d037ca2012-09-11 01:15:03 +03003354int perf_event__process_build_id(struct perf_tool *tool __maybe_unused,
Arnaldo Carvalho de Melod20deb62011-11-25 08:19:45 -02003355 union perf_event *event,
Arnaldo Carvalho de Melo8115d602011-01-29 14:01:45 -02003356 struct perf_session *session)
Tom Zanussic7929e42010-04-01 23:59:22 -05003357{
Arnaldo Carvalho de Melo8115d602011-01-29 14:01:45 -02003358 __event_process_build_id(&event->build_id,
3359 event->build_id.filename,
Zhang, Yanmina1645ce2010-04-19 13:32:50 +08003360 session);
Tom Zanussic7929e42010-04-01 23:59:22 -05003361 return 0;
3362}