#include <errno.h>
#include <inttypes.h>
#include "util.h"
#include "string2.h"
#include <sys/param.h>
#include <sys/types.h>
#include <byteswap.h>
#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/utsname.h>
#include <unistd.h>

#include "evlist.h"
#include "evsel.h"
#include "header.h"
#include "memswap.h"
#include "../perf.h"
#include "trace-event.h"
#include "session.h"
#include "symbol.h"
#include "debug.h"
#include "cpumap.h"
#include "pmu.h"
#include "vdso.h"
#include "strbuf.h"
#include "build-id.h"
#include "data.h"
#include <api/fs/fs.h>
#include "asm/bug.h"

#include "sane_ctype.h"

/*
 * magic2 = "PERFILE2"
 * must be a numerical value so that the endianness
 * determines the memory layout. That way we are able
 * to detect the endianness when reading the perf.data
 * file back.
 *
 * We also check for the legacy (PERFFILE) format.
 */
static const char *__perf_magic1 = "PERFFILE";
static const u64 __perf_magic2    = 0x32454c4946524550ULL;
static const u64 __perf_magic2_sw = 0x50455246494c4532ULL;

#define PERF_MAGIC	__perf_magic2

const char perf_version_string[] = PERF_VERSION;

struct perf_file_attr {
	struct perf_event_attr	attr;
	struct perf_file_section	ids;
};

void perf_header__set_feat(struct perf_header *header, int feat)
{
	set_bit(feat, header->adds_features);
}

void perf_header__clear_feat(struct perf_header *header, int feat)
{
	clear_bit(feat, header->adds_features);
}

bool perf_header__has_feat(const struct perf_header *header, int feat)
{
	return test_bit(feat, header->adds_features);
}

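/*
 * Write the whole buffer to fd, retrying on short writes until all of
 * it has been accepted. Returns 0 on success or -errno on failure.
 */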
static int do_write(int fd, const void *buf, size_t size)
{
	while (size) {
		int ret = write(fd, buf, size);

		if (ret < 0)
			return -errno;

		size -= ret;
		buf += ret;
	}

	return 0;
}

int write_padded(int fd, const void *bf, size_t count, size_t count_aligned)
{
	static const char zero_buf[NAME_ALIGN];
	int err = do_write(fd, bf, count);

	if (!err)
		err = do_write(fd, zero_buf, count_aligned - count);

	return err;
}

#define string_size(str)						\
	(PERF_ALIGN((strlen(str) + 1), NAME_ALIGN) + sizeof(u32))

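/*
 * On-file string layout: a u32 length (strlen + 1, rounded up to
 * NAME_ALIGN) followed by the string itself, zero-padded up to that
 * length.
 */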
static int do_write_string(int fd, const char *str)
{
	u32 len, olen;
	int ret;

	olen = strlen(str) + 1;
	len = PERF_ALIGN(olen, NAME_ALIGN);

	/* write len, incl. \0 */
	ret = do_write(fd, &len, sizeof(len));
	if (ret < 0)
		return ret;

	return write_padded(fd, str, olen, len);
}

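/*
 * Counterpart of do_write_string(): reads the u32 length (byte-swapping
 * it if the file endianness differs), then the zero-padded string.
 * Returns a malloc'd buffer the caller must free, or NULL on error.
 */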
static char *do_read_string(int fd, struct perf_header *ph)
{
	ssize_t sz, ret;
	u32 len;
	char *buf;

	sz = readn(fd, &len, sizeof(len));
	if (sz < (ssize_t)sizeof(len))
		return NULL;

	if (ph->needs_swap)
		len = bswap_32(len);

	buf = malloc(len);
	if (!buf)
		return NULL;

	ret = readn(fd, buf, len);
	if (ret == (ssize_t)len) {
		/*
		 * strings are padded by zeroes
		 * thus the actual strlen of buf
		 * may be less than len
		 */
		return buf;
	}

	free(buf);
	return NULL;
}

static int write_tracing_data(int fd, struct perf_header *h __maybe_unused,
			      struct perf_evlist *evlist)
{
	return read_tracing_data(fd, &evlist->entries);
}

static int write_build_id(int fd, struct perf_header *h,
			  struct perf_evlist *evlist __maybe_unused)
{
	struct perf_session *session;
	int err;

	session = container_of(h, struct perf_session, header);

	if (!perf_session__read_build_ids(session, true))
		return -1;

	err = perf_session__write_buildid_table(session, fd);
	if (err < 0) {
		pr_debug("failed to write buildid table\n");
		return err;
	}
	perf_session__cache_build_ids(session);

	return 0;
}

static int write_hostname(int fd, struct perf_header *h __maybe_unused,
			  struct perf_evlist *evlist __maybe_unused)
{
	struct utsname uts;
	int ret;

	ret = uname(&uts);
	if (ret < 0)
		return -1;

	return do_write_string(fd, uts.nodename);
}

static int write_osrelease(int fd, struct perf_header *h __maybe_unused,
			   struct perf_evlist *evlist __maybe_unused)
{
	struct utsname uts;
	int ret;

	ret = uname(&uts);
	if (ret < 0)
		return -1;

	return do_write_string(fd, uts.release);
}

static int write_arch(int fd, struct perf_header *h __maybe_unused,
		      struct perf_evlist *evlist __maybe_unused)
{
	struct utsname uts;
	int ret;

	ret = uname(&uts);
	if (ret < 0)
		return -1;

	return do_write_string(fd, uts.machine);
}

static int write_version(int fd, struct perf_header *h __maybe_unused,
			 struct perf_evlist *evlist __maybe_unused)
{
	return do_write_string(fd, perf_version_string);
}

static int __write_cpudesc(int fd, const char *cpuinfo_proc)
{
	FILE *file;
	char *buf = NULL;
	char *s, *p;
	const char *search = cpuinfo_proc;
	size_t len = 0;
	int ret = -1;

	if (!search)
		return -1;

	file = fopen("/proc/cpuinfo", "r");
	if (!file)
		return -1;

	while (getline(&buf, &len, file) > 0) {
		ret = strncmp(buf, search, strlen(search));
		if (!ret)
			break;
	}

	if (ret) {
		ret = -1;
		goto done;
	}

	s = buf;

	p = strchr(buf, ':');
	if (p && *(p+1) == ' ' && *(p+2))
		s = p + 2;
	p = strchr(s, '\n');
	if (p)
		*p = '\0';

	/* squash extra space characters (branding string) */
	p = s;
	while (*p) {
		if (isspace(*p)) {
			char *r = p + 1;
			char *q = r;
			*p = ' ';
			while (*q && isspace(*q))
				q++;
			if (q != (p+1))
				while ((*r++ = *q++));
		}
		p++;
	}
	ret = do_write_string(fd, s);
done:
	free(buf);
	fclose(file);
	return ret;
}

static int write_cpudesc(int fd, struct perf_header *h __maybe_unused,
			 struct perf_evlist *evlist __maybe_unused)
{
#ifndef CPUINFO_PROC
#define CPUINFO_PROC	{"model name", }
#endif
	const char *cpuinfo_procs[] = CPUINFO_PROC;
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(cpuinfo_procs); i++) {
		int ret;
		ret = __write_cpudesc(fd, cpuinfo_procs[i]);
		if (ret >= 0)
			return ret;
	}
	return -1;
}

static int write_nrcpus(int fd, struct perf_header *h __maybe_unused,
			struct perf_evlist *evlist __maybe_unused)
{
	long nr;
	u32 nrc, nra;
	int ret;

	nrc = cpu__max_present_cpu();

	nr = sysconf(_SC_NPROCESSORS_ONLN);
	if (nr < 0)
		return -1;

	nra = (u32)(nr & UINT_MAX);

	ret = do_write(fd, &nrc, sizeof(nrc));
	if (ret < 0)
		return ret;

	return do_write(fd, &nra, sizeof(nra));
}

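/*
 * EVENT_DESC layout: number of events (u32), on-file size of
 * perf_event_attr (u32), then for each event: the attr struct, the
 * number of ids (u32), the event name string and the array of u64 ids.
 */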
static int write_event_desc(int fd, struct perf_header *h __maybe_unused,
			    struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	u32 nre, nri, sz;
	int ret;

	nre = evlist->nr_entries;

	/*
	 * write number of events
	 */
	ret = do_write(fd, &nre, sizeof(nre));
	if (ret < 0)
		return ret;

	/*
	 * size of perf_event_attr struct
	 */
	sz = (u32)sizeof(evsel->attr);
	ret = do_write(fd, &sz, sizeof(sz));
	if (ret < 0)
		return ret;

	evlist__for_each_entry(evlist, evsel) {
		ret = do_write(fd, &evsel->attr, sz);
		if (ret < 0)
			return ret;
		/*
		 * write number of unique id per event
		 * there is one id per instance of an event
		 *
		 * copy into an nri to be independent of the
		 * type of ids,
		 */
		nri = evsel->ids;
		ret = do_write(fd, &nri, sizeof(nri));
		if (ret < 0)
			return ret;

		/*
		 * write event string as passed on cmdline
		 */
		ret = do_write_string(fd, perf_evsel__name(evsel));
		if (ret < 0)
			return ret;
		/*
		 * write unique ids for this event
		 */
		ret = do_write(fd, evsel->id, evsel->ids * sizeof(u64));
		if (ret < 0)
			return ret;
	}
	return 0;
}

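/*
 * CMDLINE layout: the number of strings (argv entries plus one for the
 * resolved perf binary path), the path read from /proc/self/exe, then
 * each command line argument as a string.
 */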
static int write_cmdline(int fd, struct perf_header *h __maybe_unused,
			 struct perf_evlist *evlist __maybe_unused)
{
	char buf[MAXPATHLEN];
	u32 n;
	int i, ret;

	/* actual path to perf binary */
	ret = readlink("/proc/self/exe", buf, sizeof(buf) - 1);
	if (ret <= 0)
		return -1;

	/* readlink() does not add null termination */
	buf[ret] = '\0';

	/* account for binary path */
	n = perf_env.nr_cmdline + 1;

	ret = do_write(fd, &n, sizeof(n));
	if (ret < 0)
		return ret;

	ret = do_write_string(fd, buf);
	if (ret < 0)
		return ret;

	for (i = 0; i < perf_env.nr_cmdline; i++) {
		ret = do_write_string(fd, perf_env.cmdline_argv[i]);
		if (ret < 0)
			return ret;
	}
	return 0;
}

#define CORE_SIB_FMT \
	"/sys/devices/system/cpu/cpu%d/topology/core_siblings_list"
#define THRD_SIB_FMT \
	"/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list"

struct cpu_topo {
	u32 cpu_nr;
	u32 core_sib;
	u32 thread_sib;
	char **core_siblings;
	char **thread_siblings;
};

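/*
 * Read the core and thread sibling lists for @cpu from sysfs and add
 * each list string to @tp, skipping strings already collected from
 * another CPU.
 */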
static int build_cpu_topo(struct cpu_topo *tp, int cpu)
{
	FILE *fp;
	char filename[MAXPATHLEN];
	char *buf = NULL, *p;
	size_t len = 0;
	ssize_t sret;
	u32 i = 0;
	int ret = -1;

	sprintf(filename, CORE_SIB_FMT, cpu);
	fp = fopen(filename, "r");
	if (!fp)
		goto try_threads;

	sret = getline(&buf, &len, fp);
	fclose(fp);
	if (sret <= 0)
		goto try_threads;

	p = strchr(buf, '\n');
	if (p)
		*p = '\0';

	for (i = 0; i < tp->core_sib; i++) {
		if (!strcmp(buf, tp->core_siblings[i]))
			break;
	}
	if (i == tp->core_sib) {
		tp->core_siblings[i] = buf;
		tp->core_sib++;
		buf = NULL;
		len = 0;
	}
	ret = 0;

try_threads:
	sprintf(filename, THRD_SIB_FMT, cpu);
	fp = fopen(filename, "r");
	if (!fp)
		goto done;

	if (getline(&buf, &len, fp) <= 0)
		goto done;

	p = strchr(buf, '\n');
	if (p)
		*p = '\0';

	for (i = 0; i < tp->thread_sib; i++) {
		if (!strcmp(buf, tp->thread_siblings[i]))
			break;
	}
	if (i == tp->thread_sib) {
		tp->thread_siblings[i] = buf;
		tp->thread_sib++;
		buf = NULL;
	}
	ret = 0;
done:
	if (fp)
		fclose(fp);
	free(buf);
	return ret;
}

static void free_cpu_topo(struct cpu_topo *tp)
{
	u32 i;

	if (!tp)
		return;

	for (i = 0; i < tp->core_sib; i++)
		zfree(&tp->core_siblings[i]);

	for (i = 0; i < tp->thread_sib; i++)
		zfree(&tp->thread_siblings[i]);

	free(tp);
}

static struct cpu_topo *build_cpu_topology(void)
{
	struct cpu_topo *tp = NULL;
	void *addr;
	u32 nr, i;
	size_t sz;
	long ncpus;
	int ret = -1;
	struct cpu_map *map;

	ncpus = cpu__max_present_cpu();

	/* build online CPU map */
	map = cpu_map__new(NULL);
	if (map == NULL) {
		pr_debug("failed to get system cpumap\n");
		return NULL;
	}

	nr = (u32)(ncpus & UINT_MAX);

	sz = nr * sizeof(char *);
	addr = calloc(1, sizeof(*tp) + 2 * sz);
	if (!addr)
		goto out_free;

	tp = addr;
	tp->cpu_nr = nr;
	addr += sizeof(*tp);
	tp->core_siblings = addr;
	addr += sz;
	tp->thread_siblings = addr;

	for (i = 0; i < nr; i++) {
		if (!cpu_map__has(map, i))
			continue;

		ret = build_cpu_topo(tp, i);
		if (ret < 0)
			break;
	}

out_free:
	cpu_map__put(map);
	if (ret) {
		free_cpu_topo(tp);
		tp = NULL;
	}
	return tp;
}

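/*
 * CPU_TOPOLOGY layout: number of core sibling lists (u32) and the list
 * strings, number of thread sibling lists (u32) and the list strings,
 * then the core_id and socket_id of each available CPU.
 */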
static int write_cpu_topology(int fd, struct perf_header *h __maybe_unused,
			      struct perf_evlist *evlist __maybe_unused)
{
	struct cpu_topo *tp;
	u32 i;
	int ret, j;

	tp = build_cpu_topology();
	if (!tp)
		return -1;

	ret = do_write(fd, &tp->core_sib, sizeof(tp->core_sib));
	if (ret < 0)
		goto done;

	for (i = 0; i < tp->core_sib; i++) {
		ret = do_write_string(fd, tp->core_siblings[i]);
		if (ret < 0)
			goto done;
	}
	ret = do_write(fd, &tp->thread_sib, sizeof(tp->thread_sib));
	if (ret < 0)
		goto done;

	for (i = 0; i < tp->thread_sib; i++) {
		ret = do_write_string(fd, tp->thread_siblings[i]);
		if (ret < 0)
			break;
	}

	ret = perf_env__read_cpu_topology_map(&perf_env);
	if (ret < 0)
		goto done;

	for (j = 0; j < perf_env.nr_cpus_avail; j++) {
		ret = do_write(fd, &perf_env.cpu[j].core_id,
			       sizeof(perf_env.cpu[j].core_id));
		if (ret < 0)
			return ret;
		ret = do_write(fd, &perf_env.cpu[j].socket_id,
			       sizeof(perf_env.cpu[j].socket_id));
		if (ret < 0)
			return ret;
	}
done:
	free_cpu_topo(tp);
	return ret;
}

static int write_total_mem(int fd, struct perf_header *h __maybe_unused,
			   struct perf_evlist *evlist __maybe_unused)
{
	char *buf = NULL;
	FILE *fp;
	size_t len = 0;
	int ret = -1, n;
	uint64_t mem;

	fp = fopen("/proc/meminfo", "r");
	if (!fp)
		return -1;

	while (getline(&buf, &len, fp) > 0) {
		ret = strncmp(buf, "MemTotal:", 9);
		if (!ret)
			break;
	}
	if (!ret) {
		n = sscanf(buf, "%*s %"PRIu64, &mem);
		if (n == 1)
			ret = do_write(fd, &mem, sizeof(mem));
	} else
		ret = -1;
	free(buf);
	fclose(fp);
	return ret;
}

static int write_topo_node(int fd, int node)
{
	char str[MAXPATHLEN];
	char field[32];
	char *buf = NULL, *p;
	size_t len = 0;
	FILE *fp;
	u64 mem_total, mem_free, mem;
	int ret = -1;

	sprintf(str, "/sys/devices/system/node/node%d/meminfo", node);
	fp = fopen(str, "r");
	if (!fp)
		return -1;

	while (getline(&buf, &len, fp) > 0) {
		/* skip over invalid lines */
		if (!strchr(buf, ':'))
			continue;
		if (sscanf(buf, "%*s %*d %31s %"PRIu64, field, &mem) != 2)
			goto done;
		if (!strcmp(field, "MemTotal:"))
			mem_total = mem;
		if (!strcmp(field, "MemFree:"))
			mem_free = mem;
	}

	fclose(fp);
	fp = NULL;

	ret = do_write(fd, &mem_total, sizeof(u64));
	if (ret)
		goto done;

	ret = do_write(fd, &mem_free, sizeof(u64));
	if (ret)
		goto done;

	ret = -1;
	sprintf(str, "/sys/devices/system/node/node%d/cpulist", node);

	fp = fopen(str, "r");
	if (!fp)
		goto done;

	if (getline(&buf, &len, fp) <= 0)
		goto done;

	p = strchr(buf, '\n');
	if (p)
		*p = '\0';

	ret = do_write_string(fd, buf);
done:
	free(buf);
	if (fp)
		fclose(fp);
	return ret;
}

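/*
 * NUMA_TOPOLOGY layout: number of online nodes (u32), then for each
 * node: the node number (u32), MemTotal and MemFree (u64, in kB) and
 * the node's cpulist string.
 */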
static int write_numa_topology(int fd, struct perf_header *h __maybe_unused,
			       struct perf_evlist *evlist __maybe_unused)
{
	char *buf = NULL;
	size_t len = 0;
	FILE *fp;
	struct cpu_map *node_map = NULL;
	char *c;
	u32 nr, i, j;
	int ret = -1;

	fp = fopen("/sys/devices/system/node/online", "r");
	if (!fp)
		return -1;

	if (getline(&buf, &len, fp) <= 0)
		goto done;

	c = strchr(buf, '\n');
	if (c)
		*c = '\0';

	node_map = cpu_map__new(buf);
	if (!node_map)
		goto done;

	nr = (u32)node_map->nr;

	ret = do_write(fd, &nr, sizeof(nr));
	if (ret < 0)
		goto done;

	for (i = 0; i < nr; i++) {
		j = (u32)node_map->map[i];
		ret = do_write(fd, &j, sizeof(j));
		if (ret < 0)
			break;

		ret = write_topo_node(fd, i);
		if (ret < 0)
			break;
	}
done:
	free(buf);
	fclose(fp);
	cpu_map__put(node_map);
	return ret;
}

/*
 * File format:
 *
 * struct pmu_mappings {
 *	u32	pmu_num;
 *	struct pmu_map {
 *		u32	type;
 *		char	name[];
 *	}[pmu_num];
 * };
 */

static int write_pmu_mappings(int fd, struct perf_header *h __maybe_unused,
			      struct perf_evlist *evlist __maybe_unused)
{
	struct perf_pmu *pmu = NULL;
	off_t offset = lseek(fd, 0, SEEK_CUR);
	__u32 pmu_num = 0;
	int ret;

	/* write real pmu_num later */
	ret = do_write(fd, &pmu_num, sizeof(pmu_num));
	if (ret < 0)
		return ret;

	while ((pmu = perf_pmu__scan(pmu))) {
		if (!pmu->name)
			continue;
		pmu_num++;

		ret = do_write(fd, &pmu->type, sizeof(pmu->type));
		if (ret < 0)
			return ret;

		ret = do_write_string(fd, pmu->name);
		if (ret < 0)
			return ret;
	}

	if (pwrite(fd, &pmu_num, sizeof(pmu_num), offset) != sizeof(pmu_num)) {
		/* discard all */
		lseek(fd, offset, SEEK_SET);
		return -1;
	}

	return 0;
}

/*
 * File format:
 *
 * struct group_descs {
 *	u32	nr_groups;
 *	struct group_desc {
 *		char	name[];
 *		u32	leader_idx;
 *		u32	nr_members;
 *	}[nr_groups];
 * };
 */
static int write_group_desc(int fd, struct perf_header *h __maybe_unused,
			    struct perf_evlist *evlist)
{
	u32 nr_groups = evlist->nr_groups;
	struct perf_evsel *evsel;
	int ret;

	ret = do_write(fd, &nr_groups, sizeof(nr_groups));
	if (ret < 0)
		return ret;

	evlist__for_each_entry(evlist, evsel) {
		if (perf_evsel__is_group_leader(evsel) &&
		    evsel->nr_members > 1) {
			const char *name = evsel->group_name ?: "{anon_group}";
			u32 leader_idx = evsel->idx;
			u32 nr_members = evsel->nr_members;

			ret = do_write_string(fd, name);
			if (ret < 0)
				return ret;

			ret = do_write(fd, &leader_idx, sizeof(leader_idx));
			if (ret < 0)
				return ret;

			ret = do_write(fd, &nr_members, sizeof(nr_members));
			if (ret < 0)
				return ret;
		}
	}
	return 0;
}

/*
 * default get_cpuid(): nothing gets recorded
 * actual implementation must be in arch/$(SRCARCH)/util/header.c
 */
int __weak get_cpuid(char *buffer __maybe_unused, size_t sz __maybe_unused)
{
	return -1;
}

static int write_cpuid(int fd, struct perf_header *h __maybe_unused,
		       struct perf_evlist *evlist __maybe_unused)
{
	char buffer[64];
	int ret;

	ret = get_cpuid(buffer, sizeof(buffer));
	if (!ret)
		goto write_it;

	return -1;
write_it:
	return do_write_string(fd, buffer);
}

static int write_branch_stack(int fd __maybe_unused,
			      struct perf_header *h __maybe_unused,
			      struct perf_evlist *evlist __maybe_unused)
{
	return 0;
}

static int write_auxtrace(int fd, struct perf_header *h,
			  struct perf_evlist *evlist __maybe_unused)
{
	struct perf_session *session;
	int err;

	session = container_of(h, struct perf_session, header);

	err = auxtrace_index__write(fd, &session->auxtrace_index);
	if (err < 0)
		pr_err("Failed to write auxtrace index\n");
	return err;
}

static int cpu_cache_level__sort(const void *a, const void *b)
{
	struct cpu_cache_level *cache_a = (struct cpu_cache_level *)a;
	struct cpu_cache_level *cache_b = (struct cpu_cache_level *)b;

	return cache_a->level - cache_b->level;
}

static bool cpu_cache_level__cmp(struct cpu_cache_level *a, struct cpu_cache_level *b)
{
	if (a->level != b->level)
		return false;

	if (a->line_size != b->line_size)
		return false;

	if (a->sets != b->sets)
		return false;

	if (a->ways != b->ways)
		return false;

	if (strcmp(a->type, b->type))
		return false;

	if (strcmp(a->size, b->size))
		return false;

	if (strcmp(a->map, b->map))
		return false;

	return true;
}

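/*
 * Fill @cache from sysfs for the given cpu/cache index. Returns 1 if
 * the index does not exist, -1 on read errors and 0 on success; the
 * type, size and map strings are allocated and must be freed by the
 * caller.
 */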
static int cpu_cache_level__read(struct cpu_cache_level *cache, u32 cpu, u16 level)
{
	char path[PATH_MAX], file[PATH_MAX];
	struct stat st;
	size_t len;

	scnprintf(path, PATH_MAX, "devices/system/cpu/cpu%d/cache/index%d/", cpu, level);
	scnprintf(file, PATH_MAX, "%s/%s", sysfs__mountpoint(), path);

	if (stat(file, &st))
		return 1;

	scnprintf(file, PATH_MAX, "%s/level", path);
	if (sysfs__read_int(file, (int *) &cache->level))
		return -1;

	scnprintf(file, PATH_MAX, "%s/coherency_line_size", path);
	if (sysfs__read_int(file, (int *) &cache->line_size))
		return -1;

	scnprintf(file, PATH_MAX, "%s/number_of_sets", path);
	if (sysfs__read_int(file, (int *) &cache->sets))
		return -1;

	scnprintf(file, PATH_MAX, "%s/ways_of_associativity", path);
	if (sysfs__read_int(file, (int *) &cache->ways))
		return -1;

	scnprintf(file, PATH_MAX, "%s/type", path);
	if (sysfs__read_str(file, &cache->type, &len))
		return -1;

	cache->type[len] = 0;
	cache->type = rtrim(cache->type);

	scnprintf(file, PATH_MAX, "%s/size", path);
	if (sysfs__read_str(file, &cache->size, &len)) {
		free(cache->type);
		return -1;
	}

	cache->size[len] = 0;
	cache->size = rtrim(cache->size);

	scnprintf(file, PATH_MAX, "%s/shared_cpu_list", path);
	if (sysfs__read_str(file, &cache->map, &len)) {
		/* the map was not read; release what was allocated so far */
		free(cache->size);
		free(cache->type);
		return -1;
	}

	cache->map[len] = 0;
	cache->map = rtrim(cache->map);
	return 0;
}

static void cpu_cache_level__fprintf(FILE *out, struct cpu_cache_level *c)
{
	fprintf(out, "L%d %-15s %8s [%s]\n", c->level, c->type, c->size, c->map);
}

static int build_caches(struct cpu_cache_level caches[], u32 size, u32 *cntp)
{
	u32 i, cnt = 0;
	long ncpus;
	u32 nr, cpu;
	u16 level;

	ncpus = sysconf(_SC_NPROCESSORS_CONF);
	if (ncpus < 0)
		return -1;

	nr = (u32)(ncpus & UINT_MAX);

	for (cpu = 0; cpu < nr; cpu++) {
		for (level = 0; level < 10; level++) {
			struct cpu_cache_level c;
			int err;

			err = cpu_cache_level__read(&c, cpu, level);
			if (err < 0)
				return err;

			if (err == 1)
				break;

			for (i = 0; i < cnt; i++) {
				if (cpu_cache_level__cmp(&c, &caches[i]))
					break;
			}

			if (i == cnt)
				caches[cnt++] = c;
			else
				cpu_cache_level__free(&c);

			if (WARN_ONCE(cnt == size, "way too many cpu caches.."))
				goto out;
		}
	}
out:
	*cntp = cnt;
	return 0;
}

#define MAX_CACHES 2000

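/*
 * CACHE layout: a version number (u32, currently 1), the number of
 * deduplicated cache levels (u32), then for each level: level,
 * line_size, sets and ways as u32s followed by the type, size and map
 * strings.
 */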
static int write_cache(int fd, struct perf_header *h __maybe_unused,
		       struct perf_evlist *evlist __maybe_unused)
{
	struct cpu_cache_level caches[MAX_CACHES];
	u32 cnt = 0, i, version = 1;
	int ret;

	ret = build_caches(caches, MAX_CACHES, &cnt);
	if (ret)
		goto out;

	qsort(&caches, cnt, sizeof(struct cpu_cache_level), cpu_cache_level__sort);

	ret = do_write(fd, &version, sizeof(u32));
	if (ret < 0)
		goto out;

	ret = do_write(fd, &cnt, sizeof(u32));
	if (ret < 0)
		goto out;

	for (i = 0; i < cnt; i++) {
		struct cpu_cache_level *c = &caches[i];

		#define _W(v)						\
			ret = do_write(fd, &c->v, sizeof(u32));		\
			if (ret < 0)					\
				goto out;

		_W(level)
		_W(line_size)
		_W(sets)
		_W(ways)
		#undef _W

		#define _W(v)						\
			ret = do_write_string(fd, (const char *) c->v);	\
			if (ret < 0)					\
				goto out;

		_W(type)
		_W(size)
		_W(map)
		#undef _W
	}

out:
	for (i = 0; i < cnt; i++)
		cpu_cache_level__free(&caches[i]);
	return ret;
}

static int write_stat(int fd __maybe_unused,
		      struct perf_header *h __maybe_unused,
		      struct perf_evlist *evlist __maybe_unused)
{
	return 0;
}

static void print_hostname(struct perf_header *ph, int fd __maybe_unused,
			   FILE *fp)
{
	fprintf(fp, "# hostname : %s\n", ph->env.hostname);
}

static void print_osrelease(struct perf_header *ph, int fd __maybe_unused,
			    FILE *fp)
{
	fprintf(fp, "# os release : %s\n", ph->env.os_release);
}

static void print_arch(struct perf_header *ph, int fd __maybe_unused, FILE *fp)
{
	fprintf(fp, "# arch : %s\n", ph->env.arch);
}

static void print_cpudesc(struct perf_header *ph, int fd __maybe_unused,
			  FILE *fp)
{
	fprintf(fp, "# cpudesc : %s\n", ph->env.cpu_desc);
}

static void print_nrcpus(struct perf_header *ph, int fd __maybe_unused,
			 FILE *fp)
{
	fprintf(fp, "# nrcpus online : %u\n", ph->env.nr_cpus_online);
	fprintf(fp, "# nrcpus avail : %u\n", ph->env.nr_cpus_avail);
}

static void print_version(struct perf_header *ph, int fd __maybe_unused,
			  FILE *fp)
{
	fprintf(fp, "# perf version : %s\n", ph->env.version);
}

static void print_cmdline(struct perf_header *ph, int fd __maybe_unused,
			  FILE *fp)
{
	int nr, i;

	nr = ph->env.nr_cmdline;

	fprintf(fp, "# cmdline : ");

	for (i = 0; i < nr; i++)
		fprintf(fp, "%s ", ph->env.cmdline_argv[i]);
	fputc('\n', fp);
}

static void print_cpu_topology(struct perf_header *ph, int fd __maybe_unused,
			       FILE *fp)
{
	int nr, i;
	char *str;
	int cpu_nr = ph->env.nr_cpus_avail;

	nr = ph->env.nr_sibling_cores;
	str = ph->env.sibling_cores;

	for (i = 0; i < nr; i++) {
		fprintf(fp, "# sibling cores : %s\n", str);
		str += strlen(str) + 1;
	}

	nr = ph->env.nr_sibling_threads;
	str = ph->env.sibling_threads;

	for (i = 0; i < nr; i++) {
		fprintf(fp, "# sibling threads : %s\n", str);
		str += strlen(str) + 1;
	}

	if (ph->env.cpu != NULL) {
		for (i = 0; i < cpu_nr; i++)
			fprintf(fp, "# CPU %d: Core ID %d, Socket ID %d\n", i,
				ph->env.cpu[i].core_id, ph->env.cpu[i].socket_id);
	} else
		fprintf(fp, "# Core ID and Socket ID information is not available\n");
}

static void free_event_desc(struct perf_evsel *events)
{
	struct perf_evsel *evsel;

	if (!events)
		return;

	for (evsel = events; evsel->attr.size; evsel++) {
		zfree(&evsel->name);
		zfree(&evsel->id);
	}

	free(events);
}

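/*
 * Read back what write_event_desc() stored: returns a calloc'd array of
 * perf_evsel entries terminated by a zero attr.size, or NULL on error.
 * The result must be released with free_event_desc().
 */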
static struct perf_evsel *
read_event_desc(struct perf_header *ph, int fd)
{
	struct perf_evsel *evsel, *events = NULL;
	u64 *id;
	void *buf = NULL;
	u32 nre, sz, nr, i, j;
	ssize_t ret;
	size_t msz;

	/* number of events */
	ret = readn(fd, &nre, sizeof(nre));
	if (ret != (ssize_t)sizeof(nre))
		goto error;

	if (ph->needs_swap)
		nre = bswap_32(nre);

	ret = readn(fd, &sz, sizeof(sz));
	if (ret != (ssize_t)sizeof(sz))
		goto error;

	if (ph->needs_swap)
		sz = bswap_32(sz);

	/* buffer to hold on file attr struct */
	buf = malloc(sz);
	if (!buf)
		goto error;

	/* the last event terminates with evsel->attr.size == 0: */
	events = calloc(nre + 1, sizeof(*events));
	if (!events)
		goto error;

	msz = sizeof(evsel->attr);
	if (sz < msz)
		msz = sz;

	for (i = 0, evsel = events; i < nre; evsel++, i++) {
		evsel->idx = i;

		/*
		 * must read entire on-file attr struct to
		 * sync up with layout.
		 */
		ret = readn(fd, buf, sz);
		if (ret != (ssize_t)sz)
			goto error;

		if (ph->needs_swap)
			perf_event__attr_swap(buf);

		memcpy(&evsel->attr, buf, msz);

		ret = readn(fd, &nr, sizeof(nr));
		if (ret != (ssize_t)sizeof(nr))
			goto error;

		if (ph->needs_swap) {
			nr = bswap_32(nr);
			evsel->needs_swap = true;
		}

		evsel->name = do_read_string(fd, ph);

		if (!nr)
			continue;

		id = calloc(nr, sizeof(*id));
		if (!id)
			goto error;
		evsel->ids = nr;
		evsel->id = id;

		for (j = 0; j < nr; j++) {
			ret = readn(fd, id, sizeof(*id));
			if (ret != (ssize_t)sizeof(*id))
				goto error;
			if (ph->needs_swap)
				*id = bswap_64(*id);
			id++;
		}
	}
out:
	free(buf);
	return events;
error:
	free_event_desc(events);
	events = NULL;
	goto out;
}

static int __desc_attr__fprintf(FILE *fp, const char *name, const char *val,
				void *priv __attribute__((unused)))
{
	return fprintf(fp, ", %s = %s", name, val);
}

static void print_event_desc(struct perf_header *ph, int fd, FILE *fp)
{
	struct perf_evsel *evsel, *events = read_event_desc(ph, fd);
	u32 j;
	u64 *id;

	if (!events) {
		fprintf(fp, "# event desc: not available or unable to read\n");
		return;
	}

	for (evsel = events; evsel->attr.size; evsel++) {
		fprintf(fp, "# event : name = %s, ", evsel->name);

		if (evsel->ids) {
			fprintf(fp, ", id = {");
			for (j = 0, id = evsel->id; j < evsel->ids; j++, id++) {
				if (j)
					fputc(',', fp);
				fprintf(fp, " %"PRIu64, *id);
			}
			fprintf(fp, " }");
		}

		perf_event_attr__fprintf(fp, &evsel->attr, __desc_attr__fprintf, NULL);

		fputc('\n', fp);
	}

	free_event_desc(events);
}

static void print_total_mem(struct perf_header *ph, int fd __maybe_unused,
			    FILE *fp)
{
	fprintf(fp, "# total memory : %Lu kB\n", ph->env.total_mem);
}

static void print_numa_topology(struct perf_header *ph, int fd __maybe_unused,
				FILE *fp)
{
	int i;
	struct numa_node *n;

	for (i = 0; i < ph->env.nr_numa_nodes; i++) {
		n = &ph->env.numa_nodes[i];

		fprintf(fp, "# node%u meminfo : total = %"PRIu64" kB,"
			    " free = %"PRIu64" kB\n",
			n->node, n->mem_total, n->mem_free);

		fprintf(fp, "# node%u cpu list : ", n->node);
		cpu_map__fprintf(n->map, fp);
	}
}

static void print_cpuid(struct perf_header *ph, int fd __maybe_unused, FILE *fp)
{
	fprintf(fp, "# cpuid : %s\n", ph->env.cpuid);
}

static void print_branch_stack(struct perf_header *ph __maybe_unused,
			       int fd __maybe_unused, FILE *fp)
{
	fprintf(fp, "# contains samples with branch stack\n");
}

static void print_auxtrace(struct perf_header *ph __maybe_unused,
			   int fd __maybe_unused, FILE *fp)
{
	fprintf(fp, "# contains AUX area data (e.g. instruction trace)\n");
}

static void print_stat(struct perf_header *ph __maybe_unused,
		       int fd __maybe_unused, FILE *fp)
{
	fprintf(fp, "# contains stat data\n");
}

static void print_cache(struct perf_header *ph, int fd __maybe_unused,
			FILE *fp)
{
	int i;

	fprintf(fp, "# CPU cache info:\n");
	for (i = 0; i < ph->env.caches_cnt; i++) {
		fprintf(fp, "# ");
		cpu_cache_level__fprintf(fp, &ph->env.caches[i]);
	}
}

static void print_pmu_mappings(struct perf_header *ph, int fd __maybe_unused,
			       FILE *fp)
{
	const char *delimiter = "# pmu mappings: ";
	char *str, *tmp;
	u32 pmu_num;
	u32 type;

	pmu_num = ph->env.nr_pmu_mappings;
	if (!pmu_num) {
		fprintf(fp, "# pmu mappings: not available\n");
		return;
	}

	str = ph->env.pmu_mappings;

	while (pmu_num) {
		type = strtoul(str, &tmp, 0);
		if (*tmp != ':')
			goto error;

		str = tmp + 1;
		fprintf(fp, "%s%s = %" PRIu32, delimiter, str, type);

		delimiter = ", ";
		str += strlen(str) + 1;
		pmu_num--;
	}

	fprintf(fp, "\n");

	if (!pmu_num)
		return;
error:
	fprintf(fp, "# pmu mappings: unable to read\n");
}

static void print_group_desc(struct perf_header *ph, int fd __maybe_unused,
			     FILE *fp)
{
	struct perf_session *session;
	struct perf_evsel *evsel;
	u32 nr = 0;

	session = container_of(ph, struct perf_session, header);

	evlist__for_each_entry(session->evlist, evsel) {
		if (perf_evsel__is_group_leader(evsel) &&
		    evsel->nr_members > 1) {
			fprintf(fp, "# group: %s{%s", evsel->group_name ?: "",
				perf_evsel__name(evsel));

			nr = evsel->nr_members - 1;
		} else if (nr) {
			fprintf(fp, ",%s", perf_evsel__name(evsel));

			if (--nr == 0)
				fprintf(fp, "}\n");
		}
	}
}

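/*
 * Attach the build id carried by @bev to the dso it refers to, creating
 * the machine and dso entries if needed. The cpumode bits in the event
 * header decide whether the dso is treated as kernel, guest kernel or
 * userspace; kernel modules are recognized from their file name.
 */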
static int __event_process_build_id(struct build_id_event *bev,
				    char *filename,
				    struct perf_session *session)
{
	int err = -1;
	struct machine *machine;
	u16 cpumode;
	struct dso *dso;
	enum dso_kernel_type dso_type;

	machine = perf_session__findnew_machine(session, bev->pid);
	if (!machine)
		goto out;

	cpumode = bev->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;

	switch (cpumode) {
	case PERF_RECORD_MISC_KERNEL:
		dso_type = DSO_TYPE_KERNEL;
		break;
	case PERF_RECORD_MISC_GUEST_KERNEL:
		dso_type = DSO_TYPE_GUEST_KERNEL;
		break;
	case PERF_RECORD_MISC_USER:
	case PERF_RECORD_MISC_GUEST_USER:
		dso_type = DSO_TYPE_USER;
		break;
	default:
		goto out;
	}

	dso = machine__findnew_dso(machine, filename);
	if (dso != NULL) {
		char sbuild_id[SBUILD_ID_SIZE];

		dso__set_build_id(dso, &bev->build_id);

		if (dso_type != DSO_TYPE_USER) {
			struct kmod_path m = { .name = NULL, };

			if (!kmod_path__parse_name(&m, filename) && m.kmod)
				dso__set_module_info(dso, &m, machine);
			else
				dso->kernel = dso_type;

			free(m.name);
		}

		build_id__sprintf(dso->build_id, sizeof(dso->build_id),
				  sbuild_id);
		pr_debug("build id event received for %s: %s\n",
			 dso->long_name, sbuild_id);
		dso__put(dso);
	}

	err = 0;
out:
	return err;
}

static int perf_header__read_build_ids_abi_quirk(struct perf_header *header,
						 int input, u64 offset, u64 size)
{
	struct perf_session *session = container_of(header, struct perf_session, header);
	struct {
		struct perf_event_header   header;
		u8			   build_id[PERF_ALIGN(BUILD_ID_SIZE, sizeof(u64))];
		char			   filename[0];
	} old_bev;
	struct build_id_event bev;
	char filename[PATH_MAX];
	u64 limit = offset + size;

	while (offset < limit) {
		ssize_t len;

		if (readn(input, &old_bev, sizeof(old_bev)) != sizeof(old_bev))
			return -1;

		if (header->needs_swap)
			perf_event_header__bswap(&old_bev.header);

		len = old_bev.header.size - sizeof(old_bev);
		if (readn(input, filename, len) != len)
			return -1;

		bev.header = old_bev.header;

		/*
		 * The pid is the missing value, so we need to fill it in
		 * properly. The header.misc value gives us a nice hint.
		 */
		bev.pid	= HOST_KERNEL_ID;
		if (bev.header.misc == PERF_RECORD_MISC_GUEST_USER ||
		    bev.header.misc == PERF_RECORD_MISC_GUEST_KERNEL)
			bev.pid	= DEFAULT_GUEST_KERNEL_ID;

		memcpy(bev.build_id, old_bev.build_id, sizeof(bev.build_id));
		__event_process_build_id(&bev, filename, session);

		offset += bev.header.size;
	}

	return 0;
}

static int perf_header__read_build_ids(struct perf_header *header,
				       int input, u64 offset, u64 size)
{
	struct perf_session *session = container_of(header, struct perf_session, header);
	struct build_id_event bev;
	char filename[PATH_MAX];
	u64 limit = offset + size, orig_offset = offset;
	int err = -1;

	while (offset < limit) {
		ssize_t len;

		if (readn(input, &bev, sizeof(bev)) != sizeof(bev))
			goto out;

		if (header->needs_swap)
			perf_event_header__bswap(&bev.header);

		len = bev.header.size - sizeof(bev);
		if (readn(input, filename, len) != len)
			goto out;
		/*
		 * The a1645ce1 changeset:
		 *
		 * "perf: 'perf kvm' tool for monitoring guest performance from host"
		 *
		 * Added a field to struct build_id_event that broke the file
		 * format.
		 *
		 * Since the kernel build-id is the first entry, process the
		 * table using the old format if the well known
		 * '[kernel.kallsyms]' string for the kernel build-id has the
		 * first 4 characters chopped off (where the pid_t sits).
		 */
		if (memcmp(filename, "nel.kallsyms]", 13) == 0) {
			if (lseek(input, orig_offset, SEEK_SET) == (off_t)-1)
				return -1;
			return perf_header__read_build_ids_abi_quirk(header, input, offset, size);
		}

		__event_process_build_id(&bev, filename, session);

		offset += bev.header.size;
	}
	err = 0;
out:
	return err;
}

static int process_tracing_data(struct perf_file_section *section __maybe_unused,
				struct perf_header *ph __maybe_unused,
				int fd, void *data)
{
	ssize_t ret = trace_report(fd, data, false);
	return ret < 0 ? -1 : 0;
}

static int process_build_id(struct perf_file_section *section,
			    struct perf_header *ph, int fd,
			    void *data __maybe_unused)
{
	if (perf_header__read_build_ids(ph, fd, section->offset, section->size))
		pr_debug("Failed to read buildids, continuing...\n");
	return 0;
}

static int process_hostname(struct perf_file_section *section __maybe_unused,
			    struct perf_header *ph, int fd,
			    void *data __maybe_unused)
{
	ph->env.hostname = do_read_string(fd, ph);
	return ph->env.hostname ? 0 : -ENOMEM;
}

static int process_osrelease(struct perf_file_section *section __maybe_unused,
			     struct perf_header *ph, int fd,
			     void *data __maybe_unused)
{
	ph->env.os_release = do_read_string(fd, ph);
	return ph->env.os_release ? 0 : -ENOMEM;
}

static int process_version(struct perf_file_section *section __maybe_unused,
			   struct perf_header *ph, int fd,
			   void *data __maybe_unused)
{
	ph->env.version = do_read_string(fd, ph);
	return ph->env.version ? 0 : -ENOMEM;
}

static int process_arch(struct perf_file_section *section __maybe_unused,
			struct perf_header *ph, int fd,
			void *data __maybe_unused)
{
	ph->env.arch = do_read_string(fd, ph);
	return ph->env.arch ? 0 : -ENOMEM;
}

1639static int process_nrcpus(struct perf_file_section *section __maybe_unused,
Namhyung Kim3d7eb862012-09-24 17:15:01 +09001640 struct perf_header *ph, int fd,
1641 void *data __maybe_unused)
Namhyung Kima1ae5652012-09-24 17:14:59 +09001642{
Jiri Olsa727ebd52013-11-28 11:30:14 +01001643 ssize_t ret;
Namhyung Kima1ae5652012-09-24 17:14:59 +09001644 u32 nr;
1645
Namhyung Kim5323f602012-12-17 15:38:54 +09001646 ret = readn(fd, &nr, sizeof(nr));
Namhyung Kima1ae5652012-09-24 17:14:59 +09001647 if (ret != sizeof(nr))
1648 return -1;
1649
1650 if (ph->needs_swap)
1651 nr = bswap_32(nr);
1652
Arnaldo Carvalho de Melocaa47042015-09-11 12:36:12 -03001653 ph->env.nr_cpus_avail = nr;
Namhyung Kima1ae5652012-09-24 17:14:59 +09001654
Namhyung Kim5323f602012-12-17 15:38:54 +09001655 ret = readn(fd, &nr, sizeof(nr));
Namhyung Kima1ae5652012-09-24 17:14:59 +09001656 if (ret != sizeof(nr))
1657 return -1;
1658
1659 if (ph->needs_swap)
1660 nr = bswap_32(nr);
1661
Arnaldo Carvalho de Melocaa47042015-09-11 12:36:12 -03001662 ph->env.nr_cpus_online = nr;
Namhyung Kima1ae5652012-09-24 17:14:59 +09001663 return 0;
1664}
1665
1666static int process_cpudesc(struct perf_file_section *section __maybe_unused,
Namhyung Kim3d7eb862012-09-24 17:15:01 +09001667 struct perf_header *ph, int fd,
1668 void *data __maybe_unused)
Namhyung Kima1ae5652012-09-24 17:14:59 +09001669{
1670 ph->env.cpu_desc = do_read_string(fd, ph);
1671 return ph->env.cpu_desc ? 0 : -ENOMEM;
1672}
1673
1674static int process_cpuid(struct perf_file_section *section __maybe_unused,
Namhyung Kim3d7eb862012-09-24 17:15:01 +09001675 struct perf_header *ph, int fd,
1676 void *data __maybe_unused)
Namhyung Kima1ae5652012-09-24 17:14:59 +09001677{
1678 ph->env.cpuid = do_read_string(fd, ph);
1679 return ph->env.cpuid ? 0 : -ENOMEM;
1680}
1681
1682static int process_total_mem(struct perf_file_section *section __maybe_unused,
Namhyung Kim3d7eb862012-09-24 17:15:01 +09001683 struct perf_header *ph, int fd,
1684 void *data __maybe_unused)
Namhyung Kima1ae5652012-09-24 17:14:59 +09001685{
1686 uint64_t mem;
Jiri Olsa727ebd52013-11-28 11:30:14 +01001687 ssize_t ret;
Namhyung Kima1ae5652012-09-24 17:14:59 +09001688
Namhyung Kim5323f602012-12-17 15:38:54 +09001689 ret = readn(fd, &mem, sizeof(mem));
Namhyung Kima1ae5652012-09-24 17:14:59 +09001690 if (ret != sizeof(mem))
1691 return -1;
1692
1693 if (ph->needs_swap)
1694 mem = bswap_64(mem);
1695
1696 ph->env.total_mem = mem;
1697 return 0;
1698}
1699
Robert Richter7c2f7af2012-08-16 21:10:23 +02001700static struct perf_evsel *
1701perf_evlist__find_by_index(struct perf_evlist *evlist, int idx)
1702{
1703 struct perf_evsel *evsel;
1704
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03001705 evlist__for_each_entry(evlist, evsel) {
Robert Richter7c2f7af2012-08-16 21:10:23 +02001706 if (evsel->idx == idx)
1707 return evsel;
1708 }
1709
1710 return NULL;
1711}
1712
1713static void
Namhyung Kim3d7eb862012-09-24 17:15:01 +09001714perf_evlist__set_event_name(struct perf_evlist *evlist,
1715 struct perf_evsel *event)
Robert Richter7c2f7af2012-08-16 21:10:23 +02001716{
1717 struct perf_evsel *evsel;
1718
1719 if (!event->name)
1720 return;
1721
1722 evsel = perf_evlist__find_by_index(evlist, event->idx);
1723 if (!evsel)
1724 return;
1725
1726 if (evsel->name)
1727 return;
1728
1729 evsel->name = strdup(event->name);
1730}
1731
1732static int
Irina Tirdea1d037ca2012-09-11 01:15:03 +03001733process_event_desc(struct perf_file_section *section __maybe_unused,
Namhyung Kim3d7eb862012-09-24 17:15:01 +09001734 struct perf_header *header, int fd,
Irina Tirdea1d037ca2012-09-11 01:15:03 +03001735 void *data __maybe_unused)
Robert Richter7c2f7af2012-08-16 21:10:23 +02001736{
Namhyung Kim3d7eb862012-09-24 17:15:01 +09001737 struct perf_session *session;
Robert Richter7c2f7af2012-08-16 21:10:23 +02001738 struct perf_evsel *evsel, *events = read_event_desc(header, fd);
1739
1740 if (!events)
1741 return 0;
1742
Namhyung Kim3d7eb862012-09-24 17:15:01 +09001743 session = container_of(header, struct perf_session, header);
Robert Richter7c2f7af2012-08-16 21:10:23 +02001744 for (evsel = events; evsel->attr.size; evsel++)
1745 perf_evlist__set_event_name(session->evlist, evsel);
1746
1747 free_event_desc(events);
1748
1749 return 0;
1750}
1751
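/*
 * HEADER_CMDLINE payload, as consumed below: a u32 argument count followed
 * by that many strings in the on-file string format read by
 * do_read_string().  The strings are copied back-to-back into one buffer
 * so that env.cmdline_argv[] can point into it argv[]-style.
 */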
Jiri Olsa768dd3f2015-07-21 14:31:31 +02001752static int process_cmdline(struct perf_file_section *section,
Namhyung Kim3d7eb862012-09-24 17:15:01 +09001753 struct perf_header *ph, int fd,
1754 void *data __maybe_unused)
Namhyung Kima1ae5652012-09-24 17:14:59 +09001755{
Jiri Olsa727ebd52013-11-28 11:30:14 +01001756 ssize_t ret;
Jiri Olsa768dd3f2015-07-21 14:31:31 +02001757 char *str, *cmdline = NULL, **argv = NULL;
1758 u32 nr, i, len = 0;
Namhyung Kima1ae5652012-09-24 17:14:59 +09001759
Namhyung Kim5323f602012-12-17 15:38:54 +09001760 ret = readn(fd, &nr, sizeof(nr));
Namhyung Kima1ae5652012-09-24 17:14:59 +09001761 if (ret != sizeof(nr))
1762 return -1;
1763
1764 if (ph->needs_swap)
1765 nr = bswap_32(nr);
1766
1767 ph->env.nr_cmdline = nr;
Jiri Olsa768dd3f2015-07-21 14:31:31 +02001768
1769 cmdline = zalloc(section->size + nr + 1);
1770 if (!cmdline)
1771 return -1;
1772
1773 argv = zalloc(sizeof(char *) * (nr + 1));
1774 if (!argv)
1775 goto error;
Namhyung Kima1ae5652012-09-24 17:14:59 +09001776
1777 for (i = 0; i < nr; i++) {
1778 str = do_read_string(fd, ph);
1779 if (!str)
1780 goto error;
1781
Jiri Olsa768dd3f2015-07-21 14:31:31 +02001782 argv[i] = cmdline + len;
1783 memcpy(argv[i], str, strlen(str) + 1);
1784 len += strlen(str) + 1;
Namhyung Kima1ae5652012-09-24 17:14:59 +09001785 free(str);
1786 }
Jiri Olsa768dd3f2015-07-21 14:31:31 +02001787 ph->env.cmdline = cmdline;
1788 ph->env.cmdline_argv = (const char **) argv;
Namhyung Kima1ae5652012-09-24 17:14:59 +09001789 return 0;
1790
1791error:
Jiri Olsa768dd3f2015-07-21 14:31:31 +02001792 free(argv);
1793 free(cmdline);
Namhyung Kima1ae5652012-09-24 17:14:59 +09001794 return -1;
1795}
1796
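/*
 * HEADER_CPU_TOPOLOGY payload, as consumed below: a u32 count of core
 * sibling maps followed by that many strings, then a u32 count of thread
 * sibling maps followed by that many strings.  Newer files append, for
 * each of nr_cpus_avail CPUs, a u32 core id and a u32 socket id; the
 * section->size check below detects whether that tail is present.
 */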
Kan Liang2bb00d22015-09-01 09:58:12 -04001797static int process_cpu_topology(struct perf_file_section *section,
Namhyung Kim3d7eb862012-09-24 17:15:01 +09001798 struct perf_header *ph, int fd,
1799 void *data __maybe_unused)
Namhyung Kima1ae5652012-09-24 17:14:59 +09001800{
Jiri Olsa727ebd52013-11-28 11:30:14 +01001801 ssize_t ret;
Namhyung Kima1ae5652012-09-24 17:14:59 +09001802 u32 nr, i;
1803 char *str;
1804 struct strbuf sb;
Jan Stancekda8a58b2017-02-17 12:10:26 +01001805 int cpu_nr = ph->env.nr_cpus_avail;
Kan Liang2bb00d22015-09-01 09:58:12 -04001806 u64 size = 0;
1807
1808 ph->env.cpu = calloc(cpu_nr, sizeof(*ph->env.cpu));
1809 if (!ph->env.cpu)
1810 return -1;
Namhyung Kima1ae5652012-09-24 17:14:59 +09001811
Namhyung Kim5323f602012-12-17 15:38:54 +09001812 ret = readn(fd, &nr, sizeof(nr));
Namhyung Kima1ae5652012-09-24 17:14:59 +09001813 if (ret != sizeof(nr))
Kan Liang2bb00d22015-09-01 09:58:12 -04001814 goto free_cpu;
Namhyung Kima1ae5652012-09-24 17:14:59 +09001815
1816 if (ph->needs_swap)
1817 nr = bswap_32(nr);
1818
1819 ph->env.nr_sibling_cores = nr;
Kan Liang2bb00d22015-09-01 09:58:12 -04001820 size += sizeof(u32);
Masami Hiramatsu642aada2016-05-10 14:47:35 +09001821 if (strbuf_init(&sb, 128) < 0)
1822 goto free_cpu;
Namhyung Kima1ae5652012-09-24 17:14:59 +09001823
1824 for (i = 0; i < nr; i++) {
1825 str = do_read_string(fd, ph);
1826 if (!str)
1827 goto error;
1828
1829 /* include a NULL character at the end */
Masami Hiramatsu642aada2016-05-10 14:47:35 +09001830 if (strbuf_add(&sb, str, strlen(str) + 1) < 0)
1831 goto error;
Kan Liang2bb00d22015-09-01 09:58:12 -04001832 size += string_size(str);
Namhyung Kima1ae5652012-09-24 17:14:59 +09001833 free(str);
1834 }
1835 ph->env.sibling_cores = strbuf_detach(&sb, NULL);
1836
Namhyung Kim5323f602012-12-17 15:38:54 +09001837 ret = readn(fd, &nr, sizeof(nr));
Namhyung Kima1ae5652012-09-24 17:14:59 +09001838 if (ret != sizeof(nr))
 1839			goto free_cpu;
1840
1841 if (ph->needs_swap)
1842 nr = bswap_32(nr);
1843
1844 ph->env.nr_sibling_threads = nr;
Kan Liang2bb00d22015-09-01 09:58:12 -04001845 size += sizeof(u32);
Namhyung Kima1ae5652012-09-24 17:14:59 +09001846
1847 for (i = 0; i < nr; i++) {
1848 str = do_read_string(fd, ph);
1849 if (!str)
1850 goto error;
1851
1852 /* include a NULL character at the end */
Masami Hiramatsu642aada2016-05-10 14:47:35 +09001853 if (strbuf_add(&sb, str, strlen(str) + 1) < 0)
1854 goto error;
Kan Liang2bb00d22015-09-01 09:58:12 -04001855 size += string_size(str);
Namhyung Kima1ae5652012-09-24 17:14:59 +09001856 free(str);
1857 }
1858 ph->env.sibling_threads = strbuf_detach(&sb, NULL);
Kan Liang2bb00d22015-09-01 09:58:12 -04001859
1860 /*
1861 * The header may be from old perf,
1862 * which doesn't include core id and socket id information.
1863 */
1864 if (section->size <= size) {
1865 zfree(&ph->env.cpu);
1866 return 0;
1867 }
1868
1869 for (i = 0; i < (u32)cpu_nr; i++) {
1870 ret = readn(fd, &nr, sizeof(nr));
1871 if (ret != sizeof(nr))
1872 goto free_cpu;
1873
1874 if (ph->needs_swap)
1875 nr = bswap_32(nr);
1876
Kan Liang2bb00d22015-09-01 09:58:12 -04001877 ph->env.cpu[i].core_id = nr;
1878
1879 ret = readn(fd, &nr, sizeof(nr));
1880 if (ret != sizeof(nr))
1881 goto free_cpu;
1882
1883 if (ph->needs_swap)
1884 nr = bswap_32(nr);
1885
Jan Stancekda8a58b2017-02-17 12:10:26 +01001886 if (nr != (u32)-1 && nr > (u32)cpu_nr) {
Kan Liang2bb00d22015-09-01 09:58:12 -04001887			pr_debug("socket_id number is too big. "
 1888				 "You may need to upgrade the perf tool.\n");
1889 goto free_cpu;
1890 }
1891
1892 ph->env.cpu[i].socket_id = nr;
1893 }
1894
Namhyung Kima1ae5652012-09-24 17:14:59 +09001895 return 0;
1896
1897error:
1898 strbuf_release(&sb);
Kan Liang2bb00d22015-09-01 09:58:12 -04001899free_cpu:
1900 zfree(&ph->env.cpu);
Namhyung Kima1ae5652012-09-24 17:14:59 +09001901 return -1;
1902}
1903
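/*
 * HEADER_NUMA_TOPOLOGY payload, as consumed below: a u32 node count, then
 * per node a u32 node number, u64 total and free memory sizes, and a CPU
 * list string that cpu_map__new() can parse (e.g. "0-3,8-11").
 */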
1904static int process_numa_topology(struct perf_file_section *section __maybe_unused,
Namhyung Kim3d7eb862012-09-24 17:15:01 +09001905 struct perf_header *ph, int fd,
1906 void *data __maybe_unused)
Namhyung Kima1ae5652012-09-24 17:14:59 +09001907{
Jiri Olsac60da222016-07-04 14:16:20 +02001908 struct numa_node *nodes, *n;
Jiri Olsa727ebd52013-11-28 11:30:14 +01001909 ssize_t ret;
Jiri Olsac60da222016-07-04 14:16:20 +02001910 u32 nr, i;
Namhyung Kima1ae5652012-09-24 17:14:59 +09001911 char *str;
Namhyung Kima1ae5652012-09-24 17:14:59 +09001912
1913 /* nr nodes */
Namhyung Kim5323f602012-12-17 15:38:54 +09001914 ret = readn(fd, &nr, sizeof(nr));
Namhyung Kima1ae5652012-09-24 17:14:59 +09001915 if (ret != sizeof(nr))
Masami Hiramatsu642aada2016-05-10 14:47:35 +09001916 return -1;
Namhyung Kima1ae5652012-09-24 17:14:59 +09001917
1918 if (ph->needs_swap)
1919 nr = bswap_32(nr);
1920
Jiri Olsac60da222016-07-04 14:16:20 +02001921 nodes = zalloc(sizeof(*nodes) * nr);
1922 if (!nodes)
1923 return -ENOMEM;
Namhyung Kima1ae5652012-09-24 17:14:59 +09001924
1925 for (i = 0; i < nr; i++) {
Jiri Olsac60da222016-07-04 14:16:20 +02001926 n = &nodes[i];
1927
Namhyung Kima1ae5652012-09-24 17:14:59 +09001928 /* node number */
Jiri Olsac60da222016-07-04 14:16:20 +02001929 ret = readn(fd, &n->node, sizeof(u32));
1930 if (ret != sizeof(n->node))
Namhyung Kima1ae5652012-09-24 17:14:59 +09001931 goto error;
1932
Jiri Olsac60da222016-07-04 14:16:20 +02001933 ret = readn(fd, &n->mem_total, sizeof(u64));
Namhyung Kima1ae5652012-09-24 17:14:59 +09001934 if (ret != sizeof(u64))
1935 goto error;
1936
Jiri Olsac60da222016-07-04 14:16:20 +02001937 ret = readn(fd, &n->mem_free, sizeof(u64));
Namhyung Kima1ae5652012-09-24 17:14:59 +09001938 if (ret != sizeof(u64))
1939 goto error;
1940
1941 if (ph->needs_swap) {
Jiri Olsac60da222016-07-04 14:16:20 +02001942 n->node = bswap_32(n->node);
1943 n->mem_total = bswap_64(n->mem_total);
1944 n->mem_free = bswap_64(n->mem_free);
Namhyung Kima1ae5652012-09-24 17:14:59 +09001945 }
1946
Namhyung Kima1ae5652012-09-24 17:14:59 +09001947 str = do_read_string(fd, ph);
1948 if (!str)
1949 goto error;
1950
Jiri Olsac60da222016-07-04 14:16:20 +02001951 n->map = cpu_map__new(str);
1952 if (!n->map)
Masami Hiramatsu642aada2016-05-10 14:47:35 +09001953 goto error;
Jiri Olsac60da222016-07-04 14:16:20 +02001954
Namhyung Kima1ae5652012-09-24 17:14:59 +09001955 free(str);
1956 }
Jiri Olsaf957a532016-10-10 09:56:32 +02001957 ph->env.nr_numa_nodes = nr;
Jiri Olsac60da222016-07-04 14:16:20 +02001958 ph->env.numa_nodes = nodes;
Namhyung Kima1ae5652012-09-24 17:14:59 +09001959 return 0;
1960
1961error:
Jiri Olsac60da222016-07-04 14:16:20 +02001962 free(nodes);
Namhyung Kima1ae5652012-09-24 17:14:59 +09001963 return -1;
1964}
1965
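/*
 * HEADER_PMU_MAPPINGS payload, as consumed below: a u32 number of PMUs,
 * then per PMU a u32 perf_event_attr::type value and the PMU name string.
 * The pairs are kept as a NUL-separated "type:name" list in
 * env.pmu_mappings; the "msr" PMU type is also cached separately.
 */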
1966static int process_pmu_mappings(struct perf_file_section *section __maybe_unused,
Namhyung Kim3d7eb862012-09-24 17:15:01 +09001967 struct perf_header *ph, int fd,
1968 void *data __maybe_unused)
Namhyung Kima1ae5652012-09-24 17:14:59 +09001969{
Jiri Olsa727ebd52013-11-28 11:30:14 +01001970 ssize_t ret;
Namhyung Kima1ae5652012-09-24 17:14:59 +09001971 char *name;
1972 u32 pmu_num;
1973 u32 type;
1974 struct strbuf sb;
1975
Namhyung Kim5323f602012-12-17 15:38:54 +09001976 ret = readn(fd, &pmu_num, sizeof(pmu_num));
Namhyung Kima1ae5652012-09-24 17:14:59 +09001977 if (ret != sizeof(pmu_num))
1978 return -1;
1979
1980 if (ph->needs_swap)
1981 pmu_num = bswap_32(pmu_num);
1982
1983 if (!pmu_num) {
1984 pr_debug("pmu mappings not available\n");
1985 return 0;
1986 }
1987
1988 ph->env.nr_pmu_mappings = pmu_num;
Masami Hiramatsu642aada2016-05-10 14:47:35 +09001989 if (strbuf_init(&sb, 128) < 0)
1990 return -1;
Namhyung Kima1ae5652012-09-24 17:14:59 +09001991
1992 while (pmu_num) {
Namhyung Kim5323f602012-12-17 15:38:54 +09001993 if (readn(fd, &type, sizeof(type)) != sizeof(type))
Namhyung Kima1ae5652012-09-24 17:14:59 +09001994 goto error;
1995 if (ph->needs_swap)
1996 type = bswap_32(type);
1997
1998 name = do_read_string(fd, ph);
1999 if (!name)
2000 goto error;
2001
Masami Hiramatsu642aada2016-05-10 14:47:35 +09002002 if (strbuf_addf(&sb, "%u:%s", type, name) < 0)
2003 goto error;
Namhyung Kima1ae5652012-09-24 17:14:59 +09002004 /* include a NULL character at the end */
Masami Hiramatsu642aada2016-05-10 14:47:35 +09002005 if (strbuf_add(&sb, "", 1) < 0)
2006 goto error;
Namhyung Kima1ae5652012-09-24 17:14:59 +09002007
Kan Liange0838e02015-09-10 11:03:05 -03002008 if (!strcmp(name, "msr"))
2009 ph->env.msr_pmu_type = type;
2010
Namhyung Kima1ae5652012-09-24 17:14:59 +09002011 free(name);
2012 pmu_num--;
2013 }
2014 ph->env.pmu_mappings = strbuf_detach(&sb, NULL);
2015 return 0;
2016
2017error:
2018 strbuf_release(&sb);
2019 return -1;
2020}
2021
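/*
 * HEADER_GROUP_DESC payload, as consumed below: a u32 group count, then
 * per group a name string ("{anon_group}" for unnamed groups), a u32
 * leader index and a u32 member count.  The leader/member links of the
 * evsels are rebuilt from this by walking the evlist in index order.
 */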
Namhyung Kima8bb5592013-01-22 18:09:31 +09002022static int process_group_desc(struct perf_file_section *section __maybe_unused,
2023 struct perf_header *ph, int fd,
2024 void *data __maybe_unused)
2025{
 2026	int ret = -1;
2027 u32 i, nr, nr_groups;
2028 struct perf_session *session;
2029 struct perf_evsel *evsel, *leader = NULL;
2030 struct group_desc {
2031 char *name;
2032 u32 leader_idx;
2033 u32 nr_members;
2034 } *desc;
2035
2036 if (readn(fd, &nr_groups, sizeof(nr_groups)) != sizeof(nr_groups))
2037 return -1;
2038
2039 if (ph->needs_swap)
2040 nr_groups = bswap_32(nr_groups);
2041
2042 ph->env.nr_groups = nr_groups;
2043 if (!nr_groups) {
2044 pr_debug("group desc not available\n");
2045 return 0;
2046 }
2047
2048 desc = calloc(nr_groups, sizeof(*desc));
2049 if (!desc)
2050 return -1;
2051
2052 for (i = 0; i < nr_groups; i++) {
2053 desc[i].name = do_read_string(fd, ph);
2054 if (!desc[i].name)
2055 goto out_free;
2056
2057 if (readn(fd, &desc[i].leader_idx, sizeof(u32)) != sizeof(u32))
2058 goto out_free;
2059
2060 if (readn(fd, &desc[i].nr_members, sizeof(u32)) != sizeof(u32))
2061 goto out_free;
2062
2063 if (ph->needs_swap) {
2064 desc[i].leader_idx = bswap_32(desc[i].leader_idx);
2065 desc[i].nr_members = bswap_32(desc[i].nr_members);
2066 }
2067 }
2068
2069 /*
2070 * Rebuild group relationship based on the group_desc
2071 */
2072 session = container_of(ph, struct perf_session, header);
2073 session->evlist->nr_groups = nr_groups;
2074
2075 i = nr = 0;
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03002076 evlist__for_each_entry(session->evlist, evsel) {
Namhyung Kima8bb5592013-01-22 18:09:31 +09002077 if (evsel->idx == (int) desc[i].leader_idx) {
2078 evsel->leader = evsel;
2079 /* {anon_group} is a dummy name */
Namhyung Kim210e8122013-11-18 11:20:43 +09002080 if (strcmp(desc[i].name, "{anon_group}")) {
Namhyung Kima8bb5592013-01-22 18:09:31 +09002081 evsel->group_name = desc[i].name;
Namhyung Kim210e8122013-11-18 11:20:43 +09002082 desc[i].name = NULL;
2083 }
Namhyung Kima8bb5592013-01-22 18:09:31 +09002084 evsel->nr_members = desc[i].nr_members;
2085
2086 if (i >= nr_groups || nr > 0) {
2087 pr_debug("invalid group desc\n");
2088 goto out_free;
2089 }
2090
2091 leader = evsel;
2092 nr = evsel->nr_members - 1;
2093 i++;
2094 } else if (nr) {
2095 /* This is a group member */
2096 evsel->leader = leader;
2097
2098 nr--;
2099 }
2100 }
2101
2102 if (i != nr_groups || nr != 0) {
2103 pr_debug("invalid group desc\n");
2104 goto out_free;
2105 }
2106
2107 ret = 0;
2108out_free:
Namhyung Kim50a27402013-11-18 11:20:44 +09002109 for (i = 0; i < nr_groups; i++)
Arnaldo Carvalho de Melo74cf2492013-12-27 16:55:14 -03002110 zfree(&desc[i].name);
Namhyung Kima8bb5592013-01-22 18:09:31 +09002111 free(desc);
2112
2113 return ret;
2114}
2115
Adrian Hunter99fa2982015-04-30 17:37:25 +03002116static int process_auxtrace(struct perf_file_section *section,
2117 struct perf_header *ph, int fd,
2118 void *data __maybe_unused)
2119{
2120 struct perf_session *session;
2121 int err;
2122
2123 session = container_of(ph, struct perf_session, header);
2124
2125 err = auxtrace_index__process(fd, section->size, session,
2126 ph->needs_swap);
2127 if (err < 0)
2128 pr_err("Failed to process auxtrace index\n");
2129 return err;
2130}
2131
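/*
 * HEADER_CACHE payload, as consumed below: a u32 version (only 1 is
 * accepted), a u32 cache level count, then per level four u32s (level,
 * line_size, sets, ways) followed by three strings (type, size, map).
 */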
Jiri Olsa720e98b2016-02-16 16:01:43 +01002132static int process_cache(struct perf_file_section *section __maybe_unused,
 2133			 struct perf_header *ph, int fd,
2134 void *data __maybe_unused)
2135{
2136 struct cpu_cache_level *caches;
2137 u32 cnt, i, version;
2138
2139 if (readn(fd, &version, sizeof(version)) != sizeof(version))
2140 return -1;
2141
2142 if (ph->needs_swap)
2143 version = bswap_32(version);
2144
2145 if (version != 1)
2146 return -1;
2147
2148 if (readn(fd, &cnt, sizeof(cnt)) != sizeof(cnt))
2149 return -1;
2150
2151 if (ph->needs_swap)
2152 cnt = bswap_32(cnt);
2153
2154 caches = zalloc(sizeof(*caches) * cnt);
2155 if (!caches)
2156 return -1;
2157
2158 for (i = 0; i < cnt; i++) {
2159 struct cpu_cache_level c;
2160
2161 #define _R(v) \
2162 if (readn(fd, &c.v, sizeof(u32)) != sizeof(u32))\
2163 goto out_free_caches; \
2164 if (ph->needs_swap) \
2165 c.v = bswap_32(c.v); \
2166
2167 _R(level)
2168 _R(line_size)
2169 _R(sets)
2170 _R(ways)
2171 #undef _R
2172
2173 #define _R(v) \
2174 c.v = do_read_string(fd, ph); \
2175 if (!c.v) \
2176 goto out_free_caches;
2177
2178 _R(type)
2179 _R(size)
2180 _R(map)
2181 #undef _R
2182
2183 caches[i] = c;
2184 }
2185
2186 ph->env.caches = caches;
2187 ph->env.caches_cnt = cnt;
2188 return 0;
2189out_free_caches:
2190 free(caches);
2191 return -1;
2192}
2193
Stephane Eranianfbe96f22011-09-30 15:40:40 +02002194struct feature_ops {
2195 int (*write)(int fd, struct perf_header *h, struct perf_evlist *evlist);
2196 void (*print)(struct perf_header *h, int fd, FILE *fp);
Robert Richterf1c67db2012-02-10 15:41:56 +01002197 int (*process)(struct perf_file_section *section,
Namhyung Kim3d7eb862012-09-24 17:15:01 +09002198 struct perf_header *h, int fd, void *data);
Stephane Eranianfbe96f22011-09-30 15:40:40 +02002199 const char *name;
2200 bool full_only;
2201};
2202
Robert Richter8cdfa782011-12-07 10:02:56 +01002203#define FEAT_OPA(n, func) \
2204 [n] = { .name = #n, .write = write_##func, .print = print_##func }
Robert Richterf1c67db2012-02-10 15:41:56 +01002205#define FEAT_OPP(n, func) \
2206 [n] = { .name = #n, .write = write_##func, .print = print_##func, \
2207 .process = process_##func }
Robert Richter8cdfa782011-12-07 10:02:56 +01002208#define FEAT_OPF(n, func) \
Robert Richterf1c67db2012-02-10 15:41:56 +01002209 [n] = { .name = #n, .write = write_##func, .print = print_##func, \
Namhyung Kima1ae5652012-09-24 17:14:59 +09002210 .process = process_##func, .full_only = true }
Robert Richter8cdfa782011-12-07 10:02:56 +01002211
2212/* feature_ops not implemented: */
Stephane Eranian2eeaaa02012-05-15 13:28:13 +02002213#define print_tracing_data NULL
2214#define print_build_id NULL
Stephane Eranianfbe96f22011-09-30 15:40:40 +02002215
2216static const struct feature_ops feat_ops[HEADER_LAST_FEATURE] = {
Stephane Eranian2eeaaa02012-05-15 13:28:13 +02002217 FEAT_OPP(HEADER_TRACING_DATA, tracing_data),
Robert Richterf1c67db2012-02-10 15:41:56 +01002218 FEAT_OPP(HEADER_BUILD_ID, build_id),
Namhyung Kima1ae5652012-09-24 17:14:59 +09002219 FEAT_OPP(HEADER_HOSTNAME, hostname),
2220 FEAT_OPP(HEADER_OSRELEASE, osrelease),
2221 FEAT_OPP(HEADER_VERSION, version),
2222 FEAT_OPP(HEADER_ARCH, arch),
2223 FEAT_OPP(HEADER_NRCPUS, nrcpus),
2224 FEAT_OPP(HEADER_CPUDESC, cpudesc),
Namhyung Kim37e9d752012-09-24 17:15:03 +09002225 FEAT_OPP(HEADER_CPUID, cpuid),
Namhyung Kima1ae5652012-09-24 17:14:59 +09002226 FEAT_OPP(HEADER_TOTAL_MEM, total_mem),
Robert Richter7c2f7af2012-08-16 21:10:23 +02002227 FEAT_OPP(HEADER_EVENT_DESC, event_desc),
Namhyung Kima1ae5652012-09-24 17:14:59 +09002228 FEAT_OPP(HEADER_CMDLINE, cmdline),
Robert Richter8cdfa782011-12-07 10:02:56 +01002229 FEAT_OPF(HEADER_CPU_TOPOLOGY, cpu_topology),
2230 FEAT_OPF(HEADER_NUMA_TOPOLOGY, numa_topology),
Stephane Eranian330aa672012-03-08 23:47:46 +01002231 FEAT_OPA(HEADER_BRANCH_STACK, branch_stack),
Namhyung Kima1ae5652012-09-24 17:14:59 +09002232 FEAT_OPP(HEADER_PMU_MAPPINGS, pmu_mappings),
Namhyung Kima8bb5592013-01-22 18:09:31 +09002233 FEAT_OPP(HEADER_GROUP_DESC, group_desc),
Adrian Hunter99fa2982015-04-30 17:37:25 +03002234 FEAT_OPP(HEADER_AUXTRACE, auxtrace),
Jiri Olsaffa517a2015-10-25 15:51:43 +01002235 FEAT_OPA(HEADER_STAT, stat),
Jiri Olsa720e98b2016-02-16 16:01:43 +01002236 FEAT_OPF(HEADER_CACHE, cache),
Stephane Eranianfbe96f22011-09-30 15:40:40 +02002237};
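/*
 * Hooking up a new feature is table-driven: a hypothetical HEADER_FOO bit
 * would only need write_foo()/print_foo()/process_foo() with the
 * signatures from struct feature_ops plus one table entry, e.g.
 * (sketch only, HEADER_FOO and the foo helpers do not exist):
 *
 *	FEAT_OPP(HEADER_FOO, foo),
 *
 * FEAT_OPA is for features without a process handler, FEAT_OPF for those
 * that are only printed with the full (-I) header listing.
 */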
2238
2239struct header_print_data {
2240 FILE *fp;
2241 bool full; /* extended list of headers */
2242};
2243
2244static int perf_file_section__fprintf_info(struct perf_file_section *section,
2245 struct perf_header *ph,
2246 int feat, int fd, void *data)
2247{
2248 struct header_print_data *hd = data;
2249
2250 if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) {
2251 pr_debug("Failed to lseek to %" PRIu64 " offset for feature "
2252 "%d, continuing...\n", section->offset, feat);
2253 return 0;
2254 }
Robert Richterb1e5a9b2011-12-07 10:02:57 +01002255 if (feat >= HEADER_LAST_FEATURE) {
Stephane Eranianfbe96f22011-09-30 15:40:40 +02002256 pr_warning("unknown feature %d\n", feat);
Robert Richterf7a8a132011-12-07 10:02:51 +01002257 return 0;
Stephane Eranianfbe96f22011-09-30 15:40:40 +02002258 }
2259 if (!feat_ops[feat].print)
2260 return 0;
2261
2262 if (!feat_ops[feat].full_only || hd->full)
2263 feat_ops[feat].print(ph, fd, hd->fp);
2264 else
2265 fprintf(hd->fp, "# %s info available, use -I to display\n",
2266 feat_ops[feat].name);
2267
2268 return 0;
2269}
2270
2271int perf_header__fprintf_info(struct perf_session *session, FILE *fp, bool full)
2272{
2273 struct header_print_data hd;
2274 struct perf_header *header = &session->header;
Jiri Olsacc9784bd2013-10-15 16:27:34 +02002275 int fd = perf_data_file__fd(session->file);
Jiri Olsaf45f5612016-10-10 09:03:07 +02002276 struct stat st;
Jiri Olsaaabae162016-10-10 09:35:50 +02002277 int ret, bit;
Jiri Olsaf45f5612016-10-10 09:03:07 +02002278
Stephane Eranianfbe96f22011-09-30 15:40:40 +02002279 hd.fp = fp;
2280 hd.full = full;
2281
Jiri Olsaf45f5612016-10-10 09:03:07 +02002282 ret = fstat(fd, &st);
2283 if (ret == -1)
2284 return -1;
2285
2286 fprintf(fp, "# captured on: %s", ctime(&st.st_ctime));
2287
Stephane Eranianfbe96f22011-09-30 15:40:40 +02002288 perf_header__process_sections(header, fd, &hd,
2289 perf_file_section__fprintf_info);
Jiri Olsaaabae162016-10-10 09:35:50 +02002290
David Carrillo-Cisnerosc9d1c932017-04-10 13:14:32 -07002291 if (session->file->is_pipe)
2292 return 0;
2293
Jiri Olsaaabae162016-10-10 09:35:50 +02002294 fprintf(fp, "# missing features: ");
2295 for_each_clear_bit(bit, header->adds_features, HEADER_LAST_FEATURE) {
2296 if (bit)
2297 fprintf(fp, "%s ", feat_ops[bit].name);
2298 }
2299
2300 fprintf(fp, "\n");
Stephane Eranianfbe96f22011-09-30 15:40:40 +02002301 return 0;
2302}
2303
Stephane Eranianfbe96f22011-09-30 15:40:40 +02002304static int do_write_feat(int fd, struct perf_header *h, int type,
2305 struct perf_file_section **p,
2306 struct perf_evlist *evlist)
2307{
2308 int err;
2309 int ret = 0;
2310
2311 if (perf_header__has_feat(h, type)) {
Robert Richterb1e5a9b2011-12-07 10:02:57 +01002312 if (!feat_ops[type].write)
2313 return -1;
Stephane Eranianfbe96f22011-09-30 15:40:40 +02002314
2315 (*p)->offset = lseek(fd, 0, SEEK_CUR);
2316
2317 err = feat_ops[type].write(fd, h, evlist);
2318 if (err < 0) {
Jiri Olsa0c2aff42016-10-10 09:38:02 +02002319 pr_debug("failed to write feature %s\n", feat_ops[type].name);
Stephane Eranianfbe96f22011-09-30 15:40:40 +02002320
2321 /* undo anything written */
2322 lseek(fd, (*p)->offset, SEEK_SET);
2323
2324 return -1;
2325 }
2326 (*p)->size = lseek(fd, 0, SEEK_CUR) - (*p)->offset;
2327 (*p)++;
2328 }
2329 return ret;
2330}
2331
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03002332static int perf_header__adds_write(struct perf_header *header,
Arnaldo Carvalho de Melo361c99a2011-01-11 20:56:53 -02002333 struct perf_evlist *evlist, int fd)
Frederic Weisbecker2ba08252009-10-17 17:12:34 +02002334{
Frederic Weisbecker9e827dd2009-11-11 04:51:07 +01002335 int nr_sections;
Stephane Eranianfbe96f22011-09-30 15:40:40 +02002336 struct perf_file_section *feat_sec, *p;
Frederic Weisbecker9e827dd2009-11-11 04:51:07 +01002337 int sec_size;
2338 u64 sec_start;
Robert Richterb1e5a9b2011-12-07 10:02:57 +01002339 int feat;
Stephane Eranianfbe96f22011-09-30 15:40:40 +02002340 int err;
Frederic Weisbecker9e827dd2009-11-11 04:51:07 +01002341
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03002342 nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);
Frederic Weisbecker9e827dd2009-11-11 04:51:07 +01002343 if (!nr_sections)
Arnaldo Carvalho de Melod5eed902009-11-19 14:55:56 -02002344 return 0;
Frederic Weisbecker9e827dd2009-11-11 04:51:07 +01002345
Paul Gortmaker91b98802013-01-30 20:05:49 -05002346 feat_sec = p = calloc(nr_sections, sizeof(*feat_sec));
Arnaldo Carvalho de Melod5eed902009-11-19 14:55:56 -02002347 if (feat_sec == NULL)
2348 return -ENOMEM;
Frederic Weisbecker9e827dd2009-11-11 04:51:07 +01002349
2350 sec_size = sizeof(*feat_sec) * nr_sections;
2351
Jiri Olsa8d541e92013-07-17 19:49:44 +02002352 sec_start = header->feat_offset;
Xiao Guangrongf887f302010-02-04 16:46:42 +08002353 lseek(fd, sec_start + sec_size, SEEK_SET);
Frederic Weisbecker2ba08252009-10-17 17:12:34 +02002354
Robert Richterb1e5a9b2011-12-07 10:02:57 +01002355 for_each_set_bit(feat, header->adds_features, HEADER_FEAT_BITS) {
2356 if (do_write_feat(fd, header, feat, &p, evlist))
2357 perf_header__clear_feat(header, feat);
2358 }
Frederic Weisbecker9e827dd2009-11-11 04:51:07 +01002359
Xiao Guangrongf887f302010-02-04 16:46:42 +08002360 lseek(fd, sec_start, SEEK_SET);
Stephane Eranianfbe96f22011-09-30 15:40:40 +02002361 /*
2362 * may write more than needed due to dropped feature, but
 2363	 * this is okay, reader will skip the missing entries
2364 */
Arnaldo Carvalho de Melod5eed902009-11-19 14:55:56 -02002365 err = do_write(fd, feat_sec, sec_size);
2366 if (err < 0)
2367 pr_debug("failed to write feature section\n");
Frederic Weisbecker9e827dd2009-11-11 04:51:07 +01002368 free(feat_sec);
Arnaldo Carvalho de Melod5eed902009-11-19 14:55:56 -02002369 return err;
Frederic Weisbecker9e827dd2009-11-11 04:51:07 +01002370}
Frederic Weisbecker2ba08252009-10-17 17:12:34 +02002371
Tom Zanussi8dc58102010-04-01 23:59:15 -05002372int perf_header__write_pipe(int fd)
2373{
2374 struct perf_pipe_file_header f_header;
2375 int err;
2376
2377 f_header = (struct perf_pipe_file_header){
2378 .magic = PERF_MAGIC,
2379 .size = sizeof(f_header),
2380 };
2381
2382 err = do_write(fd, &f_header, sizeof(f_header));
2383 if (err < 0) {
2384 pr_debug("failed to write perf pipe header\n");
2385 return err;
2386 }
2387
2388 return 0;
2389}
2390
Arnaldo Carvalho de Meloa91e5432011-03-10 11:15:54 -03002391int perf_session__write_header(struct perf_session *session,
2392 struct perf_evlist *evlist,
2393 int fd, bool at_exit)
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02002394{
2395 struct perf_file_header f_header;
2396 struct perf_file_attr f_attr;
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03002397 struct perf_header *header = &session->header;
Jiri Olsa563aecb2013-06-05 13:35:06 +02002398 struct perf_evsel *evsel;
Jiri Olsa944d62b2013-07-17 19:49:43 +02002399 u64 attr_offset;
Arnaldo Carvalho de Meloa91e5432011-03-10 11:15:54 -03002400 int err;
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02002401
2402 lseek(fd, sizeof(f_header), SEEK_SET);
2403
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03002404 evlist__for_each_entry(session->evlist, evsel) {
Robert Richter6606f872012-08-16 21:10:19 +02002405 evsel->id_offset = lseek(fd, 0, SEEK_CUR);
2406 err = do_write(fd, evsel->id, evsel->ids * sizeof(u64));
Arnaldo Carvalho de Melod5eed902009-11-19 14:55:56 -02002407 if (err < 0) {
2408 pr_debug("failed to write perf header\n");
2409 return err;
2410 }
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02002411 }
2412
Jiri Olsa944d62b2013-07-17 19:49:43 +02002413 attr_offset = lseek(fd, 0, SEEK_CUR);
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02002414
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03002415 evlist__for_each_entry(evlist, evsel) {
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02002416 f_attr = (struct perf_file_attr){
Robert Richter6606f872012-08-16 21:10:19 +02002417 .attr = evsel->attr,
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02002418 .ids = {
Robert Richter6606f872012-08-16 21:10:19 +02002419 .offset = evsel->id_offset,
2420 .size = evsel->ids * sizeof(u64),
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02002421 }
2422 };
Arnaldo Carvalho de Melod5eed902009-11-19 14:55:56 -02002423 err = do_write(fd, &f_attr, sizeof(f_attr));
2424 if (err < 0) {
2425 pr_debug("failed to write perf header attribute\n");
2426 return err;
2427 }
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02002428 }
2429
Adrian Hunterd645c442013-12-11 14:36:28 +02002430 if (!header->data_offset)
2431 header->data_offset = lseek(fd, 0, SEEK_CUR);
Jiri Olsa8d541e92013-07-17 19:49:44 +02002432 header->feat_offset = header->data_offset + header->data_size;
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02002433
Arnaldo Carvalho de Melod5eed902009-11-19 14:55:56 -02002434 if (at_exit) {
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03002435 err = perf_header__adds_write(header, evlist, fd);
Arnaldo Carvalho de Melod5eed902009-11-19 14:55:56 -02002436 if (err < 0)
2437 return err;
2438 }
Frederic Weisbecker9e827dd2009-11-11 04:51:07 +01002439
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02002440 f_header = (struct perf_file_header){
2441 .magic = PERF_MAGIC,
2442 .size = sizeof(f_header),
2443 .attr_size = sizeof(f_attr),
2444 .attrs = {
Jiri Olsa944d62b2013-07-17 19:49:43 +02002445 .offset = attr_offset,
Arnaldo Carvalho de Meloa91e5432011-03-10 11:15:54 -03002446 .size = evlist->nr_entries * sizeof(f_attr),
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02002447 },
2448 .data = {
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03002449 .offset = header->data_offset,
2450 .size = header->data_size,
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02002451 },
Jiri Olsa44b3c572013-07-11 17:28:31 +02002452 /* event_types is ignored, store zeros */
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02002453 };
2454
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03002455 memcpy(&f_header.adds_features, &header->adds_features, sizeof(header->adds_features));
Frederic Weisbecker2ba08252009-10-17 17:12:34 +02002456
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02002457 lseek(fd, 0, SEEK_SET);
Arnaldo Carvalho de Melod5eed902009-11-19 14:55:56 -02002458 err = do_write(fd, &f_header, sizeof(f_header));
2459 if (err < 0) {
2460 pr_debug("failed to write perf header\n");
2461 return err;
2462 }
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03002463 lseek(fd, header->data_offset + header->data_size, SEEK_SET);
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02002464
Arnaldo Carvalho de Melod5eed902009-11-19 14:55:56 -02002465 return 0;
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02002466}
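/*
 * Rough on-disk layout produced above (offsets as computed by the writer):
 * struct perf_file_header at offset 0 (written last), then the per-evsel
 * id arrays, the perf_file_attr table that f_header.attrs points at, the
 * data section at header->data_offset, and, when written at exit, the
 * feature section table at header->feat_offset followed by each enabled
 * feature's payload.
 */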
2467
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03002468static int perf_header__getbuffer64(struct perf_header *header,
Arnaldo Carvalho de Meloba215942010-01-14 12:23:10 -02002469 int fd, void *buf, size_t size)
2470{
Arnaldo Carvalho de Melo1e7972c2011-01-03 16:50:55 -02002471 if (readn(fd, buf, size) <= 0)
Arnaldo Carvalho de Meloba215942010-01-14 12:23:10 -02002472 return -1;
2473
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03002474 if (header->needs_swap)
Arnaldo Carvalho de Meloba215942010-01-14 12:23:10 -02002475 mem_bswap_64(buf, size);
2476
2477 return 0;
2478}
2479
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03002480int perf_header__process_sections(struct perf_header *header, int fd,
Stephane Eranianfbe96f22011-09-30 15:40:40 +02002481 void *data,
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03002482 int (*process)(struct perf_file_section *section,
Robert Richterb1e5a9b2011-12-07 10:02:57 +01002483 struct perf_header *ph,
2484 int feat, int fd, void *data))
Frederic Weisbecker2ba08252009-10-17 17:12:34 +02002485{
Robert Richterb1e5a9b2011-12-07 10:02:57 +01002486 struct perf_file_section *feat_sec, *sec;
Frederic Weisbecker9e827dd2009-11-11 04:51:07 +01002487 int nr_sections;
2488 int sec_size;
Robert Richterb1e5a9b2011-12-07 10:02:57 +01002489 int feat;
2490 int err;
Frederic Weisbecker9e827dd2009-11-11 04:51:07 +01002491
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03002492 nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);
Frederic Weisbecker9e827dd2009-11-11 04:51:07 +01002493 if (!nr_sections)
Arnaldo Carvalho de Melo37562ea2009-11-16 16:32:43 -02002494 return 0;
Frederic Weisbecker9e827dd2009-11-11 04:51:07 +01002495
Paul Gortmaker91b98802013-01-30 20:05:49 -05002496 feat_sec = sec = calloc(nr_sections, sizeof(*feat_sec));
Frederic Weisbecker9e827dd2009-11-11 04:51:07 +01002497 if (!feat_sec)
Arnaldo Carvalho de Melo37562ea2009-11-16 16:32:43 -02002498 return -1;
Frederic Weisbecker9e827dd2009-11-11 04:51:07 +01002499
2500 sec_size = sizeof(*feat_sec) * nr_sections;
2501
Jiri Olsa8d541e92013-07-17 19:49:44 +02002502 lseek(fd, header->feat_offset, SEEK_SET);
Frederic Weisbecker9e827dd2009-11-11 04:51:07 +01002503
Robert Richterb1e5a9b2011-12-07 10:02:57 +01002504 err = perf_header__getbuffer64(header, fd, feat_sec, sec_size);
2505 if (err < 0)
Arnaldo Carvalho de Melo769885f2009-12-28 22:48:32 -02002506 goto out_free;
Frederic Weisbecker9e827dd2009-11-11 04:51:07 +01002507
Robert Richterb1e5a9b2011-12-07 10:02:57 +01002508 for_each_set_bit(feat, header->adds_features, HEADER_LAST_FEATURE) {
2509 err = process(sec++, header, feat, fd, data);
2510 if (err < 0)
2511 goto out_free;
Frederic Weisbecker4778d2e2009-11-11 04:51:05 +01002512 }
Robert Richterb1e5a9b2011-12-07 10:02:57 +01002513 err = 0;
Arnaldo Carvalho de Melo769885f2009-12-28 22:48:32 -02002514out_free:
Frederic Weisbecker9e827dd2009-11-11 04:51:07 +01002515 free(feat_sec);
Arnaldo Carvalho de Melo37562ea2009-11-16 16:32:43 -02002516 return err;
Arnaldo Carvalho de Melo769885f2009-12-28 22:48:32 -02002517}
Frederic Weisbecker2ba08252009-10-17 17:12:34 +02002518
Stephane Eranian114382a2012-02-09 23:21:08 +01002519static const int attr_file_abi_sizes[] = {
2520 [0] = PERF_ATTR_SIZE_VER0,
2521 [1] = PERF_ATTR_SIZE_VER1,
Jiri Olsa239cc472012-08-07 15:20:42 +02002522 [2] = PERF_ATTR_SIZE_VER2,
Jiri Olsa0f6a3012012-08-07 15:20:45 +02002523 [3] = PERF_ATTR_SIZE_VER3,
Stephane Eranian6a21c0b2014-09-24 13:48:39 +02002524 [4] = PERF_ATTR_SIZE_VER4,
Stephane Eranian114382a2012-02-09 23:21:08 +01002525 0,
2526};
2527
2528/*
2529 * In the legacy file format, the magic number is not used to encode endianness.
 2530 * hdr_sz was used to encode it instead. But given that hdr_sz can vary
 2531 * with the ABI revision, we need to try all known ABI sizes, in both byte
 2532 * orders, to detect the endianness.
2533 */
2534static int try_all_file_abis(uint64_t hdr_sz, struct perf_header *ph)
2535{
2536 uint64_t ref_size, attr_size;
2537 int i;
2538
2539 for (i = 0 ; attr_file_abi_sizes[i]; i++) {
2540 ref_size = attr_file_abi_sizes[i]
2541 + sizeof(struct perf_file_section);
2542 if (hdr_sz != ref_size) {
2543 attr_size = bswap_64(hdr_sz);
2544 if (attr_size != ref_size)
2545 continue;
2546
2547 ph->needs_swap = true;
2548 }
2549 pr_debug("ABI%d perf.data file detected, need_swap=%d\n",
2550 i,
2551 ph->needs_swap);
2552 return 0;
2553 }
2554 /* could not determine endianness */
2555 return -1;
2556}
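/*
 * Worked example of the probe above (assuming PERF_ATTR_SIZE_VER0 == 64
 * and the 16-byte struct perf_file_section): an ABI0 file stores
 * attr_size == 80.  A same-endian reader matches ref_size directly; an
 * opposite-endian reader sees bswap_64(80) on disk, so the bswap_64()
 * retry matches and needs_swap is set.
 */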
2557
2558#define PERF_PIPE_HDR_VER0 16
2559
2560static const size_t attr_pipe_abi_sizes[] = {
2561 [0] = PERF_PIPE_HDR_VER0,
2562 0,
2563};
2564
2565/*
 2566 * In the legacy pipe format, there is an implicit assumption that the
 2567 * endianness of the host recording the samples and of the host parsing
 2568 * them is the same. This is not always the case: the pipe output may be
 2569 * redirected into a file and analyzed on a different machine, with a
 2570 * possibly different endianness and perf_event ABI revision of the perf tool itself.
2571 */
2572static int try_all_pipe_abis(uint64_t hdr_sz, struct perf_header *ph)
2573{
2574 u64 attr_size;
2575 int i;
2576
2577 for (i = 0 ; attr_pipe_abi_sizes[i]; i++) {
2578 if (hdr_sz != attr_pipe_abi_sizes[i]) {
2579 attr_size = bswap_64(hdr_sz);
2580 if (attr_size != hdr_sz)
2581 continue;
2582
2583 ph->needs_swap = true;
2584 }
2585 pr_debug("Pipe ABI%d perf.data file detected\n", i);
2586 return 0;
2587 }
2588 return -1;
2589}
2590
Feng Tange84ba4e2012-10-30 11:56:07 +08002591bool is_perf_magic(u64 magic)
2592{
2593 if (!memcmp(&magic, __perf_magic1, sizeof(magic))
2594 || magic == __perf_magic2
2595 || magic == __perf_magic2_sw)
2596 return true;
2597
2598 return false;
2599}
2600
Stephane Eranian114382a2012-02-09 23:21:08 +01002601static int check_magic_endian(u64 magic, uint64_t hdr_sz,
2602 bool is_pipe, struct perf_header *ph)
Stephane Eranian73323f52012-02-02 13:54:44 +01002603{
2604 int ret;
2605
2606 /* check for legacy format */
Stephane Eranian114382a2012-02-09 23:21:08 +01002607 ret = memcmp(&magic, __perf_magic1, sizeof(magic));
Stephane Eranian73323f52012-02-02 13:54:44 +01002608 if (ret == 0) {
Jiri Olsa2a08c3e2013-07-17 19:49:47 +02002609 ph->version = PERF_HEADER_VERSION_1;
Stephane Eranian73323f52012-02-02 13:54:44 +01002610 pr_debug("legacy perf.data format\n");
Stephane Eranian114382a2012-02-09 23:21:08 +01002611 if (is_pipe)
2612 return try_all_pipe_abis(hdr_sz, ph);
Stephane Eranian73323f52012-02-02 13:54:44 +01002613
Stephane Eranian114382a2012-02-09 23:21:08 +01002614 return try_all_file_abis(hdr_sz, ph);
Stephane Eranian73323f52012-02-02 13:54:44 +01002615 }
Stephane Eranian114382a2012-02-09 23:21:08 +01002616 /*
2617 * the new magic number serves two purposes:
2618 * - unique number to identify actual perf.data files
2619 * - encode endianness of file
2620 */
Namhyung Kimf7913972015-01-29 17:06:45 +09002621 ph->version = PERF_HEADER_VERSION_2;
Stephane Eranian73323f52012-02-02 13:54:44 +01002622
Stephane Eranian114382a2012-02-09 23:21:08 +01002623 /* check magic number with one endianness */
2624 if (magic == __perf_magic2)
Stephane Eranian73323f52012-02-02 13:54:44 +01002625 return 0;
2626
Stephane Eranian114382a2012-02-09 23:21:08 +01002627 /* check magic number with opposite endianness */
2628 if (magic != __perf_magic2_sw)
Stephane Eranian73323f52012-02-02 13:54:44 +01002629 return -1;
2630
2631 ph->needs_swap = true;
2632
2633 return 0;
2634}
2635
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03002636int perf_file_header__read(struct perf_file_header *header,
Arnaldo Carvalho de Melo37562ea2009-11-16 16:32:43 -02002637 struct perf_header *ph, int fd)
2638{
Jiri Olsa727ebd52013-11-28 11:30:14 +01002639 ssize_t ret;
Stephane Eranian73323f52012-02-02 13:54:44 +01002640
Arnaldo Carvalho de Melo37562ea2009-11-16 16:32:43 -02002641 lseek(fd, 0, SEEK_SET);
Arnaldo Carvalho de Melo37562ea2009-11-16 16:32:43 -02002642
Stephane Eranian73323f52012-02-02 13:54:44 +01002643 ret = readn(fd, header, sizeof(*header));
2644 if (ret <= 0)
Arnaldo Carvalho de Melo37562ea2009-11-16 16:32:43 -02002645 return -1;
2646
Stephane Eranian114382a2012-02-09 23:21:08 +01002647 if (check_magic_endian(header->magic,
2648 header->attr_size, false, ph) < 0) {
2649 pr_debug("magic/endian check failed\n");
Stephane Eranian73323f52012-02-02 13:54:44 +01002650 return -1;
Stephane Eranian114382a2012-02-09 23:21:08 +01002651 }
Arnaldo Carvalho de Meloba215942010-01-14 12:23:10 -02002652
Stephane Eranian73323f52012-02-02 13:54:44 +01002653 if (ph->needs_swap) {
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03002654 mem_bswap_64(header, offsetof(struct perf_file_header,
Stephane Eranian73323f52012-02-02 13:54:44 +01002655 adds_features));
Arnaldo Carvalho de Meloba215942010-01-14 12:23:10 -02002656 }
2657
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03002658 if (header->size != sizeof(*header)) {
Arnaldo Carvalho de Melo37562ea2009-11-16 16:32:43 -02002659 /* Support the previous format */
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03002660 if (header->size == offsetof(typeof(*header), adds_features))
2661 bitmap_zero(header->adds_features, HEADER_FEAT_BITS);
Arnaldo Carvalho de Melo37562ea2009-11-16 16:32:43 -02002662 else
2663 return -1;
David Ahernd327fa42011-10-18 17:34:01 -06002664 } else if (ph->needs_swap) {
David Ahernd327fa42011-10-18 17:34:01 -06002665 /*
2666 * feature bitmap is declared as an array of unsigned longs --
2667 * not good since its size can differ between the host that
2668 * generated the data file and the host analyzing the file.
2669 *
2670 * We need to handle endianness, but we don't know the size of
2671 * the unsigned long where the file was generated. Take a best
 2672	 * guess at determining it: try 64-bit swap first (i.e., file
2673 * created on a 64-bit host), and check if the hostname feature
2674 * bit is set (this feature bit is forced on as of fbe96f2).
2675 * If the bit is not, undo the 64-bit swap and try a 32-bit
2676 * swap. If the hostname bit is still not set (e.g., older data
2677 * file), punt and fallback to the original behavior --
2678 * clearing all feature bits and setting buildid.
2679 */
David Ahern80c01202012-06-08 11:47:51 -03002680 mem_bswap_64(&header->adds_features,
2681 BITS_TO_U64(HEADER_FEAT_BITS));
David Ahernd327fa42011-10-18 17:34:01 -06002682
2683 if (!test_bit(HEADER_HOSTNAME, header->adds_features)) {
David Ahern80c01202012-06-08 11:47:51 -03002684 /* unswap as u64 */
2685 mem_bswap_64(&header->adds_features,
2686 BITS_TO_U64(HEADER_FEAT_BITS));
2687
2688 /* unswap as u32 */
2689 mem_bswap_32(&header->adds_features,
2690 BITS_TO_U32(HEADER_FEAT_BITS));
David Ahernd327fa42011-10-18 17:34:01 -06002691 }
2692
2693 if (!test_bit(HEADER_HOSTNAME, header->adds_features)) {
2694 bitmap_zero(header->adds_features, HEADER_FEAT_BITS);
2695 set_bit(HEADER_BUILD_ID, header->adds_features);
2696 }
Arnaldo Carvalho de Melo37562ea2009-11-16 16:32:43 -02002697 }
2698
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03002699 memcpy(&ph->adds_features, &header->adds_features,
Arnaldo Carvalho de Meloba215942010-01-14 12:23:10 -02002700 sizeof(ph->adds_features));
Arnaldo Carvalho de Melo37562ea2009-11-16 16:32:43 -02002701
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03002702 ph->data_offset = header->data.offset;
2703 ph->data_size = header->data.size;
Jiri Olsa8d541e92013-07-17 19:49:44 +02002704 ph->feat_offset = header->data.offset + header->data.size;
Arnaldo Carvalho de Melo37562ea2009-11-16 16:32:43 -02002705 return 0;
2706}
2707
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03002708static int perf_file_section__process(struct perf_file_section *section,
Arnaldo Carvalho de Meloba215942010-01-14 12:23:10 -02002709 struct perf_header *ph,
Arnaldo Carvalho de Meloda378962012-06-27 13:08:42 -03002710 int feat, int fd, void *data)
Arnaldo Carvalho de Melo37562ea2009-11-16 16:32:43 -02002711{
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03002712 if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) {
Arnaldo Carvalho de Melo9486aa32011-01-22 20:37:02 -02002713 pr_debug("Failed to lseek to %" PRIu64 " offset for feature "
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03002714 "%d, continuing...\n", section->offset, feat);
Arnaldo Carvalho de Melo37562ea2009-11-16 16:32:43 -02002715 return 0;
2716 }
2717
Robert Richterb1e5a9b2011-12-07 10:02:57 +01002718 if (feat >= HEADER_LAST_FEATURE) {
2719 pr_debug("unknown feature %d, continuing...\n", feat);
2720 return 0;
2721 }
2722
Robert Richterf1c67db2012-02-10 15:41:56 +01002723 if (!feat_ops[feat].process)
2724 return 0;
Arnaldo Carvalho de Melo37562ea2009-11-16 16:32:43 -02002725
Namhyung Kim3d7eb862012-09-24 17:15:01 +09002726 return feat_ops[feat].process(section, ph, fd, data);
Arnaldo Carvalho de Melo37562ea2009-11-16 16:32:43 -02002727}
2728
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03002729static int perf_file_header__read_pipe(struct perf_pipe_file_header *header,
Tom Zanussi454c4072010-05-01 01:41:20 -05002730 struct perf_header *ph, int fd,
2731 bool repipe)
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02002732{
Jiri Olsa727ebd52013-11-28 11:30:14 +01002733 ssize_t ret;
Stephane Eranian73323f52012-02-02 13:54:44 +01002734
2735 ret = readn(fd, header, sizeof(*header));
2736 if (ret <= 0)
2737 return -1;
2738
Stephane Eranian114382a2012-02-09 23:21:08 +01002739 if (check_magic_endian(header->magic, header->size, true, ph) < 0) {
2740 pr_debug("endian/magic failed\n");
Tom Zanussi8dc58102010-04-01 23:59:15 -05002741 return -1;
Stephane Eranian114382a2012-02-09 23:21:08 +01002742 }
2743
2744 if (ph->needs_swap)
2745 header->size = bswap_64(header->size);
Tom Zanussi8dc58102010-04-01 23:59:15 -05002746
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03002747 if (repipe && do_write(STDOUT_FILENO, header, sizeof(*header)) < 0)
Tom Zanussi454c4072010-05-01 01:41:20 -05002748 return -1;
2749
Tom Zanussi8dc58102010-04-01 23:59:15 -05002750 return 0;
2751}
2752
Jiri Olsad4339562013-07-17 19:49:41 +02002753static int perf_header__read_pipe(struct perf_session *session)
Tom Zanussi8dc58102010-04-01 23:59:15 -05002754{
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03002755 struct perf_header *header = &session->header;
Tom Zanussi8dc58102010-04-01 23:59:15 -05002756 struct perf_pipe_file_header f_header;
2757
Jiri Olsacc9784bd2013-10-15 16:27:34 +02002758 if (perf_file_header__read_pipe(&f_header, header,
2759 perf_data_file__fd(session->file),
Tom Zanussi454c4072010-05-01 01:41:20 -05002760 session->repipe) < 0) {
Tom Zanussi8dc58102010-04-01 23:59:15 -05002761 pr_debug("incompatible file format\n");
2762 return -EINVAL;
2763 }
2764
Tom Zanussi8dc58102010-04-01 23:59:15 -05002765 return 0;
2766}
2767
Stephane Eranian69996df2012-02-09 23:21:06 +01002768static int read_attr(int fd, struct perf_header *ph,
2769 struct perf_file_attr *f_attr)
2770{
2771 struct perf_event_attr *attr = &f_attr->attr;
2772 size_t sz, left;
2773 size_t our_sz = sizeof(f_attr->attr);
Jiri Olsa727ebd52013-11-28 11:30:14 +01002774 ssize_t ret;
Stephane Eranian69996df2012-02-09 23:21:06 +01002775
2776 memset(f_attr, 0, sizeof(*f_attr));
2777
2778 /* read minimal guaranteed structure */
2779 ret = readn(fd, attr, PERF_ATTR_SIZE_VER0);
2780 if (ret <= 0) {
2781 pr_debug("cannot read %d bytes of header attr\n",
2782 PERF_ATTR_SIZE_VER0);
2783 return -1;
2784 }
2785
2786 /* on file perf_event_attr size */
2787 sz = attr->size;
Stephane Eranian114382a2012-02-09 23:21:08 +01002788
Stephane Eranian69996df2012-02-09 23:21:06 +01002789 if (ph->needs_swap)
2790 sz = bswap_32(sz);
2791
2792 if (sz == 0) {
2793 /* assume ABI0 */
2794 sz = PERF_ATTR_SIZE_VER0;
2795 } else if (sz > our_sz) {
2796 pr_debug("file uses a more recent and unsupported ABI"
2797 " (%zu bytes extra)\n", sz - our_sz);
2798 return -1;
2799 }
2800 /* what we have not yet read and that we know about */
2801 left = sz - PERF_ATTR_SIZE_VER0;
2802 if (left) {
2803 void *ptr = attr;
2804 ptr += PERF_ATTR_SIZE_VER0;
2805
2806 ret = readn(fd, ptr, left);
2807 }
2808 /* read perf_file_section, ids are read in caller */
2809 ret = readn(fd, &f_attr->ids, sizeof(f_attr->ids));
2810
2811 return ret <= 0 ? -1 : 0;
2812}
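/*
 * Note on the size handling above: the fixed PERF_ATTR_SIZE_VER0 prefix is
 * always read, and only the on-file remainder (sz - PERF_ATTR_SIZE_VER0)
 * is read on top of it, so attrs written by an older ABI simply leave the
 * newer fields zeroed (the struct was memset first), while a file with an
 * attr larger than the running tool knows about is rejected.
 */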
2813
Namhyung Kim831394b2012-09-06 11:10:46 +09002814static int perf_evsel__prepare_tracepoint_event(struct perf_evsel *evsel,
2815 struct pevent *pevent)
Arnaldo Carvalho de Melocb9dd492012-06-11 19:03:32 -03002816{
Namhyung Kim831394b2012-09-06 11:10:46 +09002817 struct event_format *event;
Arnaldo Carvalho de Melocb9dd492012-06-11 19:03:32 -03002818 char bf[128];
2819
Namhyung Kim831394b2012-09-06 11:10:46 +09002820 /* already prepared */
2821 if (evsel->tp_format)
2822 return 0;
2823
Namhyung Kim3dce2ce2013-03-21 16:18:48 +09002824 if (pevent == NULL) {
2825 pr_debug("broken or missing trace data\n");
2826 return -1;
2827 }
2828
Namhyung Kim831394b2012-09-06 11:10:46 +09002829 event = pevent_find_event(pevent, evsel->attr.config);
Namhyung Kima7619ae2013-04-18 21:24:16 +09002830 if (event == NULL) {
2831 pr_debug("cannot find event format for %d\n", (int)evsel->attr.config);
Arnaldo Carvalho de Melocb9dd492012-06-11 19:03:32 -03002832 return -1;
Namhyung Kima7619ae2013-04-18 21:24:16 +09002833 }
Arnaldo Carvalho de Melocb9dd492012-06-11 19:03:32 -03002834
Namhyung Kim831394b2012-09-06 11:10:46 +09002835 if (!evsel->name) {
2836 snprintf(bf, sizeof(bf), "%s:%s", event->system, event->name);
2837 evsel->name = strdup(bf);
2838 if (evsel->name == NULL)
2839 return -1;
2840 }
Arnaldo Carvalho de Melocb9dd492012-06-11 19:03:32 -03002841
Arnaldo Carvalho de Melofcf65bf2012-08-07 09:58:03 -03002842 evsel->tp_format = event;
Arnaldo Carvalho de Melocb9dd492012-06-11 19:03:32 -03002843 return 0;
2844}
2845
Namhyung Kim831394b2012-09-06 11:10:46 +09002846static int perf_evlist__prepare_tracepoint_events(struct perf_evlist *evlist,
2847 struct pevent *pevent)
Arnaldo Carvalho de Melocb9dd492012-06-11 19:03:32 -03002848{
2849 struct perf_evsel *pos;
2850
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03002851 evlist__for_each_entry(evlist, pos) {
Namhyung Kim831394b2012-09-06 11:10:46 +09002852 if (pos->attr.type == PERF_TYPE_TRACEPOINT &&
2853 perf_evsel__prepare_tracepoint_event(pos, pevent))
Arnaldo Carvalho de Melocb9dd492012-06-11 19:03:32 -03002854 return -1;
2855 }
2856
2857 return 0;
2858}
2859
Jiri Olsad4339562013-07-17 19:49:41 +02002860int perf_session__read_header(struct perf_session *session)
Tom Zanussi8dc58102010-04-01 23:59:15 -05002861{
Jiri Olsacc9784bd2013-10-15 16:27:34 +02002862 struct perf_data_file *file = session->file;
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03002863 struct perf_header *header = &session->header;
Arnaldo Carvalho de Meloba215942010-01-14 12:23:10 -02002864 struct perf_file_header f_header;
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02002865 struct perf_file_attr f_attr;
2866 u64 f_id;
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02002867 int nr_attrs, nr_ids, i, j;
Jiri Olsacc9784bd2013-10-15 16:27:34 +02002868 int fd = perf_data_file__fd(file);
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02002869
Namhyung Kim334fe7a2013-03-11 16:43:12 +09002870 session->evlist = perf_evlist__new();
Arnaldo Carvalho de Meloa91e5432011-03-10 11:15:54 -03002871 if (session->evlist == NULL)
2872 return -ENOMEM;
2873
Kan Liang2c071442015-08-28 05:48:05 -04002874 session->evlist->env = &header->env;
Arnaldo Carvalho de Melo4cde9982015-09-09 12:25:00 -03002875 session->machines.host.env = &header->env;
Jiri Olsacc9784bd2013-10-15 16:27:34 +02002876 if (perf_data_file__is_pipe(file))
Jiri Olsad4339562013-07-17 19:49:41 +02002877 return perf_header__read_pipe(session);
Tom Zanussi8dc58102010-04-01 23:59:15 -05002878
Stephane Eranian69996df2012-02-09 23:21:06 +01002879 if (perf_file_header__read(&f_header, header, fd) < 0)
Arnaldo Carvalho de Melo4dc0a042009-11-19 14:55:55 -02002880 return -EINVAL;
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02002881
Namhyung Kimb314e5c2013-09-30 17:19:48 +09002882 /*
2883 * Sanity check that perf.data was written cleanly; data size is
2884 * initialized to 0 and updated only if the on_exit function is run.
2885 * If data size is still 0 then the file contains only partial
 2886	 * information. Just warn the user and process as much of it as we can.
2887 */
2888 if (f_header.data.size == 0) {
2889 pr_warning("WARNING: The %s file's data size field is 0 which is unexpected.\n"
2890 "Was the 'perf record' command properly terminated?\n",
Jiri Olsacc9784bd2013-10-15 16:27:34 +02002891 file->path);
Namhyung Kimb314e5c2013-09-30 17:19:48 +09002892 }
2893
Stephane Eranian69996df2012-02-09 23:21:06 +01002894 nr_attrs = f_header.attrs.size / f_header.attr_size;
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02002895 lseek(fd, f_header.attrs.offset, SEEK_SET);
2896
2897 for (i = 0; i < nr_attrs; i++) {
Arnaldo Carvalho de Meloa91e5432011-03-10 11:15:54 -03002898 struct perf_evsel *evsel;
Peter Zijlstra1c222bc2009-08-06 20:57:41 +02002899 off_t tmp;
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02002900
Stephane Eranian69996df2012-02-09 23:21:06 +01002901 if (read_attr(fd, header, &f_attr) < 0)
Arnaldo Carvalho de Melo769885f2009-12-28 22:48:32 -02002902 goto out_errno;
Arnaldo Carvalho de Meloba215942010-01-14 12:23:10 -02002903
David Ahern1060ab82015-04-09 16:15:46 -04002904 if (header->needs_swap) {
2905 f_attr.ids.size = bswap_64(f_attr.ids.size);
2906 f_attr.ids.offset = bswap_64(f_attr.ids.offset);
David Aherneda39132011-07-15 12:34:09 -06002907 perf_event__attr_swap(&f_attr.attr);
David Ahern1060ab82015-04-09 16:15:46 -04002908 }
David Aherneda39132011-07-15 12:34:09 -06002909
Peter Zijlstra1c222bc2009-08-06 20:57:41 +02002910 tmp = lseek(fd, 0, SEEK_CUR);
Arnaldo Carvalho de Meloef503832013-11-07 16:41:19 -03002911 evsel = perf_evsel__new(&f_attr.attr);
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02002912
Arnaldo Carvalho de Meloa91e5432011-03-10 11:15:54 -03002913 if (evsel == NULL)
2914 goto out_delete_evlist;
Arnaldo Carvalho de Melo0807d2d2012-09-26 12:48:18 -03002915
2916 evsel->needs_swap = header->needs_swap;
Arnaldo Carvalho de Meloa91e5432011-03-10 11:15:54 -03002917 /*
2918 * Do it before so that if perf_evsel__alloc_id fails, this
2919 * entry gets purged too at perf_evlist__delete().
2920 */
2921 perf_evlist__add(session->evlist, evsel);
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02002922
2923 nr_ids = f_attr.ids.size / sizeof(u64);
Arnaldo Carvalho de Meloa91e5432011-03-10 11:15:54 -03002924 /*
2925 * We don't have the cpu and thread maps on the header, so
2926 * for allocating the perf_sample_id table we fake 1 cpu and
2927 * hattr->ids threads.
2928 */
2929 if (perf_evsel__alloc_id(evsel, 1, nr_ids))
2930 goto out_delete_evlist;
2931
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02002932 lseek(fd, f_attr.ids.offset, SEEK_SET);
2933
2934 for (j = 0; j < nr_ids; j++) {
Arnaldo Carvalho de Melo1c0b04d2011-03-09 08:13:19 -03002935 if (perf_header__getbuffer64(header, fd, &f_id, sizeof(f_id)))
Arnaldo Carvalho de Melo769885f2009-12-28 22:48:32 -02002936 goto out_errno;
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02002937
Arnaldo Carvalho de Meloa91e5432011-03-10 11:15:54 -03002938 perf_evlist__id_add(session->evlist, evsel, 0, j, f_id);
Arnaldo Carvalho de Melo4dc0a042009-11-19 14:55:55 -02002939 }
Arnaldo Carvalho de Melo11deb1f2009-11-17 01:18:09 -02002940
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02002941 lseek(fd, tmp, SEEK_SET);
2942 }
2943
Arnaldo Carvalho de Melod04b35f2011-11-11 22:17:32 -02002944 symbol_conf.nr_events = nr_attrs;
2945
Jiri Olsa29f5ffd2013-12-03 14:09:23 +01002946 perf_header__process_sections(header, fd, &session->tevent,
Stephane Eranianfbe96f22011-09-30 15:40:40 +02002947 perf_file_section__process);
Frederic Weisbecker4778d2e2009-11-11 04:51:05 +01002948
Namhyung Kim831394b2012-09-06 11:10:46 +09002949 if (perf_evlist__prepare_tracepoint_events(session->evlist,
Jiri Olsa29f5ffd2013-12-03 14:09:23 +01002950 session->tevent.pevent))
Arnaldo Carvalho de Melocb9dd492012-06-11 19:03:32 -03002951 goto out_delete_evlist;
2952
Arnaldo Carvalho de Melo4dc0a042009-11-19 14:55:55 -02002953 return 0;
Arnaldo Carvalho de Melo769885f2009-12-28 22:48:32 -02002954out_errno:
2955 return -errno;
Arnaldo Carvalho de Meloa91e5432011-03-10 11:15:54 -03002956
2957out_delete_evlist:
2958 perf_evlist__delete(session->evlist);
2959 session->evlist = NULL;
2960 return -ENOMEM;
Peter Zijlstra7c6a1c62009-06-25 17:05:54 +02002961}
Frederic Weisbecker0d3a5c82009-08-16 20:56:37 +02002962
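/*
 * Illustrative sketch of the on-disk layout consumed by the attr loop above
 * (a reading aid inferred from the code, not a definition from this file):
 * starting at f_header.attrs.offset the file holds nr_attrs back-to-back
 * entries, each a perf_event_attr followed by an {offset, size} descriptor
 * for that event's sample ids:
 *
 *      attrs.offset -> [ attr 0 ][ ids 0: {offset, size} ]
 *                      [ attr 1 ][ ids 1: {offset, size} ] ...
 *      ids N.offset -> [ u64 id, u64 id, ... ]   (ids.size / sizeof(u64) entries)
 *
 * Each attr becomes one evsel on session->evlist, and every id is registered
 * with perf_evlist__id_add() so later samples can be matched to their evsel.
 */
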
int perf_event__synthesize_attr(struct perf_tool *tool,
                                struct perf_event_attr *attr, u32 ids, u64 *id,
                                perf_event__handler_t process)
{
        union perf_event *ev;
        size_t size;
        int err;

        size = sizeof(struct perf_event_attr);
        size = PERF_ALIGN(size, sizeof(u64));
        size += sizeof(struct perf_event_header);
        size += ids * sizeof(u64);

        ev = malloc(size);

        if (ev == NULL)
                return -ENOMEM;

        ev->attr.attr = *attr;
        memcpy(ev->attr.id, id, ids * sizeof(u64));

        ev->attr.header.type = PERF_RECORD_HEADER_ATTR;
        ev->attr.header.size = (u16)size;

        if (ev->attr.header.size == size)
                err = process(tool, ev, NULL, NULL);
        else
                err = -E2BIG;

        free(ev);

        return err;
}

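/*
 * Rough shape of the PERF_RECORD_HEADER_ATTR record built above (a sketch
 * inferred from the code, not a struct copied from a header file):
 *
 *      struct perf_event_header header;    type = PERF_RECORD_HEADER_ATTR
 *      struct perf_event_attr   attr;      copy of the caller's attr
 *      u64                      id[ids];   sample ids appended at the end
 *
 * header.size is only a u16, so the (ev->attr.header.size == size) test turns
 * a silently truncated size into an explicit -E2BIG error.
 */
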
static struct event_update_event *
event_update_event__new(size_t size, u64 type, u64 id)
{
        struct event_update_event *ev;

        size += sizeof(*ev);
        size  = PERF_ALIGN(size, sizeof(u64));

        ev = zalloc(size);
        if (ev) {
                ev->header.type = PERF_RECORD_EVENT_UPDATE;
                ev->header.size = (u16)size;
                ev->type = type;
                ev->id = id;
        }
        return ev;
}

int
perf_event__synthesize_event_update_unit(struct perf_tool *tool,
                                         struct perf_evsel *evsel,
                                         perf_event__handler_t process)
{
        struct event_update_event *ev;
        size_t size = strlen(evsel->unit);
        int err;

        ev = event_update_event__new(size + 1, PERF_EVENT_UPDATE__UNIT, evsel->id[0]);
        if (ev == NULL)
                return -ENOMEM;

        strncpy(ev->data, evsel->unit, size);
        err = process(tool, (union perf_event *)ev, NULL, NULL);
        free(ev);
        return err;
}

int
perf_event__synthesize_event_update_scale(struct perf_tool *tool,
                                          struct perf_evsel *evsel,
                                          perf_event__handler_t process)
{
        struct event_update_event *ev;
        struct event_update_event_scale *ev_data;
        int err;

        ev = event_update_event__new(sizeof(*ev_data), PERF_EVENT_UPDATE__SCALE, evsel->id[0]);
        if (ev == NULL)
                return -ENOMEM;

        ev_data = (struct event_update_event_scale *) ev->data;
        ev_data->scale = evsel->scale;
        err = process(tool, (union perf_event *) ev, NULL, NULL);
        free(ev);
        return err;
}

int
perf_event__synthesize_event_update_name(struct perf_tool *tool,
                                         struct perf_evsel *evsel,
                                         perf_event__handler_t process)
{
        struct event_update_event *ev;
        size_t len = strlen(evsel->name);
        int err;

        ev = event_update_event__new(len + 1, PERF_EVENT_UPDATE__NAME, evsel->id[0]);
        if (ev == NULL)
                return -ENOMEM;

        strncpy(ev->data, evsel->name, len);
        err = process(tool, (union perf_event *) ev, NULL, NULL);
        free(ev);
        return err;
}

int
perf_event__synthesize_event_update_cpus(struct perf_tool *tool,
                                         struct perf_evsel *evsel,
                                         perf_event__handler_t process)
{
        size_t size = sizeof(struct event_update_event);
        struct event_update_event *ev;
        int max, err;
        u16 type;

        if (!evsel->own_cpus)
                return 0;

        ev = cpu_map_data__alloc(evsel->own_cpus, &size, &type, &max);
        if (!ev)
                return -ENOMEM;

        ev->header.type = PERF_RECORD_EVENT_UPDATE;
        ev->header.size = (u16)size;
        ev->type = PERF_EVENT_UPDATE__CPUS;
        ev->id = evsel->id[0];

        cpu_map_data__synthesize((struct cpu_map_data *) ev->data,
                                 evsel->own_cpus,
                                 type, max);

        err = process(tool, (union perf_event *) ev, NULL, NULL);
        free(ev);
        return err;
}

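/*
 * Unlike the unit/name/scale updates above, the CPUS update does not know its
 * payload size up front: cpu_map_data__alloc() picks the cpu map encoding and
 * grows 'size' accordingly, and cpu_map_data__synthesize() fills ev->data.
 * The receiving side turns that payload back into a cpu_map with
 * cpu_map__new_data(), as the fprintf and process paths below do.
 */
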
size_t perf_event__fprintf_event_update(union perf_event *event, FILE *fp)
{
        struct event_update_event *ev = &event->event_update;
        struct event_update_event_scale *ev_scale;
        struct event_update_event_cpus *ev_cpus;
        struct cpu_map *map;
        size_t ret;

        ret = fprintf(fp, "\n... id: %" PRIu64 "\n", ev->id);

        switch (ev->type) {
        case PERF_EVENT_UPDATE__SCALE:
                ev_scale = (struct event_update_event_scale *) ev->data;
                ret += fprintf(fp, "... scale: %f\n", ev_scale->scale);
                break;
        case PERF_EVENT_UPDATE__UNIT:
                ret += fprintf(fp, "... unit: %s\n", ev->data);
                break;
        case PERF_EVENT_UPDATE__NAME:
                ret += fprintf(fp, "... name: %s\n", ev->data);
                break;
        case PERF_EVENT_UPDATE__CPUS:
                ev_cpus = (struct event_update_event_cpus *) ev->data;
                ret += fprintf(fp, "... ");

                map = cpu_map__new_data(&ev_cpus->cpus);
                if (map)
                        ret += cpu_map__fprintf(map, fp);
                else
                        ret += fprintf(fp, "failed to get cpus\n");
                break;
        default:
                ret += fprintf(fp, "... unknown type\n");
                break;
        }

        return ret;
}

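/*
 * Example of the dump output produced above for a unit and a scale update
 * (values are made up, purely illustrative):
 *
 *      ... id: 17
 *      ... unit: Joules
 *
 *      ... id: 17
 *      ... scale: 0.250000
 */
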
int perf_event__synthesize_attrs(struct perf_tool *tool,
                                 struct perf_session *session,
                                 perf_event__handler_t process)
{
        struct perf_evsel *evsel;
        int err = 0;

        evlist__for_each_entry(session->evlist, evsel) {
                err = perf_event__synthesize_attr(tool, &evsel->attr, evsel->ids,
                                                  evsel->id, process);
                if (err) {
                        pr_debug("failed to create perf header attribute\n");
                        return err;
                }
        }

        return err;
}

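/*
 * Typical caller shape (an assumption about call sites outside this file;
 * the handler name below is hypothetical):
 *
 *      err = perf_event__synthesize_attrs(tool, session,
 *                                         process_synthesized_event);
 *      if (err < 0)
 *              pr_err("Couldn't synthesize attrs.\n");
 *
 * The matching consumer is perf_event__process_attr() below, which rebuilds
 * an evlist from the PERF_RECORD_HEADER_ATTR stream when reading a pipe.
 */
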
int perf_event__process_attr(struct perf_tool *tool __maybe_unused,
                             union perf_event *event,
                             struct perf_evlist **pevlist)
{
        u32 i, ids, n_ids;
        struct perf_evsel *evsel;
        struct perf_evlist *evlist = *pevlist;

        if (evlist == NULL) {
                *pevlist = evlist = perf_evlist__new();
                if (evlist == NULL)
                        return -ENOMEM;
        }

        evsel = perf_evsel__new(&event->attr.attr);
        if (evsel == NULL)
                return -ENOMEM;

        perf_evlist__add(evlist, evsel);

        ids = event->header.size;
        ids -= (void *)&event->attr.id - (void *)event;
        n_ids = ids / sizeof(u64);
        /*
         * We don't have the cpu and thread maps on the header, so
         * for allocating the perf_sample_id table we fake 1 cpu and
         * hattr->ids threads.
         */
        if (perf_evsel__alloc_id(evsel, 1, n_ids))
                return -ENOMEM;

        for (i = 0; i < n_ids; i++) {
                perf_evlist__id_add(evlist, evsel, 0, i, event->attr.id[i]);
        }

        symbol_conf.nr_events = evlist->nr_entries;

        return 0;
}

int perf_event__process_event_update(struct perf_tool *tool __maybe_unused,
                                     union perf_event *event,
                                     struct perf_evlist **pevlist)
{
        struct event_update_event *ev = &event->event_update;
        struct event_update_event_scale *ev_scale;
        struct event_update_event_cpus *ev_cpus;
        struct perf_evlist *evlist;
        struct perf_evsel *evsel;
        struct cpu_map *map;

        if (!pevlist || *pevlist == NULL)
                return -EINVAL;

        evlist = *pevlist;

        evsel = perf_evlist__id2evsel(evlist, ev->id);
        if (evsel == NULL)
                return -EINVAL;

        switch (ev->type) {
        case PERF_EVENT_UPDATE__UNIT:
                evsel->unit = strdup(ev->data);
                break;
        case PERF_EVENT_UPDATE__NAME:
                evsel->name = strdup(ev->data);
                break;
        case PERF_EVENT_UPDATE__SCALE:
                ev_scale = (struct event_update_event_scale *) ev->data;
                evsel->scale = ev_scale->scale;
                break;
        case PERF_EVENT_UPDATE__CPUS:
                ev_cpus = (struct event_update_event_cpus *) ev->data;

                map = cpu_map__new_data(&ev_cpus->cpus);
                if (map)
                        evsel->own_cpus = map;
                else
                        pr_err("failed to get event_update cpus\n");
        default:
                break;
        }

        return 0;
}

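/*
 * Payload carried in ev->data for each update type handled above (a sketch
 * inferred from the switch statement, not copied from a header file):
 *
 *      PERF_EVENT_UPDATE__UNIT  : NUL terminated unit string
 *      PERF_EVENT_UPDATE__NAME  : NUL terminated event name
 *      PERF_EVENT_UPDATE__SCALE : event_update_event_scale (a double)
 *      PERF_EVENT_UPDATE__CPUS  : event_update_event_cpus (a cpu_map_data)
 */
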
int perf_event__synthesize_tracing_data(struct perf_tool *tool, int fd,
                                        struct perf_evlist *evlist,
                                        perf_event__handler_t process)
{
        union perf_event ev;
        struct tracing_data *tdata;
        ssize_t size = 0, aligned_size = 0, padding;
        int err __maybe_unused = 0;

        /*
         * We are going to store the size of the data followed
         * by the data contents. Since the fd descriptor is a pipe,
         * we cannot seek back to store the size of the data once
         * we know it. Instead we:
         *
         * - write the tracing data to the temp file
         * - get/write the data size to pipe
         * - write the tracing data from the temp file
         *   to the pipe
         */
        tdata = tracing_data_get(&evlist->entries, fd, true);
        if (!tdata)
                return -1;

        memset(&ev, 0, sizeof(ev));

        ev.tracing_data.header.type = PERF_RECORD_HEADER_TRACING_DATA;
        size = tdata->size;
        aligned_size = PERF_ALIGN(size, sizeof(u64));
        padding = aligned_size - size;
        ev.tracing_data.header.size = sizeof(ev.tracing_data);
        ev.tracing_data.size = aligned_size;

        process(tool, &ev, NULL, NULL);

        /*
         * The put function will copy all the tracing data
         * stored in temp file to the pipe.
         */
        tracing_data_put(tdata);

        write_padded(fd, NULL, 0, padding);

        return aligned_size;
}

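/*
 * Resulting byte stream on the output pipe (a sketch of what the code above
 * emits):
 *
 *      [ tracing_data event ]       header.size = sizeof(ev.tracing_data)
 *      [ raw tracing data ]         tdata->size bytes, copied by tracing_data_put()
 *      [ zero padding ]             write_padded() up to the 8-byte boundary
 *
 * ev.tracing_data.size records the aligned length, which is exactly what
 * perf_event__process_tracing_data() below verifies after trace_report().
 */
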
int perf_event__process_tracing_data(struct perf_tool *tool __maybe_unused,
                                     union perf_event *event,
                                     struct perf_session *session)
{
        ssize_t size_read, padding, size = event->tracing_data.size;
        int fd = perf_data_file__fd(session->file);
        off_t offset = lseek(fd, 0, SEEK_CUR);
        char buf[BUFSIZ];

        /* setup for reading amidst mmap */
        lseek(fd, offset + sizeof(struct tracing_data_event),
              SEEK_SET);

        size_read = trace_report(fd, &session->tevent,
                                 session->repipe);
        padding = PERF_ALIGN(size_read, sizeof(u64)) - size_read;

        if (readn(fd, buf, padding) < 0) {
                pr_err("%s: reading input file", __func__);
                return -1;
        }
        if (session->repipe) {
                int retw = write(STDOUT_FILENO, buf, padding);
                if (retw <= 0 || retw != padding) {
                        pr_err("%s: repiping tracing data padding", __func__);
                        return -1;
                }
        }

        if (size_read + padding != size) {
                pr_err("%s: tracing data size mismatch", __func__);
                return -1;
        }

        perf_evlist__prepare_tracepoint_events(session->evlist,
                                               session->tevent.pevent);

        return size_read + padding;
}

int perf_event__synthesize_build_id(struct perf_tool *tool,
                                    struct dso *pos, u16 misc,
                                    perf_event__handler_t process,
                                    struct machine *machine)
{
        union perf_event ev;
        size_t len;
        int err = 0;

        if (!pos->hit)
                return err;

        memset(&ev, 0, sizeof(ev));

        len = pos->long_name_len + 1;
        len = PERF_ALIGN(len, NAME_ALIGN);
        memcpy(&ev.build_id.build_id, pos->build_id, sizeof(pos->build_id));
        ev.build_id.header.type = PERF_RECORD_HEADER_BUILD_ID;
        ev.build_id.header.misc = misc;
        ev.build_id.pid = machine->pid;
        ev.build_id.header.size = sizeof(ev.build_id) + len;
        memcpy(&ev.build_id.filename, pos->long_name, pos->long_name_len);

        err = process(tool, &ev, NULL, machine);

        return err;
}

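/*
 * Shape of the synthesized PERF_RECORD_HEADER_BUILD_ID event (a sketch
 * inferred from the assignments above):
 *
 *      struct perf_event_header header;   misc from the caller, size = sizeof(ev.build_id) + len
 *      pid                                machine->pid
 *      build_id[]                         copied from the dso
 *      filename[]                         dso long name, padded to NAME_ALIGN
 */
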
int perf_event__process_build_id(struct perf_tool *tool __maybe_unused,
                                 union perf_event *event,
                                 struct perf_session *session)
{
        __event_process_build_id(&event->build_id,
                                 event->build_id.filename,
                                 session);
        return 0;
}