/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */

#include <byteswap.h>
#include "asm/bug.h"
#include "evsel.h"
#include "evlist.h"
#include "util.h"
#include "cpumap.h"
#include "thread_map.h"
#include "target.h"
#include "../../include/linux/perf_event.h"

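/*
 * Accessors for the fd tables: FD() indexes an evsel's per-cpu,
 * per-thread fd xyarray, GROUP_FD() the per-cpu group leader fds.
 */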
#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
#define GROUP_FD(group_fd, cpu) (*(int *)xyarray__entry(group_fd, cpu, 0))

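/*
 * Each bit set in sample_type (within PERF_SAMPLE_MASK) adds one u64 to
 * the fixed-size part of a sample, so e.g. PERF_SAMPLE_IP |
 * PERF_SAMPLE_TID yields 2 * sizeof(u64) == 16 bytes.
 */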
int __perf_evsel__sample_size(u64 sample_type)
{
	u64 mask = sample_type & PERF_SAMPLE_MASK;
	int size = 0;
	int i;

	for (i = 0; i < 64; i++) {
		if (mask & (1ULL << i))
			size++;
	}

	size *= sizeof(u64);

	return size;
}

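/*
 * Set up the rbtrees for hist entries: two input trees so new entries
 * can be added to one while a previous batch is being collapsed from
 * the other, plus the collapsed and final (sorted) trees and the lock
 * guarding the switch between the input trees.
 */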
void hists__init(struct hists *hists)
{
	memset(hists, 0, sizeof(*hists));
	hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT;
	hists->entries_in = &hists->entries_in_array[0];
	hists->entries_collapsed = RB_ROOT;
	hists->entries = RB_ROOT;
	pthread_mutex_init(&hists->lock, NULL);
}

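/*
 * perf_evsel__init() is split out from perf_evsel__new() below so that
 * evsels embedded in other structs can be initialized in place.
 */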
void perf_evsel__init(struct perf_evsel *evsel,
		      struct perf_event_attr *attr, int idx)
{
	evsel->idx = idx;
	evsel->attr = *attr;
	INIT_LIST_HEAD(&evsel->node);
	hists__init(&evsel->hists);
}

struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr, int idx)
{
	struct perf_evsel *evsel = zalloc(sizeof(*evsel));

	if (evsel != NULL)
		perf_evsel__init(evsel, attr, idx);

	return evsel;
}

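/* Names for the generic hardware events, indexed by PERF_COUNT_HW_*. */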
static const char *perf_evsel__hw_names[PERF_COUNT_HW_MAX] = {
	"cycles",
	"instructions",
	"cache-references",
	"cache-misses",
	"branches",
	"branch-misses",
	"bus-cycles",
	"stalled-cycles-frontend",
	"stalled-cycles-backend",
	"ref-cycles",
};

const char *__perf_evsel__hw_name(u64 config)
{
	if (config < PERF_COUNT_HW_MAX && perf_evsel__hw_names[config])
		return perf_evsel__hw_names[config];

	return "unknown-hardware";
}

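/*
 * Append the modifier suffix to the event name: the first modifier
 * printed reserves a byte (colon = r++) that is patched to ':' at the
 * end. When any of user/kernel/hv is excluded, a letter is printed for
 * each context that is still included, then 'p's for the precise_ip
 * level and 'H'/'G' for host/guest, e.g. "cycles:kppH".
 */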
static int perf_evsel__hw_name(struct perf_evsel *evsel, char *bf, size_t size)
{
	int colon = 0;
	struct perf_event_attr *attr = &evsel->attr;
	int r = scnprintf(bf, size, "%s", __perf_evsel__hw_name(attr->config));
	bool exclude_guest_default = false;

#define MOD_PRINT(context, mod)	do {					\
		if (!attr->exclude_##context) {				\
			if (!colon) colon = r++;			\
			r += scnprintf(bf + r, size - r, "%c", mod);	\
		} } while(0)

	if (attr->exclude_kernel || attr->exclude_user || attr->exclude_hv) {
		MOD_PRINT(kernel, 'k');
		MOD_PRINT(user, 'u');
		MOD_PRINT(hv, 'h');
		exclude_guest_default = true;
	}

	if (attr->precise_ip) {
		if (!colon)
			colon = r++;
		r += scnprintf(bf + r, size - r, "%.*s", attr->precise_ip, "ppp");
		exclude_guest_default = true;
	}

	if (attr->exclude_host || attr->exclude_guest == exclude_guest_default) {
		MOD_PRINT(host, 'H');
		MOD_PRINT(guest, 'G');
	}
#undef MOD_PRINT
	if (colon)
		bf[colon] = ':';
	return r;
}

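/*
 * Reconstruct a human-readable event name into bf; only the RAW and
 * HARDWARE types are handled so far, see the FIXME below.
 */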
int perf_evsel__name(struct perf_evsel *evsel, char *bf, size_t size)
{
	int ret;

	switch (evsel->attr.type) {
	case PERF_TYPE_RAW:
		ret = scnprintf(bf, size, "raw 0x%" PRIx64, evsel->attr.config);
		break;

	case PERF_TYPE_HARDWARE:
		ret = perf_evsel__hw_name(evsel, bf, size);
		break;
	default:
		/*
		 * FIXME
		 *
		 * This is the minimal perf_evsel__name so that we can
		 * reconstruct event names taking into account event modifiers.
		 *
		 * The old event_name uses it now for raw and hw events, so that
		 * we don't drag all the parsing stuff into the python binding.
		 *
		 * On the next devel cycle the rest of the event naming will be
		 * brought here.
		 */
		return 0;
	}

	return ret;
}

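/*
 * Translate the perf_record_opts for this session into the
 * perf_event_attr of one evsel. Only the first counter (idx == 0) asks
 * for the mmap/comm tracking records, and every event starts disabled,
 * to be enabled via enable_on_exec or later by the tool.
 */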
void perf_evsel__config(struct perf_evsel *evsel, struct perf_record_opts *opts,
			struct perf_evsel *first)
{
	struct perf_event_attr *attr = &evsel->attr;
	int track = !evsel->idx; /* only the first counter needs these */

	attr->disabled = 1;
	attr->sample_id_all = opts->sample_id_all_missing ? 0 : 1;
	attr->inherit = !opts->no_inherit;
	attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
			    PERF_FORMAT_TOTAL_TIME_RUNNING |
			    PERF_FORMAT_ID;

	attr->sample_type |= PERF_SAMPLE_IP | PERF_SAMPLE_TID;

	/*
	 * We default some events to a sample period of 1, but keep it a
	 * weak assumption that the user can override.
	 */
	if (!attr->sample_period || (opts->user_freq != UINT_MAX &&
				     opts->user_interval != ULLONG_MAX)) {
		if (opts->freq) {
			attr->sample_type |= PERF_SAMPLE_PERIOD;
			attr->freq = 1;
			attr->sample_freq = opts->freq;
		} else {
			attr->sample_period = opts->default_interval;
		}
	}

	if (opts->no_samples)
		attr->sample_freq = 0;

	if (opts->inherit_stat)
		attr->inherit_stat = 1;

	if (opts->sample_address) {
		attr->sample_type |= PERF_SAMPLE_ADDR;
		attr->mmap_data = track;
	}

	if (opts->call_graph)
		attr->sample_type |= PERF_SAMPLE_CALLCHAIN;

	if (perf_target__has_cpu(&opts->target))
		attr->sample_type |= PERF_SAMPLE_CPU;

	if (opts->period)
		attr->sample_type |= PERF_SAMPLE_PERIOD;

	if (!opts->sample_id_all_missing &&
	    (opts->sample_time || !opts->no_inherit ||
	     perf_target__has_cpu(&opts->target)))
		attr->sample_type |= PERF_SAMPLE_TIME;

	if (opts->raw_samples) {
		attr->sample_type |= PERF_SAMPLE_TIME;
		attr->sample_type |= PERF_SAMPLE_RAW;
		attr->sample_type |= PERF_SAMPLE_CPU;
	}

	if (opts->no_delay) {
		attr->watermark = 0;
		attr->wakeup_events = 1;
	}
	if (opts->branch_stack) {
		attr->sample_type |= PERF_SAMPLE_BRANCH_STACK;
		attr->branch_sample_type = opts->branch_stack;
	}

	attr->mmap = track;
	attr->comm = track;

	if (perf_target__none(&opts->target) &&
	    (!opts->group || evsel == first)) {
		attr->enable_on_exec = 1;
	}
}

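/* Allocate the ncpus x nthreads fd table, with every slot set to -1. */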
int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	int cpu, thread;
	evsel->fd = xyarray__new(ncpus, nthreads, sizeof(int));

	if (evsel->fd) {
		for (cpu = 0; cpu < ncpus; cpu++) {
			for (thread = 0; thread < nthreads; thread++) {
				FD(evsel, cpu, thread) = -1;
			}
		}
	}

	return evsel->fd != NULL ? 0 : -ENOMEM;
}

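/*
 * Allocate one perf_sample_id per (cpu, thread) plus the flat array
 * that will receive the kernel-assigned event ids, used to map samples
 * back to this evsel.
 */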
int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	evsel->sample_id = xyarray__new(ncpus, nthreads, sizeof(struct perf_sample_id));
	if (evsel->sample_id == NULL)
		return -ENOMEM;

	evsel->id = zalloc(ncpus * nthreads * sizeof(u64));
	if (evsel->id == NULL) {
		xyarray__delete(evsel->sample_id);
		evsel->sample_id = NULL;
		return -ENOMEM;
	}

	return 0;
}

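/* One perf_counts_values per cpu, following the aggregated counts. */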
int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus)
{
	evsel->counts = zalloc((sizeof(*evsel->counts) +
				(ncpus * sizeof(struct perf_counts_values))));
	return evsel->counts != NULL ? 0 : -ENOMEM;
}

void perf_evsel__free_fd(struct perf_evsel *evsel)
{
	xyarray__delete(evsel->fd);
	evsel->fd = NULL;
}

void perf_evsel__free_id(struct perf_evsel *evsel)
{
	xyarray__delete(evsel->sample_id);
	evsel->sample_id = NULL;
	free(evsel->id);
	evsel->id = NULL;
}

void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	int cpu, thread;

	for (cpu = 0; cpu < ncpus; cpu++)
		for (thread = 0; thread < nthreads; ++thread) {
			close(FD(evsel, cpu, thread));
			FD(evsel, cpu, thread) = -1;
		}
}

void perf_evsel__exit(struct perf_evsel *evsel)
{
	assert(list_empty(&evsel->node));
	xyarray__delete(evsel->fd);
	xyarray__delete(evsel->sample_id);
	free(evsel->id);
}

void perf_evsel__delete(struct perf_evsel *evsel)
{
	perf_evsel__exit(evsel);
	close_cgroup(evsel->cgrp);
	free(evsel->name);
	free(evsel);
}

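/*
 * Read one counter value. With scale, three u64s are read (value, time
 * enabled, time running) and the value is extrapolated for the time the
 * counter was multiplexed out: val * ena / run, rounded to nearest.
 */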
int __perf_evsel__read_on_cpu(struct perf_evsel *evsel,
			      int cpu, int thread, bool scale)
{
	struct perf_counts_values count;
	size_t nv = scale ? 3 : 1;

	if (FD(evsel, cpu, thread) < 0)
		return -EINVAL;

	if (evsel->counts == NULL && perf_evsel__alloc_counts(evsel, cpu + 1) < 0)
		return -ENOMEM;

	if (readn(FD(evsel, cpu, thread), &count, nv * sizeof(u64)) < 0)
		return -errno;

	if (scale) {
		if (count.run == 0)
			count.val = 0;
		else if (count.run < count.ena)
			count.val = (u64)((double)count.val * count.ena / count.run + 0.5);
	} else
		count.ena = count.run = 0;

	evsel->counts->cpu[cpu] = count;
	return 0;
}

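/*
 * Aggregate the counter over all cpus and threads. counts->scaled ends
 * up -1 if the counter never ran, 1 if the value was extrapolated
 * because of multiplexing, and 0 if it ran the whole time.
 */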
int __perf_evsel__read(struct perf_evsel *evsel,
		       int ncpus, int nthreads, bool scale)
{
	size_t nv = scale ? 3 : 1;
	int cpu, thread;
	struct perf_counts_values *aggr = &evsel->counts->aggr, count;

	aggr->val = aggr->ena = aggr->run = 0;

	for (cpu = 0; cpu < ncpus; cpu++) {
		for (thread = 0; thread < nthreads; thread++) {
			if (FD(evsel, cpu, thread) < 0)
				continue;

			if (readn(FD(evsel, cpu, thread),
				  &count, nv * sizeof(u64)) < 0)
				return -errno;

			aggr->val += count.val;
			if (scale) {
				aggr->ena += count.ena;
				aggr->run += count.run;
			}
		}
	}

	evsel->counts->scaled = 0;
	if (scale) {
		if (aggr->run == 0) {
			evsel->counts->scaled = -1;
			aggr->val = 0;
			return 0;
		}

		if (aggr->run < aggr->ena) {
			evsel->counts->scaled = 1;
			aggr->val = (u64)((double)aggr->val * aggr->ena / aggr->run + 0.5);
		}
	} else
		aggr->ena = aggr->run = 0;

	return 0;
}

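/*
 * Open one fd per (cpu, thread) via sys_perf_event_open(). In cgroup
 * mode the cgroup fd is passed as the pid argument together with
 * PERF_FLAG_PID_CGROUP. When grouping, the first fd opened on each cpu
 * becomes the group leader; on any failure all fds opened so far are
 * closed again.
 */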
static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
			      struct thread_map *threads, bool group,
			      struct xyarray *group_fds)
{
	int cpu, thread;
	unsigned long flags = 0;
	int pid = -1, err;

	if (evsel->fd == NULL &&
	    perf_evsel__alloc_fd(evsel, cpus->nr, threads->nr) < 0)
		return -ENOMEM;

	if (evsel->cgrp) {
		flags = PERF_FLAG_PID_CGROUP;
		pid = evsel->cgrp->fd;
	}

	for (cpu = 0; cpu < cpus->nr; cpu++) {
		int group_fd = group_fds ? GROUP_FD(group_fds, cpu) : -1;

		for (thread = 0; thread < threads->nr; thread++) {

			if (!evsel->cgrp)
				pid = threads->map[thread];

			FD(evsel, cpu, thread) = sys_perf_event_open(&evsel->attr,
								     pid,
								     cpus->map[cpu],
								     group_fd, flags);
			if (FD(evsel, cpu, thread) < 0) {
				err = -errno;
				goto out_close;
			}

			if (group && group_fd == -1)
				group_fd = FD(evsel, cpu, thread);
		}
	}

	return 0;

out_close:
	do {
		while (--thread >= 0) {
			close(FD(evsel, cpu, thread));
			FD(evsel, cpu, thread) = -1;
		}
		thread = threads->nr;
	} while (--cpu >= 0);
	return err;
}

void perf_evsel__close(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	if (evsel->fd == NULL)
		return;

	perf_evsel__close_fd(evsel, ncpus, nthreads);
	perf_evsel__free_fd(evsel);
	evsel->fd = NULL;
}

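/*
 * Placeholder one-entry maps holding -1, used for the pid/cpu arguments
 * of sys_perf_event_open() when the caller supplies no cpu or thread
 * map.
 */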
static struct {
	struct cpu_map map;
	int cpus[1];
} empty_cpu_map = {
	.map.nr = 1,
	.cpus	= { -1, },
};

static struct {
	struct thread_map map;
	int threads[1];
} empty_thread_map = {
	.map.nr	 = 1,
	.threads = { -1, },
};

int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
		     struct thread_map *threads, bool group,
		     struct xyarray *group_fd)
{
	if (cpus == NULL) {
		/* Work around old compiler warnings about strict aliasing */
		cpus = &empty_cpu_map.map;
	}

	if (threads == NULL)
		threads = &empty_thread_map.map;

	return __perf_evsel__open(evsel, cpus, threads, group, group_fd);
}

int perf_evsel__open_per_cpu(struct perf_evsel *evsel,
			     struct cpu_map *cpus, bool group,
			     struct xyarray *group_fd)
{
	return __perf_evsel__open(evsel, cpus, &empty_thread_map.map, group,
				  group_fd);
}

int perf_evsel__open_per_thread(struct perf_evsel *evsel,
				struct thread_map *threads, bool group,
				struct xyarray *group_fd)
{
	return __perf_evsel__open(evsel, &empty_cpu_map.map, threads, group,
				  group_fd);
}

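/*
 * Non-sample events carry the sample_id_all fields at the very end of
 * the record, so they are parsed backwards, starting from the last u64
 * before the end of the event.
 */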
static int perf_event__parse_id_sample(const union perf_event *event, u64 type,
				       struct perf_sample *sample,
				       bool swapped)
{
	const u64 *array = event->sample.array;
	union u64_swap u;

	array += ((event->header.size -
		   sizeof(event->header)) / sizeof(u64)) - 1;

	if (type & PERF_SAMPLE_CPU) {
		u.val64 = *array;
		if (swapped) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
		}

		sample->cpu = u.val32[0];
		array--;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		sample->stream_id = *array;
		array--;
	}

	if (type & PERF_SAMPLE_ID) {
		sample->id = *array;
		array--;
	}

	if (type & PERF_SAMPLE_TIME) {
		sample->time = *array;
		array--;
	}

	if (type & PERF_SAMPLE_TID) {
		u.val64 = *array;
		if (swapped) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
		}

		sample->pid = u.val32[0];
		sample->tid = u.val32[1];
	}

	return 0;
}

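/* Does a field of 'size' bytes at 'offset' run past the end of the event? */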
static bool sample_overlap(const union perf_event *event,
			   const void *offset, u64 size)
{
	const void *base = event;

	if (offset + size > base + event->header.size)
		return true;

	return false;
}

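/*
 * Decode the fixed-order fields selected by 'type' (IP, TID, TIME,
 * ADDR, ID, STREAM_ID, CPU, PERIOD), then the variable-size CALLCHAIN,
 * RAW and BRANCH_STACK payloads, undoing the byte swap on packed u32
 * pairs when the data comes from a different endianness.
 */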
int perf_event__parse_sample(const union perf_event *event, u64 type,
			     int sample_size, bool sample_id_all,
			     struct perf_sample *data, bool swapped)
{
	const u64 *array;

	/*
	 * used for cross-endian analysis. See git commit 65014ab3
	 * for why this goofiness is needed.
	 */
	union u64_swap u;

	memset(data, 0, sizeof(*data));
	data->cpu = data->pid = data->tid = -1;
	data->stream_id = data->id = data->time = -1ULL;
	data->period = 1;

	if (event->header.type != PERF_RECORD_SAMPLE) {
		if (!sample_id_all)
			return 0;
		return perf_event__parse_id_sample(event, type, data, swapped);
	}

	array = event->sample.array;

	if (sample_size + sizeof(event->header) > event->header.size)
		return -EFAULT;

	if (type & PERF_SAMPLE_IP) {
		data->ip = event->ip.ip;
		array++;
	}

	if (type & PERF_SAMPLE_TID) {
		u.val64 = *array;
		if (swapped) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
		}

		data->pid = u.val32[0];
		data->tid = u.val32[1];
		array++;
	}

	if (type & PERF_SAMPLE_TIME) {
		data->time = *array;
		array++;
	}

	data->addr = 0;
	if (type & PERF_SAMPLE_ADDR) {
		data->addr = *array;
		array++;
	}

	data->id = -1ULL;
	if (type & PERF_SAMPLE_ID) {
		data->id = *array;
		array++;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		data->stream_id = *array;
		array++;
	}

	if (type & PERF_SAMPLE_CPU) {

		u.val64 = *array;
		if (swapped) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
		}

		data->cpu = u.val32[0];
		array++;
	}

	if (type & PERF_SAMPLE_PERIOD) {
		data->period = *array;
		array++;
	}

	if (type & PERF_SAMPLE_READ) {
		fprintf(stderr, "PERF_SAMPLE_READ is unsupported for now\n");
		return -1;
	}

	if (type & PERF_SAMPLE_CALLCHAIN) {
		if (sample_overlap(event, array, sizeof(data->callchain->nr)))
			return -EFAULT;

		data->callchain = (struct ip_callchain *)array;

		if (sample_overlap(event, array, data->callchain->nr))
			return -EFAULT;

		array += 1 + data->callchain->nr;
	}

	if (type & PERF_SAMPLE_RAW) {
		const u64 *pdata;

		u.val64 = *array;
		if (WARN_ONCE(swapped,
			      "Endianness of raw data not corrected!\n")) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
		}

		if (sample_overlap(event, array, sizeof(u32)))
			return -EFAULT;

		data->raw_size = u.val32[0];
		pdata = (void *) array + sizeof(u32);

		if (sample_overlap(event, pdata, data->raw_size))
			return -EFAULT;

		data->raw_data = (void *) pdata;

		array = (void *)array + data->raw_size + sizeof(u32);
	}

	if (type & PERF_SAMPLE_BRANCH_STACK) {
		u64 sz;

		data->branch_stack = (struct branch_stack *)array;
		array++; /* nr */

		sz = data->branch_stack->nr * sizeof(struct branch_entry);
		sz /= sizeof(u64);
		array += sz;
	}
	return 0;
}

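/*
 * The inverse of perf_event__parse_sample() for the fixed-size fields:
 * write the sample back into the event's array in the same order.
 */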
int perf_event__synthesize_sample(union perf_event *event, u64 type,
				  const struct perf_sample *sample,
				  bool swapped)
{
	u64 *array;

	/*
	 * used for cross-endian analysis. See git commit 65014ab3
	 * for why this goofiness is needed.
	 */
	union u64_swap u;

	array = event->sample.array;

	if (type & PERF_SAMPLE_IP) {
		event->ip.ip = sample->ip;
		array++;
	}

	if (type & PERF_SAMPLE_TID) {
		u.val32[0] = sample->pid;
		u.val32[1] = sample->tid;
		if (swapped) {
			/*
			 * Inverse of what is done in perf_event__parse_sample
			 */
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
			u.val64 = bswap_64(u.val64);
		}

		*array = u.val64;
		array++;
	}

	if (type & PERF_SAMPLE_TIME) {
		*array = sample->time;
		array++;
	}

	if (type & PERF_SAMPLE_ADDR) {
		*array = sample->addr;
		array++;
	}

	if (type & PERF_SAMPLE_ID) {
		*array = sample->id;
		array++;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		*array = sample->stream_id;
		array++;
	}

	if (type & PERF_SAMPLE_CPU) {
		u.val32[0] = sample->cpu;
		if (swapped) {
			/*
			 * Inverse of what is done in perf_event__parse_sample
			 */
			u.val32[0] = bswap_32(u.val32[0]);
			u.val64 = bswap_64(u.val64);
		}
		*array = u.val64;
		array++;
	}

	if (type & PERF_SAMPLE_PERIOD) {
		*array = sample->period;
		array++;
	}

	return 0;
}