/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */

#include <byteswap.h>
#include "asm/bug.h"
#include "evsel.h"
#include "evlist.h"
#include "util.h"
#include "cpumap.h"
#include "thread_map.h"
#include "target.h"

#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
#define GROUP_FD(group_fd, cpu) (*(int *)xyarray__entry(group_fd, cpu, 0))

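/*
 * Number of bytes the fixed-size part of a sample carries: one u64 for
 * each bit set in the PERF_SAMPLE_MASK portion of sample_type.
 */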
int __perf_evsel__sample_size(u64 sample_type)
{
	u64 mask = sample_type & PERF_SAMPLE_MASK;
	int size = 0;
	int i;

	for (i = 0; i < 64; i++) {
		if (mask & (1ULL << i))
			size++;
	}

	size *= sizeof(u64);

	return size;
}

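/*
 * Zero a struct hists and make all of its rb-trees start out empty,
 * with new entries going into entries_in_array[0] first.
 */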
void hists__init(struct hists *hists)
{
	memset(hists, 0, sizeof(*hists));
	hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT;
	hists->entries_in = &hists->entries_in_array[0];
	hists->entries_collapsed = RB_ROOT;
	hists->entries = RB_ROOT;
	pthread_mutex_init(&hists->lock, NULL);
}

void perf_evsel__init(struct perf_evsel *evsel,
		      struct perf_event_attr *attr, int idx)
{
	evsel->idx = idx;
	evsel->attr = *attr;
	INIT_LIST_HEAD(&evsel->node);
	hists__init(&evsel->hists);
}

struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr, int idx)
{
	struct perf_evsel *evsel = zalloc(sizeof(*evsel));

	if (evsel != NULL)
		perf_evsel__init(evsel, attr, idx);

	return evsel;
}

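/*
 * Fill in evsel->attr from the record options: the event starts disabled,
 * sampling is frequency- or period-based, and extra sample_type bits are
 * enabled as the options require. The "track" fields (mmap, comm) are only
 * requested on the first counter in the list.
 */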
void perf_evsel__config(struct perf_evsel *evsel, struct perf_record_opts *opts,
			struct perf_evsel *first)
{
	struct perf_event_attr *attr = &evsel->attr;
	int track = !evsel->idx; /* only the first counter needs these */

	attr->disabled = 1;
	attr->sample_id_all = opts->sample_id_all_missing ? 0 : 1;
	attr->inherit = !opts->no_inherit;
	attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
			    PERF_FORMAT_TOTAL_TIME_RUNNING |
			    PERF_FORMAT_ID;

	attr->sample_type |= PERF_SAMPLE_IP | PERF_SAMPLE_TID;

	/*
	 * We default some events to a default interval of 1. But keep it
	 * a weak assumption, overridable by the user.
	 */
	if (!attr->sample_period || (opts->user_freq != UINT_MAX &&
				     opts->user_interval != ULLONG_MAX)) {
		if (opts->freq) {
			attr->sample_type |= PERF_SAMPLE_PERIOD;
			attr->freq = 1;
			attr->sample_freq = opts->freq;
		} else {
			attr->sample_period = opts->default_interval;
		}
	}

	if (opts->no_samples)
		attr->sample_freq = 0;

	if (opts->inherit_stat)
		attr->inherit_stat = 1;

	if (opts->sample_address) {
		attr->sample_type |= PERF_SAMPLE_ADDR;
		attr->mmap_data = track;
	}

	if (opts->call_graph)
		attr->sample_type |= PERF_SAMPLE_CALLCHAIN;

	if (opts->target.system_wide)
		attr->sample_type |= PERF_SAMPLE_CPU;

	if (opts->period)
		attr->sample_type |= PERF_SAMPLE_PERIOD;

	if (!opts->sample_id_all_missing &&
	    (opts->sample_time || !opts->no_inherit ||
	     perf_target__has_cpu(&opts->target)))
		attr->sample_type |= PERF_SAMPLE_TIME;

	if (opts->raw_samples) {
		attr->sample_type |= PERF_SAMPLE_TIME;
		attr->sample_type |= PERF_SAMPLE_RAW;
		attr->sample_type |= PERF_SAMPLE_CPU;
	}

	if (opts->no_delay) {
		attr->watermark = 0;
		attr->wakeup_events = 1;
	}
	if (opts->branch_stack) {
		attr->sample_type |= PERF_SAMPLE_BRANCH_STACK;
		attr->branch_sample_type = opts->branch_stack;
	}

	attr->mmap = track;
	attr->comm = track;

	if (perf_target__none(&opts->target) &&
	    (!opts->group || evsel == first)) {
		attr->enable_on_exec = 1;
	}
}

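/* Allocate the ncpus x nthreads matrix of fds, marking each slot unused (-1). */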
int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	int cpu, thread;
	evsel->fd = xyarray__new(ncpus, nthreads, sizeof(int));

	if (evsel->fd) {
		for (cpu = 0; cpu < ncpus; cpu++) {
			for (thread = 0; thread < nthreads; thread++) {
				FD(evsel, cpu, thread) = -1;
			}
		}
	}

	return evsel->fd != NULL ? 0 : -ENOMEM;
}

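/*
 * Allocate the per-(cpu, thread) sample_id records and the flat array of
 * kernel-assigned event IDs used to map samples back to this evsel.
 */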
int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	evsel->sample_id = xyarray__new(ncpus, nthreads, sizeof(struct perf_sample_id));
	if (evsel->sample_id == NULL)
		return -ENOMEM;

	evsel->id = zalloc(ncpus * nthreads * sizeof(u64));
	if (evsel->id == NULL) {
		xyarray__delete(evsel->sample_id);
		evsel->sample_id = NULL;
		return -ENOMEM;
	}

	return 0;
}

int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus)
{
	evsel->counts = zalloc((sizeof(*evsel->counts) +
				(ncpus * sizeof(struct perf_counts_values))));
	return evsel->counts != NULL ? 0 : -ENOMEM;
}

void perf_evsel__free_fd(struct perf_evsel *evsel)
{
	xyarray__delete(evsel->fd);
	evsel->fd = NULL;
}

void perf_evsel__free_id(struct perf_evsel *evsel)
{
	xyarray__delete(evsel->sample_id);
	evsel->sample_id = NULL;
	free(evsel->id);
	evsel->id = NULL;
}

void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	int cpu, thread;

	for (cpu = 0; cpu < ncpus; cpu++)
		for (thread = 0; thread < nthreads; ++thread) {
			close(FD(evsel, cpu, thread));
			FD(evsel, cpu, thread) = -1;
		}
}

void perf_evsel__exit(struct perf_evsel *evsel)
{
	assert(list_empty(&evsel->node));
	xyarray__delete(evsel->fd);
	xyarray__delete(evsel->sample_id);
	free(evsel->id);
}

void perf_evsel__delete(struct perf_evsel *evsel)
{
	perf_evsel__exit(evsel);
	close_cgroup(evsel->cgrp);
	free(evsel->name);
	free(evsel);
}

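/*
 * Read one counter on a single (cpu, thread). With scale, the enabled and
 * running times are read as well and the value is extrapolated to account
 * for time the event was descheduled due to multiplexing.
 */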
int __perf_evsel__read_on_cpu(struct perf_evsel *evsel,
			      int cpu, int thread, bool scale)
{
	struct perf_counts_values count;
	size_t nv = scale ? 3 : 1;

	if (FD(evsel, cpu, thread) < 0)
		return -EINVAL;

	if (evsel->counts == NULL && perf_evsel__alloc_counts(evsel, cpu + 1) < 0)
		return -ENOMEM;

	if (readn(FD(evsel, cpu, thread), &count, nv * sizeof(u64)) < 0)
		return -errno;

	if (scale) {
		if (count.run == 0)
			count.val = 0;
		else if (count.run < count.ena)
			count.val = (u64)((double)count.val * count.ena / count.run + 0.5);
	} else
		count.ena = count.run = 0;

	evsel->counts->cpu[cpu] = count;
	return 0;
}

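/*
 * Sum the counter over all cpus and threads into counts->aggr, optionally
 * scaling it: counts->scaled ends up -1 if the event never ran, 1 if the
 * value had to be extrapolated and 0 if it was used as is.
 */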
int __perf_evsel__read(struct perf_evsel *evsel,
		       int ncpus, int nthreads, bool scale)
{
	size_t nv = scale ? 3 : 1;
	int cpu, thread;
	struct perf_counts_values *aggr = &evsel->counts->aggr, count;

	aggr->val = aggr->ena = aggr->run = 0;

	for (cpu = 0; cpu < ncpus; cpu++) {
		for (thread = 0; thread < nthreads; thread++) {
			if (FD(evsel, cpu, thread) < 0)
				continue;

			if (readn(FD(evsel, cpu, thread),
				  &count, nv * sizeof(u64)) < 0)
				return -errno;

			aggr->val += count.val;
			if (scale) {
				aggr->ena += count.ena;
				aggr->run += count.run;
			}
		}
	}

	evsel->counts->scaled = 0;
	if (scale) {
		if (aggr->run == 0) {
			evsel->counts->scaled = -1;
			aggr->val = 0;
			return 0;
		}

		if (aggr->run < aggr->ena) {
			evsel->counts->scaled = 1;
			aggr->val = (u64)((double)aggr->val * aggr->ena / aggr->run + 0.5);
		}
	} else
		aggr->ena = aggr->run = 0;

	return 0;
}

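/*
 * Do the actual sys_perf_event_open() calls, one fd per (cpu, thread).
 * When the evsel is attached to a cgroup, the cgroup fd is passed in the
 * pid argument; when grouping, the first fd opened on each cpu becomes the
 * group leader for the rest unless group_fds already provides a leader.
 */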
static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
			      struct thread_map *threads, bool group,
			      struct xyarray *group_fds)
{
	int cpu, thread;
	unsigned long flags = 0;
	int pid = -1, err;

	if (evsel->fd == NULL &&
	    perf_evsel__alloc_fd(evsel, cpus->nr, threads->nr) < 0)
		return -ENOMEM;

	if (evsel->cgrp) {
		flags = PERF_FLAG_PID_CGROUP;
		pid = evsel->cgrp->fd;
	}

	for (cpu = 0; cpu < cpus->nr; cpu++) {
		int group_fd = group_fds ? GROUP_FD(group_fds, cpu) : -1;

		for (thread = 0; thread < threads->nr; thread++) {

			if (!evsel->cgrp)
				pid = threads->map[thread];

			FD(evsel, cpu, thread) = sys_perf_event_open(&evsel->attr,
								     pid,
								     cpus->map[cpu],
								     group_fd, flags);
			if (FD(evsel, cpu, thread) < 0) {
				err = -errno;
				goto out_close;
			}

			if (group && group_fd == -1)
				group_fd = FD(evsel, cpu, thread);
		}
	}

	return 0;

out_close:
	do {
		while (--thread >= 0) {
			close(FD(evsel, cpu, thread));
			FD(evsel, cpu, thread) = -1;
		}
		thread = threads->nr;
	} while (--cpu >= 0);
	return err;
}

void perf_evsel__close(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	if (evsel->fd == NULL)
		return;

	perf_evsel__close_fd(evsel, ncpus, nthreads);
	perf_evsel__free_fd(evsel);
	evsel->fd = NULL;
}

static struct {
	struct cpu_map map;
	int cpus[1];
} empty_cpu_map = {
	.map.nr = 1,
	.cpus	= { -1, },
};

static struct {
	struct thread_map map;
	int threads[1];
} empty_thread_map = {
	.map.nr	 = 1,
	.threads = { -1, },
};

int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
		     struct thread_map *threads, bool group,
		     struct xyarray *group_fd)
{
	if (cpus == NULL) {
		/* Work around old compiler warnings about strict aliasing */
		cpus = &empty_cpu_map.map;
	}

	if (threads == NULL)
		threads = &empty_thread_map.map;

	return __perf_evsel__open(evsel, cpus, threads, group, group_fd);
}

int perf_evsel__open_per_cpu(struct perf_evsel *evsel,
			     struct cpu_map *cpus, bool group,
			     struct xyarray *group_fd)
{
	return __perf_evsel__open(evsel, cpus, &empty_thread_map.map, group,
				  group_fd);
}

int perf_evsel__open_per_thread(struct perf_evsel *evsel,
				struct thread_map *threads, bool group,
				struct xyarray *group_fd)
{
	return __perf_evsel__open(evsel, &empty_cpu_map.map, threads, group,
				  group_fd);
}

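/*
 * For non-sample events carrying sample_id_all data, the id fields are
 * laid out at the *end* of the record, so parse them walking backwards
 * from the last u64.
 */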
static int perf_event__parse_id_sample(const union perf_event *event, u64 type,
				       struct perf_sample *sample)
{
	const u64 *array = event->sample.array;

	array += ((event->header.size -
		   sizeof(event->header)) / sizeof(u64)) - 1;

	if (type & PERF_SAMPLE_CPU) {
		u32 *p = (u32 *)array;
		sample->cpu = *p;
		array--;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		sample->stream_id = *array;
		array--;
	}

	if (type & PERF_SAMPLE_ID) {
		sample->id = *array;
		array--;
	}

	if (type & PERF_SAMPLE_TIME) {
		sample->time = *array;
		array--;
	}

	if (type & PERF_SAMPLE_TID) {
		u32 *p = (u32 *)array;
		sample->pid = p[0];
		sample->tid = p[1];
	}

	return 0;
}

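/* Would a field at @offset with @size bytes spill past the end of the event? */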
static bool sample_overlap(const union perf_event *event,
			   const void *offset, u64 size)
{
	const void *base = event;

	if (offset + size > base + event->header.size)
		return true;

	return false;
}

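/*
 * Decode a raw mmap'ed event into a struct perf_sample, consuming u64
 * slots in the order implied by sample_type and bounds-checking the
 * variable-size fields (callchain, raw data) against the record size.
 */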
int perf_event__parse_sample(const union perf_event *event, u64 type,
			     int sample_size, bool sample_id_all,
			     struct perf_sample *data, bool swapped)
{
	const u64 *array;

	/*
	 * used for cross-endian analysis. See git commit 65014ab3
	 * for why this goofiness is needed.
	 */
	union {
		u64 val64;
		u32 val32[2];
	} u;

	memset(data, 0, sizeof(*data));
	data->cpu = data->pid = data->tid = -1;
	data->stream_id = data->id = data->time = -1ULL;
	data->period = 1;

	if (event->header.type != PERF_RECORD_SAMPLE) {
		if (!sample_id_all)
			return 0;
		return perf_event__parse_id_sample(event, type, data);
	}

	array = event->sample.array;

	if (sample_size + sizeof(event->header) > event->header.size)
		return -EFAULT;

	if (type & PERF_SAMPLE_IP) {
		data->ip = event->ip.ip;
		array++;
	}

	if (type & PERF_SAMPLE_TID) {
		u.val64 = *array;
		if (swapped) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
		}

		data->pid = u.val32[0];
		data->tid = u.val32[1];
		array++;
	}

	if (type & PERF_SAMPLE_TIME) {
		data->time = *array;
		array++;
	}

	data->addr = 0;
	if (type & PERF_SAMPLE_ADDR) {
		data->addr = *array;
		array++;
	}

	data->id = -1ULL;
	if (type & PERF_SAMPLE_ID) {
		data->id = *array;
		array++;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		data->stream_id = *array;
		array++;
	}

	if (type & PERF_SAMPLE_CPU) {
		u.val64 = *array;
		if (swapped) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
		}

		data->cpu = u.val32[0];
		array++;
	}

	if (type & PERF_SAMPLE_PERIOD) {
		data->period = *array;
		array++;
	}

	if (type & PERF_SAMPLE_READ) {
		fprintf(stderr, "PERF_SAMPLE_READ is unsupported for now\n");
		return -1;
	}

	if (type & PERF_SAMPLE_CALLCHAIN) {
		if (sample_overlap(event, array, sizeof(data->callchain->nr)))
			return -EFAULT;

		data->callchain = (struct ip_callchain *)array;

		if (sample_overlap(event, array, data->callchain->nr))
			return -EFAULT;

		array += 1 + data->callchain->nr;
	}

	if (type & PERF_SAMPLE_RAW) {
		const u64 *pdata;

		u.val64 = *array;
		if (WARN_ONCE(swapped,
			      "Endianness of raw data not corrected!\n")) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
		}

		if (sample_overlap(event, array, sizeof(u32)))
			return -EFAULT;

		data->raw_size = u.val32[0];
		pdata = (void *) array + sizeof(u32);

		if (sample_overlap(event, pdata, data->raw_size))
			return -EFAULT;

		data->raw_data = (void *) pdata;

		array = (void *)array + data->raw_size + sizeof(u32);
	}

	if (type & PERF_SAMPLE_BRANCH_STACK) {
		u64 sz;

		data->branch_stack = (struct branch_stack *)array;
		array++; /* nr */

		sz = data->branch_stack->nr * sizeof(struct branch_entry);
		sz /= sizeof(u64);
		array += sz;
	}
	return 0;
}

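/*
 * The inverse of perf_event__parse_sample: write the fixed-size fields of
 * a struct perf_sample back into an event's array, in sample_type order.
 */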
int perf_event__synthesize_sample(union perf_event *event, u64 type,
				  const struct perf_sample *sample,
				  bool swapped)
{
	u64 *array;

	/*
	 * used for cross-endian analysis. See git commit 65014ab3
	 * for why this goofiness is needed.
	 */
	union {
		u64 val64;
		u32 val32[2];
	} u;

	array = event->sample.array;

	if (type & PERF_SAMPLE_IP) {
		event->ip.ip = sample->ip;
		array++;
	}

	if (type & PERF_SAMPLE_TID) {
		u.val32[0] = sample->pid;
		u.val32[1] = sample->tid;
		if (swapped) {
			/*
			 * Inverse of what is done in perf_event__parse_sample
			 */
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
			u.val64 = bswap_64(u.val64);
		}

		*array = u.val64;
		array++;
	}

	if (type & PERF_SAMPLE_TIME) {
		*array = sample->time;
		array++;
	}

	if (type & PERF_SAMPLE_ADDR) {
		*array = sample->addr;
		array++;
	}

	if (type & PERF_SAMPLE_ID) {
		*array = sample->id;
		array++;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		*array = sample->stream_id;
		array++;
	}

	if (type & PERF_SAMPLE_CPU) {
		u.val32[0] = sample->cpu;
		if (swapped) {
			/*
			 * Inverse of what is done in perf_event__parse_sample
			 */
			u.val32[0] = bswap_32(u.val32[0]);
			u.val64 = bswap_64(u.val64);
		}
		*array = u.val64;
		array++;
	}

	if (type & PERF_SAMPLE_PERIOD) {
		*array = sample->period;
		array++;
	}

	return 0;
}