/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */

#include <byteswap.h>
#include "asm/bug.h"
#include "evsel.h"
#include "evlist.h"
#include "util.h"
#include "cpumap.h"
#include "thread_map.h"

#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
#define GROUP_FD(group_fd, cpu) (*(int *)xyarray__entry(group_fd, cpu, 0))

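/*
 * Note on the accessors above (explanatory, not from the original
 * sources): evsel->fd is an xyarray indexed by (cpu, thread), so
 * FD(evsel, 1, 2) is the perf_event_open() fd for this event on the
 * second CPU and third thread. GROUP_FD() looks up the group leader's
 * fd for a given CPU in a caller-provided xyarray; -1 there means no
 * leader has been opened yet.
 */
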
int __perf_evsel__sample_size(u64 sample_type)
{
	u64 mask = sample_type & PERF_SAMPLE_MASK;
	int size = 0;
	int i;

	for (i = 0; i < 64; i++) {
		if (mask & (1ULL << i))
			size++;
	}

	size *= sizeof(u64);

	return size;
}

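/*
 * Worked example (an illustration, not part of the original file): for
 * sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_TIME,
 * three bits of PERF_SAMPLE_MASK are set, so the fixed portion of each
 * sample is 3 * sizeof(u64) = 24 bytes.
 */
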
static void hists__init(struct hists *hists)
{
	memset(hists, 0, sizeof(*hists));
	hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT;
	hists->entries_in = &hists->entries_in_array[0];
	hists->entries_collapsed = RB_ROOT;
	hists->entries = RB_ROOT;
	pthread_mutex_init(&hists->lock, NULL);
}

void perf_evsel__init(struct perf_evsel *evsel,
		      struct perf_event_attr *attr, int idx)
{
	evsel->idx = idx;
	evsel->attr = *attr;
	INIT_LIST_HEAD(&evsel->node);
	hists__init(&evsel->hists);
}

struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr, int idx)
{
	struct perf_evsel *evsel = zalloc(sizeof(*evsel));

	if (evsel != NULL)
		perf_evsel__init(evsel, attr, idx);

	return evsel;
}

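/*
 * Construction sketch (hypothetical call site; the real ones live in
 * builtin-{top,stat,record}.c):
 *
 *	struct perf_event_attr attr = {
 *		.type   = PERF_TYPE_HARDWARE,
 *		.config = PERF_COUNT_HW_CPU_CYCLES,
 *	};
 *	struct perf_evsel *evsel = perf_evsel__new(&attr, 0);
 *
 * A NULL return means the zalloc() failed, i.e. -ENOMEM.
 */
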
void perf_evsel__config(struct perf_evsel *evsel, struct perf_record_opts *opts)
{
	struct perf_event_attr *attr = &evsel->attr;
	int track = !evsel->idx; /* only the first counter needs these */

	attr->sample_id_all = opts->sample_id_all_avail ? 1 : 0;
	attr->inherit = !opts->no_inherit;
	attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
			    PERF_FORMAT_TOTAL_TIME_RUNNING |
			    PERF_FORMAT_ID;

	attr->sample_type |= PERF_SAMPLE_IP | PERF_SAMPLE_TID;

	/*
	 * We default some events to a sample interval of 1. But keep it
	 * a weak assumption, overridable by the user.
	 */
	if (!attr->sample_period || (opts->user_freq != UINT_MAX &&
				     opts->user_interval != ULLONG_MAX)) {
		if (opts->freq) {
			attr->sample_type |= PERF_SAMPLE_PERIOD;
			attr->freq = 1;
			attr->sample_freq = opts->freq;
		} else {
			attr->sample_period = opts->default_interval;
		}
	}

	if (opts->no_samples)
		attr->sample_freq = 0;

	if (opts->inherit_stat)
		attr->inherit_stat = 1;

	if (opts->sample_address) {
		attr->sample_type |= PERF_SAMPLE_ADDR;
		attr->mmap_data = track;
	}

	if (opts->call_graph)
		attr->sample_type |= PERF_SAMPLE_CALLCHAIN;

	if (opts->system_wide)
		attr->sample_type |= PERF_SAMPLE_CPU;

	if (opts->period)
		attr->sample_type |= PERF_SAMPLE_PERIOD;

	if (opts->sample_id_all_avail &&
	    (opts->sample_time || opts->system_wide ||
	     !opts->no_inherit || opts->cpu_list))
		attr->sample_type |= PERF_SAMPLE_TIME;

	if (opts->raw_samples) {
		attr->sample_type |= PERF_SAMPLE_TIME;
		attr->sample_type |= PERF_SAMPLE_RAW;
		attr->sample_type |= PERF_SAMPLE_CPU;
	}

	if (opts->no_delay) {
		attr->watermark = 0;
		attr->wakeup_events = 1;
	}

	attr->mmap = track;
	attr->comm = track;

	if (opts->target_pid == -1 && opts->target_tid == -1 && !opts->system_wide) {
		attr->disabled = 1;
		attr->enable_on_exec = 1;
	}
}

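/*
 * Illustrative outcome (example numbers, not from the original file):
 * with opts->freq = 4000 and no explicit sample_period, the event ends
 * up in frequency mode (attr->freq = 1, attr->sample_freq = 4000) and
 * PERF_SAMPLE_PERIOD is added to sample_type so each sample records
 * the period the kernel actually used.
 */
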
int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	int cpu, thread;
	evsel->fd = xyarray__new(ncpus, nthreads, sizeof(int));

	if (evsel->fd) {
		for (cpu = 0; cpu < ncpus; cpu++) {
			for (thread = 0; thread < nthreads; thread++) {
				FD(evsel, cpu, thread) = -1;
			}
		}
	}

	return evsel->fd != NULL ? 0 : -ENOMEM;
}

int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	evsel->sample_id = xyarray__new(ncpus, nthreads, sizeof(struct perf_sample_id));
	if (evsel->sample_id == NULL)
		return -ENOMEM;

	evsel->id = zalloc(ncpus * nthreads * sizeof(u64));
	if (evsel->id == NULL) {
		xyarray__delete(evsel->sample_id);
		evsel->sample_id = NULL;
		return -ENOMEM;
	}

	return 0;
}

int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus)
{
	evsel->counts = zalloc((sizeof(*evsel->counts) +
				(ncpus * sizeof(struct perf_counts_values))));
	return evsel->counts != NULL ? 0 : -ENOMEM;
}

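/*
 * Layout note (an assumption based on the sizing above: struct
 * perf_counts is taken to end in a flexible per-cpu array): the single
 * zalloc() holds the struct itself plus ncpus trailing
 * perf_counts_values slots, which counts->cpu[cpu] then indexes.
 */
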
void perf_evsel__free_fd(struct perf_evsel *evsel)
{
	xyarray__delete(evsel->fd);
	evsel->fd = NULL;
}

void perf_evsel__free_id(struct perf_evsel *evsel)
{
	xyarray__delete(evsel->sample_id);
	evsel->sample_id = NULL;
	free(evsel->id);
	evsel->id = NULL;
}

void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	int cpu, thread;

	for (cpu = 0; cpu < ncpus; cpu++)
		for (thread = 0; thread < nthreads; ++thread) {
			close(FD(evsel, cpu, thread));
			FD(evsel, cpu, thread) = -1;
		}
}

void perf_evsel__exit(struct perf_evsel *evsel)
{
	assert(list_empty(&evsel->node));
	xyarray__delete(evsel->fd);
	xyarray__delete(evsel->sample_id);
	free(evsel->id);
}

void perf_evsel__delete(struct perf_evsel *evsel)
{
	perf_evsel__exit(evsel);
	close_cgroup(evsel->cgrp);
	free(evsel->name);
	free(evsel);
}

int __perf_evsel__read_on_cpu(struct perf_evsel *evsel,
			      int cpu, int thread, bool scale)
{
	struct perf_counts_values count;
	size_t nv = scale ? 3 : 1;

	if (FD(evsel, cpu, thread) < 0)
		return -EINVAL;

	if (evsel->counts == NULL && perf_evsel__alloc_counts(evsel, cpu + 1) < 0)
		return -ENOMEM;

	if (readn(FD(evsel, cpu, thread), &count, nv * sizeof(u64)) < 0)
		return -errno;

	if (scale) {
		if (count.run == 0)
			count.val = 0;
		else if (count.run < count.ena)
			count.val = (u64)((double)count.val * count.ena / count.run + 0.5);
	} else
		count.ena = count.run = 0;

	evsel->counts->cpu[cpu] = count;
	return 0;
}

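/*
 * Scaling example (illustrative numbers): for a multiplexed counter
 * that ran half of its enabled time, say val = 1000, ena = 200000 and
 * run = 100000, the estimate becomes 1000 * 200000 / 100000 + 0.5,
 * i.e. 2000. A counter that never ran (run == 0) reads as 0.
 */
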
int __perf_evsel__read(struct perf_evsel *evsel,
		       int ncpus, int nthreads, bool scale)
{
	size_t nv = scale ? 3 : 1;
	int cpu, thread;
	struct perf_counts_values *aggr = &evsel->counts->aggr, count;

	aggr->val = aggr->ena = aggr->run = 0;

	for (cpu = 0; cpu < ncpus; cpu++) {
		for (thread = 0; thread < nthreads; thread++) {
			if (FD(evsel, cpu, thread) < 0)
				continue;

			if (readn(FD(evsel, cpu, thread),
				  &count, nv * sizeof(u64)) < 0)
				return -errno;

			aggr->val += count.val;
			if (scale) {
				aggr->ena += count.ena;
				aggr->run += count.run;
			}
		}
	}

	evsel->counts->scaled = 0;
	if (scale) {
		if (aggr->run == 0) {
			evsel->counts->scaled = -1;
			aggr->val = 0;
			return 0;
		}

		if (aggr->run < aggr->ena) {
			evsel->counts->scaled = 1;
			aggr->val = (u64)((double)aggr->val * aggr->ena / aggr->run + 0.5);
		}
	} else
		aggr->ena = aggr->run = 0;

	return 0;
}

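/*
 * The counts->scaled flag set above encodes the outcome: 0 means the
 * aggregate is exact, 1 means it was extrapolated because run < ena,
 * and -1 means the counters never ran, so the value was forced to 0.
 */
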
static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
			      struct thread_map *threads, bool group,
			      struct xyarray *group_fds)
{
	int cpu, thread;
	unsigned long flags = 0;
	int pid = -1, err;

	if (evsel->fd == NULL &&
	    perf_evsel__alloc_fd(evsel, cpus->nr, threads->nr) < 0)
		return -ENOMEM;

	if (evsel->cgrp) {
		flags = PERF_FLAG_PID_CGROUP;
		pid = evsel->cgrp->fd;
	}

	for (cpu = 0; cpu < cpus->nr; cpu++) {
		int group_fd = group_fds ? GROUP_FD(group_fds, cpu) : -1;

		for (thread = 0; thread < threads->nr; thread++) {

			if (!evsel->cgrp)
				pid = threads->map[thread];

			FD(evsel, cpu, thread) = sys_perf_event_open(&evsel->attr,
								     pid,
								     cpus->map[cpu],
								     group_fd, flags);
			if (FD(evsel, cpu, thread) < 0) {
				err = -errno;
				goto out_close;
			}

			if (group && group_fd == -1)
				group_fd = FD(evsel, cpu, thread);
		}
	}

	return 0;

out_close:
	do {
		while (--thread >= 0) {
			close(FD(evsel, cpu, thread));
			FD(evsel, cpu, thread) = -1;
		}
		thread = threads->nr;
	} while (--cpu >= 0);
	return err;
}

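/*
 * Grouping note (explanatory, not from the original file): when group
 * is true and no leader was handed in via group_fds, the first counter
 * opened on each CPU becomes the group leader; every later open on
 * that CPU passes the leader's fd as group_fd so the kernel schedules
 * the whole group onto the PMU together.
 */
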
void perf_evsel__close(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	if (evsel->fd == NULL)
		return;

	perf_evsel__close_fd(evsel, ncpus, nthreads);
	perf_evsel__free_fd(evsel);
	evsel->fd = NULL;
}

static struct {
	struct cpu_map map;
	int cpus[1];
} empty_cpu_map = {
	.map.nr = 1,
	.cpus = { -1, },
};

static struct {
	struct thread_map map;
	int threads[1];
} empty_thread_map = {
	.map.nr = 1,
	.threads = { -1, },
};

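/*
 * The dummy maps above each hold a single -1 entry. In
 * sys_perf_event_open() terms, cpu == -1 means "this task on any CPU"
 * and pid == -1 means "all tasks on this CPU", so substituting them
 * lets the per-cpu and per-thread wrappers below reuse
 * __perf_evsel__open() unchanged.
 */
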
int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
		     struct thread_map *threads, bool group,
		     struct xyarray *group_fd)
{
	if (cpus == NULL) {
		/* Work around old compiler warnings about strict aliasing */
		cpus = &empty_cpu_map.map;
	}

	if (threads == NULL)
		threads = &empty_thread_map.map;

	return __perf_evsel__open(evsel, cpus, threads, group, group_fd);
}

int perf_evsel__open_per_cpu(struct perf_evsel *evsel,
			     struct cpu_map *cpus, bool group,
			     struct xyarray *group_fd)
{
	return __perf_evsel__open(evsel, cpus, &empty_thread_map.map, group,
				  group_fd);
}

int perf_evsel__open_per_thread(struct perf_evsel *evsel,
				struct thread_map *threads, bool group,
				struct xyarray *group_fd)
{
	return __perf_evsel__open(evsel, &empty_cpu_map.map, threads, group,
				  group_fd);
}

static int perf_event__parse_id_sample(const union perf_event *event, u64 type,
				       struct perf_sample *sample)
{
	const u64 *array = event->sample.array;

	array += ((event->header.size -
		   sizeof(event->header)) / sizeof(u64)) - 1;

	if (type & PERF_SAMPLE_CPU) {
		u32 *p = (u32 *)array;
		sample->cpu = *p;
		array--;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		sample->stream_id = *array;
		array--;
	}

	if (type & PERF_SAMPLE_ID) {
		sample->id = *array;
		array--;
	}

	if (type & PERF_SAMPLE_TIME) {
		sample->time = *array;
		array--;
	}

	if (type & PERF_SAMPLE_TID) {
		u32 *p = (u32 *)array;
		sample->pid = p[0];
		sample->tid = p[1];
	}

	return 0;
}

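/*
 * Layout reminder (explanatory): with attr->sample_id_all set, the
 * kernel appends the sample id fields to non-sample records in the
 * same order they appear in a sample, which is why the parser above
 * starts at the last u64 of the record and walks backwards: CPU, then
 * STREAM_ID, ID, TIME and finally TID/PID.
 */
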
static bool sample_overlap(const union perf_event *event,
			   const void *offset, u64 size)
{
	const void *base = event;

	if (offset + size > base + event->header.size)
		return true;

	return false;
}

int perf_event__parse_sample(const union perf_event *event, u64 type,
			     int sample_size, bool sample_id_all,
			     struct perf_sample *data, bool swapped)
{
	const u64 *array;

	/*
	 * used for cross-endian analysis. See git commit 65014ab3
	 * for why this goofiness is needed.
	 */
	union {
		u64 val64;
		u32 val32[2];
	} u;

	memset(data, 0, sizeof(*data));
	data->cpu = data->pid = data->tid = -1;
	data->stream_id = data->id = data->time = -1ULL;

	if (event->header.type != PERF_RECORD_SAMPLE) {
		if (!sample_id_all)
			return 0;
		return perf_event__parse_id_sample(event, type, data);
	}

	array = event->sample.array;

	if (sample_size + sizeof(event->header) > event->header.size)
		return -EFAULT;

	if (type & PERF_SAMPLE_IP) {
		data->ip = event->ip.ip;
		array++;
	}

	if (type & PERF_SAMPLE_TID) {
		u.val64 = *array;
		if (swapped) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
		}

		data->pid = u.val32[0];
		data->tid = u.val32[1];
		array++;
	}

	if (type & PERF_SAMPLE_TIME) {
		data->time = *array;
		array++;
	}

	data->addr = 0;
	if (type & PERF_SAMPLE_ADDR) {
		data->addr = *array;
		array++;
	}

	data->id = -1ULL;
	if (type & PERF_SAMPLE_ID) {
		data->id = *array;
		array++;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		data->stream_id = *array;
		array++;
	}

	if (type & PERF_SAMPLE_CPU) {

		u.val64 = *array;
		if (swapped) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
		}

		data->cpu = u.val32[0];
		array++;
	}

	if (type & PERF_SAMPLE_PERIOD) {
		data->period = *array;
		array++;
	}

	if (type & PERF_SAMPLE_READ) {
		fprintf(stderr, "PERF_SAMPLE_READ is unsupported for now\n");
		return -1;
	}

	if (type & PERF_SAMPLE_CALLCHAIN) {
		if (sample_overlap(event, array, sizeof(data->callchain->nr)))
			return -EFAULT;

		data->callchain = (struct ip_callchain *)array;

		if (sample_overlap(event, array, data->callchain->nr))
			return -EFAULT;

		array += 1 + data->callchain->nr;
	}

	if (type & PERF_SAMPLE_RAW) {
		const u64 *pdata;

		u.val64 = *array;
		if (WARN_ONCE(swapped,
			      "Endianness of raw data not corrected!\n")) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
		}

		if (sample_overlap(event, array, sizeof(u32)))
			return -EFAULT;

		data->raw_size = u.val32[0];
		pdata = (void *) array + sizeof(u32);

		if (sample_overlap(event, pdata, data->raw_size))
			return -EFAULT;

		data->raw_data = (void *) pdata;
	}

	return 0;
}

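/*
 * Worked endianness example (illustrative): PERF_SAMPLE_TID packs pid
 * and tid as two u32s in one u64. When a perf.data file from a machine
 * of the opposite byte order is read, the generic u64 swap also
 * exchanges the two halves, so the parser above undoes the u64 swap
 * and byte-swaps each u32 in place; perf_event__synthesize_sample
 * below applies the exact inverse before writing the u64 back out.
 */
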
int perf_event__synthesize_sample(union perf_event *event, u64 type,
				  const struct perf_sample *sample,
				  bool swapped)
{
	u64 *array;

	/*
	 * used for cross-endian analysis. See git commit 65014ab3
	 * for why this goofiness is needed.
	 */
	union {
		u64 val64;
		u32 val32[2];
	} u;

	array = event->sample.array;

	if (type & PERF_SAMPLE_IP) {
		event->ip.ip = sample->ip;
		array++;
	}

	if (type & PERF_SAMPLE_TID) {
		u.val32[0] = sample->pid;
		u.val32[1] = sample->tid;
		if (swapped) {
			/*
			 * Inverse of what is done in perf_event__parse_sample
			 */
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
			u.val64 = bswap_64(u.val64);
		}

		*array = u.val64;
		array++;
	}

	if (type & PERF_SAMPLE_TIME) {
		*array = sample->time;
		array++;
	}

	if (type & PERF_SAMPLE_ADDR) {
		*array = sample->addr;
		array++;
	}

	if (type & PERF_SAMPLE_ID) {
		*array = sample->id;
		array++;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		*array = sample->stream_id;
		array++;
	}

	if (type & PERF_SAMPLE_CPU) {
		u.val32[0] = sample->cpu;
		if (swapped) {
			/*
			 * Inverse of what is done in perf_event__parse_sample
			 */
			u.val32[0] = bswap_32(u.val32[0]);
			u.val64 = bswap_64(u.val64);
		}
		*array = u.val64;
		array++;
	}

	if (type & PERF_SAMPLE_PERIOD) {
		*array = sample->period;
		array++;
	}

	return 0;
}