blob: a2da682db8198d3c437608b8c2a5a4b54b284718 [file] [log] [blame]
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -02001/*
2 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
3 *
4 * Parts came from builtin-{top,stat,record}.c, see those files for further
5 * copyright notes.
6 *
7 * Released under the GPL v2. (and only v2, not any later version)
8 */
9
David Ahern936be502011-09-06 09:12:26 -060010#include <byteswap.h>
Jiri Olsa0f6a3012012-08-07 15:20:45 +020011#include <linux/bitops.h>
David Ahern936be502011-09-06 09:12:26 -060012#include "asm/bug.h"
Arnaldo Carvalho de Melo69aad6f2011-01-03 16:39:04 -020013#include "evsel.h"
Arnaldo Carvalho de Melo70082dd2011-01-12 17:03:24 -020014#include "evlist.h"
Arnaldo Carvalho de Melo69aad6f2011-01-03 16:39:04 -020015#include "util.h"
Arnaldo Carvalho de Melo86bd5e82011-01-03 23:09:46 -020016#include "cpumap.h"
Arnaldo Carvalho de Melofd782602011-01-18 15:15:24 -020017#include "thread_map.h"
Namhyung Kim12864b32012-04-26 14:15:22 +090018#include "target.h"
Jiri Olsa287e74a2012-06-28 23:18:49 +020019#include "../../../include/linux/hw_breakpoint.h"
Arnaldo Carvalho de Melo69aad6f2011-01-03 16:39:04 -020020
/* Per-(cpu, thread) event file descriptor slot in the evsel->fd xyarray. */
#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
/* Group leader fd for @cpu: stored at thread index 0 of @group_fd. */
#define GROUP_FD(group_fd, cpu) (*(int *)xyarray__entry(group_fd, cpu, 0))
Arnaldo Carvalho de Meloc52b12e2011-01-03 17:45:52 -020023
Arnaldo Carvalho de Melobde09462012-08-01 18:53:11 -030024static int __perf_evsel__sample_size(u64 sample_type)
Arnaldo Carvalho de Meloc2a70652011-06-02 11:04:54 -030025{
26 u64 mask = sample_type & PERF_SAMPLE_MASK;
27 int size = 0;
28 int i;
29
30 for (i = 0; i < 64; i++) {
31 if (mask & (1ULL << i))
32 size++;
33 }
34
35 size *= sizeof(u64);
36
37 return size;
38}
39
/*
 * hists__init - zero a struct hists and set up its rbtree roots and lock.
 *
 * Both entries_in_array slots start as empty trees; entries_in initially
 * points at slot 0.
 */
void hists__init(struct hists *hists)
{
	memset(hists, 0, sizeof(*hists));
	hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT;
	hists->entries_in = &hists->entries_in_array[0];
	hists->entries_collapsed = RB_ROOT;
	hists->entries = RB_ROOT;
	pthread_mutex_init(&hists->lock, NULL);
}
49
/*
 * Initialize an already-allocated evsel: record its index and attr,
 * reset its list linkage and hists, and cache the per-record sample
 * size derived from attr->sample_type.
 */
void perf_evsel__init(struct perf_evsel *evsel,
		      struct perf_event_attr *attr, int idx)
{
	evsel->idx	   = idx;
	evsel->attr	   = *attr;
	INIT_LIST_HEAD(&evsel->node);
	hists__init(&evsel->hists);
	evsel->sample_size = __perf_evsel__sample_size(attr->sample_type);
}
59
Lin Ming23a2f3a2011-01-07 11:11:09 +080060struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr, int idx)
Arnaldo Carvalho de Melo69aad6f2011-01-03 16:39:04 -020061{
62 struct perf_evsel *evsel = zalloc(sizeof(*evsel));
63
Arnaldo Carvalho de Meloef1d1af2011-01-18 21:41:45 -020064 if (evsel != NULL)
65 perf_evsel__init(evsel, attr, idx);
Arnaldo Carvalho de Melo69aad6f2011-01-03 16:39:04 -020066
67 return evsel;
68}
69
/* Names of the generic hardware events, indexed by PERF_COUNT_HW_*. */
static const char *perf_evsel__hw_names[PERF_COUNT_HW_MAX] = {
	"cycles",
	"instructions",
	"cache-references",
	"cache-misses",
	"branches",
	"branch-misses",
	"bus-cycles",
	"stalled-cycles-frontend",
	"stalled-cycles-backend",
	"ref-cycles",
};
82
Arnaldo Carvalho de Melodd4f5222012-06-13 15:52:42 -030083static const char *__perf_evsel__hw_name(u64 config)
Arnaldo Carvalho de Meloc4104312012-05-25 16:38:11 -030084{
85 if (config < PERF_COUNT_HW_MAX && perf_evsel__hw_names[config])
86 return perf_evsel__hw_names[config];
87
88 return "unknown-hardware";
89}
90
/*
 * Append the event modifier suffix (":ukh", precise "p"s, "HG") implied
 * by the attr exclude_*/precise_ip bits to @bf, which already holds the
 * event name.  Returns the number of characters appended.
 */
static int perf_evsel__add_modifiers(struct perf_evsel *evsel, char *bf, size_t size)
{
	int colon = 0, r = 0;
	struct perf_event_attr *attr = &evsel->attr;
	bool exclude_guest_default = false;

	/*
	 * The first modifier printed reserves one byte (colon = ++r skips
	 * a slot before the scnprintf); that slot is filled with ':' at
	 * the end.  colon is the 1-based position of the reserved byte.
	 */
#define MOD_PRINT(context, mod)	do {					\
		if (!attr->exclude_##context) {				\
			if (!colon) colon = ++r;			\
			r += scnprintf(bf + r, size - r, "%c", mod);	\
		} } while(0)

	if (attr->exclude_kernel || attr->exclude_user || attr->exclude_hv) {
		MOD_PRINT(kernel, 'k');
		MOD_PRINT(user, 'u');
		MOD_PRINT(hv, 'h');
		exclude_guest_default = true;
	}

	if (attr->precise_ip) {
		if (!colon)
			colon = ++r;
		/* one 'p' per precise_ip level, at most three */
		r += scnprintf(bf + r, size - r, "%.*s", attr->precise_ip, "ppp");
		exclude_guest_default = true;
	}

	if (attr->exclude_host || attr->exclude_guest == exclude_guest_default) {
		MOD_PRINT(host, 'H');
		MOD_PRINT(guest, 'G');
	}
#undef MOD_PRINT
	if (colon)
		bf[colon - 1] = ':';	/* fill the reserved separator slot */
	return r;
}
126
Arnaldo Carvalho de Melo27f18612012-06-11 13:33:09 -0300127static int perf_evsel__hw_name(struct perf_evsel *evsel, char *bf, size_t size)
128{
129 int r = scnprintf(bf, size, "%s", __perf_evsel__hw_name(evsel->attr.config));
130 return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
131}
132
/* Names of the software events, indexed by PERF_COUNT_SW_*. */
static const char *perf_evsel__sw_names[PERF_COUNT_SW_MAX] = {
	"cpu-clock",
	"task-clock",
	"page-faults",
	"context-switches",
	"CPU-migrations",
	"minor-faults",
	"major-faults",
	"alignment-faults",
	"emulation-faults",
};
144
Arnaldo Carvalho de Melodd4f5222012-06-13 15:52:42 -0300145static const char *__perf_evsel__sw_name(u64 config)
Arnaldo Carvalho de Melo335c2f52012-06-11 14:36:20 -0300146{
147 if (config < PERF_COUNT_SW_MAX && perf_evsel__sw_names[config])
148 return perf_evsel__sw_names[config];
149 return "unknown-software";
150}
151
152static int perf_evsel__sw_name(struct perf_evsel *evsel, char *bf, size_t size)
153{
154 int r = scnprintf(bf, size, "%s", __perf_evsel__sw_name(evsel->attr.config));
155 return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
156}
157
Jiri Olsa287e74a2012-06-28 23:18:49 +0200158static int __perf_evsel__bp_name(char *bf, size_t size, u64 addr, u64 type)
159{
160 int r;
161
162 r = scnprintf(bf, size, "mem:0x%" PRIx64 ":", addr);
163
164 if (type & HW_BREAKPOINT_R)
165 r += scnprintf(bf + r, size - r, "r");
166
167 if (type & HW_BREAKPOINT_W)
168 r += scnprintf(bf + r, size - r, "w");
169
170 if (type & HW_BREAKPOINT_X)
171 r += scnprintf(bf + r, size - r, "x");
172
173 return r;
174}
175
176static int perf_evsel__bp_name(struct perf_evsel *evsel, char *bf, size_t size)
177{
178 struct perf_event_attr *attr = &evsel->attr;
179 int r = __perf_evsel__bp_name(bf, size, attr->bp_addr, attr->bp_type);
180 return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
181}
182
/*
 * Alias tables for PERF_TYPE_HW_CACHE event names.  Index 0 of each row
 * is the spelling used when formatting a name here; the remaining
 * entries are presumably alternate spellings accepted by the event
 * parser — confirm against parse-events.
 */
const char *perf_evsel__hw_cache[PERF_COUNT_HW_CACHE_MAX]
				[PERF_EVSEL__MAX_ALIASES] = {
	{ "L1-dcache",	"l1-d",		"l1d",		"L1-data",	 },
	{ "L1-icache",	"l1-i",		"l1i",		"L1-instruction", },
	{ "LLC",	"L2",						 },
	{ "dTLB",	"d-tlb",	"Data-TLB",			 },
	{ "iTLB",	"i-tlb",	"Instruction-TLB",		 },
	{ "branch",	"branches",	"bpu",		"btb",	"bpc",	 },
	{ "node",							 },
};

const char *perf_evsel__hw_cache_op[PERF_COUNT_HW_CACHE_OP_MAX]
				   [PERF_EVSEL__MAX_ALIASES] = {
	{ "load",	"loads",	"read",				 },
	{ "store",	"stores",	"write",			 },
	{ "prefetch",	"prefetches",	"speculative-read", "speculative-load", },
};

const char *perf_evsel__hw_cache_result[PERF_COUNT_HW_CACHE_RESULT_MAX]
				       [PERF_EVSEL__MAX_ALIASES] = {
	{ "refs",	"Reference",	"ops",		"access",	 },
	{ "misses",	"miss",						 },
};
206
/* Shorthand for the PERF_COUNT_HW_CACHE_* enum constants. */
#define C(x)		PERF_COUNT_HW_CACHE_##x
#define CACHE_READ	(1 << C(OP_READ))
#define CACHE_WRITE	(1 << C(OP_WRITE))
#define CACHE_PREFETCH	(1 << C(OP_PREFETCH))
#define COP(x)		(1 << x)

/*
 * cache operation stat: bitmask of valid ops per cache level
 * L1I : Read and prefetch only
 * ITLB and BPU : Read-only
 */
static unsigned long perf_evsel__hw_cache_stat[C(MAX)] = {
 [C(L1D)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
 [C(L1I)] = (CACHE_READ | CACHE_PREFETCH),
 [C(LL)]  = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
 [C(DTLB)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
 [C(ITLB)] = (CACHE_READ),
 [C(BPU)] = (CACHE_READ),
 [C(NODE)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
};
227
228bool perf_evsel__is_cache_op_valid(u8 type, u8 op)
229{
230 if (perf_evsel__hw_cache_stat[type] & COP(op))
231 return true; /* valid */
232 else
233 return false; /* invalid */
234}
235
/*
 * Format the canonical name of a hw cache event into @bf.  With a
 * result component: "<type>-<op>-<result>" using alias 0 of each table.
 * Without one: "<type>-<ops>" deliberately uses op alias 1, the plural
 * form (e.g. "loads").
 */
int __perf_evsel__hw_cache_type_op_res_name(u8 type, u8 op, u8 result,
					    char *bf, size_t size)
{
	if (result) {
		return scnprintf(bf, size, "%s-%s-%s", perf_evsel__hw_cache[type][0],
				 perf_evsel__hw_cache_op[op][0],
				 perf_evsel__hw_cache_result[result][0]);
	}

	return scnprintf(bf, size, "%s-%s", perf_evsel__hw_cache[type][0],
			 perf_evsel__hw_cache_op[op][1]);
}
248
Arnaldo Carvalho de Melodd4f5222012-06-13 15:52:42 -0300249static int __perf_evsel__hw_cache_name(u64 config, char *bf, size_t size)
Arnaldo Carvalho de Melo0b668bc2012-06-11 14:08:07 -0300250{
251 u8 op, result, type = (config >> 0) & 0xff;
252 const char *err = "unknown-ext-hardware-cache-type";
253
254 if (type > PERF_COUNT_HW_CACHE_MAX)
255 goto out_err;
256
257 op = (config >> 8) & 0xff;
258 err = "unknown-ext-hardware-cache-op";
259 if (op > PERF_COUNT_HW_CACHE_OP_MAX)
260 goto out_err;
261
262 result = (config >> 16) & 0xff;
263 err = "unknown-ext-hardware-cache-result";
264 if (result > PERF_COUNT_HW_CACHE_RESULT_MAX)
265 goto out_err;
266
267 err = "invalid-cache";
268 if (!perf_evsel__is_cache_op_valid(type, op))
269 goto out_err;
270
271 return __perf_evsel__hw_cache_type_op_res_name(type, op, result, bf, size);
272out_err:
273 return scnprintf(bf, size, "%s", err);
274}
275
276static int perf_evsel__hw_cache_name(struct perf_evsel *evsel, char *bf, size_t size)
277{
278 int ret = __perf_evsel__hw_cache_name(evsel->attr.config, bf, size);
279 return ret + perf_evsel__add_modifiers(evsel, bf + ret, size - ret);
280}
281
Arnaldo Carvalho de Melo6eef3d92012-06-13 11:53:37 -0300282static int perf_evsel__raw_name(struct perf_evsel *evsel, char *bf, size_t size)
283{
284 int ret = scnprintf(bf, size, "raw 0x%" PRIx64, evsel->attr.config);
285 return ret + perf_evsel__add_modifiers(evsel, bf + ret, size - ret);
286}
287
/*
 * Return the human readable name for @evsel, building it from the attr
 * type/config on first use and caching it in evsel->name.  Returns
 * "unknown" if the strdup() of the built name fails.
 */
const char *perf_evsel__name(struct perf_evsel *evsel)
{
	char bf[128];

	if (evsel->name)
		return evsel->name;

	switch (evsel->attr.type) {
	case PERF_TYPE_RAW:
		perf_evsel__raw_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_HARDWARE:
		perf_evsel__hw_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_HW_CACHE:
		perf_evsel__hw_cache_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_SOFTWARE:
		perf_evsel__sw_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_TRACEPOINT:
		/* tracepoint names are not reconstructed here */
		scnprintf(bf, sizeof(bf), "%s", "unknown tracepoint");
		break;

	case PERF_TYPE_BREAKPOINT:
		perf_evsel__bp_name(evsel, bf, sizeof(bf));
		break;

	default:
		scnprintf(bf, sizeof(bf), "%s", "unknown attr type");
		break;
	}

	evsel->name = strdup(bf);

	return evsel->name ?: "unknown";
}
329
/*
 * Fill in evsel->attr from the record options: sample_type bits,
 * frequency vs. period sampling, inheritance and read_format.  The
 * mmap/comm tracking bits are set only on the first counter
 * (evsel->idx == 0).  The later clauses depend on the sample_type bits
 * set by the earlier ones, so the order matters.
 */
void perf_evsel__config(struct perf_evsel *evsel, struct perf_record_opts *opts,
			struct perf_evsel *first)
{
	struct perf_event_attr *attr = &evsel->attr;
	int track = !evsel->idx; /* only the first counter needs these */

	attr->disabled = 1;
	attr->sample_id_all = opts->sample_id_all_missing ? 0 : 1;
	attr->inherit	    = !opts->no_inherit;
	attr->read_format   = PERF_FORMAT_TOTAL_TIME_ENABLED |
			      PERF_FORMAT_TOTAL_TIME_RUNNING |
			      PERF_FORMAT_ID;

	attr->sample_type  |= PERF_SAMPLE_IP | PERF_SAMPLE_TID;

	/*
	 * We default some events to a 1 default interval. But keep
	 * it a weak assumption overridable by the user.
	 */
	if (!attr->sample_period || (opts->user_freq != UINT_MAX &&
				     opts->user_interval != ULLONG_MAX)) {
		if (opts->freq) {
			attr->sample_type	|= PERF_SAMPLE_PERIOD;
			attr->freq		= 1;
			attr->sample_freq	= opts->freq;
		} else {
			attr->sample_period = opts->default_interval;
		}
	}

	if (opts->no_samples)
		attr->sample_freq = 0;

	if (opts->inherit_stat)
		attr->inherit_stat = 1;

	if (opts->sample_address) {
		attr->sample_type	|= PERF_SAMPLE_ADDR;
		attr->mmap_data = track;
	}

	if (opts->call_graph)
		attr->sample_type	|= PERF_SAMPLE_CALLCHAIN;

	if (perf_target__has_cpu(&opts->target))
		attr->sample_type	|= PERF_SAMPLE_CPU;

	if (opts->period)
		attr->sample_type	|= PERF_SAMPLE_PERIOD;

	/* time is needed to sort/merge samples from inherited or per-cpu events */
	if (!opts->sample_id_all_missing &&
	    (opts->sample_time || !opts->no_inherit ||
	     perf_target__has_cpu(&opts->target)))
		attr->sample_type	|= PERF_SAMPLE_TIME;

	if (opts->raw_samples) {
		attr->sample_type	|= PERF_SAMPLE_TIME;
		attr->sample_type	|= PERF_SAMPLE_RAW;
		attr->sample_type	|= PERF_SAMPLE_CPU;
	}

	if (opts->no_delay) {
		/* wake the reader on every event instead of buffering */
		attr->watermark = 0;
		attr->wakeup_events = 1;
	}
	if (opts->branch_stack) {
		attr->sample_type	|= PERF_SAMPLE_BRANCH_STACK;
		attr->branch_sample_type = opts->branch_stack;
	}

	attr->mmap = track;
	attr->comm = track;

	/*
	 * Events start disabled and are enabled on exec when profiling a
	 * forked workload (no explicit target), for the group leader only
	 * when grouping.
	 */
	if (perf_target__none(&opts->target) &&
	    (!opts->group || evsel == first)) {
		attr->enable_on_exec = 1;
	}
}
408
Arnaldo Carvalho de Melo69aad6f2011-01-03 16:39:04 -0200409int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
410{
David Ahern4af4c952011-05-27 09:58:34 -0600411 int cpu, thread;
Arnaldo Carvalho de Melo69aad6f2011-01-03 16:39:04 -0200412 evsel->fd = xyarray__new(ncpus, nthreads, sizeof(int));
David Ahern4af4c952011-05-27 09:58:34 -0600413
414 if (evsel->fd) {
415 for (cpu = 0; cpu < ncpus; cpu++) {
416 for (thread = 0; thread < nthreads; thread++) {
417 FD(evsel, cpu, thread) = -1;
418 }
419 }
420 }
421
Arnaldo Carvalho de Melo69aad6f2011-01-03 16:39:04 -0200422 return evsel->fd != NULL ? 0 : -ENOMEM;
423}
424
Arnaldo Carvalho de Melo70db7532011-01-12 22:39:13 -0200425int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads)
426{
Arnaldo Carvalho de Meloa91e5432011-03-10 11:15:54 -0300427 evsel->sample_id = xyarray__new(ncpus, nthreads, sizeof(struct perf_sample_id));
428 if (evsel->sample_id == NULL)
429 return -ENOMEM;
430
431 evsel->id = zalloc(ncpus * nthreads * sizeof(u64));
432 if (evsel->id == NULL) {
433 xyarray__delete(evsel->sample_id);
434 evsel->sample_id = NULL;
435 return -ENOMEM;
436 }
437
438 return 0;
Arnaldo Carvalho de Melo70db7532011-01-12 22:39:13 -0200439}
440
Arnaldo Carvalho de Meloc52b12e2011-01-03 17:45:52 -0200441int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus)
442{
443 evsel->counts = zalloc((sizeof(*evsel->counts) +
444 (ncpus * sizeof(struct perf_counts_values))));
445 return evsel->counts != NULL ? 0 : -ENOMEM;
446}
447
Arnaldo Carvalho de Melo69aad6f2011-01-03 16:39:04 -0200448void perf_evsel__free_fd(struct perf_evsel *evsel)
449{
450 xyarray__delete(evsel->fd);
451 evsel->fd = NULL;
452}
453
/* Free the per-(cpu, thread) sample_id storage and the flat id array. */
void perf_evsel__free_id(struct perf_evsel *evsel)
{
	xyarray__delete(evsel->sample_id);
	evsel->sample_id = NULL;
	free(evsel->id);
	evsel->id = NULL;
}
461
Arnaldo Carvalho de Meloc52b12e2011-01-03 17:45:52 -0200462void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
463{
464 int cpu, thread;
465
466 for (cpu = 0; cpu < ncpus; cpu++)
467 for (thread = 0; thread < nthreads; ++thread) {
468 close(FD(evsel, cpu, thread));
469 FD(evsel, cpu, thread) = -1;
470 }
471}
472
/*
 * Release everything hanging off @evsel without freeing the evsel
 * itself.  The evsel must already be unlinked from any evlist.
 */
void perf_evsel__exit(struct perf_evsel *evsel)
{
	assert(list_empty(&evsel->node));
	xyarray__delete(evsel->fd);
	xyarray__delete(evsel->sample_id);
	free(evsel->id);
}
480
/* Tear down @evsel (cgroup ref, cached name, internals) and free it. */
void perf_evsel__delete(struct perf_evsel *evsel)
{
	perf_evsel__exit(evsel);
	close_cgroup(evsel->cgrp);
	free(evsel->name);
	free(evsel);
}
Arnaldo Carvalho de Meloc52b12e2011-01-03 17:45:52 -0200488
/*
 * Read one (cpu, thread) counter into evsel->counts->cpu[cpu].
 *
 * With @scale three u64s are read (value, time enabled, time running)
 * and the value is extrapolated when the event was multiplexed, i.e.
 * ran for less time than it was enabled.  Returns 0 or a negative
 * errno.
 */
int __perf_evsel__read_on_cpu(struct perf_evsel *evsel,
			      int cpu, int thread, bool scale)
{
	struct perf_counts_values count;
	size_t nv = scale ? 3 : 1;

	if (FD(evsel, cpu, thread) < 0)
		return -EINVAL;

	/* lazily allocate per-cpu counts storage on first read */
	if (evsel->counts == NULL && perf_evsel__alloc_counts(evsel, cpu + 1) < 0)
		return -ENOMEM;

	if (readn(FD(evsel, cpu, thread), &count, nv * sizeof(u64)) < 0)
		return -errno;

	if (scale) {
		if (count.run == 0)
			count.val = 0;
		else if (count.run < count.ena)
			/* extrapolate: value * enabled / running, rounded */
			count.val = (u64)((double)count.val * count.ena / count.run + 0.5);
	} else
		count.ena = count.run = 0;

	evsel->counts->cpu[cpu] = count;
	return 0;
}
515
/*
 * Read and sum the counter over all open (cpu, thread) fds into
 * evsel->counts->aggr, optionally scaling for multiplexing.
 *
 * counts->scaled is set to -1 when the event never ran (value forced
 * to 0), 1 when scaling was applied, 0 otherwise.  Returns 0 or the
 * negative errno of the failing read.
 */
int __perf_evsel__read(struct perf_evsel *evsel,
		       int ncpus, int nthreads, bool scale)
{
	size_t nv = scale ? 3 : 1;
	int cpu, thread;
	struct perf_counts_values *aggr = &evsel->counts->aggr, count;

	aggr->val = aggr->ena = aggr->run = 0;

	for (cpu = 0; cpu < ncpus; cpu++) {
		for (thread = 0; thread < nthreads; thread++) {
			/* skip slots that were never opened */
			if (FD(evsel, cpu, thread) < 0)
				continue;

			if (readn(FD(evsel, cpu, thread),
				  &count, nv * sizeof(u64)) < 0)
				return -errno;

			aggr->val += count.val;
			if (scale) {
				aggr->ena += count.ena;
				aggr->run += count.run;
			}
		}
	}

	evsel->counts->scaled = 0;
	if (scale) {
		if (aggr->run == 0) {
			evsel->counts->scaled = -1;
			aggr->val = 0;
			return 0;
		}

		if (aggr->run < aggr->ena) {
			evsel->counts->scaled = 1;
			/* extrapolate: value * enabled / running, rounded */
			aggr->val = (u64)((double)aggr->val * aggr->ena / aggr->run + 0.5);
		}
	} else
		aggr->ena = aggr->run = 0;

	return 0;
}
Arnaldo Carvalho de Melo48290602011-01-03 17:48:12 -0200559
/*
 * Open the event on every (cpu, thread) combination, storing the fds
 * in evsel->fd (allocated here on first use).
 *
 * When @group is set, the first fd opened on each cpu becomes the
 * group leader for the rest of that cpu's opens, unless @group_fds
 * already supplies per-cpu leader fds.  For cgroup events the cgroup
 * fd is passed in the pid slot with PERF_FLAG_PID_CGROUP.
 *
 * On failure, every fd opened so far is closed and the negative errno
 * of the failing sys_perf_event_open() is returned.
 */
static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
			      struct thread_map *threads, bool group,
			      struct xyarray *group_fds)
{
	int cpu, thread;
	unsigned long flags = 0;
	int pid = -1, err;

	if (evsel->fd == NULL &&
	    perf_evsel__alloc_fd(evsel, cpus->nr, threads->nr) < 0)
		return -ENOMEM;

	if (evsel->cgrp) {
		flags = PERF_FLAG_PID_CGROUP;
		pid = evsel->cgrp->fd;
	}

	for (cpu = 0; cpu < cpus->nr; cpu++) {
		int group_fd = group_fds ? GROUP_FD(group_fds, cpu) : -1;

		for (thread = 0; thread < threads->nr; thread++) {

			if (!evsel->cgrp)
				pid = threads->map[thread];

			FD(evsel, cpu, thread) = sys_perf_event_open(&evsel->attr,
								     pid,
								     cpus->map[cpu],
								     group_fd, flags);
			if (FD(evsel, cpu, thread) < 0) {
				err = -errno;
				goto out_close;
			}

			if (group && group_fd == -1)
				group_fd = FD(evsel, cpu, thread);
		}
	}

	return 0;

out_close:
	/*
	 * Unwind: close the partially-filled row first, then every full
	 * row before it.
	 */
	do {
		while (--thread >= 0) {
			close(FD(evsel, cpu, thread));
			FD(evsel, cpu, thread) = -1;
		}
		thread = threads->nr;
	} while (--cpu >= 0);
	return err;
}
611
612void perf_evsel__close(struct perf_evsel *evsel, int ncpus, int nthreads)
613{
614 if (evsel->fd == NULL)
615 return;
616
617 perf_evsel__close_fd(evsel, ncpus, nthreads);
618 perf_evsel__free_fd(evsel);
619 evsel->fd = NULL;
Arnaldo Carvalho de Melo48290602011-01-03 17:48:12 -0200620}
621
/*
 * One-entry dummy maps used when the caller passes NULL: a -1
 * cpu/thread is handed to sys_perf_event_open() via the map.  Wrapper
 * structs give the flexible trailing array of the map real storage.
 */
static struct {
	struct cpu_map map;
	int cpus[1];
} empty_cpu_map = {
	.map.nr	= 1,
	.cpus	= { -1, },
};

static struct {
	struct thread_map map;
	int threads[1];
} empty_thread_map = {
	.map.nr	 = 1,
	.threads = { -1, },
};
637
Arnaldo Carvalho de Melof08199d2011-01-11 23:42:19 -0200638int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
Arnaldo Carvalho de Melo727ab042011-10-25 10:42:19 -0200639 struct thread_map *threads, bool group,
640 struct xyarray *group_fd)
Arnaldo Carvalho de Melo02522082011-01-04 11:55:27 -0200641{
Arnaldo Carvalho de Melo02522082011-01-04 11:55:27 -0200642 if (cpus == NULL) {
643 /* Work around old compiler warnings about strict aliasing */
644 cpus = &empty_cpu_map.map;
645 }
646
647 if (threads == NULL)
648 threads = &empty_thread_map.map;
649
Arnaldo Carvalho de Melo727ab042011-10-25 10:42:19 -0200650 return __perf_evsel__open(evsel, cpus, threads, group, group_fd);
Arnaldo Carvalho de Melo02522082011-01-04 11:55:27 -0200651}
652
/* Open @evsel per cpu with the dummy "any thread" (-1) thread map. */
int perf_evsel__open_per_cpu(struct perf_evsel *evsel,
			     struct cpu_map *cpus, bool group,
			     struct xyarray *group_fd)
{
	return __perf_evsel__open(evsel, cpus, &empty_thread_map.map, group,
				  group_fd);
}
660
/* Open @evsel per thread with the dummy "any cpu" (-1) cpu map. */
int perf_evsel__open_per_thread(struct perf_evsel *evsel,
				struct thread_map *threads, bool group,
				struct xyarray *group_fd)
{
	return __perf_evsel__open(evsel, &empty_cpu_map.map, threads, group,
				  group_fd);
}
Arnaldo Carvalho de Melo70082dd2011-01-12 17:03:24 -0200668
/*
 * Parse the sample_id_all trailer of a non-sample event.  The fields
 * were appended in sample_type order, so they are walked backwards
 * from the last u64 of the event.  @swapped means the event came from
 * a host of opposite endianness: u32 pairs packed into a u64 need the
 * whole-u64 swap undone and each u32 swapped individually.
 */
static int perf_event__parse_id_sample(const union perf_event *event, u64 type,
				       struct perf_sample *sample,
				       bool swapped)
{
	const u64 *array = event->sample.array;
	union u64_swap u;

	/* point at the last u64 of the event payload */
	array += ((event->header.size -
		   sizeof(event->header)) / sizeof(u64)) - 1;

	if (type & PERF_SAMPLE_CPU) {
		u.val64 = *array;
		if (swapped) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
		}

		sample->cpu = u.val32[0];
		array--;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		sample->stream_id = *array;
		array--;
	}

	if (type & PERF_SAMPLE_ID) {
		sample->id = *array;
		array--;
	}

	if (type & PERF_SAMPLE_TIME) {
		sample->time = *array;
		array--;
	}

	if (type & PERF_SAMPLE_TID) {
		u.val64 = *array;
		if (swapped) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
		}

		sample->pid = u.val32[0];
		sample->tid = u.val32[1];
	}

	return 0;
}
721
Frederic Weisbecker98e1da92011-05-21 20:08:15 +0200722static bool sample_overlap(const union perf_event *event,
723 const void *offset, u64 size)
724{
725 const void *base = event;
726
727 if (offset + size > base + event->header.size)
728 return true;
729
730 return false;
731}
732
Arnaldo Carvalho de Meloa3f698f2012-08-02 12:23:46 -0300733int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
David Ahern936be502011-09-06 09:12:26 -0600734 struct perf_sample *data, bool swapped)
Arnaldo Carvalho de Melod0dd74e2011-01-21 13:46:41 -0200735{
Arnaldo Carvalho de Meloa3f698f2012-08-02 12:23:46 -0300736 u64 type = evsel->attr.sample_type;
Jiri Olsa0f6a3012012-08-07 15:20:45 +0200737 u64 regs_user = evsel->attr.sample_regs_user;
Arnaldo Carvalho de Melod0dd74e2011-01-21 13:46:41 -0200738 const u64 *array;
739
David Ahern936be502011-09-06 09:12:26 -0600740 /*
741 * used for cross-endian analysis. See git commit 65014ab3
742 * for why this goofiness is needed.
743 */
Jiri Olsa6a11f922012-05-16 08:59:04 +0200744 union u64_swap u;
David Ahern936be502011-09-06 09:12:26 -0600745
Robert Richterf3bda2c2011-12-15 17:32:39 +0100746 memset(data, 0, sizeof(*data));
Arnaldo Carvalho de Melod0dd74e2011-01-21 13:46:41 -0200747 data->cpu = data->pid = data->tid = -1;
748 data->stream_id = data->id = data->time = -1ULL;
Naveen N. Raoa4a03fc2012-02-03 22:31:13 +0530749 data->period = 1;
Arnaldo Carvalho de Melod0dd74e2011-01-21 13:46:41 -0200750
751 if (event->header.type != PERF_RECORD_SAMPLE) {
Arnaldo Carvalho de Meloa3f698f2012-08-02 12:23:46 -0300752 if (!evsel->attr.sample_id_all)
Arnaldo Carvalho de Melod0dd74e2011-01-21 13:46:41 -0200753 return 0;
Jiri Olsa37073f92012-05-30 14:23:44 +0200754 return perf_event__parse_id_sample(event, type, data, swapped);
Arnaldo Carvalho de Melod0dd74e2011-01-21 13:46:41 -0200755 }
756
757 array = event->sample.array;
758
Arnaldo Carvalho de Meloa3f698f2012-08-02 12:23:46 -0300759 if (evsel->sample_size + sizeof(event->header) > event->header.size)
Frederic Weisbeckera2854122011-05-21 19:33:04 +0200760 return -EFAULT;
761
Arnaldo Carvalho de Melod0dd74e2011-01-21 13:46:41 -0200762 if (type & PERF_SAMPLE_IP) {
763 data->ip = event->ip.ip;
764 array++;
765 }
766
767 if (type & PERF_SAMPLE_TID) {
David Ahern936be502011-09-06 09:12:26 -0600768 u.val64 = *array;
769 if (swapped) {
770 /* undo swap of u64, then swap on individual u32s */
771 u.val64 = bswap_64(u.val64);
772 u.val32[0] = bswap_32(u.val32[0]);
773 u.val32[1] = bswap_32(u.val32[1]);
774 }
775
776 data->pid = u.val32[0];
777 data->tid = u.val32[1];
Arnaldo Carvalho de Melod0dd74e2011-01-21 13:46:41 -0200778 array++;
779 }
780
781 if (type & PERF_SAMPLE_TIME) {
782 data->time = *array;
783 array++;
784 }
785
David Ahern7cec0922011-05-30 13:08:23 -0600786 data->addr = 0;
Arnaldo Carvalho de Melod0dd74e2011-01-21 13:46:41 -0200787 if (type & PERF_SAMPLE_ADDR) {
788 data->addr = *array;
789 array++;
790 }
791
792 data->id = -1ULL;
793 if (type & PERF_SAMPLE_ID) {
794 data->id = *array;
795 array++;
796 }
797
798 if (type & PERF_SAMPLE_STREAM_ID) {
799 data->stream_id = *array;
800 array++;
801 }
802
803 if (type & PERF_SAMPLE_CPU) {
David Ahern936be502011-09-06 09:12:26 -0600804
805 u.val64 = *array;
806 if (swapped) {
807 /* undo swap of u64, then swap on individual u32s */
808 u.val64 = bswap_64(u.val64);
809 u.val32[0] = bswap_32(u.val32[0]);
810 }
811
812 data->cpu = u.val32[0];
Arnaldo Carvalho de Melod0dd74e2011-01-21 13:46:41 -0200813 array++;
814 }
815
816 if (type & PERF_SAMPLE_PERIOD) {
817 data->period = *array;
818 array++;
819 }
820
821 if (type & PERF_SAMPLE_READ) {
Masanari Iidaf9d36992012-01-25 15:20:40 +0100822 fprintf(stderr, "PERF_SAMPLE_READ is unsupported for now\n");
Arnaldo Carvalho de Melod0dd74e2011-01-21 13:46:41 -0200823 return -1;
824 }
825
826 if (type & PERF_SAMPLE_CALLCHAIN) {
Frederic Weisbecker98e1da92011-05-21 20:08:15 +0200827 if (sample_overlap(event, array, sizeof(data->callchain->nr)))
828 return -EFAULT;
829
Arnaldo Carvalho de Melod0dd74e2011-01-21 13:46:41 -0200830 data->callchain = (struct ip_callchain *)array;
Frederic Weisbecker98e1da92011-05-21 20:08:15 +0200831
832 if (sample_overlap(event, array, data->callchain->nr))
833 return -EFAULT;
834
Arnaldo Carvalho de Melod0dd74e2011-01-21 13:46:41 -0200835 array += 1 + data->callchain->nr;
836 }
837
838 if (type & PERF_SAMPLE_RAW) {
Jiri Olsa8e303f22011-09-29 17:05:08 +0200839 const u64 *pdata;
840
David Ahern936be502011-09-06 09:12:26 -0600841 u.val64 = *array;
842 if (WARN_ONCE(swapped,
843 "Endianness of raw data not corrected!\n")) {
844 /* undo swap of u64, then swap on individual u32s */
845 u.val64 = bswap_64(u.val64);
846 u.val32[0] = bswap_32(u.val32[0]);
847 u.val32[1] = bswap_32(u.val32[1]);
848 }
Frederic Weisbecker98e1da92011-05-21 20:08:15 +0200849
850 if (sample_overlap(event, array, sizeof(u32)))
851 return -EFAULT;
852
David Ahern936be502011-09-06 09:12:26 -0600853 data->raw_size = u.val32[0];
Jiri Olsa8e303f22011-09-29 17:05:08 +0200854 pdata = (void *) array + sizeof(u32);
Frederic Weisbecker98e1da92011-05-21 20:08:15 +0200855
Jiri Olsa8e303f22011-09-29 17:05:08 +0200856 if (sample_overlap(event, pdata, data->raw_size))
Frederic Weisbecker98e1da92011-05-21 20:08:15 +0200857 return -EFAULT;
858
Jiri Olsa8e303f22011-09-29 17:05:08 +0200859 data->raw_data = (void *) pdata;
Stephane Eranianfa30c962012-03-17 23:23:18 +0100860
861 array = (void *)array + data->raw_size + sizeof(u32);
Arnaldo Carvalho de Melod0dd74e2011-01-21 13:46:41 -0200862 }
863
Roberto Agostino Vitillob5387522012-02-09 23:21:01 +0100864 if (type & PERF_SAMPLE_BRANCH_STACK) {
865 u64 sz;
866
867 data->branch_stack = (struct branch_stack *)array;
868 array++; /* nr */
869
870 sz = data->branch_stack->nr * sizeof(struct branch_entry);
871 sz /= sizeof(u64);
872 array += sz;
873 }
Jiri Olsa0f6a3012012-08-07 15:20:45 +0200874
875 if (type & PERF_SAMPLE_REGS_USER) {
876 /* First u64 tells us if we have any regs in sample. */
877 u64 avail = *array++;
878
879 if (avail) {
880 data->user_regs.regs = (u64 *)array;
881 array += hweight_long(regs_user);
882 }
883 }
884
885 if (type & PERF_SAMPLE_STACK_USER) {
886 u64 size = *array++;
887
888 data->user_stack.offset = ((char *)(array - 1)
889 - (char *) event);
890
891 if (!size) {
892 data->user_stack.size = 0;
893 } else {
894 data->user_stack.data = (char *)array;
895 array += size / sizeof(*array);
896 data->user_stack.size = *array;
897 }
898 }
899
Arnaldo Carvalho de Melod0dd74e2011-01-21 13:46:41 -0200900 return 0;
901}
Andrew Vagin74eec262011-11-28 12:03:31 +0300902
903int perf_event__synthesize_sample(union perf_event *event, u64 type,
904 const struct perf_sample *sample,
905 bool swapped)
906{
907 u64 *array;
908
909 /*
910 * used for cross-endian analysis. See git commit 65014ab3
911 * for why this goofiness is needed.
912 */
Jiri Olsa6a11f922012-05-16 08:59:04 +0200913 union u64_swap u;
Andrew Vagin74eec262011-11-28 12:03:31 +0300914
915 array = event->sample.array;
916
917 if (type & PERF_SAMPLE_IP) {
918 event->ip.ip = sample->ip;
919 array++;
920 }
921
922 if (type & PERF_SAMPLE_TID) {
923 u.val32[0] = sample->pid;
924 u.val32[1] = sample->tid;
925 if (swapped) {
926 /*
Arnaldo Carvalho de Meloa3f698f2012-08-02 12:23:46 -0300927 * Inverse of what is done in perf_evsel__parse_sample
Andrew Vagin74eec262011-11-28 12:03:31 +0300928 */
929 u.val32[0] = bswap_32(u.val32[0]);
930 u.val32[1] = bswap_32(u.val32[1]);
931 u.val64 = bswap_64(u.val64);
932 }
933
934 *array = u.val64;
935 array++;
936 }
937
938 if (type & PERF_SAMPLE_TIME) {
939 *array = sample->time;
940 array++;
941 }
942
943 if (type & PERF_SAMPLE_ADDR) {
944 *array = sample->addr;
945 array++;
946 }
947
948 if (type & PERF_SAMPLE_ID) {
949 *array = sample->id;
950 array++;
951 }
952
953 if (type & PERF_SAMPLE_STREAM_ID) {
954 *array = sample->stream_id;
955 array++;
956 }
957
958 if (type & PERF_SAMPLE_CPU) {
959 u.val32[0] = sample->cpu;
960 if (swapped) {
961 /*
Arnaldo Carvalho de Meloa3f698f2012-08-02 12:23:46 -0300962 * Inverse of what is done in perf_evsel__parse_sample
Andrew Vagin74eec262011-11-28 12:03:31 +0300963 */
964 u.val32[0] = bswap_32(u.val32[0]);
965 u.val64 = bswap_64(u.val64);
966 }
967 *array = u.val64;
968 array++;
969 }
970
971 if (type & PERF_SAMPLE_PERIOD) {
972 *array = sample->period;
973 array++;
974 }
975
976 return 0;
977}