/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */

#include <byteswap.h>
#include <linux/bitops.h>
#include <api/fs/debugfs.h>
#include <traceevent/event-parse.h>
#include <linux/hw_breakpoint.h>
#include <linux/perf_event.h>
#include <sys/resource.h>
#include "asm/bug.h"
#include "callchain.h"
#include "cgroup.h"
#include "evsel.h"
#include "evlist.h"
#include "util.h"
#include "cpumap.h"
#include "thread_map.h"
#include "target.h"
#include "perf_regs.h"
#include "debug.h"
#include "trace-event.h"

static struct {
	bool sample_id_all;
	bool exclude_guest;
	bool mmap2;
	bool cloexec;
} perf_missing_features;

static int perf_evsel__no_extra_init(struct perf_evsel *evsel __maybe_unused)
{
	return 0;
}

static void perf_evsel__no_extra_fini(struct perf_evsel *evsel __maybe_unused)
{
}

static struct {
	size_t	size;
	int	(*init)(struct perf_evsel *evsel);
	void	(*fini)(struct perf_evsel *evsel);
} perf_evsel__object = {
	.size = sizeof(struct perf_evsel),
	.init = perf_evsel__no_extra_init,
	.fini = perf_evsel__no_extra_fini,
};

int perf_evsel__object_config(size_t object_size,
			      int (*init)(struct perf_evsel *evsel),
			      void (*fini)(struct perf_evsel *evsel))
{

	if (object_size == 0)
		goto set_methods;

	if (perf_evsel__object.size > object_size)
		return -EINVAL;

	perf_evsel__object.size = object_size;

set_methods:
	if (init != NULL)
		perf_evsel__object.init = init;

	if (fini != NULL)
		perf_evsel__object.fini = fini;

	return 0;
}

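/*
 * Illustrative example (not part of the original file): a perf tool that
 * needs per-evsel private state can embed struct perf_evsel as the first
 * member of a larger structure and register its size plus optional
 * init/fini hooks via perf_evsel__object_config() above, e.g.:
 *
 *	struct my_evsel {
 *		struct perf_evsel evsel;	// must come first
 *		u64		  nr_events;	// tool private state
 *	};
 *
 *	static int my_evsel__init(struct perf_evsel *evsel)
 *	{
 *		((struct my_evsel *)evsel)->nr_events = 0;
 *		return 0;
 *	}
 *
 *	// Passing NULL for fini keeps the default no-op:
 *	perf_evsel__object_config(sizeof(struct my_evsel), my_evsel__init, NULL);
 *
 * After this call perf_evsel__new_idx() allocates perf_evsel__object.size
 * bytes and perf_evsel__init() invokes the registered init hook. The
 * my_evsel/my_evsel__init names are made up purely for illustration.
 */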
#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))

int __perf_evsel__sample_size(u64 sample_type)
{
	u64 mask = sample_type & PERF_SAMPLE_MASK;
	int size = 0;
	int i;

	for (i = 0; i < 64; i++) {
		if (mask & (1ULL << i))
			size++;
	}

	size *= sizeof(u64);

	return size;
}

/**
 * __perf_evsel__calc_id_pos - calculate id_pos.
 * @sample_type: sample type
 *
 * This function returns the position of the event id (PERF_SAMPLE_ID or
 * PERF_SAMPLE_IDENTIFIER) in a sample event i.e. in the array of struct
 * sample_event.
 */
static int __perf_evsel__calc_id_pos(u64 sample_type)
{
	int idx = 0;

	if (sample_type & PERF_SAMPLE_IDENTIFIER)
		return 0;

	if (!(sample_type & PERF_SAMPLE_ID))
		return -1;

	if (sample_type & PERF_SAMPLE_IP)
		idx += 1;

	if (sample_type & PERF_SAMPLE_TID)
		idx += 1;

	if (sample_type & PERF_SAMPLE_TIME)
		idx += 1;

	if (sample_type & PERF_SAMPLE_ADDR)
		idx += 1;

	return idx;
}

/**
 * __perf_evsel__calc_is_pos - calculate is_pos.
 * @sample_type: sample type
 *
 * This function returns the position (counting backwards) of the event id
 * (PERF_SAMPLE_ID or PERF_SAMPLE_IDENTIFIER) in a non-sample event i.e. if
 * sample_id_all is used there is an id sample appended to non-sample events.
 */
static int __perf_evsel__calc_is_pos(u64 sample_type)
{
	int idx = 1;

	if (sample_type & PERF_SAMPLE_IDENTIFIER)
		return 1;

	if (!(sample_type & PERF_SAMPLE_ID))
		return -1;

	if (sample_type & PERF_SAMPLE_CPU)
		idx += 1;

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		idx += 1;

	return idx;
}

void perf_evsel__calc_id_pos(struct perf_evsel *evsel)
{
	evsel->id_pos = __perf_evsel__calc_id_pos(evsel->attr.sample_type);
	evsel->is_pos = __perf_evsel__calc_is_pos(evsel->attr.sample_type);
}

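/*
 * Worked example for the two helpers above (illustration only): with
 * sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_TIME |
 * PERF_SAMPLE_ID | PERF_SAMPLE_CPU, __perf_evsel__calc_id_pos() returns 3
 * (the id comes after ip, pid/tid and time in a sample event), while
 * __perf_evsel__calc_is_pos() returns 2 (counting backwards in the
 * sample_id_all trailer the id sits just before the trailing cpu entry).
 */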
void __perf_evsel__set_sample_bit(struct perf_evsel *evsel,
				  enum perf_event_sample_format bit)
{
	if (!(evsel->attr.sample_type & bit)) {
		evsel->attr.sample_type |= bit;
		evsel->sample_size += sizeof(u64);
		perf_evsel__calc_id_pos(evsel);
	}
}

void __perf_evsel__reset_sample_bit(struct perf_evsel *evsel,
				    enum perf_event_sample_format bit)
{
	if (evsel->attr.sample_type & bit) {
		evsel->attr.sample_type &= ~bit;
		evsel->sample_size -= sizeof(u64);
		perf_evsel__calc_id_pos(evsel);
	}
}

void perf_evsel__set_sample_id(struct perf_evsel *evsel,
			       bool can_sample_identifier)
{
	if (can_sample_identifier) {
		perf_evsel__reset_sample_bit(evsel, ID);
		perf_evsel__set_sample_bit(evsel, IDENTIFIER);
	} else {
		perf_evsel__set_sample_bit(evsel, ID);
	}
	evsel->attr.read_format |= PERF_FORMAT_ID;
}

void perf_evsel__init(struct perf_evsel *evsel,
		      struct perf_event_attr *attr, int idx)
{
	evsel->idx	   = idx;
	evsel->tracking	   = !idx;
	evsel->attr	   = *attr;
	evsel->leader	   = evsel;
	evsel->unit	   = "";
	evsel->scale	   = 1.0;
	INIT_LIST_HEAD(&evsel->node);
	perf_evsel__object.init(evsel);
	evsel->sample_size = __perf_evsel__sample_size(attr->sample_type);
	perf_evsel__calc_id_pos(evsel);
}

struct perf_evsel *perf_evsel__new_idx(struct perf_event_attr *attr, int idx)
{
	struct perf_evsel *evsel = zalloc(perf_evsel__object.size);

	if (evsel != NULL)
		perf_evsel__init(evsel, attr, idx);

	return evsel;
}

struct perf_evsel *perf_evsel__newtp_idx(const char *sys, const char *name, int idx)
{
	struct perf_evsel *evsel = zalloc(perf_evsel__object.size);

	if (evsel != NULL) {
		struct perf_event_attr attr = {
			.type	     = PERF_TYPE_TRACEPOINT,
			.sample_type = (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME |
					PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD),
		};

		if (asprintf(&evsel->name, "%s:%s", sys, name) < 0)
			goto out_free;

		evsel->tp_format = trace_event__tp_format(sys, name);
		if (evsel->tp_format == NULL)
			goto out_free;

		event_attr_init(&attr);
		attr.config = evsel->tp_format->id;
		attr.sample_period = 1;
		perf_evsel__init(evsel, &attr, idx);
	}

	return evsel;

out_free:
	zfree(&evsel->name);
	free(evsel);
	return NULL;
}

const char *perf_evsel__hw_names[PERF_COUNT_HW_MAX] = {
	"cycles",
	"instructions",
	"cache-references",
	"cache-misses",
	"branches",
	"branch-misses",
	"bus-cycles",
	"stalled-cycles-frontend",
	"stalled-cycles-backend",
	"ref-cycles",
};

static const char *__perf_evsel__hw_name(u64 config)
{
	if (config < PERF_COUNT_HW_MAX && perf_evsel__hw_names[config])
		return perf_evsel__hw_names[config];

	return "unknown-hardware";
}

static int perf_evsel__add_modifiers(struct perf_evsel *evsel, char *bf, size_t size)
{
	int colon = 0, r = 0;
	struct perf_event_attr *attr = &evsel->attr;
	bool exclude_guest_default = false;

#define MOD_PRINT(context, mod)	do {					\
		if (!attr->exclude_##context) {				\
			if (!colon) colon = ++r;			\
			r += scnprintf(bf + r, size - r, "%c", mod);	\
		} } while(0)

	if (attr->exclude_kernel || attr->exclude_user || attr->exclude_hv) {
		MOD_PRINT(kernel, 'k');
		MOD_PRINT(user, 'u');
		MOD_PRINT(hv, 'h');
		exclude_guest_default = true;
	}

	if (attr->precise_ip) {
		if (!colon)
			colon = ++r;
		r += scnprintf(bf + r, size - r, "%.*s", attr->precise_ip, "ppp");
		exclude_guest_default = true;
	}

	if (attr->exclude_host || attr->exclude_guest == exclude_guest_default) {
		MOD_PRINT(host, 'H');
		MOD_PRINT(guest, 'G');
	}
#undef MOD_PRINT
	if (colon)
		bf[colon - 1] = ':';
	return r;
}

static int perf_evsel__hw_name(struct perf_evsel *evsel, char *bf, size_t size)
{
	int r = scnprintf(bf, size, "%s", __perf_evsel__hw_name(evsel->attr.config));
	return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
}

const char *perf_evsel__sw_names[PERF_COUNT_SW_MAX] = {
	"cpu-clock",
	"task-clock",
	"page-faults",
	"context-switches",
	"cpu-migrations",
	"minor-faults",
	"major-faults",
	"alignment-faults",
	"emulation-faults",
	"dummy",
};

static const char *__perf_evsel__sw_name(u64 config)
{
	if (config < PERF_COUNT_SW_MAX && perf_evsel__sw_names[config])
		return perf_evsel__sw_names[config];
	return "unknown-software";
}

static int perf_evsel__sw_name(struct perf_evsel *evsel, char *bf, size_t size)
{
	int r = scnprintf(bf, size, "%s", __perf_evsel__sw_name(evsel->attr.config));
	return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
}

static int __perf_evsel__bp_name(char *bf, size_t size, u64 addr, u64 type)
{
	int r;

	r = scnprintf(bf, size, "mem:0x%" PRIx64 ":", addr);

	if (type & HW_BREAKPOINT_R)
		r += scnprintf(bf + r, size - r, "r");

	if (type & HW_BREAKPOINT_W)
		r += scnprintf(bf + r, size - r, "w");

	if (type & HW_BREAKPOINT_X)
		r += scnprintf(bf + r, size - r, "x");

	return r;
}

static int perf_evsel__bp_name(struct perf_evsel *evsel, char *bf, size_t size)
{
	struct perf_event_attr *attr = &evsel->attr;
	int r = __perf_evsel__bp_name(bf, size, attr->bp_addr, attr->bp_type);
	return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
}

const char *perf_evsel__hw_cache[PERF_COUNT_HW_CACHE_MAX]
				[PERF_EVSEL__MAX_ALIASES] = {
 { "L1-dcache", "l1-d", "l1d", "L1-data", },
 { "L1-icache", "l1-i", "l1i", "L1-instruction", },
 { "LLC", "L2", },
 { "dTLB", "d-tlb", "Data-TLB", },
 { "iTLB", "i-tlb", "Instruction-TLB", },
 { "branch", "branches", "bpu", "btb", "bpc", },
 { "node", },
};

const char *perf_evsel__hw_cache_op[PERF_COUNT_HW_CACHE_OP_MAX]
				   [PERF_EVSEL__MAX_ALIASES] = {
 { "load", "loads", "read", },
 { "store", "stores", "write", },
 { "prefetch", "prefetches", "speculative-read", "speculative-load", },
};

const char *perf_evsel__hw_cache_result[PERF_COUNT_HW_CACHE_RESULT_MAX]
				       [PERF_EVSEL__MAX_ALIASES] = {
 { "refs", "Reference", "ops", "access", },
 { "misses", "miss", },
};

#define C(x)		PERF_COUNT_HW_CACHE_##x
#define CACHE_READ	(1 << C(OP_READ))
#define CACHE_WRITE	(1 << C(OP_WRITE))
#define CACHE_PREFETCH	(1 << C(OP_PREFETCH))
#define COP(x)		(1 << x)

/*
 * cache operation stat
 * L1I : Read and prefetch only
 * ITLB and BPU : Read-only
 */
static unsigned long perf_evsel__hw_cache_stat[C(MAX)] = {
 [C(L1D)]  = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
 [C(L1I)]  = (CACHE_READ | CACHE_PREFETCH),
 [C(LL)]   = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
 [C(DTLB)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
 [C(ITLB)] = (CACHE_READ),
 [C(BPU)]  = (CACHE_READ),
 [C(NODE)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
};

bool perf_evsel__is_cache_op_valid(u8 type, u8 op)
{
	if (perf_evsel__hw_cache_stat[type] & COP(op))
		return true;	/* valid */
	else
		return false;	/* invalid */
}

int __perf_evsel__hw_cache_type_op_res_name(u8 type, u8 op, u8 result,
					    char *bf, size_t size)
{
	if (result) {
		return scnprintf(bf, size, "%s-%s-%s", perf_evsel__hw_cache[type][0],
				 perf_evsel__hw_cache_op[op][0],
				 perf_evsel__hw_cache_result[result][0]);
	}

	return scnprintf(bf, size, "%s-%s", perf_evsel__hw_cache[type][0],
			 perf_evsel__hw_cache_op[op][1]);
}

static int __perf_evsel__hw_cache_name(u64 config, char *bf, size_t size)
{
	u8 op, result, type = (config >> 0) & 0xff;
	const char *err = "unknown-ext-hardware-cache-type";

	if (type >= PERF_COUNT_HW_CACHE_MAX)
		goto out_err;

	op = (config >> 8) & 0xff;
	err = "unknown-ext-hardware-cache-op";
	if (op >= PERF_COUNT_HW_CACHE_OP_MAX)
		goto out_err;

	result = (config >> 16) & 0xff;
	err = "unknown-ext-hardware-cache-result";
	if (result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		goto out_err;

	err = "invalid-cache";
	if (!perf_evsel__is_cache_op_valid(type, op))
		goto out_err;

	return __perf_evsel__hw_cache_type_op_res_name(type, op, result, bf, size);
out_err:
	return scnprintf(bf, size, "%s", err);
}

static int perf_evsel__hw_cache_name(struct perf_evsel *evsel, char *bf, size_t size)
{
	int ret = __perf_evsel__hw_cache_name(evsel->attr.config, bf, size);
	return ret + perf_evsel__add_modifiers(evsel, bf + ret, size - ret);
}

static int perf_evsel__raw_name(struct perf_evsel *evsel, char *bf, size_t size)
{
	int ret = scnprintf(bf, size, "raw 0x%" PRIx64, evsel->attr.config);
	return ret + perf_evsel__add_modifiers(evsel, bf + ret, size - ret);
}

const char *perf_evsel__name(struct perf_evsel *evsel)
{
	char bf[128];

	if (evsel->name)
		return evsel->name;

	switch (evsel->attr.type) {
	case PERF_TYPE_RAW:
		perf_evsel__raw_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_HARDWARE:
		perf_evsel__hw_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_HW_CACHE:
		perf_evsel__hw_cache_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_SOFTWARE:
		perf_evsel__sw_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_TRACEPOINT:
		scnprintf(bf, sizeof(bf), "%s", "unknown tracepoint");
		break;

	case PERF_TYPE_BREAKPOINT:
		perf_evsel__bp_name(evsel, bf, sizeof(bf));
		break;

	default:
		scnprintf(bf, sizeof(bf), "unknown attr type: %d",
			  evsel->attr.type);
		break;
	}

	evsel->name = strdup(bf);

	return evsel->name ?: "unknown";
}

const char *perf_evsel__group_name(struct perf_evsel *evsel)
{
	return evsel->group_name ?: "anon group";
}

int perf_evsel__group_desc(struct perf_evsel *evsel, char *buf, size_t size)
{
	int ret;
	struct perf_evsel *pos;
	const char *group_name = perf_evsel__group_name(evsel);

	ret = scnprintf(buf, size, "%s", group_name);

	ret += scnprintf(buf + ret, size - ret, " { %s",
			 perf_evsel__name(evsel));

	for_each_group_member(pos, evsel)
		ret += scnprintf(buf + ret, size - ret, ", %s",
				 perf_evsel__name(pos));

	ret += scnprintf(buf + ret, size - ret, " }");

	return ret;
}

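/*
 * Example of the format produced by perf_evsel__group_desc() above
 * (illustration only): for an unnamed group led by "cycles" with one
 * member "instructions" the buffer ends up containing:
 *
 *	anon group { cycles, instructions }
 */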
static void
perf_evsel__config_callgraph(struct perf_evsel *evsel)
{
	bool function = perf_evsel__is_function_event(evsel);
	struct perf_event_attr *attr = &evsel->attr;

	perf_evsel__set_sample_bit(evsel, CALLCHAIN);

	if (callchain_param.record_mode == CALLCHAIN_DWARF) {
		if (!function) {
			perf_evsel__set_sample_bit(evsel, REGS_USER);
			perf_evsel__set_sample_bit(evsel, STACK_USER);
			attr->sample_regs_user = PERF_REGS_MASK;
			attr->sample_stack_user = callchain_param.dump_size;
			attr->exclude_callchain_user = 1;
		} else {
			pr_info("Cannot use DWARF unwind for function trace event,"
				" falling back to framepointers.\n");
		}
	}

	if (function) {
		pr_info("Disabling user space callchains for function trace event.\n");
		attr->exclude_callchain_user = 1;
	}
}

/*
 * The enable_on_exec/disabled value strategy:
 *
 * 1) For any type of traced program:
 *   - all independent events and group leaders are disabled
 *   - all group members are enabled
 *
 *   Group members are ruled by group leaders. They need to
 *   be enabled, because the group scheduling relies on that.
 *
 * 2) For traced programs executed by perf:
 *   - all independent events and group leaders have
 *     enable_on_exec set
 *   - we don't specifically enable or disable any event during
 *     the record command
 *
 *   Independent events and group leaders are initially disabled
 *   and get enabled by exec. Group members are ruled by group
 *   leaders as stated in 1).
 *
 * 3) For traced programs attached by perf (pid/tid):
 *   - we specifically enable or disable all events during
 *     the record command
 *
 *   When attaching events to an already running traced program we
 *   enable/disable events specifically, as there's no
 *   initial traced exec call.
 */
void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts)
{
	struct perf_evsel *leader = evsel->leader;
	struct perf_event_attr *attr = &evsel->attr;
	int track = evsel->tracking;
	bool per_cpu = opts->target.default_per_cpu && !opts->target.per_thread;

	attr->sample_id_all = perf_missing_features.sample_id_all ? 0 : 1;
	attr->inherit	    = !opts->no_inherit;

	perf_evsel__set_sample_bit(evsel, IP);
	perf_evsel__set_sample_bit(evsel, TID);

	if (evsel->sample_read) {
		perf_evsel__set_sample_bit(evsel, READ);

		/*
		 * We need ID even in case of single event, because
		 * PERF_SAMPLE_READ processes ID specific data.
		 */
		perf_evsel__set_sample_id(evsel, false);

		/*
		 * Apply group format only if we belong to a group
		 * with more than one member.
		 */
		if (leader->nr_members > 1) {
			attr->read_format |= PERF_FORMAT_GROUP;
			attr->inherit = 0;
		}
	}

	/*
	 * We default some events to have a default interval. But keep
	 * it a weak assumption overridable by the user.
	 */
	if (!attr->sample_period || (opts->user_freq != UINT_MAX ||
				     opts->user_interval != ULLONG_MAX)) {
		if (opts->freq) {
			perf_evsel__set_sample_bit(evsel, PERIOD);
			attr->freq	  = 1;
			attr->sample_freq = opts->freq;
		} else {
			attr->sample_period = opts->default_interval;
		}
	}

	/*
	 * Disable sampling for all group members other
	 * than leader in case leader 'leads' the sampling.
	 */
	if ((leader != evsel) && leader->sample_read) {
		attr->sample_freq   = 0;
		attr->sample_period = 0;
	}

	if (opts->no_samples)
		attr->sample_freq = 0;

	if (opts->inherit_stat)
		attr->inherit_stat = 1;

	if (opts->sample_address) {
		perf_evsel__set_sample_bit(evsel, ADDR);
		attr->mmap_data = track;
	}

	/*
	 * We don't allow user space callchains for function trace
	 * events, due to issues with page faults while tracing the page
	 * fault handler and its overall trickiness.
	 */
	if (perf_evsel__is_function_event(evsel))
		evsel->attr.exclude_callchain_user = 1;

	if (callchain_param.enabled && !evsel->no_aux_samples)
		perf_evsel__config_callgraph(evsel);

	if (opts->sample_intr_regs) {
		attr->sample_regs_intr = PERF_REGS_MASK;
		perf_evsel__set_sample_bit(evsel, REGS_INTR);
	}

	if (target__has_cpu(&opts->target))
		perf_evsel__set_sample_bit(evsel, CPU);

	if (opts->period)
		perf_evsel__set_sample_bit(evsel, PERIOD);

	/*
	 * When the user explicitly disabled time, don't force it here.
	 */
	if (opts->sample_time &&
	    (!perf_missing_features.sample_id_all &&
	    (!opts->no_inherit || target__has_cpu(&opts->target) || per_cpu)))
		perf_evsel__set_sample_bit(evsel, TIME);

	if (opts->raw_samples && !evsel->no_aux_samples) {
		perf_evsel__set_sample_bit(evsel, TIME);
		perf_evsel__set_sample_bit(evsel, RAW);
		perf_evsel__set_sample_bit(evsel, CPU);
	}

	if (opts->sample_address)
		perf_evsel__set_sample_bit(evsel, DATA_SRC);

	if (opts->no_buffering) {
		attr->watermark = 0;
		attr->wakeup_events = 1;
	}
	if (opts->branch_stack && !evsel->no_aux_samples) {
		perf_evsel__set_sample_bit(evsel, BRANCH_STACK);
		attr->branch_sample_type = opts->branch_stack;
	}

	if (opts->sample_weight)
		perf_evsel__set_sample_bit(evsel, WEIGHT);

	attr->mmap  = track;
	attr->mmap2 = track && !perf_missing_features.mmap2;
	attr->comm  = track;

	if (opts->sample_transaction)
		perf_evsel__set_sample_bit(evsel, TRANSACTION);

	/*
	 * XXX see the function comment above
	 *
	 * Disabling only independent events or group leaders,
	 * keeping group members enabled.
	 */
	if (perf_evsel__is_group_leader(evsel))
		attr->disabled = 1;

	/*
	 * Setting enable_on_exec for independent events and
	 * group leaders for traced programs executed by perf.
	 */
	if (target__none(&opts->target) && perf_evsel__is_group_leader(evsel) &&
	    !opts->initial_delay)
		attr->enable_on_exec = 1;

	if (evsel->immediate) {
		attr->disabled = 0;
		attr->enable_on_exec = 0;
	}
}

static int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	int cpu, thread;

	if (evsel->system_wide)
		nthreads = 1;

	evsel->fd = xyarray__new(ncpus, nthreads, sizeof(int));

	if (evsel->fd) {
		for (cpu = 0; cpu < ncpus; cpu++) {
			for (thread = 0; thread < nthreads; thread++) {
				FD(evsel, cpu, thread) = -1;
			}
		}
	}

	return evsel->fd != NULL ? 0 : -ENOMEM;
}

static int perf_evsel__run_ioctl(struct perf_evsel *evsel, int ncpus, int nthreads,
				 int ioc, void *arg)
{
	int cpu, thread;

	if (evsel->system_wide)
		nthreads = 1;

	for (cpu = 0; cpu < ncpus; cpu++) {
		for (thread = 0; thread < nthreads; thread++) {
			int fd = FD(evsel, cpu, thread),
			    err = ioctl(fd, ioc, arg);

			if (err)
				return err;
		}
	}

	return 0;
}

int perf_evsel__set_filter(struct perf_evsel *evsel, int ncpus, int nthreads,
			   const char *filter)
{
	return perf_evsel__run_ioctl(evsel, ncpus, nthreads,
				     PERF_EVENT_IOC_SET_FILTER,
				     (void *)filter);
}

int perf_evsel__enable(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	return perf_evsel__run_ioctl(evsel, ncpus, nthreads,
				     PERF_EVENT_IOC_ENABLE,
				     0);
}

int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	if (evsel->system_wide)
		nthreads = 1;

	evsel->sample_id = xyarray__new(ncpus, nthreads, sizeof(struct perf_sample_id));
	if (evsel->sample_id == NULL)
		return -ENOMEM;

	evsel->id = zalloc(ncpus * nthreads * sizeof(u64));
	if (evsel->id == NULL) {
		xyarray__delete(evsel->sample_id);
		evsel->sample_id = NULL;
		return -ENOMEM;
	}

	return 0;
}

void perf_evsel__reset_counts(struct perf_evsel *evsel, int ncpus)
{
	memset(evsel->counts, 0, (sizeof(*evsel->counts) +
				  (ncpus * sizeof(struct perf_counts_values))));
}

int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus)
{
	evsel->counts = zalloc((sizeof(*evsel->counts) +
				(ncpus * sizeof(struct perf_counts_values))));
	return evsel->counts != NULL ? 0 : -ENOMEM;
}

static void perf_evsel__free_fd(struct perf_evsel *evsel)
{
	xyarray__delete(evsel->fd);
	evsel->fd = NULL;
}

static void perf_evsel__free_id(struct perf_evsel *evsel)
{
	xyarray__delete(evsel->sample_id);
	evsel->sample_id = NULL;
	zfree(&evsel->id);
}

void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	int cpu, thread;

	if (evsel->system_wide)
		nthreads = 1;

	for (cpu = 0; cpu < ncpus; cpu++)
		for (thread = 0; thread < nthreads; ++thread) {
			close(FD(evsel, cpu, thread));
			FD(evsel, cpu, thread) = -1;
		}
}

void perf_evsel__free_counts(struct perf_evsel *evsel)
{
	zfree(&evsel->counts);
}

void perf_evsel__exit(struct perf_evsel *evsel)
{
	assert(list_empty(&evsel->node));
	perf_evsel__free_fd(evsel);
	perf_evsel__free_id(evsel);
	close_cgroup(evsel->cgrp);
	zfree(&evsel->group_name);
	zfree(&evsel->name);
	perf_evsel__object.fini(evsel);
}

void perf_evsel__delete(struct perf_evsel *evsel)
{
	perf_evsel__exit(evsel);
	free(evsel);
}

void perf_evsel__compute_deltas(struct perf_evsel *evsel, int cpu,
				struct perf_counts_values *count)
{
	struct perf_counts_values tmp;

	if (!evsel->prev_raw_counts)
		return;

	if (cpu == -1) {
		tmp = evsel->prev_raw_counts->aggr;
		evsel->prev_raw_counts->aggr = *count;
	} else {
		tmp = evsel->prev_raw_counts->cpu[cpu];
		evsel->prev_raw_counts->cpu[cpu] = *count;
	}

	count->val = count->val - tmp.val;
	count->ena = count->ena - tmp.ena;
	count->run = count->run - tmp.run;
}

void perf_counts_values__scale(struct perf_counts_values *count,
			       bool scale, s8 *pscaled)
{
	s8 scaled = 0;

	if (scale) {
		if (count->run == 0) {
			scaled = -1;
			count->val = 0;
		} else if (count->run < count->ena) {
			scaled = 1;
			count->val = (u64)((double) count->val * count->ena / count->run + 0.5);
		}
	} else
		count->ena = count->run = 0;

	if (pscaled)
		*pscaled = scaled;
}

int perf_evsel__read_cb(struct perf_evsel *evsel, int cpu, int thread,
			perf_evsel__read_cb_t cb)
{
	struct perf_counts_values count;

	memset(&count, 0, sizeof(count));

	if (FD(evsel, cpu, thread) < 0)
		return -EINVAL;

	if (readn(FD(evsel, cpu, thread), &count, sizeof(count)) < 0)
		return -errno;

	return cb(evsel, cpu, thread, &count);
}

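/*
 * Illustrative example (not part of the original file): a callback passed
 * to perf_evsel__read_cb() above must match the cb(evsel, cpu, thread,
 * &count) invocation, i.e. something like:
 *
 *	static int print_count(struct perf_evsel *evsel, int cpu, int thread,
 *			       struct perf_counts_values *count)
 *	{
 *		fprintf(stderr, "%s: cpu %d, thread %d: %" PRIu64 "\n",
 *			perf_evsel__name(evsel), cpu, thread, count->val);
 *		return 0;
 *	}
 *
 *	perf_evsel__read_cb(evsel, 0, 0, print_count);
 *
 * print_count is a made-up name used only for illustration.
 */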
int __perf_evsel__read_on_cpu(struct perf_evsel *evsel,
			      int cpu, int thread, bool scale)
{
	struct perf_counts_values count;
	size_t nv = scale ? 3 : 1;

	if (FD(evsel, cpu, thread) < 0)
		return -EINVAL;

	if (evsel->counts == NULL && perf_evsel__alloc_counts(evsel, cpu + 1) < 0)
		return -ENOMEM;

	if (readn(FD(evsel, cpu, thread), &count, nv * sizeof(u64)) < 0)
		return -errno;

	perf_evsel__compute_deltas(evsel, cpu, &count);
	perf_counts_values__scale(&count, scale, NULL);
	evsel->counts->cpu[cpu] = count;
	return 0;
}

int __perf_evsel__read(struct perf_evsel *evsel,
		       int ncpus, int nthreads, bool scale)
{
	size_t nv = scale ? 3 : 1;
	int cpu, thread;
	struct perf_counts_values *aggr = &evsel->counts->aggr, count;

	if (evsel->system_wide)
		nthreads = 1;

	aggr->val = aggr->ena = aggr->run = 0;

	for (cpu = 0; cpu < ncpus; cpu++) {
		for (thread = 0; thread < nthreads; thread++) {
			if (FD(evsel, cpu, thread) < 0)
				continue;

			if (readn(FD(evsel, cpu, thread),
				  &count, nv * sizeof(u64)) < 0)
				return -errno;

			aggr->val += count.val;
			if (scale) {
				aggr->ena += count.ena;
				aggr->run += count.run;
			}
		}
	}

	perf_evsel__compute_deltas(evsel, -1, aggr);
	perf_counts_values__scale(aggr, scale, &evsel->counts->scaled);
	return 0;
}

static int get_group_fd(struct perf_evsel *evsel, int cpu, int thread)
{
	struct perf_evsel *leader = evsel->leader;
	int fd;

	if (perf_evsel__is_group_leader(evsel))
		return -1;

	/*
	 * Leader must be already processed/open,
	 * if not it's a bug.
	 */
	BUG_ON(!leader->fd);

	fd = FD(leader, cpu, thread);
	BUG_ON(fd == -1);

	return fd;
}

#define __PRINT_ATTR(fmt, cast, field)  \
	fprintf(fp, " %-19s "fmt"\n", #field, cast attr->field)

#define PRINT_ATTR_U32(field)  __PRINT_ATTR("%u" , , field)
#define PRINT_ATTR_X32(field)  __PRINT_ATTR("%#x", , field)
#define PRINT_ATTR_U64(field)  __PRINT_ATTR("%" PRIu64, (uint64_t), field)
#define PRINT_ATTR_X64(field)  __PRINT_ATTR("%#"PRIx64, (uint64_t), field)

#define PRINT_ATTR2N(name1, field1, name2, field2)	\
	fprintf(fp, " %-19s %u %-19s %u\n",		\
		name1, attr->field1, name2, attr->field2)

#define PRINT_ATTR2(field1, field2) \
	PRINT_ATTR2N(#field1, field1, #field2, field2)

static size_t perf_event_attr__fprintf(struct perf_event_attr *attr, FILE *fp)
{
	size_t ret = 0;

	ret += fprintf(fp, "%.60s\n", graph_dotted_line);
	ret += fprintf(fp, "perf_event_attr:\n");

	ret += PRINT_ATTR_U32(type);
	ret += PRINT_ATTR_U32(size);
	ret += PRINT_ATTR_X64(config);
	ret += PRINT_ATTR_U64(sample_period);
	ret += PRINT_ATTR_U64(sample_freq);
	ret += PRINT_ATTR_X64(sample_type);
	ret += PRINT_ATTR_X64(read_format);

	ret += PRINT_ATTR2(disabled, inherit);
	ret += PRINT_ATTR2(pinned, exclusive);
	ret += PRINT_ATTR2(exclude_user, exclude_kernel);
	ret += PRINT_ATTR2(exclude_hv, exclude_idle);
	ret += PRINT_ATTR2(mmap, comm);
	ret += PRINT_ATTR2(mmap2, comm_exec);
	ret += PRINT_ATTR2(freq, inherit_stat);
	ret += PRINT_ATTR2(enable_on_exec, task);
	ret += PRINT_ATTR2(watermark, precise_ip);
	ret += PRINT_ATTR2(mmap_data, sample_id_all);
	ret += PRINT_ATTR2(exclude_host, exclude_guest);
	ret += PRINT_ATTR2N("excl.callchain_kern", exclude_callchain_kernel,
			    "excl.callchain_user", exclude_callchain_user);

	ret += PRINT_ATTR_U32(wakeup_events);
	ret += PRINT_ATTR_U32(wakeup_watermark);
	ret += PRINT_ATTR_X32(bp_type);
	ret += PRINT_ATTR_X64(bp_addr);
	ret += PRINT_ATTR_X64(config1);
	ret += PRINT_ATTR_U64(bp_len);
	ret += PRINT_ATTR_X64(config2);
	ret += PRINT_ATTR_X64(branch_sample_type);
	ret += PRINT_ATTR_X64(sample_regs_user);
	ret += PRINT_ATTR_U32(sample_stack_user);
	ret += PRINT_ATTR_X64(sample_regs_intr);

	ret += fprintf(fp, "%.60s\n", graph_dotted_line);

	return ret;
}

static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
			      struct thread_map *threads)
{
	int cpu, thread, nthreads;
	unsigned long flags = PERF_FLAG_FD_CLOEXEC;
	int pid = -1, err;
	enum { NO_CHANGE, SET_TO_MAX, INCREASED_MAX } set_rlimit = NO_CHANGE;

	if (evsel->system_wide)
		nthreads = 1;
	else
		nthreads = threads->nr;

	if (evsel->fd == NULL &&
	    perf_evsel__alloc_fd(evsel, cpus->nr, nthreads) < 0)
		return -ENOMEM;

	if (evsel->cgrp) {
		flags |= PERF_FLAG_PID_CGROUP;
		pid = evsel->cgrp->fd;
	}

fallback_missing_features:
	if (perf_missing_features.cloexec)
		flags &= ~(unsigned long)PERF_FLAG_FD_CLOEXEC;
	if (perf_missing_features.mmap2)
		evsel->attr.mmap2 = 0;
	if (perf_missing_features.exclude_guest)
		evsel->attr.exclude_guest = evsel->attr.exclude_host = 0;
retry_sample_id:
	if (perf_missing_features.sample_id_all)
		evsel->attr.sample_id_all = 0;

	if (verbose >= 2)
		perf_event_attr__fprintf(&evsel->attr, stderr);

	for (cpu = 0; cpu < cpus->nr; cpu++) {

		for (thread = 0; thread < nthreads; thread++) {
			int group_fd;

			if (!evsel->cgrp && !evsel->system_wide)
				pid = threads->map[thread];

			group_fd = get_group_fd(evsel, cpu, thread);
retry_open:
			pr_debug2("sys_perf_event_open: pid %d cpu %d group_fd %d flags %#lx\n",
				  pid, cpus->map[cpu], group_fd, flags);

			FD(evsel, cpu, thread) = sys_perf_event_open(&evsel->attr,
								     pid,
								     cpus->map[cpu],
								     group_fd, flags);
			if (FD(evsel, cpu, thread) < 0) {
				err = -errno;
				pr_debug2("sys_perf_event_open failed, error %d\n",
					  err);
				goto try_fallback;
			}
			set_rlimit = NO_CHANGE;
		}
	}

	return 0;

try_fallback:
	/*
	 * perf stat needs between 5 and 22 fds per CPU. When we run out
	 * of them try to increase the limits.
	 */
	if (err == -EMFILE && set_rlimit < INCREASED_MAX) {
		struct rlimit l;
		int old_errno = errno;

		if (getrlimit(RLIMIT_NOFILE, &l) == 0) {
			if (set_rlimit == NO_CHANGE)
				l.rlim_cur = l.rlim_max;
			else {
				l.rlim_cur = l.rlim_max + 1000;
				l.rlim_max = l.rlim_cur;
			}
			if (setrlimit(RLIMIT_NOFILE, &l) == 0) {
				set_rlimit++;
				errno = old_errno;
				goto retry_open;
			}
		}
		errno = old_errno;
	}

	if (err != -EINVAL || cpu > 0 || thread > 0)
		goto out_close;

	if (!perf_missing_features.cloexec && (flags & PERF_FLAG_FD_CLOEXEC)) {
		perf_missing_features.cloexec = true;
		goto fallback_missing_features;
	} else if (!perf_missing_features.mmap2 && evsel->attr.mmap2) {
		perf_missing_features.mmap2 = true;
		goto fallback_missing_features;
	} else if (!perf_missing_features.exclude_guest &&
		   (evsel->attr.exclude_guest || evsel->attr.exclude_host)) {
		perf_missing_features.exclude_guest = true;
		goto fallback_missing_features;
	} else if (!perf_missing_features.sample_id_all) {
		perf_missing_features.sample_id_all = true;
		goto retry_sample_id;
	}

out_close:
	do {
		while (--thread >= 0) {
			close(FD(evsel, cpu, thread));
			FD(evsel, cpu, thread) = -1;
		}
		thread = nthreads;
	} while (--cpu >= 0);
	return err;
}

void perf_evsel__close(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	if (evsel->fd == NULL)
		return;

	perf_evsel__close_fd(evsel, ncpus, nthreads);
	perf_evsel__free_fd(evsel);
}

static struct {
	struct cpu_map map;
	int cpus[1];
} empty_cpu_map = {
	.map.nr	= 1,
	.cpus	= { -1, },
};

static struct {
	struct thread_map map;
	int threads[1];
} empty_thread_map = {
	.map.nr	 = 1,
	.threads = { -1, },
};

int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
		     struct thread_map *threads)
{
	if (cpus == NULL) {
		/* Work around old compiler warnings about strict aliasing */
		cpus = &empty_cpu_map.map;
	}

	if (threads == NULL)
		threads = &empty_thread_map.map;

	return __perf_evsel__open(evsel, cpus, threads);
}

int perf_evsel__open_per_cpu(struct perf_evsel *evsel,
			     struct cpu_map *cpus)
{
	return __perf_evsel__open(evsel, cpus, &empty_thread_map.map);
}

int perf_evsel__open_per_thread(struct perf_evsel *evsel,
				struct thread_map *threads)
{
	return __perf_evsel__open(evsel, &empty_cpu_map.map, threads);
}

static int perf_evsel__parse_id_sample(const struct perf_evsel *evsel,
				       const union perf_event *event,
				       struct perf_sample *sample)
{
	u64 type = evsel->attr.sample_type;
	const u64 *array = event->sample.array;
	bool swapped = evsel->needs_swap;
	union u64_swap u;

	array += ((event->header.size -
		   sizeof(event->header)) / sizeof(u64)) - 1;

	if (type & PERF_SAMPLE_IDENTIFIER) {
		sample->id = *array;
		array--;
	}

	if (type & PERF_SAMPLE_CPU) {
		u.val64 = *array;
		if (swapped) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
		}

		sample->cpu = u.val32[0];
		array--;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		sample->stream_id = *array;
		array--;
	}

	if (type & PERF_SAMPLE_ID) {
		sample->id = *array;
		array--;
	}

	if (type & PERF_SAMPLE_TIME) {
		sample->time = *array;
		array--;
	}

	if (type & PERF_SAMPLE_TID) {
		u.val64 = *array;
		if (swapped) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
		}

		sample->pid = u.val32[0];
		sample->tid = u.val32[1];
		array--;
	}

	return 0;
}

static inline bool overflow(const void *endp, u16 max_size, const void *offset,
			    u64 size)
{
	return size > max_size || offset + size > endp;
}

#define OVERFLOW_CHECK(offset, size, max_size)				\
	do {								\
		if (overflow(endp, (max_size), (offset), (size)))	\
			return -EFAULT;					\
	} while (0)

#define OVERFLOW_CHECK_u64(offset) \
	OVERFLOW_CHECK(offset, sizeof(u64), sizeof(u64))

int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
			     struct perf_sample *data)
{
	u64 type = evsel->attr.sample_type;
	bool swapped = evsel->needs_swap;
	const u64 *array;
	u16 max_size = event->header.size;
	const void *endp = (void *)event + max_size;
	u64 sz;

	/*
	 * used for cross-endian analysis. See git commit 65014ab3
	 * for why this goofiness is needed.
	 */
	union u64_swap u;

	memset(data, 0, sizeof(*data));
	data->cpu = data->pid = data->tid = -1;
	data->stream_id = data->id = data->time = -1ULL;
	data->period = evsel->attr.sample_period;
	data->weight = 0;

	if (event->header.type != PERF_RECORD_SAMPLE) {
		if (!evsel->attr.sample_id_all)
			return 0;
		return perf_evsel__parse_id_sample(evsel, event, data);
	}

	array = event->sample.array;

	/*
	 * The evsel's sample_size is based on PERF_SAMPLE_MASK which includes
	 * up to PERF_SAMPLE_PERIOD. After that overflow() must be used to
	 * check the format does not go past the end of the event.
	 */
	if (evsel->sample_size + sizeof(event->header) > event->header.size)
		return -EFAULT;

	data->id = -1ULL;
	if (type & PERF_SAMPLE_IDENTIFIER) {
		data->id = *array;
		array++;
	}

	if (type & PERF_SAMPLE_IP) {
		data->ip = *array;
		array++;
	}

	if (type & PERF_SAMPLE_TID) {
		u.val64 = *array;
		if (swapped) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
		}

		data->pid = u.val32[0];
		data->tid = u.val32[1];
		array++;
	}

	if (type & PERF_SAMPLE_TIME) {
		data->time = *array;
		array++;
	}

	data->addr = 0;
	if (type & PERF_SAMPLE_ADDR) {
		data->addr = *array;
		array++;
	}

	if (type & PERF_SAMPLE_ID) {
		data->id = *array;
		array++;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		data->stream_id = *array;
		array++;
	}

	if (type & PERF_SAMPLE_CPU) {

		u.val64 = *array;
		if (swapped) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
		}

		data->cpu = u.val32[0];
		array++;
	}

	if (type & PERF_SAMPLE_PERIOD) {
		data->period = *array;
		array++;
	}

	if (type & PERF_SAMPLE_READ) {
		u64 read_format = evsel->attr.read_format;

		OVERFLOW_CHECK_u64(array);
		if (read_format & PERF_FORMAT_GROUP)
			data->read.group.nr = *array;
		else
			data->read.one.value = *array;

		array++;

		if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
			OVERFLOW_CHECK_u64(array);
			data->read.time_enabled = *array;
			array++;
		}

		if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
			OVERFLOW_CHECK_u64(array);
			data->read.time_running = *array;
			array++;
		}

		/* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
		if (read_format & PERF_FORMAT_GROUP) {
			const u64 max_group_nr = UINT64_MAX /
					sizeof(struct sample_read_value);

			if (data->read.group.nr > max_group_nr)
				return -EFAULT;
			sz = data->read.group.nr *
			     sizeof(struct sample_read_value);
			OVERFLOW_CHECK(array, sz, max_size);
			data->read.group.values =
					(struct sample_read_value *)array;
			array = (void *)array + sz;
		} else {
			OVERFLOW_CHECK_u64(array);
			data->read.one.id = *array;
			array++;
		}
	}

	if (type & PERF_SAMPLE_CALLCHAIN) {
		const u64 max_callchain_nr = UINT64_MAX / sizeof(u64);

1466 OVERFLOW_CHECK_u64(array);
1467 data->callchain = (struct ip_callchain *)array++;
1468 if (data->callchain->nr > max_callchain_nr)
Frederic Weisbecker98e1da92011-05-21 20:08:15 +02001469 return -EFAULT;
Adrian Hunter03b6ea92013-08-27 11:23:04 +03001470 sz = data->callchain->nr * sizeof(u64);
1471 OVERFLOW_CHECK(array, sz, max_size);
1472 array = (void *)array + sz;
Arnaldo Carvalho de Melod0dd74e2011-01-21 13:46:41 -02001473 }
1474
1475 if (type & PERF_SAMPLE_RAW) {
Adrian Hunter03b6ea92013-08-27 11:23:04 +03001476 OVERFLOW_CHECK_u64(array);
David Ahern936be502011-09-06 09:12:26 -06001477 u.val64 = *array;
1478 if (WARN_ONCE(swapped,
1479 "Endianness of raw data not corrected!\n")) {
1480 /* undo swap of u64, then swap on individual u32s */
1481 u.val64 = bswap_64(u.val64);
1482 u.val32[0] = bswap_32(u.val32[0]);
1483 u.val32[1] = bswap_32(u.val32[1]);
1484 }
David Ahern936be502011-09-06 09:12:26 -06001485 data->raw_size = u.val32[0];
Adrian Hunter03b6ea92013-08-27 11:23:04 +03001486 array = (void *)array + sizeof(u32);
Frederic Weisbecker98e1da92011-05-21 20:08:15 +02001487
Adrian Hunter03b6ea92013-08-27 11:23:04 +03001488 OVERFLOW_CHECK(array, data->raw_size, max_size);
1489 data->raw_data = (void *)array;
1490 array = (void *)array + data->raw_size;
Arnaldo Carvalho de Melod0dd74e2011-01-21 13:46:41 -02001491 }
1492
Roberto Agostino Vitillob5387522012-02-09 23:21:01 +01001493 if (type & PERF_SAMPLE_BRANCH_STACK) {
Adrian Hunter03b6ea92013-08-27 11:23:04 +03001494 const u64 max_branch_nr = UINT64_MAX /
1495 sizeof(struct branch_entry);
Roberto Agostino Vitillob5387522012-02-09 23:21:01 +01001496
Adrian Hunter03b6ea92013-08-27 11:23:04 +03001497 OVERFLOW_CHECK_u64(array);
1498 data->branch_stack = (struct branch_stack *)array++;
Roberto Agostino Vitillob5387522012-02-09 23:21:01 +01001499
Adrian Hunter03b6ea92013-08-27 11:23:04 +03001500 if (data->branch_stack->nr > max_branch_nr)
1501 return -EFAULT;
Roberto Agostino Vitillob5387522012-02-09 23:21:01 +01001502 sz = data->branch_stack->nr * sizeof(struct branch_entry);
Adrian Hunter03b6ea92013-08-27 11:23:04 +03001503 OVERFLOW_CHECK(array, sz, max_size);
1504 array = (void *)array + sz;
Roberto Agostino Vitillob5387522012-02-09 23:21:01 +01001505 }
Jiri Olsa0f6a3012012-08-07 15:20:45 +02001506
1507 if (type & PERF_SAMPLE_REGS_USER) {
Adrian Hunter03b6ea92013-08-27 11:23:04 +03001508 OVERFLOW_CHECK_u64(array);
Adrian Hunter5b95a4a32013-08-27 11:23:10 +03001509 data->user_regs.abi = *array;
1510 array++;
Jiri Olsa0f6a3012012-08-07 15:20:45 +02001511
Adrian Hunter5b95a4a32013-08-27 11:23:10 +03001512 if (data->user_regs.abi) {
Jiri Olsa352ea452014-01-07 13:47:25 +01001513 u64 mask = evsel->attr.sample_regs_user;
Adrian Hunter03b6ea92013-08-27 11:23:04 +03001514
Jiri Olsa352ea452014-01-07 13:47:25 +01001515 sz = hweight_long(mask) * sizeof(u64);
Adrian Hunter03b6ea92013-08-27 11:23:04 +03001516 OVERFLOW_CHECK(array, sz, max_size);
Jiri Olsa352ea452014-01-07 13:47:25 +01001517 data->user_regs.mask = mask;
Jiri Olsa0f6a3012012-08-07 15:20:45 +02001518 data->user_regs.regs = (u64 *)array;
Adrian Hunter03b6ea92013-08-27 11:23:04 +03001519 array = (void *)array + sz;
Jiri Olsa0f6a3012012-08-07 15:20:45 +02001520 }
1521 }
1522
1523 if (type & PERF_SAMPLE_STACK_USER) {
Adrian Hunter03b6ea92013-08-27 11:23:04 +03001524 OVERFLOW_CHECK_u64(array);
1525 sz = *array++;
Jiri Olsa0f6a3012012-08-07 15:20:45 +02001526
1527 data->user_stack.offset = ((char *)(array - 1)
1528 - (char *) event);
1529
Adrian Hunter03b6ea92013-08-27 11:23:04 +03001530 if (!sz) {
Jiri Olsa0f6a3012012-08-07 15:20:45 +02001531 data->user_stack.size = 0;
1532 } else {
Adrian Hunter03b6ea92013-08-27 11:23:04 +03001533 OVERFLOW_CHECK(array, sz, max_size);
Jiri Olsa0f6a3012012-08-07 15:20:45 +02001534 data->user_stack.data = (char *)array;
Adrian Hunter03b6ea92013-08-27 11:23:04 +03001535 array = (void *)array + sz;
1536 OVERFLOW_CHECK_u64(array);
Adrian Hunter54bd2692013-07-04 16:20:34 +03001537 data->user_stack.size = *array++;
Jiri Olsaa65cb4b2013-10-02 15:46:39 +02001538 if (WARN_ONCE(data->user_stack.size > sz,
1539 "user stack dump failure\n"))
1540 return -EFAULT;
Jiri Olsa0f6a3012012-08-07 15:20:45 +02001541 }
1542 }
1543
Andi Kleen05484292013-01-24 16:10:29 +01001544 data->weight = 0;
1545 if (type & PERF_SAMPLE_WEIGHT) {
Adrian Hunter03b6ea92013-08-27 11:23:04 +03001546 OVERFLOW_CHECK_u64(array);
Andi Kleen05484292013-01-24 16:10:29 +01001547 data->weight = *array;
1548 array++;
1549 }
1550
Stephane Eranian98a3b322013-01-24 16:10:35 +01001551 data->data_src = PERF_MEM_DATA_SRC_NONE;
1552 if (type & PERF_SAMPLE_DATA_SRC) {
Adrian Hunter03b6ea92013-08-27 11:23:04 +03001553 OVERFLOW_CHECK_u64(array);
Stephane Eranian98a3b322013-01-24 16:10:35 +01001554 data->data_src = *array;
1555 array++;
1556 }
1557
Andi Kleen475eeab2013-09-20 07:40:43 -07001558 data->transaction = 0;
1559 if (type & PERF_SAMPLE_TRANSACTION) {
Adrian Hunter87b95522013-11-01 15:51:36 +02001560 OVERFLOW_CHECK_u64(array);
Andi Kleen475eeab2013-09-20 07:40:43 -07001561 data->transaction = *array;
1562 array++;
1563 }
1564
Stephane Eranian6a21c0b2014-09-24 13:48:39 +02001565 data->intr_regs.abi = PERF_SAMPLE_REGS_ABI_NONE;
1566 if (type & PERF_SAMPLE_REGS_INTR) {
1567 OVERFLOW_CHECK_u64(array);
1568 data->intr_regs.abi = *array;
1569 array++;
1570
1571 if (data->intr_regs.abi != PERF_SAMPLE_REGS_ABI_NONE) {
1572 u64 mask = evsel->attr.sample_regs_intr;
1573
1574 sz = hweight_long(mask) * sizeof(u64);
1575 OVERFLOW_CHECK(array, sz, max_size);
1576 data->intr_regs.mask = mask;
1577 data->intr_regs.regs = (u64 *)array;
1578 array = (void *)array + sz;
1579 }
1580 }
1581
Arnaldo Carvalho de Melod0dd74e2011-01-21 13:46:41 -02001582 return 0;
1583}
Andrew Vagin74eec262011-11-28 12:03:31 +03001584
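/*
 * perf_event__sample_event_size - number of bytes needed to synthesize
 * 'sample' with the given sample_type and read_format.  Must mirror the
 * layout written by perf_event__synthesize_sample() below.
 */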
Adrian Hunterb1cf6f62013-08-27 11:23:12 +03001585size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type,
Jiri Olsa352ea452014-01-07 13:47:25 +01001586 u64 read_format)
Adrian Hunterb1cf6f62013-08-27 11:23:12 +03001587{
1588 size_t sz, result = sizeof(struct sample_event);
1589
1590 if (type & PERF_SAMPLE_IDENTIFIER)
1591 result += sizeof(u64);
1592
1593 if (type & PERF_SAMPLE_IP)
1594 result += sizeof(u64);
1595
1596 if (type & PERF_SAMPLE_TID)
1597 result += sizeof(u64);
1598
1599 if (type & PERF_SAMPLE_TIME)
1600 result += sizeof(u64);
1601
1602 if (type & PERF_SAMPLE_ADDR)
1603 result += sizeof(u64);
1604
1605 if (type & PERF_SAMPLE_ID)
1606 result += sizeof(u64);
1607
1608 if (type & PERF_SAMPLE_STREAM_ID)
1609 result += sizeof(u64);
1610
1611 if (type & PERF_SAMPLE_CPU)
1612 result += sizeof(u64);
1613
1614 if (type & PERF_SAMPLE_PERIOD)
1615 result += sizeof(u64);
1616
1617 if (type & PERF_SAMPLE_READ) {
1618 result += sizeof(u64);
1619 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
1620 result += sizeof(u64);
1621 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
1622 result += sizeof(u64);
1623 /* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
1624 if (read_format & PERF_FORMAT_GROUP) {
1625 sz = sample->read.group.nr *
1626 sizeof(struct sample_read_value);
1627 result += sz;
1628 } else {
1629 result += sizeof(u64);
1630 }
1631 }
1632
1633 if (type & PERF_SAMPLE_CALLCHAIN) {
1634 sz = (sample->callchain->nr + 1) * sizeof(u64);
1635 result += sz;
1636 }
1637
1638 if (type & PERF_SAMPLE_RAW) {
1639 result += sizeof(u32);
1640 result += sample->raw_size;
1641 }
1642
1643 if (type & PERF_SAMPLE_BRANCH_STACK) {
1644 sz = sample->branch_stack->nr * sizeof(struct branch_entry);
1645 sz += sizeof(u64);
1646 result += sz;
1647 }
1648
1649 if (type & PERF_SAMPLE_REGS_USER) {
1650 if (sample->user_regs.abi) {
1651 result += sizeof(u64);
Jiri Olsa352ea452014-01-07 13:47:25 +01001652 sz = hweight_long(sample->user_regs.mask) * sizeof(u64);
Adrian Hunterb1cf6f62013-08-27 11:23:12 +03001653 result += sz;
1654 } else {
1655 result += sizeof(u64);
1656 }
1657 }
1658
1659 if (type & PERF_SAMPLE_STACK_USER) {
1660 sz = sample->user_stack.size;
1661 result += sizeof(u64);
1662 if (sz) {
1663 result += sz;
1664 result += sizeof(u64);
1665 }
1666 }
1667
1668 if (type & PERF_SAMPLE_WEIGHT)
1669 result += sizeof(u64);
1670
1671 if (type & PERF_SAMPLE_DATA_SRC)
1672 result += sizeof(u64);
1673
Adrian Hunter42d88912013-11-01 15:51:38 +02001674 if (type & PERF_SAMPLE_TRANSACTION)
1675 result += sizeof(u64);
1676
Stephane Eranian6a21c0b2014-09-24 13:48:39 +02001677 if (type & PERF_SAMPLE_REGS_INTR) {
1678 if (sample->intr_regs.abi) {
1679 result += sizeof(u64);
1680 sz = hweight_long(sample->intr_regs.mask) * sizeof(u64);
1681 result += sz;
1682 } else {
1683 result += sizeof(u64);
1684 }
1685 }
1686
Adrian Hunterb1cf6f62013-08-27 11:23:12 +03001687 return result;
1688}
1689
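/*
 * perf_event__synthesize_sample - the inverse of perf_evsel__parse_sample():
 * serialize 'sample' back into event->sample.array, typically into an event
 * sized beforehand with perf_event__sample_event_size().
 */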
Andrew Vagin74eec262011-11-28 12:03:31 +03001690int perf_event__synthesize_sample(union perf_event *event, u64 type,
Jiri Olsa352ea452014-01-07 13:47:25 +01001691 u64 read_format,
Andrew Vagin74eec262011-11-28 12:03:31 +03001692 const struct perf_sample *sample,
1693 bool swapped)
1694{
1695 u64 *array;
Adrian Hunterd03f2172013-08-27 11:23:11 +03001696 size_t sz;
Andrew Vagin74eec262011-11-28 12:03:31 +03001697 /*
1698 * used for cross-endian analysis. See git commit 65014ab3
1699 * for why this goofiness is needed.
1700 */
Jiri Olsa6a11f922012-05-16 08:59:04 +02001701 union u64_swap u;
Andrew Vagin74eec262011-11-28 12:03:31 +03001702
1703 array = event->sample.array;
1704
Adrian Hunter75562572013-08-27 11:23:09 +03001705 if (type & PERF_SAMPLE_IDENTIFIER) {
1706 *array = sample->id;
1707 array++;
1708 }
1709
Andrew Vagin74eec262011-11-28 12:03:31 +03001710 if (type & PERF_SAMPLE_IP) {
Adrian Hunteref893252013-08-27 11:23:06 +03001711 *array = sample->ip;
Andrew Vagin74eec262011-11-28 12:03:31 +03001712 array++;
1713 }
1714
1715 if (type & PERF_SAMPLE_TID) {
1716 u.val32[0] = sample->pid;
1717 u.val32[1] = sample->tid;
1718 if (swapped) {
1719 /*
Arnaldo Carvalho de Meloa3f698f2012-08-02 12:23:46 -03001720 * Inverse of what is done in perf_evsel__parse_sample
Andrew Vagin74eec262011-11-28 12:03:31 +03001721 */
1722 u.val32[0] = bswap_32(u.val32[0]);
1723 u.val32[1] = bswap_32(u.val32[1]);
1724 u.val64 = bswap_64(u.val64);
1725 }
1726
1727 *array = u.val64;
1728 array++;
1729 }
1730
1731 if (type & PERF_SAMPLE_TIME) {
1732 *array = sample->time;
1733 array++;
1734 }
1735
1736 if (type & PERF_SAMPLE_ADDR) {
1737 *array = sample->addr;
1738 array++;
1739 }
1740
1741 if (type & PERF_SAMPLE_ID) {
1742 *array = sample->id;
1743 array++;
1744 }
1745
1746 if (type & PERF_SAMPLE_STREAM_ID) {
1747 *array = sample->stream_id;
1748 array++;
1749 }
1750
1751 if (type & PERF_SAMPLE_CPU) {
1752 u.val32[0] = sample->cpu;
1753 if (swapped) {
1754 /*
Arnaldo Carvalho de Meloa3f698f2012-08-02 12:23:46 -03001755 * Inverse of what is done in perf_evsel__parse_sample
Andrew Vagin74eec262011-11-28 12:03:31 +03001756 */
1757 u.val32[0] = bswap_32(u.val32[0]);
1758 u.val64 = bswap_64(u.val64);
1759 }
1760 *array = u.val64;
1761 array++;
1762 }
1763
1764 if (type & PERF_SAMPLE_PERIOD) {
1765 *array = sample->period;
1766 array++;
1767 }
1768
Adrian Hunterd03f2172013-08-27 11:23:11 +03001769 if (type & PERF_SAMPLE_READ) {
1770 if (read_format & PERF_FORMAT_GROUP)
1771 *array = sample->read.group.nr;
1772 else
1773 *array = sample->read.one.value;
1774 array++;
1775
1776 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
1777 *array = sample->read.time_enabled;
1778 array++;
1779 }
1780
1781 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
1782 *array = sample->read.time_running;
1783 array++;
1784 }
1785
1786 /* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
1787 if (read_format & PERF_FORMAT_GROUP) {
1788 sz = sample->read.group.nr *
1789 sizeof(struct sample_read_value);
1790 memcpy(array, sample->read.group.values, sz);
1791 array = (void *)array + sz;
1792 } else {
1793 *array = sample->read.one.id;
1794 array++;
1795 }
1796 }
1797
1798 if (type & PERF_SAMPLE_CALLCHAIN) {
1799 sz = (sample->callchain->nr + 1) * sizeof(u64);
1800 memcpy(array, sample->callchain, sz);
1801 array = (void *)array + sz;
1802 }
1803
1804 if (type & PERF_SAMPLE_RAW) {
1805 u.val32[0] = sample->raw_size;
1806 if (WARN_ONCE(swapped,
1807 "Endianness of raw data not corrected!\n")) {
1808 /*
1809 * Inverse of what is done in perf_evsel__parse_sample
1810 */
1811 u.val32[0] = bswap_32(u.val32[0]);
1812 u.val32[1] = bswap_32(u.val32[1]);
1813 u.val64 = bswap_64(u.val64);
1814 }
1815 *array = u.val64;
1816 array = (void *)array + sizeof(u32);
1817
1818 memcpy(array, sample->raw_data, sample->raw_size);
1819 array = (void *)array + sample->raw_size;
1820 }
1821
1822 if (type & PERF_SAMPLE_BRANCH_STACK) {
1823 sz = sample->branch_stack->nr * sizeof(struct branch_entry);
1824 sz += sizeof(u64);
1825 memcpy(array, sample->branch_stack, sz);
1826 array = (void *)array + sz;
1827 }
1828
1829 if (type & PERF_SAMPLE_REGS_USER) {
1830 if (sample->user_regs.abi) {
1831 *array++ = sample->user_regs.abi;
Jiri Olsa352ea452014-01-07 13:47:25 +01001832 sz = hweight_long(sample->user_regs.mask) * sizeof(u64);
Adrian Hunterd03f2172013-08-27 11:23:11 +03001833 memcpy(array, sample->user_regs.regs, sz);
1834 array = (void *)array + sz;
1835 } else {
1836 *array++ = 0;
1837 }
1838 }
1839
1840 if (type & PERF_SAMPLE_STACK_USER) {
1841 sz = sample->user_stack.size;
1842 *array++ = sz;
1843 if (sz) {
1844 memcpy(array, sample->user_stack.data, sz);
1845 array = (void *)array + sz;
1846 *array++ = sz;
1847 }
1848 }
1849
1850 if (type & PERF_SAMPLE_WEIGHT) {
1851 *array = sample->weight;
1852 array++;
1853 }
1854
1855 if (type & PERF_SAMPLE_DATA_SRC) {
1856 *array = sample->data_src;
1857 array++;
1858 }
1859
Adrian Hunter42d88912013-11-01 15:51:38 +02001860 if (type & PERF_SAMPLE_TRANSACTION) {
1861 *array = sample->transaction;
1862 array++;
1863 }
1864
Stephane Eranian6a21c0b2014-09-24 13:48:39 +02001865 if (type & PERF_SAMPLE_REGS_INTR) {
1866 if (sample->intr_regs.abi) {
1867 *array++ = sample->intr_regs.abi;
1868 sz = hweight_long(sample->intr_regs.mask) * sizeof(u64);
1869 memcpy(array, sample->intr_regs.regs, sz);
1870 array = (void *)array + sz;
1871 } else {
1872 *array++ = 0;
1873 }
1874 }
1875
Andrew Vagin74eec262011-11-28 12:03:31 +03001876 return 0;
1877}
Arnaldo Carvalho de Melo5555ded2012-09-11 19:24:23 -03001878
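/*
 * Tracepoint field accessors: perf_evsel__field() looks a field up in the
 * evsel's parsed event format, perf_evsel__rawptr() returns a pointer into
 * the raw sample data (resolving dynamic arrays), and perf_evsel__intval()
 * reads an integer field, byte-swapping it if the data came from a host
 * with different endianness.
 */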
Arnaldo Carvalho de Meloefd2b922012-09-18 11:21:50 -03001879struct format_field *perf_evsel__field(struct perf_evsel *evsel, const char *name)
1880{
1881 return pevent_find_field(evsel->tp_format, name);
1882}
1883
Arnaldo Carvalho de Melo5d2074e2012-09-26 20:22:00 -03001884void *perf_evsel__rawptr(struct perf_evsel *evsel, struct perf_sample *sample,
Arnaldo Carvalho de Melo5555ded2012-09-11 19:24:23 -03001885 const char *name)
1886{
Arnaldo Carvalho de Meloefd2b922012-09-18 11:21:50 -03001887 struct format_field *field = perf_evsel__field(evsel, name);
Arnaldo Carvalho de Melo5555ded2012-09-11 19:24:23 -03001888 int offset;
1889
Arnaldo Carvalho de Meloefd2b922012-09-18 11:21:50 -03001890 if (!field)
1891 return NULL;
Arnaldo Carvalho de Melo5555ded2012-09-11 19:24:23 -03001892
1893 offset = field->offset;
1894
1895 if (field->flags & FIELD_IS_DYNAMIC) {
1896 offset = *(int *)(sample->raw_data + field->offset);
1897 offset &= 0xffff;
1898 }
1899
1900 return sample->raw_data + offset;
1901}
1902
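/*
 * Typical (hypothetical) use with a tracepoint evsel, assuming the event
 * format defines "pid" and "comm" fields:
 *
 *	pid_t pid = perf_evsel__intval(evsel, sample, "pid");
 *	char *comm = perf_evsel__rawptr(evsel, sample, "comm");
 */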
1903u64 perf_evsel__intval(struct perf_evsel *evsel, struct perf_sample *sample,
1904 const char *name)
1905{
Arnaldo Carvalho de Meloefd2b922012-09-18 11:21:50 -03001906 struct format_field *field = perf_evsel__field(evsel, name);
Arnaldo Carvalho de Meloe6b6f672012-09-26 13:13:04 -03001907 void *ptr;
1908 u64 value;
Arnaldo Carvalho de Melo5555ded2012-09-11 19:24:23 -03001909
Arnaldo Carvalho de Meloefd2b922012-09-18 11:21:50 -03001910 if (!field)
1911 return 0;
Arnaldo Carvalho de Melo5555ded2012-09-11 19:24:23 -03001912
Arnaldo Carvalho de Meloe6b6f672012-09-26 13:13:04 -03001913 ptr = sample->raw_data + field->offset;
Arnaldo Carvalho de Melo5555ded2012-09-11 19:24:23 -03001914
Arnaldo Carvalho de Meloe6b6f672012-09-26 13:13:04 -03001915 switch (field->size) {
1916 case 1:
1917 return *(u8 *)ptr;
1918 case 2:
1919 value = *(u16 *)ptr;
1920 break;
1921 case 4:
1922 value = *(u32 *)ptr;
1923 break;
1924 case 8:
1925 value = *(u64 *)ptr;
1926 break;
1927 default:
1928 return 0;
1929 }
1930
1931 if (!evsel->needs_swap)
1932 return value;
1933
1934 switch (field->size) {
1935 case 2:
1936 return bswap_16(value);
1937 case 4:
1938 return bswap_32(value);
1939 case 8:
1940 return bswap_64(value);
1941 default:
1942 return 0;
1943 }
1944
1945 return 0;
Arnaldo Carvalho de Melo5555ded2012-09-11 19:24:23 -03001946}
Arnaldo Carvalho de Melo0698aed2012-12-10 18:17:08 -03001947
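/*
 * Helpers for perf_evsel__fprintf(): comma_fprintf() prints ":" before the
 * first attribute and "," before the following ones; __if_fprintf() and the
 * if_print() macro only emit an attr field when its value is non-zero.
 */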
1948static int comma_fprintf(FILE *fp, bool *first, const char *fmt, ...)
1949{
1950 va_list args;
1951 int ret = 0;
1952
1953 if (!*first) {
1954 ret += fprintf(fp, ",");
1955 } else {
1956 ret += fprintf(fp, ":");
1957 *first = false;
1958 }
1959
1960 va_start(args, fmt);
1961 ret += vfprintf(fp, fmt, args);
1962 va_end(args);
1963 return ret;
1964}
1965
1966static int __if_fprintf(FILE *fp, bool *first, const char *field, u64 value)
1967{
1968 if (value == 0)
1969 return 0;
1970
1971 return comma_fprintf(fp, first, " %s: %" PRIu64, field, value);
1972}
1973
1974#define if_print(field) printed += __if_fprintf(fp, &first, #field, evsel->attr.field)
1975
Arnaldo Carvalho de Meloc79a4392012-12-11 10:54:12 -03001976struct bit_names {
1977 int bit;
1978 const char *name;
1979};
1980
1981static int bits__fprintf(FILE *fp, const char *field, u64 value,
1982 struct bit_names *bits, bool *first)
1983{
1984 int i = 0, printed = comma_fprintf(fp, first, " %s: ", field);
1985 bool first_bit = true;
1986
1987 do {
1988 if (value & bits[i].bit) {
1989 printed += fprintf(fp, "%s%s", first_bit ? "" : "|", bits[i].name);
1990 first_bit = false;
1991 }
1992 } while (bits[++i].name != NULL);
1993
1994 return printed;
1995}
1996
1997static int sample_type__fprintf(FILE *fp, bool *first, u64 value)
1998{
1999#define bit_name(n) { PERF_SAMPLE_##n, #n }
2000 struct bit_names bits[] = {
2001 bit_name(IP), bit_name(TID), bit_name(TIME), bit_name(ADDR),
2002 bit_name(READ), bit_name(CALLCHAIN), bit_name(ID), bit_name(CPU),
2003 bit_name(PERIOD), bit_name(STREAM_ID), bit_name(RAW),
2004 bit_name(BRANCH_STACK), bit_name(REGS_USER), bit_name(STACK_USER),
Stephane Eranian6a21c0b2014-09-24 13:48:39 +02002005 bit_name(IDENTIFIER), bit_name(REGS_INTR),
Arnaldo Carvalho de Meloc79a4392012-12-11 10:54:12 -03002006 { .name = NULL, }
2007 };
2008#undef bit_name
2009 return bits__fprintf(fp, "sample_type", value, bits, first);
2010}
2011
2012static int read_format__fprintf(FILE *fp, bool *first, u64 value)
2013{
2014#define bit_name(n) { PERF_FORMAT_##n, #n }
2015 struct bit_names bits[] = {
2016 bit_name(TOTAL_TIME_ENABLED), bit_name(TOTAL_TIME_RUNNING),
2017 bit_name(ID), bit_name(GROUP),
2018 { .name = NULL, }
2019 };
2020#undef bit_name
2021 return bits__fprintf(fp, "read_format", value, bits, first);
2022}
2023
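/*
 * perf_evsel__fprintf - print the event name and, depending on 'details',
 * its group members, sampling frequency and the perf_event_attr contents
 * in human readable form.
 */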
Arnaldo Carvalho de Melo0698aed2012-12-10 18:17:08 -03002024int perf_evsel__fprintf(struct perf_evsel *evsel,
2025 struct perf_attr_details *details, FILE *fp)
2026{
2027 bool first = true;
Namhyung Kime6ab07d2013-01-22 18:09:47 +09002028 int printed = 0;
2029
Arnaldo Carvalho de Meloe35ef3552013-02-06 17:20:02 -03002030 if (details->event_group) {
Namhyung Kime6ab07d2013-01-22 18:09:47 +09002031 struct perf_evsel *pos;
2032
2033 if (!perf_evsel__is_group_leader(evsel))
2034 return 0;
2035
2036 if (evsel->nr_members > 1)
2037 printed += fprintf(fp, "%s{", evsel->group_name ?: "");
2038
2039 printed += fprintf(fp, "%s", perf_evsel__name(evsel));
2040 for_each_group_member(pos, evsel)
2041 printed += fprintf(fp, ",%s", perf_evsel__name(pos));
2042
2043 if (evsel->nr_members > 1)
2044 printed += fprintf(fp, "}");
2045 goto out;
2046 }
2047
2048 printed += fprintf(fp, "%s", perf_evsel__name(evsel));
Arnaldo Carvalho de Melo0698aed2012-12-10 18:17:08 -03002049
2050 if (details->verbose || details->freq) {
2051 printed += comma_fprintf(fp, &first, " sample_freq=%" PRIu64,
2052 (u64)evsel->attr.sample_freq);
2053 }
2054
2055 if (details->verbose) {
2056 if_print(type);
2057 if_print(config);
2058 if_print(config1);
2059 if_print(config2);
2060 if_print(size);
Arnaldo Carvalho de Meloc79a4392012-12-11 10:54:12 -03002061 printed += sample_type__fprintf(fp, &first, evsel->attr.sample_type);
2062 if (evsel->attr.read_format)
2063 printed += read_format__fprintf(fp, &first, evsel->attr.read_format);
Arnaldo Carvalho de Melo0698aed2012-12-10 18:17:08 -03002064 if_print(disabled);
2065 if_print(inherit);
2066 if_print(pinned);
2067 if_print(exclusive);
2068 if_print(exclude_user);
2069 if_print(exclude_kernel);
2070 if_print(exclude_hv);
2071 if_print(exclude_idle);
2072 if_print(mmap);
Stephane Eranian5c5e8542013-08-21 12:10:25 +02002073 if_print(mmap2);
Arnaldo Carvalho de Melo0698aed2012-12-10 18:17:08 -03002074 if_print(comm);
Adrian Hunter022c50d2014-07-14 13:02:27 +03002075 if_print(comm_exec);
Arnaldo Carvalho de Melo0698aed2012-12-10 18:17:08 -03002076 if_print(freq);
2077 if_print(inherit_stat);
2078 if_print(enable_on_exec);
2079 if_print(task);
2080 if_print(watermark);
2081 if_print(precise_ip);
2082 if_print(mmap_data);
2083 if_print(sample_id_all);
2084 if_print(exclude_host);
2085 if_print(exclude_guest);
2086 if_print(__reserved_1);
2087 if_print(wakeup_events);
2088 if_print(bp_type);
2089 if_print(branch_sample_type);
2090 }
Namhyung Kime6ab07d2013-01-22 18:09:47 +09002091out:
Arnaldo Carvalho de Melo0698aed2012-12-10 18:17:08 -03002092 fputc('\n', fp);
2093 return ++printed;
2094}
Arnaldo Carvalho de Meloc0a54342012-12-13 14:16:30 -03002095
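/*
 * perf_evsel__fallback - if opening the hardware cycles event failed because
 * no PMU support is available, rewrite the evsel to the software cpu-clock
 * event and report the substitution through 'msg'.  Returns true when a
 * fallback was applied, so the caller can retry the open.
 */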
2096bool perf_evsel__fallback(struct perf_evsel *evsel, int err,
2097 char *msg, size_t msgsize)
2098{
David Ahern2b821cc2013-07-18 17:27:59 -06002099 if ((err == ENOENT || err == ENXIO || err == ENODEV) &&
Arnaldo Carvalho de Meloc0a54342012-12-13 14:16:30 -03002100 evsel->attr.type == PERF_TYPE_HARDWARE &&
2101 evsel->attr.config == PERF_COUNT_HW_CPU_CYCLES) {
2102 /*
2103 * If it's cycles then fall back to hrtimer based
2104 * cpu-clock-tick sw counter, which is always available even if
2105 * no PMU support.
2106 *
2107 * PPC returns ENXIO until 2.6.37 (behavior changed with commit
2108 * b0a873e).
2109 */
2110 scnprintf(msg, msgsize, "%s",
2111"The cycles event is not supported, trying to fall back to cpu-clock-ticks");
2112
2113 evsel->attr.type = PERF_TYPE_SOFTWARE;
2114 evsel->attr.config = PERF_COUNT_SW_CPU_CLOCK;
2115
Arnaldo Carvalho de Melo04662522013-12-26 17:41:15 -03002116 zfree(&evsel->name);
Arnaldo Carvalho de Meloc0a54342012-12-13 14:16:30 -03002117 return true;
2118 }
2119
2120 return false;
2121}
Arnaldo Carvalho de Melo56e52e82012-12-13 15:10:58 -03002122
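/*
 * perf_evsel__open_strerror - turn an errno from sys_perf_event_open() into
 * a user oriented message, with specific hints for the common permission,
 * unsupported-event, out-of-range-CPU and busy-PMU cases.
 */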
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03002123int perf_evsel__open_strerror(struct perf_evsel *evsel, struct target *target,
Arnaldo Carvalho de Melo56e52e82012-12-13 15:10:58 -03002124 int err, char *msg, size_t size)
2125{
Masami Hiramatsu6e81c742014-08-14 02:22:36 +00002126 char sbuf[STRERR_BUFSIZE];
2127
Arnaldo Carvalho de Melo56e52e82012-12-13 15:10:58 -03002128 switch (err) {
2129 case EPERM:
2130 case EACCES:
David Ahernb69e63a2013-05-25 17:54:00 -06002131 return scnprintf(msg, size,
Arnaldo Carvalho de Melo56e52e82012-12-13 15:10:58 -03002132 "You may not have permission to collect %sstats.\n"
2133 "Consider tweaking /proc/sys/kernel/perf_event_paranoid:\n"
2134 " -1 - Not paranoid at all\n"
2135 " 0 - Disallow raw tracepoint access for unpriv\n"
2136 " 1 - Disallow cpu events for unpriv\n"
2137 " 2 - Disallow kernel profiling for unpriv",
2138 target->system_wide ? "system-wide " : "");
2139 case ENOENT:
2140 return scnprintf(msg, size, "The %s event is not supported.",
2141 perf_evsel__name(evsel));
2142 case EMFILE:
2143 return scnprintf(msg, size, "%s",
2144 "Too many events are opened.\n"
2145 "Try again after reducing the number of events.");
2146 case ENODEV:
2147 if (target->cpu_list)
2148 return scnprintf(msg, size, "%s",
2149 "No such device - did you specify an out-of-range profile CPU?\n");
2150 break;
2151 case EOPNOTSUPP:
2152 if (evsel->attr.precise_ip)
2153 return scnprintf(msg, size, "%s",
2154 "\'precise\' request may not be supported. Try removing 'p' modifier.");
2155#if defined(__i386__) || defined(__x86_64__)
2156 if (evsel->attr.type == PERF_TYPE_HARDWARE)
2157 return scnprintf(msg, size, "%s",
2158 "No hardware sampling interrupt available.\n"
2159 "No APIC? If so then you can boot the kernel with the \"lapic\" boot parameter to force-enable it.");
2160#endif
2161 break;
Jiri Olsa63914ac2014-08-01 17:46:54 +02002162 case EBUSY:
2163 if (find_process("oprofiled"))
2164 return scnprintf(msg, size,
2165 "The PMU counters are busy/taken by another profiler.\n"
2166 "We found oprofile daemon running, please stop it and try again.");
2167 break;
Arnaldo Carvalho de Melo56e52e82012-12-13 15:10:58 -03002168 default:
2169 break;
2170 }
2171
2172 return scnprintf(msg, size,
Masami Hiramatsu6e81c742014-08-14 02:22:36 +00002173 "The sys_perf_event_open() syscall returned with %d (%s) for event (%s).\n"
Arnaldo Carvalho de Melo56e52e82012-12-13 15:10:58 -03002174 "/bin/dmesg may provide additional information.\n"
2175 "No CONFIG_PERF_EVENTS=y kernel support configured?\n",
Masami Hiramatsu6e81c742014-08-14 02:22:36 +00002176 err, strerror_r(err, sbuf, sizeof(sbuf)),
2177 perf_evsel__name(evsel));
Arnaldo Carvalho de Melo56e52e82012-12-13 15:10:58 -03002178}