blob: bb58b05f905f9a157d41db9547b175850a9578a7 [file] [log] [blame]
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -02001/*
2 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
3 *
4 * Parts came from builtin-{top,stat,record}.c, see those files for further
5 * copyright notes.
6 *
7 * Released under the GPL v2. (and only v2, not any later version)
8 */
9
David Ahern936be502011-09-06 09:12:26 -060010#include <byteswap.h>
Jiri Olsa0f6a3012012-08-07 15:20:45 +020011#include <linux/bitops.h>
David Ahern936be502011-09-06 09:12:26 -060012#include "asm/bug.h"
Arnaldo Carvalho de Meloefd2b922012-09-18 11:21:50 -030013#include "debugfs.h"
Arnaldo Carvalho de Melo5555ded2012-09-11 19:24:23 -030014#include "event-parse.h"
Arnaldo Carvalho de Melo69aad6f2011-01-03 16:39:04 -020015#include "evsel.h"
Arnaldo Carvalho de Melo70082dd2011-01-12 17:03:24 -020016#include "evlist.h"
Arnaldo Carvalho de Melo69aad6f2011-01-03 16:39:04 -020017#include "util.h"
Arnaldo Carvalho de Melo86bd5e82011-01-03 23:09:46 -020018#include "cpumap.h"
Arnaldo Carvalho de Melofd782602011-01-18 15:15:24 -020019#include "thread_map.h"
Namhyung Kim12864b32012-04-26 14:15:22 +090020#include "target.h"
David Howellsd2709c72012-11-19 22:21:03 +000021#include <linux/hw_breakpoint.h>
22#include <linux/perf_event.h>
Jiri Olsa26d33022012-08-07 15:20:47 +020023#include "perf_regs.h"
Arnaldo Carvalho de Melo69aad6f2011-01-03 16:39:04 -020024
Arnaldo Carvalho de Meloc52b12e2011-01-03 17:45:52 -020025#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
26
Arnaldo Carvalho de Melobde09462012-08-01 18:53:11 -030027static int __perf_evsel__sample_size(u64 sample_type)
Arnaldo Carvalho de Meloc2a70652011-06-02 11:04:54 -030028{
29 u64 mask = sample_type & PERF_SAMPLE_MASK;
30 int size = 0;
31 int i;
32
33 for (i = 0; i < 64; i++) {
34 if (mask & (1ULL << i))
35 size++;
36 }
37
38 size *= sizeof(u64);
39
40 return size;
41}
42
Jiri Olsa4bf9ce12012-03-22 14:37:26 +010043void hists__init(struct hists *hists)
Arnaldo Carvalho de Melo0e2a5f12011-11-04 08:16:58 -020044{
45 memset(hists, 0, sizeof(*hists));
46 hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT;
47 hists->entries_in = &hists->entries_in_array[0];
48 hists->entries_collapsed = RB_ROOT;
49 hists->entries = RB_ROOT;
50 pthread_mutex_init(&hists->lock, NULL);
51}
52
Arnaldo Carvalho de Meloef1d1af2011-01-18 21:41:45 -020053void perf_evsel__init(struct perf_evsel *evsel,
54 struct perf_event_attr *attr, int idx)
55{
56 evsel->idx = idx;
57 evsel->attr = *attr;
Namhyung Kim2cfda562012-11-29 15:38:29 +090058 evsel->leader = evsel;
Arnaldo Carvalho de Meloef1d1af2011-01-18 21:41:45 -020059 INIT_LIST_HEAD(&evsel->node);
Arnaldo Carvalho de Melo1980c2eb2011-10-05 17:50:23 -030060 hists__init(&evsel->hists);
Arnaldo Carvalho de Melobde09462012-08-01 18:53:11 -030061 evsel->sample_size = __perf_evsel__sample_size(attr->sample_type);
Arnaldo Carvalho de Meloef1d1af2011-01-18 21:41:45 -020062}
63
Lin Ming23a2f3a2011-01-07 11:11:09 +080064struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr, int idx)
Arnaldo Carvalho de Melo69aad6f2011-01-03 16:39:04 -020065{
66 struct perf_evsel *evsel = zalloc(sizeof(*evsel));
67
Arnaldo Carvalho de Meloef1d1af2011-01-18 21:41:45 -020068 if (evsel != NULL)
69 perf_evsel__init(evsel, attr, idx);
Arnaldo Carvalho de Melo69aad6f2011-01-03 16:39:04 -020070
71 return evsel;
72}
73
Arnaldo Carvalho de Melo201b7332012-09-26 20:24:19 -030074struct event_format *event_format__new(const char *sys, const char *name)
Arnaldo Carvalho de Meloefd2b922012-09-18 11:21:50 -030075{
76 int fd, n;
77 char *filename;
78 void *bf = NULL, *nbf;
79 size_t size = 0, alloc_size = 0;
80 struct event_format *format = NULL;
81
82 if (asprintf(&filename, "%s/%s/%s/format", tracing_events_path, sys, name) < 0)
83 goto out;
84
85 fd = open(filename, O_RDONLY);
86 if (fd < 0)
87 goto out_free_filename;
88
89 do {
90 if (size == alloc_size) {
91 alloc_size += BUFSIZ;
92 nbf = realloc(bf, alloc_size);
93 if (nbf == NULL)
94 goto out_free_bf;
95 bf = nbf;
96 }
97
98 n = read(fd, bf + size, BUFSIZ);
99 if (n < 0)
100 goto out_free_bf;
101 size += n;
102 } while (n > 0);
103
104 pevent_parse_format(&format, bf, size, sys);
105
106out_free_bf:
107 free(bf);
108 close(fd);
109out_free_filename:
110 free(filename);
111out:
112 return format;
113}
114
115struct perf_evsel *perf_evsel__newtp(const char *sys, const char *name, int idx)
116{
117 struct perf_evsel *evsel = zalloc(sizeof(*evsel));
118
119 if (evsel != NULL) {
120 struct perf_event_attr attr = {
Arnaldo Carvalho de Melo0b80f8b32012-09-26 12:28:26 -0300121 .type = PERF_TYPE_TRACEPOINT,
122 .sample_type = (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME |
123 PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD),
Arnaldo Carvalho de Meloefd2b922012-09-18 11:21:50 -0300124 };
125
Arnaldo Carvalho de Meloe48ffe22012-09-26 17:11:38 -0300126 if (asprintf(&evsel->name, "%s:%s", sys, name) < 0)
127 goto out_free;
128
Arnaldo Carvalho de Meloefd2b922012-09-18 11:21:50 -0300129 evsel->tp_format = event_format__new(sys, name);
130 if (evsel->tp_format == NULL)
131 goto out_free;
132
Arnaldo Carvalho de Melo0b80f8b32012-09-26 12:28:26 -0300133 event_attr_init(&attr);
Arnaldo Carvalho de Meloefd2b922012-09-18 11:21:50 -0300134 attr.config = evsel->tp_format->id;
Arnaldo Carvalho de Melo0b80f8b32012-09-26 12:28:26 -0300135 attr.sample_period = 1;
Arnaldo Carvalho de Meloefd2b922012-09-18 11:21:50 -0300136 perf_evsel__init(evsel, &attr, idx);
Arnaldo Carvalho de Meloefd2b922012-09-18 11:21:50 -0300137 }
138
139 return evsel;
140
141out_free:
Arnaldo Carvalho de Meloe48ffe22012-09-26 17:11:38 -0300142 free(evsel->name);
Arnaldo Carvalho de Meloefd2b922012-09-18 11:21:50 -0300143 free(evsel);
144 return NULL;
145}
146
Arnaldo Carvalho de Melo8ad70132012-09-06 13:11:18 -0300147const char *perf_evsel__hw_names[PERF_COUNT_HW_MAX] = {
Arnaldo Carvalho de Meloc4104312012-05-25 16:38:11 -0300148 "cycles",
149 "instructions",
150 "cache-references",
151 "cache-misses",
152 "branches",
153 "branch-misses",
154 "bus-cycles",
155 "stalled-cycles-frontend",
156 "stalled-cycles-backend",
157 "ref-cycles",
158};
159
Arnaldo Carvalho de Melodd4f5222012-06-13 15:52:42 -0300160static const char *__perf_evsel__hw_name(u64 config)
Arnaldo Carvalho de Meloc4104312012-05-25 16:38:11 -0300161{
162 if (config < PERF_COUNT_HW_MAX && perf_evsel__hw_names[config])
163 return perf_evsel__hw_names[config];
164
165 return "unknown-hardware";
166}
167
/*
 * Append the event modifier suffix (":ukhpGH...") for evsel to bf.
 *
 * The ':' separator is handled with a placeholder trick: the first time
 * a modifier character is emitted, "colon = ++r" advances r past one
 * reserved byte *before* the scnprintf, remembering its position.  Only
 * at the end, if any modifier was printed, is that reserved byte
 * overwritten with ':' via bf[colon - 1].  This avoids emitting a bare
 * ':' when no modifier applies.
 *
 * Returns the number of characters appended (including the reserved
 * colon byte when used).
 */
static int perf_evsel__add_modifiers(struct perf_evsel *evsel, char *bf, size_t size)
{
	int colon = 0, r = 0;
	struct perf_event_attr *attr = &evsel->attr;
	bool exclude_guest_default = false;

/* Emit 'mod' unless the corresponding exclude_* bit is set. */
#define MOD_PRINT(context, mod)	do {					\
		if (!attr->exclude_##context) {				\
			if (!colon) colon = ++r;			\
			r += scnprintf(bf + r, size - r, "%c", mod);	\
		} } while(0)

	/* Only print privilege modifiers when some level is excluded. */
	if (attr->exclude_kernel || attr->exclude_user || attr->exclude_hv) {
		MOD_PRINT(kernel, 'k');
		MOD_PRINT(user, 'u');
		MOD_PRINT(hv, 'h');
		exclude_guest_default = true;
	}

	/* precise_ip in [1,3] maps to "p", "pp" or "ppp". */
	if (attr->precise_ip) {
		if (!colon)
			colon = ++r;
		r += scnprintf(bf + r, size - r, "%.*s", attr->precise_ip, "ppp");
		exclude_guest_default = true;
	}

	if (attr->exclude_host || attr->exclude_guest == exclude_guest_default) {
		MOD_PRINT(host, 'H');
		MOD_PRINT(guest, 'G');
	}
#undef MOD_PRINT
	/* Backfill the reserved byte with the separator, if anything printed. */
	if (colon)
		bf[colon - 1] = ':';
	return r;
}
203
Arnaldo Carvalho de Melo27f18612012-06-11 13:33:09 -0300204static int perf_evsel__hw_name(struct perf_evsel *evsel, char *bf, size_t size)
205{
206 int r = scnprintf(bf, size, "%s", __perf_evsel__hw_name(evsel->attr.config));
207 return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
208}
209
Arnaldo Carvalho de Melo8ad70132012-09-06 13:11:18 -0300210const char *perf_evsel__sw_names[PERF_COUNT_SW_MAX] = {
Arnaldo Carvalho de Melo335c2f52012-06-11 14:36:20 -0300211 "cpu-clock",
212 "task-clock",
213 "page-faults",
214 "context-switches",
Arnaldo Carvalho de Melo8ad70132012-09-06 13:11:18 -0300215 "cpu-migrations",
Arnaldo Carvalho de Melo335c2f52012-06-11 14:36:20 -0300216 "minor-faults",
217 "major-faults",
218 "alignment-faults",
219 "emulation-faults",
220};
221
Arnaldo Carvalho de Melodd4f5222012-06-13 15:52:42 -0300222static const char *__perf_evsel__sw_name(u64 config)
Arnaldo Carvalho de Melo335c2f52012-06-11 14:36:20 -0300223{
224 if (config < PERF_COUNT_SW_MAX && perf_evsel__sw_names[config])
225 return perf_evsel__sw_names[config];
226 return "unknown-software";
227}
228
229static int perf_evsel__sw_name(struct perf_evsel *evsel, char *bf, size_t size)
230{
231 int r = scnprintf(bf, size, "%s", __perf_evsel__sw_name(evsel->attr.config));
232 return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
233}
234
Jiri Olsa287e74a2012-06-28 23:18:49 +0200235static int __perf_evsel__bp_name(char *bf, size_t size, u64 addr, u64 type)
236{
237 int r;
238
239 r = scnprintf(bf, size, "mem:0x%" PRIx64 ":", addr);
240
241 if (type & HW_BREAKPOINT_R)
242 r += scnprintf(bf + r, size - r, "r");
243
244 if (type & HW_BREAKPOINT_W)
245 r += scnprintf(bf + r, size - r, "w");
246
247 if (type & HW_BREAKPOINT_X)
248 r += scnprintf(bf + r, size - r, "x");
249
250 return r;
251}
252
253static int perf_evsel__bp_name(struct perf_evsel *evsel, char *bf, size_t size)
254{
255 struct perf_event_attr *attr = &evsel->attr;
256 int r = __perf_evsel__bp_name(bf, size, attr->bp_addr, attr->bp_type);
257 return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
258}
259
/*
 * Names and accepted aliases for the hardware cache event components,
 * indexed by PERF_COUNT_HW_CACHE_* / _OP_* / _RESULT_*.  Slot [x][0] is
 * the canonical spelling used when composing event names; the remaining
 * slots are parser aliases.  Note op slot [x][1] (e.g. "loads") is the
 * form used when the result component is omitted.
 */
const char *perf_evsel__hw_cache[PERF_COUNT_HW_CACHE_MAX]
				[PERF_EVSEL__MAX_ALIASES] = {
 { "L1-dcache",	"l1-d",		"l1d",		"L1-data",		},
 { "L1-icache",	"l1-i",		"l1i",		"L1-instruction",	},
 { "LLC",	"L2",							},
 { "dTLB",	"d-tlb",	"Data-TLB",				},
 { "iTLB",	"i-tlb",	"Instruction-TLB",			},
 { "branch",	"branches",	"bpu",		"btb",		"bpc",	},
 { "node",								},
};

const char *perf_evsel__hw_cache_op[PERF_COUNT_HW_CACHE_OP_MAX]
				   [PERF_EVSEL__MAX_ALIASES] = {
 { "load",	"loads",	"read",					},
 { "store",	"stores",	"write",				},
 { "prefetch",	"prefetches",	"speculative-read", "speculative-load",	},
};

const char *perf_evsel__hw_cache_result[PERF_COUNT_HW_CACHE_RESULT_MAX]
				       [PERF_EVSEL__MAX_ALIASES] = {
 { "refs",	"Reference",	"ops",		"access",		},
 { "misses",	"miss",						        },
};
283
/* Shorthand for the hw-cache enum and per-op validity bit helpers. */
#define C(x)		PERF_COUNT_HW_CACHE_##x
#define CACHE_READ	(1 << C(OP_READ))
#define CACHE_WRITE	(1 << C(OP_WRITE))
#define CACHE_PREFETCH	(1 << C(OP_PREFETCH))
#define COP(x)		(1 << x)

/*
 * cache operation stat
 * L1I : Read and prefetch only
 * ITLB and BPU : Read-only
 *
 * Bitmask of which operations each cache supports; consulted by
 * perf_evsel__is_cache_op_valid().
 */
static unsigned long perf_evsel__hw_cache_stat[C(MAX)] = {
 [C(L1D)]	= (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
 [C(L1I)]	= (CACHE_READ | CACHE_PREFETCH),
 [C(LL)]	= (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
 [C(DTLB)]	= (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
 [C(ITLB)]	= (CACHE_READ),
 [C(BPU)]	= (CACHE_READ),
 [C(NODE)]	= (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
};
304
305bool perf_evsel__is_cache_op_valid(u8 type, u8 op)
306{
307 if (perf_evsel__hw_cache_stat[type] & COP(op))
308 return true; /* valid */
309 else
310 return false; /* invalid */
311}
312
313int __perf_evsel__hw_cache_type_op_res_name(u8 type, u8 op, u8 result,
314 char *bf, size_t size)
315{
316 if (result) {
317 return scnprintf(bf, size, "%s-%s-%s", perf_evsel__hw_cache[type][0],
318 perf_evsel__hw_cache_op[op][0],
319 perf_evsel__hw_cache_result[result][0]);
320 }
321
322 return scnprintf(bf, size, "%s-%s", perf_evsel__hw_cache[type][0],
323 perf_evsel__hw_cache_op[op][1]);
324}
325
Arnaldo Carvalho de Melodd4f5222012-06-13 15:52:42 -0300326static int __perf_evsel__hw_cache_name(u64 config, char *bf, size_t size)
Arnaldo Carvalho de Melo0b668bc2012-06-11 14:08:07 -0300327{
328 u8 op, result, type = (config >> 0) & 0xff;
329 const char *err = "unknown-ext-hardware-cache-type";
330
331 if (type > PERF_COUNT_HW_CACHE_MAX)
332 goto out_err;
333
334 op = (config >> 8) & 0xff;
335 err = "unknown-ext-hardware-cache-op";
336 if (op > PERF_COUNT_HW_CACHE_OP_MAX)
337 goto out_err;
338
339 result = (config >> 16) & 0xff;
340 err = "unknown-ext-hardware-cache-result";
341 if (result > PERF_COUNT_HW_CACHE_RESULT_MAX)
342 goto out_err;
343
344 err = "invalid-cache";
345 if (!perf_evsel__is_cache_op_valid(type, op))
346 goto out_err;
347
348 return __perf_evsel__hw_cache_type_op_res_name(type, op, result, bf, size);
349out_err:
350 return scnprintf(bf, size, "%s", err);
351}
352
353static int perf_evsel__hw_cache_name(struct perf_evsel *evsel, char *bf, size_t size)
354{
355 int ret = __perf_evsel__hw_cache_name(evsel->attr.config, bf, size);
356 return ret + perf_evsel__add_modifiers(evsel, bf + ret, size - ret);
357}
358
Arnaldo Carvalho de Melo6eef3d92012-06-13 11:53:37 -0300359static int perf_evsel__raw_name(struct perf_evsel *evsel, char *bf, size_t size)
360{
361 int ret = scnprintf(bf, size, "raw 0x%" PRIx64, evsel->attr.config);
362 return ret + perf_evsel__add_modifiers(evsel, bf + ret, size - ret);
363}
364
Arnaldo Carvalho de Melo7289f832012-06-12 12:34:58 -0300365const char *perf_evsel__name(struct perf_evsel *evsel)
Arnaldo Carvalho de Meloa4460832012-06-12 10:29:12 -0300366{
Arnaldo Carvalho de Melo7289f832012-06-12 12:34:58 -0300367 char bf[128];
Arnaldo Carvalho de Meloa4460832012-06-12 10:29:12 -0300368
Arnaldo Carvalho de Melo7289f832012-06-12 12:34:58 -0300369 if (evsel->name)
370 return evsel->name;
Arnaldo Carvalho de Meloc4104312012-05-25 16:38:11 -0300371
372 switch (evsel->attr.type) {
373 case PERF_TYPE_RAW:
Arnaldo Carvalho de Melo6eef3d92012-06-13 11:53:37 -0300374 perf_evsel__raw_name(evsel, bf, sizeof(bf));
Arnaldo Carvalho de Meloc4104312012-05-25 16:38:11 -0300375 break;
376
377 case PERF_TYPE_HARDWARE:
Arnaldo Carvalho de Melo7289f832012-06-12 12:34:58 -0300378 perf_evsel__hw_name(evsel, bf, sizeof(bf));
Arnaldo Carvalho de Meloc4104312012-05-25 16:38:11 -0300379 break;
Arnaldo Carvalho de Melo0b668bc2012-06-11 14:08:07 -0300380
381 case PERF_TYPE_HW_CACHE:
Arnaldo Carvalho de Melo7289f832012-06-12 12:34:58 -0300382 perf_evsel__hw_cache_name(evsel, bf, sizeof(bf));
Arnaldo Carvalho de Melo0b668bc2012-06-11 14:08:07 -0300383 break;
384
Arnaldo Carvalho de Melo335c2f52012-06-11 14:36:20 -0300385 case PERF_TYPE_SOFTWARE:
Arnaldo Carvalho de Melo7289f832012-06-12 12:34:58 -0300386 perf_evsel__sw_name(evsel, bf, sizeof(bf));
Arnaldo Carvalho de Melo335c2f52012-06-11 14:36:20 -0300387 break;
388
Arnaldo Carvalho de Meloa4460832012-06-12 10:29:12 -0300389 case PERF_TYPE_TRACEPOINT:
Arnaldo Carvalho de Melo7289f832012-06-12 12:34:58 -0300390 scnprintf(bf, sizeof(bf), "%s", "unknown tracepoint");
Arnaldo Carvalho de Meloa4460832012-06-12 10:29:12 -0300391 break;
392
Jiri Olsa287e74a2012-06-28 23:18:49 +0200393 case PERF_TYPE_BREAKPOINT:
394 perf_evsel__bp_name(evsel, bf, sizeof(bf));
395 break;
396
Arnaldo Carvalho de Meloc4104312012-05-25 16:38:11 -0300397 default:
Robert Richterca1b1452012-08-16 21:10:18 +0200398 scnprintf(bf, sizeof(bf), "unknown attr type: %d",
399 evsel->attr.type);
Arnaldo Carvalho de Meloa4460832012-06-12 10:29:12 -0300400 break;
Arnaldo Carvalho de Meloc4104312012-05-25 16:38:11 -0300401 }
402
Arnaldo Carvalho de Melo7289f832012-06-12 12:34:58 -0300403 evsel->name = strdup(bf);
404
405 return evsel->name ?: "unknown";
Arnaldo Carvalho de Meloc4104312012-05-25 16:38:11 -0300406}
407
Jiri Olsa774cb492012-11-12 18:34:01 +0100408/*
409 * The enable_on_exec/disabled value strategy:
410 *
411 * 1) For any type of traced program:
412 * - all independent events and group leaders are disabled
413 * - all group members are enabled
414 *
415 * Group members are ruled by group leaders. They need to
416 * be enabled, because the group scheduling relies on that.
417 *
418 * 2) For traced programs executed by perf:
419 * - all independent events and group leaders have
420 * enable_on_exec set
421 * - we don't specifically enable or disable any event during
422 * the record command
423 *
424 * Independent events and group leaders are initially disabled
425 * and get enabled by exec. Group members are ruled by group
426 * leaders as stated in 1).
427 *
428 * 3) For traced programs attached by perf (pid/tid):
429 * - we specifically enable or disable all events during
430 * the record command
431 *
 * When attaching events to an already running traced process we
 * enable/disable events specifically, as there's no
 * initial traced exec call.
435 */
/*
 * Fill in evsel->attr from the record options: sample_type bits,
 * sampling period/frequency, tracking events and the disabled/
 * enable_on_exec strategy described in the comment above.  Order
 * matters: later clauses OR more bits into attr->sample_type.
 */
void perf_evsel__config(struct perf_evsel *evsel,
			struct perf_record_opts *opts)
{
	struct perf_event_attr *attr = &evsel->attr;
	int track = !evsel->idx; /* only the first counter needs these */

	attr->sample_id_all = opts->sample_id_all_missing ? 0 : 1;
	attr->inherit	    = !opts->no_inherit;
	attr->read_format   = PERF_FORMAT_TOTAL_TIME_ENABLED |
			      PERF_FORMAT_TOTAL_TIME_RUNNING |
			      PERF_FORMAT_ID;

	/* Every sample carries at least the IP and the pid/tid. */
	attr->sample_type  |= PERF_SAMPLE_IP | PERF_SAMPLE_TID;

	/*
	 * We default some events to a 1 default interval. But keep
	 * it a weak assumption overridable by the user.
	 */
	if (!attr->sample_period || (opts->user_freq != UINT_MAX &&
				     opts->user_interval != ULLONG_MAX)) {
		if (opts->freq) {
			/* Frequency mode: kernel adjusts the period. */
			attr->sample_type	|= PERF_SAMPLE_PERIOD;
			attr->freq		= 1;
			attr->sample_freq	= opts->freq;
		} else {
			attr->sample_period = opts->default_interval;
		}
	}

	if (opts->no_samples)
		attr->sample_freq = 0;

	if (opts->inherit_stat)
		attr->inherit_stat = 1;

	if (opts->sample_address) {
		attr->sample_type	|= PERF_SAMPLE_ADDR;
		attr->mmap_data = track;
	}

	if (opts->call_graph) {
		attr->sample_type	|= PERF_SAMPLE_CALLCHAIN;

		/* DWARF unwinding needs user regs and a stack snapshot. */
		if (opts->call_graph == CALLCHAIN_DWARF) {
			attr->sample_type |= PERF_SAMPLE_REGS_USER |
						PERF_SAMPLE_STACK_USER;
			attr->sample_regs_user = PERF_REGS_MASK;
			attr->sample_stack_user = opts->stack_dump_size;
			attr->exclude_callchain_user = 1;
		}
	}

	if (perf_target__has_cpu(&opts->target))
		attr->sample_type	|= PERF_SAMPLE_CPU;

	if (opts->period)
		attr->sample_type	|= PERF_SAMPLE_PERIOD;

	if (!opts->sample_id_all_missing &&
	    (opts->sample_time || !opts->no_inherit ||
	     perf_target__has_cpu(&opts->target)))
		attr->sample_type	|= PERF_SAMPLE_TIME;

	if (opts->raw_samples) {
		attr->sample_type	|= PERF_SAMPLE_TIME;
		attr->sample_type	|= PERF_SAMPLE_RAW;
		attr->sample_type	|= PERF_SAMPLE_CPU;
	}

	/* Wake the reader on every event instead of buffering. */
	if (opts->no_delay) {
		attr->watermark = 0;
		attr->wakeup_events = 1;
	}
	if (opts->branch_stack) {
		attr->sample_type	|= PERF_SAMPLE_BRANCH_STACK;
		attr->branch_sample_type = opts->branch_stack;
	}

	/* Side-band mmap/comm events, only needed from one counter. */
	attr->mmap = track;
	attr->comm = track;

	/*
	 * XXX see the function comment above
	 *
	 * Disabling only independent events or group leaders,
	 * keeping group members enabled.
	 */
	if (perf_evsel__is_group_leader(evsel))
		attr->disabled = 1;

	/*
	 * Setting enable_on_exec for independent events and
	 * group leaders of programs executed by perf.
	 */
	if (perf_target__none(&opts->target) && perf_evsel__is_group_leader(evsel))
		attr->enable_on_exec = 1;
}
533
Arnaldo Carvalho de Melo69aad6f2011-01-03 16:39:04 -0200534int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
535{
David Ahern4af4c952011-05-27 09:58:34 -0600536 int cpu, thread;
Arnaldo Carvalho de Melo69aad6f2011-01-03 16:39:04 -0200537 evsel->fd = xyarray__new(ncpus, nthreads, sizeof(int));
David Ahern4af4c952011-05-27 09:58:34 -0600538
539 if (evsel->fd) {
540 for (cpu = 0; cpu < ncpus; cpu++) {
541 for (thread = 0; thread < nthreads; thread++) {
542 FD(evsel, cpu, thread) = -1;
543 }
544 }
545 }
546
Arnaldo Carvalho de Melo69aad6f2011-01-03 16:39:04 -0200547 return evsel->fd != NULL ? 0 : -ENOMEM;
548}
549
Arnaldo Carvalho de Melo745cefc2012-09-26 15:07:39 -0300550int perf_evsel__set_filter(struct perf_evsel *evsel, int ncpus, int nthreads,
551 const char *filter)
552{
553 int cpu, thread;
554
555 for (cpu = 0; cpu < ncpus; cpu++) {
556 for (thread = 0; thread < nthreads; thread++) {
557 int fd = FD(evsel, cpu, thread),
558 err = ioctl(fd, PERF_EVENT_IOC_SET_FILTER, filter);
559
560 if (err)
561 return err;
562 }
563 }
564
565 return 0;
566}
567
Arnaldo Carvalho de Melo70db7532011-01-12 22:39:13 -0200568int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads)
569{
Arnaldo Carvalho de Meloa91e5432011-03-10 11:15:54 -0300570 evsel->sample_id = xyarray__new(ncpus, nthreads, sizeof(struct perf_sample_id));
571 if (evsel->sample_id == NULL)
572 return -ENOMEM;
573
574 evsel->id = zalloc(ncpus * nthreads * sizeof(u64));
575 if (evsel->id == NULL) {
576 xyarray__delete(evsel->sample_id);
577 evsel->sample_id = NULL;
578 return -ENOMEM;
579 }
580
581 return 0;
Arnaldo Carvalho de Melo70db7532011-01-12 22:39:13 -0200582}
583
Arnaldo Carvalho de Meloc52b12e2011-01-03 17:45:52 -0200584int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus)
585{
586 evsel->counts = zalloc((sizeof(*evsel->counts) +
587 (ncpus * sizeof(struct perf_counts_values))));
588 return evsel->counts != NULL ? 0 : -ENOMEM;
589}
590
Arnaldo Carvalho de Melo69aad6f2011-01-03 16:39:04 -0200591void perf_evsel__free_fd(struct perf_evsel *evsel)
592{
593 xyarray__delete(evsel->fd);
594 evsel->fd = NULL;
595}
596
Arnaldo Carvalho de Melo70db7532011-01-12 22:39:13 -0200597void perf_evsel__free_id(struct perf_evsel *evsel)
598{
Arnaldo Carvalho de Meloa91e5432011-03-10 11:15:54 -0300599 xyarray__delete(evsel->sample_id);
600 evsel->sample_id = NULL;
601 free(evsel->id);
Arnaldo Carvalho de Melo70db7532011-01-12 22:39:13 -0200602 evsel->id = NULL;
603}
604
Arnaldo Carvalho de Meloc52b12e2011-01-03 17:45:52 -0200605void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
606{
607 int cpu, thread;
608
609 for (cpu = 0; cpu < ncpus; cpu++)
610 for (thread = 0; thread < nthreads; ++thread) {
611 close(FD(evsel, cpu, thread));
612 FD(evsel, cpu, thread) = -1;
613 }
614}
615
Arnaldo Carvalho de Meloef1d1af2011-01-18 21:41:45 -0200616void perf_evsel__exit(struct perf_evsel *evsel)
Arnaldo Carvalho de Melo69aad6f2011-01-03 16:39:04 -0200617{
618 assert(list_empty(&evsel->node));
619 xyarray__delete(evsel->fd);
Arnaldo Carvalho de Meloa91e5432011-03-10 11:15:54 -0300620 xyarray__delete(evsel->sample_id);
621 free(evsel->id);
Arnaldo Carvalho de Meloef1d1af2011-01-18 21:41:45 -0200622}
623
624void perf_evsel__delete(struct perf_evsel *evsel)
625{
626 perf_evsel__exit(evsel);
Stephane Eranian023695d2011-02-14 11:20:01 +0200627 close_cgroup(evsel->cgrp);
Jiri Olsa6a4bb042012-08-08 12:22:36 +0200628 free(evsel->group_name);
Arnaldo Carvalho de Meloe48ffe22012-09-26 17:11:38 -0300629 if (evsel->tp_format)
Arnaldo Carvalho de Meloefd2b922012-09-18 11:21:50 -0300630 pevent_free_format(evsel->tp_format);
Stephane Eranianf0c55bc2011-02-16 15:10:01 +0200631 free(evsel->name);
Arnaldo Carvalho de Melo69aad6f2011-01-03 16:39:04 -0200632 free(evsel);
633}
Arnaldo Carvalho de Meloc52b12e2011-01-03 17:45:52 -0200634
/*
 * Read one counter value for (cpu, thread) into evsel->counts->cpu[cpu].
 *
 * With scale, three u64s are read (value, time enabled, time running)
 * and the value is scaled up by ena/run to compensate for counter
 * multiplexing; otherwise a single u64 is read and ena/run are zeroed.
 *
 * Returns 0, -EINVAL if the fd was never opened, -ENOMEM if counter
 * storage cannot be allocated, or -errno from the read.
 */
int __perf_evsel__read_on_cpu(struct perf_evsel *evsel,
			      int cpu, int thread, bool scale)
{
	struct perf_counts_values count;
	size_t nv = scale ? 3 : 1;

	if (FD(evsel, cpu, thread) < 0)
		return -EINVAL;

	/* Lazily grow counter storage to cover this cpu index. */
	if (evsel->counts == NULL && perf_evsel__alloc_counts(evsel, cpu + 1) < 0)
		return -ENOMEM;

	if (readn(FD(evsel, cpu, thread), &count, nv * sizeof(u64)) < 0)
		return -errno;

	if (scale) {
		if (count.run == 0)
			count.val = 0;	/* never scheduled: no estimate possible */
		else if (count.run < count.ena)
			/* extrapolate, rounding to nearest */
			count.val = (u64)((double)count.val * count.ena / count.run + 0.5);
	} else
		count.ena = count.run = 0;

	evsel->counts->cpu[cpu] = count;
	return 0;
}
661
/*
 * Read and aggregate this evsel's counter across all cpus and threads
 * into evsel->counts->aggr, skipping fds that were never opened.
 *
 * With scale, ena/run times are summed too and the aggregate value is
 * extrapolated by ena/run; counts->scaled records the outcome:
 * -1 = counter never ran, 1 = value was scaled, 0 = exact.
 *
 * Returns 0 on success or -errno from a failed read.
 */
int __perf_evsel__read(struct perf_evsel *evsel,
		       int ncpus, int nthreads, bool scale)
{
	size_t nv = scale ? 3 : 1;
	int cpu, thread;
	struct perf_counts_values *aggr = &evsel->counts->aggr, count;

	aggr->val = aggr->ena = aggr->run = 0;

	for (cpu = 0; cpu < ncpus; cpu++) {
		for (thread = 0; thread < nthreads; thread++) {
			if (FD(evsel, cpu, thread) < 0)
				continue;

			if (readn(FD(evsel, cpu, thread),
				  &count, nv * sizeof(u64)) < 0)
				return -errno;

			aggr->val += count.val;
			if (scale) {
				aggr->ena += count.ena;
				aggr->run += count.run;
			}
		}
	}

	evsel->counts->scaled = 0;
	if (scale) {
		if (aggr->run == 0) {
			/* counter never got scheduled at all */
			evsel->counts->scaled = -1;
			aggr->val = 0;
			return 0;
		}

		if (aggr->run < aggr->ena) {
			/* multiplexed: extrapolate, rounding to nearest */
			evsel->counts->scaled = 1;
			aggr->val = (u64)((double)aggr->val * aggr->ena / aggr->run + 0.5);
		}
	} else
		aggr->ena = aggr->run = 0;

	return 0;
}
Arnaldo Carvalho de Melo48290602011-01-03 17:48:12 -0200705
Jiri Olsa6a4bb042012-08-08 12:22:36 +0200706static int get_group_fd(struct perf_evsel *evsel, int cpu, int thread)
707{
708 struct perf_evsel *leader = evsel->leader;
709 int fd;
710
Namhyung Kim823254e2012-11-29 15:38:30 +0900711 if (perf_evsel__is_group_leader(evsel))
Jiri Olsa6a4bb042012-08-08 12:22:36 +0200712 return -1;
713
714 /*
715 * Leader must be already processed/open,
716 * if not it's a bug.
717 */
718 BUG_ON(!leader->fd);
719
720 fd = FD(leader, cpu, thread);
721 BUG_ON(fd == -1);
722
723 return fd;
724}
725
/*
 * Open one perf event fd per (cpu, thread) pair via
 * sys_perf_event_open(), storing them in the evsel's fd matrix.
 * With a cgroup, the cgroup fd replaces the pid and PERF_FLAG_PID_CGROUP
 * is passed.  On any failure all fds opened so far are closed and the
 * negative errno of the failing open is returned.
 */
static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
			      struct thread_map *threads)
{
	int cpu, thread;
	unsigned long flags = 0;
	int pid = -1, err;

	/* Lazily allocate the fd matrix on first open. */
	if (evsel->fd == NULL &&
	    perf_evsel__alloc_fd(evsel, cpus->nr, threads->nr) < 0)
		return -ENOMEM;

	if (evsel->cgrp) {
		flags = PERF_FLAG_PID_CGROUP;
		pid = evsel->cgrp->fd;
	}

	for (cpu = 0; cpu < cpus->nr; cpu++) {

		for (thread = 0; thread < threads->nr; thread++) {
			int group_fd;

			if (!evsel->cgrp)
				pid = threads->map[thread];

			group_fd = get_group_fd(evsel, cpu, thread);

			FD(evsel, cpu, thread) = sys_perf_event_open(&evsel->attr,
								     pid,
								     cpus->map[cpu],
								     group_fd, flags);
			if (FD(evsel, cpu, thread) < 0) {
				err = -errno;
				goto out_close;
			}
		}
	}

	return 0;

out_close:
	/*
	 * Unwind: close the partial row for the failing cpu (threads
	 * [0, thread)), then every full row for the preceding cpus.
	 */
	do {
		while (--thread >= 0) {
			close(FD(evsel, cpu, thread));
			FD(evsel, cpu, thread) = -1;
		}
		thread = threads->nr;
	} while (--cpu >= 0);
	return err;
}
775
776void perf_evsel__close(struct perf_evsel *evsel, int ncpus, int nthreads)
777{
778 if (evsel->fd == NULL)
779 return;
780
781 perf_evsel__close_fd(evsel, ncpus, nthreads);
782 perf_evsel__free_fd(evsel);
783 evsel->fd = NULL;
Arnaldo Carvalho de Melo48290602011-01-03 17:48:12 -0200784}
785
/*
 * Fallback maps used when the caller passes NULL cpus/threads to
 * perf_evsel__open(): a single entry holding -1, which
 * sys_perf_event_open() interprets as "any CPU" / "this thread".
 * The wrapper structs provide storage for the trailing array of
 * cpu_map/thread_map without a heap allocation.
 */
static struct {
	struct cpu_map map;
	int cpus[1];
} empty_cpu_map = {
	.map.nr	= 1,
	.cpus	= { -1, },
};

static struct {
	struct thread_map map;
	int threads[1];
} empty_thread_map = {
	.map.nr	 = 1,
	.threads = { -1, },
};
801
Arnaldo Carvalho de Melof08199d2011-01-11 23:42:19 -0200802int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
Jiri Olsa6a4bb042012-08-08 12:22:36 +0200803 struct thread_map *threads)
Arnaldo Carvalho de Melo02522082011-01-04 11:55:27 -0200804{
Arnaldo Carvalho de Melo02522082011-01-04 11:55:27 -0200805 if (cpus == NULL) {
806 /* Work around old compiler warnings about strict aliasing */
807 cpus = &empty_cpu_map.map;
808 }
809
810 if (threads == NULL)
811 threads = &empty_thread_map.map;
812
Jiri Olsa6a4bb042012-08-08 12:22:36 +0200813 return __perf_evsel__open(evsel, cpus, threads);
Arnaldo Carvalho de Melo02522082011-01-04 11:55:27 -0200814}
815
Arnaldo Carvalho de Melof08199d2011-01-11 23:42:19 -0200816int perf_evsel__open_per_cpu(struct perf_evsel *evsel,
Jiri Olsa6a4bb042012-08-08 12:22:36 +0200817 struct cpu_map *cpus)
Arnaldo Carvalho de Melo02522082011-01-04 11:55:27 -0200818{
Jiri Olsa6a4bb042012-08-08 12:22:36 +0200819 return __perf_evsel__open(evsel, cpus, &empty_thread_map.map);
Arnaldo Carvalho de Melo02522082011-01-04 11:55:27 -0200820}
821
Arnaldo Carvalho de Melof08199d2011-01-11 23:42:19 -0200822int perf_evsel__open_per_thread(struct perf_evsel *evsel,
Jiri Olsa6a4bb042012-08-08 12:22:36 +0200823 struct thread_map *threads)
Arnaldo Carvalho de Melo48290602011-01-03 17:48:12 -0200824{
Jiri Olsa6a4bb042012-08-08 12:22:36 +0200825 return __perf_evsel__open(evsel, &empty_cpu_map.map, threads);
Arnaldo Carvalho de Melo48290602011-01-03 17:48:12 -0200826}
Arnaldo Carvalho de Melo70082dd2011-01-12 17:03:24 -0200827
/*
 * Parse the sample_id_all trailer that the kernel appends to
 * non-PERF_RECORD_SAMPLE events when attr.sample_id_all is set.
 * The selected fields are laid out at the *end* of the event in
 * reverse order, so we point at the last u64 of the payload and walk
 * backwards, consuming only the fields enabled in attr.sample_type.
 *
 * Returns 0 (the trailer has a fixed layout, nothing can fail here).
 */
static int perf_evsel__parse_id_sample(const struct perf_evsel *evsel,
				       const union perf_event *event,
				       struct perf_sample *sample)
{
	u64 type = evsel->attr.sample_type;
	const u64 *array = event->sample.array;
	bool swapped = evsel->needs_swap;
	union u64_swap u;	/* for fixing up u32 pairs on cross-endian data */

	/* Step to the last u64 of the event payload. */
	array += ((event->header.size -
		   sizeof(event->header)) / sizeof(u64)) - 1;

	if (type & PERF_SAMPLE_CPU) {
		/* cpu is a u32 pair {cpu, res} packed in one u64 */
		u.val64 = *array;
		if (swapped) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
		}

		sample->cpu = u.val32[0];
		array--;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		sample->stream_id = *array;
		array--;
	}

	if (type & PERF_SAMPLE_ID) {
		sample->id = *array;
		array--;
	}

	if (type & PERF_SAMPLE_TIME) {
		sample->time = *array;
		array--;
	}

	if (type & PERF_SAMPLE_TID) {
		/* tid is a u32 pair {pid, tid} packed in one u64 */
		u.val64 = *array;
		if (swapped) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
		}

		sample->pid = u.val32[0];
		sample->tid = u.val32[1];
	}

	return 0;
}
882
Frederic Weisbecker98e1da92011-05-21 20:08:15 +0200883static bool sample_overlap(const union perf_event *event,
884 const void *offset, u64 size)
885{
886 const void *base = event;
887
888 if (offset + size > base + event->header.size)
889 return true;
890
891 return false;
892}
893
/*
 * Decode a PERF_RECORD_SAMPLE event into @data, walking the u64 array
 * in the exact order the kernel emits the fields selected by
 * attr.sample_type.  For non-sample events, defer to the sample_id_all
 * trailer parser instead.
 *
 * Returns 0 on success, -EFAULT when a variable-length field would
 * overrun the event, and -1 for unsupported sample types.
 */
int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
			     struct perf_sample *data)
{
	u64 type = evsel->attr.sample_type;
	u64 regs_user = evsel->attr.sample_regs_user;
	bool swapped = evsel->needs_swap;
	const u64 *array;

	/*
	 * used for cross-endian analysis. See git commit 65014ab3
	 * for why this goofiness is needed.
	 */
	union u64_swap u;

	/* Defaults for fields the event may not carry. */
	memset(data, 0, sizeof(*data));
	data->cpu = data->pid = data->tid = -1;
	data->stream_id = data->id = data->time = -1ULL;
	data->period = 1;

	if (event->header.type != PERF_RECORD_SAMPLE) {
		if (!evsel->attr.sample_id_all)
			return 0;
		/* sample_id_all fields live in a trailer, parsed backwards */
		return perf_evsel__parse_id_sample(evsel, event, data);
	}

	array = event->sample.array;

	/*
	 * evsel->sample_size is the size of the fixed-width fields; if
	 * the event can't even hold those, it is malformed.
	 */
	if (evsel->sample_size + sizeof(event->header) > event->header.size)
		return -EFAULT;

	if (type & PERF_SAMPLE_IP) {
		data->ip = event->ip.ip;
		array++;
	}

	if (type & PERF_SAMPLE_TID) {
		/* one u64 packing the {pid, tid} u32 pair */
		u.val64 = *array;
		if (swapped) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
		}

		data->pid = u.val32[0];
		data->tid = u.val32[1];
		array++;
	}

	if (type & PERF_SAMPLE_TIME) {
		data->time = *array;
		array++;
	}

	data->addr = 0;
	if (type & PERF_SAMPLE_ADDR) {
		data->addr = *array;
		array++;
	}

	data->id = -1ULL;
	if (type & PERF_SAMPLE_ID) {
		data->id = *array;
		array++;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		data->stream_id = *array;
		array++;
	}

	if (type & PERF_SAMPLE_CPU) {
		/* one u64 packing the {cpu, res} u32 pair */

		u.val64 = *array;
		if (swapped) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
		}

		data->cpu = u.val32[0];
		array++;
	}

	if (type & PERF_SAMPLE_PERIOD) {
		data->period = *array;
		array++;
	}

	if (type & PERF_SAMPLE_READ) {
		fprintf(stderr, "PERF_SAMPLE_READ is unsupported for now\n");
		return -1;
	}

	if (type & PERF_SAMPLE_CALLCHAIN) {
		/* first u64 is nr, followed by nr u64 ips; bounds-check both */
		if (sample_overlap(event, array, sizeof(data->callchain->nr)))
			return -EFAULT;

		data->callchain = (struct ip_callchain *)array;

		if (sample_overlap(event, array, data->callchain->nr))
			return -EFAULT;

		array += 1 + data->callchain->nr;
	}

	if (type & PERF_SAMPLE_RAW) {
		const u64 *pdata;

		/* raw data starts with a u32 size in the low half of a u64 */
		u.val64 = *array;
		if (WARN_ONCE(swapped,
			      "Endianness of raw data not corrected!\n")) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
		}

		if (sample_overlap(event, array, sizeof(u32)))
			return -EFAULT;

		data->raw_size = u.val32[0];
		/* payload follows immediately after the u32 size */
		pdata = (void *) array + sizeof(u32);

		if (sample_overlap(event, pdata, data->raw_size))
			return -EFAULT;

		data->raw_data = (void *) pdata;

		/* skip size + payload; the kernel pads this to u64 alignment */
		array = (void *)array + data->raw_size + sizeof(u32);
	}

	if (type & PERF_SAMPLE_BRANCH_STACK) {
		/*
		 * NOTE(review): data->branch_stack->nr is not bounds-checked
		 * against the event size before being used to advance array —
		 * confirm callers only feed kernel-generated events.
		 */
		u64 sz;

		data->branch_stack = (struct branch_stack *)array;
		array++; /* nr */

		sz = data->branch_stack->nr * sizeof(struct branch_entry);
		sz /= sizeof(u64);
		array += sz;
	}

	if (type & PERF_SAMPLE_REGS_USER) {
		/* First u64 tells us if we have any regs in sample. */
		u64 avail = *array++;

		if (avail) {
			/* one u64 per bit set in attr.sample_regs_user */
			data->user_regs.regs = (u64 *)array;
			array += hweight_long(regs_user);
		}
	}

	if (type & PERF_SAMPLE_STACK_USER) {
		/* u64 dump size, then the dump, then the u64 dynamic size */
		u64 size = *array++;

		data->user_stack.offset = ((char *)(array - 1)
					  - (char *) event);

		if (!size) {
			data->user_stack.size = 0;
		} else {
			data->user_stack.data = (char *)array;
			array += size / sizeof(*array);
			/* trailing u64: bytes of the dump actually used */
			data->user_stack.size = *array;
		}
	}

	return 0;
}
Andrew Vagin74eec262011-11-28 12:03:31 +03001064
/*
 * Inverse of perf_evsel__parse_sample for the fixed-width fields:
 * write the fields of @sample selected by @type into @event's u64
 * array, in the same order the kernel would have emitted them.  When
 * @swapped, byte-swap each value so the result matches the recorded
 * (foreign) endianness.
 *
 * Only handles fields up to and including PERF_SAMPLE_PERIOD; the
 * variable-length fields (callchain, raw, ...) are not synthesized.
 * Always returns 0.
 */
int perf_event__synthesize_sample(union perf_event *event, u64 type,
				  const struct perf_sample *sample,
				  bool swapped)
{
	u64 *array;

	/*
	 * used for cross-endian analysis. See git commit 65014ab3
	 * for why this goofiness is needed.
	 */
	union u64_swap u;

	array = event->sample.array;

	if (type & PERF_SAMPLE_IP) {
		event->ip.ip = sample->ip;
		array++;
	}

	if (type & PERF_SAMPLE_TID) {
		/* pack the {pid, tid} u32 pair into one u64 */
		u.val32[0] = sample->pid;
		u.val32[1] = sample->tid;
		if (swapped) {
			/*
			 * Inverse of what is done in perf_evsel__parse_sample
			 */
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
			u.val64 = bswap_64(u.val64);
		}

		*array = u.val64;
		array++;
	}

	if (type & PERF_SAMPLE_TIME) {
		*array = sample->time;
		array++;
	}

	if (type & PERF_SAMPLE_ADDR) {
		*array = sample->addr;
		array++;
	}

	if (type & PERF_SAMPLE_ID) {
		*array = sample->id;
		array++;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		*array = sample->stream_id;
		array++;
	}

	if (type & PERF_SAMPLE_CPU) {
		/* cpu occupies the low u32 of its u64 slot */
		u.val32[0] = sample->cpu;
		if (swapped) {
			/*
			 * Inverse of what is done in perf_evsel__parse_sample
			 */
			u.val32[0] = bswap_32(u.val32[0]);
			u.val64 = bswap_64(u.val64);
		}
		*array = u.val64;
		array++;
	}

	if (type & PERF_SAMPLE_PERIOD) {
		*array = sample->period;
		array++;
	}

	return 0;
}
Arnaldo Carvalho de Melo5555ded2012-09-11 19:24:23 -03001140
/*
 * Look up the named field in this evsel's tracepoint format
 * description (evsel->tp_format).  Returns NULL if the field does not
 * exist.
 */
struct format_field *perf_evsel__field(struct perf_evsel *evsel, const char *name)
{
	return pevent_find_field(evsel->tp_format, name);
}
1145
Arnaldo Carvalho de Melo5d2074e2012-09-26 20:22:00 -03001146void *perf_evsel__rawptr(struct perf_evsel *evsel, struct perf_sample *sample,
Arnaldo Carvalho de Melo5555ded2012-09-11 19:24:23 -03001147 const char *name)
1148{
Arnaldo Carvalho de Meloefd2b922012-09-18 11:21:50 -03001149 struct format_field *field = perf_evsel__field(evsel, name);
Arnaldo Carvalho de Melo5555ded2012-09-11 19:24:23 -03001150 int offset;
1151
Arnaldo Carvalho de Meloefd2b922012-09-18 11:21:50 -03001152 if (!field)
1153 return NULL;
Arnaldo Carvalho de Melo5555ded2012-09-11 19:24:23 -03001154
1155 offset = field->offset;
1156
1157 if (field->flags & FIELD_IS_DYNAMIC) {
1158 offset = *(int *)(sample->raw_data + field->offset);
1159 offset &= 0xffff;
1160 }
1161
1162 return sample->raw_data + offset;
1163}
1164
1165u64 perf_evsel__intval(struct perf_evsel *evsel, struct perf_sample *sample,
1166 const char *name)
1167{
Arnaldo Carvalho de Meloefd2b922012-09-18 11:21:50 -03001168 struct format_field *field = perf_evsel__field(evsel, name);
Arnaldo Carvalho de Meloe6b6f672012-09-26 13:13:04 -03001169 void *ptr;
1170 u64 value;
Arnaldo Carvalho de Melo5555ded2012-09-11 19:24:23 -03001171
Arnaldo Carvalho de Meloefd2b922012-09-18 11:21:50 -03001172 if (!field)
1173 return 0;
Arnaldo Carvalho de Melo5555ded2012-09-11 19:24:23 -03001174
Arnaldo Carvalho de Meloe6b6f672012-09-26 13:13:04 -03001175 ptr = sample->raw_data + field->offset;
Arnaldo Carvalho de Melo5555ded2012-09-11 19:24:23 -03001176
Arnaldo Carvalho de Meloe6b6f672012-09-26 13:13:04 -03001177 switch (field->size) {
1178 case 1:
1179 return *(u8 *)ptr;
1180 case 2:
1181 value = *(u16 *)ptr;
1182 break;
1183 case 4:
1184 value = *(u32 *)ptr;
1185 break;
1186 case 8:
1187 value = *(u64 *)ptr;
1188 break;
1189 default:
1190 return 0;
1191 }
1192
1193 if (!evsel->needs_swap)
1194 return value;
1195
1196 switch (field->size) {
1197 case 2:
1198 return bswap_16(value);
1199 case 4:
1200 return bswap_32(value);
1201 case 8:
1202 return bswap_64(value);
1203 default:
1204 return 0;
1205 }
1206
1207 return 0;
Arnaldo Carvalho de Melo5555ded2012-09-11 19:24:23 -03001208}