blob: a03a36b7908a595e1ed91400bbab289cf3778e38 [file] [log] [blame]
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -02001/*
2 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
3 *
4 * Parts came from builtin-{top,stat,record}.c, see those files for further
5 * copyright notes.
6 *
7 * Released under the GPL v2. (and only v2, not any later version)
8 */
9
Arnaldo Carvalho de Melo69aad6f2011-01-03 16:39:04 -020010#include "evsel.h"
Arnaldo Carvalho de Melo70082dd2011-01-12 17:03:24 -020011#include "evlist.h"
Arnaldo Carvalho de Melo69aad6f2011-01-03 16:39:04 -020012#include "util.h"
Arnaldo Carvalho de Melo86bd5e82011-01-03 23:09:46 -020013#include "cpumap.h"
Arnaldo Carvalho de Melofd782602011-01-18 15:15:24 -020014#include "thread_map.h"
Arnaldo Carvalho de Melo69aad6f2011-01-03 16:39:04 -020015
/* Lvalue accessor for the per-(cpu, thread) event fd stored in evsel->fd. */
#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
Arnaldo Carvalho de Meloc2a70652011-06-02 11:04:54 -030018int __perf_evsel__sample_size(u64 sample_type)
19{
20 u64 mask = sample_type & PERF_SAMPLE_MASK;
21 int size = 0;
22 int i;
23
24 for (i = 0; i < 64; i++) {
25 if (mask & (1ULL << i))
26 size++;
27 }
28
29 size *= sizeof(u64);
30
31 return size;
32}
33
Arnaldo Carvalho de Meloef1d1af2011-01-18 21:41:45 -020034void perf_evsel__init(struct perf_evsel *evsel,
35 struct perf_event_attr *attr, int idx)
36{
37 evsel->idx = idx;
38 evsel->attr = *attr;
39 INIT_LIST_HEAD(&evsel->node);
40}
41
Lin Ming23a2f3a2011-01-07 11:11:09 +080042struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr, int idx)
Arnaldo Carvalho de Melo69aad6f2011-01-03 16:39:04 -020043{
44 struct perf_evsel *evsel = zalloc(sizeof(*evsel));
45
Arnaldo Carvalho de Meloef1d1af2011-01-18 21:41:45 -020046 if (evsel != NULL)
47 perf_evsel__init(evsel, attr, idx);
Arnaldo Carvalho de Melo69aad6f2011-01-03 16:39:04 -020048
49 return evsel;
50}
51
52int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
53{
David Ahern4af4c952011-05-27 09:58:34 -060054 int cpu, thread;
Arnaldo Carvalho de Melo69aad6f2011-01-03 16:39:04 -020055 evsel->fd = xyarray__new(ncpus, nthreads, sizeof(int));
David Ahern4af4c952011-05-27 09:58:34 -060056
57 if (evsel->fd) {
58 for (cpu = 0; cpu < ncpus; cpu++) {
59 for (thread = 0; thread < nthreads; thread++) {
60 FD(evsel, cpu, thread) = -1;
61 }
62 }
63 }
64
Arnaldo Carvalho de Melo69aad6f2011-01-03 16:39:04 -020065 return evsel->fd != NULL ? 0 : -ENOMEM;
66}
67
Arnaldo Carvalho de Melo70db7532011-01-12 22:39:13 -020068int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads)
69{
Arnaldo Carvalho de Meloa91e5432011-03-10 11:15:54 -030070 evsel->sample_id = xyarray__new(ncpus, nthreads, sizeof(struct perf_sample_id));
71 if (evsel->sample_id == NULL)
72 return -ENOMEM;
73
74 evsel->id = zalloc(ncpus * nthreads * sizeof(u64));
75 if (evsel->id == NULL) {
76 xyarray__delete(evsel->sample_id);
77 evsel->sample_id = NULL;
78 return -ENOMEM;
79 }
80
81 return 0;
Arnaldo Carvalho de Melo70db7532011-01-12 22:39:13 -020082}
83
Arnaldo Carvalho de Meloc52b12e2011-01-03 17:45:52 -020084int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus)
85{
86 evsel->counts = zalloc((sizeof(*evsel->counts) +
87 (ncpus * sizeof(struct perf_counts_values))));
88 return evsel->counts != NULL ? 0 : -ENOMEM;
89}
90
/*
 * Release the fd matrix and clear the pointer so a later
 * perf_evsel__alloc_fd() (or double free) is safe.
 */
void perf_evsel__free_fd(struct perf_evsel *evsel)
{
	xyarray__delete(evsel->fd);
	evsel->fd = NULL;
}
96
Arnaldo Carvalho de Melo70db7532011-01-12 22:39:13 -020097void perf_evsel__free_id(struct perf_evsel *evsel)
98{
Arnaldo Carvalho de Meloa91e5432011-03-10 11:15:54 -030099 xyarray__delete(evsel->sample_id);
100 evsel->sample_id = NULL;
101 free(evsel->id);
Arnaldo Carvalho de Melo70db7532011-01-12 22:39:13 -0200102 evsel->id = NULL;
103}
104
Arnaldo Carvalho de Meloc52b12e2011-01-03 17:45:52 -0200105void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
106{
107 int cpu, thread;
108
109 for (cpu = 0; cpu < ncpus; cpu++)
110 for (thread = 0; thread < nthreads; ++thread) {
111 close(FD(evsel, cpu, thread));
112 FD(evsel, cpu, thread) = -1;
113 }
114}
115
/*
 * Release everything the evsel owns, but not the evsel struct itself —
 * the caller still owns that memory (perf_evsel__delete() frees both).
 * The evsel must already be unlinked from any evlist.
 */
void perf_evsel__exit(struct perf_evsel *evsel)
{
	assert(list_empty(&evsel->node));
	xyarray__delete(evsel->fd);
	xyarray__delete(evsel->sample_id);
	free(evsel->id);
}
123
/*
 * Full destructor: release owned resources (fd matrix, sample ids,
 * cgroup reference, name string) and then the evsel itself.
 */
void perf_evsel__delete(struct perf_evsel *evsel)
{
	perf_evsel__exit(evsel);
	close_cgroup(evsel->cgrp);
	free(evsel->name);
	free(evsel);
}
Arnaldo Carvalho de Meloc52b12e2011-01-03 17:45:52 -0200131
132int __perf_evsel__read_on_cpu(struct perf_evsel *evsel,
133 int cpu, int thread, bool scale)
134{
135 struct perf_counts_values count;
136 size_t nv = scale ? 3 : 1;
137
138 if (FD(evsel, cpu, thread) < 0)
139 return -EINVAL;
140
Arnaldo Carvalho de Melo4eed11d2011-01-04 00:13:17 -0200141 if (evsel->counts == NULL && perf_evsel__alloc_counts(evsel, cpu + 1) < 0)
142 return -ENOMEM;
143
Arnaldo Carvalho de Meloc52b12e2011-01-03 17:45:52 -0200144 if (readn(FD(evsel, cpu, thread), &count, nv * sizeof(u64)) < 0)
145 return -errno;
146
147 if (scale) {
148 if (count.run == 0)
149 count.val = 0;
150 else if (count.run < count.ena)
151 count.val = (u64)((double)count.val * count.ena / count.run + 0.5);
152 } else
153 count.ena = count.run = 0;
154
155 evsel->counts->cpu[cpu] = count;
156 return 0;
157}
158
159int __perf_evsel__read(struct perf_evsel *evsel,
160 int ncpus, int nthreads, bool scale)
161{
162 size_t nv = scale ? 3 : 1;
163 int cpu, thread;
164 struct perf_counts_values *aggr = &evsel->counts->aggr, count;
165
Arnaldo Carvalho de Melo52bcd9942011-02-03 17:26:06 -0200166 aggr->val = aggr->ena = aggr->run = 0;
Arnaldo Carvalho de Meloc52b12e2011-01-03 17:45:52 -0200167
168 for (cpu = 0; cpu < ncpus; cpu++) {
169 for (thread = 0; thread < nthreads; thread++) {
170 if (FD(evsel, cpu, thread) < 0)
171 continue;
172
173 if (readn(FD(evsel, cpu, thread),
174 &count, nv * sizeof(u64)) < 0)
175 return -errno;
176
177 aggr->val += count.val;
178 if (scale) {
179 aggr->ena += count.ena;
180 aggr->run += count.run;
181 }
182 }
183 }
184
185 evsel->counts->scaled = 0;
186 if (scale) {
187 if (aggr->run == 0) {
188 evsel->counts->scaled = -1;
189 aggr->val = 0;
190 return 0;
191 }
192
193 if (aggr->run < aggr->ena) {
194 evsel->counts->scaled = 1;
195 aggr->val = (u64)((double)aggr->val * aggr->ena / aggr->run + 0.5);
196 }
197 } else
198 aggr->ena = aggr->run = 0;
199
200 return 0;
201}
Arnaldo Carvalho de Melo48290602011-01-03 17:48:12 -0200202
/*
 * Open one perf event fd per (cpu, thread) pair, allocating the fd
 * matrix on first use.
 *
 * When the evsel carries a cgroup, the cgroup fd is passed as the pid
 * with PERF_FLAG_PID_CGROUP; otherwise the thread map supplies the pid
 * per thread. With 'group', the first fd opened on each cpu becomes the
 * group leader for the rest of that cpu's fds.
 *
 * Returns 0 on success; on any failure all fds opened so far are closed
 * and -1 is returned.
 */
static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
			      struct thread_map *threads, bool group)
{
	int cpu, thread;
	unsigned long flags = 0;
	int pid = -1;

	if (evsel->fd == NULL &&
	    perf_evsel__alloc_fd(evsel, cpus->nr, threads->nr) < 0)
		return -1;

	if (evsel->cgrp) {
		flags = PERF_FLAG_PID_CGROUP;
		pid = evsel->cgrp->fd;
	}

	for (cpu = 0; cpu < cpus->nr; cpu++) {
		/* leader is per-cpu: first successfully opened fd on this cpu */
		int group_fd = -1;

		for (thread = 0; thread < threads->nr; thread++) {

			if (!evsel->cgrp)
				pid = threads->map[thread];

			FD(evsel, cpu, thread) = sys_perf_event_open(&evsel->attr,
								     pid,
								     cpus->map[cpu],
								     group_fd, flags);
			if (FD(evsel, cpu, thread) < 0)
				goto out_close;

			if (group && group_fd == -1)
				group_fd = FD(evsel, cpu, thread);
		}
	}

	return 0;

out_close:
	/*
	 * Unwind in reverse: finish the partially-opened row (thread is
	 * the failing index, so --thread skips it), then every fully
	 * opened earlier cpu row.
	 */
	do {
		while (--thread >= 0) {
			close(FD(evsel, cpu, thread));
			FD(evsel, cpu, thread) = -1;
		}
		thread = threads->nr;
	} while (--cpu >= 0);
	return -1;
}
251
/*
 * Single-entry "any" maps used when the caller passes NULL for cpus or
 * threads: one slot holding -1, which perf_event_open() interprets as
 * "all cpus" / "no specific pid". Declared as wrapper structs so the
 * flexible map storage has somewhere to live.
 */
static struct {
	struct cpu_map map;
	int cpus[1];
} empty_cpu_map = {
	.map.nr	 = 1,
	.cpus	 = { -1, },
};

static struct {
	struct thread_map map;
	int threads[1];
} empty_thread_map = {
	.map.nr	 = 1,
	.threads = { -1, },
};
267
Arnaldo Carvalho de Melof08199d2011-01-11 23:42:19 -0200268int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
Arnaldo Carvalho de Melo5d2cd902011-04-14 11:20:14 -0300269 struct thread_map *threads, bool group)
Arnaldo Carvalho de Melo02522082011-01-04 11:55:27 -0200270{
Arnaldo Carvalho de Melo02522082011-01-04 11:55:27 -0200271 if (cpus == NULL) {
272 /* Work around old compiler warnings about strict aliasing */
273 cpus = &empty_cpu_map.map;
274 }
275
276 if (threads == NULL)
277 threads = &empty_thread_map.map;
278
Arnaldo Carvalho de Melo5d2cd902011-04-14 11:20:14 -0300279 return __perf_evsel__open(evsel, cpus, threads, group);
Arnaldo Carvalho de Melo02522082011-01-04 11:55:27 -0200280}
281
/* Open the evsel per cpu, monitoring any thread (pid wildcard -1). */
int perf_evsel__open_per_cpu(struct perf_evsel *evsel,
			     struct cpu_map *cpus, bool group)
{
	return __perf_evsel__open(evsel, cpus, &empty_thread_map.map, group);
}
287
/* Open the evsel per thread, on any cpu (cpu wildcard -1). */
int perf_evsel__open_per_thread(struct perf_evsel *evsel,
				struct thread_map *threads, bool group)
{
	return __perf_evsel__open(evsel, &empty_cpu_map.map, threads, group);
}
Arnaldo Carvalho de Melo70082dd2011-01-12 17:03:24 -0200293
/*
 * Parse the sample_id_all trailer appended to non-SAMPLE records.
 *
 * The trailer fields are laid out at the END of the event in the same
 * order as in a sample, so start at the last u64 and walk backwards,
 * peeling off each field that 'type' says is present. The field order
 * here (CPU, STREAM_ID, ID, TIME, TID) is the reverse of the forward
 * sample layout.
 */
static int perf_event__parse_id_sample(const union perf_event *event, u64 type,
				       struct perf_sample *sample)
{
	const u64 *array = event->sample.array;

	/* Point at the last u64 of the event payload. */
	array += ((event->header.size -
		   sizeof(event->header)) / sizeof(u64)) - 1;

	if (type & PERF_SAMPLE_CPU) {
		/* cpu is a u32 in the low half of the u64 slot */
		u32 *p = (u32 *)array;
		sample->cpu = *p;
		array--;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		sample->stream_id = *array;
		array--;
	}

	if (type & PERF_SAMPLE_ID) {
		sample->id = *array;
		array--;
	}

	if (type & PERF_SAMPLE_TIME) {
		sample->time = *array;
		array--;
	}

	if (type & PERF_SAMPLE_TID) {
		/* pid/tid share one u64 slot as two u32s */
		u32 *p = (u32 *)array;
		sample->pid = p[0];
		sample->tid = p[1];
	}

	return 0;
}
331
Frederic Weisbecker98e1da92011-05-21 20:08:15 +0200332static bool sample_overlap(const union perf_event *event,
333 const void *offset, u64 size)
334{
335 const void *base = event;
336
337 if (offset + size > base + event->header.size)
338 return true;
339
340 return false;
341}
342
/*
 * Decode a perf event into 'data' according to the sample_type bits in
 * 'type'.
 *
 * Non-SAMPLE records only carry id information when sample_id_all is
 * set, parsed from the record's tail. For PERF_RECORD_SAMPLE the fixed
 * fields are walked front-to-back in their defined order; the
 * variable-length fields (callchain, raw) are bounds-checked against
 * the event size with sample_overlap() before being dereferenced,
 * since their lengths come from the (untrusted) buffer itself.
 *
 * 'sample_size' is the precomputed fixed-field size (see
 * __perf_evsel__sample_size). Returns 0 on success, -EFAULT if the
 * event is too small or a variable field overruns it, -1 for
 * unsupported PERF_SAMPLE_READ.
 */
int perf_event__parse_sample(const union perf_event *event, u64 type,
			     int sample_size, bool sample_id_all,
			     struct perf_sample *data)
{
	const u64 *array;

	data->cpu = data->pid = data->tid = -1;
	data->stream_id = data->id = data->time = -1ULL;

	if (event->header.type != PERF_RECORD_SAMPLE) {
		if (!sample_id_all)
			return 0;
		return perf_event__parse_id_sample(event, type, data);
	}

	array = event->sample.array;

	/* Reject events too small to hold the fixed-size fields. */
	if (sample_size + sizeof(event->header) > event->header.size)
		return -EFAULT;

	if (type & PERF_SAMPLE_IP) {
		data->ip = event->ip.ip;
		array++;
	}

	if (type & PERF_SAMPLE_TID) {
		/* pid/tid packed as two u32s in one u64 slot */
		u32 *p = (u32 *)array;
		data->pid = p[0];
		data->tid = p[1];
		array++;
	}

	if (type & PERF_SAMPLE_TIME) {
		data->time = *array;
		array++;
	}

	data->addr = 0;
	if (type & PERF_SAMPLE_ADDR) {
		data->addr = *array;
		array++;
	}

	data->id = -1ULL;
	if (type & PERF_SAMPLE_ID) {
		data->id = *array;
		array++;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		data->stream_id = *array;
		array++;
	}

	if (type & PERF_SAMPLE_CPU) {
		/* cpu is a u32 in the low half of the slot */
		u32 *p = (u32 *)array;
		data->cpu = *p;
		array++;
	}

	if (type & PERF_SAMPLE_PERIOD) {
		data->period = *array;
		array++;
	}

	if (type & PERF_SAMPLE_READ) {
		fprintf(stderr, "PERF_SAMPLE_READ is unsuported for now\n");
		return -1;
	}

	if (type & PERF_SAMPLE_CALLCHAIN) {
		/* first check the nr field itself fits ... */
		if (sample_overlap(event, array, sizeof(data->callchain->nr)))
			return -EFAULT;

		data->callchain = (struct ip_callchain *)array;

		/* ... then that nr entries fit too */
		if (sample_overlap(event, array, data->callchain->nr))
			return -EFAULT;

		array += 1 + data->callchain->nr;
	}

	if (type & PERF_SAMPLE_RAW) {
		u32 *p = (u32 *)array;

		/* bounds-check the u32 size prefix, then the payload */
		if (sample_overlap(event, array, sizeof(u32)))
			return -EFAULT;

		data->raw_size = *p;
		p++;

		if (sample_overlap(event, p, data->raw_size))
			return -EFAULT;

		data->raw_data = p;
	}

	return 0;
}