blob: 3bc5a287a9f92a2cf8121c504b059282a19000cd [file] [log] [blame]
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -02001/*
2 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
3 *
4 * Parts came from builtin-{top,stat,record}.c, see those files for further
5 * copyright notes.
6 *
7 * Released under the GPL v2. (and only v2, not any later version)
8 */
Arnaldo Carvalho de Meloa8c9ae12011-11-05 08:41:51 -02009#include "util.h"
10#include "debugfs.h"
Arnaldo Carvalho de Melo5c581042011-01-11 22:30:02 -020011#include <poll.h>
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -020012#include "cpumap.h"
13#include "thread_map.h"
Arnaldo Carvalho de Melo361c99a2011-01-11 20:56:53 -020014#include "evlist.h"
15#include "evsel.h"
Arnaldo Carvalho de Melo361c99a2011-01-11 20:56:53 -020016
Arnaldo Carvalho de Melo50d08e42011-11-04 09:10:59 -020017#include "parse-events.h"
18
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -020019#include <sys/mman.h>
20
Arnaldo Carvalho de Melo70db7532011-01-12 22:39:13 -020021#include <linux/bitops.h>
22#include <linux/hash.h>
23
/* Event file descriptor for evsel @e on cpu index @x, thread index @y. */
#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
/* Per-(cpu, thread) sample_id slot for evsel @e. */
#define SID(e, x, y) xyarray__entry(e->sample_id, x, y)
Arnaldo Carvalho de Melo7e2ed092011-01-30 11:59:43 -020027void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
28 struct thread_map *threads)
Arnaldo Carvalho de Meloef1d1af2011-01-18 21:41:45 -020029{
30 int i;
31
32 for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i)
33 INIT_HLIST_HEAD(&evlist->heads[i]);
34 INIT_LIST_HEAD(&evlist->entries);
Arnaldo Carvalho de Melo7e2ed092011-01-30 11:59:43 -020035 perf_evlist__set_maps(evlist, cpus, threads);
Arnaldo Carvalho de Meloef1d1af2011-01-18 21:41:45 -020036}
37
Arnaldo Carvalho de Melo7e2ed092011-01-30 11:59:43 -020038struct perf_evlist *perf_evlist__new(struct cpu_map *cpus,
39 struct thread_map *threads)
Arnaldo Carvalho de Melo361c99a2011-01-11 20:56:53 -020040{
41 struct perf_evlist *evlist = zalloc(sizeof(*evlist));
42
Arnaldo Carvalho de Meloef1d1af2011-01-18 21:41:45 -020043 if (evlist != NULL)
Arnaldo Carvalho de Melo7e2ed092011-01-30 11:59:43 -020044 perf_evlist__init(evlist, cpus, threads);
Arnaldo Carvalho de Melo361c99a2011-01-11 20:56:53 -020045
46 return evlist;
47}
48
49static void perf_evlist__purge(struct perf_evlist *evlist)
50{
51 struct perf_evsel *pos, *n;
52
53 list_for_each_entry_safe(pos, n, &evlist->entries, node) {
54 list_del_init(&pos->node);
55 perf_evsel__delete(pos);
56 }
57
58 evlist->nr_entries = 0;
59}
60
Arnaldo Carvalho de Meloef1d1af2011-01-18 21:41:45 -020061void perf_evlist__exit(struct perf_evlist *evlist)
62{
63 free(evlist->mmap);
64 free(evlist->pollfd);
65 evlist->mmap = NULL;
66 evlist->pollfd = NULL;
67}
68
/*
 * Tear an evlist down completely: its evsels, its internal buffers,
 * and finally the evlist structure itself.
 */
void perf_evlist__delete(struct perf_evlist *evlist)
{
	perf_evlist__purge(evlist);
	perf_evlist__exit(evlist);
	free(evlist);
}
75
76void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry)
77{
78 list_add_tail(&entry->node, &evlist->entries);
79 ++evlist->nr_entries;
80}
81
Arnaldo Carvalho de Melo50d08e42011-11-04 09:10:59 -020082static void perf_evlist__splice_list_tail(struct perf_evlist *evlist,
83 struct list_head *list,
84 int nr_entries)
85{
86 list_splice_tail(list, &evlist->entries);
87 evlist->nr_entries += nr_entries;
88}
89
Arnaldo Carvalho de Melo361c99a2011-01-11 20:56:53 -020090int perf_evlist__add_default(struct perf_evlist *evlist)
91{
92 struct perf_event_attr attr = {
93 .type = PERF_TYPE_HARDWARE,
94 .config = PERF_COUNT_HW_CPU_CYCLES,
95 };
96 struct perf_evsel *evsel = perf_evsel__new(&attr, 0);
97
98 if (evsel == NULL)
Stephane Eraniancc2d86b2011-06-07 18:19:36 +020099 goto error;
100
101 /* use strdup() because free(evsel) assumes name is allocated */
102 evsel->name = strdup("cycles");
103 if (!evsel->name)
104 goto error_free;
Arnaldo Carvalho de Melo361c99a2011-01-11 20:56:53 -0200105
106 perf_evlist__add(evlist, evsel);
107 return 0;
Stephane Eraniancc2d86b2011-06-07 18:19:36 +0200108error_free:
109 perf_evsel__delete(evsel);
110error:
111 return -ENOMEM;
Arnaldo Carvalho de Melo361c99a2011-01-11 20:56:53 -0200112}
Arnaldo Carvalho de Melo5c581042011-01-11 22:30:02 -0200113
Arnaldo Carvalho de Melo50d08e42011-11-04 09:10:59 -0200114int perf_evlist__add_attrs(struct perf_evlist *evlist,
115 struct perf_event_attr *attrs, size_t nr_attrs)
116{
117 struct perf_evsel *evsel, *n;
118 LIST_HEAD(head);
119 size_t i;
120
121 for (i = 0; i < nr_attrs; i++) {
122 evsel = perf_evsel__new(attrs + i, evlist->nr_entries + i);
123 if (evsel == NULL)
124 goto out_delete_partial_list;
125 list_add_tail(&evsel->node, &head);
126 }
127
128 perf_evlist__splice_list_tail(evlist, &head, nr_attrs);
129
130 return 0;
131
132out_delete_partial_list:
133 list_for_each_entry_safe(evsel, n, &head, node)
134 perf_evsel__delete(evsel);
135 return -1;
136}
137
Arnaldo Carvalho de Meloa8c9ae12011-11-05 08:41:51 -0200138static int trace_event__id(const char *evname)
139{
140 char *filename, *colon;
141 int err = -1, fd;
142
143 if (asprintf(&filename, "%s/%s/id", tracing_events_path, evname) < 0)
144 return -1;
145
146 colon = strrchr(filename, ':');
147 if (colon != NULL)
148 *colon = '/';
149
150 fd = open(filename, O_RDONLY);
151 if (fd >= 0) {
152 char id[16];
153 if (read(fd, id, sizeof(id)) > 0)
154 err = atoi(id);
155 close(fd);
156 }
157
158 free(filename);
159 return err;
160}
161
162int perf_evlist__add_tracepoints(struct perf_evlist *evlist,
163 const char *tracepoints[],
164 size_t nr_tracepoints)
165{
166 int err;
167 size_t i;
168 struct perf_event_attr *attrs = zalloc(nr_tracepoints * sizeof(*attrs));
169
170 if (attrs == NULL)
171 return -1;
172
173 for (i = 0; i < nr_tracepoints; i++) {
174 err = trace_event__id(tracepoints[i]);
175
176 if (err < 0)
177 goto out_free_attrs;
178
179 attrs[i].type = PERF_TYPE_TRACEPOINT;
180 attrs[i].config = err;
181 attrs[i].sample_type = (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME |
182 PERF_SAMPLE_CPU);
183 attrs[i].sample_period = 1;
184 }
185
186 err = perf_evlist__add_attrs(evlist, attrs, nr_tracepoints);
187out_free_attrs:
188 free(attrs);
189 return err;
190}
191
Arnaldo Carvalho de Melo4152ab32011-07-25 11:06:19 -0300192void perf_evlist__disable(struct perf_evlist *evlist)
193{
194 int cpu, thread;
195 struct perf_evsel *pos;
196
197 for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
198 list_for_each_entry(pos, &evlist->entries, node) {
199 for (thread = 0; thread < evlist->threads->nr; thread++)
200 ioctl(FD(pos, cpu, thread), PERF_EVENT_IOC_DISABLE);
201 }
202 }
203}
204
David Ahern764e16a32011-08-25 10:17:55 -0600205void perf_evlist__enable(struct perf_evlist *evlist)
206{
207 int cpu, thread;
208 struct perf_evsel *pos;
209
210 for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
211 list_for_each_entry(pos, &evlist->entries, node) {
212 for (thread = 0; thread < evlist->threads->nr; thread++)
213 ioctl(FD(pos, cpu, thread), PERF_EVENT_IOC_ENABLE);
214 }
215 }
216}
217
Arnaldo Carvalho de Melo7e2ed092011-01-30 11:59:43 -0200218int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
Arnaldo Carvalho de Melo5c581042011-01-11 22:30:02 -0200219{
Arnaldo Carvalho de Melo7e2ed092011-01-30 11:59:43 -0200220 int nfds = evlist->cpus->nr * evlist->threads->nr * evlist->nr_entries;
Arnaldo Carvalho de Melo5c581042011-01-11 22:30:02 -0200221 evlist->pollfd = malloc(sizeof(struct pollfd) * nfds);
222 return evlist->pollfd != NULL ? 0 : -ENOMEM;
223}
Arnaldo Carvalho de Melo70082dd2011-01-12 17:03:24 -0200224
225void perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd)
226{
227 fcntl(fd, F_SETFL, O_NONBLOCK);
228 evlist->pollfd[evlist->nr_fds].fd = fd;
229 evlist->pollfd[evlist->nr_fds].events = POLLIN;
230 evlist->nr_fds++;
231}
Arnaldo Carvalho de Melo70db7532011-01-12 22:39:13 -0200232
Arnaldo Carvalho de Meloa91e5432011-03-10 11:15:54 -0300233static void perf_evlist__id_hash(struct perf_evlist *evlist,
234 struct perf_evsel *evsel,
235 int cpu, int thread, u64 id)
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -0200236{
Arnaldo Carvalho de Melo3d3b5e92011-03-04 22:29:39 -0300237 int hash;
238 struct perf_sample_id *sid = SID(evsel, cpu, thread);
239
240 sid->id = id;
241 sid->evsel = evsel;
242 hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS);
243 hlist_add_head(&sid->node, &evlist->heads[hash]);
244}
245
Arnaldo Carvalho de Meloa91e5432011-03-10 11:15:54 -0300246void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel,
247 int cpu, int thread, u64 id)
248{
249 perf_evlist__id_hash(evlist, evsel, cpu, thread, id);
250 evsel->id[evsel->ids++] = id;
251}
252
/*
 * Obtain the kernel-assigned event id for @fd by reading the counter
 * (whose layout depends on attr.read_format) and register it via
 * perf_evlist__id_add(). Returns 0 on success, -1 if PERF_FORMAT_ID is
 * not set or the read fails.
 *
 * NOTE(review): only read() == -1 is treated as failure; the number of
 * bytes actually delivered depends on read_format, so a full-buffer
 * check would be wrong here — confirm a short read cannot reach id_idx.
 */
static int perf_evlist__id_add_fd(struct perf_evlist *evlist,
				  struct perf_evsel *evsel,
				  int cpu, int thread, int fd)
{
	u64 read_data[4] = { 0, };
	int id_idx = 1; /* The first entry is the counter value */

	if (!(evsel->attr.read_format & PERF_FORMAT_ID) ||
	    read(fd, &read_data, sizeof(read_data)) == -1)
		return -1;

	/* Optional fields precede the id; skip whichever are enabled. */
	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		++id_idx;
	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		++id_idx;

	perf_evlist__id_add(evlist, evsel, cpu, thread, read_data[id_idx]);
	return 0;
}
272
Arnaldo Carvalho de Melo70db7532011-01-12 22:39:13 -0200273struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
274{
275 struct hlist_head *head;
276 struct hlist_node *pos;
277 struct perf_sample_id *sid;
278 int hash;
279
280 if (evlist->nr_entries == 1)
281 return list_entry(evlist->entries.next, struct perf_evsel, node);
282
283 hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
284 head = &evlist->heads[hash];
285
286 hlist_for_each_entry(sid, pos, head, node)
287 if (sid->id == id)
288 return sid->evsel;
289 return NULL;
290}
Arnaldo Carvalho de Melo04391de2011-01-15 10:40:59 -0200291
/*
 * Pull the next event out of ring buffer @idx, or return NULL when it is
 * empty. In non-overwrite mode the consumed position is published back to
 * the kernel via perf_mmap__write_tail(). The returned pointer is either
 * into the mmap'ed data area or into evlist->event_copy (for records that
 * wrap around the end of the buffer) — it is only valid until the next
 * call for this buffer.
 */
union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
{
	/* XXX Move this to perf.c, making it generally available */
	unsigned int page_size = sysconf(_SC_PAGE_SIZE);
	struct perf_mmap *md = &evlist->mmap[idx];
	unsigned int head = perf_mmap__read_head(md);
	unsigned int old = md->prev;
	/* data area starts one page past the control page */
	unsigned char *data = md->base + page_size;
	union perf_event *event = NULL;

	if (evlist->overwrite) {
		/*
		 * If we're further behind than half the buffer, there's a chance
		 * the writer will bite our tail and mess up the samples under us.
		 *
		 * If we somehow ended up ahead of the head, we got messed up.
		 *
		 * In either case, truncate and restart at head.
		 */
		int diff = head - old;
		if (diff > md->mask / 2 || diff < 0) {
			fprintf(stderr, "WARNING: failed to keep up with mmap data.\n");

			/*
			 * head points to a known good entry, start there.
			 */
			old = head;
		}
	}

	if (old != head) {
		size_t size;

		event = (union perf_event *)&data[old & md->mask];
		size = event->header.size;

		/*
		 * Event straddles the mmap boundary -- header should always
		 * be inside due to u64 alignment of output.
		 */
		if ((old & md->mask) + size != ((old + size) & md->mask)) {
			unsigned int offset = old;
			unsigned int len = min(sizeof(*event), size), cpy;
			void *dst = &evlist->event_copy;

			/* reassemble the wrapped record piecewise into event_copy */
			do {
				cpy = min(md->mask + 1 - (offset & md->mask), len);
				memcpy(dst, &data[offset & md->mask], cpy);
				offset += cpy;
				dst += cpy;
				len -= cpy;
			} while (len);

			event = &evlist->event_copy;
		}

		old += size;
	}

	/* remember how far we've consumed for the next call */
	md->prev = old;

	if (!evlist->overwrite)
		perf_mmap__write_tail(md, old);

	return event;
}
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -0200358
Arnaldo Carvalho de Melo7e2ed092011-01-30 11:59:43 -0200359void perf_evlist__munmap(struct perf_evlist *evlist)
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -0200360{
Arnaldo Carvalho de Meloaece9482011-05-15 09:39:00 -0300361 int i;
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -0200362
Arnaldo Carvalho de Meloaece9482011-05-15 09:39:00 -0300363 for (i = 0; i < evlist->nr_mmaps; i++) {
364 if (evlist->mmap[i].base != NULL) {
365 munmap(evlist->mmap[i].base, evlist->mmap_len);
366 evlist->mmap[i].base = NULL;
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -0200367 }
368 }
Arnaldo Carvalho de Meloaece9482011-05-15 09:39:00 -0300369
370 free(evlist->mmap);
371 evlist->mmap = NULL;
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -0200372}
373
Arnaldo Carvalho de Melo7e2ed092011-01-30 11:59:43 -0200374int perf_evlist__alloc_mmap(struct perf_evlist *evlist)
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -0200375{
Arnaldo Carvalho de Meloaece9482011-05-15 09:39:00 -0300376 evlist->nr_mmaps = evlist->cpus->nr;
377 if (evlist->cpus->map[0] == -1)
378 evlist->nr_mmaps = evlist->threads->nr;
379 evlist->mmap = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -0200380 return evlist->mmap != NULL ? 0 : -ENOMEM;
381}
382
Arnaldo Carvalho de Melobccdaba2011-06-02 10:39:43 -0300383static int __perf_evlist__mmap(struct perf_evlist *evlist,
Arnaldo Carvalho de Meloaece9482011-05-15 09:39:00 -0300384 int idx, int prot, int mask, int fd)
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -0200385{
Arnaldo Carvalho de Meloaece9482011-05-15 09:39:00 -0300386 evlist->mmap[idx].prev = 0;
387 evlist->mmap[idx].mask = mask;
388 evlist->mmap[idx].base = mmap(NULL, evlist->mmap_len, prot,
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -0200389 MAP_SHARED, fd, 0);
Arnaldo Carvalho de Melobccdaba2011-06-02 10:39:43 -0300390 if (evlist->mmap[idx].base == MAP_FAILED)
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -0200391 return -1;
392
393 perf_evlist__add_pollfd(evlist, fd);
394 return 0;
395}
396
/*
 * Create one ring buffer per cpu. The first fd on each cpu gets the
 * mmap; every other fd on that cpu is redirected into the same buffer
 * with PERF_EVENT_IOC_SET_OUTPUT. Ids are registered for evsels that
 * use PERF_FORMAT_ID. On any failure all buffers mapped so far are torn
 * down and -1 is returned.
 */
static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist, int prot, int mask)
{
	struct perf_evsel *evsel;
	int cpu, thread;

	for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
		int output = -1;	/* fd owning this cpu's buffer */

		for (thread = 0; thread < evlist->threads->nr; thread++) {
			list_for_each_entry(evsel, &evlist->entries, node) {
				int fd = FD(evsel, cpu, thread);

				if (output == -1) {
					output = fd;
					if (__perf_evlist__mmap(evlist, cpu,
								prot, mask, output) < 0)
						goto out_unmap;
				} else {
					/* share the first fd's buffer */
					if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0)
						goto out_unmap;
				}

				if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
				    perf_evlist__id_add_fd(evlist, evsel, cpu, thread, fd) < 0)
					goto out_unmap;
			}
		}
	}

	return 0;

out_unmap:
	/* undo every successful mmap before reporting failure */
	for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
		if (evlist->mmap[cpu].base != NULL) {
			munmap(evlist->mmap[cpu].base, evlist->mmap_len);
			evlist->mmap[cpu].base = NULL;
		}
	}
	return -1;
}
437
/*
 * Per-thread variant of the above, used with the dummy cpu map: one
 * ring buffer per monitored thread (cpu index is always 0), with
 * additional fds redirected via PERF_EVENT_IOC_SET_OUTPUT. On failure
 * every buffer mapped so far is unmapped and -1 is returned.
 */
static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist, int prot, int mask)
{
	struct perf_evsel *evsel;
	int thread;

	for (thread = 0; thread < evlist->threads->nr; thread++) {
		int output = -1;	/* fd owning this thread's buffer */

		list_for_each_entry(evsel, &evlist->entries, node) {
			int fd = FD(evsel, 0, thread);

			if (output == -1) {
				output = fd;
				if (__perf_evlist__mmap(evlist, thread,
							prot, mask, output) < 0)
					goto out_unmap;
			} else {
				/* share the first fd's buffer */
				if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0)
					goto out_unmap;
			}

			if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
			    perf_evlist__id_add_fd(evlist, evsel, 0, thread, fd) < 0)
				goto out_unmap;
		}
	}

	return 0;

out_unmap:
	/* undo every successful mmap before reporting failure */
	for (thread = 0; thread < evlist->threads->nr; thread++) {
		if (evlist->mmap[thread].base != NULL) {
			munmap(evlist->mmap[thread].base, evlist->mmap_len);
			evlist->mmap[thread].base = NULL;
		}
	}
	return -1;
}
476
/** perf_evlist__mmap - Create per cpu maps to receive events
 *
 * @evlist - list of events
 * @pages - map length in pages
 * @overwrite - overwrite older events?
 *
 * If overwrite is false the user needs to signal event consumption using:
 *
 * struct perf_mmap *m = &evlist->mmap[cpu];
 * unsigned int head = perf_mmap__read_head(m);
 *
 * perf_mmap__write_tail(m, head)
 *
 * Using perf_evlist__read_on_cpu does this automatically.
 *
 * NOTE(review): the mask computation below only yields a valid ring mask
 * when @pages is a power of two — confirm callers guarantee that.
 */
int perf_evlist__mmap(struct perf_evlist *evlist, int pages, bool overwrite)
{
	unsigned int page_size = sysconf(_SC_PAGE_SIZE);
	int mask = pages * page_size - 1;
	struct perf_evsel *evsel;
	const struct cpu_map *cpus = evlist->cpus;
	const struct thread_map *threads = evlist->threads;
	/* overwrite mode maps read-only; otherwise we must write the tail */
	int prot = PROT_READ | (overwrite ? 0 : PROT_WRITE);

	if (evlist->mmap == NULL && perf_evlist__alloc_mmap(evlist) < 0)
		return -ENOMEM;

	if (evlist->pollfd == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
		return -ENOMEM;

	evlist->overwrite = overwrite;
	/* one extra page before the data area — presumably the kernel's
	 * control/header page; see perf_event_open(2) mmap layout */
	evlist->mmap_len = (pages + 1) * page_size;

	/* lazily size each evsel's sample_id storage if ids are in use */
	list_for_each_entry(evsel, &evlist->entries, node) {
		if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
		    evsel->sample_id == NULL &&
		    perf_evsel__alloc_id(evsel, cpus->nr, threads->nr) < 0)
			return -ENOMEM;
	}

	/* dummy cpu map (map[0] == -1): buffers are per thread instead */
	if (evlist->cpus->map[0] == -1)
		return perf_evlist__mmap_per_thread(evlist, prot, mask);

	return perf_evlist__mmap_per_cpu(evlist, prot, mask);
}
Arnaldo Carvalho de Melo7e2ed092011-01-30 11:59:43 -0200522
523int perf_evlist__create_maps(struct perf_evlist *evlist, pid_t target_pid,
524 pid_t target_tid, const char *cpu_list)
525{
526 evlist->threads = thread_map__new(target_pid, target_tid);
527
528 if (evlist->threads == NULL)
529 return -1;
530
Arnaldo Carvalho de Melob9019412011-04-25 16:25:20 -0300531 if (cpu_list == NULL && target_tid != -1)
Arnaldo Carvalho de Melo7e2ed092011-01-30 11:59:43 -0200532 evlist->cpus = cpu_map__dummy_new();
533 else
534 evlist->cpus = cpu_map__new(cpu_list);
535
536 if (evlist->cpus == NULL)
537 goto out_delete_threads;
538
539 return 0;
540
541out_delete_threads:
542 thread_map__delete(evlist->threads);
543 return -1;
544}
545
546void perf_evlist__delete_maps(struct perf_evlist *evlist)
547{
548 cpu_map__delete(evlist->cpus);
549 thread_map__delete(evlist->threads);
550 evlist->cpus = NULL;
551 evlist->threads = NULL;
552}
Frederic Weisbecker0a102472011-02-26 04:51:54 +0100553
554int perf_evlist__set_filters(struct perf_evlist *evlist)
555{
556 const struct thread_map *threads = evlist->threads;
557 const struct cpu_map *cpus = evlist->cpus;
558 struct perf_evsel *evsel;
559 char *filter;
560 int thread;
561 int cpu;
562 int err;
563 int fd;
564
565 list_for_each_entry(evsel, &evlist->entries, node) {
566 filter = evsel->filter;
567 if (!filter)
568 continue;
569 for (cpu = 0; cpu < cpus->nr; cpu++) {
570 for (thread = 0; thread < threads->nr; thread++) {
571 fd = FD(evsel, cpu, thread);
572 err = ioctl(fd, PERF_EVENT_IOC_SET_FILTER, filter);
573 if (err)
574 return err;
575 }
576 }
577 }
578
579 return 0;
580}
Frederic Weisbecker74429962011-05-21 17:49:00 +0200581
Arnaldo Carvalho de Meloc2a70652011-06-02 11:04:54 -0300582bool perf_evlist__valid_sample_type(const struct perf_evlist *evlist)
Frederic Weisbecker74429962011-05-21 17:49:00 +0200583{
Arnaldo Carvalho de Meloc2a70652011-06-02 11:04:54 -0300584 struct perf_evsel *pos, *first;
Frederic Weisbecker74429962011-05-21 17:49:00 +0200585
Arnaldo Carvalho de Meloc2a70652011-06-02 11:04:54 -0300586 pos = first = list_entry(evlist->entries.next, struct perf_evsel, node);
587
588 list_for_each_entry_continue(pos, &evlist->entries, node) {
589 if (first->attr.sample_type != pos->attr.sample_type)
590 return false;
Frederic Weisbecker74429962011-05-21 17:49:00 +0200591 }
592
Arnaldo Carvalho de Meloc2a70652011-06-02 11:04:54 -0300593 return true;
594}
595
596u64 perf_evlist__sample_type(const struct perf_evlist *evlist)
597{
598 struct perf_evsel *first;
599
600 first = list_entry(evlist->entries.next, struct perf_evsel, node);
601 return first->attr.sample_type;
602}
603
604bool perf_evlist__valid_sample_id_all(const struct perf_evlist *evlist)
605{
606 struct perf_evsel *pos, *first;
607
608 pos = first = list_entry(evlist->entries.next, struct perf_evsel, node);
609
610 list_for_each_entry_continue(pos, &evlist->entries, node) {
611 if (first->attr.sample_id_all != pos->attr.sample_id_all)
612 return false;
613 }
614
615 return true;
Frederic Weisbecker74429962011-05-21 17:49:00 +0200616}
617
618bool perf_evlist__sample_id_all(const struct perf_evlist *evlist)
619{
Arnaldo Carvalho de Meloc2a70652011-06-02 11:04:54 -0300620 struct perf_evsel *first;
Frederic Weisbecker74429962011-05-21 17:49:00 +0200621
Arnaldo Carvalho de Meloc2a70652011-06-02 11:04:54 -0300622 first = list_entry(evlist->entries.next, struct perf_evsel, node);
623 return first->attr.sample_id_all;
Frederic Weisbecker74429962011-05-21 17:49:00 +0200624}
Arnaldo Carvalho de Melo81cce8d2011-10-05 19:11:32 -0300625
626void perf_evlist__set_selected(struct perf_evlist *evlist,
627 struct perf_evsel *evsel)
628{
629 evlist->selected = evsel;
630}
Arnaldo Carvalho de Melo727ab042011-10-25 10:42:19 -0200631
632int perf_evlist__open(struct perf_evlist *evlist, bool group)
633{
634 struct perf_evsel *evsel, *first;
635 int err, ncpus, nthreads;
636
637 first = list_entry(evlist->entries.next, struct perf_evsel, node);
638
639 list_for_each_entry(evsel, &evlist->entries, node) {
640 struct xyarray *group_fd = NULL;
641
642 if (group && evsel != first)
643 group_fd = first->fd;
644
645 err = perf_evsel__open(evsel, evlist->cpus, evlist->threads,
646 group, group_fd);
647 if (err < 0)
648 goto out_err;
649 }
650
651 return 0;
652out_err:
653 ncpus = evlist->cpus ? evlist->cpus->nr : 1;
654 nthreads = evlist->threads ? evlist->threads->nr : 1;
655
656 list_for_each_entry_reverse(evsel, &evlist->entries, node)
657 perf_evsel__close(evsel, ncpus, nthreads);
658
659 return err;
660}