/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */
#include "util.h"
#include "debugfs.h"
#include <poll.h>
#include "cpumap.h"
#include "thread_map.h"
#include "evlist.h"
#include "evsel.h"

#include "parse-events.h"

#include <sys/mman.h>

#include <linux/bitops.h>
#include <linux/hash.h>

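/*
 * FD() yields the perf event file descriptor opened for an evsel on a given
 * (cpu, thread) slot; SID() yields the matching sample id slot, used to map
 * sample ids back to their evsel.
 */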
#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
#define SID(e, x, y) xyarray__entry(e->sample_id, x, y)

void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
		       struct thread_map *threads)
{
	int i;

	for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i)
		INIT_HLIST_HEAD(&evlist->heads[i]);
	INIT_LIST_HEAD(&evlist->entries);
	perf_evlist__set_maps(evlist, cpus, threads);
}

struct perf_evlist *perf_evlist__new(struct cpu_map *cpus,
				     struct thread_map *threads)
{
	struct perf_evlist *evlist = zalloc(sizeof(*evlist));

	if (evlist != NULL)
		perf_evlist__init(evlist, cpus, threads);

	return evlist;
}

void perf_evlist__config_attrs(struct perf_evlist *evlist,
			       struct perf_record_opts *opts)
{
	struct perf_evsel *evsel;

	if (evlist->cpus->map[0] < 0)
		opts->no_inherit = true;

	list_for_each_entry(evsel, &evlist->entries, node) {
		perf_evsel__config(evsel, opts);

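		/*
		 * With more than one event, samples from different counters
		 * end up in a shared buffer, so each needs PERF_SAMPLE_ID to
		 * be routed back to its evsel (see perf_evlist__id2evsel()).
		 */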
		if (evlist->nr_entries > 1)
			evsel->attr.sample_type |= PERF_SAMPLE_ID;
	}
}

static void perf_evlist__purge(struct perf_evlist *evlist)
{
	struct perf_evsel *pos, *n;

	list_for_each_entry_safe(pos, n, &evlist->entries, node) {
		list_del_init(&pos->node);
		perf_evsel__delete(pos);
	}

	evlist->nr_entries = 0;
}

void perf_evlist__exit(struct perf_evlist *evlist)
{
	free(evlist->mmap);
	free(evlist->pollfd);
	evlist->mmap = NULL;
	evlist->pollfd = NULL;
}

void perf_evlist__delete(struct perf_evlist *evlist)
{
	perf_evlist__purge(evlist);
	perf_evlist__exit(evlist);
	free(evlist);
}

void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry)
{
	list_add_tail(&entry->node, &evlist->entries);
	++evlist->nr_entries;
}

static void perf_evlist__splice_list_tail(struct perf_evlist *evlist,
					  struct list_head *list,
					  int nr_entries)
{
	list_splice_tail(list, &evlist->entries);
	evlist->nr_entries += nr_entries;
}

int perf_evlist__add_default(struct perf_evlist *evlist)
{
	struct perf_event_attr attr = {
		.type	= PERF_TYPE_HARDWARE,
		.config	= PERF_COUNT_HW_CPU_CYCLES,
	};
	struct perf_evsel *evsel = perf_evsel__new(&attr, 0);

	if (evsel == NULL)
		goto error;

	/* use strdup() because free(evsel) assumes name is allocated */
	evsel->name = strdup("cycles");
	if (!evsel->name)
		goto error_free;

	perf_evlist__add(evlist, evsel);
	return 0;
error_free:
	perf_evsel__delete(evsel);
error:
	return -ENOMEM;
}

int perf_evlist__add_attrs(struct perf_evlist *evlist,
			   struct perf_event_attr *attrs, size_t nr_attrs)
{
	struct perf_evsel *evsel, *n;
	LIST_HEAD(head);
	size_t i;

	for (i = 0; i < nr_attrs; i++) {
		evsel = perf_evsel__new(attrs + i, evlist->nr_entries + i);
		if (evsel == NULL)
			goto out_delete_partial_list;
		list_add_tail(&evsel->node, &head);
	}

	perf_evlist__splice_list_tail(evlist, &head, nr_attrs);

	return 0;

out_delete_partial_list:
	list_for_each_entry_safe(evsel, n, &head, node)
		perf_evsel__delete(evsel);
	return -1;
}

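/*
 * Translate a "sys:name" tracepoint into its numeric id by reading
 * <tracing_events_path>/sys/name/id (the last ':' is turned into a '/').
 * Returns the id, or -1 on failure.
 */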
static int trace_event__id(const char *evname)
{
	char *filename, *colon;
	int err = -1, fd;

	if (asprintf(&filename, "%s/%s/id", tracing_events_path, evname) < 0)
		return -1;

	colon = strrchr(filename, ':');
	if (colon != NULL)
		*colon = '/';

	fd = open(filename, O_RDONLY);
	if (fd >= 0) {
		char id[16];
		if (read(fd, id, sizeof(id)) > 0)
			err = atoi(id);
		close(fd);
	}

	free(filename);
	return err;
}

int perf_evlist__add_tracepoints(struct perf_evlist *evlist,
				 const char *tracepoints[],
				 size_t nr_tracepoints)
{
	int err;
	size_t i;
	struct perf_event_attr *attrs = zalloc(nr_tracepoints * sizeof(*attrs));

	if (attrs == NULL)
		return -1;

	for (i = 0; i < nr_tracepoints; i++) {
		err = trace_event__id(tracepoints[i]);

		if (err < 0)
			goto out_free_attrs;

		attrs[i].type	       = PERF_TYPE_TRACEPOINT;
		attrs[i].config	       = err;
		attrs[i].sample_type   = (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME |
					  PERF_SAMPLE_CPU);
		attrs[i].sample_period = 1;
	}

	err = perf_evlist__add_attrs(evlist, attrs, nr_tracepoints);
out_free_attrs:
	free(attrs);
	return err;
}

void perf_evlist__disable(struct perf_evlist *evlist)
{
	int cpu, thread;
	struct perf_evsel *pos;

	for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
		list_for_each_entry(pos, &evlist->entries, node) {
			for (thread = 0; thread < evlist->threads->nr; thread++)
				ioctl(FD(pos, cpu, thread), PERF_EVENT_IOC_DISABLE);
		}
	}
}

void perf_evlist__enable(struct perf_evlist *evlist)
{
	int cpu, thread;
	struct perf_evsel *pos;

	for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
		list_for_each_entry(pos, &evlist->entries, node) {
			for (thread = 0; thread < evlist->threads->nr; thread++)
				ioctl(FD(pos, cpu, thread), PERF_EVENT_IOC_ENABLE);
		}
	}
}

int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
{
	int nfds = evlist->cpus->nr * evlist->threads->nr * evlist->nr_entries;
	evlist->pollfd = malloc(sizeof(struct pollfd) * nfds);
	return evlist->pollfd != NULL ? 0 : -ENOMEM;
}

void perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd)
{
	fcntl(fd, F_SETFL, O_NONBLOCK);
	evlist->pollfd[evlist->nr_fds].fd = fd;
	evlist->pollfd[evlist->nr_fds].events = POLLIN;
	evlist->nr_fds++;
}

static void perf_evlist__id_hash(struct perf_evlist *evlist,
				 struct perf_evsel *evsel,
				 int cpu, int thread, u64 id)
{
	int hash;
	struct perf_sample_id *sid = SID(evsel, cpu, thread);

	sid->id = id;
	sid->evsel = evsel;
	hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS);
	hlist_add_head(&sid->node, &evlist->heads[hash]);
}

void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel,
			 int cpu, int thread, u64 id)
{
	perf_evlist__id_hash(evlist, evsel, cpu, thread, id);
	evsel->id[evsel->ids++] = id;
}

static int perf_evlist__id_add_fd(struct perf_evlist *evlist,
				  struct perf_evsel *evsel,
				  int cpu, int thread, int fd)
{
	u64 read_data[4] = { 0, };
	int id_idx = 1; /* The first entry is the counter value */

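	/*
	 * With PERF_FORMAT_ID a read() returns:
	 *	value [, time_enabled] [, time_running], id
	 * so the index of the id shifts by one for each TOTAL_TIME_* flag
	 * set in read_format.
	 */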
	if (!(evsel->attr.read_format & PERF_FORMAT_ID) ||
	    read(fd, &read_data, sizeof(read_data)) == -1)
		return -1;

	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		++id_idx;
	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		++id_idx;

	perf_evlist__id_add(evlist, evsel, cpu, thread, read_data[id_idx]);
	return 0;
}

struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
{
	struct hlist_head *head;
	struct hlist_node *pos;
	struct perf_sample_id *sid;
	int hash;

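	/* With a single event there is nothing to disambiguate. */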
	if (evlist->nr_entries == 1)
		return list_entry(evlist->entries.next, struct perf_evsel, node);

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->heads[hash];

	hlist_for_each_entry(sid, pos, head, node)
		if (sid->id == id)
			return sid->evsel;
	return NULL;
}

union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
{
	/* XXX Move this to perf.c, making it generally available */
	unsigned int page_size = sysconf(_SC_PAGE_SIZE);
	struct perf_mmap *md = &evlist->mmap[idx];
	unsigned int head = perf_mmap__read_head(md);
	unsigned int old = md->prev;
	unsigned char *data = md->base + page_size;
	union perf_event *event = NULL;

	if (evlist->overwrite) {
		/*
		 * If we're further behind than half the buffer, there's a chance
		 * the writer will bite our tail and mess up the samples under us.
		 *
		 * If we somehow ended up ahead of the head, we got messed up.
		 *
		 * In either case, truncate and restart at head.
		 */
		int diff = head - old;
		if (diff > md->mask / 2 || diff < 0) {
			fprintf(stderr, "WARNING: failed to keep up with mmap data.\n");

			/*
			 * head points to a known good entry, start there.
			 */
			old = head;
		}
	}

	if (old != head) {
		size_t size;

		event = (union perf_event *)&data[old & md->mask];
		size = event->header.size;

		/*
		 * Event straddles the mmap boundary -- header should always
		 * be inside due to u64 alignment of output.
		 */
		if ((old & md->mask) + size != ((old + size) & md->mask)) {
			unsigned int offset = old;
			unsigned int len = min(sizeof(*event), size), cpy;
			void *dst = &evlist->event_copy;

			do {
				cpy = min(md->mask + 1 - (offset & md->mask), len);
				memcpy(dst, &data[offset & md->mask], cpy);
				offset += cpy;
				dst += cpy;
				len -= cpy;
			} while (len);

			event = &evlist->event_copy;
		}

		old += size;
	}

	md->prev = old;

	if (!evlist->overwrite)
		perf_mmap__write_tail(md, old);

	return event;
}
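
/*
 * A minimal sketch of how a caller might drain the mmap'ed buffers after
 * poll()ing evlist->pollfd, modelled on builtin-{record,top}.c;
 * process_event() is only a placeholder for whatever the tool does with
 * each record:
 *
 *	union perf_event *event;
 *	int i;
 *
 *	for (i = 0; i < evlist->nr_mmaps; i++)
 *		while ((event = perf_evlist__mmap_read(evlist, i)) != NULL)
 *			process_event(event);
 */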

void perf_evlist__munmap(struct perf_evlist *evlist)
{
	int i;

	for (i = 0; i < evlist->nr_mmaps; i++) {
		if (evlist->mmap[i].base != NULL) {
			munmap(evlist->mmap[i].base, evlist->mmap_len);
			evlist->mmap[i].base = NULL;
		}
	}

	free(evlist->mmap);
	evlist->mmap = NULL;
}

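/*
 * Allocate one struct perf_mmap per cpu, or per thread when the cpu map is
 * the dummy map (map[0] == -1, i.e. existing tasks are monitored without a
 * cpu list).
 */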
int perf_evlist__alloc_mmap(struct perf_evlist *evlist)
{
	evlist->nr_mmaps = evlist->cpus->nr;
	if (evlist->cpus->map[0] == -1)
		evlist->nr_mmaps = evlist->threads->nr;
	evlist->mmap = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
	return evlist->mmap != NULL ? 0 : -ENOMEM;
}

static int __perf_evlist__mmap(struct perf_evlist *evlist,
			       int idx, int prot, int mask, int fd)
{
	evlist->mmap[idx].prev = 0;
	evlist->mmap[idx].mask = mask;
	evlist->mmap[idx].base = mmap(NULL, evlist->mmap_len, prot,
				      MAP_SHARED, fd, 0);
	if (evlist->mmap[idx].base == MAP_FAILED)
		return -1;

	perf_evlist__add_pollfd(evlist, fd);
	return 0;
}

static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist, int prot, int mask)
{
	struct perf_evsel *evsel;
	int cpu, thread;

	for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
		int output = -1;

		for (thread = 0; thread < evlist->threads->nr; thread++) {
			list_for_each_entry(evsel, &evlist->entries, node) {
				int fd = FD(evsel, cpu, thread);

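				/*
				 * mmap only the first fd seen on this cpu and
				 * redirect the output of all the others into
				 * it with PERF_EVENT_IOC_SET_OUTPUT, so each
				 * cpu ends up with a single ring buffer.
				 */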
				if (output == -1) {
					output = fd;
					if (__perf_evlist__mmap(evlist, cpu,
								prot, mask, output) < 0)
						goto out_unmap;
				} else {
					if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0)
						goto out_unmap;
				}

				if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
				    perf_evlist__id_add_fd(evlist, evsel, cpu, thread, fd) < 0)
					goto out_unmap;
			}
		}
	}

	return 0;

out_unmap:
	for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
		if (evlist->mmap[cpu].base != NULL) {
			munmap(evlist->mmap[cpu].base, evlist->mmap_len);
			evlist->mmap[cpu].base = NULL;
		}
	}
	return -1;
}

static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist, int prot, int mask)
{
	struct perf_evsel *evsel;
	int thread;

	for (thread = 0; thread < evlist->threads->nr; thread++) {
		int output = -1;

		list_for_each_entry(evsel, &evlist->entries, node) {
			int fd = FD(evsel, 0, thread);

			if (output == -1) {
				output = fd;
				if (__perf_evlist__mmap(evlist, thread,
							prot, mask, output) < 0)
					goto out_unmap;
			} else {
				if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0)
					goto out_unmap;
			}

			if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
			    perf_evlist__id_add_fd(evlist, evsel, 0, thread, fd) < 0)
				goto out_unmap;
		}
	}

	return 0;

out_unmap:
	for (thread = 0; thread < evlist->threads->nr; thread++) {
		if (evlist->mmap[thread].base != NULL) {
			munmap(evlist->mmap[thread].base, evlist->mmap_len);
			evlist->mmap[thread].base = NULL;
		}
	}
	return -1;
}

/** perf_evlist__mmap - Create per cpu (or per thread) maps to receive events
 *
 * @evlist - list of events
 * @pages - map length in pages
 * @overwrite - overwrite older events?
 *
 * If overwrite is false the user needs to signal event consumption using:
 *
 *	struct perf_mmap *m = &evlist->mmap[cpu];
 *	unsigned int head = perf_mmap__read_head(m);
 *
 *	perf_mmap__write_tail(m, head)
 *
 * Using perf_evlist__mmap_read() does this automatically.
 */
int perf_evlist__mmap(struct perf_evlist *evlist, int pages, bool overwrite)
{
	unsigned int page_size = sysconf(_SC_PAGE_SIZE);
	int mask = pages * page_size - 1;
	struct perf_evsel *evsel;
	const struct cpu_map *cpus = evlist->cpus;
	const struct thread_map *threads = evlist->threads;
	int prot = PROT_READ | (overwrite ? 0 : PROT_WRITE);

	if (evlist->mmap == NULL && perf_evlist__alloc_mmap(evlist) < 0)
		return -ENOMEM;

	if (evlist->pollfd == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
		return -ENOMEM;

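	/*
	 * The mapping is the data area (pages * page_size, where pages is
	 * expected to be a power of two) plus one leading header page;
	 * mask wraps offsets within the data area.
	 */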
	evlist->overwrite = overwrite;
	evlist->mmap_len = (pages + 1) * page_size;

	list_for_each_entry(evsel, &evlist->entries, node) {
		if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
		    evsel->sample_id == NULL &&
		    perf_evsel__alloc_id(evsel, cpus->nr, threads->nr) < 0)
			return -ENOMEM;
	}

	if (evlist->cpus->map[0] == -1)
		return perf_evlist__mmap_per_thread(evlist, prot, mask);

	return perf_evlist__mmap_per_cpu(evlist, prot, mask);
}

int perf_evlist__create_maps(struct perf_evlist *evlist, pid_t target_pid,
			     pid_t target_tid, const char *cpu_list)
{
	evlist->threads = thread_map__new(target_pid, target_tid);

	if (evlist->threads == NULL)
		return -1;

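	/*
	 * When following an existing task and no cpu list was given, use a
	 * dummy cpu map so the counters follow the thread instead of being
	 * bound to each cpu.
	 */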
	if (cpu_list == NULL && target_tid != -1)
		evlist->cpus = cpu_map__dummy_new();
	else
		evlist->cpus = cpu_map__new(cpu_list);

	if (evlist->cpus == NULL)
		goto out_delete_threads;

	return 0;

out_delete_threads:
	thread_map__delete(evlist->threads);
	return -1;
}

void perf_evlist__delete_maps(struct perf_evlist *evlist)
{
	cpu_map__delete(evlist->cpus);
	thread_map__delete(evlist->threads);
	evlist->cpus = NULL;
	evlist->threads = NULL;
}

int perf_evlist__set_filters(struct perf_evlist *evlist)
{
	const struct thread_map *threads = evlist->threads;
	const struct cpu_map *cpus = evlist->cpus;
	struct perf_evsel *evsel;
	char *filter;
	int thread;
	int cpu;
	int err;
	int fd;

	list_for_each_entry(evsel, &evlist->entries, node) {
		filter = evsel->filter;
		if (!filter)
			continue;
		for (cpu = 0; cpu < cpus->nr; cpu++) {
			for (thread = 0; thread < threads->nr; thread++) {
				fd = FD(evsel, cpu, thread);
				err = ioctl(fd, PERF_EVENT_IOC_SET_FILTER, filter);
				if (err)
					return err;
			}
		}
	}

	return 0;
}

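/*
 * All events must agree on sample_type (and, below, on sample_id_all) for
 * the resulting stream to be parseable with a single sample layout.
 */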
bool perf_evlist__valid_sample_type(const struct perf_evlist *evlist)
{
	struct perf_evsel *pos, *first;

	pos = first = list_entry(evlist->entries.next, struct perf_evsel, node);

	list_for_each_entry_continue(pos, &evlist->entries, node) {
		if (first->attr.sample_type != pos->attr.sample_type)
			return false;
	}

	return true;
}

u64 perf_evlist__sample_type(const struct perf_evlist *evlist)
{
	struct perf_evsel *first;

	first = list_entry(evlist->entries.next, struct perf_evsel, node);
	return first->attr.sample_type;
}

bool perf_evlist__valid_sample_id_all(const struct perf_evlist *evlist)
{
	struct perf_evsel *pos, *first;

	pos = first = list_entry(evlist->entries.next, struct perf_evsel, node);

	list_for_each_entry_continue(pos, &evlist->entries, node) {
		if (first->attr.sample_id_all != pos->attr.sample_id_all)
			return false;
	}

	return true;
}

bool perf_evlist__sample_id_all(const struct perf_evlist *evlist)
{
	struct perf_evsel *first;

	first = list_entry(evlist->entries.next, struct perf_evsel, node);
	return first->attr.sample_id_all;
}

void perf_evlist__set_selected(struct perf_evlist *evlist,
			       struct perf_evsel *evsel)
{
	evlist->selected = evsel;
}

int perf_evlist__open(struct perf_evlist *evlist, bool group)
{
	struct perf_evsel *evsel, *first;
	int err, ncpus, nthreads;

	first = list_entry(evlist->entries.next, struct perf_evsel, node);

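	/*
	 * When group is set, the first event is the group leader and the
	 * others are opened with its fds passed as group_fd.
	 */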
	list_for_each_entry(evsel, &evlist->entries, node) {
		struct xyarray *group_fd = NULL;

		if (group && evsel != first)
			group_fd = first->fd;

		err = perf_evsel__open(evsel, evlist->cpus, evlist->threads,
				       group, group_fd);
		if (err < 0)
			goto out_err;
	}

	return 0;
out_err:
	ncpus = evlist->cpus ? evlist->cpus->nr : 1;
	nthreads = evlist->threads ? evlist->threads->nr : 1;

	list_for_each_entry_reverse(evsel, &evlist->entries, node)
		perf_evsel__close(evsel, ncpus, nthreads);

	return err;
}
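
/*
 * A rough sketch of the typical life cycle of an evlist, along the lines of
 * builtin-record.c; error handling is omitted and the argument values are
 * only illustrative:
 *
 *	struct perf_evlist *evlist = perf_evlist__new(NULL, NULL);
 *
 *	perf_evlist__add_default(evlist);
 *	perf_evlist__create_maps(evlist, target_pid, target_tid, cpu_list);
 *	perf_evlist__open(evlist, false);
 *	perf_evlist__mmap(evlist, mmap_pages, false);
 *	perf_evlist__enable(evlist);
 *
 *	while (!done) {
 *		poll(evlist->pollfd, evlist->nr_fds, -1);
 *		... drain with perf_evlist__mmap_read(), see above ...
 *	}
 *
 *	perf_evlist__disable(evlist);
 *	perf_evlist__munmap(evlist);
 *	perf_evlist__delete_maps(evlist);
 *	perf_evlist__delete(evlist);
 */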