/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */
#include "util.h"
#include <lk/debugfs.h>
#include <poll.h>
#include "cpumap.h"
#include "thread_map.h"
#include "target.h"
#include "evlist.h"
#include "evsel.h"
#include "debug.h"
#include <unistd.h>

#include "parse-events.h"
#include "parse-options.h"

#include <sys/mman.h>

#include <linux/bitops.h>
#include <linux/hash.h>

#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
#define SID(e, x, y) xyarray__entry(e->sample_id, x, y)

void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
		       struct thread_map *threads)
{
	int i;

	for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i)
		INIT_HLIST_HEAD(&evlist->heads[i]);
	INIT_LIST_HEAD(&evlist->entries);
	perf_evlist__set_maps(evlist, cpus, threads);
	evlist->workload.pid = -1;
}

struct perf_evlist *perf_evlist__new(void)
{
	struct perf_evlist *evlist = zalloc(sizeof(*evlist));

	if (evlist != NULL)
		perf_evlist__init(evlist, NULL, NULL);

	return evlist;
}

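/*
 * Usage sketch (illustrative only, not part of the original file): the
 * typical evlist lifecycle as the builtin tools use it -- allocate, add
 * the default "cycles" event, and tear everything down again.
 */
static void __maybe_unused evlist__lifecycle_sketch(void)
{
	struct perf_evlist *evlist = perf_evlist__new();

	if (evlist == NULL)
		return;

	if (perf_evlist__add_default(evlist))
		pr_err("could not add the default 'cycles' event\n");

	perf_evlist__delete(evlist);
}
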
/**
 * perf_evlist__set_id_pos - set the positions of event ids.
 * @evlist: selected event list
 *
 * Events with compatible sample types all have the same id_pos
 * and is_pos. For convenience, put a copy on evlist.
 */
void perf_evlist__set_id_pos(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);

	evlist->id_pos = first->id_pos;
	evlist->is_pos = first->is_pos;
}

static void perf_evlist__update_id_pos(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	list_for_each_entry(evsel, &evlist->entries, node)
		perf_evsel__calc_id_pos(evsel);

	perf_evlist__set_id_pos(evlist);
}

static void perf_evlist__purge(struct perf_evlist *evlist)
{
	struct perf_evsel *pos, *n;

	list_for_each_entry_safe(pos, n, &evlist->entries, node) {
		list_del_init(&pos->node);
		perf_evsel__delete(pos);
	}

	evlist->nr_entries = 0;
}

void perf_evlist__exit(struct perf_evlist *evlist)
{
	free(evlist->mmap);
	free(evlist->pollfd);
	evlist->mmap = NULL;
	evlist->pollfd = NULL;
}

void perf_evlist__delete(struct perf_evlist *evlist)
{
	perf_evlist__purge(evlist);
	perf_evlist__exit(evlist);
	free(evlist);
}

void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry)
{
	list_add_tail(&entry->node, &evlist->entries);
	if (!evlist->nr_entries++)
		perf_evlist__set_id_pos(evlist);
}

void perf_evlist__splice_list_tail(struct perf_evlist *evlist,
				   struct list_head *list,
				   int nr_entries)
{
	bool set_id_pos = !evlist->nr_entries;

	list_splice_tail(list, &evlist->entries);
	evlist->nr_entries += nr_entries;
	if (set_id_pos)
		perf_evlist__set_id_pos(evlist);
}

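/*
 * The list is expected to be in ->idx order: the first entry is taken as
 * the group leader, the distance from it to the last entry gives
 * nr_members, and every member's ->leader pointer is made to point at
 * the leader.
 */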
void __perf_evlist__set_leader(struct list_head *list)
{
	struct perf_evsel *evsel, *leader;

	leader = list_entry(list->next, struct perf_evsel, node);
	evsel = list_entry(list->prev, struct perf_evsel, node);

	leader->nr_members = evsel->idx - leader->idx + 1;

	list_for_each_entry(evsel, list, node) {
		evsel->leader = leader;
	}
}

void perf_evlist__set_leader(struct perf_evlist *evlist)
{
	if (evlist->nr_entries) {
		evlist->nr_groups = evlist->nr_entries > 1 ? 1 : 0;
		__perf_evlist__set_leader(&evlist->entries);
	}
}

int perf_evlist__add_default(struct perf_evlist *evlist)
{
	struct perf_event_attr attr = {
		.type = PERF_TYPE_HARDWARE,
		.config = PERF_COUNT_HW_CPU_CYCLES,
	};
	struct perf_evsel *evsel;

	event_attr_init(&attr);

	evsel = perf_evsel__new(&attr, 0);
	if (evsel == NULL)
		goto error;

	/* use strdup() because free(evsel) assumes name is allocated */
	evsel->name = strdup("cycles");
	if (!evsel->name)
		goto error_free;

	perf_evlist__add(evlist, evsel);
	return 0;
error_free:
	perf_evsel__delete(evsel);
error:
	return -ENOMEM;
}

static int perf_evlist__add_attrs(struct perf_evlist *evlist,
				  struct perf_event_attr *attrs, size_t nr_attrs)
{
	struct perf_evsel *evsel, *n;
	LIST_HEAD(head);
	size_t i;

	for (i = 0; i < nr_attrs; i++) {
		evsel = perf_evsel__new(attrs + i, evlist->nr_entries + i);
		if (evsel == NULL)
			goto out_delete_partial_list;
		list_add_tail(&evsel->node, &head);
	}

	perf_evlist__splice_list_tail(evlist, &head, nr_attrs);

	return 0;

out_delete_partial_list:
	list_for_each_entry_safe(evsel, n, &head, node)
		perf_evsel__delete(evsel);
	return -1;
}

int __perf_evlist__add_default_attrs(struct perf_evlist *evlist,
				     struct perf_event_attr *attrs, size_t nr_attrs)
{
	size_t i;

	for (i = 0; i < nr_attrs; i++)
		event_attr_init(attrs + i);

	return perf_evlist__add_attrs(evlist, attrs, nr_attrs);
}

struct perf_evsel *
perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id)
{
	struct perf_evsel *evsel;

	list_for_each_entry(evsel, &evlist->entries, node) {
		if (evsel->attr.type == PERF_TYPE_TRACEPOINT &&
		    (int)evsel->attr.config == id)
			return evsel;
	}

	return NULL;
}

struct perf_evsel *
perf_evlist__find_tracepoint_by_name(struct perf_evlist *evlist,
				     const char *name)
{
	struct perf_evsel *evsel;

	list_for_each_entry(evsel, &evlist->entries, node) {
		if ((evsel->attr.type == PERF_TYPE_TRACEPOINT) &&
		    (strcmp(evsel->name, name) == 0))
			return evsel;
	}

	return NULL;
}

int perf_evlist__add_newtp(struct perf_evlist *evlist,
			   const char *sys, const char *name, void *handler)
{
	struct perf_evsel *evsel;

	evsel = perf_evsel__newtp(sys, name, evlist->nr_entries);
	if (evsel == NULL)
		return -1;

	evsel->handler.func = handler;
	perf_evlist__add(evlist, evsel);
	return 0;
}

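/*
 * Usage sketch (illustrative only; "sched:sched_switch" and the NULL
 * handler are stand-ins, not part of this file): adding a tracepoint
 * event with a callback attached to it.
 */
static void __maybe_unused evlist__add_newtp_sketch(struct perf_evlist *evlist)
{
	if (perf_evlist__add_newtp(evlist, "sched", "sched_switch", NULL) < 0)
		pr_debug("could not add the sched:sched_switch tracepoint\n");
}
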
void perf_evlist__disable(struct perf_evlist *evlist)
{
	int cpu, thread;
	struct perf_evsel *pos;
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = thread_map__nr(evlist->threads);

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		list_for_each_entry(pos, &evlist->entries, node) {
			if (!perf_evsel__is_group_leader(pos) || !pos->fd)
				continue;
			for (thread = 0; thread < nr_threads; thread++)
				ioctl(FD(pos, cpu, thread),
				      PERF_EVENT_IOC_DISABLE, 0);
		}
	}
}

void perf_evlist__enable(struct perf_evlist *evlist)
{
	int cpu, thread;
	struct perf_evsel *pos;
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = thread_map__nr(evlist->threads);

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		list_for_each_entry(pos, &evlist->entries, node) {
			if (!perf_evsel__is_group_leader(pos) || !pos->fd)
				continue;
			for (thread = 0; thread < nr_threads; thread++)
				ioctl(FD(pos, cpu, thread),
				      PERF_EVENT_IOC_ENABLE, 0);
		}
	}
}

int perf_evlist__disable_event(struct perf_evlist *evlist,
			       struct perf_evsel *evsel)
{
	int cpu, thread, err;

	if (!evsel->fd)
		return 0;

	for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
		for (thread = 0; thread < evlist->threads->nr; thread++) {
			err = ioctl(FD(evsel, cpu, thread),
				    PERF_EVENT_IOC_DISABLE, 0);
			if (err)
				return err;
		}
	}
	return 0;
}

int perf_evlist__enable_event(struct perf_evlist *evlist,
			      struct perf_evsel *evsel)
{
	int cpu, thread, err;

	if (!evsel->fd)
		return -EINVAL;

	for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
		for (thread = 0; thread < evlist->threads->nr; thread++) {
			err = ioctl(FD(evsel, cpu, thread),
				    PERF_EVENT_IOC_ENABLE, 0);
			if (err)
				return err;
		}
	}
	return 0;
}

static int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
{
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = thread_map__nr(evlist->threads);
	int nfds = nr_cpus * nr_threads * evlist->nr_entries;
	evlist->pollfd = malloc(sizeof(struct pollfd) * nfds);
	return evlist->pollfd != NULL ? 0 : -ENOMEM;
}

void perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd)
{
	fcntl(fd, F_SETFL, O_NONBLOCK);
	evlist->pollfd[evlist->nr_fds].fd = fd;
	evlist->pollfd[evlist->nr_fds].events = POLLIN;
	evlist->nr_fds++;
}

static void perf_evlist__id_hash(struct perf_evlist *evlist,
				 struct perf_evsel *evsel,
				 int cpu, int thread, u64 id)
{
	int hash;
	struct perf_sample_id *sid = SID(evsel, cpu, thread);

	sid->id = id;
	sid->evsel = evsel;
	hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS);
	hlist_add_head(&sid->node, &evlist->heads[hash]);
}

void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel,
			 int cpu, int thread, u64 id)
{
	perf_evlist__id_hash(evlist, evsel, cpu, thread, id);
	evsel->id[evsel->ids++] = id;
}

static int perf_evlist__id_add_fd(struct perf_evlist *evlist,
				  struct perf_evsel *evsel,
				  int cpu, int thread, int fd)
{
	u64 read_data[4] = { 0, };
	int id_idx = 1; /* The first entry is the counter value */
	u64 id;
	int ret;

	ret = ioctl(fd, PERF_EVENT_IOC_ID, &id);
	if (!ret)
		goto add;

	if (errno != ENOTTY)
		return -1;

	/* Legacy way to get the event id... All hail to old kernels! */

	/*
	 * This way does not work with group format read, so bail
	 * out in that case.
	 */
	if (perf_evlist__read_format(evlist) & PERF_FORMAT_GROUP)
		return -1;

	if (!(evsel->attr.read_format & PERF_FORMAT_ID) ||
	    read(fd, &read_data, sizeof(read_data)) == -1)
		return -1;

	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		++id_idx;
	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		++id_idx;

	id = read_data[id_idx];

 add:
	perf_evlist__id_add(evlist, evsel, cpu, thread, id);
	return 0;
}

struct perf_sample_id *perf_evlist__id2sid(struct perf_evlist *evlist, u64 id)
{
	struct hlist_head *head;
	struct perf_sample_id *sid;
	int hash;

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->heads[hash];

	hlist_for_each_entry(sid, head, node)
		if (sid->id == id)
			return sid;

	return NULL;
}

struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
{
	struct perf_sample_id *sid;

	if (evlist->nr_entries == 1)
		return perf_evlist__first(evlist);

	sid = perf_evlist__id2sid(evlist, id);
	if (sid)
		return sid->evsel;

	if (!perf_evlist__sample_id_all(evlist))
		return perf_evlist__first(evlist);

	return NULL;
}

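/*
 * Pull the event id out of a raw event: for PERF_RECORD_SAMPLE records
 * the id sits id_pos u64s from the start of the sample, while for all
 * other record types (when sample_id_all is in use) it sits is_pos u64s
 * back from the end of the appended sample id block.
 */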
static int perf_evlist__event2id(struct perf_evlist *evlist,
				 union perf_event *event, u64 *id)
{
	const u64 *array = event->sample.array;
	ssize_t n;

	n = (event->header.size - sizeof(event->header)) >> 3;

	if (event->header.type == PERF_RECORD_SAMPLE) {
		if (evlist->id_pos >= n)
			return -1;
		*id = array[evlist->id_pos];
	} else {
		if (evlist->is_pos > n)
			return -1;
		n -= evlist->is_pos;
		*id = array[n];
	}
	return 0;
}

static struct perf_evsel *perf_evlist__event2evsel(struct perf_evlist *evlist,
						   union perf_event *event)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	struct hlist_head *head;
	struct perf_sample_id *sid;
	int hash;
	u64 id;

	if (evlist->nr_entries == 1)
		return first;

	if (!first->attr.sample_id_all &&
	    event->header.type != PERF_RECORD_SAMPLE)
		return first;

	if (perf_evlist__event2id(evlist, event, &id))
		return NULL;

	/* Synthesized events have an id of zero */
	if (!id)
		return first;

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->heads[hash];

	hlist_for_each_entry(sid, head, node) {
		if (sid->id == id)
			return sid->evsel;
	}
	return NULL;
}

union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
{
	struct perf_mmap *md = &evlist->mmap[idx];
	unsigned int head = perf_mmap__read_head(md);
	unsigned int old = md->prev;
	unsigned char *data = md->base + page_size;
	union perf_event *event = NULL;

	if (evlist->overwrite) {
		/*
		 * If we're further behind than half the buffer, there's a chance
		 * the writer will bite our tail and mess up the samples under us.
		 *
		 * If we somehow ended up ahead of the head, we got messed up.
		 *
		 * In either case, truncate and restart at head.
		 */
		int diff = head - old;
		if (diff > md->mask / 2 || diff < 0) {
			fprintf(stderr, "WARNING: failed to keep up with mmap data.\n");

			/*
			 * head points to a known good entry, start there.
			 */
			old = head;
		}
	}

	if (old != head) {
		size_t size;

		event = (union perf_event *)&data[old & md->mask];
		size = event->header.size;

		/*
		 * Event straddles the mmap boundary -- header should always
		 * be inside due to u64 alignment of output.
		 */
		if ((old & md->mask) + size != ((old + size) & md->mask)) {
			unsigned int offset = old;
			unsigned int len = min(sizeof(*event), size), cpy;
			void *dst = &md->event_copy;

			do {
				cpy = min(md->mask + 1 - (offset & md->mask), len);
				memcpy(dst, &data[offset & md->mask], cpy);
				offset += cpy;
				dst += cpy;
				len -= cpy;
			} while (len);

			event = &md->event_copy;
		}

		old += size;
	}

	md->prev = old;

	if (!evlist->overwrite)
		perf_mmap__write_tail(md, old);

	return event;
}

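/*
 * Consumer sketch (illustrative only): draining all ring buffers, e.g.
 * after poll() reports activity on evlist->pollfd.
 * perf_evlist__mmap_read() returns NULL once a map index has no more
 * events queued.
 */
static void __maybe_unused evlist__drain_sketch(struct perf_evlist *evlist)
{
	union perf_event *event;
	int i;

	for (i = 0; i < evlist->nr_mmaps; i++) {
		while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) {
			/* process the event here, e.g. via perf_evlist__parse_sample() */
		}
	}
}
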
static void __perf_evlist__munmap(struct perf_evlist *evlist, int idx)
{
	if (evlist->mmap[idx].base != NULL) {
		munmap(evlist->mmap[idx].base, evlist->mmap_len);
		evlist->mmap[idx].base = NULL;
	}
}

void perf_evlist__munmap(struct perf_evlist *evlist)
{
	int i;

	for (i = 0; i < evlist->nr_mmaps; i++)
		__perf_evlist__munmap(evlist, i);

	free(evlist->mmap);
	evlist->mmap = NULL;
}

static int perf_evlist__alloc_mmap(struct perf_evlist *evlist)
{
	evlist->nr_mmaps = cpu_map__nr(evlist->cpus);
	if (cpu_map__empty(evlist->cpus))
		evlist->nr_mmaps = thread_map__nr(evlist->threads);
	evlist->mmap = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
	return evlist->mmap != NULL ? 0 : -ENOMEM;
}

static int __perf_evlist__mmap(struct perf_evlist *evlist,
			       int idx, int prot, int mask, int fd)
{
	evlist->mmap[idx].prev = 0;
	evlist->mmap[idx].mask = mask;
	evlist->mmap[idx].base = mmap(NULL, evlist->mmap_len, prot,
				      MAP_SHARED, fd, 0);
	if (evlist->mmap[idx].base == MAP_FAILED) {
		evlist->mmap[idx].base = NULL;
		return -1;
	}

	perf_evlist__add_pollfd(evlist, fd);
	return 0;
}

static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist, int prot, int mask)
{
	struct perf_evsel *evsel;
	int cpu, thread;
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = thread_map__nr(evlist->threads);

	pr_debug2("perf event ring buffer mmapped per cpu\n");
	for (cpu = 0; cpu < nr_cpus; cpu++) {
		int output = -1;

		for (thread = 0; thread < nr_threads; thread++) {
			list_for_each_entry(evsel, &evlist->entries, node) {
				int fd = FD(evsel, cpu, thread);

				if (output == -1) {
					output = fd;
					if (__perf_evlist__mmap(evlist, cpu,
								prot, mask, output) < 0)
						goto out_unmap;
				} else {
					if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0)
						goto out_unmap;
				}

				if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
				    perf_evlist__id_add_fd(evlist, evsel, cpu, thread, fd) < 0)
					goto out_unmap;
			}
		}
	}

	return 0;

out_unmap:
	for (cpu = 0; cpu < nr_cpus; cpu++)
		__perf_evlist__munmap(evlist, cpu);
	return -1;
}

static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist, int prot, int mask)
{
	struct perf_evsel *evsel;
	int thread;
	int nr_threads = thread_map__nr(evlist->threads);

	pr_debug2("perf event ring buffer mmapped per thread\n");
	for (thread = 0; thread < nr_threads; thread++) {
		int output = -1;

		list_for_each_entry(evsel, &evlist->entries, node) {
			int fd = FD(evsel, 0, thread);

			if (output == -1) {
				output = fd;
				if (__perf_evlist__mmap(evlist, thread,
							prot, mask, output) < 0)
					goto out_unmap;
			} else {
				if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0)
					goto out_unmap;
			}

			if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
			    perf_evlist__id_add_fd(evlist, evsel, 0, thread, fd) < 0)
				goto out_unmap;
		}
	}

	return 0;

out_unmap:
	for (thread = 0; thread < nr_threads; thread++)
		__perf_evlist__munmap(evlist, thread);
	return -1;
}

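/*
 * Worked example (assuming 4 KiB pages): the UINT_MAX default below
 * becomes (512 * 1024) / 4096 = 128 data pages, so the size returned is
 * (128 + 1) * 4096 bytes -- the extra page is the kernel's control page,
 * which holds the ring buffer's head and tail pointers.
 */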
static size_t perf_evlist__mmap_size(unsigned long pages)
{
	/* 512 kiB: default amount of unprivileged mlocked memory */
	if (pages == UINT_MAX)
		pages = (512 * 1024) / page_size;
	else if (!is_power_of_2(pages))
		return 0;

	return (pages + 1) * page_size;
}

int perf_evlist__parse_mmap_pages(const struct option *opt, const char *str,
				  int unset __maybe_unused)
{
	unsigned int pages, val, *mmap_pages = opt->value;
	size_t size;
	static struct parse_tag tags[] = {
		{ .tag = 'B', .mult = 1 },
		{ .tag = 'K', .mult = 1 << 10 },
		{ .tag = 'M', .mult = 1 << 20 },
		{ .tag = 'G', .mult = 1 << 30 },
		{ .tag = 0 },
	};

	val = parse_tag_value(str, tags);
	if (val != (unsigned int) -1) {
		/* we got file size value */
		pages = PERF_ALIGN(val, page_size) / page_size;
		if (!is_power_of_2(pages)) {
			pages = next_pow2(pages);
			pr_info("rounding mmap pages size to %u (%u pages)\n",
				pages * page_size, pages);
		}
	} else {
		/* we got pages count value */
		char *eptr;
		pages = strtoul(str, &eptr, 10);
		if (*eptr != '\0') {
			pr_err("failed to parse --mmap_pages/-m value\n");
			return -1;
		}
	}

	size = perf_evlist__mmap_size(pages);
	if (!size) {
		pr_err("--mmap_pages/-m value must be a power of two.");
		return -1;
	}

	*mmap_pages = pages;
	return 0;
}

/**
 * perf_evlist__mmap - Create per cpu maps to receive events.
 * @evlist: list of events
 * @pages: map length in pages
 * @overwrite: overwrite older events?
 *
 * If overwrite is false the user needs to signal event consumption using:
 *
 *	struct perf_mmap *m = &evlist->mmap[cpu];
 *	unsigned int head = perf_mmap__read_head(m);
 *
 *	perf_mmap__write_tail(m, head)
 *
 * Using perf_evlist__mmap_read() does this automatically.
 */
int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
		      bool overwrite)
{
	struct perf_evsel *evsel;
	const struct cpu_map *cpus = evlist->cpus;
	const struct thread_map *threads = evlist->threads;
	int prot = PROT_READ | (overwrite ? 0 : PROT_WRITE), mask;

	if (evlist->mmap == NULL && perf_evlist__alloc_mmap(evlist) < 0)
		return -ENOMEM;

	if (evlist->pollfd == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
		return -ENOMEM;

	evlist->overwrite = overwrite;
	evlist->mmap_len = perf_evlist__mmap_size(pages);
	pr_debug("mmap size %luB\n", evlist->mmap_len);
	mask = evlist->mmap_len - page_size - 1;

	list_for_each_entry(evsel, &evlist->entries, node) {
		if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
		    evsel->sample_id == NULL &&
		    perf_evsel__alloc_id(evsel, cpu_map__nr(cpus), threads->nr) < 0)
			return -ENOMEM;
	}

	if (cpu_map__empty(cpus))
		return perf_evlist__mmap_per_thread(evlist, prot, mask);

	return perf_evlist__mmap_per_cpu(evlist, prot, mask);
}

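/*
 * Setup sketch (illustrative only): opening the counters and mapping
 * their ring buffers with the UINT_MAX default size, the usual sequence
 * before entering a poll()/perf_evlist__mmap_read() loop.
 */
static int __maybe_unused evlist__open_and_mmap_sketch(struct perf_evlist *evlist)
{
	if (perf_evlist__open(evlist) < 0)
		return -1;

	if (perf_evlist__mmap(evlist, UINT_MAX, false) < 0) {
		perf_evlist__close(evlist);
		return -1;
	}

	return 0;
}
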
int perf_evlist__create_maps(struct perf_evlist *evlist,
			     struct perf_target *target)
{
	evlist->threads = thread_map__new_str(target->pid, target->tid,
					      target->uid);

	if (evlist->threads == NULL)
		return -1;

	if (perf_target__has_task(target))
		evlist->cpus = cpu_map__dummy_new();
	else if (!perf_target__has_cpu(target) && !target->uses_mmap)
		evlist->cpus = cpu_map__dummy_new();
	else
		evlist->cpus = cpu_map__new(target->cpu_list);

	if (evlist->cpus == NULL)
		goto out_delete_threads;

	return 0;

out_delete_threads:
	thread_map__delete(evlist->threads);
	return -1;
}

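/*
 * Setup sketch (illustrative only; the field values are assumptions
 * about struct perf_target, mirroring how the builtin tools initialize
 * it): build cpu/thread maps for system-wide monitoring via the mmap
 * path.
 */
static int __maybe_unused evlist__create_maps_sketch(struct perf_evlist *evlist)
{
	struct perf_target target = {
		.uid	   = UINT_MAX,	/* no uid filtering */
		.uses_mmap = true,
	};

	return perf_evlist__create_maps(evlist, &target);
}
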
void perf_evlist__delete_maps(struct perf_evlist *evlist)
{
	cpu_map__delete(evlist->cpus);
	thread_map__delete(evlist->threads);
	evlist->cpus = NULL;
	evlist->threads = NULL;
}

int perf_evlist__apply_filters(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	int err = 0;
	const int ncpus = cpu_map__nr(evlist->cpus),
		  nthreads = thread_map__nr(evlist->threads);

	list_for_each_entry(evsel, &evlist->entries, node) {
		if (evsel->filter == NULL)
			continue;

		err = perf_evsel__set_filter(evsel, ncpus, nthreads, evsel->filter);
		if (err)
			break;
	}

	return err;
}

int perf_evlist__set_filter(struct perf_evlist *evlist, const char *filter)
{
	struct perf_evsel *evsel;
	int err = 0;
	const int ncpus = cpu_map__nr(evlist->cpus),
		  nthreads = thread_map__nr(evlist->threads);

	list_for_each_entry(evsel, &evlist->entries, node) {
		err = perf_evsel__set_filter(evsel, ncpus, nthreads, filter);
		if (err)
			break;
	}

	return err;
}

bool perf_evlist__valid_sample_type(struct perf_evlist *evlist)
{
	struct perf_evsel *pos;

	if (evlist->nr_entries == 1)
		return true;

	if (evlist->id_pos < 0 || evlist->is_pos < 0)
		return false;

	list_for_each_entry(pos, &evlist->entries, node) {
		if (pos->id_pos != evlist->id_pos ||
		    pos->is_pos != evlist->is_pos)
			return false;
	}

	return true;
}

u64 __perf_evlist__combined_sample_type(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	if (evlist->combined_sample_type)
		return evlist->combined_sample_type;

	list_for_each_entry(evsel, &evlist->entries, node)
		evlist->combined_sample_type |= evsel->attr.sample_type;

	return evlist->combined_sample_type;
}

u64 perf_evlist__combined_sample_type(struct perf_evlist *evlist)
{
	evlist->combined_sample_type = 0;
	return __perf_evlist__combined_sample_type(evlist);
}

bool perf_evlist__valid_read_format(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;
	u64 read_format = first->attr.read_format;
	u64 sample_type = first->attr.sample_type;

	list_for_each_entry_continue(pos, &evlist->entries, node) {
		if (read_format != pos->attr.read_format)
			return false;
	}

	/* PERF_SAMPLE_READ implies PERF_FORMAT_ID. */
	if ((sample_type & PERF_SAMPLE_READ) &&
	    !(read_format & PERF_FORMAT_ID)) {
		return false;
	}

	return true;
}

u64 perf_evlist__read_format(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	return first->attr.read_format;
}

u16 perf_evlist__id_hdr_size(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	struct perf_sample *data;
	u64 sample_type;
	u16 size = 0;

	if (!first->attr.sample_id_all)
		goto out;

	sample_type = first->attr.sample_type;

	if (sample_type & PERF_SAMPLE_TID)
		size += sizeof(data->tid) * 2;

	if (sample_type & PERF_SAMPLE_TIME)
		size += sizeof(data->time);

	if (sample_type & PERF_SAMPLE_ID)
		size += sizeof(data->id);

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		size += sizeof(data->stream_id);

	if (sample_type & PERF_SAMPLE_CPU)
		size += sizeof(data->cpu) * 2;

	if (sample_type & PERF_SAMPLE_IDENTIFIER)
		size += sizeof(data->id);
out:
	return size;
}

bool perf_evlist__valid_sample_id_all(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;

	list_for_each_entry_continue(pos, &evlist->entries, node) {
		if (first->attr.sample_id_all != pos->attr.sample_id_all)
			return false;
	}

	return true;
}

bool perf_evlist__sample_id_all(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	return first->attr.sample_id_all;
}

void perf_evlist__set_selected(struct perf_evlist *evlist,
			       struct perf_evsel *evsel)
{
	evlist->selected = evsel;
}

void perf_evlist__close(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	int ncpus = cpu_map__nr(evlist->cpus);
	int nthreads = thread_map__nr(evlist->threads);

	list_for_each_entry_reverse(evsel, &evlist->entries, node)
		perf_evsel__close(evsel, ncpus, nthreads);
}

int perf_evlist__open(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	int err;

	perf_evlist__update_id_pos(evlist);

	list_for_each_entry(evsel, &evlist->entries, node) {
		err = perf_evsel__open(evsel, evlist->cpus, evlist->threads);
		if (err < 0)
			goto out_err;
	}

	return 0;
out_err:
	perf_evlist__close(evlist);
	errno = -err;
	return err;
}

int perf_evlist__prepare_workload(struct perf_evlist *evlist,
				  struct perf_target *target,
				  const char *argv[], bool pipe_output,
				  bool want_signal)
{
	int child_ready_pipe[2], go_pipe[2];
	char bf;

	if (pipe(child_ready_pipe) < 0) {
		perror("failed to create 'ready' pipe");
		return -1;
	}

	if (pipe(go_pipe) < 0) {
		perror("failed to create 'go' pipe");
		goto out_close_ready_pipe;
	}

	evlist->workload.pid = fork();
	if (evlist->workload.pid < 0) {
		perror("failed to fork");
		goto out_close_pipes;
	}

	if (!evlist->workload.pid) {
		if (pipe_output)
			dup2(2, 1);

		signal(SIGTERM, SIG_DFL);

		close(child_ready_pipe[0]);
		close(go_pipe[1]);
		fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);

		/*
		 * Tell the parent we're ready to go
		 */
		close(child_ready_pipe[1]);

		/*
		 * Wait until the parent tells us to go.
		 */
		if (read(go_pipe[0], &bf, 1) == -1)
			perror("unable to read pipe");

		execvp(argv[0], (char **)argv);

		perror(argv[0]);
		if (want_signal)
			kill(getppid(), SIGUSR1);
		exit(-1);
	}

	if (perf_target__none(target))
		evlist->threads->map[0] = evlist->workload.pid;

	close(child_ready_pipe[1]);
	close(go_pipe[0]);
	/*
	 * wait for child to settle
	 */
	if (read(child_ready_pipe[0], &bf, 1) == -1) {
		perror("unable to read pipe");
		goto out_close_pipes;
	}

	fcntl(go_pipe[1], F_SETFD, FD_CLOEXEC);
	evlist->workload.cork_fd = go_pipe[1];
	close(child_ready_pipe[0]);
	return 0;

out_close_pipes:
	close(go_pipe[0]);
	close(go_pipe[1]);
out_close_ready_pipe:
	close(child_ready_pipe[0]);
	close(child_ready_pipe[1]);
	return -1;
}

int perf_evlist__start_workload(struct perf_evlist *evlist)
{
	if (evlist->workload.cork_fd > 0) {
		char bf = 0;
		int ret;
		/*
		 * Remove the cork, let it rip!
		 */
		ret = write(evlist->workload.cork_fd, &bf, 1);
		if (ret < 0)
			perror("unable to write to pipe");

		close(evlist->workload.cork_fd);
		return ret;
	}

	return 0;
}

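/*
 * Workload sketch (illustrative only; "sleep 1" is just a stand-in
 * command): fork the workload corked, set up the events, then uncork it
 * so the counters see the exec from the very start.
 */
static int __maybe_unused evlist__workload_sketch(struct perf_evlist *evlist,
						  struct perf_target *target)
{
	const char *argv[] = { "sleep", "1", NULL };

	if (perf_evlist__prepare_workload(evlist, target, argv, false, false))
		return -1;

	/* ... perf_evlist__open(), perf_evlist__mmap(), etc. go here ... */

	return perf_evlist__start_workload(evlist);
}
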
int perf_evlist__parse_sample(struct perf_evlist *evlist, union perf_event *event,
			      struct perf_sample *sample)
{
	struct perf_evsel *evsel = perf_evlist__event2evsel(evlist, event);

	if (!evsel)
		return -EFAULT;
	return perf_evsel__parse_sample(evsel, event, sample);
}

size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp)
{
	struct perf_evsel *evsel;
	size_t printed = 0;

	list_for_each_entry(evsel, &evlist->entries, node) {
		printed += fprintf(fp, "%s%s", evsel->idx ? ", " : "",
				   perf_evsel__name(evsel));
	}

	return printed + fprintf(fp, "\n");
}