/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */
#include "util.h"
#include <api/fs/fs.h>
#include <poll.h>
#include "cpumap.h"
#include "thread_map.h"
#include "target.h"
#include "evlist.h"
#include "evsel.h"
#include "debug.h"
#include <unistd.h>

#include "parse-events.h"
#include <subcmd/parse-options.h>

#include <sys/mman.h>

#include <linux/bitops.h>
#include <linux/hash.h>
#include <linux/log2.h>
#include <linux/err.h>

static void perf_mmap__munmap(struct perf_mmap *map);
static void perf_mmap__put(struct perf_mmap *map);

#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
#define SID(e, x, y) xyarray__entry(e->sample_id, x, y)

void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
		       struct thread_map *threads)
{
	int i;

	for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i)
		INIT_HLIST_HEAD(&evlist->heads[i]);
	INIT_LIST_HEAD(&evlist->entries);
	perf_evlist__set_maps(evlist, cpus, threads);
	fdarray__init(&evlist->pollfd, 64);
	evlist->workload.pid = -1;
}

struct perf_evlist *perf_evlist__new(void)
{
	struct perf_evlist *evlist = zalloc(sizeof(*evlist));

	if (evlist != NULL)
		perf_evlist__init(evlist, NULL, NULL);

	return evlist;
}

struct perf_evlist *perf_evlist__new_default(void)
{
	struct perf_evlist *evlist = perf_evlist__new();

	if (evlist && perf_evlist__add_default(evlist)) {
		perf_evlist__delete(evlist);
		evlist = NULL;
	}

	return evlist;
}

struct perf_evlist *perf_evlist__new_dummy(void)
{
	struct perf_evlist *evlist = perf_evlist__new();

	if (evlist && perf_evlist__add_dummy(evlist)) {
		perf_evlist__delete(evlist);
		evlist = NULL;
	}

	return evlist;
}

/**
 * perf_evlist__set_id_pos - set the positions of event ids.
 * @evlist: selected event list
 *
 * Events with compatible sample types all have the same id_pos
 * and is_pos. For convenience, put a copy on evlist.
 */
void perf_evlist__set_id_pos(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);

	evlist->id_pos = first->id_pos;
	evlist->is_pos = first->is_pos;
}

static void perf_evlist__update_id_pos(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	evlist__for_each_entry(evlist, evsel)
		perf_evsel__calc_id_pos(evsel);

	perf_evlist__set_id_pos(evlist);
}

static void perf_evlist__purge(struct perf_evlist *evlist)
{
	struct perf_evsel *pos, *n;

	evlist__for_each_entry_safe(evlist, n, pos) {
		list_del_init(&pos->node);
		pos->evlist = NULL;
		perf_evsel__delete(pos);
	}

	evlist->nr_entries = 0;
}

void perf_evlist__exit(struct perf_evlist *evlist)
{
	zfree(&evlist->mmap);
	zfree(&evlist->backward_mmap);
	fdarray__exit(&evlist->pollfd);
}

void perf_evlist__delete(struct perf_evlist *evlist)
{
	if (evlist == NULL)
		return;

	perf_evlist__munmap(evlist);
	perf_evlist__close(evlist);
	cpu_map__put(evlist->cpus);
	thread_map__put(evlist->threads);
	evlist->cpus = NULL;
	evlist->threads = NULL;
	perf_evlist__purge(evlist);
	perf_evlist__exit(evlist);
	free(evlist);
}

static void __perf_evlist__propagate_maps(struct perf_evlist *evlist,
					  struct perf_evsel *evsel)
{
	/*
	 * We already have cpus for evsel (via PMU sysfs) so
	 * keep it, if there's no target cpu list defined.
	 */
	if (!evsel->own_cpus || evlist->has_user_cpus) {
		cpu_map__put(evsel->cpus);
		evsel->cpus = cpu_map__get(evlist->cpus);
	} else if (evsel->cpus != evsel->own_cpus) {
		cpu_map__put(evsel->cpus);
		evsel->cpus = cpu_map__get(evsel->own_cpus);
	}

	thread_map__put(evsel->threads);
	evsel->threads = thread_map__get(evlist->threads);
}

static void perf_evlist__propagate_maps(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	evlist__for_each_entry(evlist, evsel)
		__perf_evlist__propagate_maps(evlist, evsel);
}

void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry)
{
	entry->evlist = evlist;
	list_add_tail(&entry->node, &evlist->entries);
	entry->idx = evlist->nr_entries;
	entry->tracking = !entry->idx;

	if (!evlist->nr_entries++)
		perf_evlist__set_id_pos(evlist);

	__perf_evlist__propagate_maps(evlist, entry);
}

void perf_evlist__remove(struct perf_evlist *evlist, struct perf_evsel *evsel)
{
	evsel->evlist = NULL;
	list_del_init(&evsel->node);
	evlist->nr_entries -= 1;
}

void perf_evlist__splice_list_tail(struct perf_evlist *evlist,
				   struct list_head *list)
{
	struct perf_evsel *evsel, *temp;

	__evlist__for_each_entry_safe(list, temp, evsel) {
		list_del_init(&evsel->node);
		perf_evlist__add(evlist, evsel);
	}
}

void __perf_evlist__set_leader(struct list_head *list)
{
	struct perf_evsel *evsel, *leader;

	leader = list_entry(list->next, struct perf_evsel, node);
	evsel = list_entry(list->prev, struct perf_evsel, node);

	leader->nr_members = evsel->idx - leader->idx + 1;

	__evlist__for_each_entry(list, evsel) {
		evsel->leader = leader;
	}
}

void perf_evlist__set_leader(struct perf_evlist *evlist)
{
	if (evlist->nr_entries) {
		evlist->nr_groups = evlist->nr_entries > 1 ? 1 : 0;
		__perf_evlist__set_leader(&evlist->entries);
	}
}

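/*
 * Probe the highest precise_ip level the running kernel accepts for this
 * attr: try to open the event and back off one level at a time until the
 * open succeeds or precise_ip reaches 0.
 */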
void perf_event_attr__set_max_precise_ip(struct perf_event_attr *attr)
{
	attr->precise_ip = 3;

	while (attr->precise_ip != 0) {
		int fd = sys_perf_event_open(attr, 0, -1, -1, 0);
		if (fd != -1) {
			close(fd);
			break;
		}
		--attr->precise_ip;
	}
}

int perf_evlist__add_default(struct perf_evlist *evlist)
{
	struct perf_event_attr attr = {
		.type	= PERF_TYPE_HARDWARE,
		.config = PERF_COUNT_HW_CPU_CYCLES,
	};
	struct perf_evsel *evsel;

	event_attr_init(&attr);

	perf_event_attr__set_max_precise_ip(&attr);

	evsel = perf_evsel__new(&attr);
	if (evsel == NULL)
		goto error;

	/* use asprintf() because free(evsel) assumes name is allocated */
	if (asprintf(&evsel->name, "cycles%.*s",
		     attr.precise_ip ? attr.precise_ip + 1 : 0, ":ppp") < 0)
		goto error_free;

	perf_evlist__add(evlist, evsel);
	return 0;
error_free:
	perf_evsel__delete(evsel);
error:
	return -ENOMEM;
}

int perf_evlist__add_dummy(struct perf_evlist *evlist)
{
	struct perf_event_attr attr = {
		.type	= PERF_TYPE_SOFTWARE,
		.config = PERF_COUNT_SW_DUMMY,
		.size	= sizeof(attr), /* to capture ABI version */
	};
	struct perf_evsel *evsel = perf_evsel__new(&attr);

	if (evsel == NULL)
		return -ENOMEM;

	perf_evlist__add(evlist, evsel);
	return 0;
}

static int perf_evlist__add_attrs(struct perf_evlist *evlist,
				  struct perf_event_attr *attrs, size_t nr_attrs)
{
	struct perf_evsel *evsel, *n;
	LIST_HEAD(head);
	size_t i;

	for (i = 0; i < nr_attrs; i++) {
		evsel = perf_evsel__new_idx(attrs + i, evlist->nr_entries + i);
		if (evsel == NULL)
			goto out_delete_partial_list;
		list_add_tail(&evsel->node, &head);
	}

	perf_evlist__splice_list_tail(evlist, &head);

	return 0;

out_delete_partial_list:
	__evlist__for_each_entry_safe(&head, n, evsel)
		perf_evsel__delete(evsel);
	return -1;
}

int __perf_evlist__add_default_attrs(struct perf_evlist *evlist,
				     struct perf_event_attr *attrs, size_t nr_attrs)
{
	size_t i;

	for (i = 0; i < nr_attrs; i++)
		event_attr_init(attrs + i);

	return perf_evlist__add_attrs(evlist, attrs, nr_attrs);
}

struct perf_evsel *
perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id)
{
	struct perf_evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->attr.type == PERF_TYPE_TRACEPOINT &&
		    (int)evsel->attr.config == id)
			return evsel;
	}

	return NULL;
}

struct perf_evsel *
perf_evlist__find_tracepoint_by_name(struct perf_evlist *evlist,
				     const char *name)
{
	struct perf_evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if ((evsel->attr.type == PERF_TYPE_TRACEPOINT) &&
		    (strcmp(evsel->name, name) == 0))
			return evsel;
	}

	return NULL;
}

int perf_evlist__add_newtp(struct perf_evlist *evlist,
			   const char *sys, const char *name, void *handler)
{
	struct perf_evsel *evsel = perf_evsel__newtp(sys, name);

	if (IS_ERR(evsel))
		return -1;

	evsel->handler = handler;
	perf_evlist__add(evlist, evsel);
	return 0;
}

static int perf_evlist__nr_threads(struct perf_evlist *evlist,
				   struct perf_evsel *evsel)
{
	if (evsel->system_wide)
		return 1;
	else
		return thread_map__nr(evlist->threads);
}

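/*
 * Note: the enable/disable helpers below only toggle group leaders that
 * have actually been opened (pos->fd); group members are assumed to
 * follow their leader.
 */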
void perf_evlist__disable(struct perf_evlist *evlist)
{
	struct perf_evsel *pos;

	evlist__for_each_entry(evlist, pos) {
		if (!perf_evsel__is_group_leader(pos) || !pos->fd)
			continue;
		perf_evsel__disable(pos);
	}

	evlist->enabled = false;
}

void perf_evlist__enable(struct perf_evlist *evlist)
{
	struct perf_evsel *pos;

	evlist__for_each_entry(evlist, pos) {
		if (!perf_evsel__is_group_leader(pos) || !pos->fd)
			continue;
		perf_evsel__enable(pos);
	}

	evlist->enabled = true;
}

void perf_evlist__toggle_enable(struct perf_evlist *evlist)
{
	(evlist->enabled ? perf_evlist__disable : perf_evlist__enable)(evlist);
}

static int perf_evlist__enable_event_cpu(struct perf_evlist *evlist,
					 struct perf_evsel *evsel, int cpu)
{
	int thread, err;
	int nr_threads = perf_evlist__nr_threads(evlist, evsel);

	if (!evsel->fd)
		return -EINVAL;

	for (thread = 0; thread < nr_threads; thread++) {
		err = ioctl(FD(evsel, cpu, thread),
			    PERF_EVENT_IOC_ENABLE, 0);
		if (err)
			return err;
	}
	return 0;
}

static int perf_evlist__enable_event_thread(struct perf_evlist *evlist,
					    struct perf_evsel *evsel,
					    int thread)
{
	int cpu, err;
	int nr_cpus = cpu_map__nr(evlist->cpus);

	if (!evsel->fd)
		return -EINVAL;

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		err = ioctl(FD(evsel, cpu, thread), PERF_EVENT_IOC_ENABLE, 0);
		if (err)
			return err;
	}
	return 0;
}

int perf_evlist__enable_event_idx(struct perf_evlist *evlist,
				  struct perf_evsel *evsel, int idx)
{
	bool per_cpu_mmaps = !cpu_map__empty(evlist->cpus);

	if (per_cpu_mmaps)
		return perf_evlist__enable_event_cpu(evlist, evsel, idx);
	else
		return perf_evlist__enable_event_thread(evlist, evsel, idx);
}

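/*
 * Size the pollfd array for all the fds we may poll: one entry per cpu
 * for system wide events, nr_cpus * nr_threads otherwise.
 */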
int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
{
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = thread_map__nr(evlist->threads);
	int nfds = 0;
	struct perf_evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->system_wide)
			nfds += nr_cpus;
		else
			nfds += nr_cpus * nr_threads;
	}

	if (fdarray__available_entries(&evlist->pollfd) < nfds &&
	    fdarray__grow(&evlist->pollfd, nfds) < 0)
		return -ENOMEM;

	return 0;
}

static int __perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd,
				     struct perf_mmap *map, short revent)
{
	int pos = fdarray__add(&evlist->pollfd, fd, revent | POLLERR | POLLHUP);
	/*
	 * Save the idx so that when we filter out fds POLLHUP'ed we can
	 * close the associated evlist->mmap[] entry.
	 */
	if (pos >= 0) {
		evlist->pollfd.priv[pos].ptr = map;

		fcntl(fd, F_SETFL, O_NONBLOCK);
	}

	return pos;
}

int perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd)
{
	return __perf_evlist__add_pollfd(evlist, fd, NULL, POLLIN);
}

static void perf_evlist__munmap_filtered(struct fdarray *fda, int fd,
					 void *arg __maybe_unused)
{
	struct perf_mmap *map = fda->priv[fd].ptr;

	if (map)
		perf_mmap__put(map);
}

int perf_evlist__filter_pollfd(struct perf_evlist *evlist, short revents_and_mask)
{
	return fdarray__filter(&evlist->pollfd, revents_and_mask,
			       perf_evlist__munmap_filtered, NULL);
}

int perf_evlist__poll(struct perf_evlist *evlist, int timeout)
{
	return fdarray__poll(&evlist->pollfd, timeout);
}

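/*
 * Hash the sample id into evlist->heads so that perf_evlist__id2sid() and
 * perf_evlist__id2evsel() can later map an event id back to its evsel.
 */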
static void perf_evlist__id_hash(struct perf_evlist *evlist,
				 struct perf_evsel *evsel,
				 int cpu, int thread, u64 id)
{
	int hash;
	struct perf_sample_id *sid = SID(evsel, cpu, thread);

	sid->id = id;
	sid->evsel = evsel;
	hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS);
	hlist_add_head(&sid->node, &evlist->heads[hash]);
}

void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel,
			 int cpu, int thread, u64 id)
{
	perf_evlist__id_hash(evlist, evsel, cpu, thread, id);
	evsel->id[evsel->ids++] = id;
}

int perf_evlist__id_add_fd(struct perf_evlist *evlist,
			   struct perf_evsel *evsel,
			   int cpu, int thread, int fd)
{
	u64 read_data[4] = { 0, };
	int id_idx = 1; /* The first entry is the counter value */
	u64 id;
	int ret;

	ret = ioctl(fd, PERF_EVENT_IOC_ID, &id);
	if (!ret)
		goto add;

	if (errno != ENOTTY)
		return -1;

	/* Legacy way to get event id.. All hail to old kernels! */

	/*
	 * This way does not work with group format read, so bail
	 * out in that case.
	 */
	if (perf_evlist__read_format(evlist) & PERF_FORMAT_GROUP)
		return -1;

	if (!(evsel->attr.read_format & PERF_FORMAT_ID) ||
	    read(fd, &read_data, sizeof(read_data)) == -1)
		return -1;

	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		++id_idx;
	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		++id_idx;

	id = read_data[id_idx];

 add:
	perf_evlist__id_add(evlist, evsel, cpu, thread, id);
	return 0;
}

static void perf_evlist__set_sid_idx(struct perf_evlist *evlist,
				     struct perf_evsel *evsel, int idx, int cpu,
				     int thread)
{
	struct perf_sample_id *sid = SID(evsel, cpu, thread);
	sid->idx = idx;
	if (evlist->cpus && cpu >= 0)
		sid->cpu = evlist->cpus->map[cpu];
	else
		sid->cpu = -1;
	if (!evsel->system_wide && evlist->threads && thread >= 0)
		sid->tid = thread_map__pid(evlist->threads, thread);
	else
		sid->tid = -1;
}

struct perf_sample_id *perf_evlist__id2sid(struct perf_evlist *evlist, u64 id)
{
	struct hlist_head *head;
	struct perf_sample_id *sid;
	int hash;

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->heads[hash];

	hlist_for_each_entry(sid, head, node)
		if (sid->id == id)
			return sid;

	return NULL;
}

struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
{
	struct perf_sample_id *sid;

	if (evlist->nr_entries == 1 || !id)
		return perf_evlist__first(evlist);

	sid = perf_evlist__id2sid(evlist, id);
	if (sid)
		return sid->evsel;

	if (!perf_evlist__sample_id_all(evlist))
		return perf_evlist__first(evlist);

	return NULL;
}

struct perf_evsel *perf_evlist__id2evsel_strict(struct perf_evlist *evlist,
						u64 id)
{
	struct perf_sample_id *sid;

	if (!id)
		return NULL;

	sid = perf_evlist__id2sid(evlist, id);
	if (sid)
		return sid->evsel;

	return NULL;
}

static int perf_evlist__event2id(struct perf_evlist *evlist,
				 union perf_event *event, u64 *id)
{
	const u64 *array = event->sample.array;
	ssize_t n;

	n = (event->header.size - sizeof(event->header)) >> 3;

	if (event->header.type == PERF_RECORD_SAMPLE) {
		if (evlist->id_pos >= n)
			return -1;
		*id = array[evlist->id_pos];
	} else {
		if (evlist->is_pos > n)
			return -1;
		n -= evlist->is_pos;
		*id = array[n];
	}
	return 0;
}

struct perf_evsel *perf_evlist__event2evsel(struct perf_evlist *evlist,
					    union perf_event *event)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	struct hlist_head *head;
	struct perf_sample_id *sid;
	int hash;
	u64 id;

	if (evlist->nr_entries == 1)
		return first;

	if (!first->attr.sample_id_all &&
	    event->header.type != PERF_RECORD_SAMPLE)
		return first;

	if (perf_evlist__event2id(evlist, event, &id))
		return NULL;

	/* Synthesized events have an id of zero */
	if (!id)
		return first;

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->heads[hash];

	hlist_for_each_entry(sid, head, node) {
		if (sid->id == id)
			return sid->evsel;
	}
	return NULL;
}

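/*
 * Pause or resume the kernel's output to the backward (overwritable) ring
 * buffers via PERF_EVENT_IOC_PAUSE_OUTPUT, so they can be read without
 * racing with the writer.
 */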
static int perf_evlist__set_paused(struct perf_evlist *evlist, bool value)
{
	int i;

	if (!evlist->backward_mmap)
		return 0;

	for (i = 0; i < evlist->nr_mmaps; i++) {
		int fd = evlist->backward_mmap[i].fd;
		int err;

		if (fd < 0)
			continue;
		err = ioctl(fd, PERF_EVENT_IOC_PAUSE_OUTPUT, value ? 1 : 0);
		if (err)
			return err;
	}
	return 0;
}

int perf_evlist__pause(struct perf_evlist *evlist)
{
	return perf_evlist__set_paused(evlist, true);
}

int perf_evlist__resume(struct perf_evlist *evlist)
{
	return perf_evlist__set_paused(evlist, false);
}

/* When check_messup is true, 'end' must point to a good entry */
static union perf_event *
perf_mmap__read(struct perf_mmap *md, bool check_messup, u64 start,
		u64 end, u64 *prev)
{
	unsigned char *data = md->base + page_size;
	union perf_event *event = NULL;
	int diff = end - start;

	if (check_messup) {
		/*
		 * If we're further behind than half the buffer, there's a chance
		 * the writer will bite our tail and mess up the samples under us.
		 *
		 * If we somehow ended up ahead of the 'end', we got messed up.
		 *
		 * In either case, truncate and restart at 'end'.
		 */
		if (diff > md->mask / 2 || diff < 0) {
			fprintf(stderr, "WARNING: failed to keep up with mmap data.\n");

			/*
			 * 'end' points to a known good entry, start there.
			 */
			start = end;
			diff = 0;
		}
	}

	if (diff >= (int)sizeof(event->header)) {
		size_t size;

		event = (union perf_event *)&data[start & md->mask];
		size = event->header.size;

		if (size < sizeof(event->header) || diff < (int)size) {
			event = NULL;
			goto broken_event;
		}

		/*
		 * Event straddles the mmap boundary -- header should always
		 * be inside due to u64 alignment of output.
		 */
		if ((start & md->mask) + size != ((start + size) & md->mask)) {
			unsigned int offset = start;
			unsigned int len = min(sizeof(*event), size), cpy;
			void *dst = md->event_copy;

			do {
				cpy = min(md->mask + 1 - (offset & md->mask), len);
				memcpy(dst, &data[offset & md->mask], cpy);
				offset += cpy;
				dst += cpy;
				len -= cpy;
			} while (len);

			event = (union perf_event *) md->event_copy;
		}

		start += size;
	}

broken_event:
	if (prev)
		*prev = start;

	return event;
}

union perf_event *perf_mmap__read_forward(struct perf_mmap *md, bool check_messup)
{
	u64 head;
	u64 old = md->prev;

	/*
	 * Check if event was unmapped due to a POLLHUP/POLLERR.
	 */
	if (!atomic_read(&md->refcnt))
		return NULL;

	head = perf_mmap__read_head(md);

	return perf_mmap__read(md, check_messup, old, head, &md->prev);
}

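/*
 * Read one event from a backward (overwrite) ring buffer. The caller is
 * expected to have paused the ring buffer (see perf_evlist__pause())
 * before reading.
 */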
union perf_event *
perf_mmap__read_backward(struct perf_mmap *md)
{
	u64 head, end;
	u64 start = md->prev;

	/*
	 * Check if event was unmapped due to a POLLHUP/POLLERR.
	 */
	if (!atomic_read(&md->refcnt))
		return NULL;

	head = perf_mmap__read_head(md);
	if (!head)
		return NULL;

	/*
	 * The 'head' pointer starts from 0. The kernel subtracts sizeof(record)
	 * from it each time it writes a record, so in fact 'head' is
	 * negative. The 'end' pointer is made manually by adding the size of
	 * the ring buffer to the 'head' pointer, meaning the valid data we can
	 * read is the whole ring buffer. If 'end' is positive, the ring
	 * buffer has not fully filled, so we must adjust 'end' to 0.
	 *
	 * However, since both 'head' and 'end' are unsigned, we can't
	 * simply compare 'end' against 0. Here we compare '-head' with
	 * the size of the ring buffer, where -head is the number of bytes
	 * the kernel has written to the ring buffer.
	 */
	if (-head < (u64)(md->mask + 1))
		end = 0;
	else
		end = head + md->mask + 1;

	return perf_mmap__read(md, false, start, end, &md->prev);
}

union perf_event *perf_evlist__mmap_read_forward(struct perf_evlist *evlist, int idx)
{
	struct perf_mmap *md = &evlist->mmap[idx];

	/*
	 * Checking for messup is required for a forward overwritable ring
	 * buffer: the memory pointed to by md->prev can be overwritten in
	 * this case. It is not needed for a read-write ring buffer: the
	 * kernel stops outputting when it hits md->prev (perf_mmap__consume()).
	 */
	return perf_mmap__read_forward(md, evlist->overwrite);
}

union perf_event *perf_evlist__mmap_read_backward(struct perf_evlist *evlist, int idx)
{
	struct perf_mmap *md = &evlist->mmap[idx];

	/*
	 * No need to check for messup on a backward ring buffer:
	 * we can always read arbitrarily long data from a backward
	 * ring buffer unless we forget to pause it before reading.
	 */
	return perf_mmap__read_backward(md);
}

union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
{
	return perf_evlist__mmap_read_forward(evlist, idx);
}

void perf_mmap__read_catchup(struct perf_mmap *md)
{
	u64 head;

	if (!atomic_read(&md->refcnt))
		return;

	head = perf_mmap__read_head(md);
	md->prev = head;
}

void perf_evlist__mmap_read_catchup(struct perf_evlist *evlist, int idx)
{
	perf_mmap__read_catchup(&evlist->mmap[idx]);
}

static bool perf_mmap__empty(struct perf_mmap *md)
{
	return perf_mmap__read_head(md) == md->prev && !md->auxtrace_mmap.base;
}

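/*
 * The mmaps are reference counted: perf_mmap__put() unmaps the ring
 * buffer once the last reference is dropped.
 */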
static void perf_mmap__get(struct perf_mmap *map)
{
	atomic_inc(&map->refcnt);
}

static void perf_mmap__put(struct perf_mmap *md)
{
	BUG_ON(md->base && atomic_read(&md->refcnt) == 0);

	if (atomic_dec_and_test(&md->refcnt))
		perf_mmap__munmap(md);
}

void perf_mmap__consume(struct perf_mmap *md, bool overwrite)
{
	if (!overwrite) {
		u64 old = md->prev;

		perf_mmap__write_tail(md, old);
	}

	if (atomic_read(&md->refcnt) == 1 && perf_mmap__empty(md))
		perf_mmap__put(md);
}

void perf_evlist__mmap_consume(struct perf_evlist *evlist, int idx)
{
	perf_mmap__consume(&evlist->mmap[idx], evlist->overwrite);
}

int __weak auxtrace_mmap__mmap(struct auxtrace_mmap *mm __maybe_unused,
			       struct auxtrace_mmap_params *mp __maybe_unused,
			       void *userpg __maybe_unused,
			       int fd __maybe_unused)
{
	return 0;
}

void __weak auxtrace_mmap__munmap(struct auxtrace_mmap *mm __maybe_unused)
{
}

void __weak auxtrace_mmap_params__init(
			struct auxtrace_mmap_params *mp __maybe_unused,
			off_t auxtrace_offset __maybe_unused,
			unsigned int auxtrace_pages __maybe_unused,
			bool auxtrace_overwrite __maybe_unused)
{
}

void __weak auxtrace_mmap_params__set_idx(
			struct auxtrace_mmap_params *mp __maybe_unused,
			struct perf_evlist *evlist __maybe_unused,
			int idx __maybe_unused,
			bool per_cpu __maybe_unused)
{
}

static void perf_mmap__munmap(struct perf_mmap *map)
{
	if (map->base != NULL) {
		munmap(map->base, perf_mmap__mmap_len(map));
		map->base = NULL;
		map->fd = -1;
		atomic_set(&map->refcnt, 0);
	}
	auxtrace_mmap__munmap(&map->auxtrace_mmap);
}

static void perf_evlist__munmap_nofree(struct perf_evlist *evlist)
{
	int i;

	if (evlist->mmap)
		for (i = 0; i < evlist->nr_mmaps; i++)
			perf_mmap__munmap(&evlist->mmap[i]);

	if (evlist->backward_mmap)
		for (i = 0; i < evlist->nr_mmaps; i++)
			perf_mmap__munmap(&evlist->backward_mmap[i]);
}

void perf_evlist__munmap(struct perf_evlist *evlist)
{
	perf_evlist__munmap_nofree(evlist);
	zfree(&evlist->mmap);
	zfree(&evlist->backward_mmap);
}

static struct perf_mmap *perf_evlist__alloc_mmap(struct perf_evlist *evlist)
{
	int i;
	struct perf_mmap *map;

	evlist->nr_mmaps = cpu_map__nr(evlist->cpus);
	if (cpu_map__empty(evlist->cpus))
		evlist->nr_mmaps = thread_map__nr(evlist->threads);
	map = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
	if (!map)
		return NULL;

	for (i = 0; i < evlist->nr_mmaps; i++)
		map[i].fd = -1;
	return map;
}

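/*
 * Parameters shared by all the per-cpu/per-thread mmaps: protection
 * flags, size mask and the auxtrace area setup.
 */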
struct mmap_params {
	int prot;
	int mask;
	struct auxtrace_mmap_params auxtrace_mp;
};

static int perf_mmap__mmap(struct perf_mmap *map,
			   struct mmap_params *mp, int fd)
{
	/*
	 * The last one will be done at perf_evlist__mmap_consume(), so that we
	 * make sure we don't prevent tools from consuming every last event in
	 * the ring buffer.
	 *
	 * I.e. we can get the POLLHUP meaning that the fd doesn't exist
	 * anymore, but the last events for it are still in the ring buffer,
	 * waiting to be consumed.
	 *
	 * Tools can choose to ignore this at their own discretion, but the
	 * evlist layer can't just drop it when filtering events in
	 * perf_evlist__filter_pollfd().
	 */
	atomic_set(&map->refcnt, 2);
	map->prev = 0;
	map->mask = mp->mask;
	map->base = mmap(NULL, perf_mmap__mmap_len(map), mp->prot,
			 MAP_SHARED, fd, 0);
	if (map->base == MAP_FAILED) {
		pr_debug2("failed to mmap perf event ring buffer, error %d\n",
			  errno);
		map->base = NULL;
		return -1;
	}
	map->fd = fd;

	if (auxtrace_mmap__mmap(&map->auxtrace_mmap,
				&mp->auxtrace_mp, map->base, fd))
		return -1;

	return 0;
}

static bool
perf_evlist__should_poll(struct perf_evlist *evlist __maybe_unused,
			 struct perf_evsel *evsel)
{
	if (evsel->attr.write_backward)
		return false;
	return true;
}

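/*
 * mmap (or redirect via PERF_EVENT_IOC_SET_OUTPUT) the fds of every evsel
 * for this cpu/thread. Events opened with attr.write_backward go into the
 * separate evlist->backward_mmap array, allocated on first use.
 */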
static int perf_evlist__mmap_per_evsel(struct perf_evlist *evlist, int idx,
				       struct mmap_params *mp, int cpu,
				       int thread, int *_output, int *_output_backward)
{
	struct perf_evsel *evsel;
	int revent;

	evlist__for_each_entry(evlist, evsel) {
		struct perf_mmap *maps = evlist->mmap;
		int *output = _output;
		int fd;

		if (evsel->attr.write_backward) {
			output = _output_backward;
			maps = evlist->backward_mmap;

			if (!maps) {
				maps = perf_evlist__alloc_mmap(evlist);
				if (!maps)
					return -1;
				evlist->backward_mmap = maps;
			}
		}

		if (evsel->system_wide && thread)
			continue;

		fd = FD(evsel, cpu, thread);

		if (*output == -1) {
			*output = fd;

			if (perf_mmap__mmap(&maps[idx], mp, *output) < 0)
				return -1;
		} else {
			if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, *output) != 0)
				return -1;

			perf_mmap__get(&maps[idx]);
		}

		revent = perf_evlist__should_poll(evlist, evsel) ? POLLIN : 0;

		/*
		 * The system_wide flag causes a selected event to be opened
		 * always without a pid. Consequently it will never get a
		 * POLLHUP, but it is used for tracking in combination with
		 * other events, so it should not need to be polled anyway.
		 * Therefore don't add it for polling.
		 */
		if (!evsel->system_wide &&
		    __perf_evlist__add_pollfd(evlist, fd, &maps[idx], revent) < 0) {
			perf_mmap__put(&maps[idx]);
			return -1;
		}

		if (evsel->attr.read_format & PERF_FORMAT_ID) {
			if (perf_evlist__id_add_fd(evlist, evsel, cpu, thread,
						   fd) < 0)
				return -1;
			perf_evlist__set_sid_idx(evlist, evsel, idx, cpu,
						 thread);
		}
	}

	return 0;
}

static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist,
				     struct mmap_params *mp)
{
	int cpu, thread;
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = thread_map__nr(evlist->threads);

	pr_debug2("perf event ring buffer mmapped per cpu\n");
	for (cpu = 0; cpu < nr_cpus; cpu++) {
		int output = -1;
		int output_backward = -1;

		auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, cpu,
					      true);

		for (thread = 0; thread < nr_threads; thread++) {
			if (perf_evlist__mmap_per_evsel(evlist, cpu, mp, cpu,
							thread, &output, &output_backward))
				goto out_unmap;
		}
	}

	return 0;

out_unmap:
	perf_evlist__munmap_nofree(evlist);
	return -1;
}

static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist,
					struct mmap_params *mp)
{
	int thread;
	int nr_threads = thread_map__nr(evlist->threads);

	pr_debug2("perf event ring buffer mmapped per thread\n");
	for (thread = 0; thread < nr_threads; thread++) {
		int output = -1;
		int output_backward = -1;

		auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, thread,
					      false);

		if (perf_evlist__mmap_per_evsel(evlist, thread, mp, 0, thread,
						&output, &output_backward))
			goto out_unmap;
	}

	return 0;

out_unmap:
	perf_evlist__munmap_nofree(evlist);
	return -1;
}

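/*
 * Turn the kernel/perf_event_mlock_kb sysctl into a default number of
 * ring buffer pages, rounded down to a power of two.
 */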
unsigned long perf_event_mlock_kb_in_pages(void)
{
	unsigned long pages;
	int max;

	if (sysctl__read_int("kernel/perf_event_mlock_kb", &max) < 0) {
		/*
		 * Pick a once upon a time good value, i.e. things look
		 * strange since we can't read a sysctl value, but let's not
		 * die yet...
		 */
		max = 512;
	} else {
		max -= (page_size / 1024);
	}

	pages = (max * 1024) / page_size;
	if (!is_power_of_2(pages))
		pages = rounddown_pow_of_two(pages);

	return pages;
}

static size_t perf_evlist__mmap_size(unsigned long pages)
{
	if (pages == UINT_MAX)
		pages = perf_event_mlock_kb_in_pages();
	else if (!is_power_of_2(pages))
		return 0;

	return (pages + 1) * page_size;
}

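/*
 * Parse a --mmap_pages/-m style argument: either a size with a B/K/M/G
 * suffix or a plain page count, rounded up to the next power of two and
 * validated against [min, max].
 */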
static long parse_pages_arg(const char *str, unsigned long min,
			    unsigned long max)
{
	unsigned long pages, val;
	static struct parse_tag tags[] = {
		{ .tag = 'B', .mult = 1       },
		{ .tag = 'K', .mult = 1 << 10 },
		{ .tag = 'M', .mult = 1 << 20 },
		{ .tag = 'G', .mult = 1 << 30 },
		{ .tag = 0 },
	};

	if (str == NULL)
		return -EINVAL;

	val = parse_tag_value(str, tags);
	if (val != (unsigned long) -1) {
		/* we got file size value */
		pages = PERF_ALIGN(val, page_size) / page_size;
	} else {
		/* we got pages count value */
		char *eptr;
		pages = strtoul(str, &eptr, 10);
		if (*eptr != '\0')
			return -EINVAL;
	}

	if (pages == 0 && min == 0) {
		/* leave number of pages at 0 */
	} else if (!is_power_of_2(pages)) {
		/* round pages up to next power of 2 */
		pages = roundup_pow_of_two(pages);
		if (!pages)
			return -EINVAL;
		pr_info("rounding mmap pages size to %lu bytes (%lu pages)\n",
			pages * page_size, pages);
	}

	if (pages > max)
		return -EINVAL;

	return pages;
}

int __perf_evlist__parse_mmap_pages(unsigned int *mmap_pages, const char *str)
{
	unsigned long max = UINT_MAX;
	long pages;

	if (max > SIZE_MAX / page_size)
		max = SIZE_MAX / page_size;

	pages = parse_pages_arg(str, 1, max);
	if (pages < 0) {
		pr_err("Invalid argument for --mmap_pages/-m\n");
		return -1;
	}

	*mmap_pages = pages;
	return 0;
}

int perf_evlist__parse_mmap_pages(const struct option *opt, const char *str,
				  int unset __maybe_unused)
{
	return __perf_evlist__parse_mmap_pages(opt->value, str);
}

Adrian Hunterc83fa7f2013-10-18 15:29:12 +03001274/**
Adrian Hunter718c6022015-04-09 18:53:42 +03001275 * perf_evlist__mmap_ex - Create mmaps to receive events.
Adrian Hunterc83fa7f2013-10-18 15:29:12 +03001276 * @evlist: list of events
1277 * @pages: map length in pages
1278 * @overwrite: overwrite older events?
Adrian Hunter718c6022015-04-09 18:53:42 +03001279 * @auxtrace_pages - auxtrace map length in pages
1280 * @auxtrace_overwrite - overwrite older auxtrace data?
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -02001281 *
Adrian Hunterc83fa7f2013-10-18 15:29:12 +03001282 * If @overwrite is %false the user needs to signal event consumption using
1283 * perf_mmap__write_tail(). Using perf_evlist__mmap_read() does this
1284 * automatically.
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -02001285 *
Adrian Hunter718c6022015-04-09 18:53:42 +03001286 * Similarly, if @auxtrace_overwrite is %false the user needs to signal data
1287 * consumption using auxtrace_mmap__write_tail().
1288 *
Adrian Hunterc83fa7f2013-10-18 15:29:12 +03001289 * Return: %0 on success, negative error code otherwise.
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -02001290 */
Adrian Hunter718c6022015-04-09 18:53:42 +03001291int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages,
1292 bool overwrite, unsigned int auxtrace_pages,
1293 bool auxtrace_overwrite)
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -02001294{
Arnaldo Carvalho de Meloaece9482011-05-15 09:39:00 -03001295 struct perf_evsel *evsel;
Arnaldo Carvalho de Melo7e2ed092011-01-30 11:59:43 -02001296 const struct cpu_map *cpus = evlist->cpus;
1297 const struct thread_map *threads = evlist->threads;
Adrian Huntera8a8f3e2014-07-14 13:02:52 +03001298 struct mmap_params mp = {
1299 .prot = PROT_READ | (overwrite ? 0 : PROT_WRITE),
1300 };
Arnaldo Carvalho de Melo50a682c2011-11-09 09:10:47 -02001301
Wang Nan8db6d6b2016-07-14 08:34:35 +00001302 if (!evlist->mmap)
1303 evlist->mmap = perf_evlist__alloc_mmap(evlist);
1304 if (!evlist->mmap)
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -02001305 return -ENOMEM;
1306
Arnaldo Carvalho de Melo1b853372014-09-03 18:02:59 -03001307 if (evlist->pollfd.entries == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -02001308 return -ENOMEM;
1309
1310 evlist->overwrite = overwrite;
Jiri Olsa994a1f72013-09-01 12:36:12 +02001311 evlist->mmap_len = perf_evlist__mmap_size(pages);
Adrian Hunter2af68ef2013-10-18 15:29:07 +03001312 pr_debug("mmap size %zuB\n", evlist->mmap_len);
Adrian Huntera8a8f3e2014-07-14 13:02:52 +03001313 mp.mask = evlist->mmap_len - page_size - 1;
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -02001314
Adrian Hunter718c6022015-04-09 18:53:42 +03001315 auxtrace_mmap_params__init(&mp.auxtrace_mp, evlist->mmap_len,
1316 auxtrace_pages, auxtrace_overwrite);
1317
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03001318 evlist__for_each_entry(evlist, evsel) {
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -02001319 if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
Arnaldo Carvalho de Meloa91e5432011-03-10 11:15:54 -03001320 evsel->sample_id == NULL &&
Arnaldo Carvalho de Meloa14bb7a2012-09-26 12:41:14 -03001321 perf_evsel__alloc_id(evsel, cpu_map__nr(cpus), threads->nr) < 0)
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -02001322 return -ENOMEM;
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -02001323 }
1324
Sukadev Bhattiproluec1e7e42013-05-22 17:42:38 -07001325 if (cpu_map__empty(cpus))
Adrian Huntera8a8f3e2014-07-14 13:02:52 +03001326 return perf_evlist__mmap_per_thread(evlist, &mp);
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -02001327
Adrian Huntera8a8f3e2014-07-14 13:02:52 +03001328 return perf_evlist__mmap_per_cpu(evlist, &mp);
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -02001329}
Arnaldo Carvalho de Melo7e2ed092011-01-30 11:59:43 -02001330
Adrian Hunter718c6022015-04-09 18:53:42 +03001331int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
1332 bool overwrite)
1333{
1334 return perf_evlist__mmap_ex(evlist, pages, overwrite, 0, false);
1335}
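
/*
 * Minimal setup sketch for the two entry points above, assuming this tree's
 * internal headers; 'target' and 'pages' are caller state and error handling
 * is trimmed:
 *
 *	struct perf_evlist *evlist = perf_evlist__new_default();
 *
 *	if (perf_evlist__create_maps(evlist, &target) < 0)
 *		goto out;
 *	if (perf_evlist__open(evlist) < 0)
 *		goto out;
 *	if (perf_evlist__mmap(evlist, pages, false) < 0)
 *		goto out;
 *
 * With overwrite == false, events are then consumed via
 * perf_evlist__mmap_read(), which updates the tail pointer automatically.
 */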
1336
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03001337int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target)
Arnaldo Carvalho de Melo7e2ed092011-01-30 11:59:43 -02001338{
Adrian Hunter74bfd2b2015-09-08 10:58:57 +03001339 struct cpu_map *cpus;
1340 struct thread_map *threads;
Arnaldo Carvalho de Melo7e2ed092011-01-30 11:59:43 -02001341
Adrian Hunter74bfd2b2015-09-08 10:58:57 +03001342 threads = thread_map__new_str(target->pid, target->tid, target->uid);
1343
1344 if (!threads)
Arnaldo Carvalho de Melo7e2ed092011-01-30 11:59:43 -02001345 return -1;
1346
Dongsheng Yang9c105fb2013-12-04 17:56:40 -05001347 if (target__uses_dummy_map(target))
Adrian Hunter74bfd2b2015-09-08 10:58:57 +03001348 cpus = cpu_map__dummy_new();
Namhyung Kim879d77d2012-05-16 18:45:48 +09001349 else
Adrian Hunter74bfd2b2015-09-08 10:58:57 +03001350 cpus = cpu_map__new(target->cpu_list);
Arnaldo Carvalho de Melo7e2ed092011-01-30 11:59:43 -02001351
Adrian Hunter74bfd2b2015-09-08 10:58:57 +03001352 if (!cpus)
Arnaldo Carvalho de Melo7e2ed092011-01-30 11:59:43 -02001353 goto out_delete_threads;
1354
Adrian Hunterec9a77a2015-09-08 10:58:52 +03001355 evlist->has_user_cpus = !!target->cpu_list;
1356
Adrian Hunter74bfd2b2015-09-08 10:58:57 +03001357 perf_evlist__set_maps(evlist, cpus, threads);
Adrian Hunterd5bc0562015-09-08 10:58:51 +03001358
1359 return 0;
Arnaldo Carvalho de Melo7e2ed092011-01-30 11:59:43 -02001360
1361out_delete_threads:
Adrian Hunter74bfd2b2015-09-08 10:58:57 +03001362 thread_map__put(threads);
Arnaldo Carvalho de Melo7e2ed092011-01-30 11:59:43 -02001363 return -1;
1364}
1365
Adrian Hunterd5bc0562015-09-08 10:58:51 +03001366void perf_evlist__set_maps(struct perf_evlist *evlist, struct cpu_map *cpus,
1367 struct thread_map *threads)
Jiri Olsa3de5cfb2015-07-21 14:31:30 +02001368{
Adrian Hunter934e0f22015-09-08 10:58:56 +03001369 /*
1370 * Allow for the possibility that one or another of the maps isn't being
1371 * changed i.e. don't put it. Note we are assuming the maps that are
1372 * being applied are brand new and evlist is taking ownership of the
1373 * original reference count of 1. If that is not the case it is up to
1374 * the caller to increase the reference count.
1375 */
1376 if (cpus != evlist->cpus) {
Jiri Olsa3de5cfb2015-07-21 14:31:30 +02001377 cpu_map__put(evlist->cpus);
Arnaldo Carvalho de Meloa55e5662016-02-17 10:57:19 -03001378 evlist->cpus = cpu_map__get(cpus);
Adrian Hunter934e0f22015-09-08 10:58:56 +03001379 }
Jiri Olsa3de5cfb2015-07-21 14:31:30 +02001380
Adrian Hunter934e0f22015-09-08 10:58:56 +03001381 if (threads != evlist->threads) {
Jiri Olsa3de5cfb2015-07-21 14:31:30 +02001382 thread_map__put(evlist->threads);
Arnaldo Carvalho de Meloa55e5662016-02-17 10:57:19 -03001383 evlist->threads = thread_map__get(threads);
Adrian Hunter934e0f22015-09-08 10:58:56 +03001384 }
Jiri Olsa3de5cfb2015-07-21 14:31:30 +02001385
Adrian Hunterec9a77a2015-09-08 10:58:52 +03001386 perf_evlist__propagate_maps(evlist);
Jiri Olsa3de5cfb2015-07-21 14:31:30 +02001387}
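
/*
 * Reference counting sketch for perf_evlist__set_maps(): it consumes the
 * reference returned by the map constructors, so the plain case is a simple
 * hand-off; a caller that keeps using a map afterwards must take its own
 * reference first (names are illustrative):
 *
 *	struct cpu_map *cpus = cpu_map__new(target->cpu_list);
 *	struct thread_map *threads = thread_map__new_dummy();
 *
 *	cpu_map__get(cpus);			(only if 'cpus' is used later)
 *	perf_evlist__set_maps(evlist, cpus, threads);
 *	...
 *	cpu_map__put(cpus);			(drops that extra reference)
 */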
1388
Arnaldo Carvalho de Melo22c8a372016-04-11 18:37:45 -03001389void __perf_evlist__set_sample_bit(struct perf_evlist *evlist,
1390 enum perf_event_sample_format bit)
1391{
1392 struct perf_evsel *evsel;
1393
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03001394 evlist__for_each_entry(evlist, evsel)
Arnaldo Carvalho de Melo22c8a372016-04-11 18:37:45 -03001395 __perf_evsel__set_sample_bit(evsel, bit);
1396}
1397
1398void __perf_evlist__reset_sample_bit(struct perf_evlist *evlist,
1399 enum perf_event_sample_format bit)
1400{
1401 struct perf_evsel *evsel;
1402
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03001403 evlist__for_each_entry(evlist, evsel)
Arnaldo Carvalho de Melo22c8a372016-04-11 18:37:45 -03001404 __perf_evsel__reset_sample_bit(evsel, bit);
1405}
1406
Arnaldo Carvalho de Melo23d4aad2015-03-24 19:23:47 -03001407int perf_evlist__apply_filters(struct perf_evlist *evlist, struct perf_evsel **err_evsel)
Frederic Weisbecker0a102472011-02-26 04:51:54 +01001408{
Frederic Weisbecker0a102472011-02-26 04:51:54 +01001409 struct perf_evsel *evsel;
Arnaldo Carvalho de Melo745cefc2012-09-26 15:07:39 -03001410 int err = 0;
1411 const int ncpus = cpu_map__nr(evlist->cpus),
Namhyung Kimb3a319d2013-03-11 16:43:14 +09001412 nthreads = thread_map__nr(evlist->threads);
Frederic Weisbecker0a102472011-02-26 04:51:54 +01001413
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03001414 evlist__for_each_entry(evlist, evsel) {
Arnaldo Carvalho de Melo745cefc2012-09-26 15:07:39 -03001415 if (evsel->filter == NULL)
Frederic Weisbecker0a102472011-02-26 04:51:54 +01001416 continue;
Arnaldo Carvalho de Melo745cefc2012-09-26 15:07:39 -03001417
Kan Liangd988d5e2015-08-21 02:23:14 -04001418 /*
	1419		 * filters only work for tracepoint events, which don't have a cpu limit,
	1420		 * so the evlist and evsel cpu maps should always be the same.
1421 */
Arnaldo Carvalho de Melof47805a2015-07-03 15:53:49 -03001422 err = perf_evsel__apply_filter(evsel, ncpus, nthreads, evsel->filter);
Arnaldo Carvalho de Melo23d4aad2015-03-24 19:23:47 -03001423 if (err) {
1424 *err_evsel = evsel;
Arnaldo Carvalho de Melo745cefc2012-09-26 15:07:39 -03001425 break;
Arnaldo Carvalho de Melo23d4aad2015-03-24 19:23:47 -03001426 }
Frederic Weisbecker0a102472011-02-26 04:51:54 +01001427 }
1428
Arnaldo Carvalho de Melo745cefc2012-09-26 15:07:39 -03001429 return err;
1430}
1431
1432int perf_evlist__set_filter(struct perf_evlist *evlist, const char *filter)
1433{
1434 struct perf_evsel *evsel;
1435 int err = 0;
Arnaldo Carvalho de Melo745cefc2012-09-26 15:07:39 -03001436
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03001437 evlist__for_each_entry(evlist, evsel) {
Wang Nanfdf14722016-02-26 09:31:53 +00001438 if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
1439 continue;
1440
Arnaldo Carvalho de Melo94ad89b2015-07-03 17:42:03 -03001441 err = perf_evsel__set_filter(evsel, filter);
Arnaldo Carvalho de Melo745cefc2012-09-26 15:07:39 -03001442 if (err)
1443 break;
1444 }
1445
1446 return err;
Frederic Weisbecker0a102472011-02-26 04:51:54 +01001447}
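
/*
 * Filter flow sketch: perf_evlist__set_filter() only records the string on
 * the tracepoint evsels; it reaches the kernel when
 * perf_evlist__apply_filters() runs, which requires the events to have been
 * opened since it acts on the per event file descriptors.  Illustrative
 * sequence:
 *
 *	perf_evlist__set_filter(evlist, "common_pid != 1");
 *	...
 *	perf_evlist__open(evlist);
 *	if (perf_evlist__apply_filters(evlist, &failed_evsel))
 *		pr_err("failed to set filter on %s\n",
 *		       perf_evsel__name(failed_evsel));
 */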
Frederic Weisbecker74429962011-05-21 17:49:00 +02001448
Arnaldo Carvalho de Melobe199ad2015-02-21 11:33:47 -08001449int perf_evlist__set_filter_pids(struct perf_evlist *evlist, size_t npids, pid_t *pids)
Arnaldo Carvalho de Melocfd70a22015-02-21 10:09:55 -08001450{
1451 char *filter;
Arnaldo Carvalho de Melobe199ad2015-02-21 11:33:47 -08001452 int ret = -1;
1453 size_t i;
Arnaldo Carvalho de Melocfd70a22015-02-21 10:09:55 -08001454
Arnaldo Carvalho de Melobe199ad2015-02-21 11:33:47 -08001455 for (i = 0; i < npids; ++i) {
1456 if (i == 0) {
1457 if (asprintf(&filter, "common_pid != %d", pids[i]) < 0)
1458 return -1;
1459 } else {
1460 char *tmp;
1461
1462 if (asprintf(&tmp, "%s && common_pid != %d", filter, pids[i]) < 0)
1463 goto out_free;
1464
1465 free(filter);
1466 filter = tmp;
1467 }
1468 }
Arnaldo Carvalho de Melocfd70a22015-02-21 10:09:55 -08001469
1470 ret = perf_evlist__set_filter(evlist, filter);
Arnaldo Carvalho de Melobe199ad2015-02-21 11:33:47 -08001471out_free:
Arnaldo Carvalho de Melocfd70a22015-02-21 10:09:55 -08001472 free(filter);
1473 return ret;
1474}
1475
Arnaldo Carvalho de Melobe199ad2015-02-21 11:33:47 -08001476int perf_evlist__set_filter_pid(struct perf_evlist *evlist, pid_t pid)
1477{
1478 return perf_evlist__set_filter_pids(evlist, 1, &pid);
1479}
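
/*
 * Worked example for the pid filter helpers above: with npids == 2 and
 * pids[] = { 10, 20 }, perf_evlist__set_filter_pids() builds
 *
 *	"common_pid != 10 && common_pid != 20"
 *
 * and hands it to perf_evlist__set_filter(), i.e. events generated by those
 * pids are dropped by every tracepoint event in the evlist.
 */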
1480
Arnaldo Carvalho de Melo0c21f732012-08-14 16:42:15 -03001481bool perf_evlist__valid_sample_type(struct perf_evlist *evlist)
Frederic Weisbecker74429962011-05-21 17:49:00 +02001482{
Adrian Hunter75562572013-08-27 11:23:09 +03001483 struct perf_evsel *pos;
Arnaldo Carvalho de Meloc2a70652011-06-02 11:04:54 -03001484
Adrian Hunter75562572013-08-27 11:23:09 +03001485 if (evlist->nr_entries == 1)
1486 return true;
1487
1488 if (evlist->id_pos < 0 || evlist->is_pos < 0)
1489 return false;
1490
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03001491 evlist__for_each_entry(evlist, pos) {
Adrian Hunter75562572013-08-27 11:23:09 +03001492 if (pos->id_pos != evlist->id_pos ||
1493 pos->is_pos != evlist->is_pos)
Arnaldo Carvalho de Meloc2a70652011-06-02 11:04:54 -03001494 return false;
Frederic Weisbecker74429962011-05-21 17:49:00 +02001495 }
1496
Arnaldo Carvalho de Meloc2a70652011-06-02 11:04:54 -03001497 return true;
1498}
1499
Adrian Hunter75562572013-08-27 11:23:09 +03001500u64 __perf_evlist__combined_sample_type(struct perf_evlist *evlist)
Arnaldo Carvalho de Meloc2a70652011-06-02 11:04:54 -03001501{
Adrian Hunter75562572013-08-27 11:23:09 +03001502 struct perf_evsel *evsel;
1503
1504 if (evlist->combined_sample_type)
1505 return evlist->combined_sample_type;
1506
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03001507 evlist__for_each_entry(evlist, evsel)
Adrian Hunter75562572013-08-27 11:23:09 +03001508 evlist->combined_sample_type |= evsel->attr.sample_type;
1509
1510 return evlist->combined_sample_type;
1511}
1512
1513u64 perf_evlist__combined_sample_type(struct perf_evlist *evlist)
1514{
1515 evlist->combined_sample_type = 0;
1516 return __perf_evlist__combined_sample_type(evlist);
Arnaldo Carvalho de Meloc2a70652011-06-02 11:04:54 -03001517}
1518
Andi Kleen98df8582015-07-18 08:24:47 -07001519u64 perf_evlist__combined_branch_type(struct perf_evlist *evlist)
1520{
1521 struct perf_evsel *evsel;
1522 u64 branch_type = 0;
1523
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03001524 evlist__for_each_entry(evlist, evsel)
Andi Kleen98df8582015-07-18 08:24:47 -07001525 branch_type |= evsel->attr.branch_sample_type;
1526 return branch_type;
1527}
1528
Jiri Olsa9ede4732012-10-10 17:38:13 +02001529bool perf_evlist__valid_read_format(struct perf_evlist *evlist)
1530{
1531 struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;
1532 u64 read_format = first->attr.read_format;
1533 u64 sample_type = first->attr.sample_type;
1534
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03001535 evlist__for_each_entry(evlist, pos) {
Jiri Olsa9ede4732012-10-10 17:38:13 +02001536 if (read_format != pos->attr.read_format)
1537 return false;
1538 }
1539
	1540	/* PERF_SAMPLE_READ implies PERF_FORMAT_ID. */
1541 if ((sample_type & PERF_SAMPLE_READ) &&
1542 !(read_format & PERF_FORMAT_ID)) {
1543 return false;
1544 }
1545
1546 return true;
1547}
1548
1549u64 perf_evlist__read_format(struct perf_evlist *evlist)
1550{
1551 struct perf_evsel *first = perf_evlist__first(evlist);
1552 return first->attr.read_format;
1553}
1554
Arnaldo Carvalho de Melo0c21f732012-08-14 16:42:15 -03001555u16 perf_evlist__id_hdr_size(struct perf_evlist *evlist)
Arnaldo Carvalho de Melo81e36bf2011-11-11 22:28:50 -02001556{
Arnaldo Carvalho de Melo0c21f732012-08-14 16:42:15 -03001557 struct perf_evsel *first = perf_evlist__first(evlist);
Arnaldo Carvalho de Melo81e36bf2011-11-11 22:28:50 -02001558 struct perf_sample *data;
1559 u64 sample_type;
1560 u16 size = 0;
1561
Arnaldo Carvalho de Melo81e36bf2011-11-11 22:28:50 -02001562 if (!first->attr.sample_id_all)
1563 goto out;
1564
1565 sample_type = first->attr.sample_type;
1566
1567 if (sample_type & PERF_SAMPLE_TID)
1568 size += sizeof(data->tid) * 2;
1569
1570 if (sample_type & PERF_SAMPLE_TIME)
1571 size += sizeof(data->time);
1572
1573 if (sample_type & PERF_SAMPLE_ID)
1574 size += sizeof(data->id);
1575
1576 if (sample_type & PERF_SAMPLE_STREAM_ID)
1577 size += sizeof(data->stream_id);
1578
1579 if (sample_type & PERF_SAMPLE_CPU)
1580 size += sizeof(data->cpu) * 2;
Adrian Hunter75562572013-08-27 11:23:09 +03001581
1582 if (sample_type & PERF_SAMPLE_IDENTIFIER)
1583 size += sizeof(data->id);
Arnaldo Carvalho de Melo81e36bf2011-11-11 22:28:50 -02001584out:
1585 return size;
1586}
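
/*
 * Worked example for the size computed above: with sample_id_all set and
 * sample_type = PERF_SAMPLE_TID | PERF_SAMPLE_TIME | PERF_SAMPLE_CPU |
 * PERF_SAMPLE_IDENTIFIER, the sample_id trailer carries
 *
 *	pid/tid     2 * sizeof(u32) =  8 bytes
 *	time            sizeof(u64) =  8 bytes
 *	cpu/res     2 * sizeof(u32) =  8 bytes
 *	identifier      sizeof(u64) =  8 bytes
 *	                              32 bytes total
 */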
1587
Arnaldo Carvalho de Melo0c21f732012-08-14 16:42:15 -03001588bool perf_evlist__valid_sample_id_all(struct perf_evlist *evlist)
Arnaldo Carvalho de Meloc2a70652011-06-02 11:04:54 -03001589{
Arnaldo Carvalho de Melo0c21f732012-08-14 16:42:15 -03001590 struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;
Arnaldo Carvalho de Meloc2a70652011-06-02 11:04:54 -03001591
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03001592 evlist__for_each_entry_continue(evlist, pos) {
Arnaldo Carvalho de Meloc2a70652011-06-02 11:04:54 -03001593 if (first->attr.sample_id_all != pos->attr.sample_id_all)
1594 return false;
1595 }
1596
1597 return true;
Frederic Weisbecker74429962011-05-21 17:49:00 +02001598}
1599
Arnaldo Carvalho de Melo0c21f732012-08-14 16:42:15 -03001600bool perf_evlist__sample_id_all(struct perf_evlist *evlist)
Frederic Weisbecker74429962011-05-21 17:49:00 +02001601{
Arnaldo Carvalho de Melo0c21f732012-08-14 16:42:15 -03001602 struct perf_evsel *first = perf_evlist__first(evlist);
Arnaldo Carvalho de Meloc2a70652011-06-02 11:04:54 -03001603 return first->attr.sample_id_all;
Frederic Weisbecker74429962011-05-21 17:49:00 +02001604}
Arnaldo Carvalho de Melo81cce8d2011-10-05 19:11:32 -03001605
1606void perf_evlist__set_selected(struct perf_evlist *evlist,
1607 struct perf_evsel *evsel)
1608{
1609 evlist->selected = evsel;
1610}
Arnaldo Carvalho de Melo727ab042011-10-25 10:42:19 -02001611
Namhyung Kima74b4b62013-03-15 14:48:48 +09001612void perf_evlist__close(struct perf_evlist *evlist)
1613{
1614 struct perf_evsel *evsel;
1615 int ncpus = cpu_map__nr(evlist->cpus);
1616 int nthreads = thread_map__nr(evlist->threads);
Stephane Eranian8ad92192014-01-17 16:34:06 +01001617 int n;
Namhyung Kima74b4b62013-03-15 14:48:48 +09001618
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03001619 evlist__for_each_entry_reverse(evlist, evsel) {
Stephane Eranian8ad92192014-01-17 16:34:06 +01001620 n = evsel->cpus ? evsel->cpus->nr : ncpus;
1621 perf_evsel__close(evsel, n, nthreads);
1622 }
Namhyung Kima74b4b62013-03-15 14:48:48 +09001623}
1624
Arnaldo Carvalho de Melo4112eb12014-10-10 15:55:15 -03001625static int perf_evlist__create_syswide_maps(struct perf_evlist *evlist)
1626{
Adrian Hunter8c0498b2015-09-08 10:59:00 +03001627 struct cpu_map *cpus;
1628 struct thread_map *threads;
Arnaldo Carvalho de Melo4112eb12014-10-10 15:55:15 -03001629 int err = -ENOMEM;
1630
1631 /*
1632 * Try reading /sys/devices/system/cpu/online to get
1633 * an all cpus map.
1634 *
1635 * FIXME: -ENOMEM is the best we can do here, the cpu_map
1636 * code needs an overhaul to properly forward the
1637 * error, and we may not want to do that fallback to a
1638 * default cpu identity map :-\
1639 */
Adrian Hunter8c0498b2015-09-08 10:59:00 +03001640 cpus = cpu_map__new(NULL);
1641 if (!cpus)
Arnaldo Carvalho de Melo4112eb12014-10-10 15:55:15 -03001642 goto out;
1643
Adrian Hunter8c0498b2015-09-08 10:59:00 +03001644 threads = thread_map__new_dummy();
1645 if (!threads)
1646 goto out_put;
Arnaldo Carvalho de Melo4112eb12014-10-10 15:55:15 -03001647
Adrian Hunter8c0498b2015-09-08 10:59:00 +03001648	perf_evlist__set_maps(evlist, cpus, threads);
	err = 0; /* maps are in place, don't fall through with the initial -ENOMEM */
Arnaldo Carvalho de Melo4112eb12014-10-10 15:55:15 -03001649out:
1650 return err;
Adrian Hunter8c0498b2015-09-08 10:59:00 +03001651out_put:
1652 cpu_map__put(cpus);
Arnaldo Carvalho de Melo4112eb12014-10-10 15:55:15 -03001653 goto out;
1654}
1655
Jiri Olsa6a4bb042012-08-08 12:22:36 +02001656int perf_evlist__open(struct perf_evlist *evlist)
Arnaldo Carvalho de Melo727ab042011-10-25 10:42:19 -02001657{
Jiri Olsa6a4bb042012-08-08 12:22:36 +02001658 struct perf_evsel *evsel;
Namhyung Kima74b4b62013-03-15 14:48:48 +09001659 int err;
Arnaldo Carvalho de Melo727ab042011-10-25 10:42:19 -02001660
Arnaldo Carvalho de Melo4112eb12014-10-10 15:55:15 -03001661 /*
1662 * Default: one fd per CPU, all threads, aka systemwide
1663 * as sys_perf_event_open(cpu = -1, thread = -1) is EINVAL
1664 */
1665 if (evlist->threads == NULL && evlist->cpus == NULL) {
1666 err = perf_evlist__create_syswide_maps(evlist);
1667 if (err < 0)
1668 goto out_err;
1669 }
1670
Adrian Hunter733cd2f2013-09-06 22:40:11 +03001671 perf_evlist__update_id_pos(evlist);
1672
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03001673 evlist__for_each_entry(evlist, evsel) {
Adrian Hunter23df7f72016-01-07 10:13:59 +01001674 err = perf_evsel__open(evsel, evsel->cpus, evsel->threads);
Arnaldo Carvalho de Melo727ab042011-10-25 10:42:19 -02001675 if (err < 0)
1676 goto out_err;
1677 }
1678
1679 return 0;
1680out_err:
Namhyung Kima74b4b62013-03-15 14:48:48 +09001681 perf_evlist__close(evlist);
Namhyung Kim41c21a62012-02-23 12:13:36 +09001682 errno = -err;
Arnaldo Carvalho de Melo727ab042011-10-25 10:42:19 -02001683 return err;
1684}
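
/*
 * Open sketch: with no cpu/thread maps set beforehand the fallback above
 * yields a system wide session (all online cpus, dummy thread map), and
 * failures are typically reported via perf_evlist__strerror_open().
 * Illustrative, with error handling trimmed:
 *
 *	struct perf_evlist *evlist = perf_evlist__new_default();
 *	char errbuf[BUFSIZ];
 *
 *	if (perf_evlist__open(evlist) < 0) {
 *		perf_evlist__strerror_open(evlist, errno, errbuf, sizeof(errbuf));
 *		pr_err("%s\n", errbuf);
 *	}
 */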
Arnaldo Carvalho de Melo35b9d882011-11-09 08:47:15 -02001685
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03001686int perf_evlist__prepare_workload(struct perf_evlist *evlist, struct target *target,
Namhyung Kim55e162e2013-03-11 16:43:17 +09001687 const char *argv[], bool pipe_output,
Arnaldo Carvalho de Melo735f7e02014-01-03 14:56:49 -03001688 void (*exec_error)(int signo, siginfo_t *info, void *ucontext))
Arnaldo Carvalho de Melo35b9d882011-11-09 08:47:15 -02001689{
1690 int child_ready_pipe[2], go_pipe[2];
1691 char bf;
1692
1693 if (pipe(child_ready_pipe) < 0) {
1694 perror("failed to create 'ready' pipe");
1695 return -1;
1696 }
1697
1698 if (pipe(go_pipe) < 0) {
1699 perror("failed to create 'go' pipe");
1700 goto out_close_ready_pipe;
1701 }
1702
1703 evlist->workload.pid = fork();
1704 if (evlist->workload.pid < 0) {
1705 perror("failed to fork");
1706 goto out_close_pipes;
1707 }
1708
1709 if (!evlist->workload.pid) {
Arnaldo Carvalho de Melo5f1c4222014-07-28 12:39:50 -03001710 int ret;
1711
Namhyung Kim119fa3c2013-03-11 16:43:16 +09001712 if (pipe_output)
Arnaldo Carvalho de Melo35b9d882011-11-09 08:47:15 -02001713 dup2(2, 1);
1714
David Ahern0817df02013-05-25 17:50:39 -06001715 signal(SIGTERM, SIG_DFL);
1716
Arnaldo Carvalho de Melo35b9d882011-11-09 08:47:15 -02001717 close(child_ready_pipe[0]);
1718 close(go_pipe[1]);
1719 fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);
1720
1721 /*
Arnaldo Carvalho de Melo35b9d882011-11-09 08:47:15 -02001722 * Tell the parent we're ready to go
1723 */
1724 close(child_ready_pipe[1]);
1725
1726 /*
1727 * Wait until the parent tells us to go.
1728 */
Arnaldo Carvalho de Melo5f1c4222014-07-28 12:39:50 -03001729 ret = read(go_pipe[0], &bf, 1);
1730 /*
1731 * The parent will ask for the execvp() to be performed by
1732 * writing exactly one byte, in workload.cork_fd, usually via
1733 * perf_evlist__start_workload().
1734 *
Arnaldo Carvalho de Melo20f86fc2015-02-03 13:29:05 -03001735 * For cancelling the workload without actually running it,
Arnaldo Carvalho de Melo5f1c4222014-07-28 12:39:50 -03001736 * the parent will just close workload.cork_fd, without writing
1737 * anything, i.e. read will return zero and we just exit()
1738 * here.
1739 */
1740 if (ret != 1) {
1741 if (ret == -1)
1742 perror("unable to read pipe");
1743 exit(ret);
1744 }
Arnaldo Carvalho de Melo35b9d882011-11-09 08:47:15 -02001745
1746 execvp(argv[0], (char **)argv);
1747
Arnaldo Carvalho de Melo735f7e02014-01-03 14:56:49 -03001748 if (exec_error) {
Arnaldo Carvalho de Melof33cbe72014-01-02 15:11:25 -03001749 union sigval val;
1750
1751 val.sival_int = errno;
1752 if (sigqueue(getppid(), SIGUSR1, val))
1753 perror(argv[0]);
1754 } else
1755 perror(argv[0]);
Arnaldo Carvalho de Melo35b9d882011-11-09 08:47:15 -02001756 exit(-1);
1757 }
1758
Arnaldo Carvalho de Melo735f7e02014-01-03 14:56:49 -03001759 if (exec_error) {
1760 struct sigaction act = {
1761 .sa_flags = SA_SIGINFO,
1762 .sa_sigaction = exec_error,
1763 };
1764 sigaction(SIGUSR1, &act, NULL);
1765 }
1766
Arnaldo Carvalho de Melo1aaf63b2014-10-10 14:29:49 -03001767 if (target__none(target)) {
1768 if (evlist->threads == NULL) {
	1769			fprintf(stderr, "FATAL: evlist->threads needs to be set at this point (%s:%d).\n",
1770 __func__, __LINE__);
1771 goto out_close_pipes;
1772 }
Jiri Olsae13798c2015-06-23 00:36:02 +02001773 thread_map__set_pid(evlist->threads, 0, evlist->workload.pid);
Arnaldo Carvalho de Melo1aaf63b2014-10-10 14:29:49 -03001774 }
Arnaldo Carvalho de Melo35b9d882011-11-09 08:47:15 -02001775
1776 close(child_ready_pipe[1]);
1777 close(go_pipe[0]);
1778 /*
1779 * wait for child to settle
1780 */
1781 if (read(child_ready_pipe[0], &bf, 1) == -1) {
1782 perror("unable to read pipe");
1783 goto out_close_pipes;
1784 }
1785
Namhyung Kimbcf31452013-06-26 16:14:15 +09001786 fcntl(go_pipe[1], F_SETFD, FD_CLOEXEC);
Arnaldo Carvalho de Melo35b9d882011-11-09 08:47:15 -02001787 evlist->workload.cork_fd = go_pipe[1];
1788 close(child_ready_pipe[0]);
1789 return 0;
1790
1791out_close_pipes:
1792 close(go_pipe[0]);
1793 close(go_pipe[1]);
1794out_close_ready_pipe:
1795 close(child_ready_pipe[0]);
1796 close(child_ready_pipe[1]);
1797 return -1;
1798}
1799
1800int perf_evlist__start_workload(struct perf_evlist *evlist)
1801{
1802 if (evlist->workload.cork_fd > 0) {
David Ahernb3824402013-07-02 13:27:21 -06001803 char bf = 0;
Namhyung Kimbcf31452013-06-26 16:14:15 +09001804 int ret;
Arnaldo Carvalho de Melo35b9d882011-11-09 08:47:15 -02001805 /*
1806 * Remove the cork, let it rip!
1807 */
Namhyung Kimbcf31452013-06-26 16:14:15 +09001808 ret = write(evlist->workload.cork_fd, &bf, 1);
1809 if (ret < 0)
	1810			perror("unable to write to pipe");
1811
1812 close(evlist->workload.cork_fd);
1813 return ret;
Arnaldo Carvalho de Melo35b9d882011-11-09 08:47:15 -02001814 }
1815
1816 return 0;
1817}
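
/*
 * Workload control sketch: prepare_workload() forks the child and leaves it
 * corked on go_pipe, so maps and events can be set up against the child's
 * pid before a single byte written to cork_fd lets the execvp() proceed.
 * Illustrative order ('target', 'opts' and 'argv' are caller state, error
 * handling trimmed):
 *
 *	perf_evlist__create_maps(evlist, &target);
 *	perf_evlist__prepare_workload(evlist, &target, argv, false, NULL);
 *	perf_evlist__open(evlist);
 *	perf_evlist__mmap(evlist, opts->mmap_pages, false);
 *	perf_evlist__start_workload(evlist);	(child now exec's argv[0])
 */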
Arnaldo Carvalho de Melocb0b29e2012-08-02 11:42:57 -03001818
Arnaldo Carvalho de Meloa3f698f2012-08-02 12:23:46 -03001819int perf_evlist__parse_sample(struct perf_evlist *evlist, union perf_event *event,
Arnaldo Carvalho de Melo0807d2d2012-09-26 12:48:18 -03001820 struct perf_sample *sample)
Arnaldo Carvalho de Melocb0b29e2012-08-02 11:42:57 -03001821{
Adrian Hunter75562572013-08-27 11:23:09 +03001822 struct perf_evsel *evsel = perf_evlist__event2evsel(evlist, event);
1823
1824 if (!evsel)
1825 return -EFAULT;
Arnaldo Carvalho de Melo0807d2d2012-09-26 12:48:18 -03001826 return perf_evsel__parse_sample(evsel, event, sample);
Arnaldo Carvalho de Melocb0b29e2012-08-02 11:42:57 -03001827}
Arnaldo Carvalho de Melo78f067b2012-09-06 14:54:11 -03001828
1829size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp)
1830{
1831 struct perf_evsel *evsel;
1832 size_t printed = 0;
1833
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03001834 evlist__for_each_entry(evlist, evsel) {
Arnaldo Carvalho de Melo78f067b2012-09-06 14:54:11 -03001835 printed += fprintf(fp, "%s%s", evsel->idx ? ", " : "",
1836 perf_evsel__name(evsel));
1837 }
1838
Davidlohr Buesob2222132013-11-12 22:24:24 -08001839 return printed + fprintf(fp, "\n");
Arnaldo Carvalho de Melo78f067b2012-09-06 14:54:11 -03001840}
Arnaldo Carvalho de Melo6ef068c2013-10-17 12:07:58 -03001841
Arnaldo Carvalho de Melod9aade7f2016-02-18 13:34:09 -03001842int perf_evlist__strerror_open(struct perf_evlist *evlist,
Arnaldo Carvalho de Meloa8f23d82013-10-17 17:38:29 -03001843 int err, char *buf, size_t size)
1844{
1845 int printed, value;
Arnaldo Carvalho de Meloc8b5f2c2016-07-06 11:56:20 -03001846 char sbuf[STRERR_BUFSIZE], *emsg = str_error_r(err, sbuf, sizeof(sbuf));
Arnaldo Carvalho de Meloa8f23d82013-10-17 17:38:29 -03001847
1848 switch (err) {
1849 case EACCES:
1850 case EPERM:
1851 printed = scnprintf(buf, size,
1852 "Error:\t%s.\n"
1853 "Hint:\tCheck /proc/sys/kernel/perf_event_paranoid setting.", emsg);
1854
Adrian Hunter1a472452013-12-11 14:36:23 +02001855 value = perf_event_paranoid();
Arnaldo Carvalho de Meloa8f23d82013-10-17 17:38:29 -03001856
1857 printed += scnprintf(buf + printed, size - printed, "\nHint:\t");
1858
1859 if (value >= 2) {
1860 printed += scnprintf(buf + printed, size - printed,
1861 "For your workloads it needs to be <= 1\nHint:\t");
1862 }
1863 printed += scnprintf(buf + printed, size - printed,
Arnaldo Carvalho de Melo5229e362014-06-10 17:18:54 -03001864 "For system wide tracing it needs to be set to -1.\n");
Arnaldo Carvalho de Meloa8f23d82013-10-17 17:38:29 -03001865
1866 printed += scnprintf(buf + printed, size - printed,
Arnaldo Carvalho de Melo5229e362014-06-10 17:18:54 -03001867 "Hint:\tTry: 'sudo sh -c \"echo -1 > /proc/sys/kernel/perf_event_paranoid\"'\n"
1868 "Hint:\tThe current value is %d.", value);
Arnaldo Carvalho de Meloa8f23d82013-10-17 17:38:29 -03001869 break;
Arnaldo Carvalho de Melod9aade7f2016-02-18 13:34:09 -03001870 case EINVAL: {
1871 struct perf_evsel *first = perf_evlist__first(evlist);
1872 int max_freq;
1873
1874 if (sysctl__read_int("kernel/perf_event_max_sample_rate", &max_freq) < 0)
1875 goto out_default;
1876
1877 if (first->attr.sample_freq < (u64)max_freq)
1878 goto out_default;
1879
1880 printed = scnprintf(buf, size,
1881 "Error:\t%s.\n"
1882 "Hint:\tCheck /proc/sys/kernel/perf_event_max_sample_rate.\n"
1883 "Hint:\tThe current value is %d and %" PRIu64 " is being requested.",
1884 emsg, max_freq, first->attr.sample_freq);
1885 break;
1886 }
Arnaldo Carvalho de Meloa8f23d82013-10-17 17:38:29 -03001887 default:
Arnaldo Carvalho de Melod9aade7f2016-02-18 13:34:09 -03001888out_default:
Arnaldo Carvalho de Meloa8f23d82013-10-17 17:38:29 -03001889 scnprintf(buf, size, "%s", emsg);
1890 break;
1891 }
1892
1893 return 0;
1894}
Adrian Huntera025e4f2013-12-11 14:36:35 +02001895
Arnaldo Carvalho de Melo956fa572014-12-11 18:03:01 -03001896int perf_evlist__strerror_mmap(struct perf_evlist *evlist, int err, char *buf, size_t size)
1897{
Arnaldo Carvalho de Meloc8b5f2c2016-07-06 11:56:20 -03001898 char sbuf[STRERR_BUFSIZE], *emsg = str_error_r(err, sbuf, sizeof(sbuf));
Arnaldo Carvalho de Meloe965bea2014-12-12 16:25:33 -03001899 int pages_attempted = evlist->mmap_len / 1024, pages_max_per_user, printed = 0;
Arnaldo Carvalho de Melo956fa572014-12-11 18:03:01 -03001900
1901 switch (err) {
1902 case EPERM:
Arnaldo Carvalho de Meloe5d4a292014-12-12 15:59:51 -03001903 sysctl__read_int("kernel/perf_event_mlock_kb", &pages_max_per_user);
Arnaldo Carvalho de Meloe965bea2014-12-12 16:25:33 -03001904 printed += scnprintf(buf + printed, size - printed,
1905 "Error:\t%s.\n"
Arnaldo Carvalho de Melo956fa572014-12-11 18:03:01 -03001906 "Hint:\tCheck /proc/sys/kernel/perf_event_mlock_kb (%d kB) setting.\n"
Arnaldo Carvalho de Meloe965bea2014-12-12 16:25:33 -03001907				     "Hint:\tTried using %d kB.\n",
Arnaldo Carvalho de Meloe5d4a292014-12-12 15:59:51 -03001908 emsg, pages_max_per_user, pages_attempted);
Arnaldo Carvalho de Meloe965bea2014-12-12 16:25:33 -03001909
1910 if (pages_attempted >= pages_max_per_user) {
1911 printed += scnprintf(buf + printed, size - printed,
1912 "Hint:\tTry 'sudo sh -c \"echo %d > /proc/sys/kernel/perf_event_mlock_kb\"', or\n",
1913 pages_max_per_user + pages_attempted);
1914 }
1915
1916 printed += scnprintf(buf + printed, size - printed,
1917 "Hint:\tTry using a smaller -m/--mmap-pages value.");
Arnaldo Carvalho de Melo956fa572014-12-11 18:03:01 -03001918 break;
1919 default:
1920 scnprintf(buf, size, "%s", emsg);
1921 break;
1922 }
1923
1924 return 0;
1925}
1926
Adrian Huntera025e4f2013-12-11 14:36:35 +02001927void perf_evlist__to_front(struct perf_evlist *evlist,
1928 struct perf_evsel *move_evsel)
1929{
1930 struct perf_evsel *evsel, *n;
1931 LIST_HEAD(move);
1932
1933 if (move_evsel == perf_evlist__first(evlist))
1934 return;
1935
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03001936 evlist__for_each_entry_safe(evlist, n, evsel) {
Adrian Huntera025e4f2013-12-11 14:36:35 +02001937 if (evsel->leader == move_evsel->leader)
1938 list_move_tail(&evsel->node, &move);
1939 }
1940
1941 list_splice(&move, &evlist->entries);
1942}
Adrian Hunter60b08962014-07-31 09:00:52 +03001943
1944void perf_evlist__set_tracking_event(struct perf_evlist *evlist,
1945 struct perf_evsel *tracking_evsel)
1946{
1947 struct perf_evsel *evsel;
1948
1949 if (tracking_evsel->tracking)
1950 return;
1951
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03001952 evlist__for_each_entry(evlist, evsel) {
Adrian Hunter60b08962014-07-31 09:00:52 +03001953 if (evsel != tracking_evsel)
1954 evsel->tracking = false;
1955 }
1956
1957 tracking_evsel->tracking = true;
1958}
Wang Nan7630b3e2016-02-22 09:10:33 +00001959
1960struct perf_evsel *
1961perf_evlist__find_evsel_by_str(struct perf_evlist *evlist,
1962 const char *str)
1963{
1964 struct perf_evsel *evsel;
1965
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03001966 evlist__for_each_entry(evlist, evsel) {
Wang Nan7630b3e2016-02-22 09:10:33 +00001967 if (!evsel->name)
1968 continue;
1969 if (strcmp(str, evsel->name) == 0)
1970 return evsel;
1971 }
1972
1973 return NULL;
1974}