/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */
#include "util.h"
#include <api/fs/fs.h>
#include <poll.h>
#include "cpumap.h"
#include "thread_map.h"
#include "target.h"
#include "evlist.h"
#include "evsel.h"
#include "debug.h"
#include "asm/bug.h"
#include <unistd.h>

#include "parse-events.h"
#include <subcmd/parse-options.h>

#include <sys/mman.h>

#include <linux/bitops.h>
#include <linux/hash.h>
#include <linux/log2.h>
#include <linux/err.h>

static void perf_mmap__munmap(struct perf_mmap *map);
static void perf_mmap__put(struct perf_mmap *map);

#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
#define SID(e, x, y) xyarray__entry(e->sample_id, x, y)

void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
		       struct thread_map *threads)
{
	int i;

	for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i)
		INIT_HLIST_HEAD(&evlist->heads[i]);
	INIT_LIST_HEAD(&evlist->entries);
	perf_evlist__set_maps(evlist, cpus, threads);
	fdarray__init(&evlist->pollfd, 64);
	evlist->workload.pid = -1;
	evlist->bkw_mmap_state = BKW_MMAP_NOTREADY;
}

struct perf_evlist *perf_evlist__new(void)
{
	struct perf_evlist *evlist = zalloc(sizeof(*evlist));

	if (evlist != NULL)
		perf_evlist__init(evlist, NULL, NULL);

	return evlist;
}

struct perf_evlist *perf_evlist__new_default(void)
{
	struct perf_evlist *evlist = perf_evlist__new();

	if (evlist && perf_evlist__add_default(evlist)) {
		perf_evlist__delete(evlist);
		evlist = NULL;
	}

	return evlist;
}
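
/*
 * A minimal lifecycle sketch (assumes the events are opened with the
 * evsel/evlist open helpers, which live outside this file):
 *
 *	struct perf_evlist *evlist = perf_evlist__new_default();
 *
 *	if (evlist == NULL)
 *		return -ENOMEM;
 *	... open the events ...
 *	perf_evlist__mmap(evlist, UINT_MAX, false);
 *	perf_evlist__enable(evlist);
 *	... consume events, see perf_evlist__mmap_read() below ...
 *	perf_evlist__disable(evlist);
 *	perf_evlist__delete(evlist);
 */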

struct perf_evlist *perf_evlist__new_dummy(void)
{
	struct perf_evlist *evlist = perf_evlist__new();

	if (evlist && perf_evlist__add_dummy(evlist)) {
		perf_evlist__delete(evlist);
		evlist = NULL;
	}

	return evlist;
}

/**
 * perf_evlist__set_id_pos - set the positions of event ids.
 * @evlist: selected event list
 *
 * Events with compatible sample types all have the same id_pos
 * and is_pos. For convenience, put a copy on evlist.
 */
void perf_evlist__set_id_pos(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);

	evlist->id_pos = first->id_pos;
	evlist->is_pos = first->is_pos;
}

static void perf_evlist__update_id_pos(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	evlist__for_each_entry(evlist, evsel)
		perf_evsel__calc_id_pos(evsel);

	perf_evlist__set_id_pos(evlist);
}

static void perf_evlist__purge(struct perf_evlist *evlist)
{
	struct perf_evsel *pos, *n;

	evlist__for_each_entry_safe(evlist, n, pos) {
		list_del_init(&pos->node);
		pos->evlist = NULL;
		perf_evsel__delete(pos);
	}

	evlist->nr_entries = 0;
}

void perf_evlist__exit(struct perf_evlist *evlist)
{
	zfree(&evlist->mmap);
	zfree(&evlist->backward_mmap);
	fdarray__exit(&evlist->pollfd);
}

void perf_evlist__delete(struct perf_evlist *evlist)
{
	if (evlist == NULL)
		return;

	perf_evlist__munmap(evlist);
	perf_evlist__close(evlist);
	cpu_map__put(evlist->cpus);
	thread_map__put(evlist->threads);
	evlist->cpus = NULL;
	evlist->threads = NULL;
	perf_evlist__purge(evlist);
	perf_evlist__exit(evlist);
	free(evlist);
}

static void __perf_evlist__propagate_maps(struct perf_evlist *evlist,
					  struct perf_evsel *evsel)
{
	/*
	 * The evsel may already have its own cpu map (set up via PMU
	 * sysfs); keep it, unless the user specified a target cpu list,
	 * which takes precedence.
	 */
	if (!evsel->own_cpus || evlist->has_user_cpus) {
		cpu_map__put(evsel->cpus);
		evsel->cpus = cpu_map__get(evlist->cpus);
	} else if (evsel->cpus != evsel->own_cpus) {
		cpu_map__put(evsel->cpus);
		evsel->cpus = cpu_map__get(evsel->own_cpus);
	}

	thread_map__put(evsel->threads);
	evsel->threads = thread_map__get(evlist->threads);
}

static void perf_evlist__propagate_maps(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	evlist__for_each_entry(evlist, evsel)
		__perf_evlist__propagate_maps(evlist, evsel);
}

void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry)
{
	entry->evlist = evlist;
	list_add_tail(&entry->node, &evlist->entries);
	entry->idx = evlist->nr_entries;
	entry->tracking = !entry->idx;

	if (!evlist->nr_entries++)
		perf_evlist__set_id_pos(evlist);

	__perf_evlist__propagate_maps(evlist, entry);
}

void perf_evlist__remove(struct perf_evlist *evlist, struct perf_evsel *evsel)
{
	evsel->evlist = NULL;
	list_del_init(&evsel->node);
	evlist->nr_entries -= 1;
}

void perf_evlist__splice_list_tail(struct perf_evlist *evlist,
				   struct list_head *list)
{
	struct perf_evsel *evsel, *temp;

	__evlist__for_each_entry_safe(list, temp, evsel) {
		list_del_init(&evsel->node);
		perf_evlist__add(evlist, evsel);
	}
}

void __perf_evlist__set_leader(struct list_head *list)
{
	struct perf_evsel *evsel, *leader;

	leader = list_entry(list->next, struct perf_evsel, node);
	evsel = list_entry(list->prev, struct perf_evsel, node);

	leader->nr_members = evsel->idx - leader->idx + 1;

	__evlist__for_each_entry(list, evsel) {
		evsel->leader = leader;
	}
}

void perf_evlist__set_leader(struct perf_evlist *evlist)
{
	if (evlist->nr_entries) {
		evlist->nr_groups = evlist->nr_entries > 1 ? 1 : 0;
		__perf_evlist__set_leader(&evlist->entries);
	}
}

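/*
 * Probe for the most precise sample level (:ppp etc.) the running kernel
 * and PMU accept: start at precise_ip == 3 and lower it until
 * sys_perf_event_open() succeeds.
 */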
void perf_event_attr__set_max_precise_ip(struct perf_event_attr *attr)
{
	attr->precise_ip = 3;

	while (attr->precise_ip != 0) {
		int fd = sys_perf_event_open(attr, 0, -1, -1, 0);
		if (fd != -1) {
			close(fd);
			break;
		}
		--attr->precise_ip;
	}
}

int perf_evlist__add_default(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel = perf_evsel__new_cycles();

	if (evsel == NULL)
		return -ENOMEM;

	perf_evlist__add(evlist, evsel);
	return 0;
}

int perf_evlist__add_dummy(struct perf_evlist *evlist)
{
	struct perf_event_attr attr = {
		.type	= PERF_TYPE_SOFTWARE,
		.config = PERF_COUNT_SW_DUMMY,
		.size	= sizeof(attr), /* to capture ABI version */
	};
	struct perf_evsel *evsel = perf_evsel__new(&attr);

	if (evsel == NULL)
		return -ENOMEM;

	perf_evlist__add(evlist, evsel);
	return 0;
}

static int perf_evlist__add_attrs(struct perf_evlist *evlist,
				  struct perf_event_attr *attrs, size_t nr_attrs)
{
	struct perf_evsel *evsel, *n;
	LIST_HEAD(head);
	size_t i;

	for (i = 0; i < nr_attrs; i++) {
		evsel = perf_evsel__new_idx(attrs + i, evlist->nr_entries + i);
		if (evsel == NULL)
			goto out_delete_partial_list;
		list_add_tail(&evsel->node, &head);
	}

	perf_evlist__splice_list_tail(evlist, &head);

	return 0;

out_delete_partial_list:
	__evlist__for_each_entry_safe(&head, n, evsel)
		perf_evsel__delete(evsel);
	return -1;
}

int __perf_evlist__add_default_attrs(struct perf_evlist *evlist,
				     struct perf_event_attr *attrs, size_t nr_attrs)
{
	size_t i;

	for (i = 0; i < nr_attrs; i++)
		event_attr_init(attrs + i);

	return perf_evlist__add_attrs(evlist, attrs, nr_attrs);
}

struct perf_evsel *
perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id)
{
	struct perf_evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->attr.type == PERF_TYPE_TRACEPOINT &&
		    (int)evsel->attr.config == id)
			return evsel;
	}

	return NULL;
}

struct perf_evsel *
perf_evlist__find_tracepoint_by_name(struct perf_evlist *evlist,
				     const char *name)
{
	struct perf_evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if ((evsel->attr.type == PERF_TYPE_TRACEPOINT) &&
		    (strcmp(evsel->name, name) == 0))
			return evsel;
	}

	return NULL;
}

int perf_evlist__add_newtp(struct perf_evlist *evlist,
			   const char *sys, const char *name, void *handler)
{
	struct perf_evsel *evsel = perf_evsel__newtp(sys, name);

	if (IS_ERR(evsel))
		return -1;

	evsel->handler = handler;
	perf_evlist__add(evlist, evsel);
	return 0;
}
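
/*
 * E.g. (a sketch; the handler name is hypothetical):
 *
 *	perf_evlist__add_newtp(evlist, "sched", "sched_switch",
 *			       process_switch_event);
 *
 * adds the sched:sched_switch tracepoint with a per-evsel handler.
 */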

static int perf_evlist__nr_threads(struct perf_evlist *evlist,
				   struct perf_evsel *evsel)
{
	if (evsel->system_wide)
		return 1;
	else
		return thread_map__nr(evlist->threads);
}

void perf_evlist__disable(struct perf_evlist *evlist)
{
	struct perf_evsel *pos;

	evlist__for_each_entry(evlist, pos) {
		if (!perf_evsel__is_group_leader(pos) || !pos->fd)
			continue;
		perf_evsel__disable(pos);
	}

	evlist->enabled = false;
}

void perf_evlist__enable(struct perf_evlist *evlist)
{
	struct perf_evsel *pos;

	evlist__for_each_entry(evlist, pos) {
		if (!perf_evsel__is_group_leader(pos) || !pos->fd)
			continue;
		perf_evsel__enable(pos);
	}

	evlist->enabled = true;
}

void perf_evlist__toggle_enable(struct perf_evlist *evlist)
{
	(evlist->enabled ? perf_evlist__disable : perf_evlist__enable)(evlist);
}

static int perf_evlist__enable_event_cpu(struct perf_evlist *evlist,
					 struct perf_evsel *evsel, int cpu)
{
	int thread;
	int nr_threads = perf_evlist__nr_threads(evlist, evsel);

	if (!evsel->fd)
		return -EINVAL;

	for (thread = 0; thread < nr_threads; thread++) {
		int err = ioctl(FD(evsel, cpu, thread), PERF_EVENT_IOC_ENABLE, 0);
		if (err)
			return err;
	}
	return 0;
}

static int perf_evlist__enable_event_thread(struct perf_evlist *evlist,
					    struct perf_evsel *evsel,
					    int thread)
{
	int cpu;
	int nr_cpus = cpu_map__nr(evlist->cpus);

	if (!evsel->fd)
		return -EINVAL;

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		int err = ioctl(FD(evsel, cpu, thread), PERF_EVENT_IOC_ENABLE, 0);
		if (err)
			return err;
	}
	return 0;
}

int perf_evlist__enable_event_idx(struct perf_evlist *evlist,
				  struct perf_evsel *evsel, int idx)
{
	bool per_cpu_mmaps = !cpu_map__empty(evlist->cpus);

	if (per_cpu_mmaps)
		return perf_evlist__enable_event_cpu(evlist, evsel, idx);
	else
		return perf_evlist__enable_event_thread(evlist, evsel, idx);
}

int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
{
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = thread_map__nr(evlist->threads);
	int nfds = 0;
	struct perf_evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->system_wide)
			nfds += nr_cpus;
		else
			nfds += nr_cpus * nr_threads;
	}

	if (fdarray__available_entries(&evlist->pollfd) < nfds &&
	    fdarray__grow(&evlist->pollfd, nfds) < 0)
		return -ENOMEM;

	return 0;
}

static int __perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd,
				     struct perf_mmap *map, short revent)
{
	int pos = fdarray__add(&evlist->pollfd, fd, revent | POLLERR | POLLHUP);
	/*
	 * Save the idx so that when we filter out fds POLLHUP'ed we can
	 * close the associated evlist->mmap[] entry.
	 */
	if (pos >= 0) {
		evlist->pollfd.priv[pos].ptr = map;

		fcntl(fd, F_SETFL, O_NONBLOCK);
	}

	return pos;
}

int perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd)
{
	return __perf_evlist__add_pollfd(evlist, fd, NULL, POLLIN);
}

static void perf_evlist__munmap_filtered(struct fdarray *fda, int fd,
					 void *arg __maybe_unused)
{
	struct perf_mmap *map = fda->priv[fd].ptr;

	if (map)
		perf_mmap__put(map);
}

int perf_evlist__filter_pollfd(struct perf_evlist *evlist, short revents_and_mask)
{
	return fdarray__filter(&evlist->pollfd, revents_and_mask,
			       perf_evlist__munmap_filtered, NULL);
}

int perf_evlist__poll(struct perf_evlist *evlist, int timeout)
{
	return fdarray__poll(&evlist->pollfd, timeout);
}

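/*
 * Sample ids are hashed into evlist->heads so that an id read from the
 * event stream can be mapped back to its evsel; see perf_evlist__id2sid()
 * and perf_evlist__id2evsel() below.
 */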
static void perf_evlist__id_hash(struct perf_evlist *evlist,
				 struct perf_evsel *evsel,
				 int cpu, int thread, u64 id)
{
	int hash;
	struct perf_sample_id *sid = SID(evsel, cpu, thread);

	sid->id = id;
	sid->evsel = evsel;
	hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS);
	hlist_add_head(&sid->node, &evlist->heads[hash]);
}

void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel,
			 int cpu, int thread, u64 id)
{
	perf_evlist__id_hash(evlist, evsel, cpu, thread, id);
	evsel->id[evsel->ids++] = id;
}

int perf_evlist__id_add_fd(struct perf_evlist *evlist,
			   struct perf_evsel *evsel,
			   int cpu, int thread, int fd)
{
	u64 read_data[4] = { 0, };
	int id_idx = 1; /* The first entry is the counter value */
	u64 id;
	int ret;

	ret = ioctl(fd, PERF_EVENT_IOC_ID, &id);
	if (!ret)
		goto add;

	if (errno != ENOTTY)
		return -1;

	/* Legacy way to get the event id... All hail to old kernels! */

	/*
	 * This way does not work with group format read, so bail
	 * out in that case.
	 */
	if (perf_evlist__read_format(evlist) & PERF_FORMAT_GROUP)
		return -1;

	if (!(evsel->attr.read_format & PERF_FORMAT_ID) ||
	    read(fd, &read_data, sizeof(read_data)) == -1)
		return -1;

	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		++id_idx;
	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		++id_idx;

	id = read_data[id_idx];

 add:
	perf_evlist__id_add(evlist, evsel, cpu, thread, id);
	return 0;
}

static void perf_evlist__set_sid_idx(struct perf_evlist *evlist,
				     struct perf_evsel *evsel, int idx, int cpu,
				     int thread)
{
	struct perf_sample_id *sid = SID(evsel, cpu, thread);
	sid->idx = idx;
	if (evlist->cpus && cpu >= 0)
		sid->cpu = evlist->cpus->map[cpu];
	else
		sid->cpu = -1;
	if (!evsel->system_wide && evlist->threads && thread >= 0)
		sid->tid = thread_map__pid(evlist->threads, thread);
	else
		sid->tid = -1;
}

struct perf_sample_id *perf_evlist__id2sid(struct perf_evlist *evlist, u64 id)
{
	struct hlist_head *head;
	struct perf_sample_id *sid;
	int hash;

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->heads[hash];

	hlist_for_each_entry(sid, head, node)
		if (sid->id == id)
			return sid;

	return NULL;
}

struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
{
	struct perf_sample_id *sid;

	if (evlist->nr_entries == 1 || !id)
		return perf_evlist__first(evlist);

	sid = perf_evlist__id2sid(evlist, id);
	if (sid)
		return sid->evsel;

	if (!perf_evlist__sample_id_all(evlist))
		return perf_evlist__first(evlist);

	return NULL;
}

struct perf_evsel *perf_evlist__id2evsel_strict(struct perf_evlist *evlist,
						u64 id)
{
	struct perf_sample_id *sid;

	if (!id)
		return NULL;

	sid = perf_evlist__id2sid(evlist, id);
	if (sid)
		return sid->evsel;

	return NULL;
}

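/*
 * Pull the sample id out of a raw event: for PERF_RECORD_SAMPLE it sits
 * id_pos u64 words from the start of the record body; for other record
 * types (with sample_id_all) it sits is_pos u64 words from the end.
 */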
static int perf_evlist__event2id(struct perf_evlist *evlist,
				 union perf_event *event, u64 *id)
{
	const u64 *array = event->sample.array;
	ssize_t n;

	n = (event->header.size - sizeof(event->header)) >> 3;

	if (event->header.type == PERF_RECORD_SAMPLE) {
		if (evlist->id_pos >= n)
			return -1;
		*id = array[evlist->id_pos];
	} else {
		if (evlist->is_pos > n)
			return -1;
		n -= evlist->is_pos;
		*id = array[n];
	}
	return 0;
}

struct perf_evsel *perf_evlist__event2evsel(struct perf_evlist *evlist,
					    union perf_event *event)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	struct hlist_head *head;
	struct perf_sample_id *sid;
	int hash;
	u64 id;

	if (evlist->nr_entries == 1)
		return first;

	if (!first->attr.sample_id_all &&
	    event->header.type != PERF_RECORD_SAMPLE)
		return first;

	if (perf_evlist__event2id(evlist, event, &id))
		return NULL;

	/* Synthesized events have an id of zero */
	if (!id)
		return first;

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->heads[hash];

	hlist_for_each_entry(sid, head, node) {
		if (sid->id == id)
			return sid->evsel;
	}
	return NULL;
}

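/*
 * Backward ring buffers are read without the kernel tracking a tail
 * pointer, so their output must be paused (PERF_EVENT_IOC_PAUSE_OUTPUT)
 * while the tool reads them, and resumed afterwards.
 */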
static int perf_evlist__set_paused(struct perf_evlist *evlist, bool value)
{
	int i;

	if (!evlist->backward_mmap)
		return 0;

	for (i = 0; i < evlist->nr_mmaps; i++) {
		int fd = evlist->backward_mmap[i].fd;
		int err;

		if (fd < 0)
			continue;
		err = ioctl(fd, PERF_EVENT_IOC_PAUSE_OUTPUT, value ? 1 : 0);
		if (err)
			return err;
	}
	return 0;
}

static int perf_evlist__pause(struct perf_evlist *evlist)
{
	return perf_evlist__set_paused(evlist, true);
}

static int perf_evlist__resume(struct perf_evlist *evlist)
{
	return perf_evlist__set_paused(evlist, false);
}

/* When check_messup is true, 'end' must point to a good entry */
static union perf_event *
perf_mmap__read(struct perf_mmap *md, bool check_messup, u64 start,
		u64 end, u64 *prev)
{
	unsigned char *data = md->base + page_size;
	union perf_event *event = NULL;
	int diff = end - start;

	if (check_messup) {
		/*
		 * If we're further behind than half the buffer, there's a chance
		 * the writer will bite our tail and mess up the samples under us.
		 *
		 * If we somehow ended up ahead of the 'end', we got messed up.
		 *
		 * In either case, truncate and restart at 'end'.
		 */
		if (diff > md->mask / 2 || diff < 0) {
			fprintf(stderr, "WARNING: failed to keep up with mmap data.\n");

			/*
			 * 'end' points to a known good entry, start there.
			 */
			start = end;
			diff = 0;
		}
	}

	if (diff >= (int)sizeof(event->header)) {
		size_t size;

		event = (union perf_event *)&data[start & md->mask];
		size = event->header.size;

		if (size < sizeof(event->header) || diff < (int)size) {
			event = NULL;
			goto broken_event;
		}

		/*
		 * Event straddles the mmap boundary -- header should always
		 * be inside due to u64 alignment of output.
		 */
		if ((start & md->mask) + size != ((start + size) & md->mask)) {
			unsigned int offset = start;
			unsigned int len = min(sizeof(*event), size), cpy;
			void *dst = md->event_copy;

			do {
				cpy = min(md->mask + 1 - (offset & md->mask), len);
				memcpy(dst, &data[offset & md->mask], cpy);
				offset += cpy;
				dst += cpy;
				len -= cpy;
			} while (len);

			event = (union perf_event *) md->event_copy;
		}

		start += size;
	}

broken_event:
	if (prev)
		*prev = start;

	return event;
}

union perf_event *perf_mmap__read_forward(struct perf_mmap *md, bool check_messup)
{
	u64 head;
	u64 old = md->prev;

	/*
	 * Check if event was unmapped due to a POLLHUP/POLLERR.
	 */
	if (!atomic_read(&md->refcnt))
		return NULL;

	head = perf_mmap__read_head(md);

	return perf_mmap__read(md, check_messup, old, head, &md->prev);
}

union perf_event *
perf_mmap__read_backward(struct perf_mmap *md)
{
	u64 head, end;
	u64 start = md->prev;

	/*
	 * Check if event was unmapped due to a POLLHUP/POLLERR.
	 */
	if (!atomic_read(&md->refcnt))
		return NULL;

	head = perf_mmap__read_head(md);
	if (!head)
		return NULL;

	/*
	 * The 'head' pointer starts at 0 and the kernel subtracts
	 * sizeof(record) from it on each write, so 'head' is in fact a
	 * negative offset. The 'end' pointer is made manually by adding
	 * the size of the ring buffer to 'head', meaning the whole ring
	 * buffer is valid to read. If 'end' comes out positive, the ring
	 * buffer has not yet wrapped, so 'end' must be adjusted to 0.
	 *
	 * However, since both 'head' and 'end' are unsigned, we can't
	 * simply compare 'end' against 0. Instead we compare '-head',
	 * the number of bytes the kernel has written, with the size of
	 * the ring buffer.
	 */
	if (-head < (u64)(md->mask + 1))
		end = 0;
	else
		end = head + md->mask + 1;

	return perf_mmap__read(md, false, start, end, &md->prev);
}

union perf_event *perf_evlist__mmap_read_forward(struct perf_evlist *evlist, int idx)
{
	struct perf_mmap *md = &evlist->mmap[idx];

	/*
	 * Checking for messups is required for a forward overwritable
	 * ring buffer: the memory pointed to by md->prev can be
	 * overwritten in that case. It is not needed for a read-write
	 * ring buffer: the kernel stops outputting when it hits md->prev
	 * (perf_mmap__consume()).
	 */
	return perf_mmap__read_forward(md, evlist->overwrite);
}

union perf_event *perf_evlist__mmap_read_backward(struct perf_evlist *evlist, int idx)
{
	struct perf_mmap *md = &evlist->mmap[idx];

	/*
	 * No need to check for messups in a backward ring buffer: we can
	 * always read arbitrarily old data from it, as long as we
	 * remember to pause it before reading.
	 */
	return perf_mmap__read_backward(md);
}

union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
{
	return perf_evlist__mmap_read_forward(evlist, idx);
}
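
/*
 * A typical consumer loop (sketch):
 *
 *	union perf_event *event;
 *
 *	while ((event = perf_evlist__mmap_read(evlist, idx)) != NULL) {
 *		... process the event ...
 *		perf_evlist__mmap_consume(evlist, idx);
 *	}
 */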

void perf_mmap__read_catchup(struct perf_mmap *md)
{
	u64 head;

	if (!atomic_read(&md->refcnt))
		return;

	head = perf_mmap__read_head(md);
	md->prev = head;
}

void perf_evlist__mmap_read_catchup(struct perf_evlist *evlist, int idx)
{
	perf_mmap__read_catchup(&evlist->mmap[idx]);
}

static bool perf_mmap__empty(struct perf_mmap *md)
{
	return perf_mmap__read_head(md) == md->prev && !md->auxtrace_mmap.base;
}

static void perf_mmap__get(struct perf_mmap *map)
{
	atomic_inc(&map->refcnt);
}

static void perf_mmap__put(struct perf_mmap *md)
{
	BUG_ON(md->base && atomic_read(&md->refcnt) == 0);

	if (atomic_dec_and_test(&md->refcnt))
		perf_mmap__munmap(md);
}

void perf_mmap__consume(struct perf_mmap *md, bool overwrite)
{
	if (!overwrite) {
		u64 old = md->prev;

		perf_mmap__write_tail(md, old);
	}

	if (atomic_read(&md->refcnt) == 1 && perf_mmap__empty(md))
		perf_mmap__put(md);
}

void perf_evlist__mmap_consume(struct perf_evlist *evlist, int idx)
{
	perf_mmap__consume(&evlist->mmap[idx], evlist->overwrite);
}

int __weak auxtrace_mmap__mmap(struct auxtrace_mmap *mm __maybe_unused,
			       struct auxtrace_mmap_params *mp __maybe_unused,
			       void *userpg __maybe_unused,
			       int fd __maybe_unused)
{
	return 0;
}

void __weak auxtrace_mmap__munmap(struct auxtrace_mmap *mm __maybe_unused)
{
}

void __weak auxtrace_mmap_params__init(
			struct auxtrace_mmap_params *mp __maybe_unused,
			off_t auxtrace_offset __maybe_unused,
			unsigned int auxtrace_pages __maybe_unused,
			bool auxtrace_overwrite __maybe_unused)
{
}

void __weak auxtrace_mmap_params__set_idx(
			struct auxtrace_mmap_params *mp __maybe_unused,
			struct perf_evlist *evlist __maybe_unused,
			int idx __maybe_unused,
			bool per_cpu __maybe_unused)
{
}

static void perf_mmap__munmap(struct perf_mmap *map)
{
	if (map->base != NULL) {
		munmap(map->base, perf_mmap__mmap_len(map));
		map->base = NULL;
		map->fd = -1;
		atomic_set(&map->refcnt, 0);
	}
	auxtrace_mmap__munmap(&map->auxtrace_mmap);
}

static void perf_evlist__munmap_nofree(struct perf_evlist *evlist)
{
	int i;

	if (evlist->mmap)
		for (i = 0; i < evlist->nr_mmaps; i++)
			perf_mmap__munmap(&evlist->mmap[i]);

	if (evlist->backward_mmap)
		for (i = 0; i < evlist->nr_mmaps; i++)
			perf_mmap__munmap(&evlist->backward_mmap[i]);
}

void perf_evlist__munmap(struct perf_evlist *evlist)
{
	perf_evlist__munmap_nofree(evlist);
	zfree(&evlist->mmap);
	zfree(&evlist->backward_mmap);
}

static struct perf_mmap *perf_evlist__alloc_mmap(struct perf_evlist *evlist)
{
	int i;
	struct perf_mmap *map;

	evlist->nr_mmaps = cpu_map__nr(evlist->cpus);
	if (cpu_map__empty(evlist->cpus))
		evlist->nr_mmaps = thread_map__nr(evlist->threads);
	map = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
	if (!map)
		return NULL;

	for (i = 0; i < evlist->nr_mmaps; i++)
		map[i].fd = -1;
	return map;
}

struct mmap_params {
	int prot;
	int mask;
	struct auxtrace_mmap_params auxtrace_mp;
};

static int perf_mmap__mmap(struct perf_mmap *map,
			   struct mmap_params *mp, int fd)
{
	/*
	 * The last one will be done at perf_evlist__mmap_consume(), so that we
	 * make sure we don't prevent tools from consuming every last event in
	 * the ring buffer.
	 *
	 * I.e. we can get the POLLHUP meaning that the fd doesn't exist
	 * anymore, but the last events for it are still in the ring buffer,
	 * waiting to be consumed.
	 *
	 * Tools can choose to ignore this at their own discretion, but the
	 * evlist layer can't just drop it when filtering events in
	 * perf_evlist__filter_pollfd().
	 */
	atomic_set(&map->refcnt, 2);
	map->prev = 0;
	map->mask = mp->mask;
	map->base = mmap(NULL, perf_mmap__mmap_len(map), mp->prot,
			 MAP_SHARED, fd, 0);
	if (map->base == MAP_FAILED) {
		pr_debug2("failed to mmap perf event ring buffer, error %d\n",
			  errno);
		map->base = NULL;
		return -1;
	}
	map->fd = fd;

	if (auxtrace_mmap__mmap(&map->auxtrace_mmap,
				&mp->auxtrace_mp, map->base, fd))
		return -1;

	return 0;
}

static bool
perf_evlist__should_poll(struct perf_evlist *evlist __maybe_unused,
			 struct perf_evsel *evsel)
{
	if (evsel->attr.write_backward)
		return false;
	return true;
}

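/*
 * Backward (attr.write_backward) events get their own set of maps,
 * evlist->backward_mmap, allocated lazily here; all other events share
 * evlist->mmap. In both cases only the first fd per index is mmapped;
 * the rest are redirected into it with PERF_EVENT_IOC_SET_OUTPUT.
 */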
static int perf_evlist__mmap_per_evsel(struct perf_evlist *evlist, int idx,
				       struct mmap_params *mp, int cpu_idx,
				       int thread, int *_output, int *_output_backward)
{
	struct perf_evsel *evsel;
	int revent;
	int evlist_cpu = cpu_map__cpu(evlist->cpus, cpu_idx);

	evlist__for_each_entry(evlist, evsel) {
		struct perf_mmap *maps = evlist->mmap;
		int *output = _output;
		int fd;
		int cpu;

		if (evsel->attr.write_backward) {
			output = _output_backward;
			maps = evlist->backward_mmap;

			if (!maps) {
				maps = perf_evlist__alloc_mmap(evlist);
				if (!maps)
					return -1;
				evlist->backward_mmap = maps;
				if (evlist->bkw_mmap_state == BKW_MMAP_NOTREADY)
					perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_RUNNING);
			}
		}

		if (evsel->system_wide && thread)
			continue;

		cpu = cpu_map__idx(evsel->cpus, evlist_cpu);
		if (cpu == -1)
			continue;

		fd = FD(evsel, cpu, thread);

		if (*output == -1) {
			*output = fd;

			if (perf_mmap__mmap(&maps[idx], mp, *output) < 0)
				return -1;
		} else {
			if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, *output) != 0)
				return -1;

			perf_mmap__get(&maps[idx]);
		}

		revent = perf_evlist__should_poll(evlist, evsel) ? POLLIN : 0;

		/*
		 * The system_wide flag causes a selected event to be opened
		 * always without a pid. Consequently it will never get a
		 * POLLHUP, but it is used for tracking in combination with
		 * other events, so it should not need to be polled anyway.
		 * Therefore don't add it for polling.
		 */
		if (!evsel->system_wide &&
		    __perf_evlist__add_pollfd(evlist, fd, &maps[idx], revent) < 0) {
			perf_mmap__put(&maps[idx]);
			return -1;
		}

		if (evsel->attr.read_format & PERF_FORMAT_ID) {
			if (perf_evlist__id_add_fd(evlist, evsel, cpu, thread,
						   fd) < 0)
				return -1;
			perf_evlist__set_sid_idx(evlist, evsel, idx, cpu,
						 thread);
		}
	}

	return 0;
}

static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist,
				     struct mmap_params *mp)
{
	int cpu, thread;
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = thread_map__nr(evlist->threads);

	pr_debug2("perf event ring buffer mmapped per cpu\n");
	for (cpu = 0; cpu < nr_cpus; cpu++) {
		int output = -1;
		int output_backward = -1;

		auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, cpu,
					      true);

		for (thread = 0; thread < nr_threads; thread++) {
			if (perf_evlist__mmap_per_evsel(evlist, cpu, mp, cpu,
							thread, &output, &output_backward))
				goto out_unmap;
		}
	}

	return 0;

out_unmap:
	perf_evlist__munmap_nofree(evlist);
	return -1;
}

static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist,
					struct mmap_params *mp)
{
	int thread;
	int nr_threads = thread_map__nr(evlist->threads);

	pr_debug2("perf event ring buffer mmapped per thread\n");
	for (thread = 0; thread < nr_threads; thread++) {
		int output = -1;
		int output_backward = -1;

		auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, thread,
					      false);

		if (perf_evlist__mmap_per_evsel(evlist, thread, mp, 0, thread,
						&output, &output_backward))
			goto out_unmap;
	}

	return 0;

out_unmap:
	perf_evlist__munmap_nofree(evlist);
	return -1;
}

unsigned long perf_event_mlock_kb_in_pages(void)
{
	unsigned long pages;
	int max;

	if (sysctl__read_int("kernel/perf_event_mlock_kb", &max) < 0) {
		/*
		 * Pick a once-upon-a-time good value, i.e. things look
		 * strange since we can't read the sysctl value, but let's
		 * not die just yet...
		 */
		max = 512;
	} else {
		max -= (page_size / 1024);
	}

	pages = (max * 1024) / page_size;
	if (!is_power_of_2(pages))
		pages = rounddown_pow_of_two(pages);

	return pages;
}

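/*
 * The extra page on top of the data pages is the kernel's
 * perf_event_mmap_page control/header page, e.g. 128 data pages are
 * mmapped as 129 pages in total.
 */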
static size_t perf_evlist__mmap_size(unsigned long pages)
{
	if (pages == UINT_MAX)
		pages = perf_event_mlock_kb_in_pages();
	else if (!is_power_of_2(pages))
		return 0;

	return (pages + 1) * page_size;
}

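/*
 * Parse either a plain page count ("128") or a size with a B/K/M/G
 * suffix ("512K"); sizes are converted to pages and non-power-of-2
 * counts are rounded up to the next power of two.
 */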
David Ahern33c2dcf2013-11-12 07:46:55 -07001197static long parse_pages_arg(const char *str, unsigned long min,
1198 unsigned long max)
Jiri Olsa994a1f72013-09-01 12:36:12 +02001199{
Adrian Hunter2fbe4ab2013-10-22 10:34:18 +03001200 unsigned long pages, val;
Jiri Olsa27050f52013-09-01 12:36:13 +02001201 static struct parse_tag tags[] = {
1202 { .tag = 'B', .mult = 1 },
1203 { .tag = 'K', .mult = 1 << 10 },
1204 { .tag = 'M', .mult = 1 << 20 },
1205 { .tag = 'G', .mult = 1 << 30 },
1206 { .tag = 0 },
1207 };
Jiri Olsa994a1f72013-09-01 12:36:12 +02001208
David Ahern89735042013-11-12 07:46:53 -07001209 if (str == NULL)
David Ahern33c2dcf2013-11-12 07:46:55 -07001210 return -EINVAL;
David Ahern89735042013-11-12 07:46:53 -07001211
Jiri Olsa27050f52013-09-01 12:36:13 +02001212 val = parse_tag_value(str, tags);
Adrian Hunter2fbe4ab2013-10-22 10:34:18 +03001213 if (val != (unsigned long) -1) {
Jiri Olsa27050f52013-09-01 12:36:13 +02001214 /* we got file size value */
1215 pages = PERF_ALIGN(val, page_size) / page_size;
Jiri Olsa27050f52013-09-01 12:36:13 +02001216 } else {
1217 /* we got pages count value */
1218 char *eptr;
1219 pages = strtoul(str, &eptr, 10);
David Ahern33c2dcf2013-11-12 07:46:55 -07001220 if (*eptr != '\0')
1221 return -EINVAL;
Jiri Olsa994a1f72013-09-01 12:36:12 +02001222 }
1223
Adrian Hunter2bcab6c2013-12-09 15:18:37 +02001224 if (pages == 0 && min == 0) {
David Ahern33c2dcf2013-11-12 07:46:55 -07001225 /* leave number of pages at 0 */
Adrian Hunter1dbfa9382013-12-09 15:18:39 +02001226 } else if (!is_power_of_2(pages)) {
Jiri Olsa98081432017-01-09 10:51:55 +01001227 char buf[100];
1228
David Ahern33c2dcf2013-11-12 07:46:55 -07001229 /* round pages up to next power of 2 */
Arnaldo Carvalho de Melo91529832014-12-16 13:24:41 -03001230 pages = roundup_pow_of_two(pages);
Adrian Hunter1dbfa9382013-12-09 15:18:39 +02001231 if (!pages)
1232 return -EINVAL;
Jiri Olsa98081432017-01-09 10:51:55 +01001233
1234 unit_number__scnprintf(buf, sizeof(buf), pages * page_size);
1235 pr_info("rounding mmap pages size to %s (%lu pages)\n",
1236 buf, pages);
Adrian Hunter2fbe4ab2013-10-22 10:34:18 +03001237 }
1238
David Ahern33c2dcf2013-11-12 07:46:55 -07001239 if (pages > max)
1240 return -EINVAL;
1241
1242 return pages;
1243}
1244
Adrian Huntere9db1312015-04-09 18:53:46 +03001245int __perf_evlist__parse_mmap_pages(unsigned int *mmap_pages, const char *str)
David Ahern33c2dcf2013-11-12 07:46:55 -07001246{
David Ahern33c2dcf2013-11-12 07:46:55 -07001247 unsigned long max = UINT_MAX;
1248 long pages;
1249
Adrian Hunterf5ae9c42013-12-09 15:18:38 +02001250 if (max > SIZE_MAX / page_size)
David Ahern33c2dcf2013-11-12 07:46:55 -07001251 max = SIZE_MAX / page_size;
1252
1253 pages = parse_pages_arg(str, 1, max);
1254 if (pages < 0) {
1255 pr_err("Invalid argument for --mmap_pages/-m\n");
Jiri Olsa994a1f72013-09-01 12:36:12 +02001256 return -1;
1257 }
1258
1259 *mmap_pages = pages;
1260 return 0;
1261}
1262
Adrian Huntere9db1312015-04-09 18:53:46 +03001263int perf_evlist__parse_mmap_pages(const struct option *opt, const char *str,
1264 int unset __maybe_unused)
1265{
1266 return __perf_evlist__parse_mmap_pages(opt->value, str);
1267}
1268
Adrian Hunterc83fa7f2013-10-18 15:29:12 +03001269/**
Adrian Hunter718c6022015-04-09 18:53:42 +03001270 * perf_evlist__mmap_ex - Create mmaps to receive events.
Adrian Hunterc83fa7f2013-10-18 15:29:12 +03001271 * @evlist: list of events
1272 * @pages: map length in pages
1273 * @overwrite: overwrite older events?
Adrian Hunter718c6022015-04-09 18:53:42 +03001274 * @auxtrace_pages - auxtrace map length in pages
1275 * @auxtrace_overwrite - overwrite older auxtrace data?
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -02001276 *
Adrian Hunterc83fa7f2013-10-18 15:29:12 +03001277 * If @overwrite is %false the user needs to signal event consumption using
1278 * perf_mmap__write_tail(). Using perf_evlist__mmap_read() does this
1279 * automatically.
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -02001280 *
Adrian Hunter718c6022015-04-09 18:53:42 +03001281 * Similarly, if @auxtrace_overwrite is %false the user needs to signal data
1282 * consumption using auxtrace_mmap__write_tail().
1283 *
Adrian Hunterc83fa7f2013-10-18 15:29:12 +03001284 * Return: %0 on success, negative error code otherwise.
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -02001285 */
Adrian Hunter718c6022015-04-09 18:53:42 +03001286int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages,
1287 bool overwrite, unsigned int auxtrace_pages,
1288 bool auxtrace_overwrite)
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -02001289{
Arnaldo Carvalho de Meloaece9482011-05-15 09:39:00 -03001290 struct perf_evsel *evsel;
Arnaldo Carvalho de Melo7e2ed092011-01-30 11:59:43 -02001291 const struct cpu_map *cpus = evlist->cpus;
1292 const struct thread_map *threads = evlist->threads;
Adrian Huntera8a8f3e2014-07-14 13:02:52 +03001293 struct mmap_params mp = {
1294 .prot = PROT_READ | (overwrite ? 0 : PROT_WRITE),
1295 };
Arnaldo Carvalho de Melo50a682c2011-11-09 09:10:47 -02001296
Wang Nan8db6d6b2016-07-14 08:34:35 +00001297 if (!evlist->mmap)
1298 evlist->mmap = perf_evlist__alloc_mmap(evlist);
1299 if (!evlist->mmap)
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -02001300 return -ENOMEM;
1301
Arnaldo Carvalho de Melo1b853372014-09-03 18:02:59 -03001302 if (evlist->pollfd.entries == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -02001303 return -ENOMEM;
1304
1305 evlist->overwrite = overwrite;
Jiri Olsa994a1f72013-09-01 12:36:12 +02001306 evlist->mmap_len = perf_evlist__mmap_size(pages);
Adrian Hunter2af68ef2013-10-18 15:29:07 +03001307 pr_debug("mmap size %zuB\n", evlist->mmap_len);
Adrian Huntera8a8f3e2014-07-14 13:02:52 +03001308 mp.mask = evlist->mmap_len - page_size - 1;
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -02001309
Adrian Hunter718c6022015-04-09 18:53:42 +03001310 auxtrace_mmap_params__init(&mp.auxtrace_mp, evlist->mmap_len,
1311 auxtrace_pages, auxtrace_overwrite);
1312
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03001313 evlist__for_each_entry(evlist, evsel) {
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -02001314 if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
Arnaldo Carvalho de Meloa91e5432011-03-10 11:15:54 -03001315 evsel->sample_id == NULL &&
Arnaldo Carvalho de Meloa14bb7a2012-09-26 12:41:14 -03001316 perf_evsel__alloc_id(evsel, cpu_map__nr(cpus), threads->nr) < 0)
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -02001317 return -ENOMEM;
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -02001318 }
1319
Sukadev Bhattiproluec1e7e42013-05-22 17:42:38 -07001320 if (cpu_map__empty(cpus))
Adrian Huntera8a8f3e2014-07-14 13:02:52 +03001321 return perf_evlist__mmap_per_thread(evlist, &mp);
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -02001322
Adrian Huntera8a8f3e2014-07-14 13:02:52 +03001323 return perf_evlist__mmap_per_cpu(evlist, &mp);
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -02001324}
Arnaldo Carvalho de Melo7e2ed092011-01-30 11:59:43 -02001325
Adrian Hunter718c6022015-04-09 18:53:42 +03001326int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
1327 bool overwrite)
1328{
1329 return perf_evlist__mmap_ex(evlist, pages, overwrite, 0, false);
1330}
1331
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03001332int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target)
Arnaldo Carvalho de Melo7e2ed092011-01-30 11:59:43 -02001333{
Adrian Hunter74bfd2b2015-09-08 10:58:57 +03001334 struct cpu_map *cpus;
1335 struct thread_map *threads;
Arnaldo Carvalho de Melo7e2ed092011-01-30 11:59:43 -02001336
Adrian Hunter74bfd2b2015-09-08 10:58:57 +03001337 threads = thread_map__new_str(target->pid, target->tid, target->uid);
1338
1339 if (!threads)
Arnaldo Carvalho de Melo7e2ed092011-01-30 11:59:43 -02001340 return -1;
1341
Dongsheng Yang9c105fb2013-12-04 17:56:40 -05001342 if (target__uses_dummy_map(target))
Adrian Hunter74bfd2b2015-09-08 10:58:57 +03001343 cpus = cpu_map__dummy_new();
Namhyung Kim879d77d2012-05-16 18:45:48 +09001344 else
Adrian Hunter74bfd2b2015-09-08 10:58:57 +03001345 cpus = cpu_map__new(target->cpu_list);
Arnaldo Carvalho de Melo7e2ed092011-01-30 11:59:43 -02001346
Adrian Hunter74bfd2b2015-09-08 10:58:57 +03001347 if (!cpus)
Arnaldo Carvalho de Melo7e2ed092011-01-30 11:59:43 -02001348 goto out_delete_threads;
1349
Adrian Hunterec9a77a2015-09-08 10:58:52 +03001350 evlist->has_user_cpus = !!target->cpu_list;
1351
Adrian Hunter74bfd2b2015-09-08 10:58:57 +03001352 perf_evlist__set_maps(evlist, cpus, threads);
Adrian Hunterd5bc0562015-09-08 10:58:51 +03001353
1354 return 0;
Arnaldo Carvalho de Melo7e2ed092011-01-30 11:59:43 -02001355
1356out_delete_threads:
Adrian Hunter74bfd2b2015-09-08 10:58:57 +03001357 thread_map__put(threads);
Arnaldo Carvalho de Melo7e2ed092011-01-30 11:59:43 -02001358 return -1;
1359}
1360
Adrian Hunterd5bc0562015-09-08 10:58:51 +03001361void perf_evlist__set_maps(struct perf_evlist *evlist, struct cpu_map *cpus,
1362 struct thread_map *threads)
Jiri Olsa3de5cfb2015-07-21 14:31:30 +02001363{
Adrian Hunter934e0f22015-09-08 10:58:56 +03001364 /*
1365 * Allow for the possibility that one or the other of the maps isn't being
1366 * changed, i.e. don't put it. Note we are assuming the maps that are
1367 * being applied are brand new and evlist is taking ownership of the
1368 * original reference count of 1. If that is not the case it is up to
1369 * the caller to increase the reference count.
1370 */
1371 if (cpus != evlist->cpus) {
Jiri Olsa3de5cfb2015-07-21 14:31:30 +02001372 cpu_map__put(evlist->cpus);
Arnaldo Carvalho de Meloa55e5662016-02-17 10:57:19 -03001373 evlist->cpus = cpu_map__get(cpus);
Adrian Hunter934e0f22015-09-08 10:58:56 +03001374 }
Jiri Olsa3de5cfb2015-07-21 14:31:30 +02001375
Adrian Hunter934e0f22015-09-08 10:58:56 +03001376 if (threads != evlist->threads) {
Jiri Olsa3de5cfb2015-07-21 14:31:30 +02001377 thread_map__put(evlist->threads);
Arnaldo Carvalho de Meloa55e5662016-02-17 10:57:19 -03001378 evlist->threads = thread_map__get(threads);
Adrian Hunter934e0f22015-09-08 10:58:56 +03001379 }
Jiri Olsa3de5cfb2015-07-21 14:31:30 +02001380
Adrian Hunterec9a77a2015-09-08 10:58:52 +03001381 perf_evlist__propagate_maps(evlist);
Jiri Olsa3de5cfb2015-07-21 14:31:30 +02001382}
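/*
 * Editor's sketch (not in the original file): the ownership rule spelled
 * out in the comment above, in practice - brand-new maps are handed over
 * with their initial reference, so the caller must not put them on
 * success. example_adopt_maps() is a hypothetical helper.
 */
static int __maybe_unused example_adopt_maps(struct perf_evlist *evlist)
{
	struct cpu_map *cpus = cpu_map__new(NULL);	/* all online cpus */
	struct thread_map *threads = thread_map__new_dummy();

	if (!cpus || !threads) {
		cpu_map__put(cpus);		/* both puts are NULL-safe */
		thread_map__put(threads);
		return -ENOMEM;
	}

	perf_evlist__set_maps(evlist, cpus, threads); /* evlist now owns both */
	return 0;
}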
1383
Arnaldo Carvalho de Melo22c8a372016-04-11 18:37:45 -03001384void __perf_evlist__set_sample_bit(struct perf_evlist *evlist,
1385 enum perf_event_sample_format bit)
1386{
1387 struct perf_evsel *evsel;
1388
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03001389 evlist__for_each_entry(evlist, evsel)
Arnaldo Carvalho de Melo22c8a372016-04-11 18:37:45 -03001390 __perf_evsel__set_sample_bit(evsel, bit);
1391}
1392
1393void __perf_evlist__reset_sample_bit(struct perf_evlist *evlist,
1394 enum perf_event_sample_format bit)
1395{
1396 struct perf_evsel *evsel;
1397
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03001398 evlist__for_each_entry(evlist, evsel)
Arnaldo Carvalho de Melo22c8a372016-04-11 18:37:45 -03001399 __perf_evsel__reset_sample_bit(evsel, bit);
1400}
1401
Arnaldo Carvalho de Melo23d4aad2015-03-24 19:23:47 -03001402int perf_evlist__apply_filters(struct perf_evlist *evlist, struct perf_evsel **err_evsel)
Frederic Weisbecker0a102472011-02-26 04:51:54 +01001403{
Frederic Weisbecker0a102472011-02-26 04:51:54 +01001404 struct perf_evsel *evsel;
Arnaldo Carvalho de Melo745cefc2012-09-26 15:07:39 -03001405 int err = 0;
1406 const int ncpus = cpu_map__nr(evlist->cpus),
Namhyung Kimb3a319d2013-03-11 16:43:14 +09001407 nthreads = thread_map__nr(evlist->threads);
Frederic Weisbecker0a102472011-02-26 04:51:54 +01001408
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03001409 evlist__for_each_entry(evlist, evsel) {
Arnaldo Carvalho de Melo745cefc2012-09-26 15:07:39 -03001410 if (evsel->filter == NULL)
Frederic Weisbecker0a102472011-02-26 04:51:54 +01001411 continue;
Arnaldo Carvalho de Melo745cefc2012-09-26 15:07:39 -03001412
Kan Liangd988d5e2015-08-21 02:23:14 -04001413 /*
1414 * Filters only work for tracepoint events, which don't have a cpu
1415 * limit, so the evlist and evsel cpu maps should always be the same.
1416 */
Arnaldo Carvalho de Melof47805a2015-07-03 15:53:49 -03001417 err = perf_evsel__apply_filter(evsel, ncpus, nthreads, evsel->filter);
Arnaldo Carvalho de Melo23d4aad2015-03-24 19:23:47 -03001418 if (err) {
1419 *err_evsel = evsel;
Arnaldo Carvalho de Melo745cefc2012-09-26 15:07:39 -03001420 break;
Arnaldo Carvalho de Melo23d4aad2015-03-24 19:23:47 -03001421 }
Frederic Weisbecker0a102472011-02-26 04:51:54 +01001422 }
1423
Arnaldo Carvalho de Melo745cefc2012-09-26 15:07:39 -03001424 return err;
1425}
1426
1427int perf_evlist__set_filter(struct perf_evlist *evlist, const char *filter)
1428{
1429 struct perf_evsel *evsel;
1430 int err = 0;
Arnaldo Carvalho de Melo745cefc2012-09-26 15:07:39 -03001431
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03001432 evlist__for_each_entry(evlist, evsel) {
Wang Nanfdf14722016-02-26 09:31:53 +00001433 if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
1434 continue;
1435
Arnaldo Carvalho de Melo94ad89b2015-07-03 17:42:03 -03001436 err = perf_evsel__set_filter(evsel, filter);
Arnaldo Carvalho de Melo745cefc2012-09-26 15:07:39 -03001437 if (err)
1438 break;
1439 }
1440
1441 return err;
Frederic Weisbecker0a102472011-02-26 04:51:54 +01001442}
Frederic Weisbecker74429962011-05-21 17:49:00 +02001443
Arnaldo Carvalho de Melobe199ad2015-02-21 11:33:47 -08001444int perf_evlist__set_filter_pids(struct perf_evlist *evlist, size_t npids, pid_t *pids)
Arnaldo Carvalho de Melocfd70a22015-02-21 10:09:55 -08001445{
1446 	char *filter = NULL;	/* keep free(filter) safe if npids == 0 */
Arnaldo Carvalho de Melobe199ad2015-02-21 11:33:47 -08001447 int ret = -1;
1448 size_t i;
Arnaldo Carvalho de Melocfd70a22015-02-21 10:09:55 -08001449
Arnaldo Carvalho de Melobe199ad2015-02-21 11:33:47 -08001450 for (i = 0; i < npids; ++i) {
1451 if (i == 0) {
1452 if (asprintf(&filter, "common_pid != %d", pids[i]) < 0)
1453 return -1;
1454 } else {
1455 char *tmp;
1456
1457 if (asprintf(&tmp, "%s && common_pid != %d", filter, pids[i]) < 0)
1458 goto out_free;
1459
1460 free(filter);
1461 filter = tmp;
1462 }
1463 }
Arnaldo Carvalho de Melocfd70a22015-02-21 10:09:55 -08001464
1465 ret = perf_evlist__set_filter(evlist, filter);
Arnaldo Carvalho de Melobe199ad2015-02-21 11:33:47 -08001466out_free:
Arnaldo Carvalho de Melocfd70a22015-02-21 10:09:55 -08001467 free(filter);
1468 return ret;
1469}
1470
Arnaldo Carvalho de Melobe199ad2015-02-21 11:33:47 -08001471int perf_evlist__set_filter_pid(struct perf_evlist *evlist, pid_t pid)
1472{
1473 return perf_evlist__set_filter_pids(evlist, 1, &pid);
1474}
1475
Arnaldo Carvalho de Melo0c21f732012-08-14 16:42:15 -03001476bool perf_evlist__valid_sample_type(struct perf_evlist *evlist)
Frederic Weisbecker74429962011-05-21 17:49:00 +02001477{
Adrian Hunter75562572013-08-27 11:23:09 +03001478 struct perf_evsel *pos;
Arnaldo Carvalho de Meloc2a70652011-06-02 11:04:54 -03001479
Adrian Hunter75562572013-08-27 11:23:09 +03001480 if (evlist->nr_entries == 1)
1481 return true;
1482
1483 if (evlist->id_pos < 0 || evlist->is_pos < 0)
1484 return false;
1485
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03001486 evlist__for_each_entry(evlist, pos) {
Adrian Hunter75562572013-08-27 11:23:09 +03001487 if (pos->id_pos != evlist->id_pos ||
1488 pos->is_pos != evlist->is_pos)
Arnaldo Carvalho de Meloc2a70652011-06-02 11:04:54 -03001489 return false;
Frederic Weisbecker74429962011-05-21 17:49:00 +02001490 }
1491
Arnaldo Carvalho de Meloc2a70652011-06-02 11:04:54 -03001492 return true;
1493}
1494
Adrian Hunter75562572013-08-27 11:23:09 +03001495u64 __perf_evlist__combined_sample_type(struct perf_evlist *evlist)
Arnaldo Carvalho de Meloc2a70652011-06-02 11:04:54 -03001496{
Adrian Hunter75562572013-08-27 11:23:09 +03001497 struct perf_evsel *evsel;
1498
1499 if (evlist->combined_sample_type)
1500 return evlist->combined_sample_type;
1501
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03001502 evlist__for_each_entry(evlist, evsel)
Adrian Hunter75562572013-08-27 11:23:09 +03001503 evlist->combined_sample_type |= evsel->attr.sample_type;
1504
1505 return evlist->combined_sample_type;
1506}
1507
1508u64 perf_evlist__combined_sample_type(struct perf_evlist *evlist)
1509{
1510 evlist->combined_sample_type = 0;
1511 return __perf_evlist__combined_sample_type(evlist);
Arnaldo Carvalho de Meloc2a70652011-06-02 11:04:54 -03001512}
1513
Andi Kleen98df8582015-07-18 08:24:47 -07001514u64 perf_evlist__combined_branch_type(struct perf_evlist *evlist)
1515{
1516 struct perf_evsel *evsel;
1517 u64 branch_type = 0;
1518
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03001519 evlist__for_each_entry(evlist, evsel)
Andi Kleen98df8582015-07-18 08:24:47 -07001520 branch_type |= evsel->attr.branch_sample_type;
1521 return branch_type;
1522}
1523
Jiri Olsa9ede4732012-10-10 17:38:13 +02001524bool perf_evlist__valid_read_format(struct perf_evlist *evlist)
1525{
1526 struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;
1527 u64 read_format = first->attr.read_format;
1528 u64 sample_type = first->attr.sample_type;
1529
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03001530 evlist__for_each_entry(evlist, pos) {
Jiri Olsa9ede4732012-10-10 17:38:13 +02001531 if (read_format != pos->attr.read_format)
1532 return false;
1533 }
1534
1535 	/* PERF_SAMPLE_READ implies PERF_FORMAT_ID. */
1536 if ((sample_type & PERF_SAMPLE_READ) &&
1537 !(read_format & PERF_FORMAT_ID)) {
1538 return false;
1539 }
1540
1541 return true;
1542}
1543
1544u64 perf_evlist__read_format(struct perf_evlist *evlist)
1545{
1546 struct perf_evsel *first = perf_evlist__first(evlist);
1547 return first->attr.read_format;
1548}
1549
Arnaldo Carvalho de Melo0c21f732012-08-14 16:42:15 -03001550u16 perf_evlist__id_hdr_size(struct perf_evlist *evlist)
Arnaldo Carvalho de Melo81e36bf2011-11-11 22:28:50 -02001551{
Arnaldo Carvalho de Melo0c21f732012-08-14 16:42:15 -03001552 struct perf_evsel *first = perf_evlist__first(evlist);
Arnaldo Carvalho de Melo81e36bf2011-11-11 22:28:50 -02001553 struct perf_sample *data;
1554 u64 sample_type;
1555 u16 size = 0;
1556
Arnaldo Carvalho de Melo81e36bf2011-11-11 22:28:50 -02001557 if (!first->attr.sample_id_all)
1558 goto out;
1559
1560 sample_type = first->attr.sample_type;
1561
1562 if (sample_type & PERF_SAMPLE_TID)
1563 size += sizeof(data->tid) * 2;
1564
1565 if (sample_type & PERF_SAMPLE_TIME)
1566 size += sizeof(data->time);
1567
1568 if (sample_type & PERF_SAMPLE_ID)
1569 size += sizeof(data->id);
1570
1571 if (sample_type & PERF_SAMPLE_STREAM_ID)
1572 size += sizeof(data->stream_id);
1573
1574 if (sample_type & PERF_SAMPLE_CPU)
1575 size += sizeof(data->cpu) * 2;
Adrian Hunter75562572013-08-27 11:23:09 +03001576
1577 if (sample_type & PERF_SAMPLE_IDENTIFIER)
1578 size += sizeof(data->id);
Arnaldo Carvalho de Melo81e36bf2011-11-11 22:28:50 -02001579out:
1580 return size;
1581}
1582
Arnaldo Carvalho de Melo0c21f732012-08-14 16:42:15 -03001583bool perf_evlist__valid_sample_id_all(struct perf_evlist *evlist)
Arnaldo Carvalho de Meloc2a70652011-06-02 11:04:54 -03001584{
Arnaldo Carvalho de Melo0c21f732012-08-14 16:42:15 -03001585 struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;
Arnaldo Carvalho de Meloc2a70652011-06-02 11:04:54 -03001586
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03001587 evlist__for_each_entry_continue(evlist, pos) {
Arnaldo Carvalho de Meloc2a70652011-06-02 11:04:54 -03001588 if (first->attr.sample_id_all != pos->attr.sample_id_all)
1589 return false;
1590 }
1591
1592 return true;
Frederic Weisbecker74429962011-05-21 17:49:00 +02001593}
1594
Arnaldo Carvalho de Melo0c21f732012-08-14 16:42:15 -03001595bool perf_evlist__sample_id_all(struct perf_evlist *evlist)
Frederic Weisbecker74429962011-05-21 17:49:00 +02001596{
Arnaldo Carvalho de Melo0c21f732012-08-14 16:42:15 -03001597 struct perf_evsel *first = perf_evlist__first(evlist);
Arnaldo Carvalho de Meloc2a70652011-06-02 11:04:54 -03001598 return first->attr.sample_id_all;
Frederic Weisbecker74429962011-05-21 17:49:00 +02001599}
Arnaldo Carvalho de Melo81cce8d2011-10-05 19:11:32 -03001600
1601void perf_evlist__set_selected(struct perf_evlist *evlist,
1602 struct perf_evsel *evsel)
1603{
1604 evlist->selected = evsel;
1605}
Arnaldo Carvalho de Melo727ab042011-10-25 10:42:19 -02001606
Namhyung Kima74b4b62013-03-15 14:48:48 +09001607void perf_evlist__close(struct perf_evlist *evlist)
1608{
1609 struct perf_evsel *evsel;
1610 int ncpus = cpu_map__nr(evlist->cpus);
1611 int nthreads = thread_map__nr(evlist->threads);
1612
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03001613 evlist__for_each_entry_reverse(evlist, evsel) {
Arnaldo Carvalho de Melo18ef15c2016-10-03 11:07:24 -03001614 int n = evsel->cpus ? evsel->cpus->nr : ncpus;
Stephane Eranian8ad92192014-01-17 16:34:06 +01001615 perf_evsel__close(evsel, n, nthreads);
1616 }
Namhyung Kima74b4b62013-03-15 14:48:48 +09001617}
1618
Arnaldo Carvalho de Melo4112eb12014-10-10 15:55:15 -03001619static int perf_evlist__create_syswide_maps(struct perf_evlist *evlist)
1620{
Adrian Hunter8c0498b2015-09-08 10:59:00 +03001621 struct cpu_map *cpus;
1622 struct thread_map *threads;
Arnaldo Carvalho de Melo4112eb12014-10-10 15:55:15 -03001623 int err = -ENOMEM;
1624
1625 /*
1626 * Try reading /sys/devices/system/cpu/online to get
1627 * an all cpus map.
1628 *
1629 * FIXME: -ENOMEM is the best we can do here, the cpu_map
1630 * code needs an overhaul to properly forward the
1631 * error, and we may not want to do that fallback to a
1632 * default cpu identity map :-\
1633 */
Adrian Hunter8c0498b2015-09-08 10:59:00 +03001634 cpus = cpu_map__new(NULL);
1635 if (!cpus)
Arnaldo Carvalho de Melo4112eb12014-10-10 15:55:15 -03001636 goto out;
1637
Adrian Hunter8c0498b2015-09-08 10:59:00 +03001638 threads = thread_map__new_dummy();
1639 if (!threads)
1640 goto out_put;
Arnaldo Carvalho de Melo4112eb12014-10-10 15:55:15 -03001641
Adrian Hunter8c0498b2015-09-08 10:59:00 +03001642 perf_evlist__set_maps(evlist, cpus, threads);
	err = 0;
Arnaldo Carvalho de Melo4112eb12014-10-10 15:55:15 -03001643out:
1644 return err;
Adrian Hunter8c0498b2015-09-08 10:59:00 +03001645out_put:
1646 cpu_map__put(cpus);
Arnaldo Carvalho de Melo4112eb12014-10-10 15:55:15 -03001647 goto out;
1648}
1649
Jiri Olsa6a4bb042012-08-08 12:22:36 +02001650int perf_evlist__open(struct perf_evlist *evlist)
Arnaldo Carvalho de Melo727ab042011-10-25 10:42:19 -02001651{
Jiri Olsa6a4bb042012-08-08 12:22:36 +02001652 struct perf_evsel *evsel;
Namhyung Kima74b4b62013-03-15 14:48:48 +09001653 int err;
Arnaldo Carvalho de Melo727ab042011-10-25 10:42:19 -02001654
Arnaldo Carvalho de Melo4112eb12014-10-10 15:55:15 -03001655 /*
1656 * Default: one fd per CPU, all threads, i.e. system wide, since
1657 * sys_perf_event_open(cpu = -1, thread = -1) returns EINVAL.
1658 */
1659 if (evlist->threads == NULL && evlist->cpus == NULL) {
1660 err = perf_evlist__create_syswide_maps(evlist);
1661 if (err < 0)
1662 goto out_err;
1663 }
1664
Adrian Hunter733cd2f2013-09-06 22:40:11 +03001665 perf_evlist__update_id_pos(evlist);
1666
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03001667 evlist__for_each_entry(evlist, evsel) {
Adrian Hunter23df7f72016-01-07 10:13:59 +01001668 err = perf_evsel__open(evsel, evsel->cpus, evsel->threads);
Arnaldo Carvalho de Melo727ab042011-10-25 10:42:19 -02001669 if (err < 0)
1670 goto out_err;
1671 }
1672
1673 return 0;
1674out_err:
Namhyung Kima74b4b62013-03-15 14:48:48 +09001675 perf_evlist__close(evlist);
Namhyung Kim41c21a62012-02-23 12:13:36 +09001676 errno = -err;
Arnaldo Carvalho de Melo727ab042011-10-25 10:42:19 -02001677 return err;
1678}
Arnaldo Carvalho de Melo35b9d882011-11-09 08:47:15 -02001679
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03001680int perf_evlist__prepare_workload(struct perf_evlist *evlist, struct target *target,
Namhyung Kim55e162e2013-03-11 16:43:17 +09001681 const char *argv[], bool pipe_output,
Arnaldo Carvalho de Melo735f7e02014-01-03 14:56:49 -03001682 void (*exec_error)(int signo, siginfo_t *info, void *ucontext))
Arnaldo Carvalho de Melo35b9d882011-11-09 08:47:15 -02001683{
1684 int child_ready_pipe[2], go_pipe[2];
1685 char bf;
1686
1687 if (pipe(child_ready_pipe) < 0) {
1688 perror("failed to create 'ready' pipe");
1689 return -1;
1690 }
1691
1692 if (pipe(go_pipe) < 0) {
1693 perror("failed to create 'go' pipe");
1694 goto out_close_ready_pipe;
1695 }
1696
1697 evlist->workload.pid = fork();
1698 if (evlist->workload.pid < 0) {
1699 perror("failed to fork");
1700 goto out_close_pipes;
1701 }
1702
1703 if (!evlist->workload.pid) {
Arnaldo Carvalho de Melo5f1c4222014-07-28 12:39:50 -03001704 int ret;
1705
Namhyung Kim119fa3c2013-03-11 16:43:16 +09001706 if (pipe_output)
Arnaldo Carvalho de Melo35b9d882011-11-09 08:47:15 -02001707 dup2(2, 1);
1708
David Ahern0817df02013-05-25 17:50:39 -06001709 signal(SIGTERM, SIG_DFL);
1710
Arnaldo Carvalho de Melo35b9d882011-11-09 08:47:15 -02001711 close(child_ready_pipe[0]);
1712 close(go_pipe[1]);
1713 fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);
1714
1715 /*
Arnaldo Carvalho de Melo35b9d882011-11-09 08:47:15 -02001716 * Tell the parent we're ready to go
1717 */
1718 close(child_ready_pipe[1]);
1719
1720 /*
1721 * Wait until the parent tells us to go.
1722 */
Arnaldo Carvalho de Melo5f1c4222014-07-28 12:39:50 -03001723 ret = read(go_pipe[0], &bf, 1);
1724 /*
1725 * The parent will ask for the execvp() to be performed by
1726 * writing exactly one byte, in workload.cork_fd, usually via
1727 * perf_evlist__start_workload().
1728 *
Arnaldo Carvalho de Melo20f86fc2015-02-03 13:29:05 -03001729 * For cancelling the workload without actually running it,
Arnaldo Carvalho de Melo5f1c4222014-07-28 12:39:50 -03001730 * the parent will just close workload.cork_fd, without writing
1731 * anything, i.e. read will return zero and we just exit()
1732 * here.
1733 */
1734 if (ret != 1) {
1735 if (ret == -1)
1736 perror("unable to read pipe");
1737 exit(ret);
1738 }
Arnaldo Carvalho de Melo35b9d882011-11-09 08:47:15 -02001739
1740 execvp(argv[0], (char **)argv);
1741
Arnaldo Carvalho de Melo735f7e02014-01-03 14:56:49 -03001742 if (exec_error) {
Arnaldo Carvalho de Melof33cbe72014-01-02 15:11:25 -03001743 union sigval val;
1744
1745 val.sival_int = errno;
1746 if (sigqueue(getppid(), SIGUSR1, val))
1747 perror(argv[0]);
1748 } else
1749 perror(argv[0]);
Arnaldo Carvalho de Melo35b9d882011-11-09 08:47:15 -02001750 exit(-1);
1751 }
1752
Arnaldo Carvalho de Melo735f7e02014-01-03 14:56:49 -03001753 if (exec_error) {
1754 struct sigaction act = {
1755 .sa_flags = SA_SIGINFO,
1756 .sa_sigaction = exec_error,
1757 };
1758 sigaction(SIGUSR1, &act, NULL);
1759 }
1760
Arnaldo Carvalho de Melo1aaf63b2014-10-10 14:29:49 -03001761 if (target__none(target)) {
1762 if (evlist->threads == NULL) {
1763 			fprintf(stderr, "FATAL: evlist->threads needs to be set at this point (%s:%d).\n",
1764 __func__, __LINE__);
1765 goto out_close_pipes;
1766 }
Jiri Olsae13798c2015-06-23 00:36:02 +02001767 thread_map__set_pid(evlist->threads, 0, evlist->workload.pid);
Arnaldo Carvalho de Melo1aaf63b2014-10-10 14:29:49 -03001768 }
Arnaldo Carvalho de Melo35b9d882011-11-09 08:47:15 -02001769
1770 close(child_ready_pipe[1]);
1771 close(go_pipe[0]);
1772 /*
1773 * wait for child to settle
1774 */
1775 if (read(child_ready_pipe[0], &bf, 1) == -1) {
1776 perror("unable to read pipe");
1777 goto out_close_pipes;
1778 }
1779
Namhyung Kimbcf31452013-06-26 16:14:15 +09001780 fcntl(go_pipe[1], F_SETFD, FD_CLOEXEC);
Arnaldo Carvalho de Melo35b9d882011-11-09 08:47:15 -02001781 evlist->workload.cork_fd = go_pipe[1];
1782 close(child_ready_pipe[0]);
1783 return 0;
1784
1785out_close_pipes:
1786 close(go_pipe[0]);
1787 close(go_pipe[1]);
1788out_close_ready_pipe:
1789 close(child_ready_pipe[0]);
1790 close(child_ready_pipe[1]);
1791 return -1;
1792}
1793
1794int perf_evlist__start_workload(struct perf_evlist *evlist)
1795{
1796 if (evlist->workload.cork_fd > 0) {
David Ahernb3824402013-07-02 13:27:21 -06001797 char bf = 0;
Namhyung Kimbcf31452013-06-26 16:14:15 +09001798 int ret;
Arnaldo Carvalho de Melo35b9d882011-11-09 08:47:15 -02001799 /*
1800 * Remove the cork, let it rip!
1801 */
Namhyung Kimbcf31452013-06-26 16:14:15 +09001802 ret = write(evlist->workload.cork_fd, &bf, 1);
1803 if (ret < 0)
Soramichi Akiyamae978be92017-01-10 10:41:00 -03001804 perror("unable to write to pipe");
Namhyung Kimbcf31452013-06-26 16:14:15 +09001805
1806 close(evlist->workload.cork_fd);
1807 return ret;
Arnaldo Carvalho de Melo35b9d882011-11-09 08:47:15 -02001808 }
1809
1810 return 0;
1811}
Arnaldo Carvalho de Melocb0b29e2012-08-02 11:42:57 -03001812
Arnaldo Carvalho de Meloa3f698f2012-08-02 12:23:46 -03001813int perf_evlist__parse_sample(struct perf_evlist *evlist, union perf_event *event,
Arnaldo Carvalho de Melo0807d2d2012-09-26 12:48:18 -03001814 struct perf_sample *sample)
Arnaldo Carvalho de Melocb0b29e2012-08-02 11:42:57 -03001815{
Adrian Hunter75562572013-08-27 11:23:09 +03001816 struct perf_evsel *evsel = perf_evlist__event2evsel(evlist, event);
1817
1818 if (!evsel)
1819 return -EFAULT;
Arnaldo Carvalho de Melo0807d2d2012-09-26 12:48:18 -03001820 return perf_evsel__parse_sample(evsel, event, sample);
Arnaldo Carvalho de Melocb0b29e2012-08-02 11:42:57 -03001821}
Arnaldo Carvalho de Melo78f067b2012-09-06 14:54:11 -03001822
1823size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp)
1824{
1825 struct perf_evsel *evsel;
1826 size_t printed = 0;
1827
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03001828 evlist__for_each_entry(evlist, evsel) {
Arnaldo Carvalho de Melo78f067b2012-09-06 14:54:11 -03001829 printed += fprintf(fp, "%s%s", evsel->idx ? ", " : "",
1830 perf_evsel__name(evsel));
1831 }
1832
Davidlohr Buesob2222132013-11-12 22:24:24 -08001833 return printed + fprintf(fp, "\n");
Arnaldo Carvalho de Melo78f067b2012-09-06 14:54:11 -03001834}
Arnaldo Carvalho de Melo6ef068c2013-10-17 12:07:58 -03001835
Arnaldo Carvalho de Melod9aade7f2016-02-18 13:34:09 -03001836int perf_evlist__strerror_open(struct perf_evlist *evlist,
Arnaldo Carvalho de Meloa8f23d82013-10-17 17:38:29 -03001837 int err, char *buf, size_t size)
1838{
1839 int printed, value;
Arnaldo Carvalho de Meloc8b5f2c2016-07-06 11:56:20 -03001840 char sbuf[STRERR_BUFSIZE], *emsg = str_error_r(err, sbuf, sizeof(sbuf));
Arnaldo Carvalho de Meloa8f23d82013-10-17 17:38:29 -03001841
1842 switch (err) {
1843 case EACCES:
1844 case EPERM:
1845 printed = scnprintf(buf, size,
1846 "Error:\t%s.\n"
1847 "Hint:\tCheck /proc/sys/kernel/perf_event_paranoid setting.", emsg);
1848
Adrian Hunter1a472452013-12-11 14:36:23 +02001849 value = perf_event_paranoid();
Arnaldo Carvalho de Meloa8f23d82013-10-17 17:38:29 -03001850
1851 printed += scnprintf(buf + printed, size - printed, "\nHint:\t");
1852
1853 if (value >= 2) {
1854 printed += scnprintf(buf + printed, size - printed,
1855 "For your workloads it needs to be <= 1\nHint:\t");
1856 }
1857 printed += scnprintf(buf + printed, size - printed,
Arnaldo Carvalho de Melo5229e362014-06-10 17:18:54 -03001858 "For system wide tracing it needs to be set to -1.\n");
Arnaldo Carvalho de Meloa8f23d82013-10-17 17:38:29 -03001859
1860 printed += scnprintf(buf + printed, size - printed,
Arnaldo Carvalho de Melo5229e362014-06-10 17:18:54 -03001861 "Hint:\tTry: 'sudo sh -c \"echo -1 > /proc/sys/kernel/perf_event_paranoid\"'\n"
1862 "Hint:\tThe current value is %d.", value);
Arnaldo Carvalho de Meloa8f23d82013-10-17 17:38:29 -03001863 break;
Arnaldo Carvalho de Melod9aade7f2016-02-18 13:34:09 -03001864 case EINVAL: {
1865 struct perf_evsel *first = perf_evlist__first(evlist);
1866 int max_freq;
1867
1868 if (sysctl__read_int("kernel/perf_event_max_sample_rate", &max_freq) < 0)
1869 goto out_default;
1870
1871 if (first->attr.sample_freq < (u64)max_freq)
1872 goto out_default;
1873
1874 printed = scnprintf(buf, size,
1875 "Error:\t%s.\n"
1876 "Hint:\tCheck /proc/sys/kernel/perf_event_max_sample_rate.\n"
1877 "Hint:\tThe current value is %d and %" PRIu64 " is being requested.",
1878 emsg, max_freq, first->attr.sample_freq);
1879 break;
1880 }
Arnaldo Carvalho de Meloa8f23d82013-10-17 17:38:29 -03001881 default:
Arnaldo Carvalho de Melod9aade7f2016-02-18 13:34:09 -03001882out_default:
Arnaldo Carvalho de Meloa8f23d82013-10-17 17:38:29 -03001883 scnprintf(buf, size, "%s", emsg);
1884 break;
1885 }
1886
1887 return 0;
1888}
Adrian Huntera025e4f2013-12-11 14:36:35 +02001889
Arnaldo Carvalho de Melo956fa572014-12-11 18:03:01 -03001890int perf_evlist__strerror_mmap(struct perf_evlist *evlist, int err, char *buf, size_t size)
1891{
Arnaldo Carvalho de Meloc8b5f2c2016-07-06 11:56:20 -03001892 char sbuf[STRERR_BUFSIZE], *emsg = str_error_r(err, sbuf, sizeof(sbuf));
Arnaldo Carvalho de Meloe965bea2014-12-12 16:25:33 -03001893 int pages_attempted = evlist->mmap_len / 1024, pages_max_per_user = 0, printed = 0;
Arnaldo Carvalho de Melo956fa572014-12-11 18:03:01 -03001894
1895 switch (err) {
1896 case EPERM:
Arnaldo Carvalho de Meloe5d4a292014-12-12 15:59:51 -03001897 sysctl__read_int("kernel/perf_event_mlock_kb", &pages_max_per_user);
Arnaldo Carvalho de Meloe965bea2014-12-12 16:25:33 -03001898 printed += scnprintf(buf + printed, size - printed,
1899 "Error:\t%s.\n"
Arnaldo Carvalho de Melo956fa572014-12-11 18:03:01 -03001900 "Hint:\tCheck /proc/sys/kernel/perf_event_mlock_kb (%d kB) setting.\n"
Arnaldo Carvalho de Meloe965bea2014-12-12 16:25:33 -03001901 "Hint:\tTried using %zd kB.\n",
Arnaldo Carvalho de Meloe5d4a292014-12-12 15:59:51 -03001902 emsg, pages_max_per_user, pages_attempted);
Arnaldo Carvalho de Meloe965bea2014-12-12 16:25:33 -03001903
1904 if (pages_attempted >= pages_max_per_user) {
1905 printed += scnprintf(buf + printed, size - printed,
1906 "Hint:\tTry 'sudo sh -c \"echo %d > /proc/sys/kernel/perf_event_mlock_kb\"', or\n",
1907 pages_max_per_user + pages_attempted);
1908 }
1909
1910 printed += scnprintf(buf + printed, size - printed,
1911 "Hint:\tTry using a smaller -m/--mmap-pages value.");
Arnaldo Carvalho de Melo956fa572014-12-11 18:03:01 -03001912 break;
1913 default:
1914 scnprintf(buf, size, "%s", emsg);
1915 break;
1916 }
1917
1918 return 0;
1919}
1920
Adrian Huntera025e4f2013-12-11 14:36:35 +02001921void perf_evlist__to_front(struct perf_evlist *evlist,
1922 struct perf_evsel *move_evsel)
1923{
1924 struct perf_evsel *evsel, *n;
1925 LIST_HEAD(move);
1926
1927 if (move_evsel == perf_evlist__first(evlist))
1928 return;
1929
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03001930 evlist__for_each_entry_safe(evlist, n, evsel) {
Adrian Huntera025e4f2013-12-11 14:36:35 +02001931 if (evsel->leader == move_evsel->leader)
1932 list_move_tail(&evsel->node, &move);
1933 }
1934
1935 list_splice(&move, &evlist->entries);
1936}
Adrian Hunter60b08962014-07-31 09:00:52 +03001937
1938void perf_evlist__set_tracking_event(struct perf_evlist *evlist,
1939 struct perf_evsel *tracking_evsel)
1940{
1941 struct perf_evsel *evsel;
1942
1943 if (tracking_evsel->tracking)
1944 return;
1945
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03001946 evlist__for_each_entry(evlist, evsel) {
Adrian Hunter60b08962014-07-31 09:00:52 +03001947 if (evsel != tracking_evsel)
1948 evsel->tracking = false;
1949 }
1950
1951 tracking_evsel->tracking = true;
1952}
Wang Nan7630b3e2016-02-22 09:10:33 +00001953
1954struct perf_evsel *
1955perf_evlist__find_evsel_by_str(struct perf_evlist *evlist,
1956 const char *str)
1957{
1958 struct perf_evsel *evsel;
1959
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03001960 evlist__for_each_entry(evlist, evsel) {
Wang Nan7630b3e2016-02-22 09:10:33 +00001961 if (!evsel->name)
1962 continue;
1963 if (strcmp(str, evsel->name) == 0)
1964 return evsel;
1965 }
1966
1967 return NULL;
1968}
Wang Nan54cc54d2016-07-14 08:34:42 +00001969
1970void perf_evlist__toggle_bkw_mmap(struct perf_evlist *evlist,
1971 enum bkw_mmap_state state)
1972{
1973 enum bkw_mmap_state old_state = evlist->bkw_mmap_state;
1974 enum action {
1975 NONE,
1976 PAUSE,
1977 RESUME,
1978 } action = NONE;
1979
1980 if (!evlist->backward_mmap)
1981 return;
1982
1983 switch (old_state) {
1984 case BKW_MMAP_NOTREADY: {
1985 if (state != BKW_MMAP_RUNNING)
1986 			goto state_err;
1987 break;
1988 }
1989 case BKW_MMAP_RUNNING: {
1990 if (state != BKW_MMAP_DATA_PENDING)
1991 goto state_err;
1992 action = PAUSE;
1993 break;
1994 }
1995 case BKW_MMAP_DATA_PENDING: {
1996 if (state != BKW_MMAP_EMPTY)
1997 goto state_err;
1998 break;
1999 }
2000 case BKW_MMAP_EMPTY: {
2001 if (state != BKW_MMAP_RUNNING)
2002 goto state_err;
2003 action = RESUME;
2004 break;
2005 }
2006 default:
2007 		WARN_ONCE(1, "Shouldn't get here\n");
2008 }
2009
2010 evlist->bkw_mmap_state = state;
2011
2012 switch (action) {
2013 case PAUSE:
2014 perf_evlist__pause(evlist);
2015 break;
2016 case RESUME:
2017 perf_evlist__resume(evlist);
2018 break;
2019 case NONE:
2020 default:
2021 break;
2022 }
2023
2024state_err:
2025 return;
2026}