/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */
#include "util.h"
#include <api/fs/fs.h>
#include <poll.h>
#include "cpumap.h"
#include "thread_map.h"
#include "target.h"
#include "evlist.h"
#include "evsel.h"
#include "debug.h"
#include "asm/bug.h"
#include <unistd.h>

#include "parse-events.h"
#include <subcmd/parse-options.h>

#include <sys/mman.h>

#include <linux/bitops.h>
#include <linux/hash.h>
#include <linux/log2.h>
#include <linux/err.h>

static void perf_mmap__munmap(struct perf_mmap *map);
static void perf_mmap__put(struct perf_mmap *map);

#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
#define SID(e, x, y) xyarray__entry(e->sample_id, x, y)
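/*
 * FD() and SID() index the per-evsel file descriptor and sample_id
 * xyarray tables by (cpu, thread).
 */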

void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
		       struct thread_map *threads)
{
	int i;

	for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i)
		INIT_HLIST_HEAD(&evlist->heads[i]);
	INIT_LIST_HEAD(&evlist->entries);
	perf_evlist__set_maps(evlist, cpus, threads);
	fdarray__init(&evlist->pollfd, 64);
	evlist->workload.pid = -1;
	evlist->bkw_mmap_state = BKW_MMAP_NOTREADY;
}

struct perf_evlist *perf_evlist__new(void)
{
	struct perf_evlist *evlist = zalloc(sizeof(*evlist));

	if (evlist != NULL)
		perf_evlist__init(evlist, NULL, NULL);

	return evlist;
}

struct perf_evlist *perf_evlist__new_default(void)
{
	struct perf_evlist *evlist = perf_evlist__new();

	if (evlist && perf_evlist__add_default(evlist)) {
		perf_evlist__delete(evlist);
		evlist = NULL;
	}

	return evlist;
}

struct perf_evlist *perf_evlist__new_dummy(void)
{
	struct perf_evlist *evlist = perf_evlist__new();

	if (evlist && perf_evlist__add_dummy(evlist)) {
		perf_evlist__delete(evlist);
		evlist = NULL;
	}

	return evlist;
}

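/*
 * Typical constructor usage (sketch, most error handling elided):
 *
 *	struct perf_evlist *evlist = perf_evlist__new_default();
 *
 *	if (evlist == NULL)
 *		return -ENOMEM;
 *	... create maps, open and mmap the events, consume them ...
 *	perf_evlist__delete(evlist);
 */
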
/**
 * perf_evlist__set_id_pos - set the positions of event ids.
 * @evlist: selected event list
 *
 * Events with compatible sample types all have the same id_pos
 * and is_pos.  For convenience, put a copy on evlist.
 */
void perf_evlist__set_id_pos(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);

	evlist->id_pos = first->id_pos;
	evlist->is_pos = first->is_pos;
}

static void perf_evlist__update_id_pos(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	evlist__for_each_entry(evlist, evsel)
		perf_evsel__calc_id_pos(evsel);

	perf_evlist__set_id_pos(evlist);
}

static void perf_evlist__purge(struct perf_evlist *evlist)
{
	struct perf_evsel *pos, *n;

	evlist__for_each_entry_safe(evlist, n, pos) {
		list_del_init(&pos->node);
		pos->evlist = NULL;
		perf_evsel__delete(pos);
	}

	evlist->nr_entries = 0;
}

void perf_evlist__exit(struct perf_evlist *evlist)
{
	zfree(&evlist->mmap);
	zfree(&evlist->backward_mmap);
	fdarray__exit(&evlist->pollfd);
}

void perf_evlist__delete(struct perf_evlist *evlist)
{
	if (evlist == NULL)
		return;

	perf_evlist__munmap(evlist);
	perf_evlist__close(evlist);
	cpu_map__put(evlist->cpus);
	thread_map__put(evlist->threads);
	evlist->cpus = NULL;
	evlist->threads = NULL;
	perf_evlist__purge(evlist);
	perf_evlist__exit(evlist);
	free(evlist);
}

static void __perf_evlist__propagate_maps(struct perf_evlist *evlist,
					  struct perf_evsel *evsel)
{
	/*
	 * We already have cpus for evsel (via PMU sysfs) so
	 * keep it, if there's no target cpu list defined.
	 */
	if (!evsel->own_cpus || evlist->has_user_cpus) {
		cpu_map__put(evsel->cpus);
		evsel->cpus = cpu_map__get(evlist->cpus);
	} else if (evsel->cpus != evsel->own_cpus) {
		cpu_map__put(evsel->cpus);
		evsel->cpus = cpu_map__get(evsel->own_cpus);
	}

	thread_map__put(evsel->threads);
	evsel->threads = thread_map__get(evlist->threads);
}

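/*
 * Map propagation rule: an evsel keeps the cpu map it got from PMU sysfs
 * (own_cpus) unless the user supplied a cpu list (has_user_cpus), in which
 * case the evlist-wide map wins; thread maps always come from the evlist.
 */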
static void perf_evlist__propagate_maps(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	evlist__for_each_entry(evlist, evsel)
		__perf_evlist__propagate_maps(evlist, evsel);
}

void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry)
{
	entry->evlist = evlist;
	list_add_tail(&entry->node, &evlist->entries);
	entry->idx = evlist->nr_entries;
	entry->tracking = !entry->idx;

	if (!evlist->nr_entries++)
		perf_evlist__set_id_pos(evlist);

	__perf_evlist__propagate_maps(evlist, entry);
}

void perf_evlist__remove(struct perf_evlist *evlist, struct perf_evsel *evsel)
{
	evsel->evlist = NULL;
	list_del_init(&evsel->node);
	evlist->nr_entries -= 1;
}

void perf_evlist__splice_list_tail(struct perf_evlist *evlist,
				   struct list_head *list)
{
	struct perf_evsel *evsel, *temp;

	__evlist__for_each_entry_safe(list, temp, evsel) {
		list_del_init(&evsel->node);
		perf_evlist__add(evlist, evsel);
	}
}

void __perf_evlist__set_leader(struct list_head *list)
{
	struct perf_evsel *evsel, *leader;

	leader = list_entry(list->next, struct perf_evsel, node);
	evsel = list_entry(list->prev, struct perf_evsel, node);

	leader->nr_members = evsel->idx - leader->idx + 1;

	__evlist__for_each_entry(list, evsel) {
		evsel->leader = leader;
	}
}

void perf_evlist__set_leader(struct perf_evlist *evlist)
{
	if (evlist->nr_entries) {
		evlist->nr_groups = evlist->nr_entries > 1 ? 1 : 0;
		__perf_evlist__set_leader(&evlist->entries);
	}
}

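/*
 * Probe the most precise precise_ip the running kernel/PMU accepts: start
 * at 3 (the maximum the attribute field can hold) and decrease it until
 * sys_perf_event_open() succeeds.
 */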
void perf_event_attr__set_max_precise_ip(struct perf_event_attr *attr)
{
	attr->precise_ip = 3;

	while (attr->precise_ip != 0) {
		int fd = sys_perf_event_open(attr, 0, -1, -1, 0);
		if (fd != -1) {
			close(fd);
			break;
		}
		--attr->precise_ip;
	}
}

int perf_evlist__add_default(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel = perf_evsel__new_cycles();

	if (evsel == NULL)
		return -ENOMEM;

	perf_evlist__add(evlist, evsel);
	return 0;
}

int perf_evlist__add_dummy(struct perf_evlist *evlist)
{
	struct perf_event_attr attr = {
		.type	= PERF_TYPE_SOFTWARE,
		.config = PERF_COUNT_SW_DUMMY,
		.size	= sizeof(attr), /* to capture ABI version */
	};
	struct perf_evsel *evsel = perf_evsel__new(&attr);

	if (evsel == NULL)
		return -ENOMEM;

	perf_evlist__add(evlist, evsel);
	return 0;
}

static int perf_evlist__add_attrs(struct perf_evlist *evlist,
				  struct perf_event_attr *attrs, size_t nr_attrs)
{
	struct perf_evsel *evsel, *n;
	LIST_HEAD(head);
	size_t i;

	for (i = 0; i < nr_attrs; i++) {
		evsel = perf_evsel__new_idx(attrs + i, evlist->nr_entries + i);
		if (evsel == NULL)
			goto out_delete_partial_list;
		list_add_tail(&evsel->node, &head);
	}

	perf_evlist__splice_list_tail(evlist, &head);

	return 0;

out_delete_partial_list:
	__evlist__for_each_entry_safe(&head, n, evsel)
		perf_evsel__delete(evsel);
	return -1;
}

int __perf_evlist__add_default_attrs(struct perf_evlist *evlist,
				     struct perf_event_attr *attrs, size_t nr_attrs)
{
	size_t i;

	for (i = 0; i < nr_attrs; i++)
		event_attr_init(attrs + i);

	return perf_evlist__add_attrs(evlist, attrs, nr_attrs);
}

struct perf_evsel *
perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id)
{
	struct perf_evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->attr.type == PERF_TYPE_TRACEPOINT &&
		    (int)evsel->attr.config == id)
			return evsel;
	}

	return NULL;
}

struct perf_evsel *
perf_evlist__find_tracepoint_by_name(struct perf_evlist *evlist,
				     const char *name)
{
	struct perf_evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if ((evsel->attr.type == PERF_TYPE_TRACEPOINT) &&
		    (strcmp(evsel->name, name) == 0))
			return evsel;
	}

	return NULL;
}

int perf_evlist__add_newtp(struct perf_evlist *evlist,
			   const char *sys, const char *name, void *handler)
{
	struct perf_evsel *evsel = perf_evsel__newtp(sys, name);

	if (IS_ERR(evsel))
		return -1;

	evsel->handler = handler;
	perf_evlist__add(evlist, evsel);
	return 0;
}

static int perf_evlist__nr_threads(struct perf_evlist *evlist,
				   struct perf_evsel *evsel)
{
	if (evsel->system_wide)
		return 1;
	else
		return thread_map__nr(evlist->threads);
}

void perf_evlist__disable(struct perf_evlist *evlist)
{
	struct perf_evsel *pos;

	evlist__for_each_entry(evlist, pos) {
		if (!perf_evsel__is_group_leader(pos) || !pos->fd)
			continue;
		perf_evsel__disable(pos);
	}

	evlist->enabled = false;
}

void perf_evlist__enable(struct perf_evlist *evlist)
{
	struct perf_evsel *pos;

	evlist__for_each_entry(evlist, pos) {
		if (!perf_evsel__is_group_leader(pos) || !pos->fd)
			continue;
		perf_evsel__enable(pos);
	}

	evlist->enabled = true;
}

void perf_evlist__toggle_enable(struct perf_evlist *evlist)
{
	(evlist->enabled ? perf_evlist__disable : perf_evlist__enable)(evlist);
}

static int perf_evlist__enable_event_cpu(struct perf_evlist *evlist,
					 struct perf_evsel *evsel, int cpu)
{
	int thread;
	int nr_threads = perf_evlist__nr_threads(evlist, evsel);

	if (!evsel->fd)
		return -EINVAL;

	for (thread = 0; thread < nr_threads; thread++) {
		int err = ioctl(FD(evsel, cpu, thread), PERF_EVENT_IOC_ENABLE, 0);
		if (err)
			return err;
	}
	return 0;
}

static int perf_evlist__enable_event_thread(struct perf_evlist *evlist,
					    struct perf_evsel *evsel,
					    int thread)
{
	int cpu;
	int nr_cpus = cpu_map__nr(evlist->cpus);

	if (!evsel->fd)
		return -EINVAL;

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		int err = ioctl(FD(evsel, cpu, thread), PERF_EVENT_IOC_ENABLE, 0);
		if (err)
			return err;
	}
	return 0;
}

int perf_evlist__enable_event_idx(struct perf_evlist *evlist,
				  struct perf_evsel *evsel, int idx)
{
	bool per_cpu_mmaps = !cpu_map__empty(evlist->cpus);

	if (per_cpu_mmaps)
		return perf_evlist__enable_event_cpu(evlist, evsel, idx);
	else
		return perf_evlist__enable_event_thread(evlist, evsel, idx);
}

int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
{
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = thread_map__nr(evlist->threads);
	int nfds = 0;
	struct perf_evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->system_wide)
			nfds += nr_cpus;
		else
			nfds += nr_cpus * nr_threads;
	}

	if (fdarray__available_entries(&evlist->pollfd) < nfds &&
	    fdarray__grow(&evlist->pollfd, nfds) < 0)
		return -ENOMEM;

	return 0;
}

static int __perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd,
				     struct perf_mmap *map, short revent)
{
	int pos = fdarray__add(&evlist->pollfd, fd, revent | POLLERR | POLLHUP);
	/*
	 * Save the mmap pointer so that, when we filter out POLLHUP'ed fds,
	 * we can put the associated evlist->mmap[] entry.
	 */
	if (pos >= 0) {
		evlist->pollfd.priv[pos].ptr = map;

		fcntl(fd, F_SETFL, O_NONBLOCK);
	}

	return pos;
}

int perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd)
{
	return __perf_evlist__add_pollfd(evlist, fd, NULL, POLLIN);
}

static void perf_evlist__munmap_filtered(struct fdarray *fda, int fd,
					 void *arg __maybe_unused)
{
	struct perf_mmap *map = fda->priv[fd].ptr;

	if (map)
		perf_mmap__put(map);
}

int perf_evlist__filter_pollfd(struct perf_evlist *evlist, short revents_and_mask)
{
	return fdarray__filter(&evlist->pollfd, revents_and_mask,
			       perf_evlist__munmap_filtered, NULL);
}

int perf_evlist__poll(struct perf_evlist *evlist, int timeout)
{
	return fdarray__poll(&evlist->pollfd, timeout);
}

static void perf_evlist__id_hash(struct perf_evlist *evlist,
				 struct perf_evsel *evsel,
				 int cpu, int thread, u64 id)
{
	int hash;
	struct perf_sample_id *sid = SID(evsel, cpu, thread);

	sid->id = id;
	sid->evsel = evsel;
	hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS);
	hlist_add_head(&sid->node, &evlist->heads[hash]);
}

void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel,
			 int cpu, int thread, u64 id)
{
	perf_evlist__id_hash(evlist, evsel, cpu, thread, id);
	evsel->id[evsel->ids++] = id;
}

int perf_evlist__id_add_fd(struct perf_evlist *evlist,
			   struct perf_evsel *evsel,
			   int cpu, int thread, int fd)
{
	u64 read_data[4] = { 0, };
	int id_idx = 1; /* The first entry is the counter value */
	u64 id;
	int ret;

	ret = ioctl(fd, PERF_EVENT_IOC_ID, &id);
	if (!ret)
		goto add;

	if (errno != ENOTTY)
		return -1;

	/* Legacy way to get event id.. All hail to old kernels! */

	/*
	 * This way does not work with group format read, so bail
	 * out in that case.
	 */
	if (perf_evlist__read_format(evlist) & PERF_FORMAT_GROUP)
		return -1;

	if (!(evsel->attr.read_format & PERF_FORMAT_ID) ||
	    read(fd, &read_data, sizeof(read_data)) == -1)
		return -1;

	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		++id_idx;
	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		++id_idx;

	id = read_data[id_idx];

 add:
	perf_evlist__id_add(evlist, evsel, cpu, thread, id);
	return 0;
}

static void perf_evlist__set_sid_idx(struct perf_evlist *evlist,
				     struct perf_evsel *evsel, int idx, int cpu,
				     int thread)
{
	struct perf_sample_id *sid = SID(evsel, cpu, thread);
	sid->idx = idx;
	if (evlist->cpus && cpu >= 0)
		sid->cpu = evlist->cpus->map[cpu];
	else
		sid->cpu = -1;
	if (!evsel->system_wide && evlist->threads && thread >= 0)
		sid->tid = thread_map__pid(evlist->threads, thread);
	else
		sid->tid = -1;
}

struct perf_sample_id *perf_evlist__id2sid(struct perf_evlist *evlist, u64 id)
{
	struct hlist_head *head;
	struct perf_sample_id *sid;
	int hash;

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->heads[hash];

	hlist_for_each_entry(sid, head, node)
		if (sid->id == id)
			return sid;

	return NULL;
}

struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
{
	struct perf_sample_id *sid;

	if (evlist->nr_entries == 1 || !id)
		return perf_evlist__first(evlist);

	sid = perf_evlist__id2sid(evlist, id);
	if (sid)
		return sid->evsel;

	if (!perf_evlist__sample_id_all(evlist))
		return perf_evlist__first(evlist);

	return NULL;
}

struct perf_evsel *perf_evlist__id2evsel_strict(struct perf_evlist *evlist,
						u64 id)
{
	struct perf_sample_id *sid;

	if (!id)
		return NULL;

	sid = perf_evlist__id2sid(evlist, id);
	if (sid)
		return sid->evsel;

	return NULL;
}

static int perf_evlist__event2id(struct perf_evlist *evlist,
				 union perf_event *event, u64 *id)
{
	const u64 *array = event->sample.array;
	ssize_t n;

	n = (event->header.size - sizeof(event->header)) >> 3;

	if (event->header.type == PERF_RECORD_SAMPLE) {
		if (evlist->id_pos >= n)
			return -1;
		*id = array[evlist->id_pos];
	} else {
		if (evlist->is_pos > n)
			return -1;
		n -= evlist->is_pos;
		*id = array[n];
	}
	return 0;
}

struct perf_evsel *perf_evlist__event2evsel(struct perf_evlist *evlist,
					    union perf_event *event)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	struct hlist_head *head;
	struct perf_sample_id *sid;
	int hash;
	u64 id;

	if (evlist->nr_entries == 1)
		return first;

	if (!first->attr.sample_id_all &&
	    event->header.type != PERF_RECORD_SAMPLE)
		return first;

	if (perf_evlist__event2id(evlist, event, &id))
		return NULL;

	/* Synthesized events have an id of zero */
	if (!id)
		return first;

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->heads[hash];

	hlist_for_each_entry(sid, head, node) {
		if (sid->id == id)
			return sid->evsel;
	}
	return NULL;
}

static int perf_evlist__set_paused(struct perf_evlist *evlist, bool value)
{
	int i;

	if (!evlist->backward_mmap)
		return 0;

	for (i = 0; i < evlist->nr_mmaps; i++) {
		int fd = evlist->backward_mmap[i].fd;
		int err;

		if (fd < 0)
			continue;
		err = ioctl(fd, PERF_EVENT_IOC_PAUSE_OUTPUT, value ? 1 : 0);
		if (err)
			return err;
	}
	return 0;
}

static int perf_evlist__pause(struct perf_evlist *evlist)
{
	return perf_evlist__set_paused(evlist, true);
}

static int perf_evlist__resume(struct perf_evlist *evlist)
{
	return perf_evlist__set_paused(evlist, false);
}

/* When check_messup is true, 'end' must point to a good entry */
static union perf_event *
perf_mmap__read(struct perf_mmap *md, bool check_messup, u64 start,
		u64 end, u64 *prev)
{
	unsigned char *data = md->base + page_size;
	union perf_event *event = NULL;
	int diff = end - start;

	if (check_messup) {
		/*
		 * If we're further behind than half the buffer, there's a chance
		 * the writer will bite our tail and mess up the samples under us.
		 *
		 * If we somehow ended up ahead of the 'end', we got messed up.
		 *
		 * In either case, truncate and restart at 'end'.
		 */
		if (diff > md->mask / 2 || diff < 0) {
			fprintf(stderr, "WARNING: failed to keep up with mmap data.\n");

			/*
			 * 'end' points to a known good entry, start there.
			 */
			start = end;
			diff = 0;
		}
	}

	if (diff >= (int)sizeof(event->header)) {
		size_t size;

		event = (union perf_event *)&data[start & md->mask];
		size = event->header.size;

		if (size < sizeof(event->header) || diff < (int)size) {
			event = NULL;
			goto broken_event;
		}

		/*
		 * Event straddles the mmap boundary -- header should always
		 * be inside due to u64 alignment of output.
		 */
		if ((start & md->mask) + size != ((start + size) & md->mask)) {
			unsigned int offset = start;
			unsigned int len = min(sizeof(*event), size), cpy;
			void *dst = md->event_copy;

			do {
				cpy = min(md->mask + 1 - (offset & md->mask), len);
				memcpy(dst, &data[offset & md->mask], cpy);
				offset += cpy;
				dst += cpy;
				len -= cpy;
			} while (len);

			event = (union perf_event *) md->event_copy;
		}

		start += size;
	}

broken_event:
	if (prev)
		*prev = start;

	return event;
}

union perf_event *perf_mmap__read_forward(struct perf_mmap *md, bool check_messup)
{
	u64 head;
	u64 old = md->prev;

	/*
	 * Check if event was unmapped due to a POLLHUP/POLLERR.
	 */
	if (!refcount_read(&md->refcnt))
		return NULL;

	head = perf_mmap__read_head(md);

	return perf_mmap__read(md, check_messup, old, head, &md->prev);
}

union perf_event *
perf_mmap__read_backward(struct perf_mmap *md)
{
	u64 head, end;
	u64 start = md->prev;

	/*
	 * Check if event was unmapped due to a POLLHUP/POLLERR.
	 */
	if (!refcount_read(&md->refcnt))
		return NULL;

	head = perf_mmap__read_head(md);
	if (!head)
		return NULL;

	/*
	 * In a backward ring buffer 'head' starts at 0 and the kernel
	 * subtracts sizeof(record) from it on every write, so 'head' is
	 * effectively negative.  The 'end' pointer is derived by adding the
	 * ring buffer size to 'head', meaning the valid data that can be
	 * read spans the whole ring buffer.  If 'end' is positive, the ring
	 * buffer has not been fully filled yet, so 'end' must be adjusted
	 * to 0.
	 *
	 * However, since both 'head' and 'end' are unsigned, we can't
	 * simply compare 'end' against 0.  Instead we compare '-head' with
	 * the size of the ring buffer, where '-head' is the number of
	 * bytes the kernel has written to the ring buffer.
	 */
	if (-head < (u64)(md->mask + 1))
		end = 0;
	else
		end = head + md->mask + 1;

	return perf_mmap__read(md, false, start, end, &md->prev);
}

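/*
 * Worked example for the clamping above (sketch): with a 64KiB data area
 * (md->mask + 1 == 0x10000), after the kernel has written 0x3000 bytes
 * 'head' reads as -0x3000, so -head == 0x3000 is smaller than the buffer
 * size and 'end' is clamped to 0.  Only once the kernel has written at
 * least a full buffer does 'end' become head + md->mask + 1.
 */
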
union perf_event *perf_evlist__mmap_read_forward(struct perf_evlist *evlist, int idx)
{
	struct perf_mmap *md = &evlist->mmap[idx];

	/*
	 * Checking for messup is required for a forward overwritable ring
	 * buffer: the memory pointed to by md->prev can be overwritten in
	 * this case.  No need for a read-write ring buffer: the kernel
	 * stops outputting when it hits md->prev (perf_mmap__consume()).
	 */
	return perf_mmap__read_forward(md, evlist->overwrite);
}

union perf_event *perf_evlist__mmap_read_backward(struct perf_evlist *evlist, int idx)
{
	struct perf_mmap *md = &evlist->mmap[idx];

	/*
	 * No need to check for messup with a backward ring buffer:
	 * we can always read arbitrarily long data from a backward
	 * ring buffer unless we forget to pause it before reading.
	 */
	return perf_mmap__read_backward(md);
}

union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
{
	return perf_evlist__mmap_read_forward(evlist, idx);
}

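/*
 * Typical consumer loop over one mmap using the accessors above (sketch,
 * setup and error handling elided):
 *
 *	union perf_event *event;
 *
 *	while ((event = perf_evlist__mmap_read(evlist, idx)) != NULL) {
 *		... process the event ...
 *		perf_evlist__mmap_consume(evlist, idx);
 *	}
 */
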
void perf_mmap__read_catchup(struct perf_mmap *md)
{
	u64 head;

	if (!refcount_read(&md->refcnt))
		return;

	head = perf_mmap__read_head(md);
	md->prev = head;
}

void perf_evlist__mmap_read_catchup(struct perf_evlist *evlist, int idx)
{
	perf_mmap__read_catchup(&evlist->mmap[idx]);
}

static bool perf_mmap__empty(struct perf_mmap *md)
{
	return perf_mmap__read_head(md) == md->prev && !md->auxtrace_mmap.base;
}

static void perf_mmap__get(struct perf_mmap *map)
{
	refcount_inc(&map->refcnt);
}

static void perf_mmap__put(struct perf_mmap *md)
{
	BUG_ON(md->base && refcount_read(&md->refcnt) == 0);

	if (refcount_dec_and_test(&md->refcnt))
		perf_mmap__munmap(md);
}

void perf_mmap__consume(struct perf_mmap *md, bool overwrite)
{
	if (!overwrite) {
		u64 old = md->prev;

		perf_mmap__write_tail(md, old);
	}

	if (refcount_read(&md->refcnt) == 1 && perf_mmap__empty(md))
		perf_mmap__put(md);
}

void perf_evlist__mmap_consume(struct perf_evlist *evlist, int idx)
{
	perf_mmap__consume(&evlist->mmap[idx], evlist->overwrite);
}

int __weak auxtrace_mmap__mmap(struct auxtrace_mmap *mm __maybe_unused,
			       struct auxtrace_mmap_params *mp __maybe_unused,
			       void *userpg __maybe_unused,
			       int fd __maybe_unused)
{
	return 0;
}

void __weak auxtrace_mmap__munmap(struct auxtrace_mmap *mm __maybe_unused)
{
}

void __weak auxtrace_mmap_params__init(
			struct auxtrace_mmap_params *mp __maybe_unused,
			off_t auxtrace_offset __maybe_unused,
			unsigned int auxtrace_pages __maybe_unused,
			bool auxtrace_overwrite __maybe_unused)
{
}

void __weak auxtrace_mmap_params__set_idx(
			struct auxtrace_mmap_params *mp __maybe_unused,
			struct perf_evlist *evlist __maybe_unused,
			int idx __maybe_unused,
			bool per_cpu __maybe_unused)
{
}

static void perf_mmap__munmap(struct perf_mmap *map)
{
	if (map->base != NULL) {
		munmap(map->base, perf_mmap__mmap_len(map));
		map->base = NULL;
		map->fd = -1;
		refcount_set(&map->refcnt, 0);
	}
	auxtrace_mmap__munmap(&map->auxtrace_mmap);
}

static void perf_evlist__munmap_nofree(struct perf_evlist *evlist)
{
	int i;

	if (evlist->mmap)
		for (i = 0; i < evlist->nr_mmaps; i++)
			perf_mmap__munmap(&evlist->mmap[i]);

	if (evlist->backward_mmap)
		for (i = 0; i < evlist->nr_mmaps; i++)
			perf_mmap__munmap(&evlist->backward_mmap[i]);
}

void perf_evlist__munmap(struct perf_evlist *evlist)
{
	perf_evlist__munmap_nofree(evlist);
	zfree(&evlist->mmap);
	zfree(&evlist->backward_mmap);
}

static struct perf_mmap *perf_evlist__alloc_mmap(struct perf_evlist *evlist)
{
	int i;
	struct perf_mmap *map;

	evlist->nr_mmaps = cpu_map__nr(evlist->cpus);
	if (cpu_map__empty(evlist->cpus))
		evlist->nr_mmaps = thread_map__nr(evlist->threads);
	map = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
	if (!map)
		return NULL;

	for (i = 0; i < evlist->nr_mmaps; i++) {
		map[i].fd = -1;
		/*
		 * When the perf_mmap() call is made we grab one refcount, plus
		 * one extra to let perf_evlist__mmap_consume() get the last
		 * events after all real references (perf_mmap__get()) are
		 * dropped.
		 *
		 * Each PERF_EVENT_IOC_SET_OUTPUT points to this mmap and
		 * thus does perf_mmap__get() on it.
		 */
		refcount_set(&map[i].refcnt, 0);
	}
	return map;
}

struct mmap_params {
	int	prot;
	int	mask;
	struct auxtrace_mmap_params auxtrace_mp;
};

static int perf_mmap__mmap(struct perf_mmap *map,
			   struct mmap_params *mp, int fd)
{
	/*
	 * The last reference will be dropped at perf_evlist__mmap_consume(),
	 * so that we make sure we don't prevent tools from consuming every
	 * last event in the ring buffer.
	 *
	 * I.e. we can get the POLLHUP meaning that the fd doesn't exist
	 * anymore, but the last events for it are still in the ring buffer,
	 * waiting to be consumed.
	 *
	 * Tools can choose to ignore this at their own discretion, but the
	 * evlist layer can't just drop it when filtering events in
	 * perf_evlist__filter_pollfd().
	 */
	refcount_set(&map->refcnt, 2);
	map->prev = 0;
	map->mask = mp->mask;
	map->base = mmap(NULL, perf_mmap__mmap_len(map), mp->prot,
			 MAP_SHARED, fd, 0);
	if (map->base == MAP_FAILED) {
		pr_debug2("failed to mmap perf event ring buffer, error %d\n",
			  errno);
		map->base = NULL;
		return -1;
	}
	map->fd = fd;

	if (auxtrace_mmap__mmap(&map->auxtrace_mmap,
				&mp->auxtrace_mp, map->base, fd))
		return -1;

	return 0;
}

static bool
perf_evlist__should_poll(struct perf_evlist *evlist __maybe_unused,
			 struct perf_evsel *evsel)
{
	if (evsel->attr.write_backward)
		return false;
	return true;
}

static int perf_evlist__mmap_per_evsel(struct perf_evlist *evlist, int idx,
				       struct mmap_params *mp, int cpu_idx,
				       int thread, int *_output, int *_output_backward)
{
	struct perf_evsel *evsel;
	int revent;
	int evlist_cpu = cpu_map__cpu(evlist->cpus, cpu_idx);

	evlist__for_each_entry(evlist, evsel) {
		struct perf_mmap *maps = evlist->mmap;
		int *output = _output;
		int fd;
		int cpu;

		if (evsel->attr.write_backward) {
			output = _output_backward;
			maps = evlist->backward_mmap;

			if (!maps) {
				maps = perf_evlist__alloc_mmap(evlist);
				if (!maps)
					return -1;
				evlist->backward_mmap = maps;
				if (evlist->bkw_mmap_state == BKW_MMAP_NOTREADY)
					perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_RUNNING);
			}
		}

		if (evsel->system_wide && thread)
			continue;

		cpu = cpu_map__idx(evsel->cpus, evlist_cpu);
		if (cpu == -1)
			continue;

		fd = FD(evsel, cpu, thread);

		if (*output == -1) {
			*output = fd;

			if (perf_mmap__mmap(&maps[idx], mp, *output) < 0)
				return -1;
		} else {
			if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, *output) != 0)
				return -1;

			perf_mmap__get(&maps[idx]);
		}

		revent = perf_evlist__should_poll(evlist, evsel) ? POLLIN : 0;

		/*
		 * The system_wide flag causes a selected event to be opened
		 * always without a pid.  Consequently it will never get a
		 * POLLHUP, but it is used for tracking in combination with
		 * other events, so it should not need to be polled anyway.
		 * Therefore don't add it for polling.
		 */
		if (!evsel->system_wide &&
		    __perf_evlist__add_pollfd(evlist, fd, &maps[idx], revent) < 0) {
			perf_mmap__put(&maps[idx]);
			return -1;
		}

		if (evsel->attr.read_format & PERF_FORMAT_ID) {
			if (perf_evlist__id_add_fd(evlist, evsel, cpu, thread,
						   fd) < 0)
				return -1;
			perf_evlist__set_sid_idx(evlist, evsel, idx, cpu,
						 thread);
		}
	}

	return 0;
}

static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist,
				     struct mmap_params *mp)
{
	int cpu, thread;
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = thread_map__nr(evlist->threads);

	pr_debug2("perf event ring buffer mmapped per cpu\n");
	for (cpu = 0; cpu < nr_cpus; cpu++) {
		int output = -1;
		int output_backward = -1;

		auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, cpu,
					      true);

		for (thread = 0; thread < nr_threads; thread++) {
			if (perf_evlist__mmap_per_evsel(evlist, cpu, mp, cpu,
							thread, &output, &output_backward))
				goto out_unmap;
		}
	}

	return 0;

out_unmap:
	perf_evlist__munmap_nofree(evlist);
	return -1;
}

static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist,
					struct mmap_params *mp)
{
	int thread;
	int nr_threads = thread_map__nr(evlist->threads);

	pr_debug2("perf event ring buffer mmapped per thread\n");
	for (thread = 0; thread < nr_threads; thread++) {
		int output = -1;
		int output_backward = -1;

		auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, thread,
					      false);

		if (perf_evlist__mmap_per_evsel(evlist, thread, mp, 0, thread,
						&output, &output_backward))
			goto out_unmap;
	}

	return 0;

out_unmap:
	perf_evlist__munmap_nofree(evlist);
	return -1;
}

unsigned long perf_event_mlock_kb_in_pages(void)
{
	unsigned long pages;
	int max;

	if (sysctl__read_int("kernel/perf_event_mlock_kb", &max) < 0) {
		/*
		 * Pick a historically reasonable default: something is off
		 * since we can't read the sysctl value, but let's not die
		 * yet...
		 */
		max = 512;
	} else {
		max -= (page_size / 1024);
	}

	pages = (max * 1024) / page_size;
	if (!is_power_of_2(pages))
		pages = rounddown_pow_of_two(pages);

	return pages;
}

size_t perf_evlist__mmap_size(unsigned long pages)
{
	if (pages == UINT_MAX)
		pages = perf_event_mlock_kb_in_pages();
	else if (!is_power_of_2(pages))
		return 0;

	return (pages + 1) * page_size;
}

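/*
 * Example (sketch): with 4KiB pages, perf_evlist__mmap_size(128) returns
 * (128 + 1) * 4096 = 516KiB -- 128 data pages plus one extra page for the
 * control/header area mapped in front of the ring buffer.
 */
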
static long parse_pages_arg(const char *str, unsigned long min,
			    unsigned long max)
{
	unsigned long pages, val;
	static struct parse_tag tags[] = {
		{ .tag  = 'B', .mult = 1       },
		{ .tag  = 'K', .mult = 1 << 10 },
		{ .tag  = 'M', .mult = 1 << 20 },
		{ .tag  = 'G', .mult = 1 << 30 },
		{ .tag  = 0 },
	};

	if (str == NULL)
		return -EINVAL;

	val = parse_tag_value(str, tags);
	if (val != (unsigned long) -1) {
		/* we got file size value */
		pages = PERF_ALIGN(val, page_size) / page_size;
	} else {
		/* we got pages count value */
		char *eptr;
		pages = strtoul(str, &eptr, 10);
		if (*eptr != '\0')
			return -EINVAL;
	}

	if (pages == 0 && min == 0) {
		/* leave number of pages at 0 */
	} else if (!is_power_of_2(pages)) {
		char buf[100];

		/* round pages up to next power of 2 */
		pages = roundup_pow_of_two(pages);
		if (!pages)
			return -EINVAL;

		unit_number__scnprintf(buf, sizeof(buf), pages * page_size);
		pr_info("rounding mmap pages size to %s (%lu pages)\n",
			buf, pages);
	}

	if (pages > max)
		return -EINVAL;

	return pages;
}

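/*
 * Examples (sketch): an argument of "16" is taken as a page count, while
 * "512K" or "4M" is parsed as a size, aligned to page_size and converted
 * to pages; a non power-of-two page count is rounded up to the next power
 * of two with a notice.
 */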
Adrian Huntere9db1312015-04-09 18:53:46 +03001256int __perf_evlist__parse_mmap_pages(unsigned int *mmap_pages, const char *str)
David Ahern33c2dcf2013-11-12 07:46:55 -07001257{
David Ahern33c2dcf2013-11-12 07:46:55 -07001258 unsigned long max = UINT_MAX;
1259 long pages;
1260
Adrian Hunterf5ae9c42013-12-09 15:18:38 +02001261 if (max > SIZE_MAX / page_size)
David Ahern33c2dcf2013-11-12 07:46:55 -07001262 max = SIZE_MAX / page_size;
1263
1264 pages = parse_pages_arg(str, 1, max);
1265 if (pages < 0) {
1266 pr_err("Invalid argument for --mmap_pages/-m\n");
Jiri Olsa994a1f72013-09-01 12:36:12 +02001267 return -1;
1268 }
1269
1270 *mmap_pages = pages;
1271 return 0;
1272}
1273
Adrian Huntere9db1312015-04-09 18:53:46 +03001274int perf_evlist__parse_mmap_pages(const struct option *opt, const char *str,
1275 int unset __maybe_unused)
1276{
1277 return __perf_evlist__parse_mmap_pages(opt->value, str);
1278}
1279
Adrian Hunterc83fa7f2013-10-18 15:29:12 +03001280/**
Adrian Hunter718c6022015-04-09 18:53:42 +03001281 * perf_evlist__mmap_ex - Create mmaps to receive events.
Adrian Hunterc83fa7f2013-10-18 15:29:12 +03001282 * @evlist: list of events
1283 * @pages: map length in pages
1284 * @overwrite: overwrite older events?
Adrian Hunter718c6022015-04-09 18:53:42 +03001285 * @auxtrace_pages - auxtrace map length in pages
1286 * @auxtrace_overwrite - overwrite older auxtrace data?
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -02001287 *
Adrian Hunterc83fa7f2013-10-18 15:29:12 +03001288 * If @overwrite is %false the user needs to signal event consumption using
1289 * perf_mmap__write_tail(). Using perf_evlist__mmap_read() does this
1290 * automatically.
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -02001291 *
Adrian Hunter718c6022015-04-09 18:53:42 +03001292 * Similarly, if @auxtrace_overwrite is %false the user needs to signal data
1293 * consumption using auxtrace_mmap__write_tail().
1294 *
Adrian Hunterc83fa7f2013-10-18 15:29:12 +03001295 * Return: %0 on success, negative error code otherwise.
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -02001296 */
Adrian Hunter718c6022015-04-09 18:53:42 +03001297int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages,
1298 bool overwrite, unsigned int auxtrace_pages,
1299 bool auxtrace_overwrite)
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -02001300{
Arnaldo Carvalho de Meloaece9482011-05-15 09:39:00 -03001301 struct perf_evsel *evsel;
Arnaldo Carvalho de Melo7e2ed092011-01-30 11:59:43 -02001302 const struct cpu_map *cpus = evlist->cpus;
1303 const struct thread_map *threads = evlist->threads;
Adrian Huntera8a8f3e2014-07-14 13:02:52 +03001304 struct mmap_params mp = {
1305 .prot = PROT_READ | (overwrite ? 0 : PROT_WRITE),
1306 };
Arnaldo Carvalho de Melo50a682c2011-11-09 09:10:47 -02001307
Wang Nan8db6d6b2016-07-14 08:34:35 +00001308 if (!evlist->mmap)
1309 evlist->mmap = perf_evlist__alloc_mmap(evlist);
1310 if (!evlist->mmap)
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -02001311 return -ENOMEM;
1312
Arnaldo Carvalho de Melo1b853372014-09-03 18:02:59 -03001313 if (evlist->pollfd.entries == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -02001314 return -ENOMEM;
1315
1316 evlist->overwrite = overwrite;
Jiri Olsa994a1f72013-09-01 12:36:12 +02001317 evlist->mmap_len = perf_evlist__mmap_size(pages);
Adrian Hunter2af68ef2013-10-18 15:29:07 +03001318 pr_debug("mmap size %zuB\n", evlist->mmap_len);
Adrian Huntera8a8f3e2014-07-14 13:02:52 +03001319 mp.mask = evlist->mmap_len - page_size - 1;
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -02001320
Adrian Hunter718c6022015-04-09 18:53:42 +03001321 auxtrace_mmap_params__init(&mp.auxtrace_mp, evlist->mmap_len,
1322 auxtrace_pages, auxtrace_overwrite);
1323
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03001324 evlist__for_each_entry(evlist, evsel) {
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -02001325 if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
Arnaldo Carvalho de Meloa91e5432011-03-10 11:15:54 -03001326 evsel->sample_id == NULL &&
Arnaldo Carvalho de Meloa14bb7a2012-09-26 12:41:14 -03001327 perf_evsel__alloc_id(evsel, cpu_map__nr(cpus), threads->nr) < 0)
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -02001328 return -ENOMEM;
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -02001329 }
1330
Sukadev Bhattiproluec1e7e42013-05-22 17:42:38 -07001331 if (cpu_map__empty(cpus))
Adrian Huntera8a8f3e2014-07-14 13:02:52 +03001332 return perf_evlist__mmap_per_thread(evlist, &mp);
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -02001333
Adrian Huntera8a8f3e2014-07-14 13:02:52 +03001334 return perf_evlist__mmap_per_cpu(evlist, &mp);
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -02001335}
Arnaldo Carvalho de Melo7e2ed092011-01-30 11:59:43 -02001336
Adrian Hunter718c6022015-04-09 18:53:42 +03001337int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
1338 bool overwrite)
1339{
1340 return perf_evlist__mmap_ex(evlist, pages, overwrite, 0, false);
1341}
1342
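/*
 * Illustrative usage sketch, not part of the original file: the call
 * sequence described by the comment above perf_evlist__mmap_ex().  The
 * evsel setup is elided and error handling is minimal;
 * perf_evlist__mmap_read(), perf_evlist__mmap_consume() and
 * perf_evlist__delete() are assumed to be the usual helpers declared
 * in evlist.h.
 */
static int __maybe_unused example_mmap_and_read(struct target *target)
{
	struct perf_evlist *evlist = perf_evlist__new();
	union perf_event *event;
	int i;

	if (evlist == NULL)
		return -ENOMEM;

	/* ... add evsels to the evlist here ... */

	/* UINT_MAX pages asks for the default mmap size. */
	if (perf_evlist__create_maps(evlist, target) < 0 ||
	    perf_evlist__open(evlist) < 0 ||
	    perf_evlist__mmap(evlist, UINT_MAX, false) < 0) {
		perf_evlist__delete(evlist);
		return -1;
	}

	/* @overwrite was false, so mmap_read()/consume() move the tail for us. */
	for (i = 0; i < evlist->nr_mmaps; i++) {
		while ((event = perf_evlist__mmap_read(evlist, i)) != NULL)
			perf_evlist__mmap_consume(evlist, i);
	}

	perf_evlist__delete(evlist);
	return 0;
}
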
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03001343int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target)
Arnaldo Carvalho de Melo7e2ed092011-01-30 11:59:43 -02001344{
Adrian Hunter74bfd2b2015-09-08 10:58:57 +03001345 struct cpu_map *cpus;
1346 struct thread_map *threads;
Arnaldo Carvalho de Melo7e2ed092011-01-30 11:59:43 -02001347
Adrian Hunter74bfd2b2015-09-08 10:58:57 +03001348 threads = thread_map__new_str(target->pid, target->tid, target->uid);
1349
1350 if (!threads)
Arnaldo Carvalho de Melo7e2ed092011-01-30 11:59:43 -02001351 return -1;
1352
Dongsheng Yang9c105fb2013-12-04 17:56:40 -05001353 if (target__uses_dummy_map(target))
Adrian Hunter74bfd2b2015-09-08 10:58:57 +03001354 cpus = cpu_map__dummy_new();
Namhyung Kim879d77d2012-05-16 18:45:48 +09001355 else
Adrian Hunter74bfd2b2015-09-08 10:58:57 +03001356 cpus = cpu_map__new(target->cpu_list);
Arnaldo Carvalho de Melo7e2ed092011-01-30 11:59:43 -02001357
Adrian Hunter74bfd2b2015-09-08 10:58:57 +03001358 if (!cpus)
Arnaldo Carvalho de Melo7e2ed092011-01-30 11:59:43 -02001359 goto out_delete_threads;
1360
Adrian Hunterec9a77a2015-09-08 10:58:52 +03001361 evlist->has_user_cpus = !!target->cpu_list;
1362
Adrian Hunter74bfd2b2015-09-08 10:58:57 +03001363 perf_evlist__set_maps(evlist, cpus, threads);
Adrian Hunterd5bc0562015-09-08 10:58:51 +03001364
1365 return 0;
Arnaldo Carvalho de Melo7e2ed092011-01-30 11:59:43 -02001366
1367out_delete_threads:
Adrian Hunter74bfd2b2015-09-08 10:58:57 +03001368 thread_map__put(threads);
Arnaldo Carvalho de Melo7e2ed092011-01-30 11:59:43 -02001369 return -1;
1370}
1371
Adrian Hunterd5bc0562015-09-08 10:58:51 +03001372void perf_evlist__set_maps(struct perf_evlist *evlist, struct cpu_map *cpus,
1373 struct thread_map *threads)
Jiri Olsa3de5cfb2015-07-21 14:31:30 +02001374{
Adrian Hunter934e0f22015-09-08 10:58:56 +03001375 /*
1376 * Allow for the possibility that one or the other of the maps isn't being
1377 * changed, i.e. don't put it. Note we are assuming the maps that are
1378 * being applied are brand new and evlist is taking ownership of the
1379 * original reference count of 1. If that is not the case it is up to
1380 * the caller to increase the reference count.
1381 */
1382 if (cpus != evlist->cpus) {
Jiri Olsa3de5cfb2015-07-21 14:31:30 +02001383 cpu_map__put(evlist->cpus);
Arnaldo Carvalho de Meloa55e5662016-02-17 10:57:19 -03001384 evlist->cpus = cpu_map__get(cpus);
Adrian Hunter934e0f22015-09-08 10:58:56 +03001385 }
Jiri Olsa3de5cfb2015-07-21 14:31:30 +02001386
Adrian Hunter934e0f22015-09-08 10:58:56 +03001387 if (threads != evlist->threads) {
Jiri Olsa3de5cfb2015-07-21 14:31:30 +02001388 thread_map__put(evlist->threads);
Arnaldo Carvalho de Meloa55e5662016-02-17 10:57:19 -03001389 evlist->threads = thread_map__get(threads);
Adrian Hunter934e0f22015-09-08 10:58:56 +03001390 }
Jiri Olsa3de5cfb2015-07-21 14:31:30 +02001391
Adrian Hunterec9a77a2015-09-08 10:58:52 +03001392 perf_evlist__propagate_maps(evlist);
Jiri Olsa3de5cfb2015-07-21 14:31:30 +02001393}
1394
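/*
 * Illustrative sketch, not part of the original file: the ownership rule
 * from the comment in perf_evlist__set_maps().  Brand new maps start with
 * a reference count of 1 which the evlist takes over, so a caller that
 * wants to keep using a map afterwards must grab its own reference.
 */
static int __maybe_unused example_set_maps_ownership(struct perf_evlist *evlist)
{
	struct cpu_map *cpus = cpu_map__new(NULL);		/* refcount == 1 */
	struct thread_map *threads = thread_map__new_dummy();	/* refcount == 1 */

	if (!cpus || !threads) {
		cpu_map__put(cpus);
		thread_map__put(threads);
		return -ENOMEM;
	}

	/* The evlist now owns both original references... */
	perf_evlist__set_maps(evlist, cpus, threads);

	/*
	 * ...so a caller keeping a long lived pointer to @cpus would call
	 * cpu_map__get(cpus) before this point and pair it with its own
	 * cpu_map__put() when done.
	 */
	return 0;
}
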
Arnaldo Carvalho de Melo22c8a372016-04-11 18:37:45 -03001395void __perf_evlist__set_sample_bit(struct perf_evlist *evlist,
1396 enum perf_event_sample_format bit)
1397{
1398 struct perf_evsel *evsel;
1399
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03001400 evlist__for_each_entry(evlist, evsel)
Arnaldo Carvalho de Melo22c8a372016-04-11 18:37:45 -03001401 __perf_evsel__set_sample_bit(evsel, bit);
1402}
1403
1404void __perf_evlist__reset_sample_bit(struct perf_evlist *evlist,
1405 enum perf_event_sample_format bit)
1406{
1407 struct perf_evsel *evsel;
1408
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03001409 evlist__for_each_entry(evlist, evsel)
Arnaldo Carvalho de Melo22c8a372016-04-11 18:37:45 -03001410 __perf_evsel__reset_sample_bit(evsel, bit);
1411}
1412
Arnaldo Carvalho de Melo23d4aad2015-03-24 19:23:47 -03001413int perf_evlist__apply_filters(struct perf_evlist *evlist, struct perf_evsel **err_evsel)
Frederic Weisbecker0a102472011-02-26 04:51:54 +01001414{
Frederic Weisbecker0a102472011-02-26 04:51:54 +01001415 struct perf_evsel *evsel;
Arnaldo Carvalho de Melo745cefc2012-09-26 15:07:39 -03001416 int err = 0;
1417 const int ncpus = cpu_map__nr(evlist->cpus),
Namhyung Kimb3a319d2013-03-11 16:43:14 +09001418 nthreads = thread_map__nr(evlist->threads);
Frederic Weisbecker0a102472011-02-26 04:51:54 +01001419
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03001420 evlist__for_each_entry(evlist, evsel) {
Arnaldo Carvalho de Melo745cefc2012-09-26 15:07:39 -03001421 if (evsel->filter == NULL)
Frederic Weisbecker0a102472011-02-26 04:51:54 +01001422 continue;
Arnaldo Carvalho de Melo745cefc2012-09-26 15:07:39 -03001423
Kan Liangd988d5e2015-08-21 02:23:14 -04001424 /*
1425 * Filters only work for tracepoint events, which don't have a cpu limit,
1426 * so the evlist and evsel cpu maps should always be the same.
1427 */
Arnaldo Carvalho de Melof47805a2015-07-03 15:53:49 -03001428 err = perf_evsel__apply_filter(evsel, ncpus, nthreads, evsel->filter);
Arnaldo Carvalho de Melo23d4aad2015-03-24 19:23:47 -03001429 if (err) {
1430 *err_evsel = evsel;
Arnaldo Carvalho de Melo745cefc2012-09-26 15:07:39 -03001431 break;
Arnaldo Carvalho de Melo23d4aad2015-03-24 19:23:47 -03001432 }
Frederic Weisbecker0a102472011-02-26 04:51:54 +01001433 }
1434
Arnaldo Carvalho de Melo745cefc2012-09-26 15:07:39 -03001435 return err;
1436}
1437
1438int perf_evlist__set_filter(struct perf_evlist *evlist, const char *filter)
1439{
1440 struct perf_evsel *evsel;
1441 int err = 0;
Arnaldo Carvalho de Melo745cefc2012-09-26 15:07:39 -03001442
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03001443 evlist__for_each_entry(evlist, evsel) {
Wang Nanfdf14722016-02-26 09:31:53 +00001444 if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
1445 continue;
1446
Arnaldo Carvalho de Melo94ad89b2015-07-03 17:42:03 -03001447 err = perf_evsel__set_filter(evsel, filter);
Arnaldo Carvalho de Melo745cefc2012-09-26 15:07:39 -03001448 if (err)
1449 break;
1450 }
1451
1452 return err;
Frederic Weisbecker0a102472011-02-26 04:51:54 +01001453}
Frederic Weisbecker74429962011-05-21 17:49:00 +02001454
Arnaldo Carvalho de Melobe199ad2015-02-21 11:33:47 -08001455int perf_evlist__set_filter_pids(struct perf_evlist *evlist, size_t npids, pid_t *pids)
Arnaldo Carvalho de Melocfd70a22015-02-21 10:09:55 -08001456{
1457 char *filter;
Arnaldo Carvalho de Melobe199ad2015-02-21 11:33:47 -08001458 int ret = -1;
1459 size_t i;
Arnaldo Carvalho de Melocfd70a22015-02-21 10:09:55 -08001460
Arnaldo Carvalho de Melobe199ad2015-02-21 11:33:47 -08001461 for (i = 0; i < npids; ++i) {
1462 if (i == 0) {
1463 if (asprintf(&filter, "common_pid != %d", pids[i]) < 0)
1464 return -1;
1465 } else {
1466 char *tmp;
1467
1468 if (asprintf(&tmp, "%s && common_pid != %d", filter, pids[i]) < 0)
1469 goto out_free;
1470
1471 free(filter);
1472 filter = tmp;
1473 }
1474 }
Arnaldo Carvalho de Melocfd70a22015-02-21 10:09:55 -08001475
1476 ret = perf_evlist__set_filter(evlist, filter);
Arnaldo Carvalho de Melobe199ad2015-02-21 11:33:47 -08001477out_free:
Arnaldo Carvalho de Melocfd70a22015-02-21 10:09:55 -08001478 free(filter);
1479 return ret;
1480}
1481
Arnaldo Carvalho de Melobe199ad2015-02-21 11:33:47 -08001482int perf_evlist__set_filter_pid(struct perf_evlist *evlist, pid_t pid)
1483{
1484 return perf_evlist__set_filter_pids(evlist, 1, &pid);
1485}
1486
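/*
 * Illustrative sketch, not part of the original file: what the pid filter
 * helpers above end up installing.  For pids {1, 2} the string built by
 * perf_evlist__set_filter_pids() is
 * "common_pid != 1 && common_pid != 2", and it is only applied to
 * tracepoint evsels.  The typical use is a tool excluding itself:
 */
static int __maybe_unused example_filter_out_self(struct perf_evlist *evlist)
{
	/* Equivalent to perf_evlist__set_filter(evlist, "common_pid != <my pid>") */
	return perf_evlist__set_filter_pid(evlist, getpid());
}
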
Arnaldo Carvalho de Melo0c21f732012-08-14 16:42:15 -03001487bool perf_evlist__valid_sample_type(struct perf_evlist *evlist)
Frederic Weisbecker74429962011-05-21 17:49:00 +02001488{
Adrian Hunter75562572013-08-27 11:23:09 +03001489 struct perf_evsel *pos;
Arnaldo Carvalho de Meloc2a70652011-06-02 11:04:54 -03001490
Adrian Hunter75562572013-08-27 11:23:09 +03001491 if (evlist->nr_entries == 1)
1492 return true;
1493
1494 if (evlist->id_pos < 0 || evlist->is_pos < 0)
1495 return false;
1496
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03001497 evlist__for_each_entry(evlist, pos) {
Adrian Hunter75562572013-08-27 11:23:09 +03001498 if (pos->id_pos != evlist->id_pos ||
1499 pos->is_pos != evlist->is_pos)
Arnaldo Carvalho de Meloc2a70652011-06-02 11:04:54 -03001500 return false;
Frederic Weisbecker74429962011-05-21 17:49:00 +02001501 }
1502
Arnaldo Carvalho de Meloc2a70652011-06-02 11:04:54 -03001503 return true;
1504}
1505
Adrian Hunter75562572013-08-27 11:23:09 +03001506u64 __perf_evlist__combined_sample_type(struct perf_evlist *evlist)
Arnaldo Carvalho de Meloc2a70652011-06-02 11:04:54 -03001507{
Adrian Hunter75562572013-08-27 11:23:09 +03001508 struct perf_evsel *evsel;
1509
1510 if (evlist->combined_sample_type)
1511 return evlist->combined_sample_type;
1512
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03001513 evlist__for_each_entry(evlist, evsel)
Adrian Hunter75562572013-08-27 11:23:09 +03001514 evlist->combined_sample_type |= evsel->attr.sample_type;
1515
1516 return evlist->combined_sample_type;
1517}
1518
1519u64 perf_evlist__combined_sample_type(struct perf_evlist *evlist)
1520{
1521 evlist->combined_sample_type = 0;
1522 return __perf_evlist__combined_sample_type(evlist);
Arnaldo Carvalho de Meloc2a70652011-06-02 11:04:54 -03001523}
1524
Andi Kleen98df8582015-07-18 08:24:47 -07001525u64 perf_evlist__combined_branch_type(struct perf_evlist *evlist)
1526{
1527 struct perf_evsel *evsel;
1528 u64 branch_type = 0;
1529
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03001530 evlist__for_each_entry(evlist, evsel)
Andi Kleen98df8582015-07-18 08:24:47 -07001531 branch_type |= evsel->attr.branch_sample_type;
1532 return branch_type;
1533}
1534
Jiri Olsa9ede4732012-10-10 17:38:13 +02001535bool perf_evlist__valid_read_format(struct perf_evlist *evlist)
1536{
1537 struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;
1538 u64 read_format = first->attr.read_format;
1539 u64 sample_type = first->attr.sample_type;
1540
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03001541 evlist__for_each_entry(evlist, pos) {
Jiri Olsa9ede4732012-10-10 17:38:13 +02001542 if (read_format != pos->attr.read_format)
1543 return false;
1544 }
1545
1546 /* PERF_SAMPLE_READ implies PERF_FORMAT_ID. */
1547 if ((sample_type & PERF_SAMPLE_READ) &&
1548 !(read_format & PERF_FORMAT_ID)) {
1549 return false;
1550 }
1551
1552 return true;
1553}
1554
1555u64 perf_evlist__read_format(struct perf_evlist *evlist)
1556{
1557 struct perf_evsel *first = perf_evlist__first(evlist);
1558 return first->attr.read_format;
1559}
1560
Arnaldo Carvalho de Melo0c21f732012-08-14 16:42:15 -03001561u16 perf_evlist__id_hdr_size(struct perf_evlist *evlist)
Arnaldo Carvalho de Melo81e36bf2011-11-11 22:28:50 -02001562{
Arnaldo Carvalho de Melo0c21f732012-08-14 16:42:15 -03001563 struct perf_evsel *first = perf_evlist__first(evlist);
Arnaldo Carvalho de Melo81e36bf2011-11-11 22:28:50 -02001564 struct perf_sample *data;
1565 u64 sample_type;
1566 u16 size = 0;
1567
Arnaldo Carvalho de Melo81e36bf2011-11-11 22:28:50 -02001568 if (!first->attr.sample_id_all)
1569 goto out;
1570
1571 sample_type = first->attr.sample_type;
1572
1573 if (sample_type & PERF_SAMPLE_TID)
1574 size += sizeof(data->tid) * 2;
1575
1576 if (sample_type & PERF_SAMPLE_TIME)
1577 size += sizeof(data->time);
1578
1579 if (sample_type & PERF_SAMPLE_ID)
1580 size += sizeof(data->id);
1581
1582 if (sample_type & PERF_SAMPLE_STREAM_ID)
1583 size += sizeof(data->stream_id);
1584
1585 if (sample_type & PERF_SAMPLE_CPU)
1586 size += sizeof(data->cpu) * 2;
Adrian Hunter75562572013-08-27 11:23:09 +03001587
1588 if (sample_type & PERF_SAMPLE_IDENTIFIER)
1589 size += sizeof(data->id);
Arnaldo Carvalho de Melo81e36bf2011-11-11 22:28:50 -02001590out:
1591 return size;
1592}
1593
Arnaldo Carvalho de Melo0c21f732012-08-14 16:42:15 -03001594bool perf_evlist__valid_sample_id_all(struct perf_evlist *evlist)
Arnaldo Carvalho de Meloc2a70652011-06-02 11:04:54 -03001595{
Arnaldo Carvalho de Melo0c21f732012-08-14 16:42:15 -03001596 struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;
Arnaldo Carvalho de Meloc2a70652011-06-02 11:04:54 -03001597
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03001598 evlist__for_each_entry_continue(evlist, pos) {
Arnaldo Carvalho de Meloc2a70652011-06-02 11:04:54 -03001599 if (first->attr.sample_id_all != pos->attr.sample_id_all)
1600 return false;
1601 }
1602
1603 return true;
Frederic Weisbecker74429962011-05-21 17:49:00 +02001604}
1605
Arnaldo Carvalho de Melo0c21f732012-08-14 16:42:15 -03001606bool perf_evlist__sample_id_all(struct perf_evlist *evlist)
Frederic Weisbecker74429962011-05-21 17:49:00 +02001607{
Arnaldo Carvalho de Melo0c21f732012-08-14 16:42:15 -03001608 struct perf_evsel *first = perf_evlist__first(evlist);
Arnaldo Carvalho de Meloc2a70652011-06-02 11:04:54 -03001609 return first->attr.sample_id_all;
Frederic Weisbecker74429962011-05-21 17:49:00 +02001610}
Arnaldo Carvalho de Melo81cce8d2011-10-05 19:11:32 -03001611
1612void perf_evlist__set_selected(struct perf_evlist *evlist,
1613 struct perf_evsel *evsel)
1614{
1615 evlist->selected = evsel;
1616}
Arnaldo Carvalho de Melo727ab042011-10-25 10:42:19 -02001617
Namhyung Kima74b4b62013-03-15 14:48:48 +09001618void perf_evlist__close(struct perf_evlist *evlist)
1619{
1620 struct perf_evsel *evsel;
1621 int ncpus = cpu_map__nr(evlist->cpus);
1622 int nthreads = thread_map__nr(evlist->threads);
1623
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03001624 evlist__for_each_entry_reverse(evlist, evsel) {
Arnaldo Carvalho de Melo18ef15c2016-10-03 11:07:24 -03001625 int n = evsel->cpus ? evsel->cpus->nr : ncpus;
Stephane Eranian8ad92192014-01-17 16:34:06 +01001626 perf_evsel__close(evsel, n, nthreads);
1627 }
Namhyung Kima74b4b62013-03-15 14:48:48 +09001628}
1629
Arnaldo Carvalho de Melo4112eb12014-10-10 15:55:15 -03001630static int perf_evlist__create_syswide_maps(struct perf_evlist *evlist)
1631{
Adrian Hunter8c0498b2015-09-08 10:59:00 +03001632 struct cpu_map *cpus;
1633 struct thread_map *threads;
Arnaldo Carvalho de Melo4112eb12014-10-10 15:55:15 -03001634 int err = -ENOMEM;
1635
1636 /*
1637 * Try reading /sys/devices/system/cpu/online to get
1638 * an all cpus map.
1639 *
1640 * FIXME: -ENOMEM is the best we can do here, the cpu_map
1641 * code needs an overhaul to properly forward the
1642 * error, and we may not want to do that fallback to a
1643 * default cpu identity map :-\
1644 */
Adrian Hunter8c0498b2015-09-08 10:59:00 +03001645 cpus = cpu_map__new(NULL);
1646 if (!cpus)
Arnaldo Carvalho de Melo4112eb12014-10-10 15:55:15 -03001647 goto out;
1648
Adrian Hunter8c0498b2015-09-08 10:59:00 +03001649 threads = thread_map__new_dummy();
1650 if (!threads)
1651 goto out_put;
Arnaldo Carvalho de Melo4112eb12014-10-10 15:55:15 -03001652
Adrian Hunter8c0498b2015-09-08 10:59:00 +03001653 perf_evlist__set_maps(evlist, cpus, threads);
 err = 0;
Arnaldo Carvalho de Melo4112eb12014-10-10 15:55:15 -03001654out:
1655 return err;
Adrian Hunter8c0498b2015-09-08 10:59:00 +03001656out_put:
1657 cpu_map__put(cpus);
Arnaldo Carvalho de Melo4112eb12014-10-10 15:55:15 -03001658 goto out;
1659}
1660
Jiri Olsa6a4bb042012-08-08 12:22:36 +02001661int perf_evlist__open(struct perf_evlist *evlist)
Arnaldo Carvalho de Melo727ab042011-10-25 10:42:19 -02001662{
Jiri Olsa6a4bb042012-08-08 12:22:36 +02001663 struct perf_evsel *evsel;
Namhyung Kima74b4b62013-03-15 14:48:48 +09001664 int err;
Arnaldo Carvalho de Melo727ab042011-10-25 10:42:19 -02001665
Arnaldo Carvalho de Melo4112eb12014-10-10 15:55:15 -03001666 /*
1667 * Default: one fd per CPU, all threads, aka systemwide
1668 * as sys_perf_event_open(cpu = -1, thread = -1) is EINVAL
1669 */
1670 if (evlist->threads == NULL && evlist->cpus == NULL) {
1671 err = perf_evlist__create_syswide_maps(evlist);
1672 if (err < 0)
1673 goto out_err;
1674 }
1675
Adrian Hunter733cd2f2013-09-06 22:40:11 +03001676 perf_evlist__update_id_pos(evlist);
1677
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03001678 evlist__for_each_entry(evlist, evsel) {
Adrian Hunter23df7f72016-01-07 10:13:59 +01001679 err = perf_evsel__open(evsel, evsel->cpus, evsel->threads);
Arnaldo Carvalho de Melo727ab042011-10-25 10:42:19 -02001680 if (err < 0)
1681 goto out_err;
1682 }
1683
1684 return 0;
1685out_err:
Namhyung Kima74b4b62013-03-15 14:48:48 +09001686 perf_evlist__close(evlist);
Namhyung Kim41c21a62012-02-23 12:13:36 +09001687 errno = -err;
Arnaldo Carvalho de Melo727ab042011-10-25 10:42:19 -02001688 return err;
1689}
Arnaldo Carvalho de Melo35b9d882011-11-09 08:47:15 -02001690
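/*
 * Illustrative sketch, not part of the original file: the "no maps set"
 * default documented inside perf_evlist__open() above.  Without cpu and
 * thread maps the open falls back to a syswide cpu map plus a dummy
 * thread map, i.e. one fd per online CPU.  perf_evlist__add_default()
 * and perf_evlist__delete() are assumed to be the usual evlist.h helpers
 * (the former adds a cycles event).
 */
static int __maybe_unused example_open_syswide(void)
{
	struct perf_evlist *evlist = perf_evlist__new();
	int err = -ENOMEM;

	if (evlist == NULL)
		goto out;

	err = perf_evlist__add_default(evlist);
	if (err < 0)
		goto out_delete;

	/* No create_maps()/set_maps() call: system wide by default. */
	err = perf_evlist__open(evlist);
out_delete:
	perf_evlist__delete(evlist);
out:
	return err;
}
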
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03001691int perf_evlist__prepare_workload(struct perf_evlist *evlist, struct target *target,
Namhyung Kim55e162e2013-03-11 16:43:17 +09001692 const char *argv[], bool pipe_output,
Arnaldo Carvalho de Melo735f7e02014-01-03 14:56:49 -03001693 void (*exec_error)(int signo, siginfo_t *info, void *ucontext))
Arnaldo Carvalho de Melo35b9d882011-11-09 08:47:15 -02001694{
1695 int child_ready_pipe[2], go_pipe[2];
1696 char bf;
1697
1698 if (pipe(child_ready_pipe) < 0) {
1699 perror("failed to create 'ready' pipe");
1700 return -1;
1701 }
1702
1703 if (pipe(go_pipe) < 0) {
1704 perror("failed to create 'go' pipe");
1705 goto out_close_ready_pipe;
1706 }
1707
1708 evlist->workload.pid = fork();
1709 if (evlist->workload.pid < 0) {
1710 perror("failed to fork");
1711 goto out_close_pipes;
1712 }
1713
1714 if (!evlist->workload.pid) {
Arnaldo Carvalho de Melo5f1c4222014-07-28 12:39:50 -03001715 int ret;
1716
Namhyung Kim119fa3c2013-03-11 16:43:16 +09001717 if (pipe_output)
Arnaldo Carvalho de Melo35b9d882011-11-09 08:47:15 -02001718 dup2(2, 1);
1719
David Ahern0817df02013-05-25 17:50:39 -06001720 signal(SIGTERM, SIG_DFL);
1721
Arnaldo Carvalho de Melo35b9d882011-11-09 08:47:15 -02001722 close(child_ready_pipe[0]);
1723 close(go_pipe[1]);
1724 fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);
1725
1726 /*
Arnaldo Carvalho de Melo35b9d882011-11-09 08:47:15 -02001727 * Tell the parent we're ready to go
1728 */
1729 close(child_ready_pipe[1]);
1730
1731 /*
1732 * Wait until the parent tells us to go.
1733 */
Arnaldo Carvalho de Melo5f1c4222014-07-28 12:39:50 -03001734 ret = read(go_pipe[0], &bf, 1);
1735 /*
1736 * The parent will ask for the execvp() to be performed by
1737 * writing exactly one byte, in workload.cork_fd, usually via
1738 * perf_evlist__start_workload().
1739 *
Arnaldo Carvalho de Melo20f86fc2015-02-03 13:29:05 -03001740 * For cancelling the workload without actually running it,
Arnaldo Carvalho de Melo5f1c4222014-07-28 12:39:50 -03001741 * the parent will just close workload.cork_fd, without writing
1742 * anything, i.e. read will return zero and we just exit()
1743 * here.
1744 */
1745 if (ret != 1) {
1746 if (ret == -1)
1747 perror("unable to read pipe");
1748 exit(ret);
1749 }
Arnaldo Carvalho de Melo35b9d882011-11-09 08:47:15 -02001750
1751 execvp(argv[0], (char **)argv);
1752
Arnaldo Carvalho de Melo735f7e02014-01-03 14:56:49 -03001753 if (exec_error) {
Arnaldo Carvalho de Melof33cbe72014-01-02 15:11:25 -03001754 union sigval val;
1755
1756 val.sival_int = errno;
1757 if (sigqueue(getppid(), SIGUSR1, val))
1758 perror(argv[0]);
1759 } else
1760 perror(argv[0]);
Arnaldo Carvalho de Melo35b9d882011-11-09 08:47:15 -02001761 exit(-1);
1762 }
1763
Arnaldo Carvalho de Melo735f7e02014-01-03 14:56:49 -03001764 if (exec_error) {
1765 struct sigaction act = {
1766 .sa_flags = SA_SIGINFO,
1767 .sa_sigaction = exec_error,
1768 };
1769 sigaction(SIGUSR1, &act, NULL);
1770 }
1771
Arnaldo Carvalho de Melo1aaf63b2014-10-10 14:29:49 -03001772 if (target__none(target)) {
1773 if (evlist->threads == NULL) {
1774 fprintf(stderr, "FATAL: evlist->threads needs to be set at this point (%s:%d).\n",
1775 __func__, __LINE__);
1776 goto out_close_pipes;
1777 }
Jiri Olsae13798c2015-06-23 00:36:02 +02001778 thread_map__set_pid(evlist->threads, 0, evlist->workload.pid);
Arnaldo Carvalho de Melo1aaf63b2014-10-10 14:29:49 -03001779 }
Arnaldo Carvalho de Melo35b9d882011-11-09 08:47:15 -02001780
1781 close(child_ready_pipe[1]);
1782 close(go_pipe[0]);
1783 /*
1784 * wait for child to settle
1785 */
1786 if (read(child_ready_pipe[0], &bf, 1) == -1) {
1787 perror("unable to read pipe");
1788 goto out_close_pipes;
1789 }
1790
Namhyung Kimbcf31452013-06-26 16:14:15 +09001791 fcntl(go_pipe[1], F_SETFD, FD_CLOEXEC);
Arnaldo Carvalho de Melo35b9d882011-11-09 08:47:15 -02001792 evlist->workload.cork_fd = go_pipe[1];
1793 close(child_ready_pipe[0]);
1794 return 0;
1795
1796out_close_pipes:
1797 close(go_pipe[0]);
1798 close(go_pipe[1]);
1799out_close_ready_pipe:
1800 close(child_ready_pipe[0]);
1801 close(child_ready_pipe[1]);
1802 return -1;
1803}
1804
1805int perf_evlist__start_workload(struct perf_evlist *evlist)
1806{
1807 if (evlist->workload.cork_fd > 0) {
David Ahernb3824402013-07-02 13:27:21 -06001808 char bf = 0;
Namhyung Kimbcf31452013-06-26 16:14:15 +09001809 int ret;
Arnaldo Carvalho de Melo35b9d882011-11-09 08:47:15 -02001810 /*
1811 * Remove the cork, let it rip!
1812 */
Namhyung Kimbcf31452013-06-26 16:14:15 +09001813 ret = write(evlist->workload.cork_fd, &bf, 1);
1814 if (ret < 0)
Soramichi Akiyamae978be92017-01-10 10:41:00 -03001815 perror("unable to write to pipe");
Namhyung Kimbcf31452013-06-26 16:14:15 +09001816
1817 close(evlist->workload.cork_fd);
1818 return ret;
Arnaldo Carvalho de Melo35b9d882011-11-09 08:47:15 -02001819 }
1820
1821 return 0;
1822}
Arnaldo Carvalho de Melocb0b29e2012-08-02 11:42:57 -03001823
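/*
 * Illustrative sketch, not part of the original file: the fork/cork
 * protocol implemented above, as seen from a tool.  The workload is
 * forked early so its pid lands in the thread map and events can be
 * opened on it, but execvp() only happens once
 * perf_evlist__start_workload() writes the single byte into the "go"
 * pipe.  The evlist is assumed to already have its maps set up (e.g.
 * via perf_evlist__create_maps()) and perf_evlist__enable() is assumed
 * to be the usual evlist.h helper.
 */
static int __maybe_unused example_run_workload(struct perf_evlist *evlist,
					       struct target *target,
					       const char *argv[])
{
	if (perf_evlist__prepare_workload(evlist, target, argv, false, NULL) < 0)
		return -1;

	if (perf_evlist__open(evlist) < 0)
		return -1;

	perf_evlist__enable(evlist);

	/* Pop the cork: the child's read() returns and it exec()s argv[0]. */
	return perf_evlist__start_workload(evlist);
}
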
Arnaldo Carvalho de Meloa3f698f2012-08-02 12:23:46 -03001824int perf_evlist__parse_sample(struct perf_evlist *evlist, union perf_event *event,
Arnaldo Carvalho de Melo0807d2d2012-09-26 12:48:18 -03001825 struct perf_sample *sample)
Arnaldo Carvalho de Melocb0b29e2012-08-02 11:42:57 -03001826{
Adrian Hunter75562572013-08-27 11:23:09 +03001827 struct perf_evsel *evsel = perf_evlist__event2evsel(evlist, event);
1828
1829 if (!evsel)
1830 return -EFAULT;
Arnaldo Carvalho de Melo0807d2d2012-09-26 12:48:18 -03001831 return perf_evsel__parse_sample(evsel, event, sample);
Arnaldo Carvalho de Melocb0b29e2012-08-02 11:42:57 -03001832}
Arnaldo Carvalho de Melo78f067b2012-09-06 14:54:11 -03001833
1834size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp)
1835{
1836 struct perf_evsel *evsel;
1837 size_t printed = 0;
1838
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03001839 evlist__for_each_entry(evlist, evsel) {
Arnaldo Carvalho de Melo78f067b2012-09-06 14:54:11 -03001840 printed += fprintf(fp, "%s%s", evsel->idx ? ", " : "",
1841 perf_evsel__name(evsel));
1842 }
1843
Davidlohr Buesob2222132013-11-12 22:24:24 -08001844 return printed + fprintf(fp, "\n");
Arnaldo Carvalho de Melo78f067b2012-09-06 14:54:11 -03001845}
Arnaldo Carvalho de Melo6ef068c2013-10-17 12:07:58 -03001846
Arnaldo Carvalho de Melod9aade7f2016-02-18 13:34:09 -03001847int perf_evlist__strerror_open(struct perf_evlist *evlist,
Arnaldo Carvalho de Meloa8f23d82013-10-17 17:38:29 -03001848 int err, char *buf, size_t size)
1849{
1850 int printed, value;
Arnaldo Carvalho de Meloc8b5f2c2016-07-06 11:56:20 -03001851 char sbuf[STRERR_BUFSIZE], *emsg = str_error_r(err, sbuf, sizeof(sbuf));
Arnaldo Carvalho de Meloa8f23d82013-10-17 17:38:29 -03001852
1853 switch (err) {
1854 case EACCES:
1855 case EPERM:
1856 printed = scnprintf(buf, size,
1857 "Error:\t%s.\n"
1858 "Hint:\tCheck /proc/sys/kernel/perf_event_paranoid setting.", emsg);
1859
Adrian Hunter1a472452013-12-11 14:36:23 +02001860 value = perf_event_paranoid();
Arnaldo Carvalho de Meloa8f23d82013-10-17 17:38:29 -03001861
1862 printed += scnprintf(buf + printed, size - printed, "\nHint:\t");
1863
1864 if (value >= 2) {
1865 printed += scnprintf(buf + printed, size - printed,
1866 "For your workloads it needs to be <= 1\nHint:\t");
1867 }
1868 printed += scnprintf(buf + printed, size - printed,
Arnaldo Carvalho de Melo5229e362014-06-10 17:18:54 -03001869 "For system wide tracing it needs to be set to -1.\n");
Arnaldo Carvalho de Meloa8f23d82013-10-17 17:38:29 -03001870
1871 printed += scnprintf(buf + printed, size - printed,
Arnaldo Carvalho de Melo5229e362014-06-10 17:18:54 -03001872 "Hint:\tTry: 'sudo sh -c \"echo -1 > /proc/sys/kernel/perf_event_paranoid\"'\n"
1873 "Hint:\tThe current value is %d.", value);
Arnaldo Carvalho de Meloa8f23d82013-10-17 17:38:29 -03001874 break;
Arnaldo Carvalho de Melod9aade7f2016-02-18 13:34:09 -03001875 case EINVAL: {
1876 struct perf_evsel *first = perf_evlist__first(evlist);
1877 int max_freq;
1878
1879 if (sysctl__read_int("kernel/perf_event_max_sample_rate", &max_freq) < 0)
1880 goto out_default;
1881
1882 if (first->attr.sample_freq < (u64)max_freq)
1883 goto out_default;
1884
1885 printed = scnprintf(buf, size,
1886 "Error:\t%s.\n"
1887 "Hint:\tCheck /proc/sys/kernel/perf_event_max_sample_rate.\n"
1888 "Hint:\tThe current value is %d and %" PRIu64 " is being requested.",
1889 emsg, max_freq, first->attr.sample_freq);
1890 break;
1891 }
Arnaldo Carvalho de Meloa8f23d82013-10-17 17:38:29 -03001892 default:
Arnaldo Carvalho de Melod9aade7f2016-02-18 13:34:09 -03001893out_default:
Arnaldo Carvalho de Meloa8f23d82013-10-17 17:38:29 -03001894 scnprintf(buf, size, "%s", emsg);
1895 break;
1896 }
1897
1898 return 0;
1899}
Adrian Huntera025e4f2013-12-11 14:36:35 +02001900
Arnaldo Carvalho de Melo956fa572014-12-11 18:03:01 -03001901int perf_evlist__strerror_mmap(struct perf_evlist *evlist, int err, char *buf, size_t size)
1902{
Arnaldo Carvalho de Meloc8b5f2c2016-07-06 11:56:20 -03001903 char sbuf[STRERR_BUFSIZE], *emsg = str_error_r(err, sbuf, sizeof(sbuf));
Arnaldo Carvalho de Meloe965bea2014-12-12 16:25:33 -03001904 int pages_attempted = evlist->mmap_len / 1024, pages_max_per_user, printed = 0;
Arnaldo Carvalho de Melo956fa572014-12-11 18:03:01 -03001905
1906 switch (err) {
1907 case EPERM:
Arnaldo Carvalho de Meloe5d4a292014-12-12 15:59:51 -03001908 sysctl__read_int("kernel/perf_event_mlock_kb", &pages_max_per_user);
Arnaldo Carvalho de Meloe965bea2014-12-12 16:25:33 -03001909 printed += scnprintf(buf + printed, size - printed,
1910 "Error:\t%s.\n"
Arnaldo Carvalho de Melo956fa572014-12-11 18:03:01 -03001911 "Hint:\tCheck /proc/sys/kernel/perf_event_mlock_kb (%d kB) setting.\n"
Arnaldo Carvalho de Meloe965bea2014-12-12 16:25:33 -03001912 "Hint:\tTried using %zd kB.\n",
Arnaldo Carvalho de Meloe5d4a292014-12-12 15:59:51 -03001913 emsg, pages_max_per_user, pages_attempted);
Arnaldo Carvalho de Meloe965bea2014-12-12 16:25:33 -03001914
1915 if (pages_attempted >= pages_max_per_user) {
1916 printed += scnprintf(buf + printed, size - printed,
1917 "Hint:\tTry 'sudo sh -c \"echo %d > /proc/sys/kernel/perf_event_mlock_kb\"', or\n",
1918 pages_max_per_user + pages_attempted);
1919 }
1920
1921 printed += scnprintf(buf + printed, size - printed,
1922 "Hint:\tTry using a smaller -m/--mmap-pages value.");
Arnaldo Carvalho de Melo956fa572014-12-11 18:03:01 -03001923 break;
1924 default:
1925 scnprintf(buf, size, "%s", emsg);
1926 break;
1927 }
1928
1929 return 0;
1930}
1931
Adrian Huntera025e4f2013-12-11 14:36:35 +02001932void perf_evlist__to_front(struct perf_evlist *evlist,
1933 struct perf_evsel *move_evsel)
1934{
1935 struct perf_evsel *evsel, *n;
1936 LIST_HEAD(move);
1937
1938 if (move_evsel == perf_evlist__first(evlist))
1939 return;
1940
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03001941 evlist__for_each_entry_safe(evlist, n, evsel) {
Adrian Huntera025e4f2013-12-11 14:36:35 +02001942 if (evsel->leader == move_evsel->leader)
1943 list_move_tail(&evsel->node, &move);
1944 }
1945
1946 list_splice(&move, &evlist->entries);
1947}
Adrian Hunter60b08962014-07-31 09:00:52 +03001948
1949void perf_evlist__set_tracking_event(struct perf_evlist *evlist,
1950 struct perf_evsel *tracking_evsel)
1951{
1952 struct perf_evsel *evsel;
1953
1954 if (tracking_evsel->tracking)
1955 return;
1956
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03001957 evlist__for_each_entry(evlist, evsel) {
Adrian Hunter60b08962014-07-31 09:00:52 +03001958 if (evsel != tracking_evsel)
1959 evsel->tracking = false;
1960 }
1961
1962 tracking_evsel->tracking = true;
1963}
Wang Nan7630b3e2016-02-22 09:10:33 +00001964
1965struct perf_evsel *
1966perf_evlist__find_evsel_by_str(struct perf_evlist *evlist,
1967 const char *str)
1968{
1969 struct perf_evsel *evsel;
1970
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03001971 evlist__for_each_entry(evlist, evsel) {
Wang Nan7630b3e2016-02-22 09:10:33 +00001972 if (!evsel->name)
1973 continue;
1974 if (strcmp(str, evsel->name) == 0)
1975 return evsel;
1976 }
1977
1978 return NULL;
1979}
Wang Nan54cc54d2016-07-14 08:34:42 +00001980
1981void perf_evlist__toggle_bkw_mmap(struct perf_evlist *evlist,
1982 enum bkw_mmap_state state)
1983{
1984 enum bkw_mmap_state old_state = evlist->bkw_mmap_state;
1985 enum action {
1986 NONE,
1987 PAUSE,
1988 RESUME,
1989 } action = NONE;
1990
1991 if (!evlist->backward_mmap)
1992 return;
1993
1994 switch (old_state) {
1995 case BKW_MMAP_NOTREADY: {
1996 if (state != BKW_MMAP_RUNNING)
1997 goto state_err;
1998 break;
1999 }
2000 case BKW_MMAP_RUNNING: {
2001 if (state != BKW_MMAP_DATA_PENDING)
2002 goto state_err;
2003 action = PAUSE;
2004 break;
2005 }
2006 case BKW_MMAP_DATA_PENDING: {
2007 if (state != BKW_MMAP_EMPTY)
2008 goto state_err;
2009 break;
2010 }
2011 case BKW_MMAP_EMPTY: {
2012 if (state != BKW_MMAP_RUNNING)
2013 goto state_err;
2014 action = RESUME;
2015 break;
2016 }
2017 default:
2018 WARN_ONCE(1, "Shouldn't get there\n");
2019 }
2020
2021 evlist->bkw_mmap_state = state;
2022
2023 switch (action) {
2024 case PAUSE:
2025 perf_evlist__pause(evlist);
2026 break;
2027 case RESUME:
2028 perf_evlist__resume(evlist);
2029 break;
2030 case NONE:
2031 default:
2032 break;
2033 }
2034
2035state_err:
2036 return;
2037}
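
/*
 * Illustrative sketch, not part of the original file: the only legal
 * transitions the state machine above accepts for the backward
 * (overwritable) ring buffers are
 *
 *   NOTREADY     -> RUNNING        (maps set up, kernel starts writing)
 *   RUNNING      -> DATA_PENDING   (ring paused so a snapshot can be read)
 *   DATA_PENDING -> EMPTY          (snapshot consumed)
 *   EMPTY        -> RUNNING        (ring resumed for the next round)
 *
 * Anything else takes the state_err path and is ignored.  A record style
 * user sitting in the RUNNING state would drive it like this:
 */
static void __maybe_unused example_bkw_snapshot(struct perf_evlist *evlist)
{
	/* Pause the kernel's writes while the snapshot is read out. */
	perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_DATA_PENDING);

	/* ... read events from evlist->backward_mmap here ... */

	/* Mark the data consumed, then let the ring run again. */
	perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_EMPTY);
	perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_RUNNING);
}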