/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */
#include "util.h"
#include <api/fs/fs.h>
#include <poll.h>
#include "cpumap.h"
#include "thread_map.h"
#include "target.h"
#include "evlist.h"
#include "evsel.h"
#include "debug.h"
#include "asm/bug.h"
#include <unistd.h>

#include "parse-events.h"
#include <subcmd/parse-options.h>

#include <sys/mman.h>

#include <linux/bitops.h>
#include <linux/hash.h>
#include <linux/log2.h>
#include <linux/err.h>

static void perf_mmap__munmap(struct perf_mmap *map);
static void perf_mmap__put(struct perf_mmap *map);

#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
#define SID(e, x, y) xyarray__entry(e->sample_id, x, y)

void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
		       struct thread_map *threads)
{
	int i;

	for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i)
		INIT_HLIST_HEAD(&evlist->heads[i]);
	INIT_LIST_HEAD(&evlist->entries);
	perf_evlist__set_maps(evlist, cpus, threads);
	fdarray__init(&evlist->pollfd, 64);
	evlist->workload.pid = -1;
	evlist->bkw_mmap_state = BKW_MMAP_NOTREADY;
}

struct perf_evlist *perf_evlist__new(void)
{
	struct perf_evlist *evlist = zalloc(sizeof(*evlist));

	if (evlist != NULL)
		perf_evlist__init(evlist, NULL, NULL);

	return evlist;
}

struct perf_evlist *perf_evlist__new_default(void)
{
	struct perf_evlist *evlist = perf_evlist__new();

	if (evlist && perf_evlist__add_default(evlist)) {
		perf_evlist__delete(evlist);
		evlist = NULL;
	}

	return evlist;
}

struct perf_evlist *perf_evlist__new_dummy(void)
{
	struct perf_evlist *evlist = perf_evlist__new();

	if (evlist && perf_evlist__add_dummy(evlist)) {
		perf_evlist__delete(evlist);
		evlist = NULL;
	}

	return evlist;
}

/**
 * perf_evlist__set_id_pos - set the positions of event ids.
 * @evlist: selected event list
 *
 * Events with compatible sample types all have the same id_pos
 * and is_pos.  For convenience, put a copy on evlist.
 */
void perf_evlist__set_id_pos(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);

	evlist->id_pos = first->id_pos;
	evlist->is_pos = first->is_pos;
}

static void perf_evlist__update_id_pos(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	evlist__for_each_entry(evlist, evsel)
		perf_evsel__calc_id_pos(evsel);

	perf_evlist__set_id_pos(evlist);
}

static void perf_evlist__purge(struct perf_evlist *evlist)
{
	struct perf_evsel *pos, *n;

	evlist__for_each_entry_safe(evlist, n, pos) {
		list_del_init(&pos->node);
		pos->evlist = NULL;
		perf_evsel__delete(pos);
	}

	evlist->nr_entries = 0;
}

void perf_evlist__exit(struct perf_evlist *evlist)
{
	zfree(&evlist->mmap);
	zfree(&evlist->backward_mmap);
	fdarray__exit(&evlist->pollfd);
}

void perf_evlist__delete(struct perf_evlist *evlist)
{
	if (evlist == NULL)
		return;

	perf_evlist__munmap(evlist);
	perf_evlist__close(evlist);
	cpu_map__put(evlist->cpus);
	thread_map__put(evlist->threads);
	evlist->cpus = NULL;
	evlist->threads = NULL;
	perf_evlist__purge(evlist);
	perf_evlist__exit(evlist);
	free(evlist);
}

static void __perf_evlist__propagate_maps(struct perf_evlist *evlist,
					  struct perf_evsel *evsel)
{
	/*
	 * We already have cpus for evsel (via PMU sysfs) so
	 * keep it, if there's no target cpu list defined.
	 */
	if (!evsel->own_cpus || evlist->has_user_cpus) {
		cpu_map__put(evsel->cpus);
		evsel->cpus = cpu_map__get(evlist->cpus);
	} else if (evsel->cpus != evsel->own_cpus) {
		cpu_map__put(evsel->cpus);
		evsel->cpus = cpu_map__get(evsel->own_cpus);
	}

	thread_map__put(evsel->threads);
	evsel->threads = thread_map__get(evlist->threads);
}

static void perf_evlist__propagate_maps(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	evlist__for_each_entry(evlist, evsel)
		__perf_evlist__propagate_maps(evlist, evsel);
}

void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry)
{
	entry->evlist = evlist;
	list_add_tail(&entry->node, &evlist->entries);
	entry->idx = evlist->nr_entries;
	entry->tracking = !entry->idx;

	if (!evlist->nr_entries++)
		perf_evlist__set_id_pos(evlist);

	__perf_evlist__propagate_maps(evlist, entry);
}

void perf_evlist__remove(struct perf_evlist *evlist, struct perf_evsel *evsel)
{
	evsel->evlist = NULL;
	list_del_init(&evsel->node);
	evlist->nr_entries -= 1;
}

void perf_evlist__splice_list_tail(struct perf_evlist *evlist,
				   struct list_head *list)
{
	struct perf_evsel *evsel, *temp;

	__evlist__for_each_entry_safe(list, temp, evsel) {
		list_del_init(&evsel->node);
		perf_evlist__add(evlist, evsel);
	}
}

void __perf_evlist__set_leader(struct list_head *list)
{
	struct perf_evsel *evsel, *leader;

	leader = list_entry(list->next, struct perf_evsel, node);
	evsel = list_entry(list->prev, struct perf_evsel, node);

	leader->nr_members = evsel->idx - leader->idx + 1;

	__evlist__for_each_entry(list, evsel) {
		evsel->leader = leader;
	}
}

void perf_evlist__set_leader(struct perf_evlist *evlist)
{
	if (evlist->nr_entries) {
		evlist->nr_groups = evlist->nr_entries > 1 ? 1 : 0;
		__perf_evlist__set_leader(&evlist->entries);
	}
}

void perf_event_attr__set_max_precise_ip(struct perf_event_attr *attr)
{
	attr->precise_ip = 3;

	while (attr->precise_ip != 0) {
		int fd = sys_perf_event_open(attr, 0, -1, -1, 0);
		if (fd != -1) {
			close(fd);
			break;
		}
		--attr->precise_ip;
	}
}

int perf_evlist__add_default(struct perf_evlist *evlist)
{
	struct perf_event_attr attr = {
		.type = PERF_TYPE_HARDWARE,
		.config = PERF_COUNT_HW_CPU_CYCLES,
	};
	struct perf_evsel *evsel;

	event_attr_init(&attr);

	perf_event_attr__set_max_precise_ip(&attr);

	evsel = perf_evsel__new(&attr);
	if (evsel == NULL)
		goto error;

	/* use asprintf() because free(evsel) assumes name is allocated */
	if (asprintf(&evsel->name, "cycles%.*s",
		     attr.precise_ip ? attr.precise_ip + 1 : 0, ":ppp") < 0)
		goto error_free;

	perf_evlist__add(evlist, evsel);
	return 0;
error_free:
	perf_evsel__delete(evsel);
error:
	return -ENOMEM;
}

int perf_evlist__add_dummy(struct perf_evlist *evlist)
{
	struct perf_event_attr attr = {
		.type = PERF_TYPE_SOFTWARE,
		.config = PERF_COUNT_SW_DUMMY,
		.size = sizeof(attr), /* to capture ABI version */
	};
	struct perf_evsel *evsel = perf_evsel__new(&attr);

	if (evsel == NULL)
		return -ENOMEM;

	perf_evlist__add(evlist, evsel);
	return 0;
}

static int perf_evlist__add_attrs(struct perf_evlist *evlist,
				  struct perf_event_attr *attrs, size_t nr_attrs)
{
	struct perf_evsel *evsel, *n;
	LIST_HEAD(head);
	size_t i;

	for (i = 0; i < nr_attrs; i++) {
		evsel = perf_evsel__new_idx(attrs + i, evlist->nr_entries + i);
		if (evsel == NULL)
			goto out_delete_partial_list;
		list_add_tail(&evsel->node, &head);
	}

	perf_evlist__splice_list_tail(evlist, &head);

	return 0;

out_delete_partial_list:
	__evlist__for_each_entry_safe(&head, n, evsel)
		perf_evsel__delete(evsel);
	return -1;
}

int __perf_evlist__add_default_attrs(struct perf_evlist *evlist,
				     struct perf_event_attr *attrs, size_t nr_attrs)
{
	size_t i;

	for (i = 0; i < nr_attrs; i++)
		event_attr_init(attrs + i);

	return perf_evlist__add_attrs(evlist, attrs, nr_attrs);
}

struct perf_evsel *
perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id)
{
	struct perf_evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->attr.type == PERF_TYPE_TRACEPOINT &&
		    (int)evsel->attr.config == id)
			return evsel;
	}

	return NULL;
}

struct perf_evsel *
perf_evlist__find_tracepoint_by_name(struct perf_evlist *evlist,
				     const char *name)
{
	struct perf_evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if ((evsel->attr.type == PERF_TYPE_TRACEPOINT) &&
		    (strcmp(evsel->name, name) == 0))
			return evsel;
	}

	return NULL;
}

int perf_evlist__add_newtp(struct perf_evlist *evlist,
			   const char *sys, const char *name, void *handler)
{
	struct perf_evsel *evsel = perf_evsel__newtp(sys, name);

	if (IS_ERR(evsel))
		return -1;

	evsel->handler = handler;
	perf_evlist__add(evlist, evsel);
	return 0;
}

static int perf_evlist__nr_threads(struct perf_evlist *evlist,
				   struct perf_evsel *evsel)
{
	if (evsel->system_wide)
		return 1;
	else
		return thread_map__nr(evlist->threads);
}

void perf_evlist__disable(struct perf_evlist *evlist)
{
	struct perf_evsel *pos;

	evlist__for_each_entry(evlist, pos) {
		if (!perf_evsel__is_group_leader(pos) || !pos->fd)
			continue;
		perf_evsel__disable(pos);
	}

	evlist->enabled = false;
}

void perf_evlist__enable(struct perf_evlist *evlist)
{
	struct perf_evsel *pos;

	evlist__for_each_entry(evlist, pos) {
		if (!perf_evsel__is_group_leader(pos) || !pos->fd)
			continue;
		perf_evsel__enable(pos);
	}

	evlist->enabled = true;
}

void perf_evlist__toggle_enable(struct perf_evlist *evlist)
{
	(evlist->enabled ? perf_evlist__disable : perf_evlist__enable)(evlist);
}

static int perf_evlist__enable_event_cpu(struct perf_evlist *evlist,
					 struct perf_evsel *evsel, int cpu)
{
	int thread, err;
	int nr_threads = perf_evlist__nr_threads(evlist, evsel);

	if (!evsel->fd)
		return -EINVAL;

	for (thread = 0; thread < nr_threads; thread++) {
		err = ioctl(FD(evsel, cpu, thread),
			    PERF_EVENT_IOC_ENABLE, 0);
		if (err)
			return err;
	}
	return 0;
}

static int perf_evlist__enable_event_thread(struct perf_evlist *evlist,
					    struct perf_evsel *evsel,
					    int thread)
{
	int cpu, err;
	int nr_cpus = cpu_map__nr(evlist->cpus);

	if (!evsel->fd)
		return -EINVAL;

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		err = ioctl(FD(evsel, cpu, thread), PERF_EVENT_IOC_ENABLE, 0);
		if (err)
			return err;
	}
	return 0;
}

int perf_evlist__enable_event_idx(struct perf_evlist *evlist,
				  struct perf_evsel *evsel, int idx)
{
	bool per_cpu_mmaps = !cpu_map__empty(evlist->cpus);

	if (per_cpu_mmaps)
		return perf_evlist__enable_event_cpu(evlist, evsel, idx);
	else
		return perf_evlist__enable_event_thread(evlist, evsel, idx);
}

int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
{
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = thread_map__nr(evlist->threads);
	int nfds = 0;
	struct perf_evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->system_wide)
			nfds += nr_cpus;
		else
			nfds += nr_cpus * nr_threads;
	}

	if (fdarray__available_entries(&evlist->pollfd) < nfds &&
	    fdarray__grow(&evlist->pollfd, nfds) < 0)
		return -ENOMEM;

	return 0;
}

static int __perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd,
				     struct perf_mmap *map, short revent)
{
	int pos = fdarray__add(&evlist->pollfd, fd, revent | POLLERR | POLLHUP);
	/*
	 * Save the idx so that when we filter out fds POLLHUP'ed we can
	 * close the associated evlist->mmap[] entry.
	 */
	if (pos >= 0) {
		evlist->pollfd.priv[pos].ptr = map;

		fcntl(fd, F_SETFL, O_NONBLOCK);
	}

	return pos;
}

int perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd)
{
	return __perf_evlist__add_pollfd(evlist, fd, NULL, POLLIN);
}

static void perf_evlist__munmap_filtered(struct fdarray *fda, int fd,
					 void *arg __maybe_unused)
{
	struct perf_mmap *map = fda->priv[fd].ptr;

	if (map)
		perf_mmap__put(map);
}

int perf_evlist__filter_pollfd(struct perf_evlist *evlist, short revents_and_mask)
{
	return fdarray__filter(&evlist->pollfd, revents_and_mask,
			       perf_evlist__munmap_filtered, NULL);
}

int perf_evlist__poll(struct perf_evlist *evlist, int timeout)
{
	return fdarray__poll(&evlist->pollfd, timeout);
}

static void perf_evlist__id_hash(struct perf_evlist *evlist,
				 struct perf_evsel *evsel,
				 int cpu, int thread, u64 id)
{
	int hash;
	struct perf_sample_id *sid = SID(evsel, cpu, thread);

	sid->id = id;
	sid->evsel = evsel;
	hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS);
	hlist_add_head(&sid->node, &evlist->heads[hash]);
}

void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel,
			 int cpu, int thread, u64 id)
{
	perf_evlist__id_hash(evlist, evsel, cpu, thread, id);
	evsel->id[evsel->ids++] = id;
}

int perf_evlist__id_add_fd(struct perf_evlist *evlist,
			   struct perf_evsel *evsel,
			   int cpu, int thread, int fd)
{
	u64 read_data[4] = { 0, };
	int id_idx = 1; /* The first entry is the counter value */
	u64 id;
	int ret;

	ret = ioctl(fd, PERF_EVENT_IOC_ID, &id);
	if (!ret)
		goto add;

	if (errno != ENOTTY)
		return -1;

	/* Legacy way to get event id.. All hail to old kernels! */

	/*
	 * This way does not work with group format read, so bail
	 * out in that case.
	 */
	if (perf_evlist__read_format(evlist) & PERF_FORMAT_GROUP)
		return -1;

	if (!(evsel->attr.read_format & PERF_FORMAT_ID) ||
	    read(fd, &read_data, sizeof(read_data)) == -1)
		return -1;

	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		++id_idx;
	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		++id_idx;

	id = read_data[id_idx];

add:
	perf_evlist__id_add(evlist, evsel, cpu, thread, id);
	return 0;
}

static void perf_evlist__set_sid_idx(struct perf_evlist *evlist,
				     struct perf_evsel *evsel, int idx, int cpu,
				     int thread)
{
	struct perf_sample_id *sid = SID(evsel, cpu, thread);
	sid->idx = idx;
	if (evlist->cpus && cpu >= 0)
		sid->cpu = evlist->cpus->map[cpu];
	else
		sid->cpu = -1;
	if (!evsel->system_wide && evlist->threads && thread >= 0)
		sid->tid = thread_map__pid(evlist->threads, thread);
	else
		sid->tid = -1;
}

struct perf_sample_id *perf_evlist__id2sid(struct perf_evlist *evlist, u64 id)
{
	struct hlist_head *head;
	struct perf_sample_id *sid;
	int hash;

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->heads[hash];

	hlist_for_each_entry(sid, head, node)
		if (sid->id == id)
			return sid;

	return NULL;
}

struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
{
	struct perf_sample_id *sid;

	if (evlist->nr_entries == 1 || !id)
		return perf_evlist__first(evlist);

	sid = perf_evlist__id2sid(evlist, id);
	if (sid)
		return sid->evsel;

	if (!perf_evlist__sample_id_all(evlist))
		return perf_evlist__first(evlist);

	return NULL;
}

struct perf_evsel *perf_evlist__id2evsel_strict(struct perf_evlist *evlist,
						u64 id)
{
	struct perf_sample_id *sid;

	if (!id)
		return NULL;

	sid = perf_evlist__id2sid(evlist, id);
	if (sid)
		return sid->evsel;

	return NULL;
}

static int perf_evlist__event2id(struct perf_evlist *evlist,
				 union perf_event *event, u64 *id)
{
	const u64 *array = event->sample.array;
	ssize_t n;

	n = (event->header.size - sizeof(event->header)) >> 3;

	if (event->header.type == PERF_RECORD_SAMPLE) {
		if (evlist->id_pos >= n)
			return -1;
		*id = array[evlist->id_pos];
	} else {
		if (evlist->is_pos > n)
			return -1;
		n -= evlist->is_pos;
		*id = array[n];
	}
	return 0;
}

struct perf_evsel *perf_evlist__event2evsel(struct perf_evlist *evlist,
					    union perf_event *event)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	struct hlist_head *head;
	struct perf_sample_id *sid;
	int hash;
	u64 id;

	if (evlist->nr_entries == 1)
		return first;

	if (!first->attr.sample_id_all &&
	    event->header.type != PERF_RECORD_SAMPLE)
		return first;

	if (perf_evlist__event2id(evlist, event, &id))
		return NULL;

	/* Synthesized events have an id of zero */
	if (!id)
		return first;

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->heads[hash];

	hlist_for_each_entry(sid, head, node) {
		if (sid->id == id)
			return sid->evsel;
	}
	return NULL;
}

static int perf_evlist__set_paused(struct perf_evlist *evlist, bool value)
{
	int i;

	if (!evlist->backward_mmap)
		return 0;

	for (i = 0; i < evlist->nr_mmaps; i++) {
		int fd = evlist->backward_mmap[i].fd;
		int err;

		if (fd < 0)
			continue;
		err = ioctl(fd, PERF_EVENT_IOC_PAUSE_OUTPUT, value ? 1 : 0);
		if (err)
			return err;
	}
	return 0;
}

static int perf_evlist__pause(struct perf_evlist *evlist)
{
	return perf_evlist__set_paused(evlist, true);
}

static int perf_evlist__resume(struct perf_evlist *evlist)
{
	return perf_evlist__set_paused(evlist, false);
}

/* When check_messup is true, 'end' must point to a good entry */
static union perf_event *
perf_mmap__read(struct perf_mmap *md, bool check_messup, u64 start,
		u64 end, u64 *prev)
{
	unsigned char *data = md->base + page_size;
	union perf_event *event = NULL;
	int diff = end - start;

	if (check_messup) {
		/*
		 * If we're further behind than half the buffer, there's a chance
		 * the writer will bite our tail and mess up the samples under us.
		 *
		 * If we somehow ended up ahead of the 'end', we got messed up.
		 *
		 * In either case, truncate and restart at 'end'.
		 */
		if (diff > md->mask / 2 || diff < 0) {
			fprintf(stderr, "WARNING: failed to keep up with mmap data.\n");

			/*
			 * 'end' points to a known good entry, start there.
			 */
			start = end;
			diff = 0;
		}
	}

	if (diff >= (int)sizeof(event->header)) {
		size_t size;

		event = (union perf_event *)&data[start & md->mask];
		size = event->header.size;

		if (size < sizeof(event->header) || diff < (int)size) {
			event = NULL;
			goto broken_event;
		}

		/*
		 * Event straddles the mmap boundary -- header should always
		 * be inside due to u64 alignment of output.
		 */
		if ((start & md->mask) + size != ((start + size) & md->mask)) {
			unsigned int offset = start;
			unsigned int len = min(sizeof(*event), size), cpy;
			void *dst = md->event_copy;

			do {
				cpy = min(md->mask + 1 - (offset & md->mask), len);
				memcpy(dst, &data[offset & md->mask], cpy);
				offset += cpy;
				dst += cpy;
				len -= cpy;
			} while (len);

			event = (union perf_event *) md->event_copy;
		}

		start += size;
	}

broken_event:
	if (prev)
		*prev = start;

	return event;
}

union perf_event *perf_mmap__read_forward(struct perf_mmap *md, bool check_messup)
{
	u64 head;
	u64 old = md->prev;

	/*
	 * Check if event was unmapped due to a POLLHUP/POLLERR.
	 */
	if (!atomic_read(&md->refcnt))
		return NULL;

	head = perf_mmap__read_head(md);

	return perf_mmap__read(md, check_messup, old, head, &md->prev);
}

union perf_event *
perf_mmap__read_backward(struct perf_mmap *md)
{
	u64 head, end;
	u64 start = md->prev;

	/*
	 * Check if event was unmapped due to a POLLHUP/POLLERR.
	 */
	if (!atomic_read(&md->refcnt))
		return NULL;

	head = perf_mmap__read_head(md);
	if (!head)
		return NULL;

	/*
	 * The 'head' pointer starts at 0 and the kernel subtracts
	 * sizeof(record) from it on every write, so 'head' is in fact
	 * negative. The 'end' pointer is built manually by adding the size
	 * of the ring buffer to 'head', which means the valid data that can
	 * be read spans the whole ring buffer. If 'end' is positive, the
	 * ring buffer has not been fully filled yet, so 'end' must be
	 * adjusted to 0.
	 *
	 * However, since both 'head' and 'end' are unsigned, we can't
	 * simply compare 'end' against 0. Instead, compare '-head' with the
	 * size of the ring buffer, where -head is the number of bytes the
	 * kernel has written to the ring buffer.
	 */
	if (-head < (u64)(md->mask + 1))
		end = 0;
	else
		end = head + md->mask + 1;

	return perf_mmap__read(md, false, start, end, &md->prev);
}

union perf_event *perf_evlist__mmap_read_forward(struct perf_evlist *evlist, int idx)
{
	struct perf_mmap *md = &evlist->mmap[idx];

	/*
	 * Checking for messups is required for a forward overwritable ring
	 * buffer: the memory pointed to by md->prev can be overwritten in
	 * that case. It is not needed for a read-write ring buffer: the
	 * kernel stops outputting when it hits md->prev
	 * (perf_mmap__consume()).
	 */
	return perf_mmap__read_forward(md, evlist->overwrite);
}

union perf_event *perf_evlist__mmap_read_backward(struct perf_evlist *evlist, int idx)
{
	struct perf_mmap *md = &evlist->mmap[idx];

	/*
	 * No need to check for messups in a backward ring buffer:
	 * we can always read arbitrarily long data from it, as long as
	 * we remember to pause it before reading.
	 */
	return perf_mmap__read_backward(md);
}

union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
{
	return perf_evlist__mmap_read_forward(evlist, idx);
}

void perf_mmap__read_catchup(struct perf_mmap *md)
{
	u64 head;

	if (!atomic_read(&md->refcnt))
		return;

	head = perf_mmap__read_head(md);
	md->prev = head;
}

void perf_evlist__mmap_read_catchup(struct perf_evlist *evlist, int idx)
{
	perf_mmap__read_catchup(&evlist->mmap[idx]);
}

static bool perf_mmap__empty(struct perf_mmap *md)
{
	return perf_mmap__read_head(md) == md->prev && !md->auxtrace_mmap.base;
}

static void perf_mmap__get(struct perf_mmap *map)
{
	atomic_inc(&map->refcnt);
}

static void perf_mmap__put(struct perf_mmap *md)
{
	BUG_ON(md->base && atomic_read(&md->refcnt) == 0);

	if (atomic_dec_and_test(&md->refcnt))
		perf_mmap__munmap(md);
}

void perf_mmap__consume(struct perf_mmap *md, bool overwrite)
{
	if (!overwrite) {
		u64 old = md->prev;

		perf_mmap__write_tail(md, old);
	}

	if (atomic_read(&md->refcnt) == 1 && perf_mmap__empty(md))
		perf_mmap__put(md);
}

void perf_evlist__mmap_consume(struct perf_evlist *evlist, int idx)
{
	perf_mmap__consume(&evlist->mmap[idx], evlist->overwrite);
}

int __weak auxtrace_mmap__mmap(struct auxtrace_mmap *mm __maybe_unused,
			       struct auxtrace_mmap_params *mp __maybe_unused,
			       void *userpg __maybe_unused,
			       int fd __maybe_unused)
{
	return 0;
}

void __weak auxtrace_mmap__munmap(struct auxtrace_mmap *mm __maybe_unused)
{
}

void __weak auxtrace_mmap_params__init(
			struct auxtrace_mmap_params *mp __maybe_unused,
			off_t auxtrace_offset __maybe_unused,
			unsigned int auxtrace_pages __maybe_unused,
			bool auxtrace_overwrite __maybe_unused)
{
}

void __weak auxtrace_mmap_params__set_idx(
			struct auxtrace_mmap_params *mp __maybe_unused,
			struct perf_evlist *evlist __maybe_unused,
			int idx __maybe_unused,
			bool per_cpu __maybe_unused)
{
}

static void perf_mmap__munmap(struct perf_mmap *map)
{
	if (map->base != NULL) {
		munmap(map->base, perf_mmap__mmap_len(map));
		map->base = NULL;
		map->fd = -1;
		atomic_set(&map->refcnt, 0);
	}
	auxtrace_mmap__munmap(&map->auxtrace_mmap);
}

static void perf_evlist__munmap_nofree(struct perf_evlist *evlist)
{
	int i;

	if (evlist->mmap)
		for (i = 0; i < evlist->nr_mmaps; i++)
			perf_mmap__munmap(&evlist->mmap[i]);

	if (evlist->backward_mmap)
		for (i = 0; i < evlist->nr_mmaps; i++)
			perf_mmap__munmap(&evlist->backward_mmap[i]);
}

void perf_evlist__munmap(struct perf_evlist *evlist)
{
	perf_evlist__munmap_nofree(evlist);
	zfree(&evlist->mmap);
	zfree(&evlist->backward_mmap);
}

static struct perf_mmap *perf_evlist__alloc_mmap(struct perf_evlist *evlist)
{
	int i;
	struct perf_mmap *map;

	evlist->nr_mmaps = cpu_map__nr(evlist->cpus);
	if (cpu_map__empty(evlist->cpus))
		evlist->nr_mmaps = thread_map__nr(evlist->threads);
	map = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
	if (!map)
		return NULL;

	for (i = 0; i < evlist->nr_mmaps; i++)
		map[i].fd = -1;
	return map;
}

struct mmap_params {
	int prot;
	int mask;
	struct auxtrace_mmap_params auxtrace_mp;
};

static int perf_mmap__mmap(struct perf_mmap *map,
			   struct mmap_params *mp, int fd)
{
	/*
	 * The last one will be done at perf_evlist__mmap_consume(), so that we
	 * make sure we don't prevent tools from consuming every last event in
	 * the ring buffer.
	 *
	 * I.e. we can get the POLLHUP meaning that the fd doesn't exist
	 * anymore, but the last events for it are still in the ring buffer,
	 * waiting to be consumed.
	 *
	 * Tools can choose to ignore this at their own discretion, but the
	 * evlist layer can't just drop it when filtering events in
	 * perf_evlist__filter_pollfd().
	 */
	atomic_set(&map->refcnt, 2);
	map->prev = 0;
	map->mask = mp->mask;
	map->base = mmap(NULL, perf_mmap__mmap_len(map), mp->prot,
			 MAP_SHARED, fd, 0);
	if (map->base == MAP_FAILED) {
		pr_debug2("failed to mmap perf event ring buffer, error %d\n",
			  errno);
		map->base = NULL;
		return -1;
	}
	map->fd = fd;

	if (auxtrace_mmap__mmap(&map->auxtrace_mmap,
				&mp->auxtrace_mp, map->base, fd))
		return -1;

	return 0;
}

static bool
perf_evlist__should_poll(struct perf_evlist *evlist __maybe_unused,
			 struct perf_evsel *evsel)
{
	if (evsel->attr.write_backward)
		return false;
	return true;
}

static int perf_evlist__mmap_per_evsel(struct perf_evlist *evlist, int idx,
				       struct mmap_params *mp, int cpu,
				       int thread, int *_output, int *_output_backward)
{
	struct perf_evsel *evsel;
	int revent;

	evlist__for_each_entry(evlist, evsel) {
		struct perf_mmap *maps = evlist->mmap;
		int *output = _output;
		int fd;

		if (evsel->attr.write_backward) {
			output = _output_backward;
			maps = evlist->backward_mmap;

			if (!maps) {
				maps = perf_evlist__alloc_mmap(evlist);
				if (!maps)
					return -1;
				evlist->backward_mmap = maps;
				if (evlist->bkw_mmap_state == BKW_MMAP_NOTREADY)
					perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_RUNNING);
			}
		}

		if (evsel->system_wide && thread)
			continue;

		fd = FD(evsel, cpu, thread);

		if (*output == -1) {
			*output = fd;

			if (perf_mmap__mmap(&maps[idx], mp, *output) < 0)
				return -1;
		} else {
			if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, *output) != 0)
				return -1;

			perf_mmap__get(&maps[idx]);
		}

		revent = perf_evlist__should_poll(evlist, evsel) ? POLLIN : 0;

		/*
		 * The system_wide flag causes a selected event to be opened
		 * always without a pid.  Consequently it will never get a
		 * POLLHUP, but it is used for tracking in combination with
		 * other events, so it should not need to be polled anyway.
		 * Therefore don't add it for polling.
		 */
		if (!evsel->system_wide &&
		    __perf_evlist__add_pollfd(evlist, fd, &maps[idx], revent) < 0) {
			perf_mmap__put(&maps[idx]);
			return -1;
		}

		if (evsel->attr.read_format & PERF_FORMAT_ID) {
			if (perf_evlist__id_add_fd(evlist, evsel, cpu, thread,
						   fd) < 0)
				return -1;
			perf_evlist__set_sid_idx(evlist, evsel, idx, cpu,
						 thread);
		}
	}

	return 0;
}

static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist,
				     struct mmap_params *mp)
{
	int cpu, thread;
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = thread_map__nr(evlist->threads);

	pr_debug2("perf event ring buffer mmapped per cpu\n");
	for (cpu = 0; cpu < nr_cpus; cpu++) {
		int output = -1;
		int output_backward = -1;

		auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, cpu,
					      true);

		for (thread = 0; thread < nr_threads; thread++) {
			if (perf_evlist__mmap_per_evsel(evlist, cpu, mp, cpu,
							thread, &output, &output_backward))
				goto out_unmap;
		}
	}

	return 0;

out_unmap:
	perf_evlist__munmap_nofree(evlist);
	return -1;
}

static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist,
					struct mmap_params *mp)
{
	int thread;
	int nr_threads = thread_map__nr(evlist->threads);

	pr_debug2("perf event ring buffer mmapped per thread\n");
	for (thread = 0; thread < nr_threads; thread++) {
		int output = -1;
		int output_backward = -1;

		auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, thread,
					      false);

		if (perf_evlist__mmap_per_evsel(evlist, thread, mp, 0, thread,
						&output, &output_backward))
			goto out_unmap;
	}

	return 0;

out_unmap:
	perf_evlist__munmap_nofree(evlist);
	return -1;
}

unsigned long perf_event_mlock_kb_in_pages(void)
{
	unsigned long pages;
	int max;

	if (sysctl__read_int("kernel/perf_event_mlock_kb", &max) < 0) {
		/*
		 * Pick a value that was a good default once upon a time;
		 * things look strange since we can't read the sysctl value,
		 * but let's not die yet...
		 */
		max = 512;
	} else {
		max -= (page_size / 1024);
	}

	pages = (max * 1024) / page_size;
	if (!is_power_of_2(pages))
		pages = rounddown_pow_of_two(pages);

	return pages;
}

static size_t perf_evlist__mmap_size(unsigned long pages)
{
	if (pages == UINT_MAX)
		pages = perf_event_mlock_kb_in_pages();
	else if (!is_power_of_2(pages))
		return 0;

	return (pages + 1) * page_size;
}

static long parse_pages_arg(const char *str, unsigned long min,
			    unsigned long max)
{
	unsigned long pages, val;
	static struct parse_tag tags[] = {
		{ .tag = 'B', .mult = 1 },
		{ .tag = 'K', .mult = 1 << 10 },
		{ .tag = 'M', .mult = 1 << 20 },
		{ .tag = 'G', .mult = 1 << 30 },
		{ .tag = 0 },
	};

	if (str == NULL)
		return -EINVAL;

	val = parse_tag_value(str, tags);
	if (val != (unsigned long) -1) {
		/* we got file size value */
		pages = PERF_ALIGN(val, page_size) / page_size;
	} else {
		/* we got pages count value */
		char *eptr;
		pages = strtoul(str, &eptr, 10);
		if (*eptr != '\0')
			return -EINVAL;
	}

	if (pages == 0 && min == 0) {
		/* leave number of pages at 0 */
	} else if (!is_power_of_2(pages)) {
		/* round pages up to next power of 2 */
		pages = roundup_pow_of_two(pages);
		if (!pages)
			return -EINVAL;
		pr_info("rounding mmap pages size to %lu bytes (%lu pages)\n",
			pages * page_size, pages);
	}

	if (pages > max)
		return -EINVAL;

	return pages;
}

int __perf_evlist__parse_mmap_pages(unsigned int *mmap_pages, const char *str)
{
	unsigned long max = UINT_MAX;
	long pages;

	if (max > SIZE_MAX / page_size)
		max = SIZE_MAX / page_size;

	pages = parse_pages_arg(str, 1, max);
	if (pages < 0) {
		pr_err("Invalid argument for --mmap_pages/-m\n");
		return -1;
	}

	*mmap_pages = pages;
	return 0;
}

int perf_evlist__parse_mmap_pages(const struct option *opt, const char *str,
				  int unset __maybe_unused)
{
	return __perf_evlist__parse_mmap_pages(opt->value, str);
}

/**
 * perf_evlist__mmap_ex - Create mmaps to receive events.
 * @evlist: list of events
 * @pages: map length in pages
 * @overwrite: overwrite older events?
 * @auxtrace_pages: auxtrace map length in pages
 * @auxtrace_overwrite: overwrite older auxtrace data?
 *
 * If @overwrite is %false the user needs to signal event consumption using
 * perf_mmap__write_tail().  Using perf_evlist__mmap_read() does this
 * automatically.
 *
 * Similarly, if @auxtrace_overwrite is %false the user needs to signal data
 * consumption using auxtrace_mmap__write_tail().
 *
 * Return: %0 on success, negative error code otherwise.
 */
Adrian Hunter718c6022015-04-09 18:53:42 +03001295int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages,
1296 bool overwrite, unsigned int auxtrace_pages,
1297 bool auxtrace_overwrite)
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -02001298{
Arnaldo Carvalho de Meloaece9482011-05-15 09:39:00 -03001299 struct perf_evsel *evsel;
Arnaldo Carvalho de Melo7e2ed092011-01-30 11:59:43 -02001300 const struct cpu_map *cpus = evlist->cpus;
1301 const struct thread_map *threads = evlist->threads;
Adrian Huntera8a8f3e2014-07-14 13:02:52 +03001302 struct mmap_params mp = {
1303 .prot = PROT_READ | (overwrite ? 0 : PROT_WRITE),
1304 };
Arnaldo Carvalho de Melo50a682c2011-11-09 09:10:47 -02001305
Wang Nan8db6d6b2016-07-14 08:34:35 +00001306 if (!evlist->mmap)
1307 evlist->mmap = perf_evlist__alloc_mmap(evlist);
1308 if (!evlist->mmap)
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -02001309 return -ENOMEM;
1310
Arnaldo Carvalho de Melo1b853372014-09-03 18:02:59 -03001311 if (evlist->pollfd.entries == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -02001312 return -ENOMEM;
1313
1314 evlist->overwrite = overwrite;
Jiri Olsa994a1f72013-09-01 12:36:12 +02001315 evlist->mmap_len = perf_evlist__mmap_size(pages);
Adrian Hunter2af68ef2013-10-18 15:29:07 +03001316 pr_debug("mmap size %zuB\n", evlist->mmap_len);
Adrian Huntera8a8f3e2014-07-14 13:02:52 +03001317 mp.mask = evlist->mmap_len - page_size - 1;
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -02001318
Adrian Hunter718c6022015-04-09 18:53:42 +03001319 auxtrace_mmap_params__init(&mp.auxtrace_mp, evlist->mmap_len,
1320 auxtrace_pages, auxtrace_overwrite);
1321
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03001322 evlist__for_each_entry(evlist, evsel) {
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -02001323 if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
Arnaldo Carvalho de Meloa91e5432011-03-10 11:15:54 -03001324 evsel->sample_id == NULL &&
Arnaldo Carvalho de Meloa14bb7a2012-09-26 12:41:14 -03001325 perf_evsel__alloc_id(evsel, cpu_map__nr(cpus), threads->nr) < 0)
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -02001326 return -ENOMEM;
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -02001327 }
1328
Sukadev Bhattiproluec1e7e42013-05-22 17:42:38 -07001329 if (cpu_map__empty(cpus))
Adrian Huntera8a8f3e2014-07-14 13:02:52 +03001330 return perf_evlist__mmap_per_thread(evlist, &mp);
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -02001331
Adrian Huntera8a8f3e2014-07-14 13:02:52 +03001332 return perf_evlist__mmap_per_cpu(evlist, &mp);
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -02001333}
Arnaldo Carvalho de Melo7e2ed092011-01-30 11:59:43 -02001334
Adrian Hunter718c6022015-04-09 18:53:42 +03001335int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
1336 bool overwrite)
1337{
1338 return perf_evlist__mmap_ex(evlist, pages, overwrite, 0, false);
1339}
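/*
 * Usage sketch (assumes only the helpers declared in evlist.h; error handling
 * trimmed): mmap the evlist with the default size and drain each ring,
 * letting perf_evlist__mmap_consume() advance the tail for non-overwrite
 * maps as the comment above describes:
 *
 *	union perf_event *event;
 *	int i;
 *
 *	if (perf_evlist__mmap(evlist, UINT_MAX, false) < 0)
 *		return -1;
 *
 *	for (i = 0; i < evlist->nr_mmaps; i++) {
 *		while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) {
 *			... deliver the event to the tool ...
 *			perf_evlist__mmap_consume(evlist, i);
 *		}
 *	}
 */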
1340
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03001341int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target)
Arnaldo Carvalho de Melo7e2ed092011-01-30 11:59:43 -02001342{
Adrian Hunter74bfd2b2015-09-08 10:58:57 +03001343 struct cpu_map *cpus;
1344 struct thread_map *threads;
Arnaldo Carvalho de Melo7e2ed092011-01-30 11:59:43 -02001345
Adrian Hunter74bfd2b2015-09-08 10:58:57 +03001346 threads = thread_map__new_str(target->pid, target->tid, target->uid);
1347
1348 if (!threads)
Arnaldo Carvalho de Melo7e2ed092011-01-30 11:59:43 -02001349 return -1;
1350
Dongsheng Yang9c105fb2013-12-04 17:56:40 -05001351 if (target__uses_dummy_map(target))
Adrian Hunter74bfd2b2015-09-08 10:58:57 +03001352 cpus = cpu_map__dummy_new();
Namhyung Kim879d77d2012-05-16 18:45:48 +09001353 else
Adrian Hunter74bfd2b2015-09-08 10:58:57 +03001354 cpus = cpu_map__new(target->cpu_list);
Arnaldo Carvalho de Melo7e2ed092011-01-30 11:59:43 -02001355
Adrian Hunter74bfd2b2015-09-08 10:58:57 +03001356 if (!cpus)
Arnaldo Carvalho de Melo7e2ed092011-01-30 11:59:43 -02001357 goto out_delete_threads;
1358
Adrian Hunterec9a77a2015-09-08 10:58:52 +03001359 evlist->has_user_cpus = !!target->cpu_list;
1360
Adrian Hunter74bfd2b2015-09-08 10:58:57 +03001361 perf_evlist__set_maps(evlist, cpus, threads);
Adrian Hunterd5bc0562015-09-08 10:58:51 +03001362
1363 return 0;
Arnaldo Carvalho de Melo7e2ed092011-01-30 11:59:43 -02001364
1365out_delete_threads:
Adrian Hunter74bfd2b2015-09-08 10:58:57 +03001366 thread_map__put(threads);
Arnaldo Carvalho de Melo7e2ed092011-01-30 11:59:43 -02001367 return -1;
1368}
1369
Adrian Hunterd5bc0562015-09-08 10:58:51 +03001370void perf_evlist__set_maps(struct perf_evlist *evlist, struct cpu_map *cpus,
1371 struct thread_map *threads)
Jiri Olsa3de5cfb2015-07-21 14:31:30 +02001372{
Adrian Hunter934e0f22015-09-08 10:58:56 +03001373 /*
1374 * Allow for the possibility that one or another of the maps isn't being
1375 * changed i.e. don't put it. Note we are assuming the maps that are
1376 * being applied are brand new and evlist is taking ownership of the
1377 * original reference count of 1. If that is not the case it is up to
1378 * the caller to increase the reference count.
1379 */
1380 if (cpus != evlist->cpus) {
Jiri Olsa3de5cfb2015-07-21 14:31:30 +02001381 cpu_map__put(evlist->cpus);
Arnaldo Carvalho de Meloa55e5662016-02-17 10:57:19 -03001382 evlist->cpus = cpu_map__get(cpus);
Adrian Hunter934e0f22015-09-08 10:58:56 +03001383 }
Jiri Olsa3de5cfb2015-07-21 14:31:30 +02001384
Adrian Hunter934e0f22015-09-08 10:58:56 +03001385 if (threads != evlist->threads) {
Jiri Olsa3de5cfb2015-07-21 14:31:30 +02001386 thread_map__put(evlist->threads);
Arnaldo Carvalho de Meloa55e5662016-02-17 10:57:19 -03001387 evlist->threads = thread_map__get(threads);
Adrian Hunter934e0f22015-09-08 10:58:56 +03001388 }
Jiri Olsa3de5cfb2015-07-21 14:31:30 +02001389
Adrian Hunterec9a77a2015-09-08 10:58:52 +03001390 perf_evlist__propagate_maps(evlist);
Jiri Olsa3de5cfb2015-07-21 14:31:30 +02001391}
1392
Arnaldo Carvalho de Melo22c8a372016-04-11 18:37:45 -03001393void __perf_evlist__set_sample_bit(struct perf_evlist *evlist,
1394 enum perf_event_sample_format bit)
1395{
1396 struct perf_evsel *evsel;
1397
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03001398 evlist__for_each_entry(evlist, evsel)
Arnaldo Carvalho de Melo22c8a372016-04-11 18:37:45 -03001399 __perf_evsel__set_sample_bit(evsel, bit);
1400}
1401
1402void __perf_evlist__reset_sample_bit(struct perf_evlist *evlist,
1403 enum perf_event_sample_format bit)
1404{
1405 struct perf_evsel *evsel;
1406
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03001407 evlist__for_each_entry(evlist, evsel)
Arnaldo Carvalho de Melo22c8a372016-04-11 18:37:45 -03001408 __perf_evsel__reset_sample_bit(evsel, bit);
1409}
1410
Arnaldo Carvalho de Melo23d4aad2015-03-24 19:23:47 -03001411int perf_evlist__apply_filters(struct perf_evlist *evlist, struct perf_evsel **err_evsel)
Frederic Weisbecker0a102472011-02-26 04:51:54 +01001412{
Frederic Weisbecker0a102472011-02-26 04:51:54 +01001413 struct perf_evsel *evsel;
Arnaldo Carvalho de Melo745cefc2012-09-26 15:07:39 -03001414 int err = 0;
1415 const int ncpus = cpu_map__nr(evlist->cpus),
Namhyung Kimb3a319d2013-03-11 16:43:14 +09001416 nthreads = thread_map__nr(evlist->threads);
Frederic Weisbecker0a102472011-02-26 04:51:54 +01001417
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03001418 evlist__for_each_entry(evlist, evsel) {
Arnaldo Carvalho de Melo745cefc2012-09-26 15:07:39 -03001419 if (evsel->filter == NULL)
Frederic Weisbecker0a102472011-02-26 04:51:54 +01001420 continue;
Arnaldo Carvalho de Melo745cefc2012-09-26 15:07:39 -03001421
Kan Liangd988d5e2015-08-21 02:23:14 -04001422 /*
1423 * filters only work for tracepoint events, which don't have a cpu limit.
1424 * So the evlist and evsel cpu maps should always be the same.
1425 */
Arnaldo Carvalho de Melof47805a2015-07-03 15:53:49 -03001426 err = perf_evsel__apply_filter(evsel, ncpus, nthreads, evsel->filter);
Arnaldo Carvalho de Melo23d4aad2015-03-24 19:23:47 -03001427 if (err) {
1428 *err_evsel = evsel;
Arnaldo Carvalho de Melo745cefc2012-09-26 15:07:39 -03001429 break;
Arnaldo Carvalho de Melo23d4aad2015-03-24 19:23:47 -03001430 }
Frederic Weisbecker0a102472011-02-26 04:51:54 +01001431 }
1432
Arnaldo Carvalho de Melo745cefc2012-09-26 15:07:39 -03001433 return err;
1434}
1435
1436int perf_evlist__set_filter(struct perf_evlist *evlist, const char *filter)
1437{
1438 struct perf_evsel *evsel;
1439 int err = 0;
Arnaldo Carvalho de Melo745cefc2012-09-26 15:07:39 -03001440
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03001441 evlist__for_each_entry(evlist, evsel) {
Wang Nanfdf14722016-02-26 09:31:53 +00001442 if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
1443 continue;
1444
Arnaldo Carvalho de Melo94ad89b2015-07-03 17:42:03 -03001445 err = perf_evsel__set_filter(evsel, filter);
Arnaldo Carvalho de Melo745cefc2012-09-26 15:07:39 -03001446 if (err)
1447 break;
1448 }
1449
1450 return err;
Frederic Weisbecker0a102472011-02-26 04:51:54 +01001451}
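/*
 * Example (a sketch; run after perf_evlist__open() since applying the filter
 * ioctls the event fds): install a tracepoint filter string on every evsel
 * and report the first one that rejects it:
 *
 *	struct perf_evsel *bad = NULL;
 *
 *	if (perf_evlist__set_filter(evlist, "common_pid != 1") < 0)
 *		return -1;
 *	if (perf_evlist__apply_filters(evlist, &bad) < 0)
 *		pr_err("failed to set filter on %s\n", perf_evsel__name(bad));
 */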
Frederic Weisbecker74429962011-05-21 17:49:00 +02001452
Arnaldo Carvalho de Melobe199ad2015-02-21 11:33:47 -08001453int perf_evlist__set_filter_pids(struct perf_evlist *evlist, size_t npids, pid_t *pids)
Arnaldo Carvalho de Melocfd70a22015-02-21 10:09:55 -08001454{
1455 char *filter;
Arnaldo Carvalho de Melobe199ad2015-02-21 11:33:47 -08001456 int ret = -1;
1457 size_t i;
Arnaldo Carvalho de Melocfd70a22015-02-21 10:09:55 -08001458
Arnaldo Carvalho de Melobe199ad2015-02-21 11:33:47 -08001459 for (i = 0; i < npids; ++i) {
1460 if (i == 0) {
1461 if (asprintf(&filter, "common_pid != %d", pids[i]) < 0)
1462 return -1;
1463 } else {
1464 char *tmp;
1465
1466 if (asprintf(&tmp, "%s && common_pid != %d", filter, pids[i]) < 0)
1467 goto out_free;
1468
1469 free(filter);
1470 filter = tmp;
1471 }
1472 }
Arnaldo Carvalho de Melocfd70a22015-02-21 10:09:55 -08001473
1474 ret = perf_evlist__set_filter(evlist, filter);
Arnaldo Carvalho de Melobe199ad2015-02-21 11:33:47 -08001475out_free:
Arnaldo Carvalho de Melocfd70a22015-02-21 10:09:55 -08001476 free(filter);
1477 return ret;
1478}
1479
Arnaldo Carvalho de Melobe199ad2015-02-21 11:33:47 -08001480int perf_evlist__set_filter_pid(struct perf_evlist *evlist, pid_t pid)
1481{
1482 return perf_evlist__set_filter_pids(evlist, 1, &pid);
1483}
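/*
 * Example (a sketch): a tool tracing system wide can exclude itself with
 *
 *	perf_evlist__set_filter_pid(evlist, getpid());
 *
 * which installs the string "common_pid != <pid>" on every tracepoint evsel;
 * perf_evlist__set_filter_pids() chains further pids together with "&&".
 */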
1484
Arnaldo Carvalho de Melo0c21f732012-08-14 16:42:15 -03001485bool perf_evlist__valid_sample_type(struct perf_evlist *evlist)
Frederic Weisbecker74429962011-05-21 17:49:00 +02001486{
Adrian Hunter75562572013-08-27 11:23:09 +03001487 struct perf_evsel *pos;
Arnaldo Carvalho de Meloc2a70652011-06-02 11:04:54 -03001488
Adrian Hunter75562572013-08-27 11:23:09 +03001489 if (evlist->nr_entries == 1)
1490 return true;
1491
1492 if (evlist->id_pos < 0 || evlist->is_pos < 0)
1493 return false;
1494
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03001495 evlist__for_each_entry(evlist, pos) {
Adrian Hunter75562572013-08-27 11:23:09 +03001496 if (pos->id_pos != evlist->id_pos ||
1497 pos->is_pos != evlist->is_pos)
Arnaldo Carvalho de Meloc2a70652011-06-02 11:04:54 -03001498 return false;
Frederic Weisbecker74429962011-05-21 17:49:00 +02001499 }
1500
Arnaldo Carvalho de Meloc2a70652011-06-02 11:04:54 -03001501 return true;
1502}
1503
Adrian Hunter75562572013-08-27 11:23:09 +03001504u64 __perf_evlist__combined_sample_type(struct perf_evlist *evlist)
Arnaldo Carvalho de Meloc2a70652011-06-02 11:04:54 -03001505{
Adrian Hunter75562572013-08-27 11:23:09 +03001506 struct perf_evsel *evsel;
1507
1508 if (evlist->combined_sample_type)
1509 return evlist->combined_sample_type;
1510
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03001511 evlist__for_each_entry(evlist, evsel)
Adrian Hunter75562572013-08-27 11:23:09 +03001512 evlist->combined_sample_type |= evsel->attr.sample_type;
1513
1514 return evlist->combined_sample_type;
1515}
1516
1517u64 perf_evlist__combined_sample_type(struct perf_evlist *evlist)
1518{
1519 evlist->combined_sample_type = 0;
1520 return __perf_evlist__combined_sample_type(evlist);
Arnaldo Carvalho de Meloc2a70652011-06-02 11:04:54 -03001521}
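/*
 * Worked example (a sketch): with one evsel sampling IP|TID and another
 * sampling IP|PERIOD, the combined sample type is IP|TID|PERIOD, i.e. the
 * bitwise OR over all evsels; the non-__ variant above resets the cached
 * value before recomputing it.
 */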
1522
Andi Kleen98df8582015-07-18 08:24:47 -07001523u64 perf_evlist__combined_branch_type(struct perf_evlist *evlist)
1524{
1525 struct perf_evsel *evsel;
1526 u64 branch_type = 0;
1527
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03001528 evlist__for_each_entry(evlist, evsel)
Andi Kleen98df8582015-07-18 08:24:47 -07001529 branch_type |= evsel->attr.branch_sample_type;
1530 return branch_type;
1531}
1532
Jiri Olsa9ede4732012-10-10 17:38:13 +02001533bool perf_evlist__valid_read_format(struct perf_evlist *evlist)
1534{
1535 struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;
1536 u64 read_format = first->attr.read_format;
1537 u64 sample_type = first->attr.sample_type;
1538
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03001539 evlist__for_each_entry(evlist, pos) {
Jiri Olsa9ede4732012-10-10 17:38:13 +02001540 if (read_format != pos->attr.read_format)
1541 return false;
1542 }
1543
1544 /* PERF_SAMPLE_READ implies PERF_FORMAT_ID. */
1545 if ((sample_type & PERF_SAMPLE_READ) &&
1546 !(read_format & PERF_FORMAT_ID)) {
1547 return false;
1548 }
1549
1550 return true;
1551}
1552
1553u64 perf_evlist__read_format(struct perf_evlist *evlist)
1554{
1555 struct perf_evsel *first = perf_evlist__first(evlist);
1556 return first->attr.read_format;
1557}
1558
Arnaldo Carvalho de Melo0c21f732012-08-14 16:42:15 -03001559u16 perf_evlist__id_hdr_size(struct perf_evlist *evlist)
Arnaldo Carvalho de Melo81e36bf2011-11-11 22:28:50 -02001560{
Arnaldo Carvalho de Melo0c21f732012-08-14 16:42:15 -03001561 struct perf_evsel *first = perf_evlist__first(evlist);
Arnaldo Carvalho de Melo81e36bf2011-11-11 22:28:50 -02001562 struct perf_sample *data;
1563 u64 sample_type;
1564 u16 size = 0;
1565
Arnaldo Carvalho de Melo81e36bf2011-11-11 22:28:50 -02001566 if (!first->attr.sample_id_all)
1567 goto out;
1568
1569 sample_type = first->attr.sample_type;
1570
1571 if (sample_type & PERF_SAMPLE_TID)
1572 size += sizeof(data->tid) * 2;
1573
1574 if (sample_type & PERF_SAMPLE_TIME)
1575 size += sizeof(data->time);
1576
1577 if (sample_type & PERF_SAMPLE_ID)
1578 size += sizeof(data->id);
1579
1580 if (sample_type & PERF_SAMPLE_STREAM_ID)
1581 size += sizeof(data->stream_id);
1582
1583 if (sample_type & PERF_SAMPLE_CPU)
1584 size += sizeof(data->cpu) * 2;
Adrian Hunter75562572013-08-27 11:23:09 +03001585
1586 if (sample_type & PERF_SAMPLE_IDENTIFIER)
1587 size += sizeof(data->id);
Arnaldo Carvalho de Melo81e36bf2011-11-11 22:28:50 -02001588out:
1589 return size;
1590}
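/*
 * Worked example (a sketch, assuming the usual u32 pid/tid and u64 time/id
 * fields of struct perf_sample): with sample_id_all set and
 * sample_type = PERF_SAMPLE_TID | PERF_SAMPLE_TIME | PERF_SAMPLE_IDENTIFIER,
 * the trailer is 2*4 + 8 + 8 = 24 bytes appended to each non-sample event.
 */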
1591
Arnaldo Carvalho de Melo0c21f732012-08-14 16:42:15 -03001592bool perf_evlist__valid_sample_id_all(struct perf_evlist *evlist)
Arnaldo Carvalho de Meloc2a70652011-06-02 11:04:54 -03001593{
Arnaldo Carvalho de Melo0c21f732012-08-14 16:42:15 -03001594 struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;
Arnaldo Carvalho de Meloc2a70652011-06-02 11:04:54 -03001595
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03001596 evlist__for_each_entry_continue(evlist, pos) {
Arnaldo Carvalho de Meloc2a70652011-06-02 11:04:54 -03001597 if (first->attr.sample_id_all != pos->attr.sample_id_all)
1598 return false;
1599 }
1600
1601 return true;
Frederic Weisbecker74429962011-05-21 17:49:00 +02001602}
1603
Arnaldo Carvalho de Melo0c21f732012-08-14 16:42:15 -03001604bool perf_evlist__sample_id_all(struct perf_evlist *evlist)
Frederic Weisbecker74429962011-05-21 17:49:00 +02001605{
Arnaldo Carvalho de Melo0c21f732012-08-14 16:42:15 -03001606 struct perf_evsel *first = perf_evlist__first(evlist);
Arnaldo Carvalho de Meloc2a70652011-06-02 11:04:54 -03001607 return first->attr.sample_id_all;
Frederic Weisbecker74429962011-05-21 17:49:00 +02001608}
Arnaldo Carvalho de Melo81cce8d2011-10-05 19:11:32 -03001609
1610void perf_evlist__set_selected(struct perf_evlist *evlist,
1611 struct perf_evsel *evsel)
1612{
1613 evlist->selected = evsel;
1614}
Arnaldo Carvalho de Melo727ab042011-10-25 10:42:19 -02001615
Namhyung Kima74b4b62013-03-15 14:48:48 +09001616void perf_evlist__close(struct perf_evlist *evlist)
1617{
1618 struct perf_evsel *evsel;
1619 int ncpus = cpu_map__nr(evlist->cpus);
1620 int nthreads = thread_map__nr(evlist->threads);
Stephane Eranian8ad92192014-01-17 16:34:06 +01001621 int n;
Namhyung Kima74b4b62013-03-15 14:48:48 +09001622
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03001623 evlist__for_each_entry_reverse(evlist, evsel) {
Stephane Eranian8ad92192014-01-17 16:34:06 +01001624 n = evsel->cpus ? evsel->cpus->nr : ncpus;
1625 perf_evsel__close(evsel, n, nthreads);
1626 }
Namhyung Kima74b4b62013-03-15 14:48:48 +09001627}
1628
Arnaldo Carvalho de Melo4112eb12014-10-10 15:55:15 -03001629static int perf_evlist__create_syswide_maps(struct perf_evlist *evlist)
1630{
Adrian Hunter8c0498b2015-09-08 10:59:00 +03001631 struct cpu_map *cpus;
1632 struct thread_map *threads;
Arnaldo Carvalho de Melo4112eb12014-10-10 15:55:15 -03001633 int err = -ENOMEM;
1634
1635 /*
1636 * Try reading /sys/devices/system/cpu/online to get
1637 * an all cpus map.
1638 *
1639 * FIXME: -ENOMEM is the best we can do here, the cpu_map
1640 * code needs an overhaul to properly forward the
1641 * error, and we may not want to do that fallback to a
1642 * default cpu identity map :-\
1643 */
Adrian Hunter8c0498b2015-09-08 10:59:00 +03001644 cpus = cpu_map__new(NULL);
1645 if (!cpus)
Arnaldo Carvalho de Melo4112eb12014-10-10 15:55:15 -03001646 goto out;
1647
Adrian Hunter8c0498b2015-09-08 10:59:00 +03001648 threads = thread_map__new_dummy();
1649 if (!threads)
1650 goto out_put;
Arnaldo Carvalho de Melo4112eb12014-10-10 15:55:15 -03001651
Adrian Hunter8c0498b2015-09-08 10:59:00 +03001652 perf_evlist__set_maps(evlist, cpus, threads);
	err = 0; /* success: don't return the -ENOMEM set above */
Arnaldo Carvalho de Melo4112eb12014-10-10 15:55:15 -03001653out:
1654 return err;
Adrian Hunter8c0498b2015-09-08 10:59:00 +03001655out_put:
1656 cpu_map__put(cpus);
Arnaldo Carvalho de Melo4112eb12014-10-10 15:55:15 -03001657 goto out;
1658}
1659
Jiri Olsa6a4bb042012-08-08 12:22:36 +02001660int perf_evlist__open(struct perf_evlist *evlist)
Arnaldo Carvalho de Melo727ab042011-10-25 10:42:19 -02001661{
Jiri Olsa6a4bb042012-08-08 12:22:36 +02001662 struct perf_evsel *evsel;
Namhyung Kima74b4b62013-03-15 14:48:48 +09001663 int err;
Arnaldo Carvalho de Melo727ab042011-10-25 10:42:19 -02001664
Arnaldo Carvalho de Melo4112eb12014-10-10 15:55:15 -03001665 /*
1666 * Default: one fd per CPU, all threads, aka systemwide
1667 * as sys_perf_event_open(cpu = -1, thread = -1) is EINVAL
1668 */
1669 if (evlist->threads == NULL && evlist->cpus == NULL) {
1670 err = perf_evlist__create_syswide_maps(evlist);
1671 if (err < 0)
1672 goto out_err;
1673 }
1674
Adrian Hunter733cd2f2013-09-06 22:40:11 +03001675 perf_evlist__update_id_pos(evlist);
1676
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03001677 evlist__for_each_entry(evlist, evsel) {
Adrian Hunter23df7f72016-01-07 10:13:59 +01001678 err = perf_evsel__open(evsel, evsel->cpus, evsel->threads);
Arnaldo Carvalho de Melo727ab042011-10-25 10:42:19 -02001679 if (err < 0)
1680 goto out_err;
1681 }
1682
1683 return 0;
1684out_err:
Namhyung Kima74b4b62013-03-15 14:48:48 +09001685 perf_evlist__close(evlist);
Namhyung Kim41c21a62012-02-23 12:13:36 +09001686 errno = -err;
Arnaldo Carvalho de Melo727ab042011-10-25 10:42:19 -02001687 return err;
1688}
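/*
 * Usage sketch (error handling abridged; parse_events() is declared in
 * parse-events.h): with no cpu or thread maps set, the open falls back to
 * the system wide maps created above:
 *
 *	struct perf_evlist *evlist = perf_evlist__new();
 *	char errbuf[BUFSIZ];
 *
 *	if (parse_events(evlist, "cycles", NULL))
 *		goto out;
 *	if (perf_evlist__open(evlist) < 0) {
 *		perf_evlist__strerror_open(evlist, errno, errbuf, sizeof(errbuf));
 *		pr_err("%s\n", errbuf);
 *	}
 */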
Arnaldo Carvalho de Melo35b9d882011-11-09 08:47:15 -02001689
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03001690int perf_evlist__prepare_workload(struct perf_evlist *evlist, struct target *target,
Namhyung Kim55e162e2013-03-11 16:43:17 +09001691 const char *argv[], bool pipe_output,
Arnaldo Carvalho de Melo735f7e02014-01-03 14:56:49 -03001692 void (*exec_error)(int signo, siginfo_t *info, void *ucontext))
Arnaldo Carvalho de Melo35b9d882011-11-09 08:47:15 -02001693{
1694 int child_ready_pipe[2], go_pipe[2];
1695 char bf;
1696
1697 if (pipe(child_ready_pipe) < 0) {
1698 perror("failed to create 'ready' pipe");
1699 return -1;
1700 }
1701
1702 if (pipe(go_pipe) < 0) {
1703 perror("failed to create 'go' pipe");
1704 goto out_close_ready_pipe;
1705 }
1706
1707 evlist->workload.pid = fork();
1708 if (evlist->workload.pid < 0) {
1709 perror("failed to fork");
1710 goto out_close_pipes;
1711 }
1712
1713 if (!evlist->workload.pid) {
Arnaldo Carvalho de Melo5f1c4222014-07-28 12:39:50 -03001714 int ret;
1715
Namhyung Kim119fa3c2013-03-11 16:43:16 +09001716 if (pipe_output)
Arnaldo Carvalho de Melo35b9d882011-11-09 08:47:15 -02001717 dup2(2, 1);
1718
David Ahern0817df02013-05-25 17:50:39 -06001719 signal(SIGTERM, SIG_DFL);
1720
Arnaldo Carvalho de Melo35b9d882011-11-09 08:47:15 -02001721 close(child_ready_pipe[0]);
1722 close(go_pipe[1]);
1723 fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);
1724
1725 /*
Arnaldo Carvalho de Melo35b9d882011-11-09 08:47:15 -02001726 * Tell the parent we're ready to go
1727 */
1728 close(child_ready_pipe[1]);
1729
1730 /*
1731 * Wait until the parent tells us to go.
1732 */
Arnaldo Carvalho de Melo5f1c4222014-07-28 12:39:50 -03001733 ret = read(go_pipe[0], &bf, 1);
1734 /*
1735 * The parent will ask for the execvp() to be performed by
1736 * writing exactly one byte to workload.cork_fd, usually via
1737 * perf_evlist__start_workload().
1738 *
Arnaldo Carvalho de Melo20f86fc2015-02-03 13:29:05 -03001739 * For cancelling the workload without actually running it,
Arnaldo Carvalho de Melo5f1c4222014-07-28 12:39:50 -03001740 * the parent will just close workload.cork_fd, without writing
1741 * anything, i.e. read will return zero and we just exit()
1742 * here.
1743 */
1744 if (ret != 1) {
1745 if (ret == -1)
1746 perror("unable to read pipe");
1747 exit(ret);
1748 }
Arnaldo Carvalho de Melo35b9d882011-11-09 08:47:15 -02001749
1750 execvp(argv[0], (char **)argv);
1751
Arnaldo Carvalho de Melo735f7e02014-01-03 14:56:49 -03001752 if (exec_error) {
Arnaldo Carvalho de Melof33cbe72014-01-02 15:11:25 -03001753 union sigval val;
1754
1755 val.sival_int = errno;
1756 if (sigqueue(getppid(), SIGUSR1, val))
1757 perror(argv[0]);
1758 } else
1759 perror(argv[0]);
Arnaldo Carvalho de Melo35b9d882011-11-09 08:47:15 -02001760 exit(-1);
1761 }
1762
Arnaldo Carvalho de Melo735f7e02014-01-03 14:56:49 -03001763 if (exec_error) {
1764 struct sigaction act = {
1765 .sa_flags = SA_SIGINFO,
1766 .sa_sigaction = exec_error,
1767 };
1768 sigaction(SIGUSR1, &act, NULL);
1769 }
1770
Arnaldo Carvalho de Melo1aaf63b2014-10-10 14:29:49 -03001771 if (target__none(target)) {
1772 if (evlist->threads == NULL) {
1773 fprintf(stderr, "FATAL: evlist->threads needs to be set at this point (%s:%d).\n",
1774 __func__, __LINE__);
1775 goto out_close_pipes;
1776 }
Jiri Olsae13798c2015-06-23 00:36:02 +02001777 thread_map__set_pid(evlist->threads, 0, evlist->workload.pid);
Arnaldo Carvalho de Melo1aaf63b2014-10-10 14:29:49 -03001778 }
Arnaldo Carvalho de Melo35b9d882011-11-09 08:47:15 -02001779
1780 close(child_ready_pipe[1]);
1781 close(go_pipe[0]);
1782 /*
1783 * wait for child to settle
1784 */
1785 if (read(child_ready_pipe[0], &bf, 1) == -1) {
1786 perror("unable to read pipe");
1787 goto out_close_pipes;
1788 }
1789
Namhyung Kimbcf31452013-06-26 16:14:15 +09001790 fcntl(go_pipe[1], F_SETFD, FD_CLOEXEC);
Arnaldo Carvalho de Melo35b9d882011-11-09 08:47:15 -02001791 evlist->workload.cork_fd = go_pipe[1];
1792 close(child_ready_pipe[0]);
1793 return 0;
1794
1795out_close_pipes:
1796 close(go_pipe[0]);
1797 close(go_pipe[1]);
1798out_close_ready_pipe:
1799 close(child_ready_pipe[0]);
1800 close(child_ready_pipe[1]);
1801 return -1;
1802}
1803
1804int perf_evlist__start_workload(struct perf_evlist *evlist)
1805{
1806 if (evlist->workload.cork_fd > 0) {
David Ahernb3824402013-07-02 13:27:21 -06001807 char bf = 0;
Namhyung Kimbcf31452013-06-26 16:14:15 +09001808 int ret;
Arnaldo Carvalho de Melo35b9d882011-11-09 08:47:15 -02001809 /*
1810 * Remove the cork, let it rip!
1811 */
Namhyung Kimbcf31452013-06-26 16:14:15 +09001812 ret = write(evlist->workload.cork_fd, &bf, 1);
1813 if (ret < 0)
1814 perror("unable to write to pipe");
1815
1816 close(evlist->workload.cork_fd);
1817 return ret;
Arnaldo Carvalho de Melo35b9d882011-11-09 08:47:15 -02001818 }
1819
1820 return 0;
1821}
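/*
 * Typical ordering (a sketch; the 'opts' fields are hypothetical caller
 * state): create the maps, prepare the corked workload so its pid lands in
 * the thread map, open and mmap the events, enable them, and only then
 * uncork so the child execvp()s:
 *
 *	perf_evlist__create_maps(evlist, &opts.target);
 *	perf_evlist__prepare_workload(evlist, &opts.target, argv, false, NULL);
 *	perf_evlist__open(evlist);
 *	perf_evlist__mmap(evlist, opts.mmap_pages, false);
 *	perf_evlist__enable(evlist);
 *	perf_evlist__start_workload(evlist);
 */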
Arnaldo Carvalho de Melocb0b29e2012-08-02 11:42:57 -03001822
Arnaldo Carvalho de Meloa3f698f2012-08-02 12:23:46 -03001823int perf_evlist__parse_sample(struct perf_evlist *evlist, union perf_event *event,
Arnaldo Carvalho de Melo0807d2d2012-09-26 12:48:18 -03001824 struct perf_sample *sample)
Arnaldo Carvalho de Melocb0b29e2012-08-02 11:42:57 -03001825{
Adrian Hunter75562572013-08-27 11:23:09 +03001826 struct perf_evsel *evsel = perf_evlist__event2evsel(evlist, event);
1827
1828 if (!evsel)
1829 return -EFAULT;
Arnaldo Carvalho de Melo0807d2d2012-09-26 12:48:18 -03001830 return perf_evsel__parse_sample(evsel, event, sample);
Arnaldo Carvalho de Melocb0b29e2012-08-02 11:42:57 -03001831}
Arnaldo Carvalho de Melo78f067b2012-09-06 14:54:11 -03001832
1833size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp)
1834{
1835 struct perf_evsel *evsel;
1836 size_t printed = 0;
1837
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03001838 evlist__for_each_entry(evlist, evsel) {
Arnaldo Carvalho de Melo78f067b2012-09-06 14:54:11 -03001839 printed += fprintf(fp, "%s%s", evsel->idx ? ", " : "",
1840 perf_evsel__name(evsel));
1841 }
1842
Davidlohr Buesob2222132013-11-12 22:24:24 -08001843 return printed + fprintf(fp, "\n");
Arnaldo Carvalho de Melo78f067b2012-09-06 14:54:11 -03001844}
Arnaldo Carvalho de Melo6ef068c2013-10-17 12:07:58 -03001845
Arnaldo Carvalho de Melod9aade7f2016-02-18 13:34:09 -03001846int perf_evlist__strerror_open(struct perf_evlist *evlist,
Arnaldo Carvalho de Meloa8f23d82013-10-17 17:38:29 -03001847 int err, char *buf, size_t size)
1848{
1849 int printed, value;
Arnaldo Carvalho de Meloc8b5f2c2016-07-06 11:56:20 -03001850 char sbuf[STRERR_BUFSIZE], *emsg = str_error_r(err, sbuf, sizeof(sbuf));
Arnaldo Carvalho de Meloa8f23d82013-10-17 17:38:29 -03001851
1852 switch (err) {
1853 case EACCES:
1854 case EPERM:
1855 printed = scnprintf(buf, size,
1856 "Error:\t%s.\n"
1857 "Hint:\tCheck /proc/sys/kernel/perf_event_paranoid setting.", emsg);
1858
Adrian Hunter1a472452013-12-11 14:36:23 +02001859 value = perf_event_paranoid();
Arnaldo Carvalho de Meloa8f23d82013-10-17 17:38:29 -03001860
1861 printed += scnprintf(buf + printed, size - printed, "\nHint:\t");
1862
1863 if (value >= 2) {
1864 printed += scnprintf(buf + printed, size - printed,
1865 "For your workloads it needs to be <= 1\nHint:\t");
1866 }
1867 printed += scnprintf(buf + printed, size - printed,
Arnaldo Carvalho de Melo5229e362014-06-10 17:18:54 -03001868 "For system wide tracing it needs to be set to -1.\n");
Arnaldo Carvalho de Meloa8f23d82013-10-17 17:38:29 -03001869
1870 printed += scnprintf(buf + printed, size - printed,
Arnaldo Carvalho de Melo5229e362014-06-10 17:18:54 -03001871 "Hint:\tTry: 'sudo sh -c \"echo -1 > /proc/sys/kernel/perf_event_paranoid\"'\n"
1872 "Hint:\tThe current value is %d.", value);
Arnaldo Carvalho de Meloa8f23d82013-10-17 17:38:29 -03001873 break;
Arnaldo Carvalho de Melod9aade7f2016-02-18 13:34:09 -03001874 case EINVAL: {
1875 struct perf_evsel *first = perf_evlist__first(evlist);
1876 int max_freq;
1877
1878 if (sysctl__read_int("kernel/perf_event_max_sample_rate", &max_freq) < 0)
1879 goto out_default;
1880
1881 if (first->attr.sample_freq < (u64)max_freq)
1882 goto out_default;
1883
1884 printed = scnprintf(buf, size,
1885 "Error:\t%s.\n"
1886 "Hint:\tCheck /proc/sys/kernel/perf_event_max_sample_rate.\n"
1887 "Hint:\tThe current value is %d and %" PRIu64 " is being requested.",
1888 emsg, max_freq, first->attr.sample_freq);
1889 break;
1890 }
Arnaldo Carvalho de Meloa8f23d82013-10-17 17:38:29 -03001891 default:
Arnaldo Carvalho de Melod9aade7f2016-02-18 13:34:09 -03001892out_default:
Arnaldo Carvalho de Meloa8f23d82013-10-17 17:38:29 -03001893 scnprintf(buf, size, "%s", emsg);
1894 break;
1895 }
1896
1897 return 0;
1898}
Adrian Huntera025e4f2013-12-11 14:36:35 +02001899
Arnaldo Carvalho de Melo956fa572014-12-11 18:03:01 -03001900int perf_evlist__strerror_mmap(struct perf_evlist *evlist, int err, char *buf, size_t size)
1901{
Arnaldo Carvalho de Meloc8b5f2c2016-07-06 11:56:20 -03001902 char sbuf[STRERR_BUFSIZE], *emsg = str_error_r(err, sbuf, sizeof(sbuf));
Arnaldo Carvalho de Meloe965bea2014-12-12 16:25:33 -03001903 int pages_attempted = evlist->mmap_len / 1024, pages_max_per_user, printed = 0;
Arnaldo Carvalho de Melo956fa572014-12-11 18:03:01 -03001904
1905 switch (err) {
1906 case EPERM:
Arnaldo Carvalho de Meloe5d4a292014-12-12 15:59:51 -03001907 sysctl__read_int("kernel/perf_event_mlock_kb", &pages_max_per_user);
Arnaldo Carvalho de Meloe965bea2014-12-12 16:25:33 -03001908 printed += scnprintf(buf + printed, size - printed,
1909 "Error:\t%s.\n"
Arnaldo Carvalho de Melo956fa572014-12-11 18:03:01 -03001910 "Hint:\tCheck /proc/sys/kernel/perf_event_mlock_kb (%d kB) setting.\n"
Arnaldo Carvalho de Meloe965bea2014-12-12 16:25:33 -03001911 "Hint:\tTried using %d kB.\n",
Arnaldo Carvalho de Meloe5d4a292014-12-12 15:59:51 -03001912 emsg, pages_max_per_user, pages_attempted);
Arnaldo Carvalho de Meloe965bea2014-12-12 16:25:33 -03001913
1914 if (pages_attempted >= pages_max_per_user) {
1915 printed += scnprintf(buf + printed, size - printed,
1916 "Hint:\tTry 'sudo sh -c \"echo %d > /proc/sys/kernel/perf_event_mlock_kb\"', or\n",
1917 pages_max_per_user + pages_attempted);
1918 }
1919
1920 printed += scnprintf(buf + printed, size - printed,
1921 "Hint:\tTry using a smaller -m/--mmap-pages value.");
Arnaldo Carvalho de Melo956fa572014-12-11 18:03:01 -03001922 break;
1923 default:
1924 scnprintf(buf, size, "%s", emsg);
1925 break;
1926 }
1927
1928 return 0;
1929}
1930
Adrian Huntera025e4f2013-12-11 14:36:35 +02001931void perf_evlist__to_front(struct perf_evlist *evlist,
1932 struct perf_evsel *move_evsel)
1933{
1934 struct perf_evsel *evsel, *n;
1935 LIST_HEAD(move);
1936
1937 if (move_evsel == perf_evlist__first(evlist))
1938 return;
1939
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03001940 evlist__for_each_entry_safe(evlist, n, evsel) {
Adrian Huntera025e4f2013-12-11 14:36:35 +02001941 if (evsel->leader == move_evsel->leader)
1942 list_move_tail(&evsel->node, &move);
1943 }
1944
1945 list_splice(&move, &evlist->entries);
1946}
Adrian Hunter60b08962014-07-31 09:00:52 +03001947
1948void perf_evlist__set_tracking_event(struct perf_evlist *evlist,
1949 struct perf_evsel *tracking_evsel)
1950{
1951 struct perf_evsel *evsel;
1952
1953 if (tracking_evsel->tracking)
1954 return;
1955
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03001956 evlist__for_each_entry(evlist, evsel) {
Adrian Hunter60b08962014-07-31 09:00:52 +03001957 if (evsel != tracking_evsel)
1958 evsel->tracking = false;
1959 }
1960
1961 tracking_evsel->tracking = true;
1962}
Wang Nan7630b3e2016-02-22 09:10:33 +00001963
1964struct perf_evsel *
1965perf_evlist__find_evsel_by_str(struct perf_evlist *evlist,
1966 const char *str)
1967{
1968 struct perf_evsel *evsel;
1969
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03001970 evlist__for_each_entry(evlist, evsel) {
Wang Nan7630b3e2016-02-22 09:10:33 +00001971 if (!evsel->name)
1972 continue;
1973 if (strcmp(str, evsel->name) == 0)
1974 return evsel;
1975 }
1976
1977 return NULL;
1978}
Wang Nan54cc54d2016-07-14 08:34:42 +00001979
1980void perf_evlist__toggle_bkw_mmap(struct perf_evlist *evlist,
1981 enum bkw_mmap_state state)
1982{
1983 enum bkw_mmap_state old_state = evlist->bkw_mmap_state;
1984 enum action {
1985 NONE,
1986 PAUSE,
1987 RESUME,
1988 } action = NONE;
1989
1990 if (!evlist->backward_mmap)
1991 return;
1992
1993 switch (old_state) {
1994 case BKW_MMAP_NOTREADY: {
1995 if (state != BKW_MMAP_RUNNING)
1996 goto state_err;
1997 break;
1998 }
1999 case BKW_MMAP_RUNNING: {
2000 if (state != BKW_MMAP_DATA_PENDING)
2001 goto state_err;
2002 action = PAUSE;
2003 break;
2004 }
2005 case BKW_MMAP_DATA_PENDING: {
2006 if (state != BKW_MMAP_EMPTY)
2007 goto state_err;
2008 break;
2009 }
2010 case BKW_MMAP_EMPTY: {
2011 if (state != BKW_MMAP_RUNNING)
2012 goto state_err;
2013 action = RESUME;
2014 break;
2015 }
2016 default:
2017 WARN_ONCE(1, "Shouldn't get here\n");
2018 }
2019
2020 evlist->bkw_mmap_state = state;
2021
2022 switch (action) {
2023 case PAUSE:
2024 perf_evlist__pause(evlist);
2025 break;
2026 case RESUME:
2027 perf_evlist__resume(evlist);
2028 break;
2029 case NONE:
2030 default:
2031 break;
2032 }
2033
2034state_err:
2035 return;
2036}
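/*
 * State machine recap (a sketch): the only legal transitions are
 * NOTREADY -> RUNNING, RUNNING -> DATA_PENDING (pauses the backward maps),
 * DATA_PENDING -> EMPTY, and EMPTY -> RUNNING (resumes them). A tool taking
 * an overwrite snapshot would therefore do something like:
 *
 *	perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_DATA_PENDING);
 *	... drain evlist->backward_mmap ...
 *	perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_EMPTY);
 *	perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_RUNNING);
 */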