/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */
#include "util.h"
#include <api/fs/fs.h>
#include <poll.h>
#include "cpumap.h"
#include "thread_map.h"
#include "target.h"
#include "evlist.h"
#include "evsel.h"
#include "debug.h"
#include <unistd.h>

#include "parse-events.h"
#include <subcmd/parse-options.h>

#include <sys/mman.h>

#include <linux/bitops.h>
#include <linux/hash.h>
#include <linux/log2.h>
#include <linux/err.h>

static void perf_mmap__munmap(struct perf_mmap *map);
static void perf_mmap__put(struct perf_mmap *map);

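/*
 * FD() and SID() index the per-(cpu, thread) file descriptor and
 * perf_sample_id tables that each evsel carries in xyarray form.
 */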
#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
#define SID(e, x, y) xyarray__entry(e->sample_id, x, y)

void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
		       struct thread_map *threads)
{
	int i;

	for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i)
		INIT_HLIST_HEAD(&evlist->heads[i]);
	INIT_LIST_HEAD(&evlist->entries);
	perf_evlist__set_maps(evlist, cpus, threads);
	fdarray__init(&evlist->pollfd, 64);
	evlist->workload.pid = -1;
	evlist->backward = false;
}

struct perf_evlist *perf_evlist__new(void)
{
	struct perf_evlist *evlist = zalloc(sizeof(*evlist));

	if (evlist != NULL)
		perf_evlist__init(evlist, NULL, NULL);

	return evlist;
}

struct perf_evlist *perf_evlist__new_default(void)
{
	struct perf_evlist *evlist = perf_evlist__new();

	if (evlist && perf_evlist__add_default(evlist)) {
		perf_evlist__delete(evlist);
		evlist = NULL;
	}

	return evlist;
}

struct perf_evlist *perf_evlist__new_dummy(void)
{
	struct perf_evlist *evlist = perf_evlist__new();

	if (evlist && perf_evlist__add_dummy(evlist)) {
		perf_evlist__delete(evlist);
		evlist = NULL;
	}

	return evlist;
}

/**
 * perf_evlist__set_id_pos - set the positions of event ids.
 * @evlist: selected event list
 *
 * Events with compatible sample types all have the same id_pos
 * and is_pos.  For convenience, put a copy on evlist.
 */
void perf_evlist__set_id_pos(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);

	evlist->id_pos = first->id_pos;
	evlist->is_pos = first->is_pos;
}

static void perf_evlist__update_id_pos(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	evlist__for_each_entry(evlist, evsel)
		perf_evsel__calc_id_pos(evsel);

	perf_evlist__set_id_pos(evlist);
}

static void perf_evlist__purge(struct perf_evlist *evlist)
{
	struct perf_evsel *pos, *n;

	evlist__for_each_entry_safe(evlist, n, pos) {
		list_del_init(&pos->node);
		pos->evlist = NULL;
		perf_evsel__delete(pos);
	}

	evlist->nr_entries = 0;
}

void perf_evlist__exit(struct perf_evlist *evlist)
{
	zfree(&evlist->mmap);
	zfree(&evlist->backward_mmap);
	fdarray__exit(&evlist->pollfd);
}

void perf_evlist__delete(struct perf_evlist *evlist)
{
	if (evlist == NULL)
		return;

	perf_evlist__munmap(evlist);
	perf_evlist__close(evlist);
	cpu_map__put(evlist->cpus);
	thread_map__put(evlist->threads);
	evlist->cpus = NULL;
	evlist->threads = NULL;
	perf_evlist__purge(evlist);
	perf_evlist__exit(evlist);
	free(evlist);
}

static void __perf_evlist__propagate_maps(struct perf_evlist *evlist,
					  struct perf_evsel *evsel)
{
	/*
	 * We already have cpus for evsel (via PMU sysfs), so keep it
	 * if there's no target cpu list defined.
	 */
	if (!evsel->own_cpus || evlist->has_user_cpus) {
		cpu_map__put(evsel->cpus);
		evsel->cpus = cpu_map__get(evlist->cpus);
	} else if (evsel->cpus != evsel->own_cpus) {
		cpu_map__put(evsel->cpus);
		evsel->cpus = cpu_map__get(evsel->own_cpus);
	}

	thread_map__put(evsel->threads);
	evsel->threads = thread_map__get(evlist->threads);
}

static void perf_evlist__propagate_maps(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	evlist__for_each_entry(evlist, evsel)
		__perf_evlist__propagate_maps(evlist, evsel);
}

void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry)
{
	entry->evlist = evlist;
	list_add_tail(&entry->node, &evlist->entries);
	entry->idx = evlist->nr_entries;
	entry->tracking = !entry->idx;

	if (!evlist->nr_entries++)
		perf_evlist__set_id_pos(evlist);

	__perf_evlist__propagate_maps(evlist, entry);
}

void perf_evlist__remove(struct perf_evlist *evlist, struct perf_evsel *evsel)
{
	evsel->evlist = NULL;
	list_del_init(&evsel->node);
	evlist->nr_entries -= 1;
}

void perf_evlist__splice_list_tail(struct perf_evlist *evlist,
				   struct list_head *list)
{
	struct perf_evsel *evsel, *temp;

	__evlist__for_each_entry_safe(list, temp, evsel) {
		list_del_init(&evsel->node);
		perf_evlist__add(evlist, evsel);
	}
}

void __perf_evlist__set_leader(struct list_head *list)
{
	struct perf_evsel *evsel, *leader;

	leader = list_entry(list->next, struct perf_evsel, node);
	evsel = list_entry(list->prev, struct perf_evsel, node);

	leader->nr_members = evsel->idx - leader->idx + 1;

	__evlist__for_each_entry(list, evsel) {
		evsel->leader = leader;
	}
}

void perf_evlist__set_leader(struct perf_evlist *evlist)
{
	if (evlist->nr_entries) {
		evlist->nr_groups = evlist->nr_entries > 1 ? 1 : 0;
		__perf_evlist__set_leader(&evlist->entries);
	}
}

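/*
 * Probe for the deepest precise_ip level the running kernel/PMU accepts:
 * start at 3 (":ppp") and keep lowering it until a throwaway
 * sys_perf_event_open() on the current thread succeeds, leaving the best
 * supported level in attr->precise_ip.
 */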
void perf_event_attr__set_max_precise_ip(struct perf_event_attr *attr)
{
	attr->precise_ip = 3;

	while (attr->precise_ip != 0) {
		int fd = sys_perf_event_open(attr, 0, -1, -1, 0);
		if (fd != -1) {
			close(fd);
			break;
		}
		--attr->precise_ip;
	}
}

int perf_evlist__add_default(struct perf_evlist *evlist)
{
	struct perf_event_attr attr = {
		.type	= PERF_TYPE_HARDWARE,
		.config = PERF_COUNT_HW_CPU_CYCLES,
	};
	struct perf_evsel *evsel;

	event_attr_init(&attr);

	perf_event_attr__set_max_precise_ip(&attr);

	evsel = perf_evsel__new(&attr);
	if (evsel == NULL)
		goto error;

	/* use asprintf() because free(evsel) assumes name is allocated */
	if (asprintf(&evsel->name, "cycles%.*s",
		     attr.precise_ip ? attr.precise_ip + 1 : 0, ":ppp") < 0)
		goto error_free;

	perf_evlist__add(evlist, evsel);
	return 0;
error_free:
	perf_evsel__delete(evsel);
error:
	return -ENOMEM;
}

int perf_evlist__add_dummy(struct perf_evlist *evlist)
{
	struct perf_event_attr attr = {
		.type	= PERF_TYPE_SOFTWARE,
		.config = PERF_COUNT_SW_DUMMY,
		.size	= sizeof(attr), /* to capture ABI version */
	};
	struct perf_evsel *evsel = perf_evsel__new(&attr);

	if (evsel == NULL)
		return -ENOMEM;

	perf_evlist__add(evlist, evsel);
	return 0;
}

static int perf_evlist__add_attrs(struct perf_evlist *evlist,
				  struct perf_event_attr *attrs, size_t nr_attrs)
{
	struct perf_evsel *evsel, *n;
	LIST_HEAD(head);
	size_t i;

	for (i = 0; i < nr_attrs; i++) {
		evsel = perf_evsel__new_idx(attrs + i, evlist->nr_entries + i);
		if (evsel == NULL)
			goto out_delete_partial_list;
		list_add_tail(&evsel->node, &head);
	}

	perf_evlist__splice_list_tail(evlist, &head);

	return 0;

out_delete_partial_list:
	__evlist__for_each_entry_safe(&head, n, evsel)
		perf_evsel__delete(evsel);
	return -1;
}

int __perf_evlist__add_default_attrs(struct perf_evlist *evlist,
				     struct perf_event_attr *attrs, size_t nr_attrs)
{
	size_t i;

	for (i = 0; i < nr_attrs; i++)
		event_attr_init(attrs + i);

	return perf_evlist__add_attrs(evlist, attrs, nr_attrs);
}

struct perf_evsel *
perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id)
{
	struct perf_evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->attr.type == PERF_TYPE_TRACEPOINT &&
		    (int)evsel->attr.config == id)
			return evsel;
	}

	return NULL;
}

struct perf_evsel *
perf_evlist__find_tracepoint_by_name(struct perf_evlist *evlist,
				     const char *name)
{
	struct perf_evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if ((evsel->attr.type == PERF_TYPE_TRACEPOINT) &&
		    (strcmp(evsel->name, name) == 0))
			return evsel;
	}

	return NULL;
}

int perf_evlist__add_newtp(struct perf_evlist *evlist,
			   const char *sys, const char *name, void *handler)
{
	struct perf_evsel *evsel = perf_evsel__newtp(sys, name);

	if (IS_ERR(evsel))
		return -1;

	evsel->handler = handler;
	perf_evlist__add(evlist, evsel);
	return 0;
}

static int perf_evlist__nr_threads(struct perf_evlist *evlist,
				   struct perf_evsel *evsel)
{
	if (evsel->system_wide)
		return 1;
	else
		return thread_map__nr(evlist->threads);
}

void perf_evlist__disable(struct perf_evlist *evlist)
{
	struct perf_evsel *pos;

	evlist__for_each_entry(evlist, pos) {
		if (!perf_evsel__is_group_leader(pos) || !pos->fd)
			continue;
		perf_evsel__disable(pos);
	}

	evlist->enabled = false;
}

void perf_evlist__enable(struct perf_evlist *evlist)
{
	struct perf_evsel *pos;

	evlist__for_each_entry(evlist, pos) {
		if (!perf_evsel__is_group_leader(pos) || !pos->fd)
			continue;
		perf_evsel__enable(pos);
	}

	evlist->enabled = true;
}

void perf_evlist__toggle_enable(struct perf_evlist *evlist)
{
	(evlist->enabled ? perf_evlist__disable : perf_evlist__enable)(evlist);
}

static int perf_evlist__enable_event_cpu(struct perf_evlist *evlist,
					 struct perf_evsel *evsel, int cpu)
{
	int thread, err;
	int nr_threads = perf_evlist__nr_threads(evlist, evsel);

	if (!evsel->fd)
		return -EINVAL;

	for (thread = 0; thread < nr_threads; thread++) {
		err = ioctl(FD(evsel, cpu, thread),
			    PERF_EVENT_IOC_ENABLE, 0);
		if (err)
			return err;
	}
	return 0;
}

static int perf_evlist__enable_event_thread(struct perf_evlist *evlist,
					    struct perf_evsel *evsel,
					    int thread)
{
	int cpu, err;
	int nr_cpus = cpu_map__nr(evlist->cpus);

	if (!evsel->fd)
		return -EINVAL;

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		err = ioctl(FD(evsel, cpu, thread), PERF_EVENT_IOC_ENABLE, 0);
		if (err)
			return err;
	}
	return 0;
}

int perf_evlist__enable_event_idx(struct perf_evlist *evlist,
				  struct perf_evsel *evsel, int idx)
{
	bool per_cpu_mmaps = !cpu_map__empty(evlist->cpus);

	if (per_cpu_mmaps)
		return perf_evlist__enable_event_cpu(evlist, evsel, idx);
	else
		return perf_evlist__enable_event_thread(evlist, evsel, idx);
}

int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
{
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = thread_map__nr(evlist->threads);
	int nfds = 0;
	struct perf_evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->system_wide)
			nfds += nr_cpus;
		else
			nfds += nr_cpus * nr_threads;
	}

	if (fdarray__available_entries(&evlist->pollfd) < nfds &&
	    fdarray__grow(&evlist->pollfd, nfds) < 0)
		return -ENOMEM;

	return 0;
}

static int __perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd,
				     struct perf_mmap *map, short revent)
{
	int pos = fdarray__add(&evlist->pollfd, fd, revent | POLLERR | POLLHUP);
	/*
	 * Save the idx so that when we filter out fds POLLHUP'ed we can
	 * close the associated evlist->mmap[] entry.
	 */
	if (pos >= 0) {
		evlist->pollfd.priv[pos].ptr = map;

		fcntl(fd, F_SETFL, O_NONBLOCK);
	}

	return pos;
}

int perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd)
{
	return __perf_evlist__add_pollfd(evlist, fd, NULL, POLLIN);
}

static void perf_evlist__munmap_filtered(struct fdarray *fda, int fd,
					 void *arg __maybe_unused)
{
	struct perf_mmap *map = fda->priv[fd].ptr;

	if (map)
		perf_mmap__put(map);
}

int perf_evlist__filter_pollfd(struct perf_evlist *evlist, short revents_and_mask)
{
	return fdarray__filter(&evlist->pollfd, revents_and_mask,
			       perf_evlist__munmap_filtered, NULL);
}

int perf_evlist__poll(struct perf_evlist *evlist, int timeout)
{
	return fdarray__poll(&evlist->pollfd, timeout);
}

static void perf_evlist__id_hash(struct perf_evlist *evlist,
				 struct perf_evsel *evsel,
				 int cpu, int thread, u64 id)
{
	int hash;
	struct perf_sample_id *sid = SID(evsel, cpu, thread);

	sid->id = id;
	sid->evsel = evsel;
	hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS);
	hlist_add_head(&sid->node, &evlist->heads[hash]);
}

void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel,
			 int cpu, int thread, u64 id)
{
	perf_evlist__id_hash(evlist, evsel, cpu, thread, id);
	evsel->id[evsel->ids++] = id;
}

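/*
 * Associate the event id for (cpu, thread) with this evsel.  Newer kernels
 * hand the id back via the PERF_EVENT_IOC_ID ioctl; on older ones we fall
 * back to read()ing the counter and digging the id out of the
 * PERF_FORMAT_ID slot, whose offset depends on which
 * PERF_FORMAT_TOTAL_TIME_* fields were requested.
 */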
int perf_evlist__id_add_fd(struct perf_evlist *evlist,
			   struct perf_evsel *evsel,
			   int cpu, int thread, int fd)
{
	u64 read_data[4] = { 0, };
	int id_idx = 1; /* The first entry is the counter value */
	u64 id;
	int ret;

	ret = ioctl(fd, PERF_EVENT_IOC_ID, &id);
	if (!ret)
		goto add;

	if (errno != ENOTTY)
		return -1;

	/* Legacy way to get event id.. All hail to old kernels! */

	/*
	 * This way does not work with group format read, so bail
	 * out in that case.
	 */
	if (perf_evlist__read_format(evlist) & PERF_FORMAT_GROUP)
		return -1;

	if (!(evsel->attr.read_format & PERF_FORMAT_ID) ||
	    read(fd, &read_data, sizeof(read_data)) == -1)
		return -1;

	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		++id_idx;
	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		++id_idx;

	id = read_data[id_idx];

 add:
	perf_evlist__id_add(evlist, evsel, cpu, thread, id);
	return 0;
}

static void perf_evlist__set_sid_idx(struct perf_evlist *evlist,
				     struct perf_evsel *evsel, int idx, int cpu,
				     int thread)
{
	struct perf_sample_id *sid = SID(evsel, cpu, thread);
	sid->idx = idx;
	if (evlist->cpus && cpu >= 0)
		sid->cpu = evlist->cpus->map[cpu];
	else
		sid->cpu = -1;
	if (!evsel->system_wide && evlist->threads && thread >= 0)
		sid->tid = thread_map__pid(evlist->threads, thread);
	else
		sid->tid = -1;
}

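/*
 * Map a sample id back to its perf_sample_id entry via the evlist-wide
 * hash table (PERF_EVLIST__HLIST_SIZE buckets keyed by hash_64(id)).
 */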
struct perf_sample_id *perf_evlist__id2sid(struct perf_evlist *evlist, u64 id)
{
	struct hlist_head *head;
	struct perf_sample_id *sid;
	int hash;

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->heads[hash];

	hlist_for_each_entry(sid, head, node)
		if (sid->id == id)
			return sid;

	return NULL;
}

struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
{
	struct perf_sample_id *sid;

	if (evlist->nr_entries == 1 || !id)
		return perf_evlist__first(evlist);

	sid = perf_evlist__id2sid(evlist, id);
	if (sid)
		return sid->evsel;

	if (!perf_evlist__sample_id_all(evlist))
		return perf_evlist__first(evlist);

	return NULL;
}

struct perf_evsel *perf_evlist__id2evsel_strict(struct perf_evlist *evlist,
						u64 id)
{
	struct perf_sample_id *sid;

	if (!id)
		return NULL;

	sid = perf_evlist__id2sid(evlist, id);
	if (sid)
		return sid->evsel;

	return NULL;
}

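/*
 * Extract the sample id from a raw event: for PERF_RECORD_SAMPLE the id
 * sits id_pos u64 words from the start of the sample array; for all other
 * record types it sits is_pos words from the end, since sample_id_all
 * places the id block at the record's tail.
 */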
static int perf_evlist__event2id(struct perf_evlist *evlist,
				 union perf_event *event, u64 *id)
{
	const u64 *array = event->sample.array;
	ssize_t n;

	n = (event->header.size - sizeof(event->header)) >> 3;

	if (event->header.type == PERF_RECORD_SAMPLE) {
		if (evlist->id_pos >= n)
			return -1;
		*id = array[evlist->id_pos];
	} else {
		if (evlist->is_pos > n)
			return -1;
		n -= evlist->is_pos;
		*id = array[n];
	}
	return 0;
}

struct perf_evsel *perf_evlist__event2evsel(struct perf_evlist *evlist,
					    union perf_event *event)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	struct hlist_head *head;
	struct perf_sample_id *sid;
	int hash;
	u64 id;

	if (evlist->nr_entries == 1)
		return first;

	if (!first->attr.sample_id_all &&
	    event->header.type != PERF_RECORD_SAMPLE)
		return first;

	if (perf_evlist__event2id(evlist, event, &id))
		return NULL;

	/* Synthesized events have an id of zero */
	if (!id)
		return first;

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->heads[hash];

	hlist_for_each_entry(sid, head, node) {
		if (sid->id == id)
			return sid->evsel;
	}
	return NULL;
}

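/*
 * PERF_EVENT_IOC_PAUSE_OUTPUT stops the kernel from writing further
 * records into the overwritable backward ring buffers, so that their
 * contents can be read without being overwritten underneath us.
 */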
static int perf_evlist__set_paused(struct perf_evlist *evlist, bool value)
{
	int i;

	if (!evlist->backward_mmap)
		return 0;

	for (i = 0; i < evlist->nr_mmaps; i++) {
		int fd = evlist->backward_mmap[i].fd;
		int err;

		if (fd < 0)
			continue;
		err = ioctl(fd, PERF_EVENT_IOC_PAUSE_OUTPUT, value ? 1 : 0);
		if (err)
			return err;
	}
	return 0;
}

int perf_evlist__pause(struct perf_evlist *evlist)
{
	return perf_evlist__set_paused(evlist, true);
}

int perf_evlist__resume(struct perf_evlist *evlist)
{
	return perf_evlist__set_paused(evlist, false);
}

/* When check_messup is true, 'end' must point to a good entry */
static union perf_event *
perf_mmap__read(struct perf_mmap *md, bool check_messup, u64 start,
		u64 end, u64 *prev)
{
	unsigned char *data = md->base + page_size;
	union perf_event *event = NULL;
	int diff = end - start;

	if (check_messup) {
		/*
		 * If we're further behind than half the buffer, there's a chance
		 * the writer will bite our tail and mess up the samples under us.
		 *
		 * If we somehow ended up ahead of the 'end', we got messed up.
		 *
		 * In either case, truncate and restart at 'end'.
		 */
		if (diff > md->mask / 2 || diff < 0) {
			fprintf(stderr, "WARNING: failed to keep up with mmap data.\n");

			/*
			 * 'end' points to a known good entry, start there.
			 */
			start = end;
			diff = 0;
		}
	}

	if (diff >= (int)sizeof(event->header)) {
		size_t size;

		event = (union perf_event *)&data[start & md->mask];
		size = event->header.size;

		if (size < sizeof(event->header) || diff < (int)size) {
			event = NULL;
			goto broken_event;
		}

		/*
		 * Event straddles the mmap boundary -- header should always
		 * be inside due to u64 alignment of output.
		 */
		if ((start & md->mask) + size != ((start + size) & md->mask)) {
			unsigned int offset = start;
			unsigned int len = min(sizeof(*event), size), cpy;
			void *dst = md->event_copy;

			do {
				cpy = min(md->mask + 1 - (offset & md->mask), len);
				memcpy(dst, &data[offset & md->mask], cpy);
				offset += cpy;
				dst += cpy;
				len -= cpy;
			} while (len);

			event = (union perf_event *) md->event_copy;
		}

		start += size;
	}

broken_event:
	if (prev)
		*prev = start;

	return event;
}

union perf_event *perf_mmap__read_forward(struct perf_mmap *md, bool check_messup)
{
	u64 head;
	u64 old = md->prev;

	/*
	 * Check if event was unmapped due to a POLLHUP/POLLERR.
	 */
	if (!atomic_read(&md->refcnt))
		return NULL;

	head = perf_mmap__read_head(md);

	return perf_mmap__read(md, check_messup, old, head, &md->prev);
}

union perf_event *
perf_mmap__read_backward(struct perf_mmap *md)
{
	u64 head, end;
	u64 start = md->prev;

	/*
	 * Check if event was unmapped due to a POLLHUP/POLLERR.
	 */
	if (!atomic_read(&md->refcnt))
		return NULL;

	head = perf_mmap__read_head(md);
	if (!head)
		return NULL;

	/*
	 * The 'head' pointer starts at 0, and the kernel subtracts
	 * sizeof(record) from it on every write, so 'head' is in fact
	 * negative.  The 'end' pointer is derived by adding the size of
	 * the ring buffer to 'head', which means the valid data we can
	 * read spans the whole ring buffer.  If 'end' is positive, the
	 * ring buffer has not been fully filled yet, so 'end' must be
	 * adjusted to 0.
	 *
	 * However, since both 'head' and 'end' are unsigned, we can't
	 * simply compare 'end' against 0.  Instead, compare '-head' with
	 * the size of the ring buffer, where '-head' is the number of
	 * bytes the kernel has written into it.
	 */
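	/*
	 * Worked example (hypothetical numbers): with a 4096-byte data
	 * area (md->mask + 1 == 4096), after 5000 bytes written head is
	 * (u64)-5000, so -head == 5000 >= 4096: the buffer has wrapped
	 * and end = head + 4096 marks the oldest still-valid byte.
	 * After only 100 bytes, -head == 100 < 4096 and end stays 0.
	 */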
	if (-head < (u64)(md->mask + 1))
		end = 0;
	else
		end = head + md->mask + 1;

	return perf_mmap__read(md, false, start, end, &md->prev);
}

union perf_event *perf_evlist__mmap_read_forward(struct perf_evlist *evlist, int idx)
{
	struct perf_mmap *md = &evlist->mmap[idx];

	/*
	 * Checking for messup is required for forward overwritable ring
	 * buffers: the memory pointed to by md->prev can be overwritten
	 * in that case.  There is no need for it with a read-write ring
	 * buffer: the kernel stops outputting when it hits md->prev
	 * (perf_mmap__consume()).
	 */
	return perf_mmap__read_forward(md, evlist->overwrite);
}

union perf_event *perf_evlist__mmap_read_backward(struct perf_evlist *evlist, int idx)
{
	struct perf_mmap *md = &evlist->mmap[idx];

	/*
	 * No need to check for messup with a backward ring buffer: we
	 * can always read arbitrarily long data from it, provided we
	 * don't forget to pause it before reading.
	 */
	return perf_mmap__read_backward(md);
}

union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
{
	if (!evlist->backward)
		return perf_evlist__mmap_read_forward(evlist, idx);
	return perf_evlist__mmap_read_backward(evlist, idx);
}

void perf_mmap__read_catchup(struct perf_mmap *md)
{
	u64 head;

	if (!atomic_read(&md->refcnt))
		return;

	head = perf_mmap__read_head(md);
	md->prev = head;
}

void perf_evlist__mmap_read_catchup(struct perf_evlist *evlist, int idx)
{
	perf_mmap__read_catchup(&evlist->mmap[idx]);
}

static bool perf_mmap__empty(struct perf_mmap *md)
{
	return perf_mmap__read_head(md) == md->prev && !md->auxtrace_mmap.base;
}

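/*
 * A perf_mmap's refcount starts at 2 (see perf_mmap__mmap()): one
 * reference for the evlist and one for the pollfd entry.  It drops to
 * zero, and the map is unmapped, once the fd has been filtered out after
 * a POLLHUP and the last queued event has been consumed.
 */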
static void perf_mmap__get(struct perf_mmap *map)
{
	atomic_inc(&map->refcnt);
}

static void perf_mmap__put(struct perf_mmap *md)
{
	BUG_ON(md->base && atomic_read(&md->refcnt) == 0);

	if (atomic_dec_and_test(&md->refcnt))
		perf_mmap__munmap(md);
}

void perf_mmap__consume(struct perf_mmap *md, bool overwrite)
{
	if (!overwrite) {
		u64 old = md->prev;

		perf_mmap__write_tail(md, old);
	}

	if (atomic_read(&md->refcnt) == 1 && perf_mmap__empty(md))
		perf_mmap__put(md);
}

void perf_evlist__mmap_consume(struct perf_evlist *evlist, int idx)
{
	perf_mmap__consume(&evlist->mmap[idx], evlist->overwrite);
}

int __weak auxtrace_mmap__mmap(struct auxtrace_mmap *mm __maybe_unused,
			       struct auxtrace_mmap_params *mp __maybe_unused,
			       void *userpg __maybe_unused,
			       int fd __maybe_unused)
{
	return 0;
}

void __weak auxtrace_mmap__munmap(struct auxtrace_mmap *mm __maybe_unused)
{
}

void __weak auxtrace_mmap_params__init(
			struct auxtrace_mmap_params *mp __maybe_unused,
			off_t auxtrace_offset __maybe_unused,
			unsigned int auxtrace_pages __maybe_unused,
			bool auxtrace_overwrite __maybe_unused)
{
}

void __weak auxtrace_mmap_params__set_idx(
			struct auxtrace_mmap_params *mp __maybe_unused,
			struct perf_evlist *evlist __maybe_unused,
			int idx __maybe_unused,
			bool per_cpu __maybe_unused)
{
}

static void perf_mmap__munmap(struct perf_mmap *map)
{
	if (map->base != NULL) {
		munmap(map->base, perf_mmap__mmap_len(map));
		map->base = NULL;
		map->fd = -1;
		atomic_set(&map->refcnt, 0);
	}
	auxtrace_mmap__munmap(&map->auxtrace_mmap);
}

static void perf_evlist__munmap_nofree(struct perf_evlist *evlist)
{
	int i;

	if (evlist->mmap)
		for (i = 0; i < evlist->nr_mmaps; i++)
			perf_mmap__munmap(&evlist->mmap[i]);

	if (evlist->backward_mmap)
		for (i = 0; i < evlist->nr_mmaps; i++)
			perf_mmap__munmap(&evlist->backward_mmap[i]);
}

void perf_evlist__munmap(struct perf_evlist *evlist)
{
	perf_evlist__munmap_nofree(evlist);
	zfree(&evlist->mmap);
	zfree(&evlist->backward_mmap);
}

static struct perf_mmap *perf_evlist__alloc_mmap(struct perf_evlist *evlist)
{
	int i;
	struct perf_mmap *map;

	evlist->nr_mmaps = cpu_map__nr(evlist->cpus);
	if (cpu_map__empty(evlist->cpus))
		evlist->nr_mmaps = thread_map__nr(evlist->threads);
	map = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
	if (!map)
		return NULL;

	for (i = 0; i < evlist->nr_mmaps; i++)
		map[i].fd = -1;
	return map;
}

struct mmap_params {
	int prot;
	int mask;
	struct auxtrace_mmap_params auxtrace_mp;
};

static int perf_mmap__mmap(struct perf_mmap *map,
			   struct mmap_params *mp, int fd)
{
	/*
	 * The last one will be done at perf_evlist__mmap_consume(), so that we
	 * make sure we don't prevent tools from consuming every last event in
	 * the ring buffer.
	 *
	 * I.e. we can get the POLLHUP meaning that the fd doesn't exist
	 * anymore, but the last events for it are still in the ring buffer,
	 * waiting to be consumed.
	 *
	 * Tools can choose to ignore this at their own discretion, but the
	 * evlist layer can't just drop it when filtering events in
	 * perf_evlist__filter_pollfd().
	 */
	atomic_set(&map->refcnt, 2);
	map->prev = 0;
	map->mask = mp->mask;
	map->base = mmap(NULL, perf_mmap__mmap_len(map), mp->prot,
			 MAP_SHARED, fd, 0);
	if (map->base == MAP_FAILED) {
		pr_debug2("failed to mmap perf event ring buffer, error %d\n",
			  errno);
		map->base = NULL;
		return -1;
	}
	map->fd = fd;

	if (auxtrace_mmap__mmap(&map->auxtrace_mmap,
				&mp->auxtrace_mp, map->base, fd))
		return -1;

	return 0;
}

static bool
perf_evlist__should_poll(struct perf_evlist *evlist __maybe_unused,
			 struct perf_evsel *evsel)
{
	if (evsel->attr.write_backward)
		return false;
	return true;
}

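/*
 * Map one (cpu, thread) slot for every evsel.  Events that write
 * backward get their own lazily allocated evlist->backward_mmap array;
 * everything else shares evlist->mmap.  The first event on each map
 * creates it, subsequent ones are redirected into it with
 * PERF_EVENT_IOC_SET_OUTPUT and just take a reference.
 */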
static int perf_evlist__mmap_per_evsel(struct perf_evlist *evlist, int idx,
				       struct mmap_params *mp, int cpu,
				       int thread, int *_output, int *_output_backward)
{
	struct perf_evsel *evsel;
	int revent;

	evlist__for_each_entry(evlist, evsel) {
		struct perf_mmap *maps = evlist->mmap;
		int *output = _output;
		int fd;

		if (evsel->attr.write_backward) {
			output = _output_backward;
			maps = evlist->backward_mmap;

			if (!maps) {
				maps = perf_evlist__alloc_mmap(evlist);
				if (!maps)
					return -1;
				evlist->backward_mmap = maps;
			}
		}

		if (evsel->system_wide && thread)
			continue;

		fd = FD(evsel, cpu, thread);

		if (*output == -1) {
			*output = fd;

			if (perf_mmap__mmap(&maps[idx], mp, *output) < 0)
				return -1;
		} else {
			if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, *output) != 0)
				return -1;

			perf_mmap__get(&maps[idx]);
		}

		revent = perf_evlist__should_poll(evlist, evsel) ? POLLIN : 0;

		/*
		 * The system_wide flag causes a selected event to be opened
		 * always without a pid.  Consequently it will never get a
		 * POLLHUP, but it is used for tracking in combination with
		 * other events, so it should not need to be polled anyway.
		 * Therefore don't add it for polling.
		 */
		if (!evsel->system_wide &&
		    __perf_evlist__add_pollfd(evlist, fd, &maps[idx], revent) < 0) {
			perf_mmap__put(&maps[idx]);
			return -1;
		}

		if (evsel->attr.read_format & PERF_FORMAT_ID) {
			if (perf_evlist__id_add_fd(evlist, evsel, cpu, thread,
						   fd) < 0)
				return -1;
			perf_evlist__set_sid_idx(evlist, evsel, idx, cpu,
						 thread);
		}
	}

	return 0;
}

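/*
 * With a real cpu map each ring buffer is tied to a cpu and shared by
 * all monitored threads running there; with a dummy cpu map (per-task
 * monitoring) there is one ring buffer per thread instead.
 */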
static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist,
				     struct mmap_params *mp)
{
	int cpu, thread;
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = thread_map__nr(evlist->threads);

	pr_debug2("perf event ring buffer mmapped per cpu\n");
	for (cpu = 0; cpu < nr_cpus; cpu++) {
		int output = -1;
		int output_backward = -1;

		auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, cpu,
					      true);

		for (thread = 0; thread < nr_threads; thread++) {
			if (perf_evlist__mmap_per_evsel(evlist, cpu, mp, cpu,
							thread, &output, &output_backward))
				goto out_unmap;
		}
	}

	return 0;

out_unmap:
	perf_evlist__munmap_nofree(evlist);
	return -1;
}

static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist,
					struct mmap_params *mp)
{
	int thread;
	int nr_threads = thread_map__nr(evlist->threads);

	pr_debug2("perf event ring buffer mmapped per thread\n");
	for (thread = 0; thread < nr_threads; thread++) {
		int output = -1;
		int output_backward = -1;

		auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, thread,
					      false);

		if (perf_evlist__mmap_per_evsel(evlist, thread, mp, 0, thread,
						&output, &output_backward))
			goto out_unmap;
	}

	return 0;

out_unmap:
	perf_evlist__munmap_nofree(evlist);
	return -1;
}

unsigned long perf_event_mlock_kb_in_pages(void)
{
	unsigned long pages;
	int max;

	if (sysctl__read_int("kernel/perf_event_mlock_kb", &max) < 0) {
		/*
		 * Pick a once upon a time good value, i.e. things look
		 * strange since we can't read a sysctl value, but let's not
		 * die yet...
		 */
		max = 512;
	} else {
		max -= (page_size / 1024);
	}

	pages = (max * 1024) / page_size;
	if (!is_power_of_2(pages))
		pages = rounddown_pow_of_two(pages);

	return pages;
}

static size_t perf_evlist__mmap_size(unsigned long pages)
{
	if (pages == UINT_MAX)
		pages = perf_event_mlock_kb_in_pages();
	else if (!is_power_of_2(pages))
		return 0;

	return (pages + 1) * page_size;
}

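/*
 * Parse a --mmap-pages style argument: either a plain page count or a
 * size with a B/K/M/G suffix, which is converted to pages.  For example
 * (assuming 4K pages), "512K" yields 128 pages, while a bare "100" is a
 * page count that gets rounded up to the next power of two, 128.
 */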
static long parse_pages_arg(const char *str, unsigned long min,
			    unsigned long max)
{
	unsigned long pages, val;
	static struct parse_tag tags[] = {
		{ .tag  = 'B', .mult = 1       },
		{ .tag  = 'K', .mult = 1 << 10 },
		{ .tag  = 'M', .mult = 1 << 20 },
		{ .tag  = 'G', .mult = 1 << 30 },
		{ .tag  = 0 },
	};

	if (str == NULL)
		return -EINVAL;

	val = parse_tag_value(str, tags);
	if (val != (unsigned long) -1) {
		/* we got file size value */
		pages = PERF_ALIGN(val, page_size) / page_size;
	} else {
		/* we got pages count value */
		char *eptr;
		pages = strtoul(str, &eptr, 10);
		if (*eptr != '\0')
			return -EINVAL;
	}

	if (pages == 0 && min == 0) {
		/* leave number of pages at 0 */
	} else if (!is_power_of_2(pages)) {
		/* round pages up to next power of 2 */
		pages = roundup_pow_of_two(pages);
		if (!pages)
			return -EINVAL;
		pr_info("rounding mmap pages size to %lu bytes (%lu pages)\n",
			pages * page_size, pages);
	}

	if (pages > max)
		return -EINVAL;

	return pages;
}

int __perf_evlist__parse_mmap_pages(unsigned int *mmap_pages, const char *str)
{
	unsigned long max = UINT_MAX;
	long pages;

	if (max > SIZE_MAX / page_size)
		max = SIZE_MAX / page_size;

	pages = parse_pages_arg(str, 1, max);
	if (pages < 0) {
		pr_err("Invalid argument for --mmap_pages/-m\n");
		return -1;
	}

	*mmap_pages = pages;
	return 0;
}

int perf_evlist__parse_mmap_pages(const struct option *opt, const char *str,
				  int unset __maybe_unused)
{
	return __perf_evlist__parse_mmap_pages(opt->value, str);
}

Adrian Hunterc83fa7f2013-10-18 15:29:12 +03001277/**
Adrian Hunter718c6022015-04-09 18:53:42 +03001278 * perf_evlist__mmap_ex - Create mmaps to receive events.
Adrian Hunterc83fa7f2013-10-18 15:29:12 +03001279 * @evlist: list of events
1280 * @pages: map length in pages
1281 * @overwrite: overwrite older events?
Adrian Hunter718c6022015-04-09 18:53:42 +03001282 * @auxtrace_pages - auxtrace map length in pages
1283 * @auxtrace_overwrite - overwrite older auxtrace data?
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -02001284 *
Adrian Hunterc83fa7f2013-10-18 15:29:12 +03001285 * If @overwrite is %false the user needs to signal event consumption using
1286 * perf_mmap__write_tail(). Using perf_evlist__mmap_read() does this
1287 * automatically.
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -02001288 *
Adrian Hunter718c6022015-04-09 18:53:42 +03001289 * Similarly, if @auxtrace_overwrite is %false the user needs to signal data
1290 * consumption using auxtrace_mmap__write_tail().
1291 *
Adrian Hunterc83fa7f2013-10-18 15:29:12 +03001292 * Return: %0 on success, negative error code otherwise.
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -02001293 */
Adrian Hunter718c6022015-04-09 18:53:42 +03001294int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages,
1295 bool overwrite, unsigned int auxtrace_pages,
1296 bool auxtrace_overwrite)
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -02001297{
Arnaldo Carvalho de Meloaece9482011-05-15 09:39:00 -03001298 struct perf_evsel *evsel;
Arnaldo Carvalho de Melo7e2ed092011-01-30 11:59:43 -02001299 const struct cpu_map *cpus = evlist->cpus;
1300 const struct thread_map *threads = evlist->threads;
Adrian Huntera8a8f3e2014-07-14 13:02:52 +03001301 struct mmap_params mp = {
1302 .prot = PROT_READ | (overwrite ? 0 : PROT_WRITE),
1303 };
Arnaldo Carvalho de Melo50a682c2011-11-09 09:10:47 -02001304
Wang Nan8db6d6b2016-07-14 08:34:35 +00001305 if (!evlist->mmap)
1306 evlist->mmap = perf_evlist__alloc_mmap(evlist);
1307 if (!evlist->mmap)
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -02001308 return -ENOMEM;
1309
Arnaldo Carvalho de Melo1b853372014-09-03 18:02:59 -03001310 if (evlist->pollfd.entries == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -02001311 return -ENOMEM;
1312
1313 evlist->overwrite = overwrite;
Jiri Olsa994a1f72013-09-01 12:36:12 +02001314 evlist->mmap_len = perf_evlist__mmap_size(pages);
Adrian Hunter2af68ef2013-10-18 15:29:07 +03001315 pr_debug("mmap size %zuB\n", evlist->mmap_len);
Adrian Huntera8a8f3e2014-07-14 13:02:52 +03001316 mp.mask = evlist->mmap_len - page_size - 1;
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -02001317
Adrian Hunter718c6022015-04-09 18:53:42 +03001318 auxtrace_mmap_params__init(&mp.auxtrace_mp, evlist->mmap_len,
1319 auxtrace_pages, auxtrace_overwrite);
1320
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03001321 evlist__for_each_entry(evlist, evsel) {
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -02001322 if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
Arnaldo Carvalho de Meloa91e5432011-03-10 11:15:54 -03001323 evsel->sample_id == NULL &&
Arnaldo Carvalho de Meloa14bb7a2012-09-26 12:41:14 -03001324 perf_evsel__alloc_id(evsel, cpu_map__nr(cpus), threads->nr) < 0)
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -02001325 return -ENOMEM;
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -02001326 }
1327
Sukadev Bhattiproluec1e7e42013-05-22 17:42:38 -07001328 if (cpu_map__empty(cpus))
Adrian Huntera8a8f3e2014-07-14 13:02:52 +03001329 return perf_evlist__mmap_per_thread(evlist, &mp);
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -02001330
Adrian Huntera8a8f3e2014-07-14 13:02:52 +03001331 return perf_evlist__mmap_per_cpu(evlist, &mp);
Arnaldo Carvalho de Melof8a95302011-01-30 10:46:46 -02001332}
Arnaldo Carvalho de Melo7e2ed092011-01-30 11:59:43 -02001333
Adrian Hunter718c6022015-04-09 18:53:42 +03001334int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
1335 bool overwrite)
1336{
1337 return perf_evlist__mmap_ex(evlist, pages, overwrite, 0, false);
1338}
1339
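/*
 * Example: the usual open-then-mmap sequence, as a sketch with error
 * handling trimmed; 'opts.mmap_pages' stands in for whatever page count
 * the caller parsed:
 *
 *	if (perf_evlist__open(evlist) < 0)
 *		goto out_error;
 *
 *	if (perf_evlist__mmap(evlist, opts.mmap_pages, false) < 0)
 *		goto out_error;
 */
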
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03001340int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target)
Arnaldo Carvalho de Melo7e2ed092011-01-30 11:59:43 -02001341{
Adrian Hunter74bfd2b2015-09-08 10:58:57 +03001342 struct cpu_map *cpus;
1343 struct thread_map *threads;
Arnaldo Carvalho de Melo7e2ed092011-01-30 11:59:43 -02001344
Adrian Hunter74bfd2b2015-09-08 10:58:57 +03001345 threads = thread_map__new_str(target->pid, target->tid, target->uid);
1346
1347 if (!threads)
Arnaldo Carvalho de Melo7e2ed092011-01-30 11:59:43 -02001348 return -1;
1349
Dongsheng Yang9c105fb2013-12-04 17:56:40 -05001350 if (target__uses_dummy_map(target))
Adrian Hunter74bfd2b2015-09-08 10:58:57 +03001351 cpus = cpu_map__dummy_new();
Namhyung Kim879d77d2012-05-16 18:45:48 +09001352 else
Adrian Hunter74bfd2b2015-09-08 10:58:57 +03001353 cpus = cpu_map__new(target->cpu_list);
Arnaldo Carvalho de Melo7e2ed092011-01-30 11:59:43 -02001354
Adrian Hunter74bfd2b2015-09-08 10:58:57 +03001355 if (!cpus)
Arnaldo Carvalho de Melo7e2ed092011-01-30 11:59:43 -02001356 goto out_delete_threads;
1357
Adrian Hunterec9a77a2015-09-08 10:58:52 +03001358 evlist->has_user_cpus = !!target->cpu_list;
1359
Adrian Hunter74bfd2b2015-09-08 10:58:57 +03001360 perf_evlist__set_maps(evlist, cpus, threads);
Adrian Hunterd5bc0562015-09-08 10:58:51 +03001361
1362 return 0;
Arnaldo Carvalho de Melo7e2ed092011-01-30 11:59:43 -02001363
1364out_delete_threads:
Adrian Hunter74bfd2b2015-09-08 10:58:57 +03001365 thread_map__put(threads);
Arnaldo Carvalho de Melo7e2ed092011-01-30 11:59:43 -02001366 return -1;
1367}
1368
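/*
 * Example sketch: resolving a command line target into the cpu and
 * thread maps the evlist will use; the target members are illustrative
 * and normally come from option parsing:
 *
 *	struct target target = {
 *		.pid	  = "1234",
 *		.cpu_list = "0-3",
 *	};
 *
 *	if (perf_evlist__create_maps(evlist, &target) < 0)
 *		return -1;
 */
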
Adrian Hunterd5bc0562015-09-08 10:58:51 +03001369void perf_evlist__set_maps(struct perf_evlist *evlist, struct cpu_map *cpus,
1370 struct thread_map *threads)
Jiri Olsa3de5cfb2015-07-21 14:31:30 +02001371{
Adrian Hunter934e0f22015-09-08 10:58:56 +03001372 /*
1373 * Allow for the possibility that one or another of the maps isn't being
1374 * changed, i.e. don't put it. Note we are assuming the maps that are
1375 * being applied are brand new and evlist is taking ownership of the
1376 * original reference count of 1. If that is not the case it is up to
1377 * the caller to increase the reference count.
1378 */
1379 if (cpus != evlist->cpus) {
Jiri Olsa3de5cfb2015-07-21 14:31:30 +02001380 cpu_map__put(evlist->cpus);
Arnaldo Carvalho de Meloa55e5662016-02-17 10:57:19 -03001381 evlist->cpus = cpu_map__get(cpus);
Adrian Hunter934e0f22015-09-08 10:58:56 +03001382 }
Jiri Olsa3de5cfb2015-07-21 14:31:30 +02001383
Adrian Hunter934e0f22015-09-08 10:58:56 +03001384 if (threads != evlist->threads) {
Jiri Olsa3de5cfb2015-07-21 14:31:30 +02001385 thread_map__put(evlist->threads);
Arnaldo Carvalho de Meloa55e5662016-02-17 10:57:19 -03001386 evlist->threads = thread_map__get(threads);
Adrian Hunter934e0f22015-09-08 10:58:56 +03001387 }
Jiri Olsa3de5cfb2015-07-21 14:31:30 +02001388
Adrian Hunterec9a77a2015-09-08 10:58:52 +03001389 perf_evlist__propagate_maps(evlist);
Jiri Olsa3de5cfb2015-07-21 14:31:30 +02001390}
1391
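/*
 * Example: handing freshly created maps to the evlist, which takes over
 * their initial reference; a caller that also keeps a pointer around
 * must grab its own reference first. A sketch, where cpu_map__new(NULL)
 * reads the online cpu map:
 *
 *	struct cpu_map *cpus = cpu_map__new(NULL);
 *	struct thread_map *threads = thread_map__new_dummy();
 *
 *	if (cpus && threads)
 *		perf_evlist__set_maps(evlist, cpus, threads);
 */
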
Arnaldo Carvalho de Melo22c8a372016-04-11 18:37:45 -03001392void __perf_evlist__set_sample_bit(struct perf_evlist *evlist,
1393 enum perf_event_sample_format bit)
1394{
1395 struct perf_evsel *evsel;
1396
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03001397 evlist__for_each_entry(evlist, evsel)
Arnaldo Carvalho de Melo22c8a372016-04-11 18:37:45 -03001398 __perf_evsel__set_sample_bit(evsel, bit);
1399}
1400
1401void __perf_evlist__reset_sample_bit(struct perf_evlist *evlist,
1402 enum perf_event_sample_format bit)
1403{
1404 struct perf_evsel *evsel;
1405
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03001406 evlist__for_each_entry(evlist, evsel)
Arnaldo Carvalho de Melo22c8a372016-04-11 18:37:45 -03001407 __perf_evsel__reset_sample_bit(evsel, bit);
1408}
1409
Arnaldo Carvalho de Melo23d4aad2015-03-24 19:23:47 -03001410int perf_evlist__apply_filters(struct perf_evlist *evlist, struct perf_evsel **err_evsel)
Frederic Weisbecker0a102472011-02-26 04:51:54 +01001411{
Frederic Weisbecker0a102472011-02-26 04:51:54 +01001412 struct perf_evsel *evsel;
Arnaldo Carvalho de Melo745cefc2012-09-26 15:07:39 -03001413 int err = 0;
1414 const int ncpus = cpu_map__nr(evlist->cpus),
Namhyung Kimb3a319d2013-03-11 16:43:14 +09001415 nthreads = thread_map__nr(evlist->threads);
Frederic Weisbecker0a102472011-02-26 04:51:54 +01001416
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03001417 evlist__for_each_entry(evlist, evsel) {
Arnaldo Carvalho de Melo745cefc2012-09-26 15:07:39 -03001418 if (evsel->filter == NULL)
Frederic Weisbecker0a102472011-02-26 04:51:54 +01001419 continue;
Arnaldo Carvalho de Melo745cefc2012-09-26 15:07:39 -03001420
Kan Liangd988d5e2015-08-21 02:23:14 -04001421 /*
1422 * Filters only work for tracepoint events, which don't have a cpu
1423 * limit, so the evlist and evsel cpu maps should always be the same.
1424 */
Arnaldo Carvalho de Melof47805a2015-07-03 15:53:49 -03001425 err = perf_evsel__apply_filter(evsel, ncpus, nthreads, evsel->filter);
Arnaldo Carvalho de Melo23d4aad2015-03-24 19:23:47 -03001426 if (err) {
1427 *err_evsel = evsel;
Arnaldo Carvalho de Melo745cefc2012-09-26 15:07:39 -03001428 break;
Arnaldo Carvalho de Melo23d4aad2015-03-24 19:23:47 -03001429 }
Frederic Weisbecker0a102472011-02-26 04:51:54 +01001430 }
1431
Arnaldo Carvalho de Melo745cefc2012-09-26 15:07:39 -03001432 return err;
1433}
1434
1435int perf_evlist__set_filter(struct perf_evlist *evlist, const char *filter)
1436{
1437 struct perf_evsel *evsel;
1438 int err = 0;
Arnaldo Carvalho de Melo745cefc2012-09-26 15:07:39 -03001439
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03001440 evlist__for_each_entry(evlist, evsel) {
Wang Nanfdf14722016-02-26 09:31:53 +00001441 if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
1442 continue;
1443
Arnaldo Carvalho de Melo94ad89b2015-07-03 17:42:03 -03001444 err = perf_evsel__set_filter(evsel, filter);
Arnaldo Carvalho de Melo745cefc2012-09-26 15:07:39 -03001445 if (err)
1446 break;
1447 }
1448
1449 return err;
Frederic Weisbecker0a102472011-02-26 04:51:54 +01001450}
Frederic Weisbecker74429962011-05-21 17:49:00 +02001451
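/*
 * Example sketch: applying one filter expression to every tracepoint
 * event in the list; the expression itself is illustrative:
 *
 *	if (perf_evlist__set_filter(evlist, "common_pid != 1") < 0)
 *		pr_err("failed to set the event filter\n");
 */
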
Arnaldo Carvalho de Melobe199ad2015-02-21 11:33:47 -08001452int perf_evlist__set_filter_pids(struct perf_evlist *evlist, size_t npids, pid_t *pids)
Arnaldo Carvalho de Melocfd70a22015-02-21 10:09:55 -08001453{
1454 char *filter;
Arnaldo Carvalho de Melobe199ad2015-02-21 11:33:47 -08001455 int ret = -1;
1456 size_t i;
Arnaldo Carvalho de Melocfd70a22015-02-21 10:09:55 -08001457
Arnaldo Carvalho de Melobe199ad2015-02-21 11:33:47 -08001458 for (i = 0; i < npids; ++i) {
1459 if (i == 0) {
1460 if (asprintf(&filter, "common_pid != %d", pids[i]) < 0)
1461 return -1;
1462 } else {
1463 char *tmp;
1464
1465 if (asprintf(&tmp, "%s && common_pid != %d", filter, pids[i]) < 0)
1466 goto out_free;
1467
1468 free(filter);
1469 filter = tmp;
1470 }
1471 }
Arnaldo Carvalho de Melocfd70a22015-02-21 10:09:55 -08001472
1473 ret = perf_evlist__set_filter(evlist, filter);
Arnaldo Carvalho de Melobe199ad2015-02-21 11:33:47 -08001474out_free:
Arnaldo Carvalho de Melocfd70a22015-02-21 10:09:55 -08001475 free(filter);
1476 return ret;
1477}
1478
Arnaldo Carvalho de Melobe199ad2015-02-21 11:33:47 -08001479int perf_evlist__set_filter_pid(struct perf_evlist *evlist, pid_t pid)
1480{
1481 return perf_evlist__set_filter_pids(evlist, 1, &pid);
1482}
1483
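/*
 * Example: excluding the tool's own pid from the tracepoint events,
 * in the style of the builtin tools (a sketch):
 *
 *	if (perf_evlist__set_filter_pid(evlist, getpid()) < 0)
 *		pr_err("cannot set the 'own pid' filter\n");
 */
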
Arnaldo Carvalho de Melo0c21f732012-08-14 16:42:15 -03001484bool perf_evlist__valid_sample_type(struct perf_evlist *evlist)
Frederic Weisbecker74429962011-05-21 17:49:00 +02001485{
Adrian Hunter75562572013-08-27 11:23:09 +03001486 struct perf_evsel *pos;
Arnaldo Carvalho de Meloc2a70652011-06-02 11:04:54 -03001487
Adrian Hunter75562572013-08-27 11:23:09 +03001488 if (evlist->nr_entries == 1)
1489 return true;
1490
1491 if (evlist->id_pos < 0 || evlist->is_pos < 0)
1492 return false;
1493
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03001494 evlist__for_each_entry(evlist, pos) {
Adrian Hunter75562572013-08-27 11:23:09 +03001495 if (pos->id_pos != evlist->id_pos ||
1496 pos->is_pos != evlist->is_pos)
Arnaldo Carvalho de Meloc2a70652011-06-02 11:04:54 -03001497 return false;
Frederic Weisbecker74429962011-05-21 17:49:00 +02001498 }
1499
Arnaldo Carvalho de Meloc2a70652011-06-02 11:04:54 -03001500 return true;
1501}
1502
Adrian Hunter75562572013-08-27 11:23:09 +03001503u64 __perf_evlist__combined_sample_type(struct perf_evlist *evlist)
Arnaldo Carvalho de Meloc2a70652011-06-02 11:04:54 -03001504{
Adrian Hunter75562572013-08-27 11:23:09 +03001505 struct perf_evsel *evsel;
1506
1507 if (evlist->combined_sample_type)
1508 return evlist->combined_sample_type;
1509
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03001510 evlist__for_each_entry(evlist, evsel)
Adrian Hunter75562572013-08-27 11:23:09 +03001511 evlist->combined_sample_type |= evsel->attr.sample_type;
1512
1513 return evlist->combined_sample_type;
1514}
1515
1516u64 perf_evlist__combined_sample_type(struct perf_evlist *evlist)
1517{
1518 evlist->combined_sample_type = 0;
1519 return __perf_evlist__combined_sample_type(evlist);
Arnaldo Carvalho de Meloc2a70652011-06-02 11:04:54 -03001520}
1521
Andi Kleen98df8582015-07-18 08:24:47 -07001522u64 perf_evlist__combined_branch_type(struct perf_evlist *evlist)
1523{
1524 struct perf_evsel *evsel;
1525 u64 branch_type = 0;
1526
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03001527 evlist__for_each_entry(evlist, evsel)
Andi Kleen98df8582015-07-18 08:24:47 -07001528 branch_type |= evsel->attr.branch_sample_type;
1529 return branch_type;
1530}
1531
Jiri Olsa9ede4732012-10-10 17:38:13 +02001532bool perf_evlist__valid_read_format(struct perf_evlist *evlist)
1533{
1534 struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;
1535 u64 read_format = first->attr.read_format;
1536 u64 sample_type = first->attr.sample_type;
1537
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03001538 evlist__for_each_entry(evlist, pos) {
Jiri Olsa9ede4732012-10-10 17:38:13 +02001539 if (read_format != pos->attr.read_format)
1540 return false;
1541 }
1542
1543 /* PERF_SAMPLE_READ implies PERF_FORMAT_ID. */
1544 if ((sample_type & PERF_SAMPLE_READ) &&
1545 !(read_format & PERF_FORMAT_ID)) {
1546 return false;
1547 }
1548
1549 return true;
1550}
1551
1552u64 perf_evlist__read_format(struct perf_evlist *evlist)
1553{
1554 struct perf_evsel *first = perf_evlist__first(evlist);
1555 return first->attr.read_format;
1556}
1557
Arnaldo Carvalho de Melo0c21f732012-08-14 16:42:15 -03001558u16 perf_evlist__id_hdr_size(struct perf_evlist *evlist)
Arnaldo Carvalho de Melo81e36bf2011-11-11 22:28:50 -02001559{
Arnaldo Carvalho de Melo0c21f732012-08-14 16:42:15 -03001560 struct perf_evsel *first = perf_evlist__first(evlist);
Arnaldo Carvalho de Melo81e36bf2011-11-11 22:28:50 -02001561 struct perf_sample *data;
1562 u64 sample_type;
1563 u16 size = 0;
1564
Arnaldo Carvalho de Melo81e36bf2011-11-11 22:28:50 -02001565 if (!first->attr.sample_id_all)
1566 goto out;
1567
1568 sample_type = first->attr.sample_type;
1569
1570 if (sample_type & PERF_SAMPLE_TID)
1571 size += sizeof(data->tid) * 2;
1572
1573 if (sample_type & PERF_SAMPLE_TIME)
1574 size += sizeof(data->time);
1575
1576 if (sample_type & PERF_SAMPLE_ID)
1577 size += sizeof(data->id);
1578
1579 if (sample_type & PERF_SAMPLE_STREAM_ID)
1580 size += sizeof(data->stream_id);
1581
1582 if (sample_type & PERF_SAMPLE_CPU)
1583 size += sizeof(data->cpu) * 2;
Adrian Hunter75562572013-08-27 11:23:09 +03001584
1585 if (sample_type & PERF_SAMPLE_IDENTIFIER)
1586 size += sizeof(data->id);
Arnaldo Carvalho de Melo81e36bf2011-11-11 22:28:50 -02001587out:
1588 return size;
1589}
1590
Arnaldo Carvalho de Melo0c21f732012-08-14 16:42:15 -03001591bool perf_evlist__valid_sample_id_all(struct perf_evlist *evlist)
Arnaldo Carvalho de Meloc2a70652011-06-02 11:04:54 -03001592{
Arnaldo Carvalho de Melo0c21f732012-08-14 16:42:15 -03001593 struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;
Arnaldo Carvalho de Meloc2a70652011-06-02 11:04:54 -03001594
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03001595 evlist__for_each_entry_continue(evlist, pos) {
Arnaldo Carvalho de Meloc2a70652011-06-02 11:04:54 -03001596 if (first->attr.sample_id_all != pos->attr.sample_id_all)
1597 return false;
1598 }
1599
1600 return true;
Frederic Weisbecker74429962011-05-21 17:49:00 +02001601}
1602
Arnaldo Carvalho de Melo0c21f732012-08-14 16:42:15 -03001603bool perf_evlist__sample_id_all(struct perf_evlist *evlist)
Frederic Weisbecker74429962011-05-21 17:49:00 +02001604{
Arnaldo Carvalho de Melo0c21f732012-08-14 16:42:15 -03001605 struct perf_evsel *first = perf_evlist__first(evlist);
Arnaldo Carvalho de Meloc2a70652011-06-02 11:04:54 -03001606 return first->attr.sample_id_all;
Frederic Weisbecker74429962011-05-21 17:49:00 +02001607}
Arnaldo Carvalho de Melo81cce8d2011-10-05 19:11:32 -03001608
1609void perf_evlist__set_selected(struct perf_evlist *evlist,
1610 struct perf_evsel *evsel)
1611{
1612 evlist->selected = evsel;
1613}
Arnaldo Carvalho de Melo727ab042011-10-25 10:42:19 -02001614
Namhyung Kima74b4b62013-03-15 14:48:48 +09001615void perf_evlist__close(struct perf_evlist *evlist)
1616{
1617 struct perf_evsel *evsel;
1618 int ncpus = cpu_map__nr(evlist->cpus);
1619 int nthreads = thread_map__nr(evlist->threads);
Stephane Eranian8ad92192014-01-17 16:34:06 +01001620 int n;
Namhyung Kima74b4b62013-03-15 14:48:48 +09001621
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03001622 evlist__for_each_entry_reverse(evlist, evsel) {
Stephane Eranian8ad92192014-01-17 16:34:06 +01001623 n = evsel->cpus ? evsel->cpus->nr : ncpus;
1624 perf_evsel__close(evsel, n, nthreads);
1625 }
Namhyung Kima74b4b62013-03-15 14:48:48 +09001626}
1627
Arnaldo Carvalho de Melo4112eb12014-10-10 15:55:15 -03001628static int perf_evlist__create_syswide_maps(struct perf_evlist *evlist)
1629{
Adrian Hunter8c0498b2015-09-08 10:59:00 +03001630 struct cpu_map *cpus;
1631 struct thread_map *threads;
Arnaldo Carvalho de Melo4112eb12014-10-10 15:55:15 -03001632 int err = -ENOMEM;
1633
1634 /*
1635 * Try reading /sys/devices/system/cpu/online to get
1636 * an all cpus map.
1637 *
1638 * FIXME: -ENOMEM is the best we can do here, the cpu_map
1639 * code needs an overhaul to properly forward the
1640 * error, and we may not want to do that fallback to a
1641 * default cpu identity map :-\
1642 */
Adrian Hunter8c0498b2015-09-08 10:59:00 +03001643 cpus = cpu_map__new(NULL);
1644 if (!cpus)
Arnaldo Carvalho de Melo4112eb12014-10-10 15:55:15 -03001645 goto out;
1646
Adrian Hunter8c0498b2015-09-08 10:59:00 +03001647 threads = thread_map__new_dummy();
1648 if (!threads)
1649 goto out_put;
Arnaldo Carvalho de Melo4112eb12014-10-10 15:55:15 -03001650
Adrian Hunter8c0498b2015-09-08 10:59:00 +03001651 perf_evlist__set_maps(evlist, cpus, threads);
Arnaldo Carvalho de Melo4112eb12014-10-10 15:55:15 -03001652out:
1653 return err;
Adrian Hunter8c0498b2015-09-08 10:59:00 +03001654out_put:
1655 cpu_map__put(cpus);
Arnaldo Carvalho de Melo4112eb12014-10-10 15:55:15 -03001656 goto out;
1657}
1658
Jiri Olsa6a4bb042012-08-08 12:22:36 +02001659int perf_evlist__open(struct perf_evlist *evlist)
Arnaldo Carvalho de Melo727ab042011-10-25 10:42:19 -02001660{
Jiri Olsa6a4bb042012-08-08 12:22:36 +02001661 struct perf_evsel *evsel;
Namhyung Kima74b4b62013-03-15 14:48:48 +09001662 int err;
Arnaldo Carvalho de Melo727ab042011-10-25 10:42:19 -02001663
Arnaldo Carvalho de Melo4112eb12014-10-10 15:55:15 -03001664 /*
1665 * Default: one fd per CPU, all threads, aka systemwide,
1666 * as sys_perf_event_open(cpu = -1, thread = -1) fails with EINVAL.
1667 */
1668 if (evlist->threads == NULL && evlist->cpus == NULL) {
1669 err = perf_evlist__create_syswide_maps(evlist);
1670 if (err < 0)
1671 goto out_err;
1672 }
1673
Adrian Hunter733cd2f2013-09-06 22:40:11 +03001674 perf_evlist__update_id_pos(evlist);
1675
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03001676 evlist__for_each_entry(evlist, evsel) {
Adrian Hunter23df7f72016-01-07 10:13:59 +01001677 err = perf_evsel__open(evsel, evsel->cpus, evsel->threads);
Arnaldo Carvalho de Melo727ab042011-10-25 10:42:19 -02001678 if (err < 0)
1679 goto out_err;
1680 }
1681
1682 return 0;
1683out_err:
Namhyung Kima74b4b62013-03-15 14:48:48 +09001684 perf_evlist__close(evlist);
Namhyung Kim41c21a62012-02-23 12:13:36 +09001685 errno = -err;
Arnaldo Carvalho de Melo727ab042011-10-25 10:42:19 -02001686 return err;
1687}
Arnaldo Carvalho de Melo35b9d882011-11-09 08:47:15 -02001688
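/*
 * Example: opening the evlist and turning a failure into a human
 * readable hint via perf_evlist__strerror_open() further down in this
 * file; a sketch, the buffer size is arbitrary:
 *
 *	char errbuf[BUFSIZ];
 *
 *	if (perf_evlist__open(evlist) < 0) {
 *		perf_evlist__strerror_open(evlist, errno, errbuf, sizeof(errbuf));
 *		pr_err("%s\n", errbuf);
 *	}
 */
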
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03001689int perf_evlist__prepare_workload(struct perf_evlist *evlist, struct target *target,
Namhyung Kim55e162e2013-03-11 16:43:17 +09001690 const char *argv[], bool pipe_output,
Arnaldo Carvalho de Melo735f7e02014-01-03 14:56:49 -03001691 void (*exec_error)(int signo, siginfo_t *info, void *ucontext))
Arnaldo Carvalho de Melo35b9d882011-11-09 08:47:15 -02001692{
1693 int child_ready_pipe[2], go_pipe[2];
1694 char bf;
1695
1696 if (pipe(child_ready_pipe) < 0) {
1697 perror("failed to create 'ready' pipe");
1698 return -1;
1699 }
1700
1701 if (pipe(go_pipe) < 0) {
1702 perror("failed to create 'go' pipe");
1703 goto out_close_ready_pipe;
1704 }
1705
1706 evlist->workload.pid = fork();
1707 if (evlist->workload.pid < 0) {
1708 perror("failed to fork");
1709 goto out_close_pipes;
1710 }
1711
1712 if (!evlist->workload.pid) {
Arnaldo Carvalho de Melo5f1c4222014-07-28 12:39:50 -03001713 int ret;
1714
Namhyung Kim119fa3c2013-03-11 16:43:16 +09001715 if (pipe_output)
Arnaldo Carvalho de Melo35b9d882011-11-09 08:47:15 -02001716 dup2(2, 1);
1717
David Ahern0817df02013-05-25 17:50:39 -06001718 signal(SIGTERM, SIG_DFL);
1719
Arnaldo Carvalho de Melo35b9d882011-11-09 08:47:15 -02001720 close(child_ready_pipe[0]);
1721 close(go_pipe[1]);
1722 fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);
1723
1724 /*
Arnaldo Carvalho de Melo35b9d882011-11-09 08:47:15 -02001725 * Tell the parent we're ready to go
1726 */
1727 close(child_ready_pipe[1]);
1728
1729 /*
1730 * Wait until the parent tells us to go.
1731 */
Arnaldo Carvalho de Melo5f1c4222014-07-28 12:39:50 -03001732 ret = read(go_pipe[0], &bf, 1);
1733 /*
1734 * The parent will ask for the execvp() to be performed by
1735 * writing exactly one byte to workload.cork_fd, usually via
1736 * perf_evlist__start_workload().
1737 *
Arnaldo Carvalho de Melo20f86fc2015-02-03 13:29:05 -03001738 * For cancelling the workload without actually running it,
Arnaldo Carvalho de Melo5f1c4222014-07-28 12:39:50 -03001739 * the parent will just close workload.cork_fd, without writing
1740 * anything, i.e. read will return zero and we just exit()
1741 * here.
1742 */
1743 if (ret != 1) {
1744 if (ret == -1)
1745 perror("unable to read pipe");
1746 exit(ret);
1747 }
Arnaldo Carvalho de Melo35b9d882011-11-09 08:47:15 -02001748
1749 execvp(argv[0], (char **)argv);
1750
Arnaldo Carvalho de Melo735f7e02014-01-03 14:56:49 -03001751 if (exec_error) {
Arnaldo Carvalho de Melof33cbe72014-01-02 15:11:25 -03001752 union sigval val;
1753
1754 val.sival_int = errno;
1755 if (sigqueue(getppid(), SIGUSR1, val))
1756 perror(argv[0]);
1757 } else
1758 perror(argv[0]);
Arnaldo Carvalho de Melo35b9d882011-11-09 08:47:15 -02001759 exit(-1);
1760 }
1761
Arnaldo Carvalho de Melo735f7e02014-01-03 14:56:49 -03001762 if (exec_error) {
1763 struct sigaction act = {
1764 .sa_flags = SA_SIGINFO,
1765 .sa_sigaction = exec_error,
1766 };
1767 sigaction(SIGUSR1, &act, NULL);
1768 }
1769
Arnaldo Carvalho de Melo1aaf63b2014-10-10 14:29:49 -03001770 if (target__none(target)) {
1771 if (evlist->threads == NULL) {
1772 fprintf(stderr, "FATAL: evlist->threads need to be set at this point (%s:%d).\n",
1773 __func__, __LINE__);
1774 goto out_close_pipes;
1775 }
Jiri Olsae13798c2015-06-23 00:36:02 +02001776 thread_map__set_pid(evlist->threads, 0, evlist->workload.pid);
Arnaldo Carvalho de Melo1aaf63b2014-10-10 14:29:49 -03001777 }
Arnaldo Carvalho de Melo35b9d882011-11-09 08:47:15 -02001778
1779 close(child_ready_pipe[1]);
1780 close(go_pipe[0]);
1781 /*
1782 * wait for child to settle
1783 */
1784 if (read(child_ready_pipe[0], &bf, 1) == -1) {
1785 perror("unable to read pipe");
1786 goto out_close_pipes;
1787 }
1788
Namhyung Kimbcf31452013-06-26 16:14:15 +09001789 fcntl(go_pipe[1], F_SETFD, FD_CLOEXEC);
Arnaldo Carvalho de Melo35b9d882011-11-09 08:47:15 -02001790 evlist->workload.cork_fd = go_pipe[1];
1791 close(child_ready_pipe[0]);
1792 return 0;
1793
1794out_close_pipes:
1795 close(go_pipe[0]);
1796 close(go_pipe[1]);
1797out_close_ready_pipe:
1798 close(child_ready_pipe[0]);
1799 close(child_ready_pipe[1]);
1800 return -1;
1801}
1802
1803int perf_evlist__start_workload(struct perf_evlist *evlist)
1804{
1805 if (evlist->workload.cork_fd > 0) {
David Ahernb3824402013-07-02 13:27:21 -06001806 char bf = 0;
Namhyung Kimbcf31452013-06-26 16:14:15 +09001807 int ret;
Arnaldo Carvalho de Melo35b9d882011-11-09 08:47:15 -02001808 /*
1809 * Remove the cork, let it rip!
1810 */
Namhyung Kimbcf31452013-06-26 16:14:15 +09001811 ret = write(evlist->workload.cork_fd, &bf, 1);
1812 if (ret < 0)
1813 perror("enable to write to pipe");
1814
1815 close(evlist->workload.cork_fd);
1816 return ret;
Arnaldo Carvalho de Melo35b9d882011-11-09 08:47:15 -02001817 }
1818
1819 return 0;
1820}
Arnaldo Carvalho de Melocb0b29e2012-08-02 11:42:57 -03001821
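/*
 * Example sketch of the workload handshake: fork the (corked) workload
 * first, set everything else up, then pop the cork; 'argv' is the
 * command to run and error paths are trimmed:
 *
 *	if (perf_evlist__prepare_workload(evlist, &target, argv,
 *					  false, NULL) < 0)
 *		return -1;
 *
 *	(open, mmap and enable the events here)
 *
 *	perf_evlist__start_workload(evlist);
 */
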
Arnaldo Carvalho de Meloa3f698f2012-08-02 12:23:46 -03001822int perf_evlist__parse_sample(struct perf_evlist *evlist, union perf_event *event,
Arnaldo Carvalho de Melo0807d2d2012-09-26 12:48:18 -03001823 struct perf_sample *sample)
Arnaldo Carvalho de Melocb0b29e2012-08-02 11:42:57 -03001824{
Adrian Hunter75562572013-08-27 11:23:09 +03001825 struct perf_evsel *evsel = perf_evlist__event2evsel(evlist, event);
1826
1827 if (!evsel)
1828 return -EFAULT;
Arnaldo Carvalho de Melo0807d2d2012-09-26 12:48:18 -03001829 return perf_evsel__parse_sample(evsel, event, sample);
Arnaldo Carvalho de Melocb0b29e2012-08-02 11:42:57 -03001830}
Arnaldo Carvalho de Melo78f067b2012-09-06 14:54:11 -03001831
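/*
 * Example sketch: decoding an event taken off a mmap ring, with 'event'
 * as returned by perf_evlist__mmap_read():
 *
 *	struct perf_sample sample;
 *
 *	if (perf_evlist__parse_sample(evlist, event, &sample) < 0)
 *		pr_err("cannot parse the sample\n");
 */
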
1832size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp)
1833{
1834 struct perf_evsel *evsel;
1835 size_t printed = 0;
1836
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03001837 evlist__for_each_entry(evlist, evsel) {
Arnaldo Carvalho de Melo78f067b2012-09-06 14:54:11 -03001838 printed += fprintf(fp, "%s%s", evsel->idx ? ", " : "",
1839 perf_evsel__name(evsel));
1840 }
1841
Davidlohr Buesob2222132013-11-12 22:24:24 -08001842 return printed + fprintf(fp, "\n");
Arnaldo Carvalho de Melo78f067b2012-09-06 14:54:11 -03001843}
Arnaldo Carvalho de Melo6ef068c2013-10-17 12:07:58 -03001844
Arnaldo Carvalho de Melod9aade7f2016-02-18 13:34:09 -03001845int perf_evlist__strerror_open(struct perf_evlist *evlist,
Arnaldo Carvalho de Meloa8f23d82013-10-17 17:38:29 -03001846 int err, char *buf, size_t size)
1847{
1848 int printed, value;
Arnaldo Carvalho de Meloc8b5f2c2016-07-06 11:56:20 -03001849 char sbuf[STRERR_BUFSIZE], *emsg = str_error_r(err, sbuf, sizeof(sbuf));
Arnaldo Carvalho de Meloa8f23d82013-10-17 17:38:29 -03001850
1851 switch (err) {
1852 case EACCES:
1853 case EPERM:
1854 printed = scnprintf(buf, size,
1855 "Error:\t%s.\n"
1856 "Hint:\tCheck /proc/sys/kernel/perf_event_paranoid setting.", emsg);
1857
Adrian Hunter1a472452013-12-11 14:36:23 +02001858 value = perf_event_paranoid();
Arnaldo Carvalho de Meloa8f23d82013-10-17 17:38:29 -03001859
1860 printed += scnprintf(buf + printed, size - printed, "\nHint:\t");
1861
1862 if (value >= 2) {
1863 printed += scnprintf(buf + printed, size - printed,
1864 "For your workloads it needs to be <= 1\nHint:\t");
1865 }
1866 printed += scnprintf(buf + printed, size - printed,
Arnaldo Carvalho de Melo5229e362014-06-10 17:18:54 -03001867 "For system wide tracing it needs to be set to -1.\n");
Arnaldo Carvalho de Meloa8f23d82013-10-17 17:38:29 -03001868
1869 printed += scnprintf(buf + printed, size - printed,
Arnaldo Carvalho de Melo5229e362014-06-10 17:18:54 -03001870 "Hint:\tTry: 'sudo sh -c \"echo -1 > /proc/sys/kernel/perf_event_paranoid\"'\n"
1871 "Hint:\tThe current value is %d.", value);
Arnaldo Carvalho de Meloa8f23d82013-10-17 17:38:29 -03001872 break;
Arnaldo Carvalho de Melod9aade7f2016-02-18 13:34:09 -03001873 case EINVAL: {
1874 struct perf_evsel *first = perf_evlist__first(evlist);
1875 int max_freq;
1876
1877 if (sysctl__read_int("kernel/perf_event_max_sample_rate", &max_freq) < 0)
1878 goto out_default;
1879
1880 if (first->attr.sample_freq < (u64)max_freq)
1881 goto out_default;
1882
1883 printed = scnprintf(buf, size,
1884 "Error:\t%s.\n"
1885 "Hint:\tCheck /proc/sys/kernel/perf_event_max_sample_rate.\n"
1886 "Hint:\tThe current value is %d and %" PRIu64 " is being requested.",
1887 emsg, max_freq, first->attr.sample_freq);
1888 break;
1889 }
Arnaldo Carvalho de Meloa8f23d82013-10-17 17:38:29 -03001890 default:
Arnaldo Carvalho de Melod9aade7f2016-02-18 13:34:09 -03001891out_default:
Arnaldo Carvalho de Meloa8f23d82013-10-17 17:38:29 -03001892 scnprintf(buf, size, "%s", emsg);
1893 break;
1894 }
1895
1896 return 0;
1897}
Adrian Huntera025e4f2013-12-11 14:36:35 +02001898
Arnaldo Carvalho de Melo956fa572014-12-11 18:03:01 -03001899int perf_evlist__strerror_mmap(struct perf_evlist *evlist, int err, char *buf, size_t size)
1900{
Arnaldo Carvalho de Meloc8b5f2c2016-07-06 11:56:20 -03001901 char sbuf[STRERR_BUFSIZE], *emsg = str_error_r(err, sbuf, sizeof(sbuf));
Arnaldo Carvalho de Meloe965bea2014-12-12 16:25:33 -03001902 int pages_attempted = evlist->mmap_len / 1024, pages_max_per_user = 0, printed = 0;
Arnaldo Carvalho de Melo956fa572014-12-11 18:03:01 -03001903
1904 switch (err) {
1905 case EPERM:
Arnaldo Carvalho de Meloe5d4a292014-12-12 15:59:51 -03001906 sysctl__read_int("kernel/perf_event_mlock_kb", &pages_max_per_user);
Arnaldo Carvalho de Meloe965bea2014-12-12 16:25:33 -03001907 printed += scnprintf(buf + printed, size - printed,
1908 "Error:\t%s.\n"
Arnaldo Carvalho de Melo956fa572014-12-11 18:03:01 -03001909 "Hint:\tCheck /proc/sys/kernel/perf_event_mlock_kb (%d kB) setting.\n"
Arnaldo Carvalho de Meloe965bea2014-12-12 16:25:33 -03001910 "Hint:\tTried using %d kB.\n",
Arnaldo Carvalho de Meloe5d4a292014-12-12 15:59:51 -03001911 emsg, pages_max_per_user, pages_attempted);
Arnaldo Carvalho de Meloe965bea2014-12-12 16:25:33 -03001912
1913 if (pages_attempted >= pages_max_per_user) {
1914 printed += scnprintf(buf + printed, size - printed,
1915 "Hint:\tTry 'sudo sh -c \"echo %d > /proc/sys/kernel/perf_event_mlock_kb\"', or\n",
1916 pages_max_per_user + pages_attempted);
1917 }
1918
1919 printed += scnprintf(buf + printed, size - printed,
1920 "Hint:\tTry using a smaller -m/--mmap-pages value.");
Arnaldo Carvalho de Melo956fa572014-12-11 18:03:01 -03001921 break;
1922 default:
1923 scnprintf(buf, size, "%s", emsg);
1924 break;
1925 }
1926
1927 return 0;
1928}
1929
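/*
 * Example: reporting a failed perf_evlist__mmap() call with the helper
 * above; a sketch, assuming errno still holds the mmap failure:
 *
 *	if (perf_evlist__mmap(evlist, opts.mmap_pages, false) < 0) {
 *		perf_evlist__strerror_mmap(evlist, errno, errbuf, sizeof(errbuf));
 *		pr_err("%s\n", errbuf);
 *	}
 */
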
Adrian Huntera025e4f2013-12-11 14:36:35 +02001930void perf_evlist__to_front(struct perf_evlist *evlist,
1931 struct perf_evsel *move_evsel)
1932{
1933 struct perf_evsel *evsel, *n;
1934 LIST_HEAD(move);
1935
1936 if (move_evsel == perf_evlist__first(evlist))
1937 return;
1938
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03001939 evlist__for_each_entry_safe(evlist, n, evsel) {
Adrian Huntera025e4f2013-12-11 14:36:35 +02001940 if (evsel->leader == move_evsel->leader)
1941 list_move_tail(&evsel->node, &move);
1942 }
1943
1944 list_splice(&move, &evlist->entries);
1945}
Adrian Hunter60b08962014-07-31 09:00:52 +03001946
1947void perf_evlist__set_tracking_event(struct perf_evlist *evlist,
1948 struct perf_evsel *tracking_evsel)
1949{
1950 struct perf_evsel *evsel;
1951
1952 if (tracking_evsel->tracking)
1953 return;
1954
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03001955 evlist__for_each_entry(evlist, evsel) {
Adrian Hunter60b08962014-07-31 09:00:52 +03001956 if (evsel != tracking_evsel)
1957 evsel->tracking = false;
1958 }
1959
1960 tracking_evsel->tracking = true;
1961}
Wang Nan7630b3e2016-02-22 09:10:33 +00001962
1963struct perf_evsel *
1964perf_evlist__find_evsel_by_str(struct perf_evlist *evlist,
1965 const char *str)
1966{
1967 struct perf_evsel *evsel;
1968
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03001969 evlist__for_each_entry(evlist, evsel) {
Wang Nan7630b3e2016-02-22 09:10:33 +00001970 if (!evsel->name)
1971 continue;
1972 if (strcmp(str, evsel->name) == 0)
1973 return evsel;
1974 }
1975
1976 return NULL;
1977}