/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */
#include "util.h"
#include <api/fs/fs.h>
#include <errno.h>
#include <inttypes.h>
#include <poll.h>
#include "cpumap.h"
#include "thread_map.h"
#include "target.h"
#include "evlist.h"
#include "evsel.h"
#include "debug.h"
#include "units.h"
#include "asm/bug.h"
#include <signal.h>
#include <unistd.h>

#include "parse-events.h"
#include <subcmd/parse-options.h>

#include <sys/ioctl.h>
#include <sys/mman.h>

#include <linux/bitops.h>
#include <linux/hash.h>
#include <linux/log2.h>
#include <linux/err.h>

static void perf_mmap__munmap(struct perf_mmap *map);
static void perf_mmap__put(struct perf_mmap *map);

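/*
 * Helpers to index the per-evsel xyarrays: FD(e, x, y) yields the
 * perf_event_open() fd for evsel 'e' at cpu index 'x' and thread index 'y';
 * SID(e, x, y) yields the matching perf_sample_id slot.
 */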
#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
#define SID(e, x, y) xyarray__entry(e->sample_id, x, y)

void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
		       struct thread_map *threads)
{
	int i;

	for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i)
		INIT_HLIST_HEAD(&evlist->heads[i]);
	INIT_LIST_HEAD(&evlist->entries);
	perf_evlist__set_maps(evlist, cpus, threads);
	fdarray__init(&evlist->pollfd, 64);
	evlist->workload.pid = -1;
	evlist->bkw_mmap_state = BKW_MMAP_NOTREADY;
}

struct perf_evlist *perf_evlist__new(void)
{
	struct perf_evlist *evlist = zalloc(sizeof(*evlist));

	if (evlist != NULL)
		perf_evlist__init(evlist, NULL, NULL);

	return evlist;
}

struct perf_evlist *perf_evlist__new_default(void)
{
	struct perf_evlist *evlist = perf_evlist__new();

	if (evlist && perf_evlist__add_default(evlist)) {
		perf_evlist__delete(evlist);
		evlist = NULL;
	}

	return evlist;
}

struct perf_evlist *perf_evlist__new_dummy(void)
{
	struct perf_evlist *evlist = perf_evlist__new();

	if (evlist && perf_evlist__add_dummy(evlist)) {
		perf_evlist__delete(evlist);
		evlist = NULL;
	}

	return evlist;
}

/**
 * perf_evlist__set_id_pos - set the positions of event ids.
 * @evlist: selected event list
 *
 * Events with compatible sample types all have the same id_pos
 * and is_pos. For convenience, put a copy on evlist.
 */
void perf_evlist__set_id_pos(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);

	evlist->id_pos = first->id_pos;
	evlist->is_pos = first->is_pos;
}

static void perf_evlist__update_id_pos(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	evlist__for_each_entry(evlist, evsel)
		perf_evsel__calc_id_pos(evsel);

	perf_evlist__set_id_pos(evlist);
}

static void perf_evlist__purge(struct perf_evlist *evlist)
{
	struct perf_evsel *pos, *n;

	evlist__for_each_entry_safe(evlist, n, pos) {
		list_del_init(&pos->node);
		pos->evlist = NULL;
		perf_evsel__delete(pos);
	}

	evlist->nr_entries = 0;
}

void perf_evlist__exit(struct perf_evlist *evlist)
{
	zfree(&evlist->mmap);
	zfree(&evlist->backward_mmap);
	fdarray__exit(&evlist->pollfd);
}

void perf_evlist__delete(struct perf_evlist *evlist)
{
	if (evlist == NULL)
		return;

	perf_evlist__munmap(evlist);
	perf_evlist__close(evlist);
	cpu_map__put(evlist->cpus);
	thread_map__put(evlist->threads);
	evlist->cpus = NULL;
	evlist->threads = NULL;
	perf_evlist__purge(evlist);
	perf_evlist__exit(evlist);
	free(evlist);
}

static void __perf_evlist__propagate_maps(struct perf_evlist *evlist,
					  struct perf_evsel *evsel)
{
	/*
	 * We may already have cpus for the evsel (via PMU sysfs), so
	 * keep them if there's no target cpu list defined.
	 */
	if (!evsel->own_cpus || evlist->has_user_cpus) {
		cpu_map__put(evsel->cpus);
		evsel->cpus = cpu_map__get(evlist->cpus);
	} else if (evsel->cpus != evsel->own_cpus) {
		cpu_map__put(evsel->cpus);
		evsel->cpus = cpu_map__get(evsel->own_cpus);
	}

	thread_map__put(evsel->threads);
	evsel->threads = thread_map__get(evlist->threads);
}

static void perf_evlist__propagate_maps(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	evlist__for_each_entry(evlist, evsel)
		__perf_evlist__propagate_maps(evlist, evsel);
}

void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry)
{
	entry->evlist = evlist;
	list_add_tail(&entry->node, &evlist->entries);
	entry->idx = evlist->nr_entries;
	entry->tracking = !entry->idx;

	if (!evlist->nr_entries++)
		perf_evlist__set_id_pos(evlist);

	__perf_evlist__propagate_maps(evlist, entry);
}

void perf_evlist__remove(struct perf_evlist *evlist, struct perf_evsel *evsel)
{
	evsel->evlist = NULL;
	list_del_init(&evsel->node);
	evlist->nr_entries -= 1;
}

void perf_evlist__splice_list_tail(struct perf_evlist *evlist,
				   struct list_head *list)
{
	struct perf_evsel *evsel, *temp;

	__evlist__for_each_entry_safe(list, temp, evsel) {
		list_del_init(&evsel->node);
		perf_evlist__add(evlist, evsel);
	}
}

void __perf_evlist__set_leader(struct list_head *list)
{
	struct perf_evsel *evsel, *leader;

	leader = list_entry(list->next, struct perf_evsel, node);
	evsel = list_entry(list->prev, struct perf_evsel, node);

	leader->nr_members = evsel->idx - leader->idx + 1;

	__evlist__for_each_entry(list, evsel) {
		evsel->leader = leader;
	}
}

void perf_evlist__set_leader(struct perf_evlist *evlist)
{
	if (evlist->nr_entries) {
		evlist->nr_groups = evlist->nr_entries > 1 ? 1 : 0;
		__perf_evlist__set_leader(&evlist->entries);
	}
}

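/*
 * Find the highest precise_ip level the running kernel supports: start at
 * the maximum (3) and retry sys_perf_event_open() with one level less until
 * the open succeeds or precise_ip reaches 0.
 */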
void perf_event_attr__set_max_precise_ip(struct perf_event_attr *attr)
{
	attr->precise_ip = 3;

	while (attr->precise_ip != 0) {
		int fd = sys_perf_event_open(attr, 0, -1, -1, 0);
		if (fd != -1) {
			close(fd);
			break;
		}
		--attr->precise_ip;
	}
}

int perf_evlist__add_default(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel = perf_evsel__new_cycles();

	if (evsel == NULL)
		return -ENOMEM;

	perf_evlist__add(evlist, evsel);
	return 0;
}

int perf_evlist__add_dummy(struct perf_evlist *evlist)
{
	struct perf_event_attr attr = {
		.type	= PERF_TYPE_SOFTWARE,
		.config = PERF_COUNT_SW_DUMMY,
		.size	= sizeof(attr), /* to capture ABI version */
	};
	struct perf_evsel *evsel = perf_evsel__new(&attr);

	if (evsel == NULL)
		return -ENOMEM;

	perf_evlist__add(evlist, evsel);
	return 0;
}

static int perf_evlist__add_attrs(struct perf_evlist *evlist,
				  struct perf_event_attr *attrs, size_t nr_attrs)
{
	struct perf_evsel *evsel, *n;
	LIST_HEAD(head);
	size_t i;

	for (i = 0; i < nr_attrs; i++) {
		evsel = perf_evsel__new_idx(attrs + i, evlist->nr_entries + i);
		if (evsel == NULL)
			goto out_delete_partial_list;
		list_add_tail(&evsel->node, &head);
	}

	perf_evlist__splice_list_tail(evlist, &head);

	return 0;

out_delete_partial_list:
	__evlist__for_each_entry_safe(&head, n, evsel)
		perf_evsel__delete(evsel);
	return -1;
}

int __perf_evlist__add_default_attrs(struct perf_evlist *evlist,
				     struct perf_event_attr *attrs, size_t nr_attrs)
{
	size_t i;

	for (i = 0; i < nr_attrs; i++)
		event_attr_init(attrs + i);

	return perf_evlist__add_attrs(evlist, attrs, nr_attrs);
}

struct perf_evsel *
perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id)
{
	struct perf_evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->attr.type == PERF_TYPE_TRACEPOINT &&
		    (int)evsel->attr.config == id)
			return evsel;
	}

	return NULL;
}

struct perf_evsel *
perf_evlist__find_tracepoint_by_name(struct perf_evlist *evlist,
				     const char *name)
{
	struct perf_evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if ((evsel->attr.type == PERF_TYPE_TRACEPOINT) &&
		    (strcmp(evsel->name, name) == 0))
			return evsel;
	}

	return NULL;
}

int perf_evlist__add_newtp(struct perf_evlist *evlist,
			   const char *sys, const char *name, void *handler)
{
	struct perf_evsel *evsel = perf_evsel__newtp(sys, name);

	if (IS_ERR(evsel))
		return -1;

	evsel->handler = handler;
	perf_evlist__add(evlist, evsel);
	return 0;
}

static int perf_evlist__nr_threads(struct perf_evlist *evlist,
				   struct perf_evsel *evsel)
{
	if (evsel->system_wide)
		return 1;
	else
		return thread_map__nr(evlist->threads);
}

void perf_evlist__disable(struct perf_evlist *evlist)
{
	struct perf_evsel *pos;

	evlist__for_each_entry(evlist, pos) {
		if (!perf_evsel__is_group_leader(pos) || !pos->fd)
			continue;
		perf_evsel__disable(pos);
	}

	evlist->enabled = false;
}

void perf_evlist__enable(struct perf_evlist *evlist)
{
	struct perf_evsel *pos;

	evlist__for_each_entry(evlist, pos) {
		if (!perf_evsel__is_group_leader(pos) || !pos->fd)
			continue;
		perf_evsel__enable(pos);
	}

	evlist->enabled = true;
}

void perf_evlist__toggle_enable(struct perf_evlist *evlist)
{
	(evlist->enabled ? perf_evlist__disable : perf_evlist__enable)(evlist);
}

static int perf_evlist__enable_event_cpu(struct perf_evlist *evlist,
					 struct perf_evsel *evsel, int cpu)
{
	int thread;
	int nr_threads = perf_evlist__nr_threads(evlist, evsel);

	if (!evsel->fd)
		return -EINVAL;

	for (thread = 0; thread < nr_threads; thread++) {
		int err = ioctl(FD(evsel, cpu, thread), PERF_EVENT_IOC_ENABLE, 0);
		if (err)
			return err;
	}
	return 0;
}

static int perf_evlist__enable_event_thread(struct perf_evlist *evlist,
					    struct perf_evsel *evsel,
					    int thread)
{
	int cpu;
	int nr_cpus = cpu_map__nr(evlist->cpus);

	if (!evsel->fd)
		return -EINVAL;

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		int err = ioctl(FD(evsel, cpu, thread), PERF_EVENT_IOC_ENABLE, 0);
		if (err)
			return err;
	}
	return 0;
}

int perf_evlist__enable_event_idx(struct perf_evlist *evlist,
				  struct perf_evsel *evsel, int idx)
{
	bool per_cpu_mmaps = !cpu_map__empty(evlist->cpus);

	if (per_cpu_mmaps)
		return perf_evlist__enable_event_cpu(evlist, evsel, idx);
	else
		return perf_evlist__enable_event_thread(evlist, evsel, idx);
}

int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
{
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = thread_map__nr(evlist->threads);
	int nfds = 0;
	struct perf_evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->system_wide)
			nfds += nr_cpus;
		else
			nfds += nr_cpus * nr_threads;
	}

	if (fdarray__available_entries(&evlist->pollfd) < nfds &&
	    fdarray__grow(&evlist->pollfd, nfds) < 0)
		return -ENOMEM;

	return 0;
}

static int __perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd,
				     struct perf_mmap *map, short revent)
{
	int pos = fdarray__add(&evlist->pollfd, fd, revent | POLLERR | POLLHUP);
	/*
	 * Save the idx so that when we filter out fds POLLHUP'ed we can
	 * close the associated evlist->mmap[] entry.
	 */
	if (pos >= 0) {
		evlist->pollfd.priv[pos].ptr = map;

		fcntl(fd, F_SETFL, O_NONBLOCK);
	}

	return pos;
}

int perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd)
{
	return __perf_evlist__add_pollfd(evlist, fd, NULL, POLLIN);
}

static void perf_evlist__munmap_filtered(struct fdarray *fda, int fd,
					 void *arg __maybe_unused)
{
	struct perf_mmap *map = fda->priv[fd].ptr;

	if (map)
		perf_mmap__put(map);
}

int perf_evlist__filter_pollfd(struct perf_evlist *evlist, short revents_and_mask)
{
	return fdarray__filter(&evlist->pollfd, revents_and_mask,
			       perf_evlist__munmap_filtered, NULL);
}

int perf_evlist__poll(struct perf_evlist *evlist, int timeout)
{
	return fdarray__poll(&evlist->pollfd, timeout);
}

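/*
 * Each event id is hashed into evlist->heads[] so that a sample read from
 * the ring buffer can be mapped back to the evsel that generated it (see
 * perf_evlist__id2evsel() below).
 */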
static void perf_evlist__id_hash(struct perf_evlist *evlist,
				 struct perf_evsel *evsel,
				 int cpu, int thread, u64 id)
{
	int hash;
	struct perf_sample_id *sid = SID(evsel, cpu, thread);

	sid->id = id;
	sid->evsel = evsel;
	hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS);
	hlist_add_head(&sid->node, &evlist->heads[hash]);
}

void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel,
			 int cpu, int thread, u64 id)
{
	perf_evlist__id_hash(evlist, evsel, cpu, thread, id);
	evsel->id[evsel->ids++] = id;
}

int perf_evlist__id_add_fd(struct perf_evlist *evlist,
			   struct perf_evsel *evsel,
			   int cpu, int thread, int fd)
{
	u64 read_data[4] = { 0, };
	int id_idx = 1; /* The first entry is the counter value */
	u64 id;
	int ret;

	ret = ioctl(fd, PERF_EVENT_IOC_ID, &id);
	if (!ret)
		goto add;

	if (errno != ENOTTY)
		return -1;

	/* Legacy way to get the event id... All hail to old kernels! */

	/*
	 * This way does not work with group format read, so bail
	 * out in that case.
	 */
	if (perf_evlist__read_format(evlist) & PERF_FORMAT_GROUP)
		return -1;

	if (!(evsel->attr.read_format & PERF_FORMAT_ID) ||
	    read(fd, &read_data, sizeof(read_data)) == -1)
		return -1;

	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		++id_idx;
	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		++id_idx;

	id = read_data[id_idx];

 add:
	perf_evlist__id_add(evlist, evsel, cpu, thread, id);
	return 0;
}

static void perf_evlist__set_sid_idx(struct perf_evlist *evlist,
				     struct perf_evsel *evsel, int idx, int cpu,
				     int thread)
{
	struct perf_sample_id *sid = SID(evsel, cpu, thread);
	sid->idx = idx;
	if (evlist->cpus && cpu >= 0)
		sid->cpu = evlist->cpus->map[cpu];
	else
		sid->cpu = -1;
	if (!evsel->system_wide && evlist->threads && thread >= 0)
		sid->tid = thread_map__pid(evlist->threads, thread);
	else
		sid->tid = -1;
}

struct perf_sample_id *perf_evlist__id2sid(struct perf_evlist *evlist, u64 id)
{
	struct hlist_head *head;
	struct perf_sample_id *sid;
	int hash;

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->heads[hash];

	hlist_for_each_entry(sid, head, node)
		if (sid->id == id)
			return sid;

	return NULL;
}

struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
{
	struct perf_sample_id *sid;

	if (evlist->nr_entries == 1 || !id)
		return perf_evlist__first(evlist);

	sid = perf_evlist__id2sid(evlist, id);
	if (sid)
		return sid->evsel;

	if (!perf_evlist__sample_id_all(evlist))
		return perf_evlist__first(evlist);

	return NULL;
}

struct perf_evsel *perf_evlist__id2evsel_strict(struct perf_evlist *evlist,
						u64 id)
{
	struct perf_sample_id *sid;

	if (!id)
		return NULL;

	sid = perf_evlist__id2sid(evlist, id);
	if (sid)
		return sid->evsel;

	return NULL;
}

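/*
 * Extract the event id from a raw event record using the id_pos/is_pos
 * offsets cached on the evlist: id_pos locates the id in PERF_RECORD_SAMPLE
 * records, is_pos locates it (counting from the end) in all other record
 * types when sample_id_all is in use.
 */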
static int perf_evlist__event2id(struct perf_evlist *evlist,
				 union perf_event *event, u64 *id)
{
	const u64 *array = event->sample.array;
	ssize_t n;

	n = (event->header.size - sizeof(event->header)) >> 3;

	if (event->header.type == PERF_RECORD_SAMPLE) {
		if (evlist->id_pos >= n)
			return -1;
		*id = array[evlist->id_pos];
	} else {
		if (evlist->is_pos > n)
			return -1;
		n -= evlist->is_pos;
		*id = array[n];
	}
	return 0;
}

struct perf_evsel *perf_evlist__event2evsel(struct perf_evlist *evlist,
					    union perf_event *event)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	struct hlist_head *head;
	struct perf_sample_id *sid;
	int hash;
	u64 id;

	if (evlist->nr_entries == 1)
		return first;

	if (!first->attr.sample_id_all &&
	    event->header.type != PERF_RECORD_SAMPLE)
		return first;

	if (perf_evlist__event2id(evlist, event, &id))
		return NULL;

	/* Synthesized events have an id of zero */
	if (!id)
		return first;

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->heads[hash];

	hlist_for_each_entry(sid, head, node) {
		if (sid->id == id)
			return sid->evsel;
	}
	return NULL;
}

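/*
 * Backward (overwritable) ring buffers must be paused with
 * PERF_EVENT_IOC_PAUSE_OUTPUT before being read, so that the kernel does
 * not overwrite records while user space is still consuming them.
 */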
static int perf_evlist__set_paused(struct perf_evlist *evlist, bool value)
{
	int i;

	if (!evlist->backward_mmap)
		return 0;

	for (i = 0; i < evlist->nr_mmaps; i++) {
		int fd = evlist->backward_mmap[i].fd;
		int err;

		if (fd < 0)
			continue;
		err = ioctl(fd, PERF_EVENT_IOC_PAUSE_OUTPUT, value ? 1 : 0);
		if (err)
			return err;
	}
	return 0;
}

static int perf_evlist__pause(struct perf_evlist *evlist)
{
	return perf_evlist__set_paused(evlist, true);
}

static int perf_evlist__resume(struct perf_evlist *evlist)
{
	return perf_evlist__set_paused(evlist, false);
}

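/*
 * Common reader for both forward and backward ring buffers: returns the
 * next event in the window [start, end) of ring buffer 'md', assembling it
 * via md->event_copy when it wraps around the buffer boundary.
 */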
/* When check_messup is true, 'end' must point to a good entry */
static union perf_event *
perf_mmap__read(struct perf_mmap *md, bool check_messup, u64 start,
		u64 end, u64 *prev)
{
	unsigned char *data = md->base + page_size;
	union perf_event *event = NULL;
	int diff = end - start;

	if (check_messup) {
		/*
		 * If we're further behind than half the buffer, there's a chance
		 * the writer will bite our tail and mess up the samples under us.
		 *
		 * If we somehow ended up ahead of the 'end', we got messed up.
		 *
		 * In either case, truncate and restart at 'end'.
		 */
		if (diff > md->mask / 2 || diff < 0) {
			fprintf(stderr, "WARNING: failed to keep up with mmap data.\n");

			/*
			 * 'end' points to a known good entry, start there.
			 */
			start = end;
			diff = 0;
		}
	}

	if (diff >= (int)sizeof(event->header)) {
		size_t size;

		event = (union perf_event *)&data[start & md->mask];
		size = event->header.size;

		if (size < sizeof(event->header) || diff < (int)size) {
			event = NULL;
			goto broken_event;
		}

		/*
		 * Event straddles the mmap boundary -- header should always
		 * be inside due to u64 alignment of output.
		 */
		if ((start & md->mask) + size != ((start + size) & md->mask)) {
			unsigned int offset = start;
			unsigned int len = min(sizeof(*event), size), cpy;
			void *dst = md->event_copy;

			do {
				cpy = min(md->mask + 1 - (offset & md->mask), len);
				memcpy(dst, &data[offset & md->mask], cpy);
				offset += cpy;
				dst += cpy;
				len -= cpy;
			} while (len);

			event = (union perf_event *) md->event_copy;
		}

		start += size;
	}

broken_event:
	if (prev)
		*prev = start;

	return event;
}

union perf_event *perf_mmap__read_forward(struct perf_mmap *md, bool check_messup)
{
	u64 head;
	u64 old = md->prev;

	/*
	 * Check if event was unmapped due to a POLLHUP/POLLERR.
	 */
	if (!refcount_read(&md->refcnt))
		return NULL;

	head = perf_mmap__read_head(md);

	return perf_mmap__read(md, check_messup, old, head, &md->prev);
}

union perf_event *
perf_mmap__read_backward(struct perf_mmap *md)
{
	u64 head, end;
	u64 start = md->prev;

	/*
	 * Check if event was unmapped due to a POLLHUP/POLLERR.
	 */
	if (!refcount_read(&md->refcnt))
		return NULL;

	head = perf_mmap__read_head(md);
	if (!head)
		return NULL;

	/*
	 * In a backward ring buffer, 'head' starts at 0 and the kernel
	 * subtracts the size of each record from it on every write, so
	 * 'head' is effectively negative. The 'end' pointer is made
	 * manually by adding the size of the ring buffer to 'head',
	 * meaning the valid data we can read spans the whole ring
	 * buffer. If 'end' is positive, the ring buffer has not been
	 * completely filled yet, so we must adjust 'end' to 0.
	 *
	 * However, since both 'head' and 'end' are unsigned, we can't
	 * simply compare 'end' against 0. Instead we compare '-head'
	 * with the size of the ring buffer, where '-head' is the number
	 * of bytes the kernel has written to the ring buffer.
	 */
	if (-head < (u64)(md->mask + 1))
		end = 0;
	else
		end = head + md->mask + 1;

	return perf_mmap__read(md, false, start, end, &md->prev);
}

union perf_event *perf_evlist__mmap_read_forward(struct perf_evlist *evlist, int idx)
{
	struct perf_mmap *md = &evlist->mmap[idx];

	/*
	 * Checking for messups is required for a forward overwritable
	 * ring buffer: the memory pointed to by md->prev can be
	 * overwritten in that case. It is not needed for a read-write
	 * ring buffer: the kernel stops outputting when it hits
	 * md->prev (perf_mmap__consume()).
	 */
	return perf_mmap__read_forward(md, evlist->overwrite);
}

union perf_event *perf_evlist__mmap_read_backward(struct perf_evlist *evlist, int idx)
{
	struct perf_mmap *md = &evlist->mmap[idx];

	/*
	 * No need to check for messups in a backward ring buffer:
	 * we can always read arbitrarily long data from it, provided
	 * it is paused before reading.
	 */
	return perf_mmap__read_backward(md);
}

union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
{
	return perf_evlist__mmap_read_forward(evlist, idx);
}

void perf_mmap__read_catchup(struct perf_mmap *md)
{
	u64 head;

	if (!refcount_read(&md->refcnt))
		return;

	head = perf_mmap__read_head(md);
	md->prev = head;
}

void perf_evlist__mmap_read_catchup(struct perf_evlist *evlist, int idx)
{
	perf_mmap__read_catchup(&evlist->mmap[idx]);
}

static bool perf_mmap__empty(struct perf_mmap *md)
{
	return perf_mmap__read_head(md) == md->prev && !md->auxtrace_mmap.base;
}

static void perf_mmap__get(struct perf_mmap *map)
{
	refcount_inc(&map->refcnt);
}

static void perf_mmap__put(struct perf_mmap *md)
{
	BUG_ON(md->base && refcount_read(&md->refcnt) == 0);

	if (refcount_dec_and_test(&md->refcnt))
		perf_mmap__munmap(md);
}

void perf_mmap__consume(struct perf_mmap *md, bool overwrite)
{
	if (!overwrite) {
		u64 old = md->prev;

		perf_mmap__write_tail(md, old);
	}

	if (refcount_read(&md->refcnt) == 1 && perf_mmap__empty(md))
		perf_mmap__put(md);
}

void perf_evlist__mmap_consume(struct perf_evlist *evlist, int idx)
{
	perf_mmap__consume(&evlist->mmap[idx], evlist->overwrite);
}

int __weak auxtrace_mmap__mmap(struct auxtrace_mmap *mm __maybe_unused,
			       struct auxtrace_mmap_params *mp __maybe_unused,
			       void *userpg __maybe_unused,
			       int fd __maybe_unused)
{
	return 0;
}

void __weak auxtrace_mmap__munmap(struct auxtrace_mmap *mm __maybe_unused)
{
}

void __weak auxtrace_mmap_params__init(
			struct auxtrace_mmap_params *mp __maybe_unused,
			off_t auxtrace_offset __maybe_unused,
			unsigned int auxtrace_pages __maybe_unused,
			bool auxtrace_overwrite __maybe_unused)
{
}

void __weak auxtrace_mmap_params__set_idx(
			struct auxtrace_mmap_params *mp __maybe_unused,
			struct perf_evlist *evlist __maybe_unused,
			int idx __maybe_unused,
			bool per_cpu __maybe_unused)
{
}

static void perf_mmap__munmap(struct perf_mmap *map)
{
	if (map->base != NULL) {
		munmap(map->base, perf_mmap__mmap_len(map));
		map->base = NULL;
		map->fd = -1;
		refcount_set(&map->refcnt, 0);
	}
	auxtrace_mmap__munmap(&map->auxtrace_mmap);
}

static void perf_evlist__munmap_nofree(struct perf_evlist *evlist)
{
	int i;

	if (evlist->mmap)
		for (i = 0; i < evlist->nr_mmaps; i++)
			perf_mmap__munmap(&evlist->mmap[i]);

	if (evlist->backward_mmap)
		for (i = 0; i < evlist->nr_mmaps; i++)
			perf_mmap__munmap(&evlist->backward_mmap[i]);
}

void perf_evlist__munmap(struct perf_evlist *evlist)
{
	perf_evlist__munmap_nofree(evlist);
	zfree(&evlist->mmap);
	zfree(&evlist->backward_mmap);
}

static struct perf_mmap *perf_evlist__alloc_mmap(struct perf_evlist *evlist)
{
	int i;
	struct perf_mmap *map;

	evlist->nr_mmaps = cpu_map__nr(evlist->cpus);
	if (cpu_map__empty(evlist->cpus))
		evlist->nr_mmaps = thread_map__nr(evlist->threads);
	map = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
	if (!map)
		return NULL;

	for (i = 0; i < evlist->nr_mmaps; i++) {
		map[i].fd = -1;
		/*
		 * When the perf_mmap() call is made we grab one refcount, plus
		 * one extra to let perf_evlist__mmap_consume() get the last
		 * events after all real references (perf_mmap__get()) are
		 * dropped.
		 *
		 * Each PERF_EVENT_IOC_SET_OUTPUT points to this mmap and
		 * thus does perf_mmap__get() on it.
		 */
		refcount_set(&map[i].refcnt, 0);
	}
	return map;
}

struct mmap_params {
	int prot;
	int mask;
	struct auxtrace_mmap_params auxtrace_mp;
};

static int perf_mmap__mmap(struct perf_mmap *map,
			   struct mmap_params *mp, int fd)
{
	/*
	 * The last one will be done at perf_evlist__mmap_consume(), so that we
	 * make sure we don't prevent tools from consuming every last event in
	 * the ring buffer.
	 *
	 * I.e. we can get the POLLHUP meaning that the fd doesn't exist
	 * anymore, but the last events for it are still in the ring buffer,
	 * waiting to be consumed.
	 *
	 * Tools can choose to ignore this at their own discretion, but the
	 * evlist layer can't just drop it when filtering events in
	 * perf_evlist__filter_pollfd().
	 */
	refcount_set(&map->refcnt, 2);
	map->prev = 0;
	map->mask = mp->mask;
	map->base = mmap(NULL, perf_mmap__mmap_len(map), mp->prot,
			 MAP_SHARED, fd, 0);
	if (map->base == MAP_FAILED) {
		pr_debug2("failed to mmap perf event ring buffer, error %d\n",
			  errno);
		map->base = NULL;
		return -1;
	}
	map->fd = fd;

	if (auxtrace_mmap__mmap(&map->auxtrace_mmap,
				&mp->auxtrace_mp, map->base, fd))
		return -1;

	return 0;
}

static bool
perf_evlist__should_poll(struct perf_evlist *evlist __maybe_unused,
			 struct perf_evsel *evsel)
{
	if (evsel->attr.write_backward)
		return false;
	return true;
}

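/*
 * mmap one ring buffer per cpu/thread index and redirect every other fd for
 * that index into it via PERF_EVENT_IOC_SET_OUTPUT; events that use
 * write_backward get their own set of maps in evlist->backward_mmap.
 */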
static int perf_evlist__mmap_per_evsel(struct perf_evlist *evlist, int idx,
				       struct mmap_params *mp, int cpu_idx,
				       int thread, int *_output, int *_output_backward)
{
	struct perf_evsel *evsel;
	int revent;
	int evlist_cpu = cpu_map__cpu(evlist->cpus, cpu_idx);

	evlist__for_each_entry(evlist, evsel) {
		struct perf_mmap *maps = evlist->mmap;
		int *output = _output;
		int fd;
		int cpu;

		if (evsel->attr.write_backward) {
			output = _output_backward;
			maps = evlist->backward_mmap;

			if (!maps) {
				maps = perf_evlist__alloc_mmap(evlist);
				if (!maps)
					return -1;
				evlist->backward_mmap = maps;
				if (evlist->bkw_mmap_state == BKW_MMAP_NOTREADY)
					perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_RUNNING);
			}
		}

		if (evsel->system_wide && thread)
			continue;

		cpu = cpu_map__idx(evsel->cpus, evlist_cpu);
		if (cpu == -1)
			continue;

		fd = FD(evsel, cpu, thread);

		if (*output == -1) {
			*output = fd;

			if (perf_mmap__mmap(&maps[idx], mp, *output) < 0)
				return -1;
		} else {
			if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, *output) != 0)
				return -1;

			perf_mmap__get(&maps[idx]);
		}

		revent = perf_evlist__should_poll(evlist, evsel) ? POLLIN : 0;

		/*
		 * The system_wide flag causes a selected event to be opened
		 * always without a pid. Consequently it will never get a
		 * POLLHUP, but it is used for tracking in combination with
		 * other events, so it should not need to be polled anyway.
		 * Therefore don't add it for polling.
		 */
		if (!evsel->system_wide &&
		    __perf_evlist__add_pollfd(evlist, fd, &maps[idx], revent) < 0) {
			perf_mmap__put(&maps[idx]);
			return -1;
		}

		if (evsel->attr.read_format & PERF_FORMAT_ID) {
			if (perf_evlist__id_add_fd(evlist, evsel, cpu, thread,
						   fd) < 0)
				return -1;
			perf_evlist__set_sid_idx(evlist, evsel, idx, cpu,
						 thread);
		}
	}

	return 0;
}

static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist,
				     struct mmap_params *mp)
{
	int cpu, thread;
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = thread_map__nr(evlist->threads);

	pr_debug2("perf event ring buffer mmapped per cpu\n");
	for (cpu = 0; cpu < nr_cpus; cpu++) {
		int output = -1;
		int output_backward = -1;

		auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, cpu,
					      true);

		for (thread = 0; thread < nr_threads; thread++) {
			if (perf_evlist__mmap_per_evsel(evlist, cpu, mp, cpu,
							thread, &output, &output_backward))
				goto out_unmap;
		}
	}

	return 0;

out_unmap:
	perf_evlist__munmap_nofree(evlist);
	return -1;
}

static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist,
					struct mmap_params *mp)
{
	int thread;
	int nr_threads = thread_map__nr(evlist->threads);

	pr_debug2("perf event ring buffer mmapped per thread\n");
	for (thread = 0; thread < nr_threads; thread++) {
		int output = -1;
		int output_backward = -1;

		auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, thread,
					      false);

		if (perf_evlist__mmap_per_evsel(evlist, thread, mp, 0, thread,
						&output, &output_backward))
			goto out_unmap;
	}

	return 0;

out_unmap:
	perf_evlist__munmap_nofree(evlist);
	return -1;
}

unsigned long perf_event_mlock_kb_in_pages(void)
{
	unsigned long pages;
	int max;

	if (sysctl__read_int("kernel/perf_event_mlock_kb", &max) < 0) {
		/*
		 * Fall back to a value that was good once upon a time:
		 * things look strange since we can't read the sysctl
		 * value, but let's not die just yet...
		 */
		max = 512;
	} else {
		max -= (page_size / 1024);
	}

	pages = (max * 1024) / page_size;
	if (!is_power_of_2(pages))
		pages = rounddown_pow_of_two(pages);

	return pages;
}

size_t perf_evlist__mmap_size(unsigned long pages)
{
	if (pages == UINT_MAX)
		pages = perf_event_mlock_kb_in_pages();
	else if (!is_power_of_2(pages))
		return 0;

	return (pages + 1) * page_size;
}

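/*
 * Parse a --mmap-pages style argument: either a size with an optional
 * B/K/M/G suffix, rounded up to a whole number of pages, or a raw page
 * count, which is rounded up to the next power of two if needed.
 */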
static long parse_pages_arg(const char *str, unsigned long min,
			    unsigned long max)
{
	unsigned long pages, val;
	static struct parse_tag tags[] = {
		{ .tag = 'B', .mult = 1       },
		{ .tag = 'K', .mult = 1 << 10 },
		{ .tag = 'M', .mult = 1 << 20 },
		{ .tag = 'G', .mult = 1 << 30 },
		{ .tag = 0 },
	};

	if (str == NULL)
		return -EINVAL;

	val = parse_tag_value(str, tags);
	if (val != (unsigned long) -1) {
		/* we got file size value */
		pages = PERF_ALIGN(val, page_size) / page_size;
	} else {
		/* we got pages count value */
		char *eptr;
		pages = strtoul(str, &eptr, 10);
		if (*eptr != '\0')
			return -EINVAL;
	}

	if (pages == 0 && min == 0) {
		/* leave number of pages at 0 */
	} else if (!is_power_of_2(pages)) {
		char buf[100];

		/* round pages up to next power of 2 */
		pages = roundup_pow_of_two(pages);
		if (!pages)
			return -EINVAL;

		unit_number__scnprintf(buf, sizeof(buf), pages * page_size);
		pr_info("rounding mmap pages size to %s (%lu pages)\n",
			buf, pages);
	}

	if (pages > max)
		return -EINVAL;

	return pages;
}

int __perf_evlist__parse_mmap_pages(unsigned int *mmap_pages, const char *str)
{
	unsigned long max = UINT_MAX;
	long pages;

	if (max > SIZE_MAX / page_size)
		max = SIZE_MAX / page_size;

	pages = parse_pages_arg(str, 1, max);
	if (pages < 0) {
		pr_err("Invalid argument for --mmap_pages/-m\n");
		return -1;
	}

	*mmap_pages = pages;
	return 0;
}

int perf_evlist__parse_mmap_pages(const struct option *opt, const char *str,
				  int unset __maybe_unused)
{
	return __perf_evlist__parse_mmap_pages(opt->value, str);
}

/**
 * perf_evlist__mmap_ex - Create mmaps to receive events.
 * @evlist: list of events
 * @pages: map length in pages
 * @overwrite: overwrite older events?
 * @auxtrace_pages: auxtrace map length in pages
 * @auxtrace_overwrite: overwrite older auxtrace data?
 *
 * If @overwrite is %false the user needs to signal event consumption using
 * perf_mmap__write_tail(). Using perf_evlist__mmap_read() does this
 * automatically.
 *
 * Similarly, if @auxtrace_overwrite is %false the user needs to signal data
 * consumption using auxtrace_mmap__write_tail().
 *
 * Return: %0 on success, negative error code otherwise.
 */
int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages,
			 bool overwrite, unsigned int auxtrace_pages,
			 bool auxtrace_overwrite)
{
	struct perf_evsel *evsel;
	const struct cpu_map *cpus = evlist->cpus;
	const struct thread_map *threads = evlist->threads;
	struct mmap_params mp = {
		.prot = PROT_READ | (overwrite ? 0 : PROT_WRITE),
	};

	if (!evlist->mmap)
		evlist->mmap = perf_evlist__alloc_mmap(evlist);
	if (!evlist->mmap)
		return -ENOMEM;

	if (evlist->pollfd.entries == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
		return -ENOMEM;

	evlist->overwrite = overwrite;
	evlist->mmap_len = perf_evlist__mmap_size(pages);
	pr_debug("mmap size %zuB\n", evlist->mmap_len);
	mp.mask = evlist->mmap_len - page_size - 1;

	auxtrace_mmap_params__init(&mp.auxtrace_mp, evlist->mmap_len,
				   auxtrace_pages, auxtrace_overwrite);

	evlist__for_each_entry(evlist, evsel) {
		if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
		    evsel->sample_id == NULL &&
		    perf_evsel__alloc_id(evsel, cpu_map__nr(cpus), threads->nr) < 0)
			return -ENOMEM;
	}

	if (cpu_map__empty(cpus))
		return perf_evlist__mmap_per_thread(evlist, &mp);

	return perf_evlist__mmap_per_cpu(evlist, &mp);
}

int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
		      bool overwrite)
{
	return perf_evlist__mmap_ex(evlist, pages, overwrite, 0, false);
1346}
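/*
 * A minimal open/mmap flow, sketched assuming the evlist was populated
 * (e.g. via parse_events()) and its maps set up with
 * perf_evlist__create_maps(); error handling elided:
 *
 *	if (perf_evlist__open(evlist) < 0)
 *		goto out;
 *	if (perf_evlist__mmap(evlist, opts->mmap_pages, false) < 0)
 *		goto out;
 *	... consume events with perf_evlist__mmap_read() ...
 */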
1347
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03001348int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target)
Arnaldo Carvalho de Melo7e2ed092011-01-30 11:59:43 -02001349{
Adrian Hunter74bfd2b2015-09-08 10:58:57 +03001350 struct cpu_map *cpus;
1351 struct thread_map *threads;
Arnaldo Carvalho de Melo7e2ed092011-01-30 11:59:43 -02001352
Adrian Hunter74bfd2b2015-09-08 10:58:57 +03001353 threads = thread_map__new_str(target->pid, target->tid, target->uid);
1354
1355 if (!threads)
Arnaldo Carvalho de Melo7e2ed092011-01-30 11:59:43 -02001356 return -1;
1357
Dongsheng Yang9c105fb2013-12-04 17:56:40 -05001358 if (target__uses_dummy_map(target))
Adrian Hunter74bfd2b2015-09-08 10:58:57 +03001359 cpus = cpu_map__dummy_new();
Namhyung Kim879d77d2012-05-16 18:45:48 +09001360 else
Adrian Hunter74bfd2b2015-09-08 10:58:57 +03001361 cpus = cpu_map__new(target->cpu_list);
Arnaldo Carvalho de Melo7e2ed092011-01-30 11:59:43 -02001362
Adrian Hunter74bfd2b2015-09-08 10:58:57 +03001363 if (!cpus)
Arnaldo Carvalho de Melo7e2ed092011-01-30 11:59:43 -02001364 goto out_delete_threads;
1365
Adrian Hunterec9a77a2015-09-08 10:58:52 +03001366 evlist->has_user_cpus = !!target->cpu_list;
1367
Adrian Hunter74bfd2b2015-09-08 10:58:57 +03001368 perf_evlist__set_maps(evlist, cpus, threads);
Adrian Hunterd5bc0562015-09-08 10:58:51 +03001369
1370 return 0;
Arnaldo Carvalho de Melo7e2ed092011-01-30 11:59:43 -02001371
1372out_delete_threads:
Adrian Hunter74bfd2b2015-09-08 10:58:57 +03001373 thread_map__put(threads);
Arnaldo Carvalho de Melo7e2ed092011-01-30 11:59:43 -02001374 return -1;
1375}
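/*
 * Sketch of a caller; the literal values are illustrative, the target
 * fields are the ones consulted above (note .pid and .cpu_list are
 * strings, as parsed from -p/-C options):
 *
 *	struct target target = {
 *		.pid	  = "1234",
 *		.cpu_list = "0-3",
 *	};
 *
 *	if (perf_evlist__create_maps(evlist, &target) < 0)
 *		pr_err("Couldn't create thread/cpu maps\n");
 */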
1376
Adrian Hunterd5bc0562015-09-08 10:58:51 +03001377void perf_evlist__set_maps(struct perf_evlist *evlist, struct cpu_map *cpus,
1378 struct thread_map *threads)
Jiri Olsa3de5cfb2015-07-21 14:31:30 +02001379{
Adrian Hunter934e0f22015-09-08 10:58:56 +03001380 /*
1381 * Allow for the possibility that one or another of the maps isn't being
1382 * changed i.e. don't put it. Note we are assuming the maps that are
1383 * being applied are brand new and evlist is taking ownership of the
1384 * original reference count of 1. If that is not the case it is up to
1385 * the caller to increase the reference count.
1386 */
1387 if (cpus != evlist->cpus) {
Jiri Olsa3de5cfb2015-07-21 14:31:30 +02001388 cpu_map__put(evlist->cpus);
Arnaldo Carvalho de Meloa55e5662016-02-17 10:57:19 -03001389 evlist->cpus = cpu_map__get(cpus);
Adrian Hunter934e0f22015-09-08 10:58:56 +03001390 }
Jiri Olsa3de5cfb2015-07-21 14:31:30 +02001391
Adrian Hunter934e0f22015-09-08 10:58:56 +03001392 if (threads != evlist->threads) {
Jiri Olsa3de5cfb2015-07-21 14:31:30 +02001393 thread_map__put(evlist->threads);
Arnaldo Carvalho de Meloa55e5662016-02-17 10:57:19 -03001394 evlist->threads = thread_map__get(threads);
Adrian Hunter934e0f22015-09-08 10:58:56 +03001395 }
Jiri Olsa3de5cfb2015-07-21 14:31:30 +02001396
Adrian Hunterec9a77a2015-09-08 10:58:52 +03001397 perf_evlist__propagate_maps(evlist);
Jiri Olsa3de5cfb2015-07-21 14:31:30 +02001398}
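/*
 * Consequence of the ownership rule above, as a sketch: a caller that
 * wants to keep using a map after handing it over must take its own
 * reference first:
 *
 *	cpu_map__get(cpus);
 *	thread_map__get(threads);
 *	perf_evlist__set_maps(evlist, cpus, threads);
 *	... cpus and threads remain valid for the caller here ...
 */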
1399
Arnaldo Carvalho de Melo22c8a372016-04-11 18:37:45 -03001400void __perf_evlist__set_sample_bit(struct perf_evlist *evlist,
1401 enum perf_event_sample_format bit)
1402{
1403 struct perf_evsel *evsel;
1404
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03001405 evlist__for_each_entry(evlist, evsel)
Arnaldo Carvalho de Melo22c8a372016-04-11 18:37:45 -03001406 __perf_evsel__set_sample_bit(evsel, bit);
1407}
1408
1409void __perf_evlist__reset_sample_bit(struct perf_evlist *evlist,
1410 enum perf_event_sample_format bit)
1411{
1412 struct perf_evsel *evsel;
1413
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03001414 evlist__for_each_entry(evlist, evsel)
Arnaldo Carvalho de Melo22c8a372016-04-11 18:37:45 -03001415 __perf_evsel__reset_sample_bit(evsel, bit);
1416}
1417
Arnaldo Carvalho de Melo23d4aad2015-03-24 19:23:47 -03001418int perf_evlist__apply_filters(struct perf_evlist *evlist, struct perf_evsel **err_evsel)
Frederic Weisbecker0a102472011-02-26 04:51:54 +01001419{
Frederic Weisbecker0a102472011-02-26 04:51:54 +01001420 struct perf_evsel *evsel;
Arnaldo Carvalho de Melo745cefc2012-09-26 15:07:39 -03001421 int err = 0;
1422 const int ncpus = cpu_map__nr(evlist->cpus),
Namhyung Kimb3a319d2013-03-11 16:43:14 +09001423 nthreads = thread_map__nr(evlist->threads);
Frederic Weisbecker0a102472011-02-26 04:51:54 +01001424
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03001425 evlist__for_each_entry(evlist, evsel) {
Arnaldo Carvalho de Melo745cefc2012-09-26 15:07:39 -03001426 if (evsel->filter == NULL)
Frederic Weisbecker0a102472011-02-26 04:51:54 +01001427 continue;
Arnaldo Carvalho de Melo745cefc2012-09-26 15:07:39 -03001428
Kan Liangd988d5e2015-08-21 02:23:14 -04001429 /*
 1430	 * filters only work for tracepoint events, which don't have a cpu limit,
 1431	 * so the evlist and evsel cpu maps should always be the same.
1432 */
Arnaldo Carvalho de Melof47805a2015-07-03 15:53:49 -03001433 err = perf_evsel__apply_filter(evsel, ncpus, nthreads, evsel->filter);
Arnaldo Carvalho de Melo23d4aad2015-03-24 19:23:47 -03001434 if (err) {
1435 *err_evsel = evsel;
Arnaldo Carvalho de Melo745cefc2012-09-26 15:07:39 -03001436 break;
Arnaldo Carvalho de Melo23d4aad2015-03-24 19:23:47 -03001437 }
Frederic Weisbecker0a102472011-02-26 04:51:54 +01001438 }
1439
Arnaldo Carvalho de Melo745cefc2012-09-26 15:07:39 -03001440 return err;
1441}
1442
1443int perf_evlist__set_filter(struct perf_evlist *evlist, const char *filter)
1444{
1445 struct perf_evsel *evsel;
1446 int err = 0;
Arnaldo Carvalho de Melo745cefc2012-09-26 15:07:39 -03001447
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03001448 evlist__for_each_entry(evlist, evsel) {
Wang Nanfdf14722016-02-26 09:31:53 +00001449 if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
1450 continue;
1451
Arnaldo Carvalho de Melo94ad89b2015-07-03 17:42:03 -03001452 err = perf_evsel__set_filter(evsel, filter);
Arnaldo Carvalho de Melo745cefc2012-09-26 15:07:39 -03001453 if (err)
1454 break;
1455 }
1456
1457 return err;
Frederic Weisbecker0a102472011-02-26 04:51:54 +01001458}
Frederic Weisbecker74429962011-05-21 17:49:00 +02001459
Arnaldo Carvalho de Melobe199ad2015-02-21 11:33:47 -08001460int perf_evlist__set_filter_pids(struct perf_evlist *evlist, size_t npids, pid_t *pids)
Arnaldo Carvalho de Melocfd70a22015-02-21 10:09:55 -08001461{
 1462	char *filter = NULL;
Arnaldo Carvalho de Melobe199ad2015-02-21 11:33:47 -08001463 int ret = -1;
1464 size_t i;
Arnaldo Carvalho de Melocfd70a22015-02-21 10:09:55 -08001465
Arnaldo Carvalho de Melobe199ad2015-02-21 11:33:47 -08001466 for (i = 0; i < npids; ++i) {
1467 if (i == 0) {
1468 if (asprintf(&filter, "common_pid != %d", pids[i]) < 0)
1469 return -1;
1470 } else {
1471 char *tmp;
1472
1473 if (asprintf(&tmp, "%s && common_pid != %d", filter, pids[i]) < 0)
1474 goto out_free;
1475
1476 free(filter);
1477 filter = tmp;
1478 }
1479 }
Arnaldo Carvalho de Melocfd70a22015-02-21 10:09:55 -08001480
1481 ret = perf_evlist__set_filter(evlist, filter);
Arnaldo Carvalho de Melobe199ad2015-02-21 11:33:47 -08001482out_free:
Arnaldo Carvalho de Melocfd70a22015-02-21 10:09:55 -08001483 free(filter);
1484 return ret;
1485}
1486
Arnaldo Carvalho de Melobe199ad2015-02-21 11:33:47 -08001487int perf_evlist__set_filter_pid(struct perf_evlist *evlist, pid_t pid)
1488{
1489 return perf_evlist__set_filter_pids(evlist, 1, &pid);
1490}
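/*
 * For illustration, perf_evlist__set_filter_pids(evlist, 2, pids) with
 * pids = { 1, 2 } builds and applies the tracepoint filter:
 *
 *	"common_pid != 1 && common_pid != 2"
 *
 * which is typically used to keep perf's own threads out of the trace.
 */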
1491
Arnaldo Carvalho de Melo0c21f732012-08-14 16:42:15 -03001492bool perf_evlist__valid_sample_type(struct perf_evlist *evlist)
Frederic Weisbecker74429962011-05-21 17:49:00 +02001493{
Adrian Hunter75562572013-08-27 11:23:09 +03001494 struct perf_evsel *pos;
Arnaldo Carvalho de Meloc2a70652011-06-02 11:04:54 -03001495
Adrian Hunter75562572013-08-27 11:23:09 +03001496 if (evlist->nr_entries == 1)
1497 return true;
1498
1499 if (evlist->id_pos < 0 || evlist->is_pos < 0)
1500 return false;
1501
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03001502 evlist__for_each_entry(evlist, pos) {
Adrian Hunter75562572013-08-27 11:23:09 +03001503 if (pos->id_pos != evlist->id_pos ||
1504 pos->is_pos != evlist->is_pos)
Arnaldo Carvalho de Meloc2a70652011-06-02 11:04:54 -03001505 return false;
Frederic Weisbecker74429962011-05-21 17:49:00 +02001506 }
1507
Arnaldo Carvalho de Meloc2a70652011-06-02 11:04:54 -03001508 return true;
1509}
1510
Adrian Hunter75562572013-08-27 11:23:09 +03001511u64 __perf_evlist__combined_sample_type(struct perf_evlist *evlist)
Arnaldo Carvalho de Meloc2a70652011-06-02 11:04:54 -03001512{
Adrian Hunter75562572013-08-27 11:23:09 +03001513 struct perf_evsel *evsel;
1514
1515 if (evlist->combined_sample_type)
1516 return evlist->combined_sample_type;
1517
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03001518 evlist__for_each_entry(evlist, evsel)
Adrian Hunter75562572013-08-27 11:23:09 +03001519 evlist->combined_sample_type |= evsel->attr.sample_type;
1520
1521 return evlist->combined_sample_type;
1522}
1523
1524u64 perf_evlist__combined_sample_type(struct perf_evlist *evlist)
1525{
1526 evlist->combined_sample_type = 0;
1527 return __perf_evlist__combined_sample_type(evlist);
Arnaldo Carvalho de Meloc2a70652011-06-02 11:04:54 -03001528}
1529
Andi Kleen98df8582015-07-18 08:24:47 -07001530u64 perf_evlist__combined_branch_type(struct perf_evlist *evlist)
1531{
1532 struct perf_evsel *evsel;
1533 u64 branch_type = 0;
1534
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03001535 evlist__for_each_entry(evlist, evsel)
Andi Kleen98df8582015-07-18 08:24:47 -07001536 branch_type |= evsel->attr.branch_sample_type;
1537 return branch_type;
1538}
1539
Jiri Olsa9ede4732012-10-10 17:38:13 +02001540bool perf_evlist__valid_read_format(struct perf_evlist *evlist)
1541{
1542 struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;
1543 u64 read_format = first->attr.read_format;
1544 u64 sample_type = first->attr.sample_type;
1545
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03001546 evlist__for_each_entry(evlist, pos) {
Jiri Olsa9ede4732012-10-10 17:38:13 +02001547 if (read_format != pos->attr.read_format)
1548 return false;
1549 }
1550
 1551	/* PERF_SAMPLE_READ implies PERF_FORMAT_ID. */
1552 if ((sample_type & PERF_SAMPLE_READ) &&
1553 !(read_format & PERF_FORMAT_ID)) {
1554 return false;
1555 }
1556
1557 return true;
1558}
1559
1560u64 perf_evlist__read_format(struct perf_evlist *evlist)
1561{
1562 struct perf_evsel *first = perf_evlist__first(evlist);
1563 return first->attr.read_format;
1564}
1565
Arnaldo Carvalho de Melo0c21f732012-08-14 16:42:15 -03001566u16 perf_evlist__id_hdr_size(struct perf_evlist *evlist)
Arnaldo Carvalho de Melo81e36bf2011-11-11 22:28:50 -02001567{
Arnaldo Carvalho de Melo0c21f732012-08-14 16:42:15 -03001568 struct perf_evsel *first = perf_evlist__first(evlist);
Arnaldo Carvalho de Melo81e36bf2011-11-11 22:28:50 -02001569 struct perf_sample *data;
1570 u64 sample_type;
1571 u16 size = 0;
1572
Arnaldo Carvalho de Melo81e36bf2011-11-11 22:28:50 -02001573 if (!first->attr.sample_id_all)
1574 goto out;
1575
1576 sample_type = first->attr.sample_type;
1577
1578 if (sample_type & PERF_SAMPLE_TID)
1579 size += sizeof(data->tid) * 2;
1580
1581 if (sample_type & PERF_SAMPLE_TIME)
1582 size += sizeof(data->time);
1583
1584 if (sample_type & PERF_SAMPLE_ID)
1585 size += sizeof(data->id);
1586
1587 if (sample_type & PERF_SAMPLE_STREAM_ID)
1588 size += sizeof(data->stream_id);
1589
1590 if (sample_type & PERF_SAMPLE_CPU)
1591 size += sizeof(data->cpu) * 2;
Adrian Hunter75562572013-08-27 11:23:09 +03001592
1593 if (sample_type & PERF_SAMPLE_IDENTIFIER)
1594 size += sizeof(data->id);
Arnaldo Carvalho de Melo81e36bf2011-11-11 22:28:50 -02001595out:
1596 return size;
1597}
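/*
 * Worked example for the sizing above: with sample_id_all set and
 * sample_type = TID | TIME | ID | CPU, the trailer is
 *
 *	8 (pid+tid) + 8 (time) + 8 (id) + 8 (cpu+res) = 32 bytes
 *
 * appended to each non-sample event.
 */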
1598
Arnaldo Carvalho de Melo0c21f732012-08-14 16:42:15 -03001599bool perf_evlist__valid_sample_id_all(struct perf_evlist *evlist)
Arnaldo Carvalho de Meloc2a70652011-06-02 11:04:54 -03001600{
Arnaldo Carvalho de Melo0c21f732012-08-14 16:42:15 -03001601 struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;
Arnaldo Carvalho de Meloc2a70652011-06-02 11:04:54 -03001602
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03001603 evlist__for_each_entry_continue(evlist, pos) {
Arnaldo Carvalho de Meloc2a70652011-06-02 11:04:54 -03001604 if (first->attr.sample_id_all != pos->attr.sample_id_all)
1605 return false;
1606 }
1607
1608 return true;
Frederic Weisbecker74429962011-05-21 17:49:00 +02001609}
1610
Arnaldo Carvalho de Melo0c21f732012-08-14 16:42:15 -03001611bool perf_evlist__sample_id_all(struct perf_evlist *evlist)
Frederic Weisbecker74429962011-05-21 17:49:00 +02001612{
Arnaldo Carvalho de Melo0c21f732012-08-14 16:42:15 -03001613 struct perf_evsel *first = perf_evlist__first(evlist);
Arnaldo Carvalho de Meloc2a70652011-06-02 11:04:54 -03001614 return first->attr.sample_id_all;
Frederic Weisbecker74429962011-05-21 17:49:00 +02001615}
Arnaldo Carvalho de Melo81cce8d2011-10-05 19:11:32 -03001616
1617void perf_evlist__set_selected(struct perf_evlist *evlist,
1618 struct perf_evsel *evsel)
1619{
1620 evlist->selected = evsel;
1621}
Arnaldo Carvalho de Melo727ab042011-10-25 10:42:19 -02001622
Namhyung Kima74b4b62013-03-15 14:48:48 +09001623void perf_evlist__close(struct perf_evlist *evlist)
1624{
1625 struct perf_evsel *evsel;
1626 int ncpus = cpu_map__nr(evlist->cpus);
1627 int nthreads = thread_map__nr(evlist->threads);
1628
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03001629 evlist__for_each_entry_reverse(evlist, evsel) {
Arnaldo Carvalho de Melo18ef15c2016-10-03 11:07:24 -03001630 int n = evsel->cpus ? evsel->cpus->nr : ncpus;
Stephane Eranian8ad92192014-01-17 16:34:06 +01001631 perf_evsel__close(evsel, n, nthreads);
1632 }
Namhyung Kima74b4b62013-03-15 14:48:48 +09001633}
1634
Arnaldo Carvalho de Melo4112eb12014-10-10 15:55:15 -03001635static int perf_evlist__create_syswide_maps(struct perf_evlist *evlist)
1636{
Adrian Hunter8c0498b2015-09-08 10:59:00 +03001637 struct cpu_map *cpus;
1638 struct thread_map *threads;
Arnaldo Carvalho de Melo4112eb12014-10-10 15:55:15 -03001639 int err = -ENOMEM;
1640
1641 /*
1642 * Try reading /sys/devices/system/cpu/online to get
1643 * an all cpus map.
1644 *
1645 * FIXME: -ENOMEM is the best we can do here, the cpu_map
1646 * code needs an overhaul to properly forward the
1647 * error, and we may not want to do that fallback to a
1648 * default cpu identity map :-\
1649 */
Adrian Hunter8c0498b2015-09-08 10:59:00 +03001650 cpus = cpu_map__new(NULL);
1651 if (!cpus)
Arnaldo Carvalho de Melo4112eb12014-10-10 15:55:15 -03001652 goto out;
1653
Adrian Hunter8c0498b2015-09-08 10:59:00 +03001654 threads = thread_map__new_dummy();
1655 if (!threads)
1656 goto out_put;
Arnaldo Carvalho de Melo4112eb12014-10-10 15:55:15 -03001657
Adrian Hunter8c0498b2015-09-08 10:59:00 +03001658	perf_evlist__set_maps(evlist, cpus, threads);
	err = 0;
Arnaldo Carvalho de Melo4112eb12014-10-10 15:55:15 -03001659out:
1660 return err;
Adrian Hunter8c0498b2015-09-08 10:59:00 +03001661out_put:
1662 cpu_map__put(cpus);
Arnaldo Carvalho de Melo4112eb12014-10-10 15:55:15 -03001663 goto out;
1664}
1665
Jiri Olsa6a4bb042012-08-08 12:22:36 +02001666int perf_evlist__open(struct perf_evlist *evlist)
Arnaldo Carvalho de Melo727ab042011-10-25 10:42:19 -02001667{
Jiri Olsa6a4bb042012-08-08 12:22:36 +02001668 struct perf_evsel *evsel;
Namhyung Kima74b4b62013-03-15 14:48:48 +09001669 int err;
Arnaldo Carvalho de Melo727ab042011-10-25 10:42:19 -02001670
Arnaldo Carvalho de Melo4112eb12014-10-10 15:55:15 -03001671 /*
1672 * Default: one fd per CPU, all threads, aka systemwide
 1673	 * because sys_perf_event_open(cpu = -1, thread = -1) is EINVAL
1674 */
1675 if (evlist->threads == NULL && evlist->cpus == NULL) {
1676 err = perf_evlist__create_syswide_maps(evlist);
1677 if (err < 0)
1678 goto out_err;
1679 }
1680
Adrian Hunter733cd2f2013-09-06 22:40:11 +03001681 perf_evlist__update_id_pos(evlist);
1682
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03001683 evlist__for_each_entry(evlist, evsel) {
Adrian Hunter23df7f72016-01-07 10:13:59 +01001684 err = perf_evsel__open(evsel, evsel->cpus, evsel->threads);
Arnaldo Carvalho de Melo727ab042011-10-25 10:42:19 -02001685 if (err < 0)
1686 goto out_err;
1687 }
1688
1689 return 0;
1690out_err:
Namhyung Kima74b4b62013-03-15 14:48:48 +09001691 perf_evlist__close(evlist);
Namhyung Kim41c21a62012-02-23 12:13:36 +09001692 errno = -err;
Arnaldo Carvalho de Melo727ab042011-10-25 10:42:19 -02001693 return err;
1694}
Arnaldo Carvalho de Melo35b9d882011-11-09 08:47:15 -02001695
Arnaldo Carvalho de Melo602ad872013-11-12 16:46:16 -03001696int perf_evlist__prepare_workload(struct perf_evlist *evlist, struct target *target,
Namhyung Kim55e162e2013-03-11 16:43:17 +09001697 const char *argv[], bool pipe_output,
Arnaldo Carvalho de Melo735f7e02014-01-03 14:56:49 -03001698 void (*exec_error)(int signo, siginfo_t *info, void *ucontext))
Arnaldo Carvalho de Melo35b9d882011-11-09 08:47:15 -02001699{
1700 int child_ready_pipe[2], go_pipe[2];
1701 char bf;
1702
1703 if (pipe(child_ready_pipe) < 0) {
1704 perror("failed to create 'ready' pipe");
1705 return -1;
1706 }
1707
1708 if (pipe(go_pipe) < 0) {
1709 perror("failed to create 'go' pipe");
1710 goto out_close_ready_pipe;
1711 }
1712
1713 evlist->workload.pid = fork();
1714 if (evlist->workload.pid < 0) {
1715 perror("failed to fork");
1716 goto out_close_pipes;
1717 }
1718
1719 if (!evlist->workload.pid) {
Arnaldo Carvalho de Melo5f1c4222014-07-28 12:39:50 -03001720 int ret;
1721
Namhyung Kim119fa3c2013-03-11 16:43:16 +09001722 if (pipe_output)
Arnaldo Carvalho de Melo35b9d882011-11-09 08:47:15 -02001723 dup2(2, 1);
1724
David Ahern0817df02013-05-25 17:50:39 -06001725 signal(SIGTERM, SIG_DFL);
1726
Arnaldo Carvalho de Melo35b9d882011-11-09 08:47:15 -02001727 close(child_ready_pipe[0]);
1728 close(go_pipe[1]);
1729 fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);
1730
1731 /*
Arnaldo Carvalho de Melo35b9d882011-11-09 08:47:15 -02001732 * Tell the parent we're ready to go
1733 */
1734 close(child_ready_pipe[1]);
1735
1736 /*
1737 * Wait until the parent tells us to go.
1738 */
Arnaldo Carvalho de Melo5f1c4222014-07-28 12:39:50 -03001739 ret = read(go_pipe[0], &bf, 1);
1740 /*
1741 * The parent will ask for the execvp() to be performed by
1742 * writing exactly one byte, in workload.cork_fd, usually via
1743 * perf_evlist__start_workload().
1744 *
Arnaldo Carvalho de Melo20f86fc2015-02-03 13:29:05 -03001745 * For cancelling the workload without actually running it,
Arnaldo Carvalho de Melo5f1c4222014-07-28 12:39:50 -03001746 * the parent will just close workload.cork_fd, without writing
1747 * anything, i.e. read will return zero and we just exit()
1748 * here.
1749 */
1750 if (ret != 1) {
1751 if (ret == -1)
1752 perror("unable to read pipe");
1753 exit(ret);
1754 }
Arnaldo Carvalho de Melo35b9d882011-11-09 08:47:15 -02001755
1756 execvp(argv[0], (char **)argv);
1757
Arnaldo Carvalho de Melo735f7e02014-01-03 14:56:49 -03001758 if (exec_error) {
Arnaldo Carvalho de Melof33cbe72014-01-02 15:11:25 -03001759 union sigval val;
1760
1761 val.sival_int = errno;
1762 if (sigqueue(getppid(), SIGUSR1, val))
1763 perror(argv[0]);
1764 } else
1765 perror(argv[0]);
Arnaldo Carvalho de Melo35b9d882011-11-09 08:47:15 -02001766 exit(-1);
1767 }
1768
Arnaldo Carvalho de Melo735f7e02014-01-03 14:56:49 -03001769 if (exec_error) {
1770 struct sigaction act = {
1771 .sa_flags = SA_SIGINFO,
1772 .sa_sigaction = exec_error,
1773 };
1774 sigaction(SIGUSR1, &act, NULL);
1775 }
1776
Arnaldo Carvalho de Melo1aaf63b2014-10-10 14:29:49 -03001777 if (target__none(target)) {
1778 if (evlist->threads == NULL) {
 1779			fprintf(stderr, "FATAL: evlist->threads needs to be set at this point (%s:%d).\n",
1780 __func__, __LINE__);
1781 goto out_close_pipes;
1782 }
Jiri Olsae13798c2015-06-23 00:36:02 +02001783 thread_map__set_pid(evlist->threads, 0, evlist->workload.pid);
Arnaldo Carvalho de Melo1aaf63b2014-10-10 14:29:49 -03001784 }
Arnaldo Carvalho de Melo35b9d882011-11-09 08:47:15 -02001785
1786 close(child_ready_pipe[1]);
1787 close(go_pipe[0]);
1788 /*
1789 * wait for child to settle
1790 */
1791 if (read(child_ready_pipe[0], &bf, 1) == -1) {
1792 perror("unable to read pipe");
1793 goto out_close_pipes;
1794 }
1795
Namhyung Kimbcf31452013-06-26 16:14:15 +09001796 fcntl(go_pipe[1], F_SETFD, FD_CLOEXEC);
Arnaldo Carvalho de Melo35b9d882011-11-09 08:47:15 -02001797 evlist->workload.cork_fd = go_pipe[1];
1798 close(child_ready_pipe[0]);
1799 return 0;
1800
1801out_close_pipes:
1802 close(go_pipe[0]);
1803 close(go_pipe[1]);
1804out_close_ready_pipe:
1805 close(child_ready_pipe[0]);
1806 close(child_ready_pipe[1]);
1807 return -1;
1808}
1809
1810int perf_evlist__start_workload(struct perf_evlist *evlist)
1811{
1812 if (evlist->workload.cork_fd > 0) {
David Ahernb3824402013-07-02 13:27:21 -06001813 char bf = 0;
Namhyung Kimbcf31452013-06-26 16:14:15 +09001814 int ret;
Arnaldo Carvalho de Melo35b9d882011-11-09 08:47:15 -02001815 /*
1816 * Remove the cork, let it rip!
1817 */
Namhyung Kimbcf31452013-06-26 16:14:15 +09001818 ret = write(evlist->workload.cork_fd, &bf, 1);
1819 if (ret < 0)
Soramichi Akiyamae978be92017-01-10 10:41:00 -03001820 perror("unable to write to pipe");
Namhyung Kimbcf31452013-06-26 16:14:15 +09001821
1822 close(evlist->workload.cork_fd);
1823 return ret;
Arnaldo Carvalho de Melo35b9d882011-11-09 08:47:15 -02001824 }
1825
1826 return 0;
1827}
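/*
 * The intended pairing, sketched with error handling elided: prepare
 * the workload before the events are opened, uncork it only after
 * everything is set up:
 *
 *	perf_evlist__prepare_workload(evlist, &target, argv, false, NULL);
 *	perf_evlist__open(evlist);
 *	perf_evlist__mmap(evlist, opts->mmap_pages, false);
 *	perf_evlist__enable(evlist);
 *	perf_evlist__start_workload(evlist);
 */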
Arnaldo Carvalho de Melocb0b29e2012-08-02 11:42:57 -03001828
Arnaldo Carvalho de Meloa3f698f2012-08-02 12:23:46 -03001829int perf_evlist__parse_sample(struct perf_evlist *evlist, union perf_event *event,
Arnaldo Carvalho de Melo0807d2d2012-09-26 12:48:18 -03001830 struct perf_sample *sample)
Arnaldo Carvalho de Melocb0b29e2012-08-02 11:42:57 -03001831{
Adrian Hunter75562572013-08-27 11:23:09 +03001832 struct perf_evsel *evsel = perf_evlist__event2evsel(evlist, event);
1833
1834 if (!evsel)
1835 return -EFAULT;
Arnaldo Carvalho de Melo0807d2d2012-09-26 12:48:18 -03001836 return perf_evsel__parse_sample(evsel, event, sample);
Arnaldo Carvalho de Melocb0b29e2012-08-02 11:42:57 -03001837}
Arnaldo Carvalho de Melo78f067b2012-09-06 14:54:11 -03001838
1839size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp)
1840{
1841 struct perf_evsel *evsel;
1842 size_t printed = 0;
1843
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03001844 evlist__for_each_entry(evlist, evsel) {
Arnaldo Carvalho de Melo78f067b2012-09-06 14:54:11 -03001845 printed += fprintf(fp, "%s%s", evsel->idx ? ", " : "",
1846 perf_evsel__name(evsel));
1847 }
1848
Davidlohr Buesob2222132013-11-12 22:24:24 -08001849 return printed + fprintf(fp, "\n");
Arnaldo Carvalho de Melo78f067b2012-09-06 14:54:11 -03001850}
Arnaldo Carvalho de Melo6ef068c2013-10-17 12:07:58 -03001851
Arnaldo Carvalho de Melod9aade7f2016-02-18 13:34:09 -03001852int perf_evlist__strerror_open(struct perf_evlist *evlist,
Arnaldo Carvalho de Meloa8f23d82013-10-17 17:38:29 -03001853 int err, char *buf, size_t size)
1854{
1855 int printed, value;
Arnaldo Carvalho de Meloc8b5f2c2016-07-06 11:56:20 -03001856 char sbuf[STRERR_BUFSIZE], *emsg = str_error_r(err, sbuf, sizeof(sbuf));
Arnaldo Carvalho de Meloa8f23d82013-10-17 17:38:29 -03001857
1858 switch (err) {
1859 case EACCES:
1860 case EPERM:
1861 printed = scnprintf(buf, size,
1862 "Error:\t%s.\n"
1863 "Hint:\tCheck /proc/sys/kernel/perf_event_paranoid setting.", emsg);
1864
Adrian Hunter1a472452013-12-11 14:36:23 +02001865 value = perf_event_paranoid();
Arnaldo Carvalho de Meloa8f23d82013-10-17 17:38:29 -03001866
1867 printed += scnprintf(buf + printed, size - printed, "\nHint:\t");
1868
1869 if (value >= 2) {
1870 printed += scnprintf(buf + printed, size - printed,
1871 "For your workloads it needs to be <= 1\nHint:\t");
1872 }
1873 printed += scnprintf(buf + printed, size - printed,
Arnaldo Carvalho de Melo5229e362014-06-10 17:18:54 -03001874 "For system wide tracing it needs to be set to -1.\n");
Arnaldo Carvalho de Meloa8f23d82013-10-17 17:38:29 -03001875
1876 printed += scnprintf(buf + printed, size - printed,
Arnaldo Carvalho de Melo5229e362014-06-10 17:18:54 -03001877 "Hint:\tTry: 'sudo sh -c \"echo -1 > /proc/sys/kernel/perf_event_paranoid\"'\n"
1878 "Hint:\tThe current value is %d.", value);
Arnaldo Carvalho de Meloa8f23d82013-10-17 17:38:29 -03001879 break;
Arnaldo Carvalho de Melod9aade7f2016-02-18 13:34:09 -03001880 case EINVAL: {
1881 struct perf_evsel *first = perf_evlist__first(evlist);
1882 int max_freq;
1883
1884 if (sysctl__read_int("kernel/perf_event_max_sample_rate", &max_freq) < 0)
1885 goto out_default;
1886
1887 if (first->attr.sample_freq < (u64)max_freq)
1888 goto out_default;
1889
1890 printed = scnprintf(buf, size,
1891 "Error:\t%s.\n"
1892 "Hint:\tCheck /proc/sys/kernel/perf_event_max_sample_rate.\n"
1893 "Hint:\tThe current value is %d and %" PRIu64 " is being requested.",
1894 emsg, max_freq, first->attr.sample_freq);
1895 break;
1896 }
Arnaldo Carvalho de Meloa8f23d82013-10-17 17:38:29 -03001897 default:
Arnaldo Carvalho de Melod9aade7f2016-02-18 13:34:09 -03001898out_default:
Arnaldo Carvalho de Meloa8f23d82013-10-17 17:38:29 -03001899 scnprintf(buf, size, "%s", emsg);
1900 break;
1901 }
1902
1903 return 0;
1904}
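/*
 * Callers usually funnel the errno from a failed perf_evlist__open()
 * through here, e.g. (sketch):
 *
 *	char errbuf[BUFSIZ];
 *
 *	perf_evlist__strerror_open(evlist, errno, errbuf, sizeof(errbuf));
 *	ui__error("%s\n", errbuf);
 */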
Adrian Huntera025e4f2013-12-11 14:36:35 +02001905
Arnaldo Carvalho de Melo956fa572014-12-11 18:03:01 -03001906int perf_evlist__strerror_mmap(struct perf_evlist *evlist, int err, char *buf, size_t size)
1907{
Arnaldo Carvalho de Meloc8b5f2c2016-07-06 11:56:20 -03001908 char sbuf[STRERR_BUFSIZE], *emsg = str_error_r(err, sbuf, sizeof(sbuf));
Arnaldo Carvalho de Meloe965bea2014-12-12 16:25:33 -03001909 int pages_attempted = evlist->mmap_len / 1024, pages_max_per_user, printed = 0;
Arnaldo Carvalho de Melo956fa572014-12-11 18:03:01 -03001910
1911 switch (err) {
1912 case EPERM:
Arnaldo Carvalho de Meloe5d4a292014-12-12 15:59:51 -03001913 sysctl__read_int("kernel/perf_event_mlock_kb", &pages_max_per_user);
Arnaldo Carvalho de Meloe965bea2014-12-12 16:25:33 -03001914 printed += scnprintf(buf + printed, size - printed,
1915 "Error:\t%s.\n"
Arnaldo Carvalho de Melo956fa572014-12-11 18:03:01 -03001916 "Hint:\tCheck /proc/sys/kernel/perf_event_mlock_kb (%d kB) setting.\n"
Arnaldo Carvalho de Meloe965bea2014-12-12 16:25:33 -03001917 "Hint:\tTried using %zd kB.\n",
Arnaldo Carvalho de Meloe5d4a292014-12-12 15:59:51 -03001918 emsg, pages_max_per_user, pages_attempted);
Arnaldo Carvalho de Meloe965bea2014-12-12 16:25:33 -03001919
1920 if (pages_attempted >= pages_max_per_user) {
1921 printed += scnprintf(buf + printed, size - printed,
1922 "Hint:\tTry 'sudo sh -c \"echo %d > /proc/sys/kernel/perf_event_mlock_kb\"', or\n",
1923 pages_max_per_user + pages_attempted);
1924 }
1925
1926 printed += scnprintf(buf + printed, size - printed,
1927 "Hint:\tTry using a smaller -m/--mmap-pages value.");
Arnaldo Carvalho de Melo956fa572014-12-11 18:03:01 -03001928 break;
1929 default:
1930 scnprintf(buf, size, "%s", emsg);
1931 break;
1932 }
1933
1934 return 0;
1935}
1936
Adrian Huntera025e4f2013-12-11 14:36:35 +02001937void perf_evlist__to_front(struct perf_evlist *evlist,
1938 struct perf_evsel *move_evsel)
1939{
1940 struct perf_evsel *evsel, *n;
1941 LIST_HEAD(move);
1942
1943 if (move_evsel == perf_evlist__first(evlist))
1944 return;
1945
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03001946 evlist__for_each_entry_safe(evlist, n, evsel) {
Adrian Huntera025e4f2013-12-11 14:36:35 +02001947 if (evsel->leader == move_evsel->leader)
1948 list_move_tail(&evsel->node, &move);
1949 }
1950
1951 list_splice(&move, &evlist->entries);
1952}
Adrian Hunter60b08962014-07-31 09:00:52 +03001953
1954void perf_evlist__set_tracking_event(struct perf_evlist *evlist,
1955 struct perf_evsel *tracking_evsel)
1956{
1957 struct perf_evsel *evsel;
1958
1959 if (tracking_evsel->tracking)
1960 return;
1961
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03001962 evlist__for_each_entry(evlist, evsel) {
Adrian Hunter60b08962014-07-31 09:00:52 +03001963 if (evsel != tracking_evsel)
1964 evsel->tracking = false;
1965 }
1966
1967 tracking_evsel->tracking = true;
1968}
Wang Nan7630b3e2016-02-22 09:10:33 +00001969
1970struct perf_evsel *
1971perf_evlist__find_evsel_by_str(struct perf_evlist *evlist,
1972 const char *str)
1973{
1974 struct perf_evsel *evsel;
1975
Arnaldo Carvalho de Meloe5cadb92016-06-23 11:26:15 -03001976 evlist__for_each_entry(evlist, evsel) {
Wang Nan7630b3e2016-02-22 09:10:33 +00001977 if (!evsel->name)
1978 continue;
1979 if (strcmp(str, evsel->name) == 0)
1980 return evsel;
1981 }
1982
1983 return NULL;
1984}
Wang Nan54cc54d2016-07-14 08:34:42 +00001985
1986void perf_evlist__toggle_bkw_mmap(struct perf_evlist *evlist,
1987 enum bkw_mmap_state state)
1988{
1989 enum bkw_mmap_state old_state = evlist->bkw_mmap_state;
1990 enum action {
1991 NONE,
1992 PAUSE,
1993 RESUME,
1994 } action = NONE;
1995
1996 if (!evlist->backward_mmap)
1997 return;
1998
1999 switch (old_state) {
2000 case BKW_MMAP_NOTREADY: {
2001 if (state != BKW_MMAP_RUNNING)
 2002			goto state_err;
2003 break;
2004 }
2005 case BKW_MMAP_RUNNING: {
2006 if (state != BKW_MMAP_DATA_PENDING)
2007 goto state_err;
2008 action = PAUSE;
2009 break;
2010 }
2011 case BKW_MMAP_DATA_PENDING: {
2012 if (state != BKW_MMAP_EMPTY)
2013 goto state_err;
2014 break;
2015 }
2016 case BKW_MMAP_EMPTY: {
2017 if (state != BKW_MMAP_RUNNING)
2018 goto state_err;
2019 action = RESUME;
2020 break;
2021 }
2022 default:
 2023		WARN_ONCE(1, "Shouldn't get here\n");
2024 }
2025
2026 evlist->bkw_mmap_state = state;
2027
2028 switch (action) {
2029 case PAUSE:
2030 perf_evlist__pause(evlist);
2031 break;
2032 case RESUME:
2033 perf_evlist__resume(evlist);
2034 break;
2035 case NONE:
2036 default:
2037 break;
2038 }
2039
2040state_err:
2041 return;
2042}
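/*
 * Legal transitions, as encoded above:
 *
 *	NOTREADY     -> RUNNING		(no ring buffer action)
 *	RUNNING      -> DATA_PENDING	(pause the backward ring buffers)
 *	DATA_PENDING -> EMPTY		(no ring buffer action)
 *	EMPTY        -> RUNNING		(resume the backward ring buffers)
 *
 * An illegal request from a known state jumps to state_err and leaves
 * bkw_mmap_state untouched; only an unknown old state trips the
 * WARN_ONCE().
 */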