/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */

#include <byteswap.h>
#include <linux/bitops.h>
#include <api/fs/debugfs.h>
#include <traceevent/event-parse.h>
#include <linux/hw_breakpoint.h>
#include <linux/perf_event.h>
#include <sys/resource.h>
#include "asm/bug.h"
#include "callchain.h"
#include "cgroup.h"
#include "evsel.h"
#include "evlist.h"
#include "util.h"
#include "cpumap.h"
#include "thread_map.h"
#include "target.h"
#include "perf_regs.h"
#include "debug.h"
#include "trace-event.h"
#include "stat.h"

static struct {
	bool sample_id_all;
	bool exclude_guest;
	bool mmap2;
	bool cloexec;
	bool clockid;
	bool clockid_wrong;
} perf_missing_features;

static clockid_t clockid;

static int perf_evsel__no_extra_init(struct perf_evsel *evsel __maybe_unused)
{
	return 0;
}

static void perf_evsel__no_extra_fini(struct perf_evsel *evsel __maybe_unused)
{
}

static struct {
	size_t	size;
	int	(*init)(struct perf_evsel *evsel);
	void	(*fini)(struct perf_evsel *evsel);
} perf_evsel__object = {
	.size = sizeof(struct perf_evsel),
	.init = perf_evsel__no_extra_init,
	.fini = perf_evsel__no_extra_fini,
};

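/*
 * Tools that need extra per-event state can embed a struct perf_evsel at the
 * start of a larger structure and register the enlarged size plus optional
 * init/fini hooks here; perf_evsel__new_idx() then allocates object_size
 * bytes and the hooks run from perf_evsel__init()/perf_evsel__exit().
 * Illustrative (hypothetical) caller:
 *
 *	struct my_evsel { struct perf_evsel evsel; int my_state; };
 *	perf_evsel__object_config(sizeof(struct my_evsel), my_init, my_fini);
 */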
int perf_evsel__object_config(size_t object_size,
			      int (*init)(struct perf_evsel *evsel),
			      void (*fini)(struct perf_evsel *evsel))
{

	if (object_size == 0)
		goto set_methods;

	if (perf_evsel__object.size > object_size)
		return -EINVAL;

	perf_evsel__object.size = object_size;

set_methods:
	if (init != NULL)
		perf_evsel__object.init = init;

	if (fini != NULL)
		perf_evsel__object.fini = fini;

	return 0;
}

#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))

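/*
 * Each bit set in sample_type within PERF_SAMPLE_MASK contributes one u64 to
 * the fixed-size part of a sample record, so the sample size is simply the
 * population count of those bits times sizeof(u64).
 */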
int __perf_evsel__sample_size(u64 sample_type)
{
	u64 mask = sample_type & PERF_SAMPLE_MASK;
	int size = 0;
	int i;

	for (i = 0; i < 64; i++) {
		if (mask & (1ULL << i))
			size++;
	}

	size *= sizeof(u64);

	return size;
}

/**
 * __perf_evsel__calc_id_pos - calculate id_pos.
 * @sample_type: sample type
 *
 * This function returns the position of the event id (PERF_SAMPLE_ID or
 * PERF_SAMPLE_IDENTIFIER) in a sample event i.e. in the array of struct
 * sample_event.
 */
static int __perf_evsel__calc_id_pos(u64 sample_type)
{
	int idx = 0;

	if (sample_type & PERF_SAMPLE_IDENTIFIER)
		return 0;

	if (!(sample_type & PERF_SAMPLE_ID))
		return -1;

	if (sample_type & PERF_SAMPLE_IP)
		idx += 1;

	if (sample_type & PERF_SAMPLE_TID)
		idx += 1;

	if (sample_type & PERF_SAMPLE_TIME)
		idx += 1;

	if (sample_type & PERF_SAMPLE_ADDR)
		idx += 1;

	return idx;
}

/**
 * __perf_evsel__calc_is_pos - calculate is_pos.
 * @sample_type: sample type
 *
 * This function returns the position (counting backwards) of the event id
 * (PERF_SAMPLE_ID or PERF_SAMPLE_IDENTIFIER) in a non-sample event i.e. if
 * sample_id_all is used there is an id sample appended to non-sample events.
 */
static int __perf_evsel__calc_is_pos(u64 sample_type)
{
	int idx = 1;

	if (sample_type & PERF_SAMPLE_IDENTIFIER)
		return 1;

	if (!(sample_type & PERF_SAMPLE_ID))
		return -1;

	if (sample_type & PERF_SAMPLE_CPU)
		idx += 1;

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		idx += 1;

	return idx;
}

void perf_evsel__calc_id_pos(struct perf_evsel *evsel)
{
	evsel->id_pos = __perf_evsel__calc_id_pos(evsel->attr.sample_type);
	evsel->is_pos = __perf_evsel__calc_is_pos(evsel->attr.sample_type);
}

void __perf_evsel__set_sample_bit(struct perf_evsel *evsel,
				  enum perf_event_sample_format bit)
{
	if (!(evsel->attr.sample_type & bit)) {
		evsel->attr.sample_type |= bit;
		evsel->sample_size += sizeof(u64);
		perf_evsel__calc_id_pos(evsel);
	}
}

void __perf_evsel__reset_sample_bit(struct perf_evsel *evsel,
				    enum perf_event_sample_format bit)
{
	if (evsel->attr.sample_type & bit) {
		evsel->attr.sample_type &= ~bit;
		evsel->sample_size -= sizeof(u64);
		perf_evsel__calc_id_pos(evsel);
	}
}

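/*
 * Ask the kernel to tag samples with an event ID: prefer the fixed-position
 * PERF_SAMPLE_IDENTIFIER when the caller says it is available, otherwise
 * fall back to PERF_SAMPLE_ID, and request PERF_FORMAT_ID in either case.
 */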
void perf_evsel__set_sample_id(struct perf_evsel *evsel,
			       bool can_sample_identifier)
{
	if (can_sample_identifier) {
		perf_evsel__reset_sample_bit(evsel, ID);
		perf_evsel__set_sample_bit(evsel, IDENTIFIER);
	} else {
		perf_evsel__set_sample_bit(evsel, ID);
	}
	evsel->attr.read_format |= PERF_FORMAT_ID;
}

void perf_evsel__init(struct perf_evsel *evsel,
		      struct perf_event_attr *attr, int idx)
{
	evsel->idx	   = idx;
	evsel->tracking	   = !idx;
	evsel->attr	   = *attr;
	evsel->leader	   = evsel;
	evsel->unit	   = "";
	evsel->scale	   = 1.0;
	INIT_LIST_HEAD(&evsel->node);
	perf_evsel__object.init(evsel);
	evsel->sample_size = __perf_evsel__sample_size(attr->sample_type);
	perf_evsel__calc_id_pos(evsel);
}

struct perf_evsel *perf_evsel__new_idx(struct perf_event_attr *attr, int idx)
{
	struct perf_evsel *evsel = zalloc(perf_evsel__object.size);

	if (evsel != NULL)
		perf_evsel__init(evsel, attr, idx);

	return evsel;
}

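/*
 * Tracepoint constructor: names the evsel "sys:name", resolves the
 * tracepoint format (whose id becomes attr.config), and samples RAW, TIME,
 * CPU and PERIOD with a period of 1, i.e. every hit.
 */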
struct perf_evsel *perf_evsel__newtp_idx(const char *sys, const char *name, int idx)
{
	struct perf_evsel *evsel = zalloc(perf_evsel__object.size);

	if (evsel != NULL) {
		struct perf_event_attr attr = {
			.type	       = PERF_TYPE_TRACEPOINT,
			.sample_type   = (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME |
					  PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD),
		};

		if (asprintf(&evsel->name, "%s:%s", sys, name) < 0)
			goto out_free;

		evsel->tp_format = trace_event__tp_format(sys, name);
		if (evsel->tp_format == NULL)
			goto out_free;

		event_attr_init(&attr);
		attr.config = evsel->tp_format->id;
		attr.sample_period = 1;
		perf_evsel__init(evsel, &attr, idx);
	}

	return evsel;

out_free:
	zfree(&evsel->name);
	free(evsel);
	return NULL;
}

const char *perf_evsel__hw_names[PERF_COUNT_HW_MAX] = {
	"cycles",
	"instructions",
	"cache-references",
	"cache-misses",
	"branches",
	"branch-misses",
	"bus-cycles",
	"stalled-cycles-frontend",
	"stalled-cycles-backend",
	"ref-cycles",
};

static const char *__perf_evsel__hw_name(u64 config)
{
	if (config < PERF_COUNT_HW_MAX && perf_evsel__hw_names[config])
		return perf_evsel__hw_names[config];

	return "unknown-hardware";
}

static int perf_evsel__add_modifiers(struct perf_evsel *evsel, char *bf, size_t size)
{
	int colon = 0, r = 0;
	struct perf_event_attr *attr = &evsel->attr;
	bool exclude_guest_default = false;

#define MOD_PRINT(context, mod)	do {					\
		if (!attr->exclude_##context) {				\
			if (!colon) colon = ++r;			\
			r += scnprintf(bf + r, size - r, "%c", mod);	\
		} } while(0)

	if (attr->exclude_kernel || attr->exclude_user || attr->exclude_hv) {
		MOD_PRINT(kernel, 'k');
		MOD_PRINT(user, 'u');
		MOD_PRINT(hv, 'h');
		exclude_guest_default = true;
	}

	if (attr->precise_ip) {
		if (!colon)
			colon = ++r;
		r += scnprintf(bf + r, size - r, "%.*s", attr->precise_ip, "ppp");
		exclude_guest_default = true;
	}

	if (attr->exclude_host || attr->exclude_guest == exclude_guest_default) {
		MOD_PRINT(host, 'H');
		MOD_PRINT(guest, 'G');
	}
#undef MOD_PRINT
	if (colon)
		bf[colon - 1] = ':';
	return r;
}

static int perf_evsel__hw_name(struct perf_evsel *evsel, char *bf, size_t size)
{
	int r = scnprintf(bf, size, "%s", __perf_evsel__hw_name(evsel->attr.config));
	return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
}

const char *perf_evsel__sw_names[PERF_COUNT_SW_MAX] = {
	"cpu-clock",
	"task-clock",
	"page-faults",
	"context-switches",
	"cpu-migrations",
	"minor-faults",
	"major-faults",
	"alignment-faults",
	"emulation-faults",
	"dummy",
};

static const char *__perf_evsel__sw_name(u64 config)
{
	if (config < PERF_COUNT_SW_MAX && perf_evsel__sw_names[config])
		return perf_evsel__sw_names[config];
	return "unknown-software";
}

static int perf_evsel__sw_name(struct perf_evsel *evsel, char *bf, size_t size)
{
	int r = scnprintf(bf, size, "%s", __perf_evsel__sw_name(evsel->attr.config));
	return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
}

static int __perf_evsel__bp_name(char *bf, size_t size, u64 addr, u64 type)
{
	int r;

	r = scnprintf(bf, size, "mem:0x%" PRIx64 ":", addr);

	if (type & HW_BREAKPOINT_R)
		r += scnprintf(bf + r, size - r, "r");

	if (type & HW_BREAKPOINT_W)
		r += scnprintf(bf + r, size - r, "w");

	if (type & HW_BREAKPOINT_X)
		r += scnprintf(bf + r, size - r, "x");

	return r;
}

static int perf_evsel__bp_name(struct perf_evsel *evsel, char *bf, size_t size)
{
	struct perf_event_attr *attr = &evsel->attr;
	int r = __perf_evsel__bp_name(bf, size, attr->bp_addr, attr->bp_type);
	return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
}

const char *perf_evsel__hw_cache[PERF_COUNT_HW_CACHE_MAX]
				[PERF_EVSEL__MAX_ALIASES] = {
 { "L1-dcache",	"l1-d",		"l1d",		"L1-data",		},
 { "L1-icache",	"l1-i",		"l1i",		"L1-instruction",	},
 { "LLC",	"L2",							},
 { "dTLB",	"d-tlb",	"Data-TLB",				},
 { "iTLB",	"i-tlb",	"Instruction-TLB",			},
 { "branch",	"branches",	"bpu",		"btb",		"bpc",	},
 { "node",								},
};

const char *perf_evsel__hw_cache_op[PERF_COUNT_HW_CACHE_OP_MAX]
				   [PERF_EVSEL__MAX_ALIASES] = {
 { "load",	"loads",	"read",					},
 { "store",	"stores",	"write",				},
 { "prefetch",	"prefetches",	"speculative-read", "speculative-load",	},
};

const char *perf_evsel__hw_cache_result[PERF_COUNT_HW_CACHE_RESULT_MAX]
				       [PERF_EVSEL__MAX_ALIASES] = {
 { "refs",	"Reference",	"ops",		"access",		},
 { "misses",	"miss",							},
};

#define C(x)		PERF_COUNT_HW_CACHE_##x
#define CACHE_READ	(1 << C(OP_READ))
#define CACHE_WRITE	(1 << C(OP_WRITE))
#define CACHE_PREFETCH	(1 << C(OP_PREFETCH))
#define COP(x)		(1 << x)

/*
 * cache operation stat
 * L1I : Read and prefetch only
 * ITLB and BPU : Read-only
 */
static unsigned long perf_evsel__hw_cache_stat[C(MAX)] = {
 [C(L1D)]	= (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
 [C(L1I)]	= (CACHE_READ | CACHE_PREFETCH),
 [C(LL)]	= (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
 [C(DTLB)]	= (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
 [C(ITLB)]	= (CACHE_READ),
 [C(BPU)]	= (CACHE_READ),
 [C(NODE)]	= (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
};

bool perf_evsel__is_cache_op_valid(u8 type, u8 op)
{
	if (perf_evsel__hw_cache_stat[type] & COP(op))
		return true;	/* valid */
	else
		return false;	/* invalid */
}

int __perf_evsel__hw_cache_type_op_res_name(u8 type, u8 op, u8 result,
					    char *bf, size_t size)
{
	if (result) {
		return scnprintf(bf, size, "%s-%s-%s", perf_evsel__hw_cache[type][0],
				 perf_evsel__hw_cache_op[op][0],
				 perf_evsel__hw_cache_result[result][0]);
	}

	return scnprintf(bf, size, "%s-%s", perf_evsel__hw_cache[type][0],
			 perf_evsel__hw_cache_op[op][1]);
}

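/*
 * Decode a PERF_TYPE_HW_CACHE config value: the cache type lives in bits
 * 0-7, the operation in bits 8-15 and the result in bits 16-23, matching
 * the encoding described in the perf_event_attr UAPI documentation.
 */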
static int __perf_evsel__hw_cache_name(u64 config, char *bf, size_t size)
{
	u8 op, result, type = (config >>  0) & 0xff;
	const char *err = "unknown-ext-hardware-cache-type";

	if (type >= PERF_COUNT_HW_CACHE_MAX)
		goto out_err;

	op = (config >>  8) & 0xff;
	err = "unknown-ext-hardware-cache-op";
	if (op >= PERF_COUNT_HW_CACHE_OP_MAX)
		goto out_err;

	result = (config >> 16) & 0xff;
	err = "unknown-ext-hardware-cache-result";
	if (result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		goto out_err;

	err = "invalid-cache";
	if (!perf_evsel__is_cache_op_valid(type, op))
		goto out_err;

	return __perf_evsel__hw_cache_type_op_res_name(type, op, result, bf, size);
out_err:
	return scnprintf(bf, size, "%s", err);
}

static int perf_evsel__hw_cache_name(struct perf_evsel *evsel, char *bf, size_t size)
{
	int ret = __perf_evsel__hw_cache_name(evsel->attr.config, bf, size);
	return ret + perf_evsel__add_modifiers(evsel, bf + ret, size - ret);
}

static int perf_evsel__raw_name(struct perf_evsel *evsel, char *bf, size_t size)
{
	int ret = scnprintf(bf, size, "raw 0x%" PRIx64, evsel->attr.config);
	return ret + perf_evsel__add_modifiers(evsel, bf + ret, size - ret);
}

const char *perf_evsel__name(struct perf_evsel *evsel)
{
	char bf[128];

	if (evsel->name)
		return evsel->name;

	switch (evsel->attr.type) {
	case PERF_TYPE_RAW:
		perf_evsel__raw_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_HARDWARE:
		perf_evsel__hw_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_HW_CACHE:
		perf_evsel__hw_cache_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_SOFTWARE:
		perf_evsel__sw_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_TRACEPOINT:
		scnprintf(bf, sizeof(bf), "%s", "unknown tracepoint");
		break;

	case PERF_TYPE_BREAKPOINT:
		perf_evsel__bp_name(evsel, bf, sizeof(bf));
		break;

	default:
		scnprintf(bf, sizeof(bf), "unknown attr type: %d",
			  evsel->attr.type);
		break;
	}

	evsel->name = strdup(bf);

	return evsel->name ?: "unknown";
}

const char *perf_evsel__group_name(struct perf_evsel *evsel)
{
	return evsel->group_name ?: "anon group";
}

int perf_evsel__group_desc(struct perf_evsel *evsel, char *buf, size_t size)
{
	int ret;
	struct perf_evsel *pos;
	const char *group_name = perf_evsel__group_name(evsel);

	ret = scnprintf(buf, size, "%s", group_name);

	ret += scnprintf(buf + ret, size - ret, " { %s",
			 perf_evsel__name(evsel));

	for_each_group_member(pos, evsel)
		ret += scnprintf(buf + ret, size - ret, ", %s",
				 perf_evsel__name(pos));

	ret += scnprintf(buf + ret, size - ret, " }");

	return ret;
}

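/*
 * Configure callchain collection according to callchain_param.record_mode:
 * LBR call stacks ride on the branch stack sample (user space only), DWARF
 * unwinding needs user registers and a copy of the user stack, and frame
 * pointers are the fallback whenever the requested mode cannot be used.
 */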
static void
perf_evsel__config_callgraph(struct perf_evsel *evsel,
			     struct record_opts *opts)
{
	bool function = perf_evsel__is_function_event(evsel);
	struct perf_event_attr *attr = &evsel->attr;

	perf_evsel__set_sample_bit(evsel, CALLCHAIN);

	if (callchain_param.record_mode == CALLCHAIN_LBR) {
		if (!opts->branch_stack) {
			if (attr->exclude_user) {
				pr_warning("LBR callstack option is only available "
					   "to get user callchain information. "
					   "Falling back to framepointers.\n");
			} else {
				perf_evsel__set_sample_bit(evsel, BRANCH_STACK);
				attr->branch_sample_type = PERF_SAMPLE_BRANCH_USER |
							PERF_SAMPLE_BRANCH_CALL_STACK;
			}
		} else
			pr_warning("Cannot use LBR callstack with branch stack. "
				   "Falling back to framepointers.\n");
	}

	if (callchain_param.record_mode == CALLCHAIN_DWARF) {
		if (!function) {
			perf_evsel__set_sample_bit(evsel, REGS_USER);
			perf_evsel__set_sample_bit(evsel, STACK_USER);
			attr->sample_regs_user = PERF_REGS_MASK;
			attr->sample_stack_user = callchain_param.dump_size;
			attr->exclude_callchain_user = 1;
		} else {
			pr_info("Cannot use DWARF unwind for function trace event,"
				" falling back to framepointers.\n");
		}
	}

	if (function) {
		pr_info("Disabling user space callchains for function trace event.\n");
		attr->exclude_callchain_user = 1;
	}
}

/*
 * The enable_on_exec/disabled value strategy:
 *
 *  1) For any type of traced program:
 *    - all independent events and group leaders are disabled
 *    - all group members are enabled
 *
 *     Group members are ruled by group leaders. They need to
 *     be enabled, because the group scheduling relies on that.
 *
 *  2) For traced programs executed by perf:
 *     - all independent events and group leaders have
 *       enable_on_exec set
 *     - we don't specifically enable or disable any event during
 *       the record command
 *
 *     Independent events and group leaders are initially disabled
 *     and get enabled by exec. Group members are ruled by group
 *     leaders as stated in 1).
 *
 *  3) For traced programs attached by perf (pid/tid):
 *     - we specifically enable or disable all events during
 *       the record command
 *
 *     When attaching events to already running traced programs we
 *     enable/disable events specifically, as there's no
 *     initial traced exec call.
 */
void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts)
{
	struct perf_evsel *leader = evsel->leader;
	struct perf_event_attr *attr = &evsel->attr;
	int track = evsel->tracking;
	bool per_cpu = opts->target.default_per_cpu && !opts->target.per_thread;

	attr->sample_id_all = perf_missing_features.sample_id_all ? 0 : 1;
	attr->inherit	    = !opts->no_inherit;

	perf_evsel__set_sample_bit(evsel, IP);
	perf_evsel__set_sample_bit(evsel, TID);

	if (evsel->sample_read) {
		perf_evsel__set_sample_bit(evsel, READ);

		/*
		 * We need ID even in case of a single event, because
		 * PERF_SAMPLE_READ processes ID-specific data.
		 */
		perf_evsel__set_sample_id(evsel, false);

		/*
		 * Apply the group read format only if we belong to a
		 * group with more than one member.
		 */
		if (leader->nr_members > 1) {
			attr->read_format |= PERF_FORMAT_GROUP;
			attr->inherit = 0;
		}
	}

	/*
	 * We default some events to have a default interval, but keep
	 * it a weak assumption overridable by the user.
	 */
	if (!attr->sample_period || (opts->user_freq != UINT_MAX ||
				     opts->user_interval != ULLONG_MAX)) {
		if (opts->freq) {
			perf_evsel__set_sample_bit(evsel, PERIOD);
			attr->freq		= 1;
			attr->sample_freq	= opts->freq;
		} else {
			attr->sample_period = opts->default_interval;
		}
	}

	/*
	 * Disable sampling for all group members other than the
	 * leader in case the leader 'leads' the sampling.
	 */
	if ((leader != evsel) && leader->sample_read) {
		attr->sample_freq   = 0;
		attr->sample_period = 0;
	}

	if (opts->no_samples)
		attr->sample_freq = 0;

	if (opts->inherit_stat)
		attr->inherit_stat = 1;

	if (opts->sample_address) {
		perf_evsel__set_sample_bit(evsel, ADDR);
		attr->mmap_data = track;
	}

	/*
	 * We don't allow user space callchains for the function trace
	 * event, due to issues with page faults while tracing the page
	 * fault handler and its overall trickiness.
	 */
	if (perf_evsel__is_function_event(evsel))
		evsel->attr.exclude_callchain_user = 1;

	if (callchain_param.enabled && !evsel->no_aux_samples)
		perf_evsel__config_callgraph(evsel, opts);

	if (opts->sample_intr_regs) {
		attr->sample_regs_intr = PERF_REGS_MASK;
		perf_evsel__set_sample_bit(evsel, REGS_INTR);
	}

	if (target__has_cpu(&opts->target))
		perf_evsel__set_sample_bit(evsel, CPU);

	if (opts->period)
		perf_evsel__set_sample_bit(evsel, PERIOD);

	/*
	 * When the user explicitly disabled time, don't force it here.
	 */
	if (opts->sample_time &&
	    (!perf_missing_features.sample_id_all &&
	    (!opts->no_inherit || target__has_cpu(&opts->target) || per_cpu)))
		perf_evsel__set_sample_bit(evsel, TIME);

	if (opts->raw_samples && !evsel->no_aux_samples) {
		perf_evsel__set_sample_bit(evsel, TIME);
		perf_evsel__set_sample_bit(evsel, RAW);
		perf_evsel__set_sample_bit(evsel, CPU);
	}

	if (opts->sample_address)
		perf_evsel__set_sample_bit(evsel, DATA_SRC);

	if (opts->no_buffering) {
		attr->watermark = 0;
		attr->wakeup_events = 1;
	}
	if (opts->branch_stack && !evsel->no_aux_samples) {
		perf_evsel__set_sample_bit(evsel, BRANCH_STACK);
		attr->branch_sample_type = opts->branch_stack;
	}

	if (opts->sample_weight)
		perf_evsel__set_sample_bit(evsel, WEIGHT);

	attr->task  = track;
	attr->mmap  = track;
	attr->mmap2 = track && !perf_missing_features.mmap2;
	attr->comm  = track;

	if (opts->sample_transaction)
		perf_evsel__set_sample_bit(evsel, TRANSACTION);

	if (opts->running_time) {
		evsel->attr.read_format |=
			PERF_FORMAT_TOTAL_TIME_ENABLED |
			PERF_FORMAT_TOTAL_TIME_RUNNING;
	}

	/*
	 * XXX see the function comment above
	 *
	 * Disabling only independent events or group leaders,
	 * keeping group members enabled.
	 */
	if (perf_evsel__is_group_leader(evsel))
		attr->disabled = 1;

	/*
	 * Setting enable_on_exec for independent events and
	 * group leaders for traced programs executed by perf.
	 */
	if (target__none(&opts->target) && perf_evsel__is_group_leader(evsel) &&
	    !opts->initial_delay)
		attr->enable_on_exec = 1;

	if (evsel->immediate) {
		attr->disabled = 0;
		attr->enable_on_exec = 0;
	}

	clockid = opts->clockid;
	if (opts->use_clockid) {
		attr->use_clockid = 1;
		attr->clockid = opts->clockid;
	}
}

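/*
 * Allocate the per-evsel ncpus x nthreads matrix of perf_event_open() file
 * descriptors, initialized to -1; system-wide events keep a single entry per
 * CPU, so the thread dimension collapses to 1.
 */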
static int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	int cpu, thread;

	if (evsel->system_wide)
		nthreads = 1;

	evsel->fd = xyarray__new(ncpus, nthreads, sizeof(int));

	if (evsel->fd) {
		for (cpu = 0; cpu < ncpus; cpu++) {
			for (thread = 0; thread < nthreads; thread++) {
				FD(evsel, cpu, thread) = -1;
			}
		}
	}

	return evsel->fd != NULL ? 0 : -ENOMEM;
}

static int perf_evsel__run_ioctl(struct perf_evsel *evsel, int ncpus, int nthreads,
			  int ioc,  void *arg)
{
	int cpu, thread;

	if (evsel->system_wide)
		nthreads = 1;

	for (cpu = 0; cpu < ncpus; cpu++) {
		for (thread = 0; thread < nthreads; thread++) {
			int fd = FD(evsel, cpu, thread),
			    err = ioctl(fd, ioc, arg);

			if (err)
				return err;
		}
	}

	return 0;
}

int perf_evsel__set_filter(struct perf_evsel *evsel, int ncpus, int nthreads,
			   const char *filter)
{
	return perf_evsel__run_ioctl(evsel, ncpus, nthreads,
				     PERF_EVENT_IOC_SET_FILTER,
				     (void *)filter);
}

int perf_evsel__enable(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	return perf_evsel__run_ioctl(evsel, ncpus, nthreads,
				     PERF_EVENT_IOC_ENABLE,
				     0);
}

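/*
 * Allocate the sample_id/id arrays used to map the event IDs found in the
 * ring buffer back to this evsel; sized like the fd matrix, with the same
 * system-wide collapse of the thread dimension.
 */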
int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	if (ncpus == 0 || nthreads == 0)
		return 0;

	if (evsel->system_wide)
		nthreads = 1;

	evsel->sample_id = xyarray__new(ncpus, nthreads, sizeof(struct perf_sample_id));
	if (evsel->sample_id == NULL)
		return -ENOMEM;

	evsel->id = zalloc(ncpus * nthreads * sizeof(u64));
	if (evsel->id == NULL) {
		xyarray__delete(evsel->sample_id);
		evsel->sample_id = NULL;
		return -ENOMEM;
	}

	return 0;
}

static void perf_evsel__free_fd(struct perf_evsel *evsel)
{
	xyarray__delete(evsel->fd);
	evsel->fd = NULL;
}

static void perf_evsel__free_id(struct perf_evsel *evsel)
{
	xyarray__delete(evsel->sample_id);
	evsel->sample_id = NULL;
	zfree(&evsel->id);
}

void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	int cpu, thread;

	if (evsel->system_wide)
		nthreads = 1;

	for (cpu = 0; cpu < ncpus; cpu++)
		for (thread = 0; thread < nthreads; ++thread) {
			close(FD(evsel, cpu, thread));
			FD(evsel, cpu, thread) = -1;
		}
}

void perf_evsel__exit(struct perf_evsel *evsel)
{
	assert(list_empty(&evsel->node));
	perf_evsel__free_fd(evsel);
	perf_evsel__free_id(evsel);
	close_cgroup(evsel->cgrp);
	cpu_map__put(evsel->cpus);
	thread_map__put(evsel->threads);
	zfree(&evsel->group_name);
	zfree(&evsel->name);
	perf_evsel__object.fini(evsel);
}

void perf_evsel__delete(struct perf_evsel *evsel)
{
	perf_evsel__exit(evsel);
	free(evsel);
}

void perf_evsel__compute_deltas(struct perf_evsel *evsel, int cpu, int thread,
				struct perf_counts_values *count)
{
	struct perf_counts_values tmp;

	if (!evsel->prev_raw_counts)
		return;

	if (cpu == -1) {
		tmp = evsel->prev_raw_counts->aggr;
		evsel->prev_raw_counts->aggr = *count;
	} else {
		tmp = *perf_counts(evsel->prev_raw_counts, cpu, thread);
		*perf_counts(evsel->prev_raw_counts, cpu, thread) = *count;
	}

	count->val = count->val - tmp.val;
	count->ena = count->ena - tmp.ena;
	count->run = count->run - tmp.run;
}

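/*
 * Compensate for multiplexing: when the event was only scheduled for part of
 * the enabled time, scale val by ena/run; *pscaled reports -1 if the event
 * never ran, 1 if the value was scaled, 0 otherwise.
 */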
void perf_counts_values__scale(struct perf_counts_values *count,
			       bool scale, s8 *pscaled)
{
	s8 scaled = 0;

	if (scale) {
		if (count->run == 0) {
			scaled = -1;
			count->val = 0;
		} else if (count->run < count->ena) {
			scaled = 1;
			count->val = (u64)((double) count->val * count->ena / count->run + 0.5);
		}
	} else
		count->ena = count->run = 0;

	if (pscaled)
		*pscaled = scaled;
}

int perf_evsel__read_cb(struct perf_evsel *evsel, int cpu, int thread,
			perf_evsel__read_cb_t cb)
{
	struct perf_counts_values count;

	memset(&count, 0, sizeof(count));

	if (FD(evsel, cpu, thread) < 0)
		return -EINVAL;

	if (readn(FD(evsel, cpu, thread), &count, sizeof(count)) < 0)
		return -errno;

	return cb(evsel, cpu, thread, &count);
}

int perf_evsel__read(struct perf_evsel *evsel, int cpu, int thread,
		     struct perf_counts_values *count)
{
	memset(count, 0, sizeof(*count));

	if (FD(evsel, cpu, thread) < 0)
		return -EINVAL;

	if (readn(FD(evsel, cpu, thread), count, sizeof(*count)) < 0)
		return -errno;

	return 0;
}

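/*
 * Read one counter via its fd into evsel->counts, allocating the counts
 * array on first use; with scale set, three u64s are read (the value plus
 * the time-enabled/time-running fields requested via read_format) so that
 * perf_counts_values__scale() can compensate for multiplexing.
 */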
int __perf_evsel__read_on_cpu(struct perf_evsel *evsel,
			      int cpu, int thread, bool scale)
{
	struct perf_counts_values count;
	size_t nv = scale ? 3 : 1;

	if (FD(evsel, cpu, thread) < 0)
		return -EINVAL;

	if (evsel->counts == NULL && perf_evsel__alloc_counts(evsel, cpu + 1, thread + 1) < 0)
		return -ENOMEM;

	if (readn(FD(evsel, cpu, thread), &count, nv * sizeof(u64)) < 0)
		return -errno;

	perf_evsel__compute_deltas(evsel, cpu, thread, &count);
	perf_counts_values__scale(&count, scale, NULL);
	*perf_counts(evsel->counts, cpu, thread) = count;
	return 0;
}

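/*
 * group_fd argument for sys_perf_event_open(): group leaders (and
 * independent events) pass -1, group members pass the already-open fd of
 * their leader on the same cpu/thread.
 */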
| Jiri Olsa | 6a4bb04 | 2012-08-08 12:22:36 +0200 | [diff] [blame] | 993 | static int get_group_fd(struct perf_evsel *evsel, int cpu, int thread) | 
|  | 994 | { | 
|  | 995 | struct perf_evsel *leader = evsel->leader; | 
|  | 996 | int fd; | 
|  | 997 |  | 
| Namhyung Kim | 823254e | 2012-11-29 15:38:30 +0900 | [diff] [blame] | 998 | if (perf_evsel__is_group_leader(evsel)) | 
| Jiri Olsa | 6a4bb04 | 2012-08-08 12:22:36 +0200 | [diff] [blame] | 999 | return -1; | 
|  | 1000 |  | 
|  | 1001 | /* | 
|  | 1002 | * The leader must already be processed/opened; | 
|  | 1003 | * if not, it's a bug. | 
|  | 1004 | */ | 
|  | 1005 | BUG_ON(!leader->fd); | 
|  | 1006 |  | 
|  | 1007 | fd = FD(leader, cpu, thread); | 
|  | 1008 | BUG_ON(fd == -1); | 
|  | 1009 |  | 
|  | 1010 | return fd; | 
|  | 1011 | } | 
|  | 1012 |  | 
| Peter Zijlstra | 2c5e8c5 | 2015-04-07 11:09:54 +0200 | [diff] [blame] | 1013 | struct bit_names { | 
|  | 1014 | int bit; | 
|  | 1015 | const char *name; | 
|  | 1016 | }; | 
| Adrian Hunter | e3e1a54 | 2013-08-14 15:48:24 +0300 | [diff] [blame] | 1017 |  | 
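|  |  | /* Format the names of the bits set in @value as a '|'-separated list. */ | 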
| Peter Zijlstra | 2c5e8c5 | 2015-04-07 11:09:54 +0200 | [diff] [blame] | 1018 | static void __p_bits(char *buf, size_t size, u64 value, struct bit_names *bits) | 
| Adrian Hunter | e3e1a54 | 2013-08-14 15:48:24 +0300 | [diff] [blame] | 1019 | { | 
| Peter Zijlstra | 2c5e8c5 | 2015-04-07 11:09:54 +0200 | [diff] [blame] | 1020 | bool first_bit = true; | 
|  | 1021 | int i = 0; | 
| Adrian Hunter | e3e1a54 | 2013-08-14 15:48:24 +0300 | [diff] [blame] | 1022 |  | 
| Peter Zijlstra | 2c5e8c5 | 2015-04-07 11:09:54 +0200 | [diff] [blame] | 1023 | do { | 
|  | 1024 | if (value & bits[i].bit) { | 
|  | 1025 | buf += scnprintf(buf, size, "%s%s", first_bit ? "" : "|", bits[i].name); | 
|  | 1026 | first_bit = false; | 
|  | 1027 | } | 
|  | 1028 | } while (bits[++i].name != NULL); | 
|  | 1029 | } | 
| Adrian Hunter | e3e1a54 | 2013-08-14 15:48:24 +0300 | [diff] [blame] | 1030 |  | 
| Peter Zijlstra | 2c5e8c5 | 2015-04-07 11:09:54 +0200 | [diff] [blame] | 1031 | static void __p_sample_type(char *buf, size_t size, u64 value) | 
|  | 1032 | { | 
|  | 1033 | #define bit_name(n) { PERF_SAMPLE_##n, #n } | 
|  | 1034 | struct bit_names bits[] = { | 
|  | 1035 | bit_name(IP), bit_name(TID), bit_name(TIME), bit_name(ADDR), | 
|  | 1036 | bit_name(READ), bit_name(CALLCHAIN), bit_name(ID), bit_name(CPU), | 
|  | 1037 | bit_name(PERIOD), bit_name(STREAM_ID), bit_name(RAW), | 
|  | 1038 | bit_name(BRANCH_STACK), bit_name(REGS_USER), bit_name(STACK_USER), | 
|  | 1039 | bit_name(IDENTIFIER), bit_name(REGS_INTR), | 
|  | 1040 | { .name = NULL, } | 
|  | 1041 | }; | 
|  | 1042 | #undef bit_name | 
|  | 1043 | __p_bits(buf, size, value, bits); | 
|  | 1044 | } | 
| Adrian Hunter | e3e1a54 | 2013-08-14 15:48:24 +0300 | [diff] [blame] | 1045 |  | 
| Peter Zijlstra | 2c5e8c5 | 2015-04-07 11:09:54 +0200 | [diff] [blame] | 1046 | static void __p_read_format(char *buf, size_t size, u64 value) | 
|  | 1047 | { | 
|  | 1048 | #define bit_name(n) { PERF_FORMAT_##n, #n } | 
|  | 1049 | struct bit_names bits[] = { | 
|  | 1050 | bit_name(TOTAL_TIME_ENABLED), bit_name(TOTAL_TIME_RUNNING), | 
|  | 1051 | bit_name(ID), bit_name(GROUP), | 
|  | 1052 | { .name = NULL, } | 
|  | 1053 | }; | 
|  | 1054 | #undef bit_name | 
|  | 1055 | __p_bits(buf, size, value, bits); | 
|  | 1056 | } | 
| Peter Zijlstra | 814c8c3 | 2015-03-31 00:19:31 +0200 | [diff] [blame] | 1057 |  | 
| Peter Zijlstra | 2c5e8c5 | 2015-04-07 11:09:54 +0200 | [diff] [blame] | 1058 | #define BUF_SIZE		1024 | 
| Adrian Hunter | e3e1a54 | 2013-08-14 15:48:24 +0300 | [diff] [blame] | 1059 |  | 
| Adrian Hunter | 7310aed | 2015-06-11 15:51:04 +0300 | [diff] [blame] | 1060 | #define p_hex(val)		snprintf(buf, BUF_SIZE, "%#"PRIx64, (uint64_t)(val)) | 
| Peter Zijlstra | 2c5e8c5 | 2015-04-07 11:09:54 +0200 | [diff] [blame] | 1061 | #define p_unsigned(val)		snprintf(buf, BUF_SIZE, "%"PRIu64, (uint64_t)(val)) | 
|  | 1062 | #define p_signed(val)		snprintf(buf, BUF_SIZE, "%"PRId64, (int64_t)(val)) | 
|  | 1063 | #define p_sample_type(val)	__p_sample_type(buf, BUF_SIZE, val) | 
|  | 1064 | #define p_read_format(val)	__p_read_format(buf, BUF_SIZE, val) | 
| Adrian Hunter | e3e1a54 | 2013-08-14 15:48:24 +0300 | [diff] [blame] | 1065 |  | 
| Peter Zijlstra | 2c5e8c5 | 2015-04-07 11:09:54 +0200 | [diff] [blame] | 1066 | #define PRINT_ATTRn(_n, _f, _p)				\ | 
|  | 1067 | do {							\ | 
|  | 1068 | if (attr->_f) {					\ | 
|  | 1069 | _p(attr->_f);				\ | 
|  | 1070 | ret += attr__fprintf(fp, _n, buf, priv);\ | 
|  | 1071 | }						\ | 
|  | 1072 | } while (0) | 
|  | 1073 |  | 
|  | 1074 | #define PRINT_ATTRf(_f, _p)	PRINT_ATTRn(#_f, _f, _p) | 
|  | 1075 |  | 
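|  |  | /* | 
|  |  | * Print each non-zero perf_event_attr field through the @attr__fprintf | 
|  |  | * callback, formatting the value into @buf first.  Returns the sum of | 
|  |  | * the callback return values. | 
|  |  | */ | 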
|  | 1076 | int perf_event_attr__fprintf(FILE *fp, struct perf_event_attr *attr, | 
|  | 1077 | attr__fprintf_f attr__fprintf, void *priv) | 
|  | 1078 | { | 
|  | 1079 | char buf[BUF_SIZE]; | 
|  | 1080 | int ret = 0; | 
|  | 1081 |  | 
|  | 1082 | PRINT_ATTRf(type, p_unsigned); | 
|  | 1083 | PRINT_ATTRf(size, p_unsigned); | 
|  | 1084 | PRINT_ATTRf(config, p_hex); | 
|  | 1085 | PRINT_ATTRn("{ sample_period, sample_freq }", sample_period, p_unsigned); | 
|  | 1086 | PRINT_ATTRf(sample_type, p_sample_type); | 
|  | 1087 | PRINT_ATTRf(read_format, p_read_format); | 
|  | 1088 |  | 
|  | 1089 | PRINT_ATTRf(disabled, p_unsigned); | 
|  | 1090 | PRINT_ATTRf(inherit, p_unsigned); | 
|  | 1091 | PRINT_ATTRf(pinned, p_unsigned); | 
|  | 1092 | PRINT_ATTRf(exclusive, p_unsigned); | 
|  | 1093 | PRINT_ATTRf(exclude_user, p_unsigned); | 
|  | 1094 | PRINT_ATTRf(exclude_kernel, p_unsigned); | 
|  | 1095 | PRINT_ATTRf(exclude_hv, p_unsigned); | 
|  | 1096 | PRINT_ATTRf(exclude_idle, p_unsigned); | 
|  | 1097 | PRINT_ATTRf(mmap, p_unsigned); | 
|  | 1098 | PRINT_ATTRf(comm, p_unsigned); | 
|  | 1099 | PRINT_ATTRf(freq, p_unsigned); | 
|  | 1100 | PRINT_ATTRf(inherit_stat, p_unsigned); | 
|  | 1101 | PRINT_ATTRf(enable_on_exec, p_unsigned); | 
|  | 1102 | PRINT_ATTRf(task, p_unsigned); | 
|  | 1103 | PRINT_ATTRf(watermark, p_unsigned); | 
|  | 1104 | PRINT_ATTRf(precise_ip, p_unsigned); | 
|  | 1105 | PRINT_ATTRf(mmap_data, p_unsigned); | 
|  | 1106 | PRINT_ATTRf(sample_id_all, p_unsigned); | 
|  | 1107 | PRINT_ATTRf(exclude_host, p_unsigned); | 
|  | 1108 | PRINT_ATTRf(exclude_guest, p_unsigned); | 
|  | 1109 | PRINT_ATTRf(exclude_callchain_kernel, p_unsigned); | 
|  | 1110 | PRINT_ATTRf(exclude_callchain_user, p_unsigned); | 
|  | 1111 | PRINT_ATTRf(mmap2, p_unsigned); | 
|  | 1112 | PRINT_ATTRf(comm_exec, p_unsigned); | 
|  | 1113 | PRINT_ATTRf(use_clockid, p_unsigned); | 
|  | 1114 |  | 
|  | 1115 | PRINT_ATTRn("{ wakeup_events, wakeup_watermark }", wakeup_events, p_unsigned); | 
|  | 1116 | PRINT_ATTRf(bp_type, p_unsigned); | 
|  | 1117 | PRINT_ATTRn("{ bp_addr, config1 }", bp_addr, p_hex); | 
|  | 1118 | PRINT_ATTRn("{ bp_len, config2 }", bp_len, p_hex); | 
|  | 1119 | PRINT_ATTRf(sample_regs_user, p_hex); | 
|  | 1120 | PRINT_ATTRf(sample_stack_user, p_unsigned); | 
|  | 1121 | PRINT_ATTRf(clockid, p_signed); | 
|  | 1122 | PRINT_ATTRf(sample_regs_intr, p_hex); | 
| Adrian Hunter | 70d73de | 2015-04-09 18:54:06 +0300 | [diff] [blame] | 1123 | PRINT_ATTRf(aux_watermark, p_unsigned); | 
| Adrian Hunter | e3e1a54 | 2013-08-14 15:48:24 +0300 | [diff] [blame] | 1124 |  | 
|  | 1125 | return ret; | 
|  | 1126 | } | 
|  | 1127 |  | 
| Peter Zijlstra | 2c5e8c5 | 2015-04-07 11:09:54 +0200 | [diff] [blame] | 1128 | static int __open_attr__fprintf(FILE *fp, const char *name, const char *val, | 
|  | 1129 | void *priv __attribute__((unused))) | 
|  | 1130 | { | 
|  | 1131 | return fprintf(fp, "  %-32s %s\n", name, val); | 
|  | 1132 | } | 
|  | 1133 |  | 
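|  |  | /* | 
|  |  | * Open the event on every cpu/thread combination, falling back by | 
|  |  | * clearing perf_event_attr features the running kernel does not support | 
|  |  | * and by raising RLIMIT_NOFILE when we run out of fds (see try_fallback | 
|  |  | * below). | 
|  |  | */ | 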
| Arnaldo Carvalho de Melo | 0252208 | 2011-01-04 11:55:27 -0200 | [diff] [blame] | 1134 | static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus, | 
| Jiri Olsa | 6a4bb04 | 2012-08-08 12:22:36 +0200 | [diff] [blame] | 1135 | struct thread_map *threads) | 
| Arnaldo Carvalho de Melo | 4829060 | 2011-01-03 17:48:12 -0200 | [diff] [blame] | 1136 | { | 
| Adrian Hunter | bf8e8f4 | 2014-07-31 09:00:51 +0300 | [diff] [blame] | 1137 | int cpu, thread, nthreads; | 
| Yann Droneaud | 57480d2 | 2014-06-30 22:28:47 +0200 | [diff] [blame] | 1138 | unsigned long flags = PERF_FLAG_FD_CLOEXEC; | 
| Arnaldo Carvalho de Melo | 727ab04 | 2011-10-25 10:42:19 -0200 | [diff] [blame] | 1139 | int pid = -1, err; | 
| Andi Kleen | bec1967 | 2013-08-04 19:41:26 -0700 | [diff] [blame] | 1140 | enum { NO_CHANGE, SET_TO_MAX, INCREASED_MAX } set_rlimit = NO_CHANGE; | 
| Arnaldo Carvalho de Melo | 4829060 | 2011-01-03 17:48:12 -0200 | [diff] [blame] | 1141 |  | 
| Adrian Hunter | bf8e8f4 | 2014-07-31 09:00:51 +0300 | [diff] [blame] | 1142 | if (evsel->system_wide) | 
|  | 1143 | nthreads = 1; | 
|  | 1144 | else | 
|  | 1145 | nthreads = threads->nr; | 
|  | 1146 |  | 
| Arnaldo Carvalho de Melo | 0252208 | 2011-01-04 11:55:27 -0200 | [diff] [blame] | 1147 | if (evsel->fd == NULL && | 
| Adrian Hunter | bf8e8f4 | 2014-07-31 09:00:51 +0300 | [diff] [blame] | 1148 | perf_evsel__alloc_fd(evsel, cpus->nr, nthreads) < 0) | 
| Arnaldo Carvalho de Melo | 727ab04 | 2011-10-25 10:42:19 -0200 | [diff] [blame] | 1149 | return -ENOMEM; | 
| Arnaldo Carvalho de Melo | 4eed11d | 2011-01-04 00:13:17 -0200 | [diff] [blame] | 1150 |  | 
| Stephane Eranian | 023695d | 2011-02-14 11:20:01 +0200 | [diff] [blame] | 1151 | if (evsel->cgrp) { | 
| Yann Droneaud | 57480d2 | 2014-06-30 22:28:47 +0200 | [diff] [blame] | 1152 | flags |= PERF_FLAG_PID_CGROUP; | 
| Stephane Eranian | 023695d | 2011-02-14 11:20:01 +0200 | [diff] [blame] | 1153 | pid = evsel->cgrp->fd; | 
|  | 1154 | } | 
|  | 1155 |  | 
| Arnaldo Carvalho de Melo | 594ac61 | 2012-12-13 13:13:07 -0300 | [diff] [blame] | 1156 | fallback_missing_features: | 
| Peter Zijlstra | 814c8c3 | 2015-03-31 00:19:31 +0200 | [diff] [blame] | 1157 | if (perf_missing_features.clockid_wrong) | 
|  | 1158 | evsel->attr.clockid = CLOCK_MONOTONIC; /* should always work */ | 
|  | 1159 | if (perf_missing_features.clockid) { | 
|  | 1160 | evsel->attr.use_clockid = 0; | 
|  | 1161 | evsel->attr.clockid = 0; | 
|  | 1162 | } | 
| Yann Droneaud | 57480d2 | 2014-06-30 22:28:47 +0200 | [diff] [blame] | 1163 | if (perf_missing_features.cloexec) | 
|  | 1164 | flags &= ~(unsigned long)PERF_FLAG_FD_CLOEXEC; | 
| Stephane Eranian | 5c5e854 | 2013-08-21 12:10:25 +0200 | [diff] [blame] | 1165 | if (perf_missing_features.mmap2) | 
|  | 1166 | evsel->attr.mmap2 = 0; | 
| Arnaldo Carvalho de Melo | 594ac61 | 2012-12-13 13:13:07 -0300 | [diff] [blame] | 1167 | if (perf_missing_features.exclude_guest) | 
|  | 1168 | evsel->attr.exclude_guest = evsel->attr.exclude_host = 0; | 
|  | 1169 | retry_sample_id: | 
|  | 1170 | if (perf_missing_features.sample_id_all) | 
|  | 1171 | evsel->attr.sample_id_all = 0; | 
|  | 1172 |  | 
| Peter Zijlstra | 2c5e8c5 | 2015-04-07 11:09:54 +0200 | [diff] [blame] | 1173 | if (verbose >= 2) { | 
|  | 1174 | fprintf(stderr, "%.60s\n", graph_dotted_line); | 
|  | 1175 | fprintf(stderr, "perf_event_attr:\n"); | 
|  | 1176 | perf_event_attr__fprintf(stderr, &evsel->attr, __open_attr__fprintf, NULL); | 
|  | 1177 | fprintf(stderr, "%.60s\n", graph_dotted_line); | 
|  | 1178 | } | 
| Adrian Hunter | e3e1a54 | 2013-08-14 15:48:24 +0300 | [diff] [blame] | 1179 |  | 
| Arnaldo Carvalho de Melo | 86bd5e8 | 2011-01-03 23:09:46 -0200 | [diff] [blame] | 1180 | for (cpu = 0; cpu < cpus->nr; cpu++) { | 
| Arnaldo Carvalho de Melo | 9d04f17 | 2011-01-12 00:08:18 -0200 | [diff] [blame] | 1181 |  | 
| Adrian Hunter | bf8e8f4 | 2014-07-31 09:00:51 +0300 | [diff] [blame] | 1182 | for (thread = 0; thread < nthreads; thread++) { | 
| Jiri Olsa | 6a4bb04 | 2012-08-08 12:22:36 +0200 | [diff] [blame] | 1183 | int group_fd; | 
| Stephane Eranian | 023695d | 2011-02-14 11:20:01 +0200 | [diff] [blame] | 1184 |  | 
| Adrian Hunter | bf8e8f4 | 2014-07-31 09:00:51 +0300 | [diff] [blame] | 1185 | if (!evsel->cgrp && !evsel->system_wide) | 
| Jiri Olsa | e13798c | 2015-06-23 00:36:02 +0200 | [diff] [blame] | 1186 | pid = thread_map__pid(threads, thread); | 
| Stephane Eranian | 023695d | 2011-02-14 11:20:01 +0200 | [diff] [blame] | 1187 |  | 
| Jiri Olsa | 6a4bb04 | 2012-08-08 12:22:36 +0200 | [diff] [blame] | 1188 | group_fd = get_group_fd(evsel, cpu, thread); | 
| Andi Kleen | bec1967 | 2013-08-04 19:41:26 -0700 | [diff] [blame] | 1189 | retry_open: | 
| Ramkumar Ramachandra | a33f6ef | 2014-03-18 15:10:42 -0400 | [diff] [blame] | 1190 | pr_debug2("sys_perf_event_open: pid %d  cpu %d  group_fd %d  flags %#lx\n", | 
| Adrian Hunter | e3e1a54 | 2013-08-14 15:48:24 +0300 | [diff] [blame] | 1191 | pid, cpus->map[cpu], group_fd, flags); | 
|  | 1192 |  | 
| Arnaldo Carvalho de Melo | 0252208 | 2011-01-04 11:55:27 -0200 | [diff] [blame] | 1193 | FD(evsel, cpu, thread) = sys_perf_event_open(&evsel->attr, | 
| Stephane Eranian | 023695d | 2011-02-14 11:20:01 +0200 | [diff] [blame] | 1194 | pid, | 
| Arnaldo Carvalho de Melo | f08199d | 2011-01-11 23:42:19 -0200 | [diff] [blame] | 1195 | cpus->map[cpu], | 
| Stephane Eranian | 023695d | 2011-02-14 11:20:01 +0200 | [diff] [blame] | 1196 | group_fd, flags); | 
| Arnaldo Carvalho de Melo | 727ab04 | 2011-10-25 10:42:19 -0200 | [diff] [blame] | 1197 | if (FD(evsel, cpu, thread) < 0) { | 
|  | 1198 | err = -errno; | 
| Ramkumar Ramachandra | a33f6ef | 2014-03-18 15:10:42 -0400 | [diff] [blame] | 1199 | pr_debug2("sys_perf_event_open failed, error %d\n", | 
| Adrian Hunter | f852fd6 | 2013-11-01 15:51:29 +0200 | [diff] [blame] | 1200 | err); | 
| Arnaldo Carvalho de Melo | 594ac61 | 2012-12-13 13:13:07 -0300 | [diff] [blame] | 1201 | goto try_fallback; | 
| Arnaldo Carvalho de Melo | 727ab04 | 2011-10-25 10:42:19 -0200 | [diff] [blame] | 1202 | } | 
| Andi Kleen | bec1967 | 2013-08-04 19:41:26 -0700 | [diff] [blame] | 1203 | set_rlimit = NO_CHANGE; | 
| Peter Zijlstra | 814c8c3 | 2015-03-31 00:19:31 +0200 | [diff] [blame] | 1204 |  | 
|  | 1205 | /* | 
|  | 1206 | * If we succeeded but had to kill clockid, fail and | 
|  | 1207 | * have perf_evsel__open_strerror() print us a nice | 
|  | 1208 | * error. | 
|  | 1209 | */ | 
|  | 1210 | if (perf_missing_features.clockid || | 
|  | 1211 | perf_missing_features.clockid_wrong) { | 
|  | 1212 | err = -EINVAL; | 
|  | 1213 | goto out_close; | 
|  | 1214 | } | 
| Arnaldo Carvalho de Melo | 0252208 | 2011-01-04 11:55:27 -0200 | [diff] [blame] | 1215 | } | 
| Arnaldo Carvalho de Melo | 4829060 | 2011-01-03 17:48:12 -0200 | [diff] [blame] | 1216 | } | 
|  | 1217 |  | 
|  | 1218 | return 0; | 
|  | 1219 |  | 
| Arnaldo Carvalho de Melo | 594ac61 | 2012-12-13 13:13:07 -0300 | [diff] [blame] | 1220 | try_fallback: | 
| Andi Kleen | bec1967 | 2013-08-04 19:41:26 -0700 | [diff] [blame] | 1221 | /* | 
|  | 1222 | * perf stat needs between 5 and 22 fds per CPU. When we run out | 
|  | 1223 | * of them, try to increase the limits. | 
|  | 1224 | */ | 
|  | 1225 | if (err == -EMFILE && set_rlimit < INCREASED_MAX) { | 
|  | 1226 | struct rlimit l; | 
|  | 1227 | int old_errno = errno; | 
|  | 1228 |  | 
|  | 1229 | if (getrlimit(RLIMIT_NOFILE, &l) == 0) { | 
|  | 1230 | if (set_rlimit == NO_CHANGE) | 
|  | 1231 | l.rlim_cur = l.rlim_max; | 
|  | 1232 | else { | 
|  | 1233 | l.rlim_cur = l.rlim_max + 1000; | 
|  | 1234 | l.rlim_max = l.rlim_cur; | 
|  | 1235 | } | 
|  | 1236 | if (setrlimit(RLIMIT_NOFILE, &l) == 0) { | 
|  | 1237 | set_rlimit++; | 
|  | 1238 | errno = old_errno; | 
|  | 1239 | goto retry_open; | 
|  | 1240 | } | 
|  | 1241 | } | 
|  | 1242 | errno = old_errno; | 
|  | 1243 | } | 
|  | 1244 |  | 
| Arnaldo Carvalho de Melo | 594ac61 | 2012-12-13 13:13:07 -0300 | [diff] [blame] | 1245 | if (err != -EINVAL || cpu > 0 || thread > 0) | 
|  | 1246 | goto out_close; | 
|  | 1247 |  | 
| Peter Zijlstra | 814c8c3 | 2015-03-31 00:19:31 +0200 | [diff] [blame] | 1248 | /* | 
|  | 1249 | * Must probe features in the order they were added to the | 
|  | 1250 | * perf_event_attr interface. | 
|  | 1251 | */ | 
|  | 1252 | if (!perf_missing_features.clockid_wrong && evsel->attr.use_clockid) { | 
|  | 1253 | perf_missing_features.clockid_wrong = true; | 
|  | 1254 | goto fallback_missing_features; | 
|  | 1255 | } else if (!perf_missing_features.clockid && evsel->attr.use_clockid) { | 
|  | 1256 | perf_missing_features.clockid = true; | 
|  | 1257 | goto fallback_missing_features; | 
|  | 1258 | } else if (!perf_missing_features.cloexec && (flags & PERF_FLAG_FD_CLOEXEC)) { | 
| Yann Droneaud | 57480d2 | 2014-06-30 22:28:47 +0200 | [diff] [blame] | 1259 | perf_missing_features.cloexec = true; | 
|  | 1260 | goto fallback_missing_features; | 
|  | 1261 | } else if (!perf_missing_features.mmap2 && evsel->attr.mmap2) { | 
| Stephane Eranian | 5c5e854 | 2013-08-21 12:10:25 +0200 | [diff] [blame] | 1262 | perf_missing_features.mmap2 = true; | 
|  | 1263 | goto fallback_missing_features; | 
|  | 1264 | } else if (!perf_missing_features.exclude_guest && | 
|  | 1265 | (evsel->attr.exclude_guest || evsel->attr.exclude_host)) { | 
| Arnaldo Carvalho de Melo | 594ac61 | 2012-12-13 13:13:07 -0300 | [diff] [blame] | 1266 | perf_missing_features.exclude_guest = true; | 
|  | 1267 | goto fallback_missing_features; | 
|  | 1268 | } else if (!perf_missing_features.sample_id_all) { | 
|  | 1269 | perf_missing_features.sample_id_all = true; | 
|  | 1270 | goto retry_sample_id; | 
|  | 1271 | } | 
|  | 1272 |  | 
| Arnaldo Carvalho de Melo | 4829060 | 2011-01-03 17:48:12 -0200 | [diff] [blame] | 1273 | out_close: | 
| Arnaldo Carvalho de Melo | 0252208 | 2011-01-04 11:55:27 -0200 | [diff] [blame] | 1274 | do { | 
|  | 1275 | while (--thread >= 0) { | 
|  | 1276 | close(FD(evsel, cpu, thread)); | 
|  | 1277 | FD(evsel, cpu, thread) = -1; | 
|  | 1278 | } | 
| Adrian Hunter | bf8e8f4 | 2014-07-31 09:00:51 +0300 | [diff] [blame] | 1279 | thread = nthreads; | 
| Arnaldo Carvalho de Melo | 0252208 | 2011-01-04 11:55:27 -0200 | [diff] [blame] | 1280 | } while (--cpu >= 0); | 
| Arnaldo Carvalho de Melo | 727ab04 | 2011-10-25 10:42:19 -0200 | [diff] [blame] | 1281 | return err; | 
|  | 1282 | } | 
|  | 1283 |  | 
|  | 1284 | void perf_evsel__close(struct perf_evsel *evsel, int ncpus, int nthreads) | 
|  | 1285 | { | 
|  | 1286 | if (evsel->fd == NULL) | 
|  | 1287 | return; | 
|  | 1288 |  | 
|  | 1289 | perf_evsel__close_fd(evsel, ncpus, nthreads); | 
|  | 1290 | perf_evsel__free_fd(evsel); | 
| Arnaldo Carvalho de Melo | 4829060 | 2011-01-03 17:48:12 -0200 | [diff] [blame] | 1291 | } | 
|  | 1292 |  | 
| Arnaldo Carvalho de Melo | 0252208 | 2011-01-04 11:55:27 -0200 | [diff] [blame] | 1293 | static struct { | 
|  | 1294 | struct cpu_map map; | 
|  | 1295 | int cpus[1]; | 
|  | 1296 | } empty_cpu_map = { | 
|  | 1297 | .map.nr	= 1, | 
|  | 1298 | .cpus	= { -1, }, | 
|  | 1299 | }; | 
|  | 1300 |  | 
|  | 1301 | static struct { | 
|  | 1302 | struct thread_map map; | 
|  | 1303 | int threads[1]; | 
|  | 1304 | } empty_thread_map = { | 
|  | 1305 | .map.nr	 = 1, | 
|  | 1306 | .threads = { -1, }, | 
|  | 1307 | }; | 
|  | 1308 |  | 
| Arnaldo Carvalho de Melo | f08199d | 2011-01-11 23:42:19 -0200 | [diff] [blame] | 1309 | int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus, | 
| Jiri Olsa | 6a4bb04 | 2012-08-08 12:22:36 +0200 | [diff] [blame] | 1310 | struct thread_map *threads) | 
| Arnaldo Carvalho de Melo | 0252208 | 2011-01-04 11:55:27 -0200 | [diff] [blame] | 1311 | { | 
| Arnaldo Carvalho de Melo | 0252208 | 2011-01-04 11:55:27 -0200 | [diff] [blame] | 1312 | if (cpus == NULL) { | 
|  | 1313 | /* Work around old compiler warnings about strict aliasing */ | 
|  | 1314 | cpus = &empty_cpu_map.map; | 
|  | 1315 | } | 
|  | 1316 |  | 
|  | 1317 | if (threads == NULL) | 
|  | 1318 | threads = &empty_thread_map.map; | 
|  | 1319 |  | 
| Jiri Olsa | 6a4bb04 | 2012-08-08 12:22:36 +0200 | [diff] [blame] | 1320 | return __perf_evsel__open(evsel, cpus, threads); | 
| Arnaldo Carvalho de Melo | 0252208 | 2011-01-04 11:55:27 -0200 | [diff] [blame] | 1321 | } | 
|  | 1322 |  | 
| Arnaldo Carvalho de Melo | f08199d | 2011-01-11 23:42:19 -0200 | [diff] [blame] | 1323 | int perf_evsel__open_per_cpu(struct perf_evsel *evsel, | 
| Jiri Olsa | 6a4bb04 | 2012-08-08 12:22:36 +0200 | [diff] [blame] | 1324 | struct cpu_map *cpus) | 
| Arnaldo Carvalho de Melo | 0252208 | 2011-01-04 11:55:27 -0200 | [diff] [blame] | 1325 | { | 
| Jiri Olsa | 6a4bb04 | 2012-08-08 12:22:36 +0200 | [diff] [blame] | 1326 | return __perf_evsel__open(evsel, cpus, &empty_thread_map.map); | 
| Arnaldo Carvalho de Melo | 0252208 | 2011-01-04 11:55:27 -0200 | [diff] [blame] | 1327 | } | 
|  | 1328 |  | 
| Arnaldo Carvalho de Melo | f08199d | 2011-01-11 23:42:19 -0200 | [diff] [blame] | 1329 | int perf_evsel__open_per_thread(struct perf_evsel *evsel, | 
| Jiri Olsa | 6a4bb04 | 2012-08-08 12:22:36 +0200 | [diff] [blame] | 1330 | struct thread_map *threads) | 
| Arnaldo Carvalho de Melo | 4829060 | 2011-01-03 17:48:12 -0200 | [diff] [blame] | 1331 | { | 
| Jiri Olsa | 6a4bb04 | 2012-08-08 12:22:36 +0200 | [diff] [blame] | 1332 | return __perf_evsel__open(evsel, &empty_cpu_map.map, threads); | 
| Arnaldo Carvalho de Melo | 4829060 | 2011-01-03 17:48:12 -0200 | [diff] [blame] | 1333 | } | 
| Arnaldo Carvalho de Melo | 70082dd | 2011-01-12 17:03:24 -0200 | [diff] [blame] | 1334 |  | 
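|  |  | /* | 
|  |  | * Parse the sample_id_all fields appended to non-sample events, walking | 
|  |  | * the trailing id sample area backwards from the end of the event. | 
|  |  | */ | 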
| Arnaldo Carvalho de Melo | 0807d2d | 2012-09-26 12:48:18 -0300 | [diff] [blame] | 1335 | static int perf_evsel__parse_id_sample(const struct perf_evsel *evsel, | 
|  | 1336 | const union perf_event *event, | 
|  | 1337 | struct perf_sample *sample) | 
| Arnaldo Carvalho de Melo | d0dd74e | 2011-01-21 13:46:41 -0200 | [diff] [blame] | 1338 | { | 
| Arnaldo Carvalho de Melo | 0807d2d | 2012-09-26 12:48:18 -0300 | [diff] [blame] | 1339 | u64 type = evsel->attr.sample_type; | 
| Arnaldo Carvalho de Melo | d0dd74e | 2011-01-21 13:46:41 -0200 | [diff] [blame] | 1340 | const u64 *array = event->sample.array; | 
| Arnaldo Carvalho de Melo | 0807d2d | 2012-09-26 12:48:18 -0300 | [diff] [blame] | 1341 | bool swapped = evsel->needs_swap; | 
| Jiri Olsa | 37073f9 | 2012-05-30 14:23:44 +0200 | [diff] [blame] | 1342 | union u64_swap u; | 
| Arnaldo Carvalho de Melo | d0dd74e | 2011-01-21 13:46:41 -0200 | [diff] [blame] | 1343 |  | 
|  | 1344 | array += ((event->header.size - | 
|  | 1345 | sizeof(event->header)) / sizeof(u64)) - 1; | 
|  | 1346 |  | 
| Adrian Hunter | 7556257 | 2013-08-27 11:23:09 +0300 | [diff] [blame] | 1347 | if (type & PERF_SAMPLE_IDENTIFIER) { | 
|  | 1348 | sample->id = *array; | 
|  | 1349 | array--; | 
|  | 1350 | } | 
|  | 1351 |  | 
| Arnaldo Carvalho de Melo | d0dd74e | 2011-01-21 13:46:41 -0200 | [diff] [blame] | 1352 | if (type & PERF_SAMPLE_CPU) { | 
| Jiri Olsa | 37073f9 | 2012-05-30 14:23:44 +0200 | [diff] [blame] | 1353 | u.val64 = *array; | 
|  | 1354 | if (swapped) { | 
|  | 1355 | /* undo swap of u64, then swap on individual u32s */ | 
|  | 1356 | u.val64 = bswap_64(u.val64); | 
|  | 1357 | u.val32[0] = bswap_32(u.val32[0]); | 
|  | 1358 | } | 
|  | 1359 |  | 
|  | 1360 | sample->cpu = u.val32[0]; | 
| Arnaldo Carvalho de Melo | d0dd74e | 2011-01-21 13:46:41 -0200 | [diff] [blame] | 1361 | array--; | 
|  | 1362 | } | 
|  | 1363 |  | 
|  | 1364 | if (type & PERF_SAMPLE_STREAM_ID) { | 
|  | 1365 | sample->stream_id = *array; | 
|  | 1366 | array--; | 
|  | 1367 | } | 
|  | 1368 |  | 
|  | 1369 | if (type & PERF_SAMPLE_ID) { | 
|  | 1370 | sample->id = *array; | 
|  | 1371 | array--; | 
|  | 1372 | } | 
|  | 1373 |  | 
|  | 1374 | if (type & PERF_SAMPLE_TIME) { | 
|  | 1375 | sample->time = *array; | 
|  | 1376 | array--; | 
|  | 1377 | } | 
|  | 1378 |  | 
|  | 1379 | if (type & PERF_SAMPLE_TID) { | 
| Jiri Olsa | 37073f9 | 2012-05-30 14:23:44 +0200 | [diff] [blame] | 1380 | u.val64 = *array; | 
|  | 1381 | if (swapped) { | 
|  | 1382 | /* undo swap of u64, then swap on individual u32s */ | 
|  | 1383 | u.val64 = bswap_64(u.val64); | 
|  | 1384 | u.val32[0] = bswap_32(u.val32[0]); | 
|  | 1385 | u.val32[1] = bswap_32(u.val32[1]); | 
|  | 1386 | } | 
|  | 1387 |  | 
|  | 1388 | sample->pid = u.val32[0]; | 
|  | 1389 | sample->tid = u.val32[1]; | 
| Adrian Hunter | dd44bc6 | 2013-10-18 15:29:01 +0300 | [diff] [blame] | 1390 | array--; | 
| Arnaldo Carvalho de Melo | d0dd74e | 2011-01-21 13:46:41 -0200 | [diff] [blame] | 1391 | } | 
|  | 1392 |  | 
|  | 1393 | return 0; | 
|  | 1394 | } | 
|  | 1395 |  | 
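|  |  | /* | 
|  |  | * Bounds check used while parsing samples: reading @size bytes at | 
|  |  | * @offset must not run past the end of the event (@endp) nor exceed the | 
|  |  | * 16-bit header size. | 
|  |  | */ | 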
| Adrian Hunter | 03b6ea9 | 2013-08-27 11:23:04 +0300 | [diff] [blame] | 1396 | static inline bool overflow(const void *endp, u16 max_size, const void *offset, | 
|  | 1397 | u64 size) | 
| Frederic Weisbecker | 98e1da9 | 2011-05-21 20:08:15 +0200 | [diff] [blame] | 1398 | { | 
| Adrian Hunter | 03b6ea9 | 2013-08-27 11:23:04 +0300 | [diff] [blame] | 1399 | return size > max_size || offset + size > endp; | 
| Frederic Weisbecker | 98e1da9 | 2011-05-21 20:08:15 +0200 | [diff] [blame] | 1400 | } | 
|  | 1401 |  | 
| Adrian Hunter | 03b6ea9 | 2013-08-27 11:23:04 +0300 | [diff] [blame] | 1402 | #define OVERFLOW_CHECK(offset, size, max_size)				\ | 
|  | 1403 | do {								\ | 
|  | 1404 | if (overflow(endp, (max_size), (offset), (size)))	\ | 
|  | 1405 | return -EFAULT;					\ | 
|  | 1406 | } while (0) | 
|  | 1407 |  | 
|  | 1408 | #define OVERFLOW_CHECK_u64(offset) \ | 
|  | 1409 | OVERFLOW_CHECK(offset, sizeof(u64), sizeof(u64)) | 
|  | 1410 |  | 
| Arnaldo Carvalho de Melo | a3f698f | 2012-08-02 12:23:46 -0300 | [diff] [blame] | 1411 | int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event, | 
| Arnaldo Carvalho de Melo | 0807d2d | 2012-09-26 12:48:18 -0300 | [diff] [blame] | 1412 | struct perf_sample *data) | 
| Arnaldo Carvalho de Melo | d0dd74e | 2011-01-21 13:46:41 -0200 | [diff] [blame] | 1413 | { | 
| Arnaldo Carvalho de Melo | a3f698f | 2012-08-02 12:23:46 -0300 | [diff] [blame] | 1414 | u64 type = evsel->attr.sample_type; | 
| Arnaldo Carvalho de Melo | 0807d2d | 2012-09-26 12:48:18 -0300 | [diff] [blame] | 1415 | bool swapped = evsel->needs_swap; | 
| Arnaldo Carvalho de Melo | d0dd74e | 2011-01-21 13:46:41 -0200 | [diff] [blame] | 1416 | const u64 *array; | 
| Adrian Hunter | 03b6ea9 | 2013-08-27 11:23:04 +0300 | [diff] [blame] | 1417 | u16 max_size = event->header.size; | 
|  | 1418 | const void *endp = (void *)event + max_size; | 
|  | 1419 | u64 sz; | 
| Arnaldo Carvalho de Melo | d0dd74e | 2011-01-21 13:46:41 -0200 | [diff] [blame] | 1420 |  | 
| David Ahern | 936be50 | 2011-09-06 09:12:26 -0600 | [diff] [blame] | 1421 | /* | 
|  | 1422 | * used for cross-endian analysis. See git commit 65014ab3 | 
|  | 1423 | * for why this goofiness is needed. | 
|  | 1424 | */ | 
| Jiri Olsa | 6a11f92 | 2012-05-16 08:59:04 +0200 | [diff] [blame] | 1425 | union u64_swap u; | 
| David Ahern | 936be50 | 2011-09-06 09:12:26 -0600 | [diff] [blame] | 1426 |  | 
| Robert Richter | f3bda2c | 2011-12-15 17:32:39 +0100 | [diff] [blame] | 1427 | memset(data, 0, sizeof(*data)); | 
| Arnaldo Carvalho de Melo | d0dd74e | 2011-01-21 13:46:41 -0200 | [diff] [blame] | 1428 | data->cpu = data->pid = data->tid = -1; | 
|  | 1429 | data->stream_id = data->id = data->time = -1ULL; | 
| Jiri Olsa | bc52908 | 2014-02-03 12:44:41 +0100 | [diff] [blame] | 1430 | data->period = evsel->attr.sample_period; | 
| Andi Kleen | 0548429 | 2013-01-24 16:10:29 +0100 | [diff] [blame] | 1431 | data->weight = 0; | 
| Arnaldo Carvalho de Melo | d0dd74e | 2011-01-21 13:46:41 -0200 | [diff] [blame] | 1432 |  | 
|  | 1433 | if (event->header.type != PERF_RECORD_SAMPLE) { | 
| Arnaldo Carvalho de Melo | a3f698f | 2012-08-02 12:23:46 -0300 | [diff] [blame] | 1434 | if (!evsel->attr.sample_id_all) | 
| Arnaldo Carvalho de Melo | d0dd74e | 2011-01-21 13:46:41 -0200 | [diff] [blame] | 1435 | return 0; | 
| Arnaldo Carvalho de Melo | 0807d2d | 2012-09-26 12:48:18 -0300 | [diff] [blame] | 1436 | return perf_evsel__parse_id_sample(evsel, event, data); | 
| Arnaldo Carvalho de Melo | d0dd74e | 2011-01-21 13:46:41 -0200 | [diff] [blame] | 1437 | } | 
|  | 1438 |  | 
|  | 1439 | array = event->sample.array; | 
|  | 1440 |  | 
| Adrian Hunter | 03b6ea9 | 2013-08-27 11:23:04 +0300 | [diff] [blame] | 1441 | /* | 
|  | 1442 | * The evsel's sample_size is based on PERF_SAMPLE_MASK, which includes | 
|  | 1443 | * up to PERF_SAMPLE_PERIOD.  After that, overflow() must be used to | 
|  | 1444 | * check that the format does not go past the end of the event. | 
|  | 1445 | */ | 
| Arnaldo Carvalho de Melo | a3f698f | 2012-08-02 12:23:46 -0300 | [diff] [blame] | 1446 | if (evsel->sample_size + sizeof(event->header) > event->header.size) | 
| Frederic Weisbecker | a285412 | 2011-05-21 19:33:04 +0200 | [diff] [blame] | 1447 | return -EFAULT; | 
|  | 1448 |  | 
| Adrian Hunter | 7556257 | 2013-08-27 11:23:09 +0300 | [diff] [blame] | 1449 | data->id = -1ULL; | 
|  | 1450 | if (type & PERF_SAMPLE_IDENTIFIER) { | 
|  | 1451 | data->id = *array; | 
|  | 1452 | array++; | 
|  | 1453 | } | 
|  | 1454 |  | 
| Arnaldo Carvalho de Melo | d0dd74e | 2011-01-21 13:46:41 -0200 | [diff] [blame] | 1455 | if (type & PERF_SAMPLE_IP) { | 
| Adrian Hunter | ef89325 | 2013-08-27 11:23:06 +0300 | [diff] [blame] | 1456 | data->ip = *array; | 
| Arnaldo Carvalho de Melo | d0dd74e | 2011-01-21 13:46:41 -0200 | [diff] [blame] | 1457 | array++; | 
|  | 1458 | } | 
|  | 1459 |  | 
|  | 1460 | if (type & PERF_SAMPLE_TID) { | 
| David Ahern | 936be50 | 2011-09-06 09:12:26 -0600 | [diff] [blame] | 1461 | u.val64 = *array; | 
|  | 1462 | if (swapped) { | 
|  | 1463 | /* undo swap of u64, then swap on individual u32s */ | 
|  | 1464 | u.val64 = bswap_64(u.val64); | 
|  | 1465 | u.val32[0] = bswap_32(u.val32[0]); | 
|  | 1466 | u.val32[1] = bswap_32(u.val32[1]); | 
|  | 1467 | } | 
|  | 1468 |  | 
|  | 1469 | data->pid = u.val32[0]; | 
|  | 1470 | data->tid = u.val32[1]; | 
| Arnaldo Carvalho de Melo | d0dd74e | 2011-01-21 13:46:41 -0200 | [diff] [blame] | 1471 | array++; | 
|  | 1472 | } | 
|  | 1473 |  | 
|  | 1474 | if (type & PERF_SAMPLE_TIME) { | 
|  | 1475 | data->time = *array; | 
|  | 1476 | array++; | 
|  | 1477 | } | 
|  | 1478 |  | 
| David Ahern | 7cec092 | 2011-05-30 13:08:23 -0600 | [diff] [blame] | 1479 | data->addr = 0; | 
| Arnaldo Carvalho de Melo | d0dd74e | 2011-01-21 13:46:41 -0200 | [diff] [blame] | 1480 | if (type & PERF_SAMPLE_ADDR) { | 
|  | 1481 | data->addr = *array; | 
|  | 1482 | array++; | 
|  | 1483 | } | 
|  | 1484 |  | 
| Arnaldo Carvalho de Melo | d0dd74e | 2011-01-21 13:46:41 -0200 | [diff] [blame] | 1485 | if (type & PERF_SAMPLE_ID) { | 
|  | 1486 | data->id = *array; | 
|  | 1487 | array++; | 
|  | 1488 | } | 
|  | 1489 |  | 
|  | 1490 | if (type & PERF_SAMPLE_STREAM_ID) { | 
|  | 1491 | data->stream_id = *array; | 
|  | 1492 | array++; | 
|  | 1493 | } | 
|  | 1494 |  | 
|  | 1495 | if (type & PERF_SAMPLE_CPU) { | 
| David Ahern | 936be50 | 2011-09-06 09:12:26 -0600 | [diff] [blame] | 1496 |  | 
|  | 1497 | u.val64 = *array; | 
|  | 1498 | if (swapped) { | 
|  | 1499 | /* undo swap of u64, then swap on individual u32s */ | 
|  | 1500 | u.val64 = bswap_64(u.val64); | 
|  | 1501 | u.val32[0] = bswap_32(u.val32[0]); | 
|  | 1502 | } | 
|  | 1503 |  | 
|  | 1504 | data->cpu = u.val32[0]; | 
| Arnaldo Carvalho de Melo | d0dd74e | 2011-01-21 13:46:41 -0200 | [diff] [blame] | 1505 | array++; | 
|  | 1506 | } | 
|  | 1507 |  | 
|  | 1508 | if (type & PERF_SAMPLE_PERIOD) { | 
|  | 1509 | data->period = *array; | 
|  | 1510 | array++; | 
|  | 1511 | } | 
|  | 1512 |  | 
|  | 1513 | if (type & PERF_SAMPLE_READ) { | 
| Jiri Olsa | 9ede473 | 2012-10-10 17:38:13 +0200 | [diff] [blame] | 1514 | u64 read_format = evsel->attr.read_format; | 
|  | 1515 |  | 
| Adrian Hunter | 03b6ea9 | 2013-08-27 11:23:04 +0300 | [diff] [blame] | 1516 | OVERFLOW_CHECK_u64(array); | 
| Jiri Olsa | 9ede473 | 2012-10-10 17:38:13 +0200 | [diff] [blame] | 1517 | if (read_format & PERF_FORMAT_GROUP) | 
|  | 1518 | data->read.group.nr = *array; | 
|  | 1519 | else | 
|  | 1520 | data->read.one.value = *array; | 
|  | 1521 |  | 
|  | 1522 | array++; | 
|  | 1523 |  | 
|  | 1524 | if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) { | 
| Adrian Hunter | 03b6ea9 | 2013-08-27 11:23:04 +0300 | [diff] [blame] | 1525 | OVERFLOW_CHECK_u64(array); | 
| Jiri Olsa | 9ede473 | 2012-10-10 17:38:13 +0200 | [diff] [blame] | 1526 | data->read.time_enabled = *array; | 
|  | 1527 | array++; | 
|  | 1528 | } | 
|  | 1529 |  | 
|  | 1530 | if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) { | 
| Adrian Hunter | 03b6ea9 | 2013-08-27 11:23:04 +0300 | [diff] [blame] | 1531 | OVERFLOW_CHECK_u64(array); | 
| Jiri Olsa | 9ede473 | 2012-10-10 17:38:13 +0200 | [diff] [blame] | 1532 | data->read.time_running = *array; | 
|  | 1533 | array++; | 
|  | 1534 | } | 
|  | 1535 |  | 
|  | 1536 | /* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */ | 
|  | 1537 | if (read_format & PERF_FORMAT_GROUP) { | 
| Adrian Hunter | 03b6ea9 | 2013-08-27 11:23:04 +0300 | [diff] [blame] | 1538 | const u64 max_group_nr = UINT64_MAX / | 
|  | 1539 | sizeof(struct sample_read_value); | 
|  | 1540 |  | 
|  | 1541 | if (data->read.group.nr > max_group_nr) | 
|  | 1542 | return -EFAULT; | 
|  | 1543 | sz = data->read.group.nr * | 
|  | 1544 | sizeof(struct sample_read_value); | 
|  | 1545 | OVERFLOW_CHECK(array, sz, max_size); | 
|  | 1546 | data->read.group.values = | 
|  | 1547 | (struct sample_read_value *)array; | 
|  | 1548 | array = (void *)array + sz; | 
| Jiri Olsa | 9ede473 | 2012-10-10 17:38:13 +0200 | [diff] [blame] | 1549 | } else { | 
| Adrian Hunter | 03b6ea9 | 2013-08-27 11:23:04 +0300 | [diff] [blame] | 1550 | OVERFLOW_CHECK_u64(array); | 
| Jiri Olsa | 9ede473 | 2012-10-10 17:38:13 +0200 | [diff] [blame] | 1551 | data->read.one.id = *array; | 
|  | 1552 | array++; | 
|  | 1553 | } | 
| Arnaldo Carvalho de Melo | d0dd74e | 2011-01-21 13:46:41 -0200 | [diff] [blame] | 1554 | } | 
|  | 1555 |  | 
|  | 1556 | if (type & PERF_SAMPLE_CALLCHAIN) { | 
| Adrian Hunter | 03b6ea9 | 2013-08-27 11:23:04 +0300 | [diff] [blame] | 1557 | const u64 max_callchain_nr = UINT64_MAX / sizeof(u64); | 
|  | 1558 |  | 
|  | 1559 | OVERFLOW_CHECK_u64(array); | 
|  | 1560 | data->callchain = (struct ip_callchain *)array++; | 
|  | 1561 | if (data->callchain->nr > max_callchain_nr) | 
| Frederic Weisbecker | 98e1da9 | 2011-05-21 20:08:15 +0200 | [diff] [blame] | 1562 | return -EFAULT; | 
| Adrian Hunter | 03b6ea9 | 2013-08-27 11:23:04 +0300 | [diff] [blame] | 1563 | sz = data->callchain->nr * sizeof(u64); | 
|  | 1564 | OVERFLOW_CHECK(array, sz, max_size); | 
|  | 1565 | array = (void *)array + sz; | 
| Arnaldo Carvalho de Melo | d0dd74e | 2011-01-21 13:46:41 -0200 | [diff] [blame] | 1566 | } | 
|  | 1567 |  | 
|  | 1568 | if (type & PERF_SAMPLE_RAW) { | 
| Adrian Hunter | 03b6ea9 | 2013-08-27 11:23:04 +0300 | [diff] [blame] | 1569 | OVERFLOW_CHECK_u64(array); | 
| David Ahern | 936be50 | 2011-09-06 09:12:26 -0600 | [diff] [blame] | 1570 | u.val64 = *array; | 
|  | 1571 | if (WARN_ONCE(swapped, | 
|  | 1572 | "Endianness of raw data not corrected!\n")) { | 
|  | 1573 | /* undo swap of u64, then swap on individual u32s */ | 
|  | 1574 | u.val64 = bswap_64(u.val64); | 
|  | 1575 | u.val32[0] = bswap_32(u.val32[0]); | 
|  | 1576 | u.val32[1] = bswap_32(u.val32[1]); | 
|  | 1577 | } | 
| David Ahern | 936be50 | 2011-09-06 09:12:26 -0600 | [diff] [blame] | 1578 | data->raw_size = u.val32[0]; | 
| Adrian Hunter | 03b6ea9 | 2013-08-27 11:23:04 +0300 | [diff] [blame] | 1579 | array = (void *)array + sizeof(u32); | 
| Frederic Weisbecker | 98e1da9 | 2011-05-21 20:08:15 +0200 | [diff] [blame] | 1580 |  | 
| Adrian Hunter | 03b6ea9 | 2013-08-27 11:23:04 +0300 | [diff] [blame] | 1581 | OVERFLOW_CHECK(array, data->raw_size, max_size); | 
|  | 1582 | data->raw_data = (void *)array; | 
|  | 1583 | array = (void *)array + data->raw_size; | 
| Arnaldo Carvalho de Melo | d0dd74e | 2011-01-21 13:46:41 -0200 | [diff] [blame] | 1584 | } | 
|  | 1585 |  | 
| Roberto Agostino Vitillo | b538752 | 2012-02-09 23:21:01 +0100 | [diff] [blame] | 1586 | if (type & PERF_SAMPLE_BRANCH_STACK) { | 
| Adrian Hunter | 03b6ea9 | 2013-08-27 11:23:04 +0300 | [diff] [blame] | 1587 | const u64 max_branch_nr = UINT64_MAX / | 
|  | 1588 | sizeof(struct branch_entry); | 
| Roberto Agostino Vitillo | b538752 | 2012-02-09 23:21:01 +0100 | [diff] [blame] | 1589 |  | 
| Adrian Hunter | 03b6ea9 | 2013-08-27 11:23:04 +0300 | [diff] [blame] | 1590 | OVERFLOW_CHECK_u64(array); | 
|  | 1591 | data->branch_stack = (struct branch_stack *)array++; | 
| Roberto Agostino Vitillo | b538752 | 2012-02-09 23:21:01 +0100 | [diff] [blame] | 1592 |  | 
| Adrian Hunter | 03b6ea9 | 2013-08-27 11:23:04 +0300 | [diff] [blame] | 1593 | if (data->branch_stack->nr > max_branch_nr) | 
|  | 1594 | return -EFAULT; | 
| Roberto Agostino Vitillo | b538752 | 2012-02-09 23:21:01 +0100 | [diff] [blame] | 1595 | sz = data->branch_stack->nr * sizeof(struct branch_entry); | 
| Adrian Hunter | 03b6ea9 | 2013-08-27 11:23:04 +0300 | [diff] [blame] | 1596 | OVERFLOW_CHECK(array, sz, max_size); | 
|  | 1597 | array = (void *)array + sz; | 
| Roberto Agostino Vitillo | b538752 | 2012-02-09 23:21:01 +0100 | [diff] [blame] | 1598 | } | 
| Jiri Olsa | 0f6a301 | 2012-08-07 15:20:45 +0200 | [diff] [blame] | 1599 |  | 
|  | 1600 | if (type & PERF_SAMPLE_REGS_USER) { | 
| Adrian Hunter | 03b6ea9 | 2013-08-27 11:23:04 +0300 | [diff] [blame] | 1601 | OVERFLOW_CHECK_u64(array); | 
| Adrian Hunter | 5b95a4a3 | 2013-08-27 11:23:10 +0300 | [diff] [blame] | 1602 | data->user_regs.abi = *array; | 
|  | 1603 | array++; | 
| Jiri Olsa | 0f6a301 | 2012-08-07 15:20:45 +0200 | [diff] [blame] | 1604 |  | 
| Adrian Hunter | 5b95a4a3 | 2013-08-27 11:23:10 +0300 | [diff] [blame] | 1605 | if (data->user_regs.abi) { | 
| Jiri Olsa | 352ea45 | 2014-01-07 13:47:25 +0100 | [diff] [blame] | 1606 | u64 mask = evsel->attr.sample_regs_user; | 
| Adrian Hunter | 03b6ea9 | 2013-08-27 11:23:04 +0300 | [diff] [blame] | 1607 |  | 
| Jiri Olsa | 352ea45 | 2014-01-07 13:47:25 +0100 | [diff] [blame] | 1608 | sz = hweight_long(mask) * sizeof(u64); | 
| Adrian Hunter | 03b6ea9 | 2013-08-27 11:23:04 +0300 | [diff] [blame] | 1609 | OVERFLOW_CHECK(array, sz, max_size); | 
| Jiri Olsa | 352ea45 | 2014-01-07 13:47:25 +0100 | [diff] [blame] | 1610 | data->user_regs.mask = mask; | 
| Jiri Olsa | 0f6a301 | 2012-08-07 15:20:45 +0200 | [diff] [blame] | 1611 | data->user_regs.regs = (u64 *)array; | 
| Adrian Hunter | 03b6ea9 | 2013-08-27 11:23:04 +0300 | [diff] [blame] | 1612 | array = (void *)array + sz; | 
| Jiri Olsa | 0f6a301 | 2012-08-07 15:20:45 +0200 | [diff] [blame] | 1613 | } | 
|  | 1614 | } | 
|  | 1615 |  | 
|  | 1616 | if (type & PERF_SAMPLE_STACK_USER) { | 
| Adrian Hunter | 03b6ea9 | 2013-08-27 11:23:04 +0300 | [diff] [blame] | 1617 | OVERFLOW_CHECK_u64(array); | 
|  | 1618 | sz = *array++; | 
| Jiri Olsa | 0f6a301 | 2012-08-07 15:20:45 +0200 | [diff] [blame] | 1619 |  | 
|  | 1620 | data->user_stack.offset = ((char *)(array - 1) | 
|  | 1621 | - (char *) event); | 
|  | 1622 |  | 
| Adrian Hunter | 03b6ea9 | 2013-08-27 11:23:04 +0300 | [diff] [blame] | 1623 | if (!sz) { | 
| Jiri Olsa | 0f6a301 | 2012-08-07 15:20:45 +0200 | [diff] [blame] | 1624 | data->user_stack.size = 0; | 
|  | 1625 | } else { | 
| Adrian Hunter | 03b6ea9 | 2013-08-27 11:23:04 +0300 | [diff] [blame] | 1626 | OVERFLOW_CHECK(array, sz, max_size); | 
| Jiri Olsa | 0f6a301 | 2012-08-07 15:20:45 +0200 | [diff] [blame] | 1627 | data->user_stack.data = (char *)array; | 
| Adrian Hunter | 03b6ea9 | 2013-08-27 11:23:04 +0300 | [diff] [blame] | 1628 | array = (void *)array + sz; | 
|  | 1629 | OVERFLOW_CHECK_u64(array); | 
| Adrian Hunter | 54bd269 | 2013-07-04 16:20:34 +0300 | [diff] [blame] | 1630 | data->user_stack.size = *array++; | 
| Jiri Olsa | a65cb4b | 2013-10-02 15:46:39 +0200 | [diff] [blame] | 1631 | if (WARN_ONCE(data->user_stack.size > sz, | 
|  | 1632 | "user stack dump failure\n")) | 
|  | 1633 | return -EFAULT; | 
| Jiri Olsa | 0f6a301 | 2012-08-07 15:20:45 +0200 | [diff] [blame] | 1634 | } | 
|  | 1635 | } | 
|  | 1636 |  | 
| Andi Kleen | 0548429 | 2013-01-24 16:10:29 +0100 | [diff] [blame] | 1637 | data->weight = 0; | 
|  | 1638 | if (type & PERF_SAMPLE_WEIGHT) { | 
| Adrian Hunter | 03b6ea9 | 2013-08-27 11:23:04 +0300 | [diff] [blame] | 1639 | OVERFLOW_CHECK_u64(array); | 
| Andi Kleen | 0548429 | 2013-01-24 16:10:29 +0100 | [diff] [blame] | 1640 | data->weight = *array; | 
|  | 1641 | array++; | 
|  | 1642 | } | 
|  | 1643 |  | 
| Stephane Eranian | 98a3b32 | 2013-01-24 16:10:35 +0100 | [diff] [blame] | 1644 | data->data_src = PERF_MEM_DATA_SRC_NONE; | 
|  | 1645 | if (type & PERF_SAMPLE_DATA_SRC) { | 
| Adrian Hunter | 03b6ea9 | 2013-08-27 11:23:04 +0300 | [diff] [blame] | 1646 | OVERFLOW_CHECK_u64(array); | 
| Stephane Eranian | 98a3b32 | 2013-01-24 16:10:35 +0100 | [diff] [blame] | 1647 | data->data_src = *array; | 
|  | 1648 | array++; | 
|  | 1649 | } | 
|  | 1650 |  | 
| Andi Kleen | 475eeab | 2013-09-20 07:40:43 -0700 | [diff] [blame] | 1651 | data->transaction = 0; | 
|  | 1652 | if (type & PERF_SAMPLE_TRANSACTION) { | 
| Adrian Hunter | 87b9552 | 2013-11-01 15:51:36 +0200 | [diff] [blame] | 1653 | OVERFLOW_CHECK_u64(array); | 
| Andi Kleen | 475eeab | 2013-09-20 07:40:43 -0700 | [diff] [blame] | 1654 | data->transaction = *array; | 
|  | 1655 | array++; | 
|  | 1656 | } | 
|  | 1657 |  | 
| Stephane Eranian | 6a21c0b | 2014-09-24 13:48:39 +0200 | [diff] [blame] | 1658 | data->intr_regs.abi = PERF_SAMPLE_REGS_ABI_NONE; | 
|  | 1659 | if (type & PERF_SAMPLE_REGS_INTR) { | 
|  | 1660 | OVERFLOW_CHECK_u64(array); | 
|  | 1661 | data->intr_regs.abi = *array; | 
|  | 1662 | array++; | 
|  | 1663 |  | 
|  | 1664 | if (data->intr_regs.abi != PERF_SAMPLE_REGS_ABI_NONE) { | 
|  | 1665 | u64 mask = evsel->attr.sample_regs_intr; | 
|  | 1666 |  | 
|  | 1667 | sz = hweight_long(mask) * sizeof(u64); | 
|  | 1668 | OVERFLOW_CHECK(array, sz, max_size); | 
|  | 1669 | data->intr_regs.mask = mask; | 
|  | 1670 | data->intr_regs.regs = (u64 *)array; | 
|  | 1671 | array = (void *)array + sz; | 
|  | 1672 | } | 
|  | 1673 | } | 
|  | 1674 |  | 
| Arnaldo Carvalho de Melo | d0dd74e | 2011-01-21 13:46:41 -0200 | [diff] [blame] | 1675 | return 0; | 
|  | 1676 | } | 
| Andrew Vagin | 74eec26 | 2011-11-28 12:03:31 +0300 | [diff] [blame] | 1677 |  | 
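|  |  | /* | 
|  |  | * Calculate how many bytes a synthesized PERF_RECORD_SAMPLE carrying | 
|  |  | * @sample would take; must match the layout written by | 
|  |  | * perf_event__synthesize_sample() below. | 
|  |  | */ | 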
| Adrian Hunter | b1cf6f6 | 2013-08-27 11:23:12 +0300 | [diff] [blame] | 1678 | size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type, | 
| Jiri Olsa | 352ea45 | 2014-01-07 13:47:25 +0100 | [diff] [blame] | 1679 | u64 read_format) | 
| Adrian Hunter | b1cf6f6 | 2013-08-27 11:23:12 +0300 | [diff] [blame] | 1680 | { | 
|  | 1681 | size_t sz, result = sizeof(struct sample_event); | 
|  | 1682 |  | 
|  | 1683 | if (type & PERF_SAMPLE_IDENTIFIER) | 
|  | 1684 | result += sizeof(u64); | 
|  | 1685 |  | 
|  | 1686 | if (type & PERF_SAMPLE_IP) | 
|  | 1687 | result += sizeof(u64); | 
|  | 1688 |  | 
|  | 1689 | if (type & PERF_SAMPLE_TID) | 
|  | 1690 | result += sizeof(u64); | 
|  | 1691 |  | 
|  | 1692 | if (type & PERF_SAMPLE_TIME) | 
|  | 1693 | result += sizeof(u64); | 
|  | 1694 |  | 
|  | 1695 | if (type & PERF_SAMPLE_ADDR) | 
|  | 1696 | result += sizeof(u64); | 
|  | 1697 |  | 
|  | 1698 | if (type & PERF_SAMPLE_ID) | 
|  | 1699 | result += sizeof(u64); | 
|  | 1700 |  | 
|  | 1701 | if (type & PERF_SAMPLE_STREAM_ID) | 
|  | 1702 | result += sizeof(u64); | 
|  | 1703 |  | 
|  | 1704 | if (type & PERF_SAMPLE_CPU) | 
|  | 1705 | result += sizeof(u64); | 
|  | 1706 |  | 
|  | 1707 | if (type & PERF_SAMPLE_PERIOD) | 
|  | 1708 | result += sizeof(u64); | 
|  | 1709 |  | 
|  | 1710 | if (type & PERF_SAMPLE_READ) { | 
|  | 1711 | result += sizeof(u64); | 
|  | 1712 | if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) | 
|  | 1713 | result += sizeof(u64); | 
|  | 1714 | if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) | 
|  | 1715 | result += sizeof(u64); | 
|  | 1716 | /* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */ | 
|  | 1717 | if (read_format & PERF_FORMAT_GROUP) { | 
|  | 1718 | sz = sample->read.group.nr * | 
|  | 1719 | sizeof(struct sample_read_value); | 
|  | 1720 | result += sz; | 
|  | 1721 | } else { | 
|  | 1722 | result += sizeof(u64); | 
|  | 1723 | } | 
|  | 1724 | } | 
|  | 1725 |  | 
|  | 1726 | if (type & PERF_SAMPLE_CALLCHAIN) { | 
|  | 1727 | sz = (sample->callchain->nr + 1) * sizeof(u64); | 
|  | 1728 | result += sz; | 
|  | 1729 | } | 
|  | 1730 |  | 
|  | 1731 | if (type & PERF_SAMPLE_RAW) { | 
|  | 1732 | result += sizeof(u32); | 
|  | 1733 | result += sample->raw_size; | 
|  | 1734 | } | 
|  | 1735 |  | 
|  | 1736 | if (type & PERF_SAMPLE_BRANCH_STACK) { | 
|  | 1737 | sz = sample->branch_stack->nr * sizeof(struct branch_entry); | 
|  | 1738 | sz += sizeof(u64); | 
|  | 1739 | result += sz; | 
|  | 1740 | } | 
|  | 1741 |  | 
|  | 1742 | if (type & PERF_SAMPLE_REGS_USER) { | 
|  | 1743 | if (sample->user_regs.abi) { | 
|  | 1744 | result += sizeof(u64); | 
| Jiri Olsa | 352ea45 | 2014-01-07 13:47:25 +0100 | [diff] [blame] | 1745 | sz = hweight_long(sample->user_regs.mask) * sizeof(u64); | 
| Adrian Hunter | b1cf6f6 | 2013-08-27 11:23:12 +0300 | [diff] [blame] | 1746 | result += sz; | 
|  | 1747 | } else { | 
|  | 1748 | result += sizeof(u64); | 
|  | 1749 | } | 
|  | 1750 | } | 
|  | 1751 |  | 
|  | 1752 | if (type & PERF_SAMPLE_STACK_USER) { | 
|  | 1753 | sz = sample->user_stack.size; | 
|  | 1754 | result += sizeof(u64); | 
|  | 1755 | if (sz) { | 
|  | 1756 | result += sz; | 
|  | 1757 | result += sizeof(u64); | 
|  | 1758 | } | 
|  | 1759 | } | 
|  | 1760 |  | 
|  | 1761 | if (type & PERF_SAMPLE_WEIGHT) | 
|  | 1762 | result += sizeof(u64); | 
|  | 1763 |  | 
|  | 1764 | if (type & PERF_SAMPLE_DATA_SRC) | 
|  | 1765 | result += sizeof(u64); | 
|  | 1766 |  | 
| Adrian Hunter | 42d8891 | 2013-11-01 15:51:38 +0200 | [diff] [blame] | 1767 | if (type & PERF_SAMPLE_TRANSACTION) | 
|  | 1768 | result += sizeof(u64); | 
|  | 1769 |  | 
| Stephane Eranian | 6a21c0b | 2014-09-24 13:48:39 +0200 | [diff] [blame] | 1770 | if (type & PERF_SAMPLE_REGS_INTR) { | 
|  | 1771 | if (sample->intr_regs.abi) { | 
|  | 1772 | result += sizeof(u64); | 
|  | 1773 | sz = hweight_long(sample->intr_regs.mask) * sizeof(u64); | 
|  | 1774 | result += sz; | 
|  | 1775 | } else { | 
|  | 1776 | result += sizeof(u64); | 
|  | 1777 | } | 
|  | 1778 | } | 
|  | 1779 |  | 
| Adrian Hunter | b1cf6f6 | 2013-08-27 11:23:12 +0300 | [diff] [blame] | 1780 | return result; | 
|  | 1781 | } | 
|  | 1782 |  | 
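|  |  | /* | 
|  |  | * Write the sample fields into event->sample.array in the same order | 
|  |  | * that perf_evsel__parse_sample() reads them, undoing the u32 pair | 
|  |  | * swaps when the event is for the opposite endianness. | 
|  |  | */ | 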
| Andrew Vagin | 74eec26 | 2011-11-28 12:03:31 +0300 | [diff] [blame] | 1783 | int perf_event__synthesize_sample(union perf_event *event, u64 type, | 
| Jiri Olsa | 352ea45 | 2014-01-07 13:47:25 +0100 | [diff] [blame] | 1784 | u64 read_format, | 
| Andrew Vagin | 74eec26 | 2011-11-28 12:03:31 +0300 | [diff] [blame] | 1785 | const struct perf_sample *sample, | 
|  | 1786 | bool swapped) | 
|  | 1787 | { | 
|  | 1788 | u64 *array; | 
| Adrian Hunter | d03f217 | 2013-08-27 11:23:11 +0300 | [diff] [blame] | 1789 | size_t sz; | 
| Andrew Vagin | 74eec26 | 2011-11-28 12:03:31 +0300 | [diff] [blame] | 1790 | /* | 
|  | 1791 | * used for cross-endian analysis. See git commit 65014ab3 | 
|  | 1792 | * for why this goofiness is needed. | 
|  | 1793 | */ | 
| Jiri Olsa | 6a11f92 | 2012-05-16 08:59:04 +0200 | [diff] [blame] | 1794 | union u64_swap u; | 
| Andrew Vagin | 74eec26 | 2011-11-28 12:03:31 +0300 | [diff] [blame] | 1795 |  | 
|  | 1796 | array = event->sample.array; | 
|  | 1797 |  | 
| Adrian Hunter | 7556257 | 2013-08-27 11:23:09 +0300 | [diff] [blame] | 1798 | if (type & PERF_SAMPLE_IDENTIFIER) { | 
|  | 1799 | *array = sample->id; | 
|  | 1800 | array++; | 
|  | 1801 | } | 
|  | 1802 |  | 
| Andrew Vagin | 74eec26 | 2011-11-28 12:03:31 +0300 | [diff] [blame] | 1803 | if (type & PERF_SAMPLE_IP) { | 
| Adrian Hunter | ef89325 | 2013-08-27 11:23:06 +0300 | [diff] [blame] | 1804 | *array = sample->ip; | 
| Andrew Vagin | 74eec26 | 2011-11-28 12:03:31 +0300 | [diff] [blame] | 1805 | array++; | 
|  | 1806 | } | 
|  | 1807 |  | 
|  | 1808 | if (type & PERF_SAMPLE_TID) { | 
|  | 1809 | u.val32[0] = sample->pid; | 
|  | 1810 | u.val32[1] = sample->tid; | 
|  | 1811 | if (swapped) { | 
|  | 1812 | /* | 
| Arnaldo Carvalho de Melo | a3f698f | 2012-08-02 12:23:46 -0300 | [diff] [blame] | 1813 | * Inverse of what is done in perf_evsel__parse_sample | 
| Andrew Vagin | 74eec26 | 2011-11-28 12:03:31 +0300 | [diff] [blame] | 1814 | */ | 
|  | 1815 | u.val32[0] = bswap_32(u.val32[0]); | 
|  | 1816 | u.val32[1] = bswap_32(u.val32[1]); | 
|  | 1817 | u.val64 = bswap_64(u.val64); | 
|  | 1818 | } | 
|  | 1819 |  | 
|  | 1820 | *array = u.val64; | 
|  | 1821 | array++; | 
|  | 1822 | } | 
|  | 1823 |  | 
|  | 1824 | if (type & PERF_SAMPLE_TIME) { | 
|  | 1825 | *array = sample->time; | 
|  | 1826 | array++; | 
|  | 1827 | } | 
|  | 1828 |  | 
|  | 1829 | if (type & PERF_SAMPLE_ADDR) { | 
|  | 1830 | *array = sample->addr; | 
|  | 1831 | array++; | 
|  | 1832 | } | 
|  | 1833 |  | 
|  | 1834 | if (type & PERF_SAMPLE_ID) { | 
|  | 1835 | *array = sample->id; | 
|  | 1836 | array++; | 
|  | 1837 | } | 
|  | 1838 |  | 
|  | 1839 | if (type & PERF_SAMPLE_STREAM_ID) { | 
|  | 1840 | *array = sample->stream_id; | 
|  | 1841 | array++; | 
|  | 1842 | } | 
|  | 1843 |  | 
|  | 1844 | if (type & PERF_SAMPLE_CPU) { | 
|  | 1845 | u.val32[0] = sample->cpu; | 
|  | 1846 | if (swapped) { | 
|  | 1847 | /* | 
| Arnaldo Carvalho de Melo | a3f698f | 2012-08-02 12:23:46 -0300 | [diff] [blame] | 1848 | * Inverse of what is done in perf_evsel__parse_sample | 
| Andrew Vagin | 74eec26 | 2011-11-28 12:03:31 +0300 | [diff] [blame] | 1849 | */ | 
|  | 1850 | u.val32[0] = bswap_32(u.val32[0]); | 
|  | 1851 | u.val64 = bswap_64(u.val64); | 
|  | 1852 | } | 
|  | 1853 | *array = u.val64; | 
|  | 1854 | array++; | 
|  | 1855 | } | 
|  | 1856 |  | 
|  | 1857 | if (type & PERF_SAMPLE_PERIOD) { | 
|  | 1858 | *array = sample->period; | 
|  | 1859 | array++; | 
|  | 1860 | } | 
|  | 1861 |  | 
| Adrian Hunter | d03f217 | 2013-08-27 11:23:11 +0300 | [diff] [blame] | 1862 | if (type & PERF_SAMPLE_READ) { | 
|  | 1863 | if (read_format & PERF_FORMAT_GROUP) | 
|  | 1864 | *array = sample->read.group.nr; | 
|  | 1865 | else | 
|  | 1866 | *array = sample->read.one.value; | 
|  | 1867 | array++; | 
|  | 1868 |  | 
|  | 1869 | if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) { | 
|  | 1870 | *array = sample->read.time_enabled; | 
|  | 1871 | array++; | 
|  | 1872 | } | 
|  | 1873 |  | 
|  | 1874 | if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) { | 
|  | 1875 | *array = sample->read.time_running; | 
|  | 1876 | array++; | 
|  | 1877 | } | 
|  | 1878 |  | 
|  | 1879 | /* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */ | 
|  | 1880 | if (read_format & PERF_FORMAT_GROUP) { | 
|  | 1881 | sz = sample->read.group.nr * | 
|  | 1882 | sizeof(struct sample_read_value); | 
|  | 1883 | memcpy(array, sample->read.group.values, sz); | 
|  | 1884 | array = (void *)array + sz; | 
|  | 1885 | } else { | 
|  | 1886 | *array = sample->read.one.id; | 
|  | 1887 | array++; | 
|  | 1888 | } | 
|  | 1889 | } | 
|  | 1890 |  | 
|  | 1891 | if (type & PERF_SAMPLE_CALLCHAIN) { | 
|  | 1892 | sz = (sample->callchain->nr + 1) * sizeof(u64); | 
|  | 1893 | memcpy(array, sample->callchain, sz); | 
|  | 1894 | array = (void *)array + sz; | 
|  | 1895 | } | 
|  | 1896 |  | 
|  | 1897 | if (type & PERF_SAMPLE_RAW) { | 
|  | 1898 | u.val32[0] = sample->raw_size; | 
|  | 1899 | if (WARN_ONCE(swapped, | 
|  | 1900 | "Endianness of raw data not corrected!\n")) { | 
|  | 1901 | /* | 
|  | 1902 | * Inverse of what is done in perf_evsel__parse_sample | 
|  | 1903 | */ | 
|  | 1904 | u.val32[0] = bswap_32(u.val32[0]); | 
|  | 1905 | u.val32[1] = bswap_32(u.val32[1]); | 
|  | 1906 | u.val64 = bswap_64(u.val64); | 
|  | 1907 | } | 
|  | 1908 | *array = u.val64; | 
|  | 1909 | array = (void *)array + sizeof(u32); | 
|  | 1910 |  | 
|  | 1911 | memcpy(array, sample->raw_data, sample->raw_size); | 
|  | 1912 | array = (void *)array + sample->raw_size; | 
|  | 1913 | } | 
|  | 1914 |  | 
|  | 1915 | if (type & PERF_SAMPLE_BRANCH_STACK) { | 
|  | 1916 | sz = sample->branch_stack->nr * sizeof(struct branch_entry); | 
|  | 1917 | sz += sizeof(u64); | 
|  | 1918 | memcpy(array, sample->branch_stack, sz); | 
|  | 1919 | array = (void *)array + sz; | 
|  | 1920 | } | 
|  | 1921 |  | 
|  | 1922 | if (type & PERF_SAMPLE_REGS_USER) { | 
|  | 1923 | if (sample->user_regs.abi) { | 
|  | 1924 | *array++ = sample->user_regs.abi; | 
| Jiri Olsa | 352ea45 | 2014-01-07 13:47:25 +0100 | [diff] [blame] | 1925 | sz = hweight_long(sample->user_regs.mask) * sizeof(u64); | 
| Adrian Hunter | d03f217 | 2013-08-27 11:23:11 +0300 | [diff] [blame] | 1926 | memcpy(array, sample->user_regs.regs, sz); | 
|  | 1927 | array = (void *)array + sz; | 
|  | 1928 | } else { | 
|  | 1929 | *array++ = 0; | 
|  | 1930 | } | 
|  | 1931 | } | 
|  | 1932 |  | 
|  | 1933 | if (type & PERF_SAMPLE_STACK_USER) { | 
|  | 1934 | sz = sample->user_stack.size; | 
|  | 1935 | *array++ = sz; | 
|  | 1936 | if (sz) { | 
|  | 1937 | memcpy(array, sample->user_stack.data, sz); | 
|  | 1938 | array = (void *)array + sz; | 
|  | 1939 | *array++ = sz; | 
|  | 1940 | } | 
|  | 1941 | } | 
|  | 1942 |  | 
|  | 1943 | if (type & PERF_SAMPLE_WEIGHT) { | 
|  | 1944 | *array = sample->weight; | 
|  | 1945 | array++; | 
|  | 1946 | } | 
|  | 1947 |  | 
|  | 1948 | if (type & PERF_SAMPLE_DATA_SRC) { | 
|  | 1949 | *array = sample->data_src; | 
|  | 1950 | array++; | 
|  | 1951 | } | 
|  | 1952 |  | 
| Adrian Hunter | 42d8891 | 2013-11-01 15:51:38 +0200 | [diff] [blame] | 1953 | if (type & PERF_SAMPLE_TRANSACTION) { | 
|  | 1954 | *array = sample->transaction; | 
|  | 1955 | array++; | 
|  | 1956 | } | 
|  | 1957 |  | 
| Stephane Eranian | 6a21c0b | 2014-09-24 13:48:39 +0200 | [diff] [blame] | 1958 | if (type & PERF_SAMPLE_REGS_INTR) { | 
|  | 1959 | if (sample->intr_regs.abi) { | 
|  | 1960 | *array++ = sample->intr_regs.abi; | 
|  | 1961 | sz = hweight_long(sample->intr_regs.mask) * sizeof(u64); | 
|  | 1962 | memcpy(array, sample->intr_regs.regs, sz); | 
|  | 1963 | array = (void *)array + sz; | 
|  | 1964 | } else { | 
|  | 1965 | *array++ = 0; | 
|  | 1966 | } | 
|  | 1967 | } | 
|  | 1968 |  | 
| Andrew Vagin | 74eec26 | 2011-11-28 12:03:31 +0300 | [diff] [blame] | 1969 | return 0; | 
|  | 1970 | } | 
| Arnaldo Carvalho de Melo | 5555ded | 2012-09-11 19:24:23 -0300 | [diff] [blame] | 1971 |  | 
| Arnaldo Carvalho de Melo | efd2b92 | 2012-09-18 11:21:50 -0300 | [diff] [blame] | 1972 | struct format_field *perf_evsel__field(struct perf_evsel *evsel, const char *name) | 
|  | 1973 | { | 
|  | 1974 | return pevent_find_field(evsel->tp_format, name); | 
|  | 1975 | } | 
|  | 1976 |  | 
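|  |  | /* | 
|  |  | * Return a pointer to the named field inside the tracepoint raw data; | 
|  |  | * for dynamic fields the 16-bit offset stored at the field location is | 
|  |  | * followed. | 
|  |  | */ | 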
| Arnaldo Carvalho de Melo | 5d2074e | 2012-09-26 20:22:00 -0300 | [diff] [blame] | 1977 | void *perf_evsel__rawptr(struct perf_evsel *evsel, struct perf_sample *sample, | 
| Arnaldo Carvalho de Melo | 5555ded | 2012-09-11 19:24:23 -0300 | [diff] [blame] | 1978 | const char *name) | 
|  | 1979 | { | 
| Arnaldo Carvalho de Melo | efd2b92 | 2012-09-18 11:21:50 -0300 | [diff] [blame] | 1980 | struct format_field *field = perf_evsel__field(evsel, name); | 
| Arnaldo Carvalho de Melo | 5555ded | 2012-09-11 19:24:23 -0300 | [diff] [blame] | 1981 | int offset; | 
|  | 1982 |  | 
| Arnaldo Carvalho de Melo | efd2b92 | 2012-09-18 11:21:50 -0300 | [diff] [blame] | 1983 | if (!field) | 
|  | 1984 | return NULL; | 
| Arnaldo Carvalho de Melo | 5555ded | 2012-09-11 19:24:23 -0300 | [diff] [blame] | 1985 |  | 
|  | 1986 | offset = field->offset; | 
|  | 1987 |  | 
|  | 1988 | if (field->flags & FIELD_IS_DYNAMIC) { | 
|  | 1989 | offset = *(int *)(sample->raw_data + field->offset); | 
|  | 1990 | offset &= 0xffff; | 
|  | 1991 | } | 
|  | 1992 |  | 
|  | 1993 | return sample->raw_data + offset; | 
|  | 1994 | } | 
|  | 1995 |  | 
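|  |  | /* | 
|  |  | * Read the named tracepoint field as an integer, handling 1, 2, 4 and 8 | 
|  |  | * byte fields and byte swapping when the recorded data has the opposite | 
|  |  | * endianness.  Returns 0 if the field does not exist or has an | 
|  |  | * unsupported size. | 
|  |  | */ | 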
|  | 1996 | u64 perf_evsel__intval(struct perf_evsel *evsel, struct perf_sample *sample, | 
|  | 1997 | const char *name) | 
|  | 1998 | { | 
| Arnaldo Carvalho de Melo | efd2b92 | 2012-09-18 11:21:50 -0300 | [diff] [blame] | 1999 | struct format_field *field = perf_evsel__field(evsel, name); | 
| Arnaldo Carvalho de Melo | e6b6f67 | 2012-09-26 13:13:04 -0300 | [diff] [blame] | 2000 | void *ptr; | 
|  | 2001 | u64 value; | 
| Arnaldo Carvalho de Melo | 5555ded | 2012-09-11 19:24:23 -0300 | [diff] [blame] | 2002 |  | 
| Arnaldo Carvalho de Melo | efd2b92 | 2012-09-18 11:21:50 -0300 | [diff] [blame] | 2003 | if (!field) | 
|  | 2004 | return 0; | 
| Arnaldo Carvalho de Melo | 5555ded | 2012-09-11 19:24:23 -0300 | [diff] [blame] | 2005 |  | 
| Arnaldo Carvalho de Melo | e6b6f67 | 2012-09-26 13:13:04 -0300 | [diff] [blame] | 2006 | ptr = sample->raw_data + field->offset; | 
| Arnaldo Carvalho de Melo | 5555ded | 2012-09-11 19:24:23 -0300 | [diff] [blame] | 2007 |  | 
| Arnaldo Carvalho de Melo | e6b6f67 | 2012-09-26 13:13:04 -0300 | [diff] [blame] | 2008 | switch (field->size) { | 
|  | 2009 | case 1: | 
|  | 2010 | return *(u8 *)ptr; | 
|  | 2011 | case 2: | 
|  | 2012 | value = *(u16 *)ptr; | 
|  | 2013 | break; | 
|  | 2014 | case 4: | 
|  | 2015 | value = *(u32 *)ptr; | 
|  | 2016 | break; | 
|  | 2017 | case 8: | 
| David Ahern | e94eeda | 2015-03-24 16:14:09 -0400 | [diff] [blame] | 2018 | memcpy(&value, ptr, sizeof(u64)); | 
| Arnaldo Carvalho de Melo | e6b6f67 | 2012-09-26 13:13:04 -0300 | [diff] [blame] | 2019 | break; | 
|  | 2020 | default: | 
|  | 2021 | return 0; | 
|  | 2022 | } | 
|  | 2023 |  | 
|  | 2024 | if (!evsel->needs_swap) | 
|  | 2025 | return value; | 
|  | 2026 |  | 
|  | 2027 | switch (field->size) { | 
|  | 2028 | case 2: | 
|  | 2029 | return bswap_16(value); | 
|  | 2030 | case 4: | 
|  | 2031 | return bswap_32(value); | 
|  | 2032 | case 8: | 
|  | 2033 | return bswap_64(value); | 
|  | 2034 | default: | 
|  | 2035 | return 0; | 
|  | 2036 | } | 
| Arnaldo Carvalho de Melo | 5555ded | 2012-09-11 19:24:23 -0300 | [diff] [blame] | 2039 | } | 
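/*
 * Example (sketch): perf_evsel__intval() is how tracepoint handlers
 * usually pull integer fields out of a sample.  Assumes a
 * sched:sched_switch evsel; the field names come from that event's
 * format and are used here purely as an illustration.
 */
static void print_sched_switch(struct perf_evsel *evsel,
			       struct perf_sample *sample)
{
	u64 prev_pid = perf_evsel__intval(evsel, sample, "prev_pid");
	u64 next_pid = perf_evsel__intval(evsel, sample, "next_pid");

	printf("sched_switch: %" PRIu64 " -> %" PRIu64 "\n",
	       prev_pid, next_pid);
}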
| Arnaldo Carvalho de Melo | 0698aed | 2012-12-10 18:17:08 -0300 | [diff] [blame] | 2040 |  | 
|  | 2041 | static int comma_fprintf(FILE *fp, bool *first, const char *fmt, ...) | 
|  | 2042 | { | 
|  | 2043 | va_list args; | 
|  | 2044 | int ret = 0; | 
|  | 2045 |  | 
|  | 2046 | if (!*first) { | 
|  | 2047 | ret += fprintf(fp, ","); | 
|  | 2048 | } else { | 
|  | 2049 | ret += fprintf(fp, ":"); | 
|  | 2050 | *first = false; | 
|  | 2051 | } | 
|  | 2052 |  | 
|  | 2053 | va_start(args, fmt); | 
|  | 2054 | ret += vfprintf(fp, fmt, args); | 
|  | 2055 | va_end(args); | 
|  | 2056 | return ret; | 
|  | 2057 | } | 
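/*
 * Note: comma_fprintf() prints ": <item>" for the first field after
 * the event name and ", <item>" for every subsequent one, so the
 * verbose output of perf_evsel__fprintf() below ends up shaped like
 * "event_name: field: value, field: value, ...".
 */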
|  | 2058 |  | 
| Peter Zijlstra | 2c5e8c5 | 2015-04-07 11:09:54 +0200 | [diff] [blame] | 2059 | static int __print_attr__fprintf(FILE *fp, const char *name, const char *val, void *priv) | 
| Arnaldo Carvalho de Melo | 0698aed | 2012-12-10 18:17:08 -0300 | [diff] [blame] | 2060 | { | 
| Peter Zijlstra | 2c5e8c5 | 2015-04-07 11:09:54 +0200 | [diff] [blame] | 2061 | return comma_fprintf(fp, (bool *)priv, " %s: %s", name, val); | 
| Arnaldo Carvalho de Melo | c79a439 | 2012-12-11 10:54:12 -0300 | [diff] [blame] | 2062 | } | 
|  | 2063 |  | 
| Arnaldo Carvalho de Melo | 0698aed | 2012-12-10 18:17:08 -0300 | [diff] [blame] | 2064 | int perf_evsel__fprintf(struct perf_evsel *evsel, | 
|  | 2065 | struct perf_attr_details *details, FILE *fp) | 
|  | 2066 | { | 
|  | 2067 | bool first = true; | 
| Namhyung Kim | e6ab07d | 2013-01-22 18:09:47 +0900 | [diff] [blame] | 2068 | int printed = 0; | 
|  | 2069 |  | 
| Arnaldo Carvalho de Melo | e35ef355 | 2013-02-06 17:20:02 -0300 | [diff] [blame] | 2070 | if (details->event_group) { | 
| Namhyung Kim | e6ab07d | 2013-01-22 18:09:47 +0900 | [diff] [blame] | 2071 | struct perf_evsel *pos; | 
|  | 2072 |  | 
|  | 2073 | if (!perf_evsel__is_group_leader(evsel)) | 
|  | 2074 | return 0; | 
|  | 2075 |  | 
|  | 2076 | if (evsel->nr_members > 1) | 
|  | 2077 | printed += fprintf(fp, "%s{", evsel->group_name ?: ""); | 
|  | 2078 |  | 
|  | 2079 | printed += fprintf(fp, "%s", perf_evsel__name(evsel)); | 
|  | 2080 | for_each_group_member(pos, evsel) | 
|  | 2081 | printed += fprintf(fp, ",%s", perf_evsel__name(pos)); | 
|  | 2082 |  | 
|  | 2083 | if (evsel->nr_members > 1) | 
|  | 2084 | printed += fprintf(fp, "}"); | 
|  | 2085 | goto out; | 
|  | 2086 | } | 
|  | 2087 |  | 
|  | 2088 | printed += fprintf(fp, "%s", perf_evsel__name(evsel)); | 
| Arnaldo Carvalho de Melo | 0698aed | 2012-12-10 18:17:08 -0300 | [diff] [blame] | 2089 |  | 
| Peter Zijlstra | 2c5e8c5 | 2015-04-07 11:09:54 +0200 | [diff] [blame] | 2090 | if (details->verbose) { | 
|  | 2091 | printed += perf_event_attr__fprintf(fp, &evsel->attr, | 
|  | 2092 | __print_attr__fprintf, &first); | 
|  | 2093 | } else if (details->freq) { | 
| Arnaldo Carvalho de Melo | 0698aed | 2012-12-10 18:17:08 -0300 | [diff] [blame] | 2094 | printed += comma_fprintf(fp, &first, " sample_freq=%" PRIu64, | 
|  | 2095 | (u64)evsel->attr.sample_freq); | 
|  | 2096 | } | 
| Namhyung Kim | e6ab07d | 2013-01-22 18:09:47 +0900 | [diff] [blame] | 2097 | out: | 
| Arnaldo Carvalho de Melo | 0698aed | 2012-12-10 18:17:08 -0300 | [diff] [blame] | 2098 | fputc('\n', fp); | 
|  | 2099 | return ++printed; | 
|  | 2100 | } | 
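/*
 * Example (sketch): how a caller might drive perf_evsel__fprintf(),
 * roughly what "perf evlist -v" style code does.  The initializer only
 * relies on the perf_attr_details fields used in this file; the
 * surrounding function is illustrative.
 */
static void dump_evsel(struct perf_evsel *evsel)
{
	struct perf_attr_details details = {
		.verbose = true,	/* dump every perf_event_attr field */
	};

	perf_evsel__fprintf(evsel, &details, stdout);
}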
| Arnaldo Carvalho de Melo | c0a5434 | 2012-12-13 14:16:30 -0300 | [diff] [blame] | 2101 |  | 
|  | 2102 | bool perf_evsel__fallback(struct perf_evsel *evsel, int err, | 
|  | 2103 | char *msg, size_t msgsize) | 
|  | 2104 | { | 
| David Ahern | 2b821cc | 2013-07-18 17:27:59 -0600 | [diff] [blame] | 2105 | if ((err == ENOENT || err == ENXIO || err == ENODEV) && | 
| Arnaldo Carvalho de Melo | c0a5434 | 2012-12-13 14:16:30 -0300 | [diff] [blame] | 2106 | evsel->attr.type   == PERF_TYPE_HARDWARE && | 
|  | 2107 | evsel->attr.config == PERF_COUNT_HW_CPU_CYCLES) { | 
|  | 2108 | /* | 
|  | 2109 | * If it's cycles then fall back to the hrtimer based | 
|  | 2110 | * cpu-clock-tick software counter, which is always available | 
|  | 2111 | * even when there is no PMU support. | 
|  | 2112 | * | 
|  | 2113 | * PPC returns ENXIO until 2.6.37 (behavior changed with commit | 
|  | 2114 | * b0a873e). | 
|  | 2115 | */ | 
|  | 2116 | scnprintf(msg, msgsize, "%s", | 
|  | 2117 | "The cycles event is not supported, trying to fall back to cpu-clock-ticks"); | 
|  | 2118 |  | 
|  | 2119 | evsel->attr.type   = PERF_TYPE_SOFTWARE; | 
|  | 2120 | evsel->attr.config = PERF_COUNT_SW_CPU_CLOCK; | 
|  | 2121 |  | 
| Arnaldo Carvalho de Melo | 0466252 | 2013-12-26 17:41:15 -0300 | [diff] [blame] | 2122 | zfree(&evsel->name); | 
| Arnaldo Carvalho de Melo | c0a5434 | 2012-12-13 14:16:30 -0300 | [diff] [blame] | 2123 | return true; | 
|  | 2124 | } | 
|  | 2125 |  | 
|  | 2126 | return false; | 
|  | 2127 | } | 
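/*
 * Sketch of the retry pattern the perf tools build around
 * perf_evsel__fallback() (builtin-record/builtin-top do essentially
 * this); the function and variable names here are illustrative, not
 * copied from those files.
 */
static int open_with_fallback(struct perf_evsel *evsel, struct cpu_map *cpus,
			      struct thread_map *threads)
{
	char msg[512];
	int err;

retry:
	err = perf_evsel__open(evsel, cpus, threads);
	if (err < 0 && perf_evsel__fallback(evsel, -err, msg, sizeof(msg))) {
		pr_warning("%s\n", msg);	/* e.g. cycles -> cpu-clock */
		goto retry;
	}

	return err;
}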
| Arnaldo Carvalho de Melo | 56e52e8 | 2012-12-13 15:10:58 -0300 | [diff] [blame] | 2128 |  | 
| Arnaldo Carvalho de Melo | 602ad87 | 2013-11-12 16:46:16 -0300 | [diff] [blame] | 2129 | int perf_evsel__open_strerror(struct perf_evsel *evsel, struct target *target, | 
| Arnaldo Carvalho de Melo | 56e52e8 | 2012-12-13 15:10:58 -0300 | [diff] [blame] | 2130 | int err, char *msg, size_t size) | 
|  | 2131 | { | 
| Masami Hiramatsu | 6e81c74 | 2014-08-14 02:22:36 +0000 | [diff] [blame] | 2132 | char sbuf[STRERR_BUFSIZE]; | 
|  | 2133 |  | 
| Arnaldo Carvalho de Melo | 56e52e8 | 2012-12-13 15:10:58 -0300 | [diff] [blame] | 2134 | switch (err) { | 
|  | 2135 | case EPERM: | 
|  | 2136 | case EACCES: | 
| David Ahern | b69e63a | 2013-05-25 17:54:00 -0600 | [diff] [blame] | 2137 | return scnprintf(msg, size, | 
| Arnaldo Carvalho de Melo | 56e52e8 | 2012-12-13 15:10:58 -0300 | [diff] [blame] | 2138 | "You may not have permission to collect %sstats.\n" | 
|  | 2139 | "Consider tweaking /proc/sys/kernel/perf_event_paranoid:\n" | 
|  | 2140 | " -1 - Not paranoid at all\n" | 
|  | 2141 | "  0 - Disallow raw tracepoint access for unpriv\n" | 
|  | 2142 | "  1 - Disallow cpu events for unpriv\n" | 
|  | 2143 | "  2 - Disallow kernel profiling for unpriv", | 
|  | 2144 | target->system_wide ? "system-wide " : ""); | 
|  | 2145 | case ENOENT: | 
|  | 2146 | return scnprintf(msg, size, "The %s event is not supported.", | 
|  | 2147 | perf_evsel__name(evsel)); | 
|  | 2148 | case EMFILE: | 
|  | 2149 | return scnprintf(msg, size, "%s", | 
|  | 2150 | "Too many events are opened.\n" | 
| Jiri Olsa | 18ffdfe | 2015-05-25 22:51:54 +0200 | [diff] [blame] | 2151 | "Probably the maximum number of open file descriptors has been reached.\n" | 
|  | 2152 | "Hint: Try again after reducing the number of events.\n" | 
|  | 2153 | "Hint: Try increasing the limit with 'ulimit -n <limit>'"); | 
| Arnaldo Carvalho de Melo | 56e52e8 | 2012-12-13 15:10:58 -0300 | [diff] [blame] | 2154 | case ENODEV: | 
|  | 2155 | if (target->cpu_list) | 
|  | 2156 | return scnprintf(msg, size, "%s", | 
|  | 2157 | "No such device - did you specify an out-of-range profile CPU?\n"); | 
|  | 2158 | break; | 
|  | 2159 | case EOPNOTSUPP: | 
|  | 2160 | if (evsel->attr.precise_ip) | 
|  | 2161 | return scnprintf(msg, size, "%s", | 
|  | 2162 | "\'precise\' request may not be supported. Try removing 'p' modifier."); | 
|  | 2163 | #if defined(__i386__) || defined(__x86_64__) | 
|  | 2164 | if (evsel->attr.type == PERF_TYPE_HARDWARE) | 
|  | 2165 | return scnprintf(msg, size, "%s", | 
|  | 2166 | "No hardware sampling interrupt available.\n" | 
|  | 2167 | "No APIC? If so then you can boot the kernel with the \"lapic\" boot parameter to force-enable it."); | 
|  | 2168 | #endif | 
|  | 2169 | break; | 
| Jiri Olsa | 63914ac | 2014-08-01 17:46:54 +0200 | [diff] [blame] | 2170 | case EBUSY: | 
|  | 2171 | if (find_process("oprofiled")) | 
|  | 2172 | return scnprintf(msg, size, | 
|  | 2173 | "The PMU counters are busy/taken by another profiler.\n" | 
|  | 2174 | "We found oprofile daemon running, please stop it and try again."); | 
|  | 2175 | break; | 
| Peter Zijlstra | 814c8c3 | 2015-03-31 00:19:31 +0200 | [diff] [blame] | 2176 | case EINVAL: | 
|  | 2177 | if (perf_missing_features.clockid) | 
|  | 2178 | return scnprintf(msg, size, "clockid feature not supported."); | 
|  | 2179 | if (perf_missing_features.clockid_wrong) | 
|  | 2180 | return scnprintf(msg, size, "wrong clockid (%d).", clockid); | 
|  | 2181 | break; | 
| Arnaldo Carvalho de Melo | 56e52e8 | 2012-12-13 15:10:58 -0300 | [diff] [blame] | 2182 | default: | 
|  | 2183 | break; | 
|  | 2184 | } | 
|  | 2185 |  | 
|  | 2186 | return scnprintf(msg, size, | 
| Masami Hiramatsu | 6e81c74 | 2014-08-14 02:22:36 +0000 | [diff] [blame] | 2187 | "The sys_perf_event_open() syscall returned with %d (%s) for event (%s).\n" | 
| Arnaldo Carvalho de Melo | 56e52e8 | 2012-12-13 15:10:58 -0300 | [diff] [blame] | 2188 | "/bin/dmesg may provide additional information.\n" | 
|  | 2189 | "No CONFIG_PERF_EVENTS=y kernel support configured?\n", | 
| Masami Hiramatsu | 6e81c74 | 2014-08-14 02:22:36 +0000 | [diff] [blame] | 2190 | err, strerror_r(err, sbuf, sizeof(sbuf)), | 
|  | 2191 | perf_evsel__name(evsel)); | 
| Arnaldo Carvalho de Melo | 56e52e8 | 2012-12-13 15:10:58 -0300 | [diff] [blame] | 2192 | } |
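/*
 * Example (sketch): perf_evsel__open_strerror() is meant to be called
 * right after an open failure to turn the errno into user guidance,
 * along the lines of (names illustrative):
 *
 *	err = perf_evsel__open(evsel, cpus, threads);
 *	if (err < 0) {
 *		char errbuf[BUFSIZ];
 *
 *		perf_evsel__open_strerror(evsel, &opts->target, -err,
 *					  errbuf, sizeof(errbuf));
 *		ui__error("%s\n", errbuf);
 *	}
 */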