/*
 * Performance events:
 *
 *    Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de>
 *    Copyright (C) 2008-2011, Red Hat, Inc., Ingo Molnar
 *    Copyright (C) 2008-2011, Red Hat, Inc., Peter Zijlstra
 *
 * Data type definitions, declarations, prototypes.
 *
 *    Started by: Thomas Gleixner and Ingo Molnar
 *
 * For licensing details see kernel-base/COPYING
 */
#ifndef _UAPI_LINUX_PERF_EVENT_H
#define _UAPI_LINUX_PERF_EVENT_H

#include <linux/types.h>
#include <linux/ioctl.h>
#include <asm/byteorder.h>

/*
 * User-space ABI bits:
 */

/*
 * attr.type
 */
enum perf_type_id {
        PERF_TYPE_HARDWARE                      = 0,
        PERF_TYPE_SOFTWARE                      = 1,
        PERF_TYPE_TRACEPOINT                    = 2,
        PERF_TYPE_HW_CACHE                      = 3,
        PERF_TYPE_RAW                           = 4,
        PERF_TYPE_BREAKPOINT                    = 5,

        PERF_TYPE_MAX,                          /* non-ABI */
};

/*
 * Generalized performance event event_id types, used by the
 * attr.config parameter of the sys_perf_event_open()
 * syscall:
 */
enum perf_hw_id {
        /*
         * Common hardware events, generalized by the kernel:
         */
        PERF_COUNT_HW_CPU_CYCLES                = 0,
        PERF_COUNT_HW_INSTRUCTIONS              = 1,
        PERF_COUNT_HW_CACHE_REFERENCES          = 2,
        PERF_COUNT_HW_CACHE_MISSES              = 3,
        PERF_COUNT_HW_BRANCH_INSTRUCTIONS       = 4,
        PERF_COUNT_HW_BRANCH_MISSES             = 5,
        PERF_COUNT_HW_BUS_CYCLES                = 6,
        PERF_COUNT_HW_STALLED_CYCLES_FRONTEND   = 7,
        PERF_COUNT_HW_STALLED_CYCLES_BACKEND    = 8,
        PERF_COUNT_HW_REF_CPU_CYCLES            = 9,

        PERF_COUNT_HW_MAX,                      /* non-ABI */
};

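/*
 * Illustrative (not part of the ABI): a generalized hardware event is
 * selected by pairing PERF_TYPE_HARDWARE with one of the IDs above,
 * e.g. for retired instructions:
 *
 *      struct perf_event_attr attr = { 0 };
 *
 *      attr.size   = sizeof(attr);
 *      attr.type   = PERF_TYPE_HARDWARE;
 *      attr.config = PERF_COUNT_HW_INSTRUCTIONS;
 */
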
/*
 * Generalized hardware cache events:
 *
 *       { L1-D, L1-I, LLC, ITLB, DTLB, BPU, NODE } x
 *       { read, write, prefetch } x
 *       { accesses, misses }
 */
enum perf_hw_cache_id {
        PERF_COUNT_HW_CACHE_L1D                 = 0,
        PERF_COUNT_HW_CACHE_L1I                 = 1,
        PERF_COUNT_HW_CACHE_LL                  = 2,
        PERF_COUNT_HW_CACHE_DTLB                = 3,
        PERF_COUNT_HW_CACHE_ITLB                = 4,
        PERF_COUNT_HW_CACHE_BPU                 = 5,
        PERF_COUNT_HW_CACHE_NODE                = 6,

        PERF_COUNT_HW_CACHE_MAX,                /* non-ABI */
};

enum perf_hw_cache_op_id {
        PERF_COUNT_HW_CACHE_OP_READ             = 0,
        PERF_COUNT_HW_CACHE_OP_WRITE            = 1,
        PERF_COUNT_HW_CACHE_OP_PREFETCH         = 2,

        PERF_COUNT_HW_CACHE_OP_MAX,             /* non-ABI */
};

enum perf_hw_cache_op_result_id {
        PERF_COUNT_HW_CACHE_RESULT_ACCESS       = 0,
        PERF_COUNT_HW_CACHE_RESULT_MISS         = 1,

        PERF_COUNT_HW_CACHE_RESULT_MAX,         /* non-ABI */
};

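/*
 * Illustrative (see also the perf_event_open(2) man page): a
 * PERF_TYPE_HW_CACHE event combines the three enums above in
 * attr.config, one byte each:
 *
 *      attr.config = (id) | (op_id << 8) | (op_result_id << 16);
 *
 * e.g. L1-D read misses:
 *
 *      attr.config = PERF_COUNT_HW_CACHE_L1D |
 *                    (PERF_COUNT_HW_CACHE_OP_READ << 8) |
 *                    (PERF_COUNT_HW_CACHE_RESULT_MISS << 16);
 */
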
/*
 * Special "software" events provided by the kernel, even if the hardware
 * does not support performance events. These events count various software
 * conditions in the kernel (and allow the profiling of them as well):
 */
enum perf_sw_ids {
        PERF_COUNT_SW_CPU_CLOCK                 = 0,
        PERF_COUNT_SW_TASK_CLOCK                = 1,
        PERF_COUNT_SW_PAGE_FAULTS               = 2,
        PERF_COUNT_SW_CONTEXT_SWITCHES          = 3,
        PERF_COUNT_SW_CPU_MIGRATIONS            = 4,
        PERF_COUNT_SW_PAGE_FAULTS_MIN           = 5,
        PERF_COUNT_SW_PAGE_FAULTS_MAJ           = 6,
        PERF_COUNT_SW_ALIGNMENT_FAULTS          = 7,
        PERF_COUNT_SW_EMULATION_FAULTS          = 8,
        PERF_COUNT_SW_DUMMY                     = 9,

        PERF_COUNT_SW_MAX,                      /* non-ABI */
};

/*
 * Bits that can be set in attr.sample_type to request information
 * in the overflow packets.
 */
enum perf_event_sample_format {
        PERF_SAMPLE_IP                          = 1U << 0,
        PERF_SAMPLE_TID                         = 1U << 1,
        PERF_SAMPLE_TIME                        = 1U << 2,
        PERF_SAMPLE_ADDR                        = 1U << 3,
        PERF_SAMPLE_READ                        = 1U << 4,
        PERF_SAMPLE_CALLCHAIN                   = 1U << 5,
        PERF_SAMPLE_ID                          = 1U << 6,
        PERF_SAMPLE_CPU                         = 1U << 7,
        PERF_SAMPLE_PERIOD                      = 1U << 8,
        PERF_SAMPLE_STREAM_ID                   = 1U << 9,
        PERF_SAMPLE_RAW                         = 1U << 10,
        PERF_SAMPLE_BRANCH_STACK                = 1U << 11,
        PERF_SAMPLE_REGS_USER                   = 1U << 12,
        PERF_SAMPLE_STACK_USER                  = 1U << 13,
        PERF_SAMPLE_WEIGHT                      = 1U << 14,
        PERF_SAMPLE_DATA_SRC                    = 1U << 15,
        PERF_SAMPLE_IDENTIFIER                  = 1U << 16,
        PERF_SAMPLE_TRANSACTION                 = 1U << 17,

        PERF_SAMPLE_MAX                         = 1U << 18,     /* non-ABI */
};

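/*
 * Illustrative: sample_type is a bitmask, so the requested fields are
 * OR'ed together; their order within the record is fixed by the enum
 * above:
 *
 *      attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID |
 *                         PERF_SAMPLE_TIME;
 */
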
/*
 * values to program into branch_sample_type when PERF_SAMPLE_BRANCH_STACK
 * is set
 *
 * If the user does not pass priv level information via branch_sample_type,
 * the kernel uses the event's priv level. Branch and event priv levels do
 * not have to match. Branch priv level is checked for permissions.
 *
 * The branch types can be combined, however BRANCH_ANY covers all types
 * of branches and therefore it supersedes all the other types.
 */
enum perf_branch_sample_type {
        PERF_SAMPLE_BRANCH_USER         = 1U << 0, /* user branches */
        PERF_SAMPLE_BRANCH_KERNEL       = 1U << 1, /* kernel branches */
        PERF_SAMPLE_BRANCH_HV           = 1U << 2, /* hypervisor branches */

        PERF_SAMPLE_BRANCH_ANY          = 1U << 3, /* any branch types */
        PERF_SAMPLE_BRANCH_ANY_CALL     = 1U << 4, /* any call branch */
        PERF_SAMPLE_BRANCH_ANY_RETURN   = 1U << 5, /* any return branch */
        PERF_SAMPLE_BRANCH_IND_CALL     = 1U << 6, /* indirect calls */
        PERF_SAMPLE_BRANCH_ABORT_TX     = 1U << 7, /* transaction aborts */
        PERF_SAMPLE_BRANCH_IN_TX        = 1U << 8, /* in transaction */
        PERF_SAMPLE_BRANCH_NO_TX        = 1U << 9, /* not in transaction */
        PERF_SAMPLE_BRANCH_COND         = 1U << 10, /* conditional branches */

        PERF_SAMPLE_BRANCH_MAX          = 1U << 11, /* non-ABI */
};

#define PERF_SAMPLE_BRANCH_PLM_ALL \
        (PERF_SAMPLE_BRANCH_USER|\
         PERF_SAMPLE_BRANCH_KERNEL|\
         PERF_SAMPLE_BRANCH_HV)

/*
 * Values to determine ABI of the registers dump.
 */
enum perf_sample_regs_abi {
        PERF_SAMPLE_REGS_ABI_NONE       = 0,
        PERF_SAMPLE_REGS_ABI_32         = 1,
        PERF_SAMPLE_REGS_ABI_64         = 2,
};

/*
 * Values for the memory transaction event qualifier, mostly for
 * abort events. Multiple bits can be set.
 */
enum {
        PERF_TXN_ELISION        = (1 << 0), /* From elision */
        PERF_TXN_TRANSACTION    = (1 << 1), /* From transaction */
        PERF_TXN_SYNC           = (1 << 2), /* Instruction-related abort */
        PERF_TXN_ASYNC          = (1 << 3), /* Abort not related to the instruction */
        PERF_TXN_RETRY          = (1 << 4), /* Retry possible */
        PERF_TXN_CONFLICT       = (1 << 5), /* Conflict abort */
        PERF_TXN_CAPACITY_WRITE = (1 << 6), /* Capacity write abort */
        PERF_TXN_CAPACITY_READ  = (1 << 7), /* Capacity read abort */

        PERF_TXN_MAX            = (1 << 8), /* non-ABI */

        /* bits 32..63 are reserved for the abort code */

        PERF_TXN_ABORT_MASK     = (0xffffffffULL << 32),
        PERF_TXN_ABORT_SHIFT    = 32,
};

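/*
 * Illustrative: the PMU-specific abort code is carried in the upper 32
 * bits of the PERF_SAMPLE_TRANSACTION value and is recovered with:
 *
 *      abort_code = (txn & PERF_TXN_ABORT_MASK) >> PERF_TXN_ABORT_SHIFT;
 */
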
/*
 * The format of the data returned by read() on a perf event fd,
 * as specified by attr.read_format:
 *
 * struct read_format {
 *      { u64           value;
 *        { u64         time_enabled; } && PERF_FORMAT_TOTAL_TIME_ENABLED
 *        { u64         time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING
 *        { u64         id;           } && PERF_FORMAT_ID
 *      } && !PERF_FORMAT_GROUP
 *
 *      { u64           nr;
 *        { u64         time_enabled; } && PERF_FORMAT_TOTAL_TIME_ENABLED
 *        { u64         time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING
 *        { u64         value;
 *          { u64       id;           } && PERF_FORMAT_ID
 *        }             cntr[nr];
 *      } && PERF_FORMAT_GROUP
 * };
 */
enum perf_event_read_format {
        PERF_FORMAT_TOTAL_TIME_ENABLED  = 1U << 0,
        PERF_FORMAT_TOTAL_TIME_RUNNING  = 1U << 1,
        PERF_FORMAT_ID                  = 1U << 2,
        PERF_FORMAT_GROUP               = 1U << 3,

        PERF_FORMAT_MAX                 = 1U << 4,      /* non-ABI */
};

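/*
 * Illustrative (error handling elided): reading a single counter opened
 * with both TOTAL_TIME formats and scaling the value to compensate for
 * event multiplexing:
 *
 *      struct { __u64 value, time_enabled, time_running; } rf;
 *      __u64 count;
 *
 *      read(fd, &rf, sizeof(rf));
 *      count = rf.value;
 *      if (rf.time_running && rf.time_running < rf.time_enabled)
 *              count = rf.value * rf.time_enabled / rf.time_running;
 */
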
#define PERF_ATTR_SIZE_VER0     64      /* sizeof first published struct */
#define PERF_ATTR_SIZE_VER1     72      /* add: config2 */
#define PERF_ATTR_SIZE_VER2     80      /* add: branch_sample_type */
#define PERF_ATTR_SIZE_VER3     96      /* add: sample_regs_user */
                                        /* add: sample_stack_user */

/*
 * Hardware event_id to monitor via a performance monitoring event:
 */
struct perf_event_attr {

        /*
         * Major type: hardware/software/tracepoint/etc.
         */
        __u32                   type;

        /*
         * Size of the attr structure, for fwd/bwd compat.
         */
        __u32                   size;

        /*
         * Type specific configuration information.
         */
        __u64                   config;

        union {
                __u64           sample_period;
                __u64           sample_freq;
        };

        __u64                   sample_type;
        __u64                   read_format;

        __u64                   disabled       :  1, /* off by default        */
                                inherit        :  1, /* children inherit it   */
                                pinned         :  1, /* must always be on PMU */
                                exclusive      :  1, /* only group on PMU     */
                                exclude_user   :  1, /* don't count user      */
                                exclude_kernel :  1, /* ditto kernel          */
                                exclude_hv     :  1, /* ditto hypervisor      */
                                exclude_idle   :  1, /* don't count when idle */
                                mmap           :  1, /* include mmap data     */
                                comm           :  1, /* include comm data     */
                                freq           :  1, /* use freq, not period  */
                                inherit_stat   :  1, /* per task counts       */
                                enable_on_exec :  1, /* next exec enables     */
                                task           :  1, /* trace fork/exit       */
                                watermark      :  1, /* wakeup_watermark      */
                                /*
                                 * precise_ip:
                                 *
                                 *  0 - SAMPLE_IP can have arbitrary skid
                                 *  1 - SAMPLE_IP must have constant skid
                                 *  2 - SAMPLE_IP requested to have 0 skid
                                 *  3 - SAMPLE_IP must have 0 skid
                                 *
                                 *  See also PERF_RECORD_MISC_EXACT_IP
                                 */
                                precise_ip     :  2, /* skid constraint       */
                                mmap_data      :  1, /* non-exec mmap data    */
                                sample_id_all  :  1, /* sample_type all events */

                                exclude_host   :  1, /* don't count in host   */
                                exclude_guest  :  1, /* don't count in guest  */

                                exclude_callchain_kernel : 1, /* exclude kernel callchains */
                                exclude_callchain_user   : 1, /* exclude user callchains */
                                mmap2          :  1, /* include mmap with inode data */

                                __reserved_1   : 40;

        union {
                __u32           wakeup_events;    /* wakeup every n events */
                __u32           wakeup_watermark; /* bytes before wakeup   */
        };

        __u32                   bp_type;
        union {
                __u64           bp_addr;
                __u64           config1; /* extension of config */
        };
        union {
                __u64           bp_len;
                __u64           config2; /* extension of config1 */
        };
        __u64   branch_sample_type; /* enum perf_branch_sample_type */

        /*
         * Defines set of user regs to dump on samples.
         * See asm/perf_regs.h for details.
         */
        __u64   sample_regs_user;

        /*
         * Defines size of the user stack to dump on samples.
         */
        __u32   sample_stack_user;

        /* Align to u64. */
        __u32   __reserved_2;
};

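/*
 * Illustrative usage (error handling elided): counting CPU cycles of
 * the calling thread. There is no glibc wrapper for sys_perf_event_open,
 * so the raw syscall(2) interface is used:
 *
 *      #include <string.h>
 *      #include <unistd.h>
 *      #include <sys/ioctl.h>
 *      #include <sys/syscall.h>
 *      #include <linux/perf_event.h>
 *
 *      struct perf_event_attr attr;
 *      long long count;
 *      int fd;
 *
 *      memset(&attr, 0, sizeof(attr));
 *      attr.type           = PERF_TYPE_HARDWARE;
 *      attr.size           = sizeof(attr);
 *      attr.config         = PERF_COUNT_HW_CPU_CYCLES;
 *      attr.disabled       = 1;
 *      attr.exclude_kernel = 1;
 *      attr.exclude_hv     = 1;
 *
 *      (pid = 0, cpu = -1: this task on any CPU; group_fd = -1, flags = 0)
 *      fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
 *
 *      ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
 *      (... workload ...)
 *      ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
 *      read(fd, &count, sizeof(count));
 */
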
#define perf_flags(attr)        (*(&(attr)->read_format + 1))

/*
 * Ioctls that can be done on a perf event fd:
 */
#define PERF_EVENT_IOC_ENABLE           _IO ('$', 0)
#define PERF_EVENT_IOC_DISABLE          _IO ('$', 1)
#define PERF_EVENT_IOC_REFRESH          _IO ('$', 2)
#define PERF_EVENT_IOC_RESET            _IO ('$', 3)
#define PERF_EVENT_IOC_PERIOD           _IOW('$', 4, __u64)
#define PERF_EVENT_IOC_SET_OUTPUT       _IO ('$', 5)
#define PERF_EVENT_IOC_SET_FILTER       _IOW('$', 6, char *)
#define PERF_EVENT_IOC_ID               _IOR('$', 7, __u64 *)

enum perf_event_ioc_flags {
        PERF_IOC_FLAG_GROUP             = 1U << 0,
};

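/*
 * Illustrative: the ioctls act on a single event by default; passing
 * PERF_IOC_FLAG_GROUP as the argument applies the operation to all
 * events in the group, e.g.:
 *
 *      ioctl(group_leader_fd, PERF_EVENT_IOC_ENABLE, PERF_IOC_FLAG_GROUP);
 */
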
/*
 * Structure of the page that can be mapped via mmap
 */
struct perf_event_mmap_page {
        __u32   version;                /* version number of this structure */
        __u32   compat_version;         /* lowest version this is compat with */

        /*
         * Bits needed to read the hw events in user-space.
         *
         *   u32 seq, time_mult, time_shift, idx, width;
         *   u64 count, enabled, running;
         *   u64 cyc, time_offset;
         *   s64 pmc = 0;
         *
         *   do {
         *     seq = pc->lock;
         *     barrier()
         *
         *     enabled = pc->time_enabled;
         *     running = pc->time_running;
         *
         *     if (pc->cap_user_time && enabled != running) {
         *       cyc = rdtsc();
         *       time_offset = pc->time_offset;
         *       time_mult   = pc->time_mult;
         *       time_shift  = pc->time_shift;
         *     }
         *
         *     idx = pc->index;
         *     count = pc->offset;
         *     if (pc->cap_user_rdpmc && idx) {
         *       width = pc->pmc_width;
         *       pmc = rdpmc(idx - 1);
         *     }
         *
         *     barrier();
         *   } while (pc->lock != seq);
         *
         * NOTE: for obvious reasons this only works on self-monitoring
         *       processes.
         */
        __u32   lock;                   /* seqlock for synchronization */
        __u32   index;                  /* hardware event identifier */
        __s64   offset;                 /* add to hardware event value */
        __u64   time_enabled;           /* time event active */
        __u64   time_running;           /* time event on cpu */
        union {
                __u64   capabilities;
                struct {
                        __u64   cap_bit0                : 1, /* Always 0, deprecated, see commit 860f085b74e9 */
                                cap_bit0_is_deprecated  : 1, /* Always 1, signals that bit 0 is zero */

                                cap_user_rdpmc          : 1, /* The RDPMC instruction can be used to read counts */
                                cap_user_time           : 1, /* The time_* fields are used */
                                cap_user_time_zero      : 1, /* The time_zero field is used */
                                cap_____res             : 59;
                };
        };

        /*
         * If cap_user_rdpmc this field provides the bit-width of the value
         * read using the rdpmc() or equivalent instruction. This can be used
         * to sign extend the result like:
         *
         *   pmc <<= 64 - width;
         *   pmc >>= 64 - width; // signed shift right
         *   count += pmc;
         */
        __u16   pmc_width;

        /*
         * If cap_user_time the below fields can be used to compute the time
         * delta since time_enabled (in ns) using rdtsc or similar.
         *
         *   u64 quot, rem;
         *   u64 delta;
         *
         *   quot = (cyc >> time_shift);
         *   rem = cyc & ((1 << time_shift) - 1);
         *   delta = time_offset + quot * time_mult +
         *           ((rem * time_mult) >> time_shift);
         *
         * Where time_offset, time_mult, time_shift and cyc are read in the
         * seqcount loop described above. This delta can then be added to
         * enabled and possibly to running (if idx), improving the scaling:
         *
         *   enabled += delta;
         *   if (idx)
         *     running += delta;
         *
         *   quot = count / running;
         *   rem  = count % running;
         *   count = quot * enabled + (rem * enabled) / running;
         */
        __u16   time_shift;
        __u32   time_mult;
        __u64   time_offset;
        /*
         * If cap_user_time_zero, the hardware clock (e.g. TSC) can be
         * calculated from sample timestamps.
         *
         *   time = timestamp - time_zero;
         *   quot = time / time_mult;
         *   rem  = time % time_mult;
         *   cyc = (quot << time_shift) + (rem << time_shift) / time_mult;
         *
         * And vice versa:
         *
         *   quot = cyc >> time_shift;
         *   rem  = cyc & ((1 << time_shift) - 1);
         *   timestamp = time_zero + quot * time_mult +
         *               ((rem * time_mult) >> time_shift);
         */
        __u64   time_zero;
        __u32   size;                   /* Header size up to __reserved[] fields. */

        /*
         * Hole for extension of the self monitor capabilities
         */

        __u8    __reserved[118*8+4];    /* align to 1k. */

        /*
         * Control data for the mmap() data buffer.
         *
         * User-space should issue an smp_rmb() after reading the
         * @data_head value.
         *
         * When the mapping is PROT_WRITE the @data_tail value should be
         * written by userspace to reflect the last read data, after issuing
         * an smp_mb() to separate the data read from the ->data_tail store.
         * In this case the kernel will not over-write unread data.
         *
         * See perf_output_put_handle() for the data ordering.
         */
        __u64   data_head;              /* head in the data section */
        __u64   data_tail;              /* user-space written tail */
};

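/*
 * Illustrative sketch (assumes the usual mmap() layout of one header
 * page followed by 2^n pages of data, "data_size" being that data
 * length; records that wrap at the end of the buffer must be stitched
 * together by a real consumer):
 *
 *      struct perf_event_mmap_page *pc = base;
 *      char *data = (char *)base + page_size;
 *      __u64 head, tail = pc->data_tail;
 *
 *      head = pc->data_head;
 *      smp_rmb();
 *
 *      while (tail < head) {
 *              struct perf_event_header *hdr = (void *)
 *                      (data + (tail & (data_size - 1)));
 *
 *              (... process hdr->size bytes ...)
 *              tail += hdr->size;
 *      }
 *
 *      smp_mb();
 *      pc->data_tail = tail;
 */
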
#define PERF_RECORD_MISC_CPUMODE_MASK           (7 << 0)
#define PERF_RECORD_MISC_CPUMODE_UNKNOWN        (0 << 0)
#define PERF_RECORD_MISC_KERNEL                 (1 << 0)
#define PERF_RECORD_MISC_USER                   (2 << 0)
#define PERF_RECORD_MISC_HYPERVISOR             (3 << 0)
#define PERF_RECORD_MISC_GUEST_KERNEL           (4 << 0)
#define PERF_RECORD_MISC_GUEST_USER             (5 << 0)

#define PERF_RECORD_MISC_MMAP_DATA              (1 << 13)
/*
 * Indicates that the content of PERF_SAMPLE_IP points to
 * the actual instruction that triggered the event. See also
 * perf_event_attr::precise_ip.
 */
#define PERF_RECORD_MISC_EXACT_IP               (1 << 14)
/*
 * Reserve the last bit to indicate some extended misc field
 */
#define PERF_RECORD_MISC_EXT_RESERVED           (1 << 15)

struct perf_event_header {
        __u32   type;
        __u16   misc;
        __u16   size;
};

enum perf_event_type {

        /*
         * If perf_event_attr.sample_id_all is set then all event types will
         * have the sample_type selected fields related to where/when
         * (identity) an event took place (TID, TIME, ID, STREAM_ID, CPU,
         * IDENTIFIER) described in PERF_RECORD_SAMPLE below. These fields
         * are stashed just after the perf_event_header, following the
         * fields already present for the existing record types, i.e. at the
         * end of the payload. That way a newer perf.data file will be
         * supported by older perf tools, with these new optional fields
         * being ignored.
         *
         * struct sample_id {
         *      { u32                   pid, tid; } && PERF_SAMPLE_TID
         *      { u64                   time;     } && PERF_SAMPLE_TIME
         *      { u64                   id;       } && PERF_SAMPLE_ID
         *      { u64                   stream_id;} && PERF_SAMPLE_STREAM_ID
         *      { u32                   cpu, res; } && PERF_SAMPLE_CPU
         *      { u64                   id;       } && PERF_SAMPLE_IDENTIFIER
         * } && perf_event_attr::sample_id_all
         *
         * Note that PERF_SAMPLE_IDENTIFIER duplicates PERF_SAMPLE_ID. The
         * advantage of PERF_SAMPLE_IDENTIFIER is that its position is fixed
         * relative to header.size.
         */

        /*
         * The MMAP events record the PROT_EXEC mappings so that we can
         * correlate userspace IPs to code. They have the following structure:
         *
         * struct {
         *      struct perf_event_header        header;
         *
         *      u32                             pid, tid;
         *      u64                             addr;
         *      u64                             len;
         *      u64                             pgoff;
         *      char                            filename[];
         *      struct sample_id                sample_id;
         * };
         */
        PERF_RECORD_MMAP                        = 1,

        /*
         * struct {
         *      struct perf_event_header        header;
         *      u64                             id;
         *      u64                             lost;
         *      struct sample_id                sample_id;
         * };
         */
        PERF_RECORD_LOST                        = 2,

        /*
         * struct {
         *      struct perf_event_header        header;
         *
         *      u32                             pid, tid;
         *      char                            comm[];
         *      struct sample_id                sample_id;
         * };
         */
        PERF_RECORD_COMM                        = 3,

        /*
         * struct {
         *      struct perf_event_header        header;
         *      u32                             pid, ppid;
         *      u32                             tid, ptid;
         *      u64                             time;
         *      struct sample_id                sample_id;
         * };
         */
        PERF_RECORD_EXIT                        = 4,

        /*
         * struct {
         *      struct perf_event_header        header;
         *      u64                             time;
         *      u64                             id;
         *      u64                             stream_id;
         *      struct sample_id                sample_id;
         * };
         */
        PERF_RECORD_THROTTLE                    = 5,
        PERF_RECORD_UNTHROTTLE                  = 6,

        /*
         * struct {
         *      struct perf_event_header        header;
         *      u32                             pid, ppid;
         *      u32                             tid, ptid;
         *      u64                             time;
         *      struct sample_id                sample_id;
         * };
         */
        PERF_RECORD_FORK                        = 7,

        /*
         * struct {
         *      struct perf_event_header        header;
         *      u32                             pid, tid;
         *
         *      struct read_format              values;
         *      struct sample_id                sample_id;
         * };
         */
        PERF_RECORD_READ                        = 8,

        /*
         * struct {
         *      struct perf_event_header        header;
         *
         *      #
         *      # Note that PERF_SAMPLE_IDENTIFIER duplicates PERF_SAMPLE_ID.
         *      # The advantage of PERF_SAMPLE_IDENTIFIER is that its position
         *      # is fixed relative to header.
         *      #
         *
         *      { u64                   id;       } && PERF_SAMPLE_IDENTIFIER
         *      { u64                   ip;       } && PERF_SAMPLE_IP
         *      { u32                   pid, tid; } && PERF_SAMPLE_TID
         *      { u64                   time;     } && PERF_SAMPLE_TIME
         *      { u64                   addr;     } && PERF_SAMPLE_ADDR
         *      { u64                   id;       } && PERF_SAMPLE_ID
         *      { u64                   stream_id;} && PERF_SAMPLE_STREAM_ID
         *      { u32                   cpu, res; } && PERF_SAMPLE_CPU
         *      { u64                   period;   } && PERF_SAMPLE_PERIOD
         *
         *      { struct read_format    values;   } && PERF_SAMPLE_READ
         *
         *      { u64                   nr,
         *        u64                   ips[nr];  } && PERF_SAMPLE_CALLCHAIN
         *
         *      #
         *      # The RAW record below is opaque data wrt the ABI
         *      #
         *      # That is, the ABI doesn't make any promises wrt the
         *      # stability of its content, it may vary depending
         *      # on event, hardware, kernel version and phase of
         *      # the moon.
         *      #
         *      # In other words, PERF_SAMPLE_RAW contents are not an ABI.
         *      #
         *
         *      { u32                   size;
         *        char                  data[size];} && PERF_SAMPLE_RAW
         *
         *      { u64                   nr;
         *        { u64 from, to, flags } lbr[nr];} && PERF_SAMPLE_BRANCH_STACK
         *
         *      { u64                   abi; # enum perf_sample_regs_abi
         *        u64                   regs[weight(mask)]; } && PERF_SAMPLE_REGS_USER
         *
         *      { u64                   size;
         *        char                  data[size];
         *        u64                   dyn_size; } && PERF_SAMPLE_STACK_USER
         *
         *      { u64                   weight;   } && PERF_SAMPLE_WEIGHT
         *      { u64                   data_src; } && PERF_SAMPLE_DATA_SRC
         *      { u64                   transaction; } && PERF_SAMPLE_TRANSACTION
         * };
         */
        PERF_RECORD_SAMPLE                      = 9,

        /*
         * The MMAP2 records are an augmented version of MMAP; they add
         * maj, min and ino numbers to uniquely identify each mapping.
         *
         * struct {
         *      struct perf_event_header        header;
         *
         *      u32                             pid, tid;
         *      u64                             addr;
         *      u64                             len;
         *      u64                             pgoff;
         *      u32                             maj;
         *      u32                             min;
         *      u64                             ino;
         *      u64                             ino_generation;
         *      char                            filename[];
         *      struct sample_id                sample_id;
         * };
         */
        PERF_RECORD_MMAP2                       = 10,

        PERF_RECORD_MAX,                        /* non-ABI */
};

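/*
 * Illustrative: with a known sample_type the PERF_RECORD_SAMPLE layout
 * is fixed; e.g. for sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID |
 * PERF_SAMPLE_TIME the record body is exactly:
 *
 *      struct {
 *              struct perf_event_header header;
 *              __u64 ip;
 *              __u32 pid, tid;
 *              __u64 time;
 *      } *s = (void *)hdr;
 *
 *      if (hdr->type == PERF_RECORD_SAMPLE)
 *              (... use s->ip, s->pid, s->tid, s->time ...)
 */
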
#define PERF_MAX_STACK_DEPTH            127

enum perf_callchain_context {
        PERF_CONTEXT_HV                 = (__u64)-32,
        PERF_CONTEXT_KERNEL             = (__u64)-128,
        PERF_CONTEXT_USER               = (__u64)-512,

        PERF_CONTEXT_GUEST              = (__u64)-2048,
        PERF_CONTEXT_GUEST_KERNEL       = (__u64)-2176,
        PERF_CONTEXT_GUEST_USER         = (__u64)-2560,

        PERF_CONTEXT_MAX                = (__u64)-4095,
};

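/*
 * Illustrative: in a PERF_SAMPLE_CALLCHAIN the context values above are
 * interleaved with return addresses in ips[]; as unsigned values they
 * all compare >= PERF_CONTEXT_MAX:
 *
 *      for (i = 0; i < nr; i++) {
 *              if (ips[i] >= PERF_CONTEXT_MAX)
 *                      (... marker: following entries belong to this context ...)
 *              else
 *                      (... ips[i] is an instruction pointer in that context ...)
 *      }
 */
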
#define PERF_FLAG_FD_NO_GROUP           (1UL << 0)
#define PERF_FLAG_FD_OUTPUT             (1UL << 1)
#define PERF_FLAG_PID_CGROUP            (1UL << 2) /* pid=cgroup id, per-cpu mode only */
#define PERF_FLAG_FD_CLOEXEC            (1UL << 3) /* O_CLOEXEC */

union perf_mem_data_src {
        __u64 val;
        struct {
                __u64   mem_op:5,       /* type of opcode */
                        mem_lvl:14,     /* memory hierarchy level */
                        mem_snoop:5,    /* snoop mode */
                        mem_lock:2,     /* lock instr */
                        mem_dtlb:7,     /* tlb access */
                        mem_rsvd:31;
        };
};

/* type of opcode (load/store/prefetch, code) */
#define PERF_MEM_OP_NA          0x01 /* not available */
#define PERF_MEM_OP_LOAD        0x02 /* load instruction */
#define PERF_MEM_OP_STORE       0x04 /* store instruction */
#define PERF_MEM_OP_PFETCH      0x08 /* prefetch */
#define PERF_MEM_OP_EXEC        0x10 /* code (execution) */
#define PERF_MEM_OP_SHIFT       0

/* memory hierarchy (memory level, hit or miss) */
#define PERF_MEM_LVL_NA         0x01   /* not available */
#define PERF_MEM_LVL_HIT        0x02   /* hit level */
#define PERF_MEM_LVL_MISS       0x04   /* miss level */
#define PERF_MEM_LVL_L1         0x08   /* L1 */
#define PERF_MEM_LVL_LFB        0x10   /* Line Fill Buffer */
#define PERF_MEM_LVL_L2         0x20   /* L2 */
#define PERF_MEM_LVL_L3         0x40   /* L3 */
#define PERF_MEM_LVL_LOC_RAM    0x80   /* Local DRAM */
#define PERF_MEM_LVL_REM_RAM1   0x100  /* Remote DRAM (1 hop) */
#define PERF_MEM_LVL_REM_RAM2   0x200  /* Remote DRAM (2 hops) */
#define PERF_MEM_LVL_REM_CCE1   0x400  /* Remote Cache (1 hop) */
#define PERF_MEM_LVL_REM_CCE2   0x800  /* Remote Cache (2 hops) */
#define PERF_MEM_LVL_IO         0x1000 /* I/O memory */
#define PERF_MEM_LVL_UNC        0x2000 /* Uncached memory */
#define PERF_MEM_LVL_SHIFT      5

/* snoop mode */
#define PERF_MEM_SNOOP_NA       0x01 /* not available */
#define PERF_MEM_SNOOP_NONE     0x02 /* no snoop */
#define PERF_MEM_SNOOP_HIT      0x04 /* snoop hit */
#define PERF_MEM_SNOOP_MISS     0x08 /* snoop miss */
#define PERF_MEM_SNOOP_HITM     0x10 /* snoop hit modified */
#define PERF_MEM_SNOOP_SHIFT    19

/* locked instruction */
#define PERF_MEM_LOCK_NA        0x01 /* not available */
#define PERF_MEM_LOCK_LOCKED    0x02 /* locked transaction */
#define PERF_MEM_LOCK_SHIFT     24

/* TLB access */
#define PERF_MEM_TLB_NA         0x01 /* not available */
#define PERF_MEM_TLB_HIT        0x02 /* hit level */
#define PERF_MEM_TLB_MISS       0x04 /* miss level */
#define PERF_MEM_TLB_L1         0x08 /* L1 */
#define PERF_MEM_TLB_L2         0x10 /* L2 */
#define PERF_MEM_TLB_WK         0x20 /* Hardware Walker */
#define PERF_MEM_TLB_OS         0x40 /* OS fault handler */
#define PERF_MEM_TLB_SHIFT      26

#define PERF_MEM_S(a, s) \
        (((__u64)PERF_MEM_##a##_##s) << PERF_MEM_##a##_SHIFT)

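/*
 * Illustrative: PERF_MEM_S() shifts a field value into place, so a load
 * that hit the local L1 would be encoded as:
 *
 *      data_src = PERF_MEM_S(OP, LOAD) |
 *                 PERF_MEM_S(LVL, HIT) | PERF_MEM_S(LVL, L1) |
 *                 PERF_MEM_S(SNOOP, NONE) |
 *                 PERF_MEM_S(LOCK, NA) |
 *                 PERF_MEM_S(TLB, HIT) | PERF_MEM_S(TLB, L1);
 *
 * A consumer decodes a PERF_SAMPLE_DATA_SRC value via the bit-field view:
 *
 *      union perf_mem_data_src dsrc = { .val = data_src };
 *
 *      if (dsrc.mem_lvl & PERF_MEM_LVL_MISS)
 *              (... a miss at the level(s) also set in mem_lvl ...)
 */
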
/*
 * single taken branch record layout:
 *
 *      from: source instruction (may not always be a branch insn)
 *        to: branch target
 *   mispred: branch target was mispredicted
 * predicted: branch target was predicted
 *
 * Support for mispred and predicted is optional. If it is not supported,
 * mispred = predicted = 0.
 *
 *     in_tx: running in a hardware transaction
 *     abort: aborting a hardware transaction
 */
struct perf_branch_entry {
        __u64   from;
        __u64   to;
        __u64   mispred:1,  /* target mispredicted */
                predicted:1,/* target predicted */
                in_tx:1,    /* in transaction */
                abort:1,    /* transaction abort */
                reserved:60;
};

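/*
 * Illustrative: in a PERF_SAMPLE_BRANCH_STACK sample the entries arrive
 * as a counted array (see PERF_RECORD_SAMPLE above):
 *
 *      u64                      nr;
 *      struct perf_branch_entry lbr[nr];
 *
 *      for (i = 0; i < nr; i++)
 *              (... lbr[i].from -> lbr[i].to, lbr[i].mispred ...)
 */
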
#endif /* _UAPI_LINUX_PERF_EVENT_H */