/*
 * Performance events:
 *
 *    Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de>
 *    Copyright (C) 2008-2011, Red Hat, Inc., Ingo Molnar
 *    Copyright (C) 2008-2011, Red Hat, Inc., Peter Zijlstra
 *
 * Data type definitions, declarations, prototypes.
 *
 *    Started by: Thomas Gleixner and Ingo Molnar
 *
 * For licensing details see kernel-base/COPYING
 */
#ifndef _LINUX_PERF_EVENT_H
#define _LINUX_PERF_EVENT_H

#include <uapi/linux/perf_event.h>

/*
 * Kernel-internal data types and definitions:
 */

#ifdef CONFIG_PERF_EVENTS
# include <asm/perf_event.h>
# include <asm/local64.h>
#endif

struct perf_guest_info_callbacks {
	int				(*is_in_guest)(void);
	int				(*is_user_mode)(void);
	unsigned long			(*get_guest_ip)(void);
};

#ifdef CONFIG_HAVE_HW_BREAKPOINT
#include <asm/hw_breakpoint.h>
#endif

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/hrtimer.h>
#include <linux/fs.h>
#include <linux/pid_namespace.h>
#include <linux/workqueue.h>
#include <linux/ftrace.h>
#include <linux/cpu.h>
#include <linux/irq_work.h>
#include <linux/static_key.h>
#include <linux/jump_label_ratelimit.h>
#include <linux/atomic.h>
#include <linux/sysfs.h>
#include <linux/perf_regs.h>
#include <linux/cgroup.h>
#include <asm/local.h>

struct perf_callchain_entry {
	__u64				nr;
	__u64				ip[0]; /* /proc/sys/kernel/perf_event_max_stack */
};

struct perf_callchain_entry_ctx {
	struct perf_callchain_entry	*entry;
	u32				max_stack;
	u32				nr;
	short				contexts;
	bool				contexts_maxed;
};

typedef unsigned long (*perf_copy_f)(void *dst, const void *src,
				     unsigned long off, unsigned long len);

struct perf_raw_frag {
	union {
		struct perf_raw_frag	*next;
		unsigned long		pad;
	};
	perf_copy_f			copy;
	void				*data;
	u32				size;
} __packed;

struct perf_raw_record {
	struct perf_raw_frag		frag;
	u32				size;
};

/*
 * branch stack layout:
 *  nr: number of taken branches stored in entries[]
 *
 *  Note that nr can vary from sample to sample;
 *  branches (to, from) are stored from most recent
 *  to least recent, i.e., entries[0] contains the most
 *  recent branch.
 */
struct perf_branch_stack {
	__u64				nr;
	struct perf_branch_entry	entries[0];
};
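
/*
 * Illustrative sketch (not part of this header): consuming a branch stack
 * delivered in a sample.  The dumping helper is hypothetical; it only relies
 * on the nr/entries[] layout documented above.
 *
 *	static void dump_branches(const struct perf_branch_stack *bs)
 *	{
 *		__u64 i;
 *
 *		// entries[0] is the most recent branch
 *		for (i = 0; i < bs->nr; i++)
 *			pr_debug("branch %llu: 0x%llx -> 0x%llx\n", i,
 *				 bs->entries[i].from, bs->entries[i].to);
 *	}
 */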

struct task_struct;

/*
 * extra PMU register associated with an event
 */
struct hw_perf_event_extra {
	u64		config;	/* register value */
	unsigned int	reg;	/* register address or index */
	int		alloc;	/* extra register already allocated */
	int		idx;	/* index in shared_regs->regs[] */
};

/**
 * struct hw_perf_event - performance event hardware details:
 */
struct hw_perf_event {
#ifdef CONFIG_PERF_EVENTS
	union {
		struct { /* hardware */
			u64		config;
			u64		last_tag;
			unsigned long	config_base;
			unsigned long	event_base;
			int		event_base_rdpmc;
			int		idx;
			int		last_cpu;
			int		flags;

			struct hw_perf_event_extra extra_reg;
			struct hw_perf_event_extra branch_reg;
		};
		struct { /* software */
			struct hrtimer	hrtimer;
		};
		struct { /* tracepoint */
			/* for tp_event->class */
			struct list_head	tp_list;
		};
		struct { /* intel_cqm */
			int			cqm_state;
			u32			cqm_rmid;
			int			is_group_event;
			struct list_head	cqm_events_entry;
			struct list_head	cqm_groups_entry;
			struct list_head	cqm_group_entry;
		};
		struct { /* itrace */
			int			itrace_started;
		};
		struct { /* amd_power */
			u64	pwr_acc;
			u64	ptsc;
		};
#ifdef CONFIG_HAVE_HW_BREAKPOINT
		struct { /* breakpoint */
			/*
			 * Crufty hack to avoid the chicken and egg
			 * problem hw_breakpoint has with context
			 * creation and event initialization.
			 */
			struct arch_hw_breakpoint	info;
			struct list_head		bp_list;
		};
#endif
	};
	/*
	 * If the event is a per task event, this will point to the task in
	 * question. See the comment in perf_event_alloc().
	 */
	struct task_struct		*target;

	/*
	 * PMU would store hardware filter configuration
	 * here.
	 */
	void				*addr_filters;

	/* Last sync'ed generation of filters */
	unsigned long			addr_filters_gen;

/*
 * hw_perf_event::state flags; used to track the PERF_EF_* state.
 */
#define PERF_HES_STOPPED	0x01 /* the counter is stopped */
#define PERF_HES_UPTODATE	0x02 /* event->count up-to-date */
#define PERF_HES_ARCH		0x04

	int				state;

	/*
	 * The last observed hardware counter value, updated with a
	 * local64_cmpxchg() such that pmu::read() can be called nested.
	 */
	local64_t			prev_count;

	/*
	 * The period to start the next sample with.
	 */
	u64				sample_period;

	/*
	 * The period we started this sample with.
	 */
	u64				last_period;

	/*
	 * However much is left of the current period; note that this is
	 * a full 64bit value and allows for generation of periods longer
	 * than hardware might allow.
	 */
	local64_t			period_left;

	/*
	 * State for throttling the event, see __perf_event_overflow() and
	 * perf_adjust_freq_unthr_context().
	 */
	u64                             interrupts_seq;
	u64				interrupts;

	/*
	 * State for freq target events, see __perf_event_overflow() and
	 * perf_adjust_freq_unthr_context().
	 */
	u64				freq_time_stamp;
	u64				freq_count_stamp;
#endif
};
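
/*
 * Illustrative sketch (not part of this header): how a PMU driver's read
 * path typically maintains prev_count/period_left, modelled on common arch
 * code.  The register-read helper my_hw_read_counter() is hypothetical.
 *
 *	static void my_event_update(struct perf_event *event)
 *	{
 *		struct hw_perf_event *hwc = &event->hw;
 *		u64 prev, now;
 *		s64 delta;
 *
 *		do {
 *			prev = local64_read(&hwc->prev_count);
 *			now  = my_hw_read_counter(hwc->idx);
 *		} while (local64_cmpxchg(&hwc->prev_count, prev, now) != prev);
 *
 *		delta = now - prev;
 *		local64_add(delta, &event->count);
 *		local64_sub(delta, &hwc->period_left);
 *	}
 */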

struct perf_event;

/*
 * Common implementation detail of pmu::{start,commit,cancel}_txn
 */
#define PERF_PMU_TXN_ADD  0x1		/* txn to add/schedule event on PMU */
#define PERF_PMU_TXN_READ 0x2		/* txn to read event group from PMU */
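
/*
 * Illustrative sketch (not part of this header): roughly how the core drives
 * the ADD transaction when scheduling an event group; error unwinding is
 * abbreviated.
 *
 *	struct perf_event *event;
 *	int ret;
 *
 *	pmu->start_txn(pmu, PERF_PMU_TXN_ADD);
 *
 *	ret = pmu->add(leader, PERF_EF_START);
 *	list_for_each_entry(event, &leader->sibling_list, group_entry) {
 *		if (!ret)
 *			ret = pmu->add(event, PERF_EF_START);
 *	}
 *
 *	if (!ret && !pmu->commit_txn(pmu))
 *		return 0;		// whole group is now scheduled
 *
 *	// on failure: ->del() every event that was ->add()ed, then:
 *	pmu->cancel_txn(pmu);
 *	return -EAGAIN;
 */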

/**
 * pmu::capabilities flags
 */
#define PERF_PMU_CAP_NO_INTERRUPT		0x01
#define PERF_PMU_CAP_NO_NMI			0x02
#define PERF_PMU_CAP_AUX_NO_SG			0x04
#define PERF_PMU_CAP_AUX_SW_DOUBLEBUF		0x08
#define PERF_PMU_CAP_EXCLUSIVE			0x10
#define PERF_PMU_CAP_ITRACE			0x20
#define PERF_PMU_CAP_HETEROGENEOUS_CPUS		0x40

/**
 * struct pmu - generic performance monitoring unit
 */
struct pmu {
	struct list_head		entry;

	struct module			*module;
	struct device			*dev;
	const struct attribute_group	**attr_groups;
	const char			*name;
	int				type;

	/*
	 * various common per-pmu feature flags
	 */
	int				capabilities;

	int * __percpu			pmu_disable_count;
	struct perf_cpu_context * __percpu pmu_cpu_context;
	atomic_t			exclusive_cnt; /* < 0: cpu; > 0: tsk */
	int				task_ctx_nr;
	int				hrtimer_interval_ms;

	/* number of address filters this PMU can do */
	unsigned int			nr_addr_filters;

	/*
	 * Fully disable/enable this PMU, can be used to protect from the PMI
	 * as well as for lazy/batch writing of the MSRs.
	 */
	void (*pmu_enable)		(struct pmu *pmu); /* optional */
	void (*pmu_disable)		(struct pmu *pmu); /* optional */

	/*
	 * Try and initialize the event for this PMU.
	 *
	 * Returns:
	 *  -ENOENT	-- @event is not for this PMU
	 *
	 *  -ENODEV	-- @event is for this PMU but PMU not present
	 *  -EBUSY	-- @event is for this PMU but PMU temporarily unavailable
	 *  -EINVAL	-- @event is for this PMU but @event is not valid
	 *  -EOPNOTSUPP -- @event is for this PMU, @event is valid, but not supported
	 *  -EACCES	-- @event is for this PMU, @event is valid, but no privileges
	 *
	 *  0		-- @event is for this PMU and valid
	 *
	 * Other error return values are allowed.
	 */
	int (*event_init)		(struct perf_event *event);

	/*
	 * Notification that the event was mapped or unmapped.  Called
	 * in the context of the mapping task.
	 */
	void (*event_mapped)		(struct perf_event *event); /* optional */
	void (*event_unmapped)		(struct perf_event *event); /* optional */

	/*
	 * Flags for ->add()/->del()/->start()/->stop(). There are
	 * matching hw_perf_event::state flags.
	 */
#define PERF_EF_START	0x01		/* start the counter when adding    */
#define PERF_EF_RELOAD	0x02		/* reload the counter when starting */
#define PERF_EF_UPDATE	0x04		/* update the counter when stopping */

	/*
	 * Adds/Removes a counter to/from the PMU, can be done inside a
	 * transaction, see the ->*_txn() methods.
	 *
	 * The add/del callbacks will reserve all hardware resources required
	 * to service the event, this includes any counter constraint
	 * scheduling etc.
	 *
	 * Called with IRQs disabled and the PMU disabled on the CPU the event
	 * is on.
	 *
	 * ->add() called without PERF_EF_START should result in the same state
	 * as ->add() followed by ->stop().
	 *
	 * ->del() must always stop the event with PERF_EF_UPDATE semantics.
	 * If it calls ->stop(), that ->stop() must deal with already being
	 * stopped without PERF_EF_UPDATE.
	 */
	int  (*add)			(struct perf_event *event, int flags);
	void (*del)			(struct perf_event *event, int flags);

	/*
	 * Starts/Stops a counter present on the PMU.
	 *
	 * The PMI handler should stop the counter when perf_event_overflow()
	 * returns !0. ->start() will be used to continue.
	 *
	 * Also used to change the sample period.
	 *
	 * Called with IRQs disabled and the PMU disabled on the CPU the event
	 * is on -- will be called from NMI context when the PMU generates
	 * NMIs.
	 *
	 * ->stop() with PERF_EF_UPDATE will read the counter and update
	 *  period/count values like ->read() would.
	 *
	 * ->start() with PERF_EF_RELOAD will reprogram the counter
	 *  value, must be preceded by a ->stop() with PERF_EF_UPDATE.
	 */
	void (*start)			(struct perf_event *event, int flags);
	void (*stop)			(struct perf_event *event, int flags);

	/*
	 * Updates the counter value of the event.
	 *
	 * For sampling capable PMUs this will also update the software period
	 * hw_perf_event::period_left field.
	 */
	void (*read)			(struct perf_event *event);

	/*
	 * Group event scheduling is treated as a transaction: add
	 * group events as a whole and perform one schedulability test.
	 * If the test fails, roll back the whole group.
	 *
	 * Start the transaction; after this, ->add() doesn't need to
	 * do schedulability tests.
	 *
	 * Optional.
	 */
	void (*start_txn)		(struct pmu *pmu, unsigned int txn_flags);
	/*
	 * If ->start_txn() disabled the ->add() schedulability test
	 * then ->commit_txn() is required to perform one. On success
	 * the transaction is closed. On error the transaction is kept
	 * open until ->cancel_txn() is called.
	 *
	 * Optional.
	 */
	int  (*commit_txn)		(struct pmu *pmu);
	/*
	 * Will cancel the transaction, assumes ->del() is called
	 * for each successful ->add() during the transaction.
	 *
	 * Optional.
	 */
	void (*cancel_txn)		(struct pmu *pmu);

	/*
	 * Will return the value for perf_event_mmap_page::index for this event,
	 * if no implementation is provided it will default to: event->hw.idx + 1.
	 */
	int (*event_idx)		(struct perf_event *event); /* optional */

	/*
	 * context-switches callback
	 */
	void (*sched_task)		(struct perf_event_context *ctx,
					 bool sched_in);
	/*
	 * PMU specific data size
	 */
	size_t				task_ctx_size;


	/*
	 * Return the count value for a counter.
	 */
	u64 (*count)			(struct perf_event *event); /* optional */

	/*
	 * Set up pmu-private data structures for an AUX area
	 */
	void *(*setup_aux)		(int cpu, void **pages,
					 int nr_pages, bool overwrite);
					/* optional */

	/*
	 * Free pmu-private AUX data structures
	 */
	void (*free_aux)		(void *aux); /* optional */

	/*
	 * Validate address range filters: make sure the HW supports the
	 * requested configuration and number of filters; return 0 if the
	 * supplied filters are valid, -errno otherwise.
	 *
	 * Runs in the context of the ioctl()ing process and is not serialized
	 * with the rest of the PMU callbacks.
	 */
	int (*addr_filters_validate)	(struct list_head *filters);
					/* optional */

	/*
	 * Synchronize address range filter configuration:
	 * translate hw-agnostic filters into hardware configuration in
	 * event::hw::addr_filters.
	 *
	 * Runs as a part of filter sync sequence that is done in ->start()
	 * callback by calling perf_event_addr_filters_sync().
	 *
	 * May (and should) traverse event::addr_filters::list, for which its
	 * caller provides necessary serialization.
	 */
	void (*addr_filters_sync)	(struct perf_event *event);
					/* optional */

	/*
	 * Filter events for PMU-specific reasons.
	 */
	int (*filter_match)		(struct perf_event *event); /* optional */
};
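
/*
 * Illustrative sketch (not part of this header): the minimal callback set a
 * simple software-style PMU driver wires up before perf_pmu_register().
 * All my_* names are hypothetical.
 *
 *	static int my_event_init(struct perf_event *event)
 *	{
 *		if (event->attr.type != event->pmu->type)
 *			return -ENOENT;	// not ours, let other PMUs try
 *		return 0;
 *	}
 *
 *	static int my_add(struct perf_event *event, int flags)
 *	{
 *		if (flags & PERF_EF_START)
 *			my_start(event, PERF_EF_RELOAD);
 *		return 0;
 *	}
 *
 *	static struct pmu my_pmu = {
 *		.task_ctx_nr	= perf_sw_context,
 *		.event_init	= my_event_init,
 *		.add		= my_add,
 *		.del		= my_del,
 *		.start		= my_start,
 *		.stop		= my_stop,
 *		.read		= my_read,
 *	};
 *
 *	// e.g. from an initcall; -1 requests a dynamically allocated type:
 *	//	perf_pmu_register(&my_pmu, "my_pmu", -1);
 */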

/**
 * struct perf_addr_filter - address range filter definition
 * @entry:	event's filter list linkage
 * @inode:	object file's inode for file-based filters
 * @offset:	filter range offset
 * @size:	filter range size
 * @range:	1: range, 0: address
 * @filter:	1: filter/start, 0: stop
 *
 * This is a hardware-agnostic filter configuration as specified by the user.
 */
struct perf_addr_filter {
	struct list_head	entry;
	struct inode		*inode;
	unsigned long		offset;
	unsigned long		size;
	unsigned int		range	: 1,
				filter	: 1;
};

/**
 * struct perf_addr_filters_head - container for address range filters
 * @list:	list of filters for this event
 * @lock:	spinlock that serializes accesses to the @list and event's
 *		(and its children's) filter generations.
 * @nr_file_filters:	number of file-based filters
 *
 * A child event will use parent's @list (and therefore @lock), so they are
 * bundled together; see perf_event_addr_filters().
 */
struct perf_addr_filters_head {
	struct list_head	list;
	raw_spinlock_t		lock;
	unsigned int		nr_file_filters;
};
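
/*
 * Illustrative sketch (not part of this header): walking an event's address
 * filters under @lock.  The range-programming helper my_program_range() is
 * hypothetical.
 *
 *	struct perf_addr_filters_head *head = perf_event_addr_filters(event);
 *	struct perf_addr_filter *filter;
 *	unsigned long flags;
 *
 *	raw_spin_lock_irqsave(&head->lock, flags);
 *	list_for_each_entry(filter, &head->list, entry)
 *		my_program_range(filter->offset, filter->size, filter->range);
 *	raw_spin_unlock_irqrestore(&head->lock, flags);
 */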

/**
 * enum perf_event_active_state - the states of an event
 */
enum perf_event_active_state {
	PERF_EVENT_STATE_DEAD		= -4,
	PERF_EVENT_STATE_EXIT		= -3,
	PERF_EVENT_STATE_ERROR		= -2,
	PERF_EVENT_STATE_OFF		= -1,
	PERF_EVENT_STATE_INACTIVE	=  0,
	PERF_EVENT_STATE_ACTIVE		=  1,
};

struct file;
struct perf_sample_data;

typedef void (*perf_overflow_handler_t)(struct perf_event *,
					struct perf_sample_data *,
					struct pt_regs *regs);

/*
 * Event capabilities. For event_caps and group_caps.
 *
 * PERF_EV_CAP_SOFTWARE: Is a software event.
 * PERF_EV_CAP_READ_ACTIVE_PKG: A CPU event (or cgroup event) that can be read
 * from any CPU in the package where it is active.
 */
#define PERF_EV_CAP_SOFTWARE		BIT(0)
#define PERF_EV_CAP_READ_ACTIVE_PKG	BIT(1)

#define SWEVENT_HLIST_BITS		8
#define SWEVENT_HLIST_SIZE		(1 << SWEVENT_HLIST_BITS)

struct swevent_hlist {
	struct hlist_head		heads[SWEVENT_HLIST_SIZE];
	struct rcu_head			rcu_head;
};

#define PERF_ATTACH_CONTEXT	0x01
#define PERF_ATTACH_GROUP	0x02
#define PERF_ATTACH_TASK	0x04
#define PERF_ATTACH_TASK_DATA	0x08

struct perf_cgroup;
struct ring_buffer;

struct pmu_event_list {
	raw_spinlock_t		lock;
	struct list_head	list;
};

/**
 * struct perf_event - performance event kernel representation:
 */
struct perf_event {
#ifdef CONFIG_PERF_EVENTS
	/*
	 * entry onto perf_event_context::event_list;
	 *   modifications require ctx->lock
	 *   RCU safe iterations.
	 */
	struct list_head		event_entry;

	/*
	 * XXX: group_entry and sibling_list should be mutually exclusive;
	 * either you're a sibling in a group, or you're the group leader.
	 * Rework the code to always use the same list element.
	 *
	 * Locked for modification by both ctx->mutex and ctx->lock; holding
	 * either suffices for read.
	 */
	struct list_head		group_entry;
	struct list_head		sibling_list;

	/*
	 * We need storage to track the entries in perf_pmu_migrate_context; we
	 * cannot use the event_entry because of RCU and we want to keep the
	 * group intact, which avoids us using the other two entries.
	 */
	struct list_head		migrate_entry;

	struct hlist_node		hlist_entry;
	struct list_head		active_entry;
	int				nr_siblings;

	/* Not serialized. Only written during event initialization. */
	int				event_caps;
	/* The cumulative AND of all event_caps for events in this group. */
	int				group_caps;

	struct perf_event		*group_leader;
	struct pmu			*pmu;
	void				*pmu_private;

	enum perf_event_active_state	state;
	unsigned int			attach_state;
	local64_t			count;
	atomic64_t			child_count;

	/*
	 * These are the total time in nanoseconds that the event
	 * has been enabled (i.e. eligible to run, and the task has
	 * been scheduled in, if this is a per-task event)
	 * and running (scheduled onto the CPU), respectively.
	 *
	 * They are computed from tstamp_enabled, tstamp_running and
	 * tstamp_stopped when the event is in INACTIVE or ACTIVE state.
	 */
	u64				total_time_enabled;
	u64				total_time_running;

	/*
	 * These are timestamps used for computing total_time_enabled
	 * and total_time_running when the event is in INACTIVE or
	 * ACTIVE state, measured in nanoseconds from an arbitrary point
	 * in time.
	 * tstamp_enabled: the notional time when the event was enabled
	 * tstamp_running: the notional time when the event was scheduled on
	 * tstamp_stopped: in INACTIVE state, the notional time when the
	 *	event was scheduled off.
	 */
	u64				tstamp_enabled;
	u64				tstamp_running;
	u64				tstamp_stopped;

	/*
	 * timestamp shadows the actual context timing but it can
	 * be safely used in NMI interrupt context. It reflects the
	 * context time as it was when the event was last scheduled in.
	 *
	 * ctx_time already accounts for ctx->timestamp. Therefore to
	 * compute ctx_time for a sample, simply add perf_clock().
	 */
	u64				shadow_ctx_time;

	struct perf_event_attr		attr;
	u16				header_size;
	u16				id_header_size;
	u16				read_size;
	struct hw_perf_event		hw;

	struct perf_event_context	*ctx;
	atomic_long_t			refcount;

	/*
	 * These accumulate total time (in nanoseconds) that children
	 * events have been enabled and running, respectively.
	 */
	atomic64_t			child_total_time_enabled;
	atomic64_t			child_total_time_running;

	/*
	 * Protect attach/detach and child_list:
	 */
	struct mutex			child_mutex;
	struct list_head		child_list;
	struct perf_event		*parent;

	int				oncpu;
	int				cpu;

	struct list_head		owner_entry;
	struct task_struct		*owner;

	/* mmap bits */
	struct mutex			mmap_mutex;
	atomic_t			mmap_count;

	struct ring_buffer		*rb;
	struct list_head		rb_entry;
	unsigned long			rcu_batches;
	int				rcu_pending;

	/* poll related */
	wait_queue_head_t		waitq;
	struct fasync_struct		*fasync;

	/* delayed work for NMIs and such */
	int				pending_wakeup;
	int				pending_kill;
	int				pending_disable;
	struct irq_work			pending;

	atomic_t			event_limit;

	/* address range filters */
	struct perf_addr_filters_head	addr_filters;
	/* vma address array for file-based filters */
	unsigned long			*addr_filters_offs;
	unsigned long			addr_filters_gen;

	void (*destroy)(struct perf_event *);
	struct rcu_head			rcu_head;

	struct pid_namespace		*ns;
	u64				id;

	u64				(*clock)(void);
	perf_overflow_handler_t		overflow_handler;
	void				*overflow_handler_context;
#ifdef CONFIG_BPF_SYSCALL
	perf_overflow_handler_t		orig_overflow_handler;
	struct bpf_prog			*prog;
#endif

#ifdef CONFIG_EVENT_TRACING
	struct trace_event_call		*tp_event;
	struct event_filter		*filter;
#ifdef CONFIG_FUNCTION_TRACER
	struct ftrace_ops               ftrace_ops;
#endif
#endif

#ifdef CONFIG_CGROUP_PERF
	struct perf_cgroup		*cgrp; /* cgroup the event is attached to */
	int				cgrp_defer_enabled;
#endif

	struct list_head		sb_list;
#endif /* CONFIG_PERF_EVENTS */
};

/**
 * struct perf_event_context - event context structure
 *
 * Used as a container for task events and CPU events as well:
 */
struct perf_event_context {
	struct pmu			*pmu;
	/*
	 * Protect the states of the events in the list,
	 * nr_active, and the list:
	 */
	raw_spinlock_t			lock;
	/*
	 * Protect the list of events.  Locking either mutex or lock
	 * is sufficient to ensure the list doesn't change; to change
	 * the list you need to lock both the mutex and the spinlock.
	 */
	struct mutex			mutex;

	struct list_head		active_ctx_list;
	struct list_head		pinned_groups;
	struct list_head		flexible_groups;
	struct list_head		event_list;
	int				nr_events;
	int				nr_active;
	int				is_active;
	int				nr_stat;
	int				nr_freq;
	int				rotate_disable;
	atomic_t			refcount;
	struct task_struct		*task;

	/*
	 * Context clock, runs when context enabled.
	 */
	u64				time;
	u64				timestamp;

	/*
	 * These fields let us detect when two contexts have both
	 * been cloned (inherited) from a common ancestor.
	 */
	struct perf_event_context	*parent_ctx;
	u64				parent_gen;
	u64				generation;
	int				pin_count;
#ifdef CONFIG_CGROUP_PERF
	int				nr_cgroups;	 /* cgroup evts */
#endif
	void				*task_ctx_data; /* pmu specific data */
	struct rcu_head			rcu_head;
};

/*
 * Number of contexts where an event can trigger:
 *	task, softirq, hardirq, nmi.
 */
#define PERF_NR_CONTEXTS	4

/**
 * struct perf_cpu_context - per-CPU event context structure
 */
struct perf_cpu_context {
	struct perf_event_context	ctx;
	struct perf_event_context	*task_ctx;
	int				active_oncpu;
	int				exclusive;

	raw_spinlock_t			hrtimer_lock;
	struct hrtimer			hrtimer;
	ktime_t				hrtimer_interval;
	unsigned int			hrtimer_active;

#ifdef CONFIG_CGROUP_PERF
	struct perf_cgroup		*cgrp;
	struct list_head		cgrp_cpuctx_entry;
#endif

	struct list_head		sched_cb_entry;
	int				sched_cb_usage;
};

struct perf_output_handle {
	struct perf_event		*event;
	struct ring_buffer		*rb;
	unsigned long			wakeup;
	unsigned long			size;
	union {
		void			*addr;
		unsigned long		head;
	};
	int				page;
};

struct bpf_perf_event_data_kern {
	struct pt_regs *regs;
	struct perf_sample_data *data;
};

#ifdef CONFIG_CGROUP_PERF

/*
 * perf_cgroup_info keeps track of time_enabled for a cgroup.
 * This is a per-cpu dynamically allocated data structure.
 */
struct perf_cgroup_info {
	u64				time;
	u64				timestamp;
};

struct perf_cgroup {
	struct cgroup_subsys_state	css;
	struct perf_cgroup_info	__percpu *info;
};

/*
 * Must ensure cgroup is pinned (css_get) before calling
 * this function. In other words, we cannot call this function
 * if there is no cgroup event for the current CPU context.
 */
static inline struct perf_cgroup *
perf_cgroup_from_task(struct task_struct *task, struct perf_event_context *ctx)
{
	return container_of(task_css_check(task, perf_event_cgrp_id,
					    ctx ? lockdep_is_held(&ctx->lock)
					        : true),
			    struct perf_cgroup, css);
}
#endif /* CONFIG_CGROUP_PERF */

#ifdef CONFIG_PERF_EVENTS

extern void *perf_aux_output_begin(struct perf_output_handle *handle,
				   struct perf_event *event);
extern void perf_aux_output_end(struct perf_output_handle *handle,
				unsigned long size, bool truncated);
extern int perf_aux_output_skip(struct perf_output_handle *handle,
				unsigned long size);
extern void *perf_get_aux(struct perf_output_handle *handle);
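
/*
 * Illustrative sketch (not part of this header): how an AUX-capable PMU
 * driver typically brackets a trace run with the helpers above, e.g. from
 * its ->start() and interrupt paths.  The buffer handling is made up.
 *
 *	struct perf_output_handle handle;
 *	void *buf;
 *
 *	buf = perf_aux_output_begin(&handle, event);
 *	if (!buf)
 *		return;			// no AUX buffer or no space left
 *
 *	// ... point the hardware at buf and collect 'bytes' of trace ...
 *
 *	perf_aux_output_end(&handle, bytes, false);
 */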

extern int perf_pmu_register(struct pmu *pmu, const char *name, int type);
extern void perf_pmu_unregister(struct pmu *pmu);

extern int perf_num_counters(void);
extern const char *perf_pmu_name(void);
extern void __perf_event_task_sched_in(struct task_struct *prev,
				       struct task_struct *task);
extern void __perf_event_task_sched_out(struct task_struct *prev,
					struct task_struct *next);
extern int perf_event_init_task(struct task_struct *child);
extern void perf_event_exit_task(struct task_struct *child);
extern void perf_event_free_task(struct task_struct *task);
extern void perf_event_delayed_put(struct task_struct *task);
extern struct file *perf_event_get(unsigned int fd);
extern const struct perf_event_attr *perf_event_attrs(struct perf_event *event);
extern void perf_event_print_debug(void);
extern void perf_pmu_disable(struct pmu *pmu);
extern void perf_pmu_enable(struct pmu *pmu);
extern void perf_sched_cb_dec(struct pmu *pmu);
extern void perf_sched_cb_inc(struct pmu *pmu);
extern int perf_event_task_disable(void);
extern int perf_event_task_enable(void);
extern int perf_event_refresh(struct perf_event *event, int refresh);
extern void perf_event_update_userpage(struct perf_event *event);
extern int perf_event_release_kernel(struct perf_event *event);
extern struct perf_event *
perf_event_create_kernel_counter(struct perf_event_attr *attr,
				int cpu,
				struct task_struct *task,
				perf_overflow_handler_t callback,
				void *context);
Yan, Zheng | 0cda4c0 | 2012-06-15 14:31:33 +0800 | [diff] [blame] | 888 | extern void perf_pmu_migrate_context(struct pmu *pmu, |
| 889 | int src_cpu, int dst_cpu); |
Kaixu Xia | ffe8690 | 2015-08-06 07:02:32 +0000 | [diff] [blame] | 890 | extern u64 perf_event_read_local(struct perf_event *event); |
Peter Zijlstra | 59ed446 | 2009-11-20 22:19:55 +0100 | [diff] [blame] | 891 | extern u64 perf_event_read_value(struct perf_event *event, |
| 892 | u64 *enabled, u64 *running); |
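
/*
 * Illustrative sketch (hypothetical helper, not part of the API above):
 * create an in-kernel counter with perf_event_create_kernel_counter(), read
 * it back with perf_event_read_value() and release it again.  A real user
 * would keep the event around rather than create one per read, and would
 * propagate the error instead of returning 0.
 */
static inline u64 perf_count_cycles_once_sketch(int cpu)
{
	struct perf_event_attr attr = {
		.type	= PERF_TYPE_HARDWARE,
		.config	= PERF_COUNT_HW_CPU_CYCLES,
		.size	= sizeof(attr),
	};
	struct perf_event *event;
	u64 enabled, running, count;

	/* No target task (NULL) makes this a CPU-wide event on @cpu. */
	event = perf_event_create_kernel_counter(&attr, cpu, NULL, NULL, NULL);
	if (IS_ERR(event))
		return 0;

	count = perf_event_read_value(event, &enabled, &running);
	perf_event_release_kernel(event);

	return count;
}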
Ingo Molnar | 5c92d12 | 2008-12-11 13:21:10 +0100 | [diff] [blame] | 893 | |
Stephane Eranian | d010b33 | 2012-02-09 23:21:00 +0100 | [diff] [blame] | 894 | |
Peter Zijlstra | df1a132 | 2009-06-10 21:02:22 +0200 | [diff] [blame] | 895 | struct perf_sample_data { |
Peter Zijlstra | 2565711 | 2014-09-24 13:48:42 +0200 | [diff] [blame] | 896 | /* |
| 897 | 	 * Fields set by perf_sample_data_init(), grouped so as to |
| 898 | * minimize the cachelines touched. |
| 899 | */ |
| 900 | u64 addr; |
| 901 | struct perf_raw_record *raw; |
| 902 | struct perf_branch_stack *br_stack; |
| 903 | u64 period; |
| 904 | u64 weight; |
| 905 | u64 txn; |
| 906 | union perf_mem_data_src data_src; |
Markus Metzger | 5622f29 | 2009-09-15 13:00:23 +0200 | [diff] [blame] | 907 | |
Peter Zijlstra | 2565711 | 2014-09-24 13:48:42 +0200 | [diff] [blame] | 908 | /* |
| 909 | * The other fields, optionally {set,used} by |
| 910 | * perf_{prepare,output}_sample(). |
| 911 | */ |
| 912 | u64 type; |
Markus Metzger | 5622f29 | 2009-09-15 13:00:23 +0200 | [diff] [blame] | 913 | u64 ip; |
| 914 | struct { |
| 915 | u32 pid; |
| 916 | u32 tid; |
| 917 | } tid_entry; |
| 918 | u64 time; |
Markus Metzger | 5622f29 | 2009-09-15 13:00:23 +0200 | [diff] [blame] | 919 | u64 id; |
| 920 | u64 stream_id; |
| 921 | struct { |
| 922 | u32 cpu; |
| 923 | u32 reserved; |
| 924 | } cpu_entry; |
Markus Metzger | 5622f29 | 2009-09-15 13:00:23 +0200 | [diff] [blame] | 925 | struct perf_callchain_entry *callchain; |
Andy Lutomirski | 88a7c26 | 2015-01-04 10:36:19 -0800 | [diff] [blame] | 926 | |
| 927 | /* |
| 928 | * regs_user may point to task_pt_regs or to regs_user_copy, depending |
| 929 | * on arch details. |
| 930 | */ |
Stephane Eranian | 60e2364 | 2014-09-24 13:48:37 +0200 | [diff] [blame] | 931 | struct perf_regs regs_user; |
Andy Lutomirski | 88a7c26 | 2015-01-04 10:36:19 -0800 | [diff] [blame] | 932 | struct pt_regs regs_user_copy; |
| 933 | |
Stephane Eranian | 60e2364 | 2014-09-24 13:48:37 +0200 | [diff] [blame] | 934 | struct perf_regs regs_intr; |
Jiri Olsa | c5ebced | 2012-08-07 15:20:40 +0200 | [diff] [blame] | 935 | u64 stack_user_size; |
Peter Zijlstra | 2565711 | 2014-09-24 13:48:42 +0200 | [diff] [blame] | 936 | } ____cacheline_aligned; |
Peter Zijlstra | df1a132 | 2009-06-10 21:02:22 +0200 | [diff] [blame] | 937 | |
Stephane Eranian | 770eee1 | 2014-08-11 21:27:12 +0200 | [diff] [blame] | 938 | /* default value for data source */ |
| 939 | #define PERF_MEM_NA (PERF_MEM_S(OP, NA) |\ |
| 940 | PERF_MEM_S(LVL, NA) |\ |
| 941 | PERF_MEM_S(SNOOP, NA) |\ |
| 942 | PERF_MEM_S(LOCK, NA) |\ |
| 943 | PERF_MEM_S(TLB, NA)) |
| 944 | |
Robert Richter | fd0d000 | 2012-04-02 20:19:08 +0200 | [diff] [blame] | 945 | static inline void perf_sample_data_init(struct perf_sample_data *data, |
| 946 | u64 addr, u64 period) |
Peter Zijlstra | dc1d628 | 2010-03-03 15:55:04 +0100 | [diff] [blame] | 947 | { |
Robert Richter | fd0d000 | 2012-04-02 20:19:08 +0200 | [diff] [blame] | 948 | /* remaining struct members initialized in perf_prepare_sample() */ |
Peter Zijlstra | dc1d628 | 2010-03-03 15:55:04 +0100 | [diff] [blame] | 949 | data->addr = addr; |
| 950 | data->raw = NULL; |
Stephane Eranian | bce38cd | 2012-02-09 23:20:51 +0100 | [diff] [blame] | 951 | data->br_stack = NULL; |
Jiri Olsa | 4018994 | 2012-08-07 15:20:37 +0200 | [diff] [blame] | 952 | data->period = period; |
Andi Kleen | c3feedf | 2013-01-24 16:10:28 +0100 | [diff] [blame] | 953 | data->weight = 0; |
Stephane Eranian | 770eee1 | 2014-08-11 21:27:12 +0200 | [diff] [blame] | 954 | data->data_src.val = PERF_MEM_NA; |
Andi Kleen | fdfbbd0 | 2013-09-20 07:40:39 -0700 | [diff] [blame] | 955 | data->txn = 0; |
Peter Zijlstra | dc1d628 | 2010-03-03 15:55:04 +0100 | [diff] [blame] | 956 | } |
| 957 | |
Markus Metzger | 5622f29 | 2009-09-15 13:00:23 +0200 | [diff] [blame] | 958 | extern void perf_output_sample(struct perf_output_handle *handle, |
| 959 | struct perf_event_header *header, |
| 960 | struct perf_sample_data *data, |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 961 | struct perf_event *event); |
Markus Metzger | 5622f29 | 2009-09-15 13:00:23 +0200 | [diff] [blame] | 962 | extern void perf_prepare_sample(struct perf_event_header *header, |
| 963 | struct perf_sample_data *data, |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 964 | struct perf_event *event, |
Markus Metzger | 5622f29 | 2009-09-15 13:00:23 +0200 | [diff] [blame] | 965 | struct pt_regs *regs); |
| 966 | |
Peter Zijlstra | a8b0ca1 | 2011-06-27 14:41:57 +0200 | [diff] [blame] | 967 | extern int perf_event_overflow(struct perf_event *event, |
Markus Metzger | 5622f29 | 2009-09-15 13:00:23 +0200 | [diff] [blame] | 968 | struct perf_sample_data *data, |
| 969 | struct pt_regs *regs); |
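
/*
 * Illustrative sketch of the pattern PMU interrupt handlers follow
 * (hypothetical function, modelled on existing drivers): initialize the
 * fast-path fields with perf_sample_data_init() and let
 * perf_event_overflow() throttle and emit the sample; a non-zero return
 * asks the driver to stop the event.
 */
static inline void perf_pmu_overflow_sketch(struct perf_event *event,
					    struct pt_regs *regs)
{
	struct perf_sample_data data;

	perf_sample_data_init(&data, 0, event->hw.last_period);

	if (perf_event_overflow(event, &data, regs))
		event->pmu->stop(event, 0);
}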
Peter Zijlstra | df1a132 | 2009-06-10 21:02:22 +0200 | [diff] [blame] | 970 | |
Wang Nan | 9ecda41 | 2016-04-05 14:11:18 +0000 | [diff] [blame] | 971 | extern void perf_event_output_forward(struct perf_event *event, |
| 972 | struct perf_sample_data *data, |
| 973 | struct pt_regs *regs); |
| 974 | extern void perf_event_output_backward(struct perf_event *event, |
| 975 | struct perf_sample_data *data, |
| 976 | struct pt_regs *regs); |
Yan, Zheng | 2150908 | 2015-05-06 15:33:49 -0400 | [diff] [blame] | 977 | extern void perf_event_output(struct perf_event *event, |
Wang Nan | 9ecda41 | 2016-04-05 14:11:18 +0000 | [diff] [blame] | 978 | struct perf_sample_data *data, |
| 979 | struct pt_regs *regs); |
Yan, Zheng | 2150908 | 2015-05-06 15:33:49 -0400 | [diff] [blame] | 980 | |
Wang Nan | 1879445 | 2016-03-28 06:41:30 +0000 | [diff] [blame] | 981 | static inline bool |
| 982 | is_default_overflow_handler(struct perf_event *event) |
| 983 | { |
Wang Nan | 9ecda41 | 2016-04-05 14:11:18 +0000 | [diff] [blame] | 984 | if (likely(event->overflow_handler == perf_event_output_forward)) |
| 985 | return true; |
| 986 | if (unlikely(event->overflow_handler == perf_event_output_backward)) |
| 987 | return true; |
| 988 | return false; |
Wang Nan | 1879445 | 2016-03-28 06:41:30 +0000 | [diff] [blame] | 989 | } |
| 990 | |
Yan, Zheng | 2150908 | 2015-05-06 15:33:49 -0400 | [diff] [blame] | 991 | extern void |
| 992 | perf_event_header__init_id(struct perf_event_header *header, |
| 993 | struct perf_sample_data *data, |
| 994 | struct perf_event *event); |
| 995 | extern void |
| 996 | perf_event__output_id_sample(struct perf_event *event, |
| 997 | struct perf_output_handle *handle, |
| 998 | struct perf_sample_data *sample); |
| 999 | |
Kan Liang | f38b0db | 2015-05-10 15:13:14 -0400 | [diff] [blame] | 1000 | extern void |
| 1001 | perf_log_lost_samples(struct perf_event *event, u64 lost); |
| 1002 | |
Franck Bui-Huu | 6c7e550 | 2010-11-23 16:21:43 +0100 | [diff] [blame] | 1003 | static inline bool is_sampling_event(struct perf_event *event) |
| 1004 | { |
| 1005 | return event->attr.sample_period != 0; |
| 1006 | } |
| 1007 | |
Paul Mackerras | 3b6f9e5 | 2009-01-14 21:00:30 +1100 | [diff] [blame] | 1008 | /* |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1009 | * Return 1 for a software event, 0 for a hardware event |
Paul Mackerras | 3b6f9e5 | 2009-01-14 21:00:30 +1100 | [diff] [blame] | 1010 | */ |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1011 | static inline int is_software_event(struct perf_event *event) |
Paul Mackerras | 3b6f9e5 | 2009-01-14 21:00:30 +1100 | [diff] [blame] | 1012 | { |
David Carrillo-Cisneros | 4ff6a8d | 2016-08-17 13:55:05 -0700 | [diff] [blame] | 1013 | return event->event_caps & PERF_EV_CAP_SOFTWARE; |
Paul Mackerras | 3b6f9e5 | 2009-01-14 21:00:30 +1100 | [diff] [blame] | 1014 | } |
| 1015 | |
Ingo Molnar | c5905af | 2012-02-24 08:31:31 +0100 | [diff] [blame] | 1016 | extern struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX]; |
Peter Zijlstra | f29ac75 | 2009-06-19 18:27:26 +0200 | [diff] [blame] | 1017 | |
Peter Zijlstra (Intel) | 86038c5 | 2014-12-16 12:47:34 +0100 | [diff] [blame] | 1018 | extern void ___perf_sw_event(u32, u64, struct pt_regs *, u64); |
Peter Zijlstra | a8b0ca1 | 2011-06-27 14:41:57 +0200 | [diff] [blame] | 1019 | extern void __perf_sw_event(u32, u64, struct pt_regs *, u64); |
Peter Zijlstra | f29ac75 | 2009-06-19 18:27:26 +0200 | [diff] [blame] | 1020 | |
Frederic Weisbecker | b0f82b8 | 2010-05-20 07:47:21 +0200 | [diff] [blame] | 1021 | #ifndef perf_arch_fetch_caller_regs |
Ingo Molnar | e7e7ee2 | 2011-05-04 08:42:29 +0200 | [diff] [blame] | 1022 | static inline void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip) { } |
Frederic Weisbecker | b0f82b8 | 2010-05-20 07:47:21 +0200 | [diff] [blame] | 1023 | #endif |
Frederic Weisbecker | 5331d7b | 2010-03-04 21:15:56 +0100 | [diff] [blame] | 1024 | |
| 1025 | /* |
| 1026 |  * Take a snapshot of the regs, skipping the ip and frame pointer back to |
| 1027 |  * the nth caller. We only need a few of the regs: |
| 1028 | * - ip for PERF_SAMPLE_IP |
| 1029 | * - cs for user_mode() tests |
| 1030 | * - bp for callchains |
| 1031 | * - eflags, for future purposes, just in case |
| 1032 | */ |
Frederic Weisbecker | b0f82b8 | 2010-05-20 07:47:21 +0200 | [diff] [blame] | 1033 | static inline void perf_fetch_caller_regs(struct pt_regs *regs) |
Frederic Weisbecker | 5331d7b | 2010-03-04 21:15:56 +0100 | [diff] [blame] | 1034 | { |
Frederic Weisbecker | b0f82b8 | 2010-05-20 07:47:21 +0200 | [diff] [blame] | 1035 | perf_arch_fetch_caller_regs(regs, CALLER_ADDR0); |
Frederic Weisbecker | 5331d7b | 2010-03-04 21:15:56 +0100 | [diff] [blame] | 1036 | } |
| 1037 | |
Peter Zijlstra | 7e54a5a | 2010-10-14 22:32:45 +0200 | [diff] [blame] | 1038 | static __always_inline void |
Peter Zijlstra | a8b0ca1 | 2011-06-27 14:41:57 +0200 | [diff] [blame] | 1039 | perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr) |
Frederic Weisbecker | e49a5bd | 2010-03-22 19:40:03 +0100 | [diff] [blame] | 1040 | { |
Peter Zijlstra (Intel) | 86038c5 | 2014-12-16 12:47:34 +0100 | [diff] [blame] | 1041 | if (static_key_false(&perf_swevent_enabled[event_id])) |
Peter Zijlstra | a8b0ca1 | 2011-06-27 14:41:57 +0200 | [diff] [blame] | 1042 | __perf_sw_event(event_id, nr, regs, addr); |
Peter Zijlstra (Intel) | 86038c5 | 2014-12-16 12:47:34 +0100 | [diff] [blame] | 1043 | } |
| 1044 | |
| 1045 | DECLARE_PER_CPU(struct pt_regs, __perf_regs[4]); |
| 1046 | |
| 1047 | /* |
| 1048 |  * 'Special' version for the scheduler; it hard-assumes no recursion, |
| 1049 |  * which is guaranteed because we never actually schedule inside other |
| 1050 |  * swevents (they disable preemption). |
| 1051 | */ |
| 1052 | static __always_inline void |
| 1053 | perf_sw_event_sched(u32 event_id, u64 nr, u64 addr) |
| 1054 | { |
| 1055 | if (static_key_false(&perf_swevent_enabled[event_id])) { |
| 1056 | struct pt_regs *regs = this_cpu_ptr(&__perf_regs[0]); |
| 1057 | |
| 1058 | perf_fetch_caller_regs(regs); |
| 1059 | ___perf_sw_event(event_id, nr, regs, addr); |
Frederic Weisbecker | e49a5bd | 2010-03-22 19:40:03 +0100 | [diff] [blame] | 1060 | } |
| 1061 | } |
| 1062 | |
Peter Zijlstra | 9107c89 | 2016-02-24 18:45:45 +0100 | [diff] [blame] | 1063 | extern struct static_key_false perf_sched_events; |
Peter Zijlstra | ee6dcfa | 2010-11-26 13:49:04 +0100 | [diff] [blame] | 1064 | |
Peter Zijlstra | ff303e6 | 2015-04-17 20:05:30 +0200 | [diff] [blame] | 1065 | static __always_inline bool |
| 1066 | perf_sw_migrate_enabled(void) |
| 1067 | { |
| 1068 | if (static_key_false(&perf_swevent_enabled[PERF_COUNT_SW_CPU_MIGRATIONS])) |
| 1069 | return true; |
| 1070 | return false; |
| 1071 | } |
| 1072 | |
| 1073 | static inline void perf_event_task_migrate(struct task_struct *task) |
| 1074 | { |
| 1075 | if (perf_sw_migrate_enabled()) |
| 1076 | task->sched_migrated = 1; |
| 1077 | } |
| 1078 | |
Jiri Olsa | ab0cce5 | 2012-05-23 13:13:02 +0200 | [diff] [blame] | 1079 | static inline void perf_event_task_sched_in(struct task_struct *prev, |
Stephane Eranian | a8d757e | 2011-08-25 15:58:03 +0200 | [diff] [blame] | 1080 | struct task_struct *task) |
Peter Zijlstra | ee6dcfa | 2010-11-26 13:49:04 +0100 | [diff] [blame] | 1081 | { |
Peter Zijlstra | 9107c89 | 2016-02-24 18:45:45 +0100 | [diff] [blame] | 1082 | if (static_branch_unlikely(&perf_sched_events)) |
Jiri Olsa | ab0cce5 | 2012-05-23 13:13:02 +0200 | [diff] [blame] | 1083 | __perf_event_task_sched_in(prev, task); |
Peter Zijlstra | ff303e6 | 2015-04-17 20:05:30 +0200 | [diff] [blame] | 1084 | |
| 1085 | if (perf_sw_migrate_enabled() && task->sched_migrated) { |
| 1086 | struct pt_regs *regs = this_cpu_ptr(&__perf_regs[0]); |
| 1087 | |
| 1088 | perf_fetch_caller_regs(regs); |
| 1089 | ___perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, regs, 0); |
| 1090 | task->sched_migrated = 0; |
| 1091 | } |
Jiri Olsa | ab0cce5 | 2012-05-23 13:13:02 +0200 | [diff] [blame] | 1092 | } |
| 1093 | |
| 1094 | static inline void perf_event_task_sched_out(struct task_struct *prev, |
| 1095 | struct task_struct *next) |
| 1096 | { |
Peter Zijlstra (Intel) | 86038c5 | 2014-12-16 12:47:34 +0100 | [diff] [blame] | 1097 | perf_sw_event_sched(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 0); |
Peter Zijlstra | ee6dcfa | 2010-11-26 13:49:04 +0100 | [diff] [blame] | 1098 | |
Peter Zijlstra | 9107c89 | 2016-02-24 18:45:45 +0100 | [diff] [blame] | 1099 | if (static_branch_unlikely(&perf_sched_events)) |
Jiri Olsa | ab0cce5 | 2012-05-23 13:13:02 +0200 | [diff] [blame] | 1100 | __perf_event_task_sched_out(prev, next); |
Peter Zijlstra | ee6dcfa | 2010-11-26 13:49:04 +0100 | [diff] [blame] | 1101 | } |
| 1102 | |
Matt Fleming | eacd3ec | 2015-01-23 18:45:41 +0000 | [diff] [blame] | 1103 | static inline u64 __perf_event_count(struct perf_event *event) |
| 1104 | { |
| 1105 | return local64_read(&event->count) + atomic64_read(&event->child_count); |
| 1106 | } |
| 1107 | |
Eric B Munson | 3af9e85 | 2010-05-18 15:30:49 +0100 | [diff] [blame] | 1108 | extern void perf_event_mmap(struct vm_area_struct *vma); |
Zhang, Yanmin | 39447b3 | 2010-04-19 13:32:41 +0800 | [diff] [blame] | 1109 | extern struct perf_guest_info_callbacks *perf_guest_cbs; |
Zhang, Yanmin | dcf46b9 | 2010-04-20 10:13:58 +0800 | [diff] [blame] | 1110 | extern int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks); |
| 1111 | extern int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks); |
Zhang, Yanmin | 39447b3 | 2010-04-19 13:32:41 +0800 | [diff] [blame] | 1112 | |
Peter Zijlstra | e041e32 | 2014-05-21 17:32:19 +0200 | [diff] [blame] | 1113 | extern void perf_event_exec(void); |
Adrian Hunter | 82b8977 | 2014-05-28 11:45:04 +0300 | [diff] [blame] | 1114 | extern void perf_event_comm(struct task_struct *tsk, bool exec); |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1115 | extern void perf_event_fork(struct task_struct *tsk); |
Peter Zijlstra | 8d1b2d9 | 2009-04-08 15:01:30 +0200 | [diff] [blame] | 1116 | |
Frederic Weisbecker | 56962b444 | 2010-06-30 23:03:51 +0200 | [diff] [blame] | 1117 | /* Callchains */ |
| 1118 | DECLARE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry); |
| 1119 | |
Arnaldo Carvalho de Melo | cfbcf46 | 2016-04-28 12:30:53 -0300 | [diff] [blame] | 1120 | extern void perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs); |
| 1121 | extern void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs); |
Alexei Starovoitov | 568b329 | 2016-02-17 19:58:57 -0800 | [diff] [blame] | 1122 | extern struct perf_callchain_entry * |
| 1123 | get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user, |
Arnaldo Carvalho de Melo | cfbcf46 | 2016-04-28 12:30:53 -0300 | [diff] [blame] | 1124 | u32 max_stack, bool crosstask, bool add_mark); |
Arnaldo Carvalho de Melo | 97c79a3 | 2016-04-28 13:16:33 -0300 | [diff] [blame] | 1125 | extern int get_callchain_buffers(int max_stack); |
Alexei Starovoitov | 568b329 | 2016-02-17 19:58:57 -0800 | [diff] [blame] | 1126 | extern void put_callchain_buffers(void); |
Frederic Weisbecker | 56962b444 | 2010-06-30 23:03:51 +0200 | [diff] [blame] | 1127 | |
Arnaldo Carvalho de Melo | c5dfd78 | 2016-04-21 12:28:50 -0300 | [diff] [blame] | 1128 | extern int sysctl_perf_event_max_stack; |
Arnaldo Carvalho de Melo | c85b033 | 2016-05-12 13:06:21 -0300 | [diff] [blame] | 1129 | extern int sysctl_perf_event_max_contexts_per_stack; |
Arnaldo Carvalho de Melo | c5dfd78 | 2016-04-21 12:28:50 -0300 | [diff] [blame] | 1130 | |
Arnaldo Carvalho de Melo | c85b033 | 2016-05-12 13:06:21 -0300 | [diff] [blame] | 1131 | static inline int perf_callchain_store_context(struct perf_callchain_entry_ctx *ctx, u64 ip) |
| 1132 | { |
| 1133 | if (ctx->contexts < sysctl_perf_event_max_contexts_per_stack) { |
| 1134 | struct perf_callchain_entry *entry = ctx->entry; |
| 1135 | entry->ip[entry->nr++] = ip; |
| 1136 | ++ctx->contexts; |
| 1137 | return 0; |
| 1138 | } else { |
| 1139 | ctx->contexts_maxed = true; |
| 1140 | return -1; /* no more room, stop walking the stack */ |
| 1141 | } |
| 1142 | } |
Arnaldo Carvalho de Melo | 3e4de4e | 2016-05-12 13:01:50 -0300 | [diff] [blame] | 1143 | |
Arnaldo Carvalho de Melo | cfbcf46 | 2016-04-28 12:30:53 -0300 | [diff] [blame] | 1144 | static inline int perf_callchain_store(struct perf_callchain_entry_ctx *ctx, u64 ip) |
Frederic Weisbecker | 70791ce | 2010-06-29 19:34:05 +0200 | [diff] [blame] | 1145 | { |
Arnaldo Carvalho de Melo | c85b033 | 2016-05-12 13:06:21 -0300 | [diff] [blame] | 1146 | if (ctx->nr < ctx->max_stack && !ctx->contexts_maxed) { |
Arnaldo Carvalho de Melo | 3b1fff0 | 2016-05-10 18:08:32 -0300 | [diff] [blame] | 1147 | struct perf_callchain_entry *entry = ctx->entry; |
Frederic Weisbecker | 70791ce | 2010-06-29 19:34:05 +0200 | [diff] [blame] | 1148 | entry->ip[entry->nr++] = ip; |
Arnaldo Carvalho de Melo | 3b1fff0 | 2016-05-10 18:08:32 -0300 | [diff] [blame] | 1149 | ++ctx->nr; |
Alexei Starovoitov | 568b329 | 2016-02-17 19:58:57 -0800 | [diff] [blame] | 1150 | return 0; |
| 1151 | } else { |
| 1152 | return -1; /* no more room, stop walking the stack */ |
| 1153 | } |
Frederic Weisbecker | 70791ce | 2010-06-29 19:34:05 +0200 | [diff] [blame] | 1154 | } |
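
/*
 * Illustrative sketch: how an architecture's callchain walker feeds frames
 * to perf_callchain_store().  The unwinding itself is arch-specific and only
 * hinted at here; the point is the return-value convention, i.e. stop
 * walking as soon as the helper reports that the entry is full.
 */
static inline void perf_callchain_store_frames_sketch(struct perf_callchain_entry_ctx *ctx,
						       const unsigned long *frames,
						       unsigned int nr_frames)
{
	unsigned int i;

	for (i = 0; i < nr_frames; i++) {
		if (perf_callchain_store(ctx, frames[i]))
			break;	/* no more room in the entry */
	}
}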
Peter Zijlstra | 394ee07 | 2009-03-30 19:07:14 +0200 | [diff] [blame] | 1155 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1156 | extern int sysctl_perf_event_paranoid; |
| 1157 | extern int sysctl_perf_event_mlock; |
| 1158 | extern int sysctl_perf_event_sample_rate; |
Dave Hansen | 14c63f1 | 2013-06-21 08:51:36 -0700 | [diff] [blame] | 1159 | extern int sysctl_perf_cpu_time_max_percent; |
| 1160 | |
| 1161 | extern void perf_sample_event_took(u64 sample_len_ns); |
Peter Zijlstra | 1ccd154 | 2009-04-09 10:53:45 +0200 | [diff] [blame] | 1162 | |
Peter Zijlstra | 163ec43 | 2011-02-16 11:22:34 +0100 | [diff] [blame] | 1163 | extern int perf_proc_update_handler(struct ctl_table *table, int write, |
| 1164 | void __user *buffer, size_t *lenp, |
| 1165 | loff_t *ppos); |
Dave Hansen | 14c63f1 | 2013-06-21 08:51:36 -0700 | [diff] [blame] | 1166 | extern int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write, |
| 1167 | void __user *buffer, size_t *lenp, |
| 1168 | loff_t *ppos); |
| 1169 | |
Arnaldo Carvalho de Melo | c5dfd78 | 2016-04-21 12:28:50 -0300 | [diff] [blame] | 1170 | int perf_event_max_stack_handler(struct ctl_table *table, int write, |
| 1171 | void __user *buffer, size_t *lenp, loff_t *ppos); |
Peter Zijlstra | 163ec43 | 2011-02-16 11:22:34 +0100 | [diff] [blame] | 1172 | |
Peter Zijlstra | 320ebf0 | 2010-03-02 12:35:37 +0100 | [diff] [blame] | 1173 | static inline bool perf_paranoid_tracepoint_raw(void) |
| 1174 | { |
| 1175 | return sysctl_perf_event_paranoid > -1; |
| 1176 | } |
| 1177 | |
| 1178 | static inline bool perf_paranoid_cpu(void) |
| 1179 | { |
| 1180 | return sysctl_perf_event_paranoid > 0; |
| 1181 | } |
| 1182 | |
| 1183 | static inline bool perf_paranoid_kernel(void) |
| 1184 | { |
| 1185 | return sysctl_perf_event_paranoid > 1; |
| 1186 | } |
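
/*
 * The three helpers above encode the perf_event_paranoid scale:
 *  -1 - not paranoid at all
 *   0 - disallow raw tracepoint access to unprivileged users
 *   1 - also disallow CPU-wide events for unprivileged users
 *   2 - also disallow kernel profiling for unprivileged users
 *
 * Illustrative sketch (hypothetical helper) of how a caller combines such a
 * check with a privilege test; the perf_event_open() syscall uses
 * capable(CAP_SYS_ADMIN) for the privileged case.
 */
static inline int perf_allow_cpu_sketch(bool privileged)
{
	if (perf_paranoid_cpu() && !privileged)
		return -EACCES;
	return 0;
}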
| 1187 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1188 | extern void perf_event_init(void); |
Alexei Starovoitov | 1e1dcd9 | 2016-04-06 18:43:24 -0700 | [diff] [blame] | 1189 | extern void perf_tp_event(u16 event_type, u64 count, void *record, |
Peter Zijlstra | 1c024eca | 2010-05-19 14:02:22 +0200 | [diff] [blame] | 1190 | int entry_size, struct pt_regs *regs, |
Andrew Vagin | e6dab5f | 2012-07-11 18:14:58 +0400 | [diff] [blame] | 1191 | struct hlist_head *head, int rctx, |
| 1192 | struct task_struct *task); |
Frederic Weisbecker | 24f1e32c | 2009-09-09 19:22:48 +0200 | [diff] [blame] | 1193 | extern void perf_bp_event(struct perf_event *event, void *data); |
Ingo Molnar | 0d905bc | 2009-05-04 19:13:30 +0200 | [diff] [blame] | 1194 | |
Paul Mackerras | 9d23a90 | 2009-05-14 21:48:08 +1000 | [diff] [blame] | 1195 | #ifndef perf_misc_flags |
Ingo Molnar | e7e7ee2 | 2011-05-04 08:42:29 +0200 | [diff] [blame] | 1196 | # define perf_misc_flags(regs) \ |
| 1197 | (user_mode(regs) ? PERF_RECORD_MISC_USER : PERF_RECORD_MISC_KERNEL) |
| 1198 | # define perf_instruction_pointer(regs) instruction_pointer(regs) |
Paul Mackerras | 9d23a90 | 2009-05-14 21:48:08 +1000 | [diff] [blame] | 1199 | #endif |
| 1200 | |
Stephane Eranian | bce38cd | 2012-02-09 23:20:51 +0100 | [diff] [blame] | 1201 | static inline bool has_branch_stack(struct perf_event *event) |
| 1202 | { |
| 1203 | return event->attr.sample_type & PERF_SAMPLE_BRANCH_STACK; |
| 1204 | } |
| 1205 | |
Yan, Zheng | a46a230 | 2014-11-04 21:56:06 -0500 | [diff] [blame] | 1206 | static inline bool needs_branch_stack(struct perf_event *event) |
| 1207 | { |
| 1208 | return event->attr.branch_sample_type != 0; |
| 1209 | } |
| 1210 | |
Peter Zijlstra | 45bfb2e | 2015-01-14 14:18:11 +0200 | [diff] [blame] | 1211 | static inline bool has_aux(struct perf_event *event) |
| 1212 | { |
| 1213 | return event->pmu->setup_aux; |
| 1214 | } |
| 1215 | |
Wang Nan | 9ecda41 | 2016-04-05 14:11:18 +0000 | [diff] [blame] | 1216 | static inline bool is_write_backward(struct perf_event *event) |
| 1217 | { |
| 1218 | return !!event->attr.write_backward; |
| 1219 | } |
| 1220 | |
Alexander Shishkin | 375637b | 2016-04-27 18:44:46 +0300 | [diff] [blame] | 1221 | static inline bool has_addr_filter(struct perf_event *event) |
| 1222 | { |
| 1223 | return event->pmu->nr_addr_filters; |
| 1224 | } |
| 1225 | |
| 1226 | /* |
| 1227 |  * An inherited event uses its parent's filters. |
| 1228 | */ |
| 1229 | static inline struct perf_addr_filters_head * |
| 1230 | perf_event_addr_filters(struct perf_event *event) |
| 1231 | { |
| 1232 | struct perf_addr_filters_head *ifh = &event->addr_filters; |
| 1233 | |
| 1234 | if (event->parent) |
| 1235 | ifh = &event->parent->addr_filters; |
| 1236 | |
| 1237 | return ifh; |
| 1238 | } |
| 1239 | |
| 1240 | extern void perf_event_addr_filters_sync(struct perf_event *event); |
| 1241 | |
Markus Metzger | 5622f29 | 2009-09-15 13:00:23 +0200 | [diff] [blame] | 1242 | extern int perf_output_begin(struct perf_output_handle *handle, |
Peter Zijlstra | a7ac67e | 2011-06-27 16:47:16 +0200 | [diff] [blame] | 1243 | struct perf_event *event, unsigned int size); |
Wang Nan | 9ecda41 | 2016-04-05 14:11:18 +0000 | [diff] [blame] | 1244 | extern int perf_output_begin_forward(struct perf_output_handle *handle, |
| 1245 | struct perf_event *event, |
| 1246 | unsigned int size); |
| 1247 | extern int perf_output_begin_backward(struct perf_output_handle *handle, |
| 1248 | struct perf_event *event, |
| 1249 | unsigned int size); |
| 1250 | |
Markus Metzger | 5622f29 | 2009-09-15 13:00:23 +0200 | [diff] [blame] | 1251 | extern void perf_output_end(struct perf_output_handle *handle); |
Frederic Weisbecker | 91d7753 | 2012-08-07 15:20:38 +0200 | [diff] [blame] | 1252 | extern unsigned int perf_output_copy(struct perf_output_handle *handle, |
Markus Metzger | 5622f29 | 2009-09-15 13:00:23 +0200 | [diff] [blame] | 1253 | const void *buf, unsigned int len); |
Jiri Olsa | 5685e0f | 2012-08-07 15:20:39 +0200 | [diff] [blame] | 1254 | extern unsigned int perf_output_skip(struct perf_output_handle *handle, |
| 1255 | unsigned int len); |
Peter Zijlstra | 4ed7c92 | 2009-11-23 11:37:29 +0100 | [diff] [blame] | 1256 | extern int perf_swevent_get_recursion_context(void); |
| 1257 | extern void perf_swevent_put_recursion_context(int rctx); |
Jiri Olsa | ab57384 | 2013-05-01 17:25:44 +0200 | [diff] [blame] | 1258 | extern u64 perf_swevent_set_period(struct perf_event *event); |
Frederic Weisbecker | 44234ad | 2009-12-09 09:25:48 +0100 | [diff] [blame] | 1259 | extern void perf_event_enable(struct perf_event *event); |
| 1260 | extern void perf_event_disable(struct perf_event *event); |
Peter Zijlstra | fae3fde | 2016-01-11 15:00:50 +0100 | [diff] [blame] | 1261 | extern void perf_event_disable_local(struct perf_event *event); |
Jiri Olsa | 5aab90c | 2016-10-26 11:48:24 +0200 | [diff] [blame] | 1262 | extern void perf_event_disable_inatomic(struct perf_event *event); |
Peter Zijlstra | e9d2b06 | 2010-09-17 11:28:50 +0200 | [diff] [blame] | 1263 | extern void perf_event_task_tick(void); |
Jiri Olsa | 475113d | 2016-12-28 14:31:03 +0100 | [diff] [blame] | 1264 | extern int perf_event_account_interrupt(struct perf_event *event); |
Peter Zijlstra | e041e32 | 2014-05-21 17:32:19 +0200 | [diff] [blame] | 1265 | #else /* !CONFIG_PERF_EVENTS: */ |
Alexander Shishkin | fdc2670 | 2015-01-14 14:18:16 +0200 | [diff] [blame] | 1266 | static inline void * |
| 1267 | perf_aux_output_begin(struct perf_output_handle *handle, |
| 1268 | struct perf_event *event) { return NULL; } |
| 1269 | static inline void |
| 1270 | perf_aux_output_end(struct perf_output_handle *handle, unsigned long size, |
| 1271 | bool truncated) { } |
| 1272 | static inline int |
| 1273 | perf_aux_output_skip(struct perf_output_handle *handle, |
| 1274 | unsigned long size) { return -EINVAL; } |
| 1275 | static inline void * |
| 1276 | perf_get_aux(struct perf_output_handle *handle) { return NULL; } |
Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 1277 | static inline void |
Peter Zijlstra | ff303e6 | 2015-04-17 20:05:30 +0200 | [diff] [blame] | 1278 | perf_event_task_migrate(struct task_struct *task) { } |
| 1279 | static inline void |
Jiri Olsa | ab0cce5 | 2012-05-23 13:13:02 +0200 | [diff] [blame] | 1280 | perf_event_task_sched_in(struct task_struct *prev, |
| 1281 | struct task_struct *task) { } |
| 1282 | static inline void |
| 1283 | perf_event_task_sched_out(struct task_struct *prev, |
| 1284 | struct task_struct *next) { } |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1285 | static inline int perf_event_init_task(struct task_struct *child) { return 0; } |
| 1286 | static inline void perf_event_exit_task(struct task_struct *child) { } |
| 1287 | static inline void perf_event_free_task(struct task_struct *task) { } |
Peter Zijlstra | 4e231c7 | 2010-09-09 21:01:59 +0200 | [diff] [blame] | 1288 | static inline void perf_event_delayed_put(struct task_struct *task) { } |
Alexei Starovoitov | e03e7ee | 2016-01-25 20:59:49 -0800 | [diff] [blame] | 1289 | static inline struct file *perf_event_get(unsigned int fd) { return ERR_PTR(-EINVAL); } |
Kaixu Xia | ffe8690 | 2015-08-06 07:02:32 +0000 | [diff] [blame] | 1290 | static inline const struct perf_event_attr *perf_event_attrs(struct perf_event *event) |
| 1291 | { |
| 1292 | return ERR_PTR(-EINVAL); |
| 1293 | } |
| 1294 | static inline u64 perf_event_read_local(struct perf_event *event) { return -EINVAL; } |
Ingo Molnar | 57c0c15 | 2009-09-21 12:20:38 +0200 | [diff] [blame] | 1295 | static inline void perf_event_print_debug(void) { } |
Ingo Molnar | 57c0c15 | 2009-09-21 12:20:38 +0200 | [diff] [blame] | 1296 | static inline int perf_event_task_disable(void) { return -EINVAL; } |
| 1297 | static inline int perf_event_task_enable(void) { return -EINVAL; } |
Avi Kivity | 26ca5c1 | 2011-06-29 18:42:37 +0300 | [diff] [blame] | 1298 | static inline int perf_event_refresh(struct perf_event *event, int refresh) |
| 1299 | { |
| 1300 | return -EINVAL; |
| 1301 | } |
Peter Zijlstra | 15dbf27 | 2009-03-13 12:21:32 +0100 | [diff] [blame] | 1302 | |
Peter Zijlstra | 925d519 | 2009-03-30 19:07:02 +0200 | [diff] [blame] | 1303 | static inline void |
Peter Zijlstra | a8b0ca1 | 2011-06-27 14:41:57 +0200 | [diff] [blame] | 1304 | perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr) { } |
Frederic Weisbecker | 24f1e32c | 2009-09-09 19:22:48 +0200 | [diff] [blame] | 1305 | static inline void |
Peter Zijlstra (Intel) | 86038c5 | 2014-12-16 12:47:34 +0100 | [diff] [blame] | 1306 | perf_sw_event_sched(u32 event_id, u64 nr, u64 addr) { } |
| 1307 | static inline void |
Ingo Molnar | 184f412 | 2010-01-27 08:39:39 +0100 | [diff] [blame] | 1308 | perf_bp_event(struct perf_event *event, void *data) { } |
Peter Zijlstra | 0a4a939 | 2009-03-30 19:07:05 +0200 | [diff] [blame] | 1309 | |
Zhang, Yanmin | 39447b3 | 2010-04-19 13:32:41 +0800 | [diff] [blame] | 1310 | static inline int perf_register_guest_info_callbacks |
Ingo Molnar | e7e7ee2 | 2011-05-04 08:42:29 +0200 | [diff] [blame] | 1311 | (struct perf_guest_info_callbacks *callbacks) { return 0; } |
Zhang, Yanmin | 39447b3 | 2010-04-19 13:32:41 +0800 | [diff] [blame] | 1312 | static inline int perf_unregister_guest_info_callbacks |
Ingo Molnar | e7e7ee2 | 2011-05-04 08:42:29 +0200 | [diff] [blame] | 1313 | (struct perf_guest_info_callbacks *callbacks) { return 0; } |
Zhang, Yanmin | 39447b3 | 2010-04-19 13:32:41 +0800 | [diff] [blame] | 1314 | |
Ingo Molnar | 57c0c15 | 2009-09-21 12:20:38 +0200 | [diff] [blame] | 1315 | static inline void perf_event_mmap(struct vm_area_struct *vma) { } |
Peter Zijlstra | e041e32 | 2014-05-21 17:32:19 +0200 | [diff] [blame] | 1316 | static inline void perf_event_exec(void) { } |
Adrian Hunter | 82b8977 | 2014-05-28 11:45:04 +0300 | [diff] [blame] | 1317 | static inline void perf_event_comm(struct task_struct *tsk, bool exec) { } |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1318 | static inline void perf_event_fork(struct task_struct *tsk) { } |
| 1319 | static inline void perf_event_init(void) { } |
Ingo Molnar | 184f412 | 2010-01-27 08:39:39 +0100 | [diff] [blame] | 1320 | static inline int perf_swevent_get_recursion_context(void) { return -1; } |
Peter Zijlstra | 4ed7c92 | 2009-11-23 11:37:29 +0100 | [diff] [blame] | 1321 | static inline void perf_swevent_put_recursion_context(int rctx) { } |
Jiri Olsa | ab57384 | 2013-05-01 17:25:44 +0200 | [diff] [blame] | 1322 | static inline u64 perf_swevent_set_period(struct perf_event *event) { return 0; } |
Frederic Weisbecker | 44234ad | 2009-12-09 09:25:48 +0100 | [diff] [blame] | 1323 | static inline void perf_event_enable(struct perf_event *event) { } |
| 1324 | static inline void perf_event_disable(struct perf_event *event) { } |
K.Prasad | 500ad2d | 2012-08-02 13:46:35 +0530 | [diff] [blame] | 1325 | static inline int __perf_event_disable(void *info) { return -1; } |
Peter Zijlstra | e9d2b06 | 2010-09-17 11:28:50 +0200 | [diff] [blame] | 1326 | static inline void perf_event_task_tick(void) { } |
Kaixu Xia | ffe8690 | 2015-08-06 07:02:32 +0000 | [diff] [blame] | 1327 | static inline int perf_event_release_kernel(struct perf_event *event) { return 0; } |
Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 1328 | #endif |
| 1329 | |
David Rientjes | 6c4d3bc | 2013-03-17 15:49:10 -0700 | [diff] [blame] | 1330 | #if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_INTEL) |
| 1331 | extern void perf_restore_debug_store(void); |
| 1332 | #else |
Stephane Eranian | 1d9d863 | 2013-03-15 14:26:07 +0100 | [diff] [blame] | 1333 | static inline void perf_restore_debug_store(void) { } |
Thomas Gleixner | 0793a61 | 2008-12-04 20:12:29 +0100 | [diff] [blame] | 1334 | #endif |
| 1335 | |
Daniel Borkmann | 7e3f977 | 2016-07-14 18:08:03 +0200 | [diff] [blame] | 1336 | static __always_inline bool perf_raw_frag_last(const struct perf_raw_frag *frag) |
| 1337 | { |
| 1338 | return frag->pad < sizeof(u64); |
| 1339 | } |
| 1340 | |
Ingo Molnar | e7e7ee2 | 2011-05-04 08:42:29 +0200 | [diff] [blame] | 1341 | #define perf_output_put(handle, x) perf_output_copy((handle), &(x), sizeof(x)) |
Markus Metzger | 5622f29 | 2009-09-15 13:00:23 +0200 | [diff] [blame] | 1342 | |
Sukadev Bhattiprolu | 2663960 | 2013-01-22 22:24:23 -0800 | [diff] [blame] | 1343 | struct perf_pmu_events_attr { |
| 1344 | struct device_attribute attr; |
| 1345 | u64 id; |
Stephane Eranian | 3a54aaa | 2013-01-24 16:10:26 +0100 | [diff] [blame] | 1346 | const char *event_str; |
Sukadev Bhattiprolu | 2663960 | 2013-01-22 22:24:23 -0800 | [diff] [blame] | 1347 | }; |
| 1348 | |
Andi Kleen | fc07e9f | 2016-05-19 17:09:56 -0700 | [diff] [blame] | 1349 | struct perf_pmu_events_ht_attr { |
| 1350 | struct device_attribute attr; |
| 1351 | u64 id; |
| 1352 | const char *event_str_ht; |
| 1353 | const char *event_str_noht; |
| 1354 | }; |
| 1355 | |
Cody P Schafer | fd979c0 | 2015-01-30 13:45:57 -0800 | [diff] [blame] | 1356 | ssize_t perf_event_sysfs_show(struct device *dev, struct device_attribute *attr, |
| 1357 | char *page); |
| 1358 | |
Sukadev Bhattiprolu | 2663960 | 2013-01-22 22:24:23 -0800 | [diff] [blame] | 1359 | #define PMU_EVENT_ATTR(_name, _var, _id, _show) \ |
| 1360 | static struct perf_pmu_events_attr _var = { \ |
| 1361 | .attr = __ATTR(_name, 0444, _show, NULL), \ |
| 1362 | .id = _id, \ |
| 1363 | }; |
| 1364 | |
Cody P Schafer | f0405b8 | 2015-01-30 13:45:58 -0800 | [diff] [blame] | 1365 | #define PMU_EVENT_ATTR_STRING(_name, _var, _str) \ |
| 1366 | static struct perf_pmu_events_attr _var = { \ |
| 1367 | .attr = __ATTR(_name, 0444, perf_event_sysfs_show, NULL), \ |
| 1368 | .id = 0, \ |
| 1369 | .event_str = _str, \ |
| 1370 | }; |
| 1371 | |
Jiri Olsa | 641cc93 | 2012-03-15 20:09:14 +0100 | [diff] [blame] | 1372 | #define PMU_FORMAT_ATTR(_name, _format) \ |
| 1373 | static ssize_t \ |
| 1374 | _name##_show(struct device *dev, \ |
| 1375 | struct device_attribute *attr, \ |
| 1376 | char *page) \ |
| 1377 | { \ |
| 1378 | BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE); \ |
| 1379 | return sprintf(page, _format "\n"); \ |
| 1380 | } \ |
| 1381 | \ |
| 1382 | static struct device_attribute format_attr_##_name = __ATTR_RO(_name) |
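
/*
 * Illustrative sketch (names invented for the example, guarded out so that
 * the header itself defines nothing): how a PMU driver typically uses the
 * attribute helpers above to expose a config format and a named event
 * through sysfs.
 */
#if 0
PMU_FORMAT_ATTR(event, "config:0-7");
PMU_EVENT_ATTR_STRING(cpu_cycles, sketch_attr_cpu_cycles, "event=0x3c");

static struct attribute *sketch_event_attrs[] = {
	&sketch_attr_cpu_cycles.attr.attr,
	NULL,
};
#endif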
| 1383 | |
Thomas Gleixner | 00e16c3 | 2016-07-13 17:16:09 +0000 | [diff] [blame] | 1384 | /* Performance event CPU hotplug functions */ |
| 1385 | #ifdef CONFIG_PERF_EVENTS |
| 1386 | int perf_event_init_cpu(unsigned int cpu); |
| 1387 | int perf_event_exit_cpu(unsigned int cpu); |
| 1388 | #else |
| 1389 | #define perf_event_init_cpu NULL |
| 1390 | #define perf_event_exit_cpu NULL |
| 1391 | #endif |
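
/*
 * Illustrative sketch only: the two callbacks above follow the CPU hotplug
 * state machine's startup/teardown signature.  In the kernel they are wired
 * up by the hotplug core itself (which is why the !CONFIG_PERF_EVENTS case
 * defines them to NULL); the dynamic registration below, with a made-up
 * name, merely shows the shape such a registration would take.
 */
#if 0
static int __init perf_hotplug_sketch_init(void)
{
	return cpuhp_setup_state(CPUHP_PERF_PREPARE, "perf/sketch:prepare",
				 perf_event_init_cpu, perf_event_exit_cpu);
}
#endif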
| 1392 | |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1393 | #endif /* _LINUX_PERF_EVENT_H */ |