/*
 * Performance counter x86 architecture code
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2009 Jaswinder Singh Rajput
 *  Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
 *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *
 *  For licencing details see kernel-base/COPYING
 */

#include <linux/perf_counter.h>
#include <linux/capability.h>
#include <linux/notifier.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/module.h>
#include <linux/kdebug.h>
#include <linux/sched.h>
#include <linux/uaccess.h>

#include <asm/apic.h>
#include <asm/stacktrace.h>
#include <asm/nmi.h>

static u64 perf_counter_mask __read_mostly;

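/*
 * Per-CPU PMU bookkeeping: which hardware counters are allocated
 * (used_mask), which are currently running (active_mask), and whether
 * the PMU is enabled on this CPU.
 */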
struct cpu_hw_counters {
	struct perf_counter	*counters[X86_PMC_IDX_MAX];
	unsigned long		used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
	unsigned long		active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
	unsigned long		interrupts;
	int			enabled;
};

/*
 * struct x86_pmu - generic x86 pmu
 */
struct x86_pmu {
	const char	*name;
	int		version;
	int		(*handle_irq)(struct pt_regs *);
	void		(*disable_all)(void);
	void		(*enable_all)(void);
	void		(*enable)(struct hw_perf_counter *, int);
	void		(*disable)(struct hw_perf_counter *, int);
	unsigned	eventsel;
	unsigned	perfctr;
	u64		(*event_map)(int);
	u64		(*raw_event)(u64);
	int		max_events;
	int		num_counters;
	int		num_counters_fixed;
	int		counter_bits;
	u64		counter_mask;
	u64		max_period;
	u64		intel_ctrl;
};

static struct x86_pmu x86_pmu __read_mostly;

static DEFINE_PER_CPU(struct cpu_hw_counters, cpu_hw_counters) = {
	.enabled = 1,
};

/*
 * Intel PerfMon v3. Used on Core2 and later.
 */
static const u64 intel_perfmon_event_map[] =
{
	[PERF_COUNT_HW_CPU_CYCLES]		= 0x003c,
	[PERF_COUNT_HW_INSTRUCTIONS]		= 0x00c0,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= 0x4f2e,
	[PERF_COUNT_HW_CACHE_MISSES]		= 0x412e,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x00c4,
	[PERF_COUNT_HW_BRANCH_MISSES]		= 0x00c5,
	[PERF_COUNT_HW_BUS_CYCLES]		= 0x013c,
};

static u64 intel_pmu_event_map(int event)
{
	return intel_perfmon_event_map[event];
}

/*
 * Generalized hw caching related event table, filled
 * in on a per model basis. A value of 0 means
 * 'not supported', -1 means 'event makes no sense on
 * this CPU', any other value means the raw event
 * ID.
 */

#define C(x)	PERF_COUNT_HW_CACHE_##x

static u64 __read_mostly hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX];

static const u64 nehalem_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI */
		[ C(RESULT_MISS)   ] = 0x0140, /* L1D_CACHE_LD.I_STATE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI */
		[ C(RESULT_MISS)   ] = 0x0141, /* L1D_CACHE_ST.I_STATE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
		[ C(RESULT_MISS)   ] = 0x024e, /* L1D_PREFETCH.MISS */
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
		[ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0324, /* L2_RQSTS.LOADS */
		[ C(RESULT_MISS)   ] = 0x0224, /* L2_RQSTS.LD_MISS */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0c24, /* L2_RQSTS.RFOS */
		[ C(RESULT_MISS)   ] = 0x0824, /* L2_RQSTS.RFO_MISS */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f2e, /* LLC Reference */
		[ C(RESULT_MISS)   ] = 0x412e, /* LLC Misses */
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS)   ] = 0x20c8, /* ITLB_MISS_RETIRED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
		[ C(RESULT_MISS)   ] = 0x03e8, /* BPU_CLEARS.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
};

static const u64 core2_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI */
		[ C(RESULT_MISS)   ] = 0x0140, /* L1D_CACHE_LD.I_STATE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI */
		[ C(RESULT_MISS)   ] = 0x0141, /* L1D_CACHE_ST.I_STATE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x104e, /* L1D_PREFETCH.REQUESTS */
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0080, /* L1I.READS */
		[ C(RESULT_MISS)   ] = 0x0081, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
		[ C(RESULT_MISS)   ] = 0x4129, /* L2_LD.ISTATE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
		[ C(RESULT_MISS)   ] = 0x412A, /* L2_ST.ISTATE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x0208, /* DTLB_MISSES.MISS_LD */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x0808, /* DTLB_MISSES.MISS_ST */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS)   ] = 0x1282, /* ITLBMISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
		[ C(RESULT_MISS)   ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
};

static const u64 atom_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE.LD */
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE.ST */
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
		[ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
		[ C(RESULT_MISS)   ] = 0x4129, /* L2_LD.ISTATE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
		[ C(RESULT_MISS)   ] = 0x412A, /* L2_ST.ISTATE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE_LD.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x0508, /* DTLB_MISSES.MISS_LD */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE_ST.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x0608, /* DTLB_MISSES.MISS_ST */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS)   ] = 0x0282, /* ITLB.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
		[ C(RESULT_MISS)   ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
};

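/*
 * Sanitize a user-supplied raw config: keep only the event-select
 * fields that may be passed through to the EVNTSEL MSR.
 */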
static u64 intel_pmu_raw_event(u64 event)
{
#define CORE_EVNTSEL_EVENT_MASK		0x000000FFULL
#define CORE_EVNTSEL_UNIT_MASK		0x0000FF00ULL
#define CORE_EVNTSEL_EDGE_MASK		0x00040000ULL
#define CORE_EVNTSEL_INV_MASK		0x00800000ULL
#define CORE_EVNTSEL_COUNTER_MASK	0xFF000000ULL

#define CORE_EVNTSEL_MASK		\
	(CORE_EVNTSEL_EVENT_MASK |	\
	 CORE_EVNTSEL_UNIT_MASK  |	\
	 CORE_EVNTSEL_EDGE_MASK  |	\
	 CORE_EVNTSEL_INV_MASK   |	\
	 CORE_EVNTSEL_COUNTER_MASK)

	return event & CORE_EVNTSEL_MASK;
}

static const u64 amd_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses */
		[ C(RESULT_MISS)   ] = 0x0041, /* Data Cache Misses */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0042, /* Data Cache Refills from L2 */
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0267, /* Data Prefetcher :attempts */
		[ C(RESULT_MISS)   ] = 0x0167, /* Data Prefetcher :cancelled */
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0080, /* Instruction cache fetches */
		[ C(RESULT_MISS)   ] = 0x0081, /* Instruction cache misses */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x014B, /* Prefetch Instructions :Load */
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x037D, /* Requests to L2 Cache :IC+DC */
		[ C(RESULT_MISS)   ] = 0x037E, /* L2 Cache Misses : IC+DC */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x017F, /* L2 Fill/Writeback */
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses */
		[ C(RESULT_MISS)   ] = 0x0046, /* L1 DTLB and L2 DLTB Miss */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0080, /* Instruction fetches */
		[ C(RESULT_MISS)   ] = 0x0085, /* Instr. fetch ITLB misses */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c2, /* Retired Branch Instr. */
		[ C(RESULT_MISS)   ] = 0x00c3, /* Retired Mispredicted BI */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
};

/*
 * AMD Performance Monitor K7 and later.
 */
static const u64 amd_perfmon_event_map[] =
{
	[PERF_COUNT_HW_CPU_CYCLES]		= 0x0076,
	[PERF_COUNT_HW_INSTRUCTIONS]		= 0x00c0,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= 0x0080,
	[PERF_COUNT_HW_CACHE_MISSES]		= 0x0081,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x00c4,
	[PERF_COUNT_HW_BRANCH_MISSES]		= 0x00c5,
};

static u64 amd_pmu_event_map(int event)
{
	return amd_perfmon_event_map[event];
}

static u64 amd_pmu_raw_event(u64 event)
{
#define K7_EVNTSEL_EVENT_MASK	0x7000000FFULL
#define K7_EVNTSEL_UNIT_MASK	0x00000FF00ULL
#define K7_EVNTSEL_EDGE_MASK	0x000040000ULL
#define K7_EVNTSEL_INV_MASK	0x000800000ULL
#define K7_EVNTSEL_COUNTER_MASK	0x0FF000000ULL

#define K7_EVNTSEL_MASK			\
	(K7_EVNTSEL_EVENT_MASK |	\
	 K7_EVNTSEL_UNIT_MASK  |	\
	 K7_EVNTSEL_EDGE_MASK  |	\
	 K7_EVNTSEL_INV_MASK   |	\
	 K7_EVNTSEL_COUNTER_MASK)

	return event & K7_EVNTSEL_MASK;
}

/*
 * Propagate counter elapsed time into the generic counter.
 * Can only be executed on the CPU where the counter is active.
 * Returns the delta events processed.
 */
static u64
x86_perf_counter_update(struct perf_counter *counter,
			struct hw_perf_counter *hwc, int idx)
{
	int shift = 64 - x86_pmu.counter_bits;
	u64 prev_raw_count, new_raw_count;
	s64 delta;

	/*
	 * Careful: an NMI might modify the previous counter value.
	 *
	 * Our tactic to handle this is to first atomically read and
	 * exchange a new raw count - then add that new-prev delta
	 * count to the generic counter atomically:
	 */
again:
	prev_raw_count = atomic64_read(&hwc->prev_count);
	rdmsrl(hwc->counter_base + idx, new_raw_count);

	if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
					new_raw_count) != prev_raw_count)
		goto again;

	/*
	 * Now we have the new raw value and have updated the prev
	 * timestamp already. We can now calculate the elapsed delta
	 * (counter-)time and add that to the generic counter.
	 *
	 * Careful, not all hw sign-extends above the physical width
	 * of the count.
	 */
	delta = (new_raw_count << shift) - (prev_raw_count << shift);
	delta >>= shift;

	atomic64_add(delta, &counter->count);
	atomic64_sub(delta, &hwc->period_left);

	return new_raw_count;
}

static atomic_t active_counters;
static DEFINE_MUTEX(pmc_reserve_mutex);

static bool reserve_pmc_hardware(void)
{
	int i;

	if (nmi_watchdog == NMI_LOCAL_APIC)
		disable_lapic_nmi_watchdog();

	for (i = 0; i < x86_pmu.num_counters; i++) {
		if (!reserve_perfctr_nmi(x86_pmu.perfctr + i))
			goto perfctr_fail;
	}

	for (i = 0; i < x86_pmu.num_counters; i++) {
		if (!reserve_evntsel_nmi(x86_pmu.eventsel + i))
			goto eventsel_fail;
	}

	return true;

eventsel_fail:
	for (i--; i >= 0; i--)
		release_evntsel_nmi(x86_pmu.eventsel + i);

	i = x86_pmu.num_counters;

perfctr_fail:
	for (i--; i >= 0; i--)
		release_perfctr_nmi(x86_pmu.perfctr + i);

	if (nmi_watchdog == NMI_LOCAL_APIC)
		enable_lapic_nmi_watchdog();

	return false;
}

static void release_pmc_hardware(void)
{
	int i;

	for (i = 0; i < x86_pmu.num_counters; i++) {
		release_perfctr_nmi(x86_pmu.perfctr + i);
		release_evntsel_nmi(x86_pmu.eventsel + i);
	}

	if (nmi_watchdog == NMI_LOCAL_APIC)
		enable_lapic_nmi_watchdog();
}

static void hw_perf_counter_destroy(struct perf_counter *counter)
{
	if (atomic_dec_and_mutex_lock(&active_counters, &pmc_reserve_mutex)) {
		release_pmc_hardware();
		mutex_unlock(&pmc_reserve_mutex);
	}
}

static inline int x86_pmu_initialized(void)
{
	return x86_pmu.handle_irq != NULL;
}

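/*
 * Decode a PERF_TYPE_HW_CACHE config (cache-id, op-id and result-id
 * packed into the low three bytes of attr->config) into the
 * model-specific raw event from hw_cache_event_ids.
 */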
static inline int
set_ext_hw_attr(struct hw_perf_counter *hwc, struct perf_counter_attr *attr)
{
	unsigned int cache_type, cache_op, cache_result;
	u64 config, val;

	config = attr->config;

	cache_type = (config >> 0) & 0xff;
	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
		return -EINVAL;

	cache_op = (config >> 8) & 0xff;
	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
		return -EINVAL;

	cache_result = (config >> 16) & 0xff;
	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -EINVAL;

	val = hw_cache_event_ids[cache_type][cache_op][cache_result];

	if (val == 0)
		return -ENOENT;

	if (val == -1)
		return -EINVAL;

	hwc->config |= val;

	return 0;
}

/*
 * Setup the hardware configuration for a given attr_type
 */
static int __hw_perf_counter_init(struct perf_counter *counter)
{
	struct perf_counter_attr *attr = &counter->attr;
	struct hw_perf_counter *hwc = &counter->hw;
	int err;

	if (!x86_pmu_initialized())
		return -ENODEV;

	err = 0;
	if (!atomic_inc_not_zero(&active_counters)) {
		mutex_lock(&pmc_reserve_mutex);
		if (atomic_read(&active_counters) == 0 && !reserve_pmc_hardware())
			err = -EBUSY;
		else
			atomic_inc(&active_counters);
		mutex_unlock(&pmc_reserve_mutex);
	}
	if (err)
		return err;

	/*
	 * Generate PMC IRQs:
	 * (keep 'enabled' bit clear for now)
	 */
	hwc->config = ARCH_PERFMON_EVENTSEL_INT;

	/*
	 * Count user and OS events unless requested not to.
	 */
	if (!attr->exclude_user)
		hwc->config |= ARCH_PERFMON_EVENTSEL_USR;
	if (!attr->exclude_kernel)
		hwc->config |= ARCH_PERFMON_EVENTSEL_OS;

	if (!hwc->sample_period) {
		hwc->sample_period = x86_pmu.max_period;
		hwc->last_period = hwc->sample_period;
		atomic64_set(&hwc->period_left, hwc->sample_period);
	}

	counter->destroy = hw_perf_counter_destroy;

	/*
	 * Raw event type provides the config in the event structure
	 */
	if (attr->type == PERF_TYPE_RAW) {
		hwc->config |= x86_pmu.raw_event(attr->config);
		return 0;
	}

	if (attr->type == PERF_TYPE_HW_CACHE)
		return set_ext_hw_attr(hwc, attr);

	if (attr->config >= x86_pmu.max_events)
		return -EINVAL;
	/*
	 * The generic map:
	 */
	hwc->config |= x86_pmu.event_map(attr->config);

	return 0;
}

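/*
 * Model-specific helpers to stop/start all counters at once: Intel has
 * a single global control MSR, AMD has to toggle the enable bit in each
 * per-counter EVNTSEL MSR.
 */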
static void intel_pmu_disable_all(void)
{
	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
}

static void amd_pmu_disable_all(void)
{
	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
	int idx;

	if (!cpuc->enabled)
		return;

	cpuc->enabled = 0;
	/*
	 * ensure we write the disable before we start disabling the
	 * counters proper, so that amd_pmu_enable_counter() does the
	 * right thing.
	 */
	barrier();

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		u64 val;

		if (!test_bit(idx, cpuc->active_mask))
			continue;
		rdmsrl(MSR_K7_EVNTSEL0 + idx, val);
		if (!(val & ARCH_PERFMON_EVENTSEL0_ENABLE))
			continue;
		val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
		wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
	}
}

void hw_perf_disable(void)
{
	if (!x86_pmu_initialized())
		return;
	return x86_pmu.disable_all();
}

static void intel_pmu_enable_all(void)
{
	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl);
}

static void amd_pmu_enable_all(void)
{
	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
	int idx;

	if (cpuc->enabled)
		return;

	cpuc->enabled = 1;
	barrier();

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		u64 val;

		if (!test_bit(idx, cpuc->active_mask))
			continue;
		rdmsrl(MSR_K7_EVNTSEL0 + idx, val);
		if (val & ARCH_PERFMON_EVENTSEL0_ENABLE)
			continue;
		val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
		wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
	}
}

void hw_perf_enable(void)
{
	if (!x86_pmu_initialized())
		return;
	x86_pmu.enable_all();
}

static inline u64 intel_pmu_get_status(void)
{
	u64 status;

	rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);

	return status;
}

static inline void intel_pmu_ack_status(u64 ack)
{
	wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
}

static inline void x86_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
{
	int err;
	err = checking_wrmsrl(hwc->config_base + idx,
			      hwc->config | ARCH_PERFMON_EVENTSEL0_ENABLE);
}

static inline void x86_pmu_disable_counter(struct hw_perf_counter *hwc, int idx)
{
	int err;
	err = checking_wrmsrl(hwc->config_base + idx,
			      hwc->config);
}

static inline void
intel_pmu_disable_fixed(struct hw_perf_counter *hwc, int __idx)
{
	int idx = __idx - X86_PMC_IDX_FIXED;
	u64 ctrl_val, mask;
	int err;

	mask = 0xfULL << (idx * 4);

	rdmsrl(hwc->config_base, ctrl_val);
	ctrl_val &= ~mask;
	err = checking_wrmsrl(hwc->config_base, ctrl_val);
}

static inline void
intel_pmu_disable_counter(struct hw_perf_counter *hwc, int idx)
{
	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
		intel_pmu_disable_fixed(hwc, idx);
		return;
	}

	x86_pmu_disable_counter(hwc, idx);
}

static inline void
amd_pmu_disable_counter(struct hw_perf_counter *hwc, int idx)
{
	x86_pmu_disable_counter(hwc, idx);
}

static DEFINE_PER_CPU(u64, prev_left[X86_PMC_IDX_MAX]);

/*
 * Set the next IRQ period, based on the hwc->period_left value.
 * To be called with the counter disabled in hw:
 */
static int
x86_perf_counter_set_period(struct perf_counter *counter,
			    struct hw_perf_counter *hwc, int idx)
{
	s64 left = atomic64_read(&hwc->period_left);
	s64 period = hwc->sample_period;
	int err, ret = 0;

	/*
	 * If we are way outside a reasonable range then just skip forward:
	 */
	if (unlikely(left <= -period)) {
		left = period;
		atomic64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (unlikely(left <= 0)) {
		left += period;
		atomic64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}
	/*
	 * Quirk: certain CPUs don't like it if just 1 event is left:
	 */
	if (unlikely(left < 2))
		left = 2;

	if (left > x86_pmu.max_period)
		left = x86_pmu.max_period;

	per_cpu(prev_left[idx], smp_processor_id()) = left;

	/*
	 * The hw counter starts counting from this counter offset,
	 * mark it to be able to extract future deltas:
	 */
	atomic64_set(&hwc->prev_count, (u64)-left);

	err = checking_wrmsrl(hwc->counter_base + idx,
			      (u64)(-left) & x86_pmu.counter_mask);

	return ret;
}

static inline void
intel_pmu_enable_fixed(struct hw_perf_counter *hwc, int __idx)
{
	int idx = __idx - X86_PMC_IDX_FIXED;
	u64 ctrl_val, bits, mask;
	int err;

	/*
	 * Enable IRQ generation (0x8),
	 * and enable ring-3 counting (0x2) and ring-0 counting (0x1)
	 * if requested:
	 */
	bits = 0x8ULL;
	if (hwc->config & ARCH_PERFMON_EVENTSEL_USR)
		bits |= 0x2;
	if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
		bits |= 0x1;
	bits <<= (idx * 4);
	mask = 0xfULL << (idx * 4);

	rdmsrl(hwc->config_base, ctrl_val);
	ctrl_val &= ~mask;
	ctrl_val |= bits;
	err = checking_wrmsrl(hwc->config_base, ctrl_val);
}

static void intel_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
{
	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
		intel_pmu_enable_fixed(hwc, idx);
		return;
	}

	x86_pmu_enable_counter(hwc, idx);
}

static void amd_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
{
	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);

	if (cpuc->enabled)
		x86_pmu_enable_counter(hwc, idx);
	else
		x86_pmu_disable_counter(hwc, idx);
}

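/*
 * Map a generic counter config onto one of the Intel fixed-purpose
 * counters, or return -1 if it has to be scheduled on a generic counter.
 */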
static int
fixed_mode_idx(struct perf_counter *counter, struct hw_perf_counter *hwc)
{
	unsigned int event;

	if (!x86_pmu.num_counters_fixed)
		return -1;

	/*
	 * Quirk, IA32_FIXED_CTRs do not work on current Atom processors:
	 */
	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
	    boot_cpu_data.x86_model == 28)
		return -1;

	event = hwc->config & ARCH_PERFMON_EVENT_MASK;

	if (unlikely(event == x86_pmu.event_map(PERF_COUNT_HW_INSTRUCTIONS)))
		return X86_PMC_IDX_FIXED_INSTRUCTIONS;
	if (unlikely(event == x86_pmu.event_map(PERF_COUNT_HW_CPU_CYCLES)))
		return X86_PMC_IDX_FIXED_CPU_CYCLES;
	if (unlikely(event == x86_pmu.event_map(PERF_COUNT_HW_BUS_CYCLES)))
		return X86_PMC_IDX_FIXED_BUS_CYCLES;

	return -1;
}

/*
 * Find a PMC slot for the freshly enabled / scheduled in counter:
 */
static int x86_pmu_enable(struct perf_counter *counter)
{
	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
	struct hw_perf_counter *hwc = &counter->hw;
	int idx;

	idx = fixed_mode_idx(counter, hwc);
	if (idx >= 0) {
		/*
		 * Try to get the fixed counter, if that is already taken
		 * then try to get a generic counter:
		 */
		if (test_and_set_bit(idx, cpuc->used_mask))
			goto try_generic;

		hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
		/*
		 * We set it so that counter_base + idx in wrmsr/rdmsr maps to
		 * MSR_ARCH_PERFMON_FIXED_CTR0 ... CTR2:
		 */
		hwc->counter_base =
			MSR_ARCH_PERFMON_FIXED_CTR0 - X86_PMC_IDX_FIXED;
		hwc->idx = idx;
	} else {
		idx = hwc->idx;
		/* Try to get the previous generic counter again */
		if (test_and_set_bit(idx, cpuc->used_mask)) {
try_generic:
			idx = find_first_zero_bit(cpuc->used_mask,
						  x86_pmu.num_counters);
			if (idx == x86_pmu.num_counters)
				return -EAGAIN;

			set_bit(idx, cpuc->used_mask);
			hwc->idx = idx;
		}
		hwc->config_base  = x86_pmu.eventsel;
		hwc->counter_base = x86_pmu.perfctr;
	}

	perf_counters_lapic_init();

	x86_pmu.disable(hwc, idx);

	cpuc->counters[idx] = counter;
	set_bit(idx, cpuc->active_mask);

	x86_perf_counter_set_period(counter, hwc, idx);
	x86_pmu.enable(hwc, idx);

	return 0;
}

static void x86_pmu_unthrottle(struct perf_counter *counter)
{
	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
	struct hw_perf_counter *hwc = &counter->hw;

	if (WARN_ON_ONCE(hwc->idx >= X86_PMC_IDX_MAX ||
				cpuc->counters[hwc->idx] != counter))
		return;

	x86_pmu.enable(hwc, hwc->idx);
}

void perf_counter_print_debug(void)
{
	u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left, fixed;
	struct cpu_hw_counters *cpuc;
	unsigned long flags;
	int cpu, idx;

	if (!x86_pmu.num_counters)
		return;

	local_irq_save(flags);

	cpu = smp_processor_id();
	cpuc = &per_cpu(cpu_hw_counters, cpu);

	if (x86_pmu.version >= 2) {
		rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
		rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
		rdmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, overflow);
		rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, fixed);

		pr_info("\n");
		pr_info("CPU#%d: ctrl:       %016llx\n", cpu, ctrl);
		pr_info("CPU#%d: status:     %016llx\n", cpu, status);
		pr_info("CPU#%d: overflow:   %016llx\n", cpu, overflow);
		pr_info("CPU#%d: fixed:      %016llx\n", cpu, fixed);
	}
	pr_info("CPU#%d: used:       %016llx\n", cpu, *(u64 *)cpuc->used_mask);

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		rdmsrl(x86_pmu.eventsel + idx, pmc_ctrl);
		rdmsrl(x86_pmu.perfctr  + idx, pmc_count);

		prev_left = per_cpu(prev_left[idx], cpu);

		pr_info("CPU#%d:   gen-PMC%d ctrl:  %016llx\n",
			cpu, idx, pmc_ctrl);
		pr_info("CPU#%d:   gen-PMC%d count: %016llx\n",
			cpu, idx, pmc_count);
		pr_info("CPU#%d:   gen-PMC%d left:  %016llx\n",
			cpu, idx, prev_left);
	}
	for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) {
		rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, pmc_count);

		pr_info("CPU#%d: fixed-PMC%d count: %016llx\n",
			cpu, idx, pmc_count);
	}
	local_irq_restore(flags);
}

static void x86_pmu_disable(struct perf_counter *counter)
{
	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
	struct hw_perf_counter *hwc = &counter->hw;
	int idx = hwc->idx;

	/*
	 * Must be done before we disable, otherwise the nmi handler
	 * could reenable again:
	 */
	clear_bit(idx, cpuc->active_mask);
	x86_pmu.disable(hwc, idx);

	/*
	 * Make sure the cleared pointer becomes visible before we
	 * (potentially) free the counter:
	 */
	barrier();

	/*
	 * Drain the remaining delta count out of a counter
	 * that we are disabling:
	 */
	x86_perf_counter_update(counter, hwc, idx);
	cpuc->counters[idx] = NULL;
	clear_bit(idx, cpuc->used_mask);
}

/*
 * Save and restart an expired counter. Called by NMI contexts,
 * so it has to be careful about preempting normal counter ops:
 */
static int intel_pmu_save_and_restart(struct perf_counter *counter)
{
	struct hw_perf_counter *hwc = &counter->hw;
	int idx = hwc->idx;
	int ret;

	x86_perf_counter_update(counter, hwc, idx);
	ret = x86_perf_counter_set_period(counter, hwc, idx);

	if (counter->state == PERF_COUNTER_STATE_ACTIVE)
		intel_pmu_enable_counter(hwc, idx);

	return ret;
}

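/*
 * Last-resort recovery: clear every generic and fixed counter so a
 * wedged PMU cannot keep raising interrupts.
 */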
static void intel_pmu_reset(void)
{
	unsigned long flags;
	int idx;

	if (!x86_pmu.num_counters)
		return;

	local_irq_save(flags);

	printk("clearing PMU state on CPU#%d\n", smp_processor_id());

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		checking_wrmsrl(x86_pmu.eventsel + idx, 0ull);
		checking_wrmsrl(x86_pmu.perfctr  + idx, 0ull);
	}
	for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) {
		checking_wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);
	}

	local_irq_restore(flags);
}


/*
 * This handler is triggered by the local APIC, so the APIC IRQ handling
 * rules apply:
 */
static int intel_pmu_handle_irq(struct pt_regs *regs)
{
	struct perf_sample_data data;
	struct cpu_hw_counters *cpuc;
	int bit, cpu, loops;
	u64 ack, status;

	data.regs = regs;
	data.addr = 0;

	cpu = smp_processor_id();
	cpuc = &per_cpu(cpu_hw_counters, cpu);

	perf_disable();
	status = intel_pmu_get_status();
	if (!status) {
		perf_enable();
		return 0;
	}

	loops = 0;
again:
	if (++loops > 100) {
		WARN_ONCE(1, "perfcounters: irq loop stuck!\n");
		perf_counter_print_debug();
		intel_pmu_reset();
		perf_enable();
		return 1;
	}

	inc_irq_stat(apic_perf_irqs);
	ack = status;
	for_each_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
		struct perf_counter *counter = cpuc->counters[bit];

		clear_bit(bit, (unsigned long *) &status);
		if (!test_bit(bit, cpuc->active_mask))
			continue;

		if (!intel_pmu_save_and_restart(counter))
			continue;

		if (perf_counter_overflow(counter, 1, &data))
			intel_pmu_disable_counter(&counter->hw, bit);
	}

	intel_pmu_ack_status(ack);

	/*
	 * Repeat if there is more work to be done:
	 */
	status = intel_pmu_get_status();
	if (status)
		goto again;

	perf_enable();

	return 1;
}

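/*
 * AMD has no global overflow status register, so poll every active
 * counter: a counter whose top bit has cleared has wrapped past zero
 * and therefore overflowed.
 */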
static int amd_pmu_handle_irq(struct pt_regs *regs)
{
	struct perf_sample_data data;
	struct cpu_hw_counters *cpuc;
	struct perf_counter *counter;
	struct hw_perf_counter *hwc;
	int cpu, idx, handled = 0;
	u64 val;

	data.regs = regs;
	data.addr = 0;

	cpu = smp_processor_id();
	cpuc = &per_cpu(cpu_hw_counters, cpu);

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		if (!test_bit(idx, cpuc->active_mask))
			continue;

		counter = cpuc->counters[idx];
		hwc = &counter->hw;

		val = x86_perf_counter_update(counter, hwc, idx);
		if (val & (1ULL << (x86_pmu.counter_bits - 1)))
			continue;

		/*
		 * counter overflow
		 */
		handled = 1;
		data.period = counter->hw.last_period;

		if (!x86_perf_counter_set_period(counter, hwc, idx))
			continue;

		if (perf_counter_overflow(counter, 1, &data))
			amd_pmu_disable_counter(hwc, idx);
	}

	if (handled)
		inc_irq_stat(apic_perf_irqs);

	return handled;
}

void smp_perf_pending_interrupt(struct pt_regs *regs)
{
	irq_enter();
	ack_APIC_irq();
	inc_irq_stat(apic_pending_irqs);
	perf_counter_do_pending();
	irq_exit();
}

void set_perf_counter_pending(void)
{
	apic->send_IPI_self(LOCAL_PENDING_VECTOR);
}

void perf_counters_lapic_init(void)
{
	if (!x86_pmu_initialized())
		return;

	/*
	 * Always use NMI for PMU
	 */
	apic_write(APIC_LVTPC, APIC_DM_NMI);
}

1314static int __kprobes
1315perf_counter_nmi_handler(struct notifier_block *self,
1316 unsigned long cmd, void *__args)
1317{
1318 struct die_args *args = __args;
1319 struct pt_regs *regs;
1320
Peter Zijlstraba778132009-05-04 18:47:44 +02001321 if (!atomic_read(&active_counters))
Peter Zijlstra63a809a2009-05-01 12:23:17 +02001322 return NOTIFY_DONE;
1323
Peter Zijlstrab0f3f282009-03-05 18:08:27 +01001324 switch (cmd) {
1325 case DIE_NMI:
1326 case DIE_NMI_IPI:
1327 break;
1328
1329 default:
Ingo Molnar241771e2008-12-03 10:39:53 +01001330 return NOTIFY_DONE;
Peter Zijlstrab0f3f282009-03-05 18:08:27 +01001331 }
Ingo Molnar241771e2008-12-03 10:39:53 +01001332
1333 regs = args->regs;
1334
1335 apic_write(APIC_LVTPC, APIC_DM_NMI);
Peter Zijlstraa4016a72009-05-14 14:52:17 +02001336 /*
 1337	 * Can't rely on the handled return value to say it was our NMI: two
 1338	 * counters could trigger 'simultaneously', raising two back-to-back NMIs.
1339 *
1340 * If the first NMI handles both, the latter will be empty and daze
1341 * the CPU.
1342 */
Yong Wanga3288102009-06-03 13:12:55 +08001343 x86_pmu.handle_irq(regs);
Ingo Molnar241771e2008-12-03 10:39:53 +01001344
Peter Zijlstraa4016a72009-05-14 14:52:17 +02001345 return NOTIFY_STOP;
Ingo Molnar241771e2008-12-03 10:39:53 +01001346}
1347
1348static __read_mostly struct notifier_block perf_counter_nmi_notifier = {
Mike Galbraith5b75af02009-02-04 17:11:34 +01001349 .notifier_call = perf_counter_nmi_handler,
1350 .next = NULL,
1351 .priority = 1
Ingo Molnar241771e2008-12-03 10:39:53 +01001352};
1353
Robert Richter5f4ec282009-04-29 12:47:04 +02001354static struct x86_pmu intel_pmu = {
Robert Richterfaa28ae2009-04-29 12:47:13 +02001355 .name = "Intel",
Robert Richter39d81ea2009-04-29 12:47:05 +02001356 .handle_irq = intel_pmu_handle_irq,
Peter Zijlstra9e35ad32009-05-13 16:21:38 +02001357 .disable_all = intel_pmu_disable_all,
1358 .enable_all = intel_pmu_enable_all,
Robert Richter5f4ec282009-04-29 12:47:04 +02001359 .enable = intel_pmu_enable_counter,
1360 .disable = intel_pmu_disable_counter,
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05301361 .eventsel = MSR_ARCH_PERFMON_EVENTSEL0,
1362 .perfctr = MSR_ARCH_PERFMON_PERFCTR0,
Robert Richter5f4ec282009-04-29 12:47:04 +02001363 .event_map = intel_pmu_event_map,
1364 .raw_event = intel_pmu_raw_event,
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05301365 .max_events = ARRAY_SIZE(intel_perfmon_event_map),
Robert Richterc619b8f2009-04-29 12:47:23 +02001366 /*
1367 * Intel PMCs cannot be accessed sanely above 32 bit width,
1368 * so we install an artificial 1<<31 period regardless of
1369 * the generic counter period:
1370 */
1371 .max_period = (1ULL << 31) - 1,
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05301372};
1373
Robert Richter5f4ec282009-04-29 12:47:04 +02001374static struct x86_pmu amd_pmu = {
Robert Richterfaa28ae2009-04-29 12:47:13 +02001375 .name = "AMD",
Robert Richter39d81ea2009-04-29 12:47:05 +02001376 .handle_irq = amd_pmu_handle_irq,
Peter Zijlstra9e35ad32009-05-13 16:21:38 +02001377 .disable_all = amd_pmu_disable_all,
1378 .enable_all = amd_pmu_enable_all,
Robert Richter5f4ec282009-04-29 12:47:04 +02001379 .enable = amd_pmu_enable_counter,
1380 .disable = amd_pmu_disable_counter,
Jaswinder Singh Rajputf87ad352009-02-27 20:15:14 +05301381 .eventsel = MSR_K7_EVNTSEL0,
1382 .perfctr = MSR_K7_PERFCTR0,
Robert Richter5f4ec282009-04-29 12:47:04 +02001383 .event_map = amd_pmu_event_map,
1384 .raw_event = amd_pmu_raw_event,
Jaswinder Singh Rajputf87ad352009-02-27 20:15:14 +05301385 .max_events = ARRAY_SIZE(amd_perfmon_event_map),
Robert Richter0933e5c2009-04-29 12:47:12 +02001386 .num_counters = 4,
1387 .counter_bits = 48,
1388 .counter_mask = (1ULL << 48) - 1,
Robert Richterc619b8f2009-04-29 12:47:23 +02001389 /* use highest bit to detect overflow */
1390 .max_period = (1ULL << 47) - 1,
Jaswinder Singh Rajputf87ad352009-02-27 20:15:14 +05301391};
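Each descriptor's max_period bounds how large a sampling period can actually be programmed into the hardware: (1<<31)-1 for Intel because of the 32-bit MSR access limitation noted above, (1<<47)-1 for AMD so the top bit stays free for overflow detection. The clamp is applied when a counter is (re)armed; the helper below is only a made-up userspace illustration of that bound, not the kernel's code.

#include <stdio.h>
#include <stdint.h>

static uint64_t clamp_period(uint64_t requested, uint64_t max_period)
{
	return requested > max_period ? max_period : requested;
}

int main(void)
{
	uint64_t intel_max = (1ULL << 31) - 1;	/* from the Intel descriptor */
	uint64_t amd_max   = (1ULL << 47) - 1;	/* from the AMD descriptor   */
	uint64_t want      = 1ULL << 32;	/* illustrative request      */

	printf("intel: %llu -> %llu\n", (unsigned long long)want,
	       (unsigned long long)clamp_period(want, intel_max));
	printf("amd:   %llu -> %llu\n", (unsigned long long)want,
	       (unsigned long long)clamp_period(want, amd_max));
	return 0;
}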
1392
Robert Richter72eae042009-04-29 12:47:10 +02001393static int intel_pmu_init(void)
Ingo Molnar241771e2008-12-03 10:39:53 +01001394{
Ingo Molnar703e9372008-12-17 10:51:15 +01001395 union cpuid10_edx edx;
Ingo Molnar7bb497b2009-03-18 08:59:21 +01001396 union cpuid10_eax eax;
1397 unsigned int unused;
1398 unsigned int ebx;
Robert Richterfaa28ae2009-04-29 12:47:13 +02001399 int version;
Ingo Molnar241771e2008-12-03 10:39:53 +01001400
Robert Richterda1a7762009-04-29 12:46:58 +02001401 if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
Robert Richter72eae042009-04-29 12:47:10 +02001402 return -ENODEV;
Robert Richterda1a7762009-04-29 12:46:58 +02001403
Ingo Molnar241771e2008-12-03 10:39:53 +01001404 /*
 1405	 * Check whether the Architectural PerfMon supports
 1406	 * the Branch Misses Retired event.
1407 */
Ingo Molnar703e9372008-12-17 10:51:15 +01001408 cpuid(10, &eax.full, &ebx, &unused, &edx.full);
Ingo Molnar241771e2008-12-03 10:39:53 +01001409 if (eax.split.mask_length <= ARCH_PERFMON_BRANCH_MISSES_RETIRED)
Robert Richter72eae042009-04-29 12:47:10 +02001410 return -ENODEV;
Ingo Molnar241771e2008-12-03 10:39:53 +01001411
Robert Richterfaa28ae2009-04-29 12:47:13 +02001412 version = eax.split.version_id;
1413 if (version < 2)
Robert Richter72eae042009-04-29 12:47:10 +02001414 return -ENODEV;
Ingo Molnar7bb497b2009-03-18 08:59:21 +01001415
Ingo Molnar1123e3a2009-05-29 11:25:09 +02001416 x86_pmu = intel_pmu;
1417 x86_pmu.version = version;
1418 x86_pmu.num_counters = eax.split.num_counters;
1419 x86_pmu.counter_bits = eax.split.bit_width;
1420 x86_pmu.counter_mask = (1ULL << eax.split.bit_width) - 1;
Ingo Molnar066d7de2009-05-04 19:04:09 +02001421
1422 /*
1423 * Quirk: v2 perfmon does not report fixed-purpose counters, so
1424 * assume at least 3 counters:
1425 */
Ingo Molnar1123e3a2009-05-29 11:25:09 +02001426 x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3);
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05301427
Peter Zijlstra9e35ad32009-05-13 16:21:38 +02001428 rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl);
1429
Ingo Molnar8326f442009-06-05 20:22:46 +02001430 /*
Ingo Molnar1123e3a2009-05-29 11:25:09 +02001431 * Install the hw-cache-events table:
Ingo Molnar8326f442009-06-05 20:22:46 +02001432 */
1433 switch (boot_cpu_data.x86_model) {
Yong Wangdc810812009-06-10 17:06:12 +08001434 case 15: /* original 65 nm celeron/pentium/core2/xeon, "Merom"/"Conroe" */
1435 case 22: /* single-core 65 nm celeron/core2solo "Merom-L"/"Conroe-L" */
1436 case 23: /* current 45 nm celeron/core2/xeon "Penryn"/"Wolfdale" */
1437 case 29: /* six-core 45 nm xeon "Dunnington" */
Ingo Molnar8326f442009-06-05 20:22:46 +02001438 memcpy(hw_cache_event_ids, core2_hw_cache_event_ids,
Thomas Gleixner820a6442009-06-08 19:10:25 +02001439 sizeof(hw_cache_event_ids));
Ingo Molnar8326f442009-06-05 20:22:46 +02001440
Ingo Molnar1123e3a2009-05-29 11:25:09 +02001441 pr_cont("Core2 events, ");
Ingo Molnar8326f442009-06-05 20:22:46 +02001442 break;
1443 default:
1444 case 26:
1445 memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
Thomas Gleixner820a6442009-06-08 19:10:25 +02001446 sizeof(hw_cache_event_ids));
Ingo Molnar8326f442009-06-05 20:22:46 +02001447
Ingo Molnar1123e3a2009-05-29 11:25:09 +02001448 pr_cont("Nehalem/Corei7 events, ");
Ingo Molnar8326f442009-06-05 20:22:46 +02001449 break;
1450 case 28:
1451 memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
Thomas Gleixner820a6442009-06-08 19:10:25 +02001452 sizeof(hw_cache_event_ids));
Ingo Molnar8326f442009-06-05 20:22:46 +02001453
Ingo Molnar1123e3a2009-05-29 11:25:09 +02001454 pr_cont("Atom events, ");
Ingo Molnar8326f442009-06-05 20:22:46 +02001455 break;
1456 }
Robert Richter72eae042009-04-29 12:47:10 +02001457 return 0;
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05301458}
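intel_pmu_init() sizes the PMU entirely from CPUID leaf 0xA: EAX carries the architectural-perfmon version, the number of general-purpose counters and their bit width, and EDX carries the fixed-counter count. The same decode can be tried from user space; a sketch assuming a GCC/Clang toolchain (<cpuid.h>) and an Intel CPU that actually implements the leaf:

#include <stdio.h>
#include <cpuid.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* Leaf 0xA: architectural performance monitoring (Intel). */
	if (!__get_cpuid(0x0a, &eax, &ebx, &ecx, &edx)) {
		printf("CPUID leaf 0xA not available\n");
		return 1;
	}

	printf("arch perfmon version: %u\n", eax & 0xff);
	printf("generic counters:     %u\n", (eax >> 8) & 0xff);
	printf("counter bit width:    %u\n", (eax >> 16) & 0xff);
	printf("fixed counters:       %u\n", edx & 0x1f);
	return 0;
}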
1459
Robert Richter72eae042009-04-29 12:47:10 +02001460static int amd_pmu_init(void)
Jaswinder Singh Rajputf87ad352009-02-27 20:15:14 +05301461{
Jaswinder Singh Rajput4d2be122009-06-11 15:28:09 +05301462 /* Performance-monitoring supported from K7 and later: */
1463 if (boot_cpu_data.x86 < 6)
1464 return -ENODEV;
1465
Robert Richter4a06bd82009-04-29 12:47:11 +02001466 x86_pmu = amd_pmu;
Thomas Gleixnerf86748e2009-06-08 22:33:10 +02001467
Jaswinder Singh Rajputf4db43a2009-06-13 01:06:21 +05301468 /* Events are common for all AMDs */
1469 memcpy(hw_cache_event_ids, amd_hw_cache_event_ids,
1470 sizeof(hw_cache_event_ids));
Thomas Gleixnerf86748e2009-06-08 22:33:10 +02001471
Robert Richter72eae042009-04-29 12:47:10 +02001472 return 0;
Jaswinder Singh Rajputf87ad352009-02-27 20:15:14 +05301473}
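Both init routines copy a complete vendor descriptor into the single x86_pmu global, so the hot paths (interrupt handler, enable/disable) call through one set of function pointers and never branch on the vendor again; init_hw_perf_counters() below is what picks which descriptor to install. A compressed userspace sketch of that dispatch pattern, with a made-up struct and vendor check rather than the kernel's:

#include <stdio.h>
#include <string.h>

struct toy_pmu {
	const char *name;
	void (*enable)(int idx);
	void (*disable)(int idx);
};

static void intel_enable(int idx)  { printf("intel: enable counter %d\n", idx); }
static void intel_disable(int idx) { printf("intel: disable counter %d\n", idx); }
static void amd_enable(int idx)    { printf("amd: enable counter %d\n", idx); }
static void amd_disable(int idx)   { printf("amd: disable counter %d\n", idx); }

static const struct toy_pmu intel_ops = { "Intel", intel_enable, intel_disable };
static const struct toy_pmu amd_ops   = { "AMD",   amd_enable,   amd_disable };

static struct toy_pmu pmu;	/* filled in exactly once at "boot" */

int main(void)
{
	const char *vendor = "AMD";	/* stand-in for the boot CPU vendor */

	pmu = strcmp(vendor, "Intel") ? amd_ops : intel_ops;

	printf("%s PMU driver\n", pmu.name);
	pmu.enable(0);		/* callers never look at the vendor again */
	pmu.disable(0);
	return 0;
}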
1474
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05301475void __init init_hw_perf_counters(void)
1476{
Robert Richter72eae042009-04-29 12:47:10 +02001477 int err;
1478
Ingo Molnar1123e3a2009-05-29 11:25:09 +02001479 pr_info("Performance Counters: ");
1480
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05301481 switch (boot_cpu_data.x86_vendor) {
1482 case X86_VENDOR_INTEL:
Robert Richter72eae042009-04-29 12:47:10 +02001483 err = intel_pmu_init();
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05301484 break;
Jaswinder Singh Rajputf87ad352009-02-27 20:15:14 +05301485 case X86_VENDOR_AMD:
Robert Richter72eae042009-04-29 12:47:10 +02001486 err = amd_pmu_init();
Jaswinder Singh Rajputf87ad352009-02-27 20:15:14 +05301487 break;
Robert Richter41389602009-04-29 12:47:00 +02001488 default:
1489 return;
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05301490 }
Ingo Molnar1123e3a2009-05-29 11:25:09 +02001491 if (err != 0) {
1492 pr_cont("no PMU driver, software counters only.\n");
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05301493 return;
Ingo Molnar1123e3a2009-05-29 11:25:09 +02001494 }
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05301495
Ingo Molnar1123e3a2009-05-29 11:25:09 +02001496 pr_cont("%s PMU driver.\n", x86_pmu.name);
Robert Richterfaa28ae2009-04-29 12:47:13 +02001497
Robert Richter0933e5c2009-04-29 12:47:12 +02001498 if (x86_pmu.num_counters > X86_PMC_MAX_GENERIC) {
1499 x86_pmu.num_counters = X86_PMC_MAX_GENERIC;
Ingo Molnar241771e2008-12-03 10:39:53 +01001500 WARN(1, KERN_ERR "hw perf counters %d > max(%d), clipping!",
Robert Richter0933e5c2009-04-29 12:47:12 +02001501 x86_pmu.num_counters, X86_PMC_MAX_GENERIC);
Ingo Molnar241771e2008-12-03 10:39:53 +01001502 }
Robert Richter0933e5c2009-04-29 12:47:12 +02001503 perf_counter_mask = (1 << x86_pmu.num_counters) - 1;
1504 perf_max_counters = x86_pmu.num_counters;
Ingo Molnar241771e2008-12-03 10:39:53 +01001505
Robert Richter0933e5c2009-04-29 12:47:12 +02001506 if (x86_pmu.num_counters_fixed > X86_PMC_MAX_FIXED) {
1507 x86_pmu.num_counters_fixed = X86_PMC_MAX_FIXED;
Ingo Molnar703e9372008-12-17 10:51:15 +01001508 WARN(1, KERN_ERR "hw perf counters fixed %d > max(%d), clipping!",
Robert Richter0933e5c2009-04-29 12:47:12 +02001509 x86_pmu.num_counters_fixed, X86_PMC_MAX_FIXED);
Ingo Molnar703e9372008-12-17 10:51:15 +01001510 }
Ingo Molnar241771e2008-12-03 10:39:53 +01001511
Robert Richter0933e5c2009-04-29 12:47:12 +02001512 perf_counter_mask |=
1513 ((1LL << x86_pmu.num_counters_fixed)-1) << X86_PMC_IDX_FIXED;
Ingo Molnar862a1a52008-12-17 13:09:20 +01001514
Yong Wangc323d952009-05-29 13:28:35 +08001515 perf_counters_lapic_init();
Ingo Molnar241771e2008-12-03 10:39:53 +01001516 register_die_notifier(&perf_counter_nmi_notifier);
Ingo Molnar1123e3a2009-05-29 11:25:09 +02001517
1518 pr_info("... version: %d\n", x86_pmu.version);
1519 pr_info("... bit width: %d\n", x86_pmu.counter_bits);
1520 pr_info("... generic counters: %d\n", x86_pmu.num_counters);
1521 pr_info("... value mask: %016Lx\n", x86_pmu.counter_mask);
1522 pr_info("... max period: %016Lx\n", x86_pmu.max_period);
1523 pr_info("... fixed-purpose counters: %d\n", x86_pmu.num_counters_fixed);
1524 pr_info("... counter mask: %016Lx\n", perf_counter_mask);
Ingo Molnar241771e2008-12-03 10:39:53 +01001525}
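perf_counter_mask packs the generic counters into the low bits and the fixed-purpose counters starting at index X86_PMC_IDX_FIXED, so one 64-bit word describes every counter the PMU owns. A worked example of that arithmetic, assuming X86_PMC_IDX_FIXED is 32 and using illustrative counts of 4 generic and 3 fixed counters:

#include <stdio.h>
#include <stdint.h>

#define X86_PMC_IDX_FIXED	32	/* assumed value of the kernel constant */

int main(void)
{
	int num_counters = 4;		/* illustrative */
	int num_counters_fixed = 3;	/* illustrative */
	uint64_t mask;

	mask  = (1ULL << num_counters) - 1;			/* bits 0-3   */
	mask |= ((1ULL << num_counters_fixed) - 1)
			<< X86_PMC_IDX_FIXED;			/* bits 32-34 */

	printf("perf_counter_mask = %#llx\n", (unsigned long long)mask);
	/* -> 0x70000000f */
	return 0;
}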
Ingo Molnar621a01e2008-12-11 12:46:46 +01001526
Robert Richterbb775fc2009-04-29 12:47:14 +02001527static inline void x86_pmu_read(struct perf_counter *counter)
Ingo Molnaree060942008-12-13 09:00:03 +01001528{
1529 x86_perf_counter_update(counter, &counter->hw, counter->hw.idx);
1530}
1531
Robert Richter4aeb0b42009-04-29 12:47:03 +02001532static const struct pmu pmu = {
1533 .enable = x86_pmu_enable,
1534 .disable = x86_pmu_disable,
1535 .read = x86_pmu_read,
Peter Zijlstraa78ac322009-05-25 17:39:05 +02001536 .unthrottle = x86_pmu_unthrottle,
Ingo Molnar621a01e2008-12-11 12:46:46 +01001537};
1538
Robert Richter4aeb0b42009-04-29 12:47:03 +02001539const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
Ingo Molnar621a01e2008-12-11 12:46:46 +01001540{
1541 int err;
1542
1543 err = __hw_perf_counter_init(counter);
1544 if (err)
Peter Zijlstra9ea98e12009-03-30 19:07:09 +02001545 return ERR_PTR(err);
Ingo Molnar621a01e2008-12-11 12:46:46 +01001546
Robert Richter4aeb0b42009-04-29 12:47:03 +02001547 return &pmu;
Ingo Molnar621a01e2008-12-11 12:46:46 +01001548}
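hw_perf_counter_init() reports failure with ERR_PTR(): the negative errno is encoded in the returned pointer itself and the caller recovers it with IS_ERR()/PTR_ERR(). A self-contained userspace sketch of that convention; the helpers mimic the kernel's err.h and the init function is invented for the example.

#include <stdio.h>
#include <errno.h>

#define MAX_ERRNO	4095

static inline void *ERR_PTR(long error)	{ return (void *)error; }
static inline long PTR_ERR(const void *ptr)	{ return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static void *init_counter(int fail)	/* made-up stand-in for the real init */
{
	static int dummy_pmu;

	if (fail)
		return ERR_PTR(-ENODEV);	/* no hardware support */
	return &dummy_pmu;
}

int main(void)
{
	void *p = init_counter(1);

	if (IS_ERR(p))
		printf("init failed: errno %ld\n", -PTR_ERR(p));
	else
		printf("got pmu at %p\n", p);
	return 0;
}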
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001549
1550/*
1551 * callchain support
1552 */
1553
1554static inline
1555void callchain_store(struct perf_callchain_entry *entry, unsigned long ip)
1556{
1557 if (entry->nr < MAX_STACK_DEPTH)
1558 entry->ip[entry->nr++] = ip;
1559}
1560
1561static DEFINE_PER_CPU(struct perf_callchain_entry, irq_entry);
1562static DEFINE_PER_CPU(struct perf_callchain_entry, nmi_entry);
1563
1564
1565static void
1566backtrace_warning_symbol(void *data, char *msg, unsigned long symbol)
1567{
1568 /* Ignore warnings */
1569}
1570
1571static void backtrace_warning(void *data, char *msg)
1572{
1573 /* Ignore warnings */
1574}
1575
1576static int backtrace_stack(void *data, char *name)
1577{
Ingo Molnar038e8362009-06-15 09:57:59 +02001578 /* Process all stacks: */
1579 return 0;
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001580}
1581
1582static void backtrace_address(void *data, unsigned long addr, int reliable)
1583{
1584 struct perf_callchain_entry *entry = data;
1585
1586 if (reliable)
1587 callchain_store(entry, addr);
1588}
1589
1590static const struct stacktrace_ops backtrace_ops = {
1591 .warning = backtrace_warning,
1592 .warning_symbol = backtrace_warning_symbol,
1593 .stack = backtrace_stack,
1594 .address = backtrace_address,
1595};
1596
Ingo Molnar038e8362009-06-15 09:57:59 +02001597#include "../dumpstack.h"
1598
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001599static void
1600perf_callchain_kernel(struct pt_regs *regs, struct perf_callchain_entry *entry)
1601{
1602 unsigned long bp;
1603 char *stack;
Peter Zijlstra5872bdb82009-04-02 11:12:03 +02001604 int nr = entry->nr;
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001605
Ingo Molnar038e8362009-06-15 09:57:59 +02001606 callchain_store(entry, regs->ip);
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001607
1608 stack = ((char *)regs + sizeof(struct pt_regs));
1609#ifdef CONFIG_FRAME_POINTER
Ingo Molnar038e8362009-06-15 09:57:59 +02001610 get_bp(bp);
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001611#else
1612 bp = 0;
1613#endif
1614
Ingo Molnar038e8362009-06-15 09:57:59 +02001615 dump_trace(NULL, regs, (void *)&stack, bp, &backtrace_ops, entry);
Peter Zijlstra5872bdb82009-04-02 11:12:03 +02001616
1617 entry->kernel = entry->nr - nr;
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001618}
1619
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001620static int copy_stack_frame(const void __user *fp, struct stack_frame *frame)
1621{
1622 int ret;
1623
1624 if (!access_ok(VERIFY_READ, fp, sizeof(*frame)))
1625 return 0;
1626
1627 ret = 1;
1628 pagefault_disable();
1629 if (__copy_from_user_inatomic(frame, fp, sizeof(*frame)))
1630 ret = 0;
1631 pagefault_enable();
1632
1633 return ret;
1634}
1635
1636static void
1637perf_callchain_user(struct pt_regs *regs, struct perf_callchain_entry *entry)
1638{
1639 struct stack_frame frame;
1640 const void __user *fp;
Peter Zijlstra5872bdb82009-04-02 11:12:03 +02001641 int nr = entry->nr;
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001642
Ingo Molnar5a6cec32009-05-29 11:25:09 +02001643 if (!user_mode(regs))
1644 regs = task_pt_regs(current);
1645
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001646 fp = (void __user *)regs->bp;
1647
1648 callchain_store(entry, regs->ip);
1649
1650 while (entry->nr < MAX_STACK_DEPTH) {
Ingo Molnar038e8362009-06-15 09:57:59 +02001651 frame.next_frame = NULL;
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001652 frame.return_address = 0;
1653
1654 if (!copy_stack_frame(fp, &frame))
1655 break;
1656
Ingo Molnar5a6cec32009-05-29 11:25:09 +02001657 if ((unsigned long)fp < regs->sp)
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001658 break;
1659
1660 callchain_store(entry, frame.return_address);
Ingo Molnar038e8362009-06-15 09:57:59 +02001661 fp = frame.next_frame;
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001662 }
Peter Zijlstra5872bdb82009-04-02 11:12:03 +02001663
1664 entry->user = entry->nr - nr;
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001665}
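The user-space walk above is a plain frame-pointer chase: each frame starts with the saved frame pointer followed by the return address, and the walk stops when a frame cannot be copied or points below the current stack pointer. The same walk can be attempted on one's own process; a sketch assuming x86-64 and a binary built with -fno-omit-frame-pointer (frame layout and depth limit are assumptions of the example):

#include <stdio.h>
#include <stdint.h>

struct stack_frame {
	struct stack_frame *next_frame;
	unsigned long return_address;
};

static void __attribute__((noinline)) dump_callchain(void)
{
	struct stack_frame *fp = __builtin_frame_address(0);
	int depth = 0;

	while (fp && depth < 16) {
		printf("#%d  return address %#lx\n", depth++, fp->return_address);
		/* Stacks grow down: a valid caller frame lives at a higher address. */
		if ((uintptr_t)fp->next_frame <= (uintptr_t)fp)
			break;
		fp = fp->next_frame;
	}
}

static void __attribute__((noinline)) level2(void) { dump_callchain(); }
static void __attribute__((noinline)) level1(void) { level2(); }

int main(void)
{
	level1();
	return 0;
}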
1666
1667static void
1668perf_do_callchain(struct pt_regs *regs, struct perf_callchain_entry *entry)
1669{
1670 int is_user;
1671
1672 if (!regs)
1673 return;
1674
1675 is_user = user_mode(regs);
1676
1677 if (!current || current->pid == 0)
1678 return;
1679
1680 if (is_user && current->state != TASK_RUNNING)
1681 return;
1682
1683 if (!is_user)
1684 perf_callchain_kernel(regs, entry);
1685
1686 if (current->mm)
1687 perf_callchain_user(regs, entry);
1688}
1689
1690struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
1691{
1692 struct perf_callchain_entry *entry;
1693
1694 if (in_nmi())
1695 entry = &__get_cpu_var(nmi_entry);
1696 else
1697 entry = &__get_cpu_var(irq_entry);
1698
1699 entry->nr = 0;
Peter Zijlstra5872bdb82009-04-02 11:12:03 +02001700 entry->hv = 0;
1701 entry->kernel = 0;
1702 entry->user = 0;
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001703
1704 perf_do_callchain(regs, entry);
1705
1706 return entry;
1707}