#ifdef CONFIG_CPU_SUP_INTEL

/*
 * Intel PerfMon, used on Core and later.
 */
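/*
 * Each entry below is a raw PERFEVTSEL encoding: the low byte is the event
 * select and the next byte the unit mask, e.g. 0x412e is event 0x2e with
 * umask 0x41 (LLC misses).
 */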
static const u64 intel_perfmon_event_map[] =
{
	[PERF_COUNT_HW_CPU_CYCLES]		= 0x003c,
	[PERF_COUNT_HW_INSTRUCTIONS]		= 0x00c0,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= 0x4f2e,
	[PERF_COUNT_HW_CACHE_MISSES]		= 0x412e,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x00c4,
	[PERF_COUNT_HW_BRANCH_MISSES]		= 0x00c5,
	[PERF_COUNT_HW_BUS_CYCLES]		= 0x013c,
};

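/*
 * In the constraint tables below, INTEL_EVENT_CONSTRAINT(event, mask)
 * restricts an event to the general-purpose counters named in the counter
 * bitmask, and FIXED_EVENT_CONSTRAINT(event, idx) pins an event to fixed
 * counter 'idx'.
 */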
static struct event_constraint intel_core_event_constraints[] =
{
	INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
	INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
	INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
	INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
	INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
	INTEL_EVENT_CONSTRAINT(0xc1, 0x1), /* FP_COMP_INSTR_RET */
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_core2_event_constraints[] =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	/*
	 * Core2 has Fixed Counter 2 listed as CPU_CLK_UNHALTED.REF and event
	 * 0x013c as CPU_CLK_UNHALTED.BUS and specifies there is a fixed
	 * ratio between these counters.
	 */
	/* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
	INTEL_EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */
	INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
	INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
	INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
	INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
	INTEL_EVENT_CONSTRAINT(0x18, 0x1), /* IDLE_DURING_DIV */
	INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
	INTEL_EVENT_CONSTRAINT(0xa1, 0x1), /* RS_UOPS_DISPATCH_CYCLES */
	INTEL_EVENT_CONSTRAINT(0xc9, 0x1), /* ITLB_MISS_RETIRED (T30-9) */
	INTEL_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED */
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_nehalem_event_constraints[] =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	/* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
	INTEL_EVENT_CONSTRAINT(0x40, 0x3), /* L1D_CACHE_LD */
	INTEL_EVENT_CONSTRAINT(0x41, 0x3), /* L1D_CACHE_ST */
	INTEL_EVENT_CONSTRAINT(0x42, 0x3), /* L1D_CACHE_LOCK */
	INTEL_EVENT_CONSTRAINT(0x43, 0x3), /* L1D_ALL_REF */
	INTEL_EVENT_CONSTRAINT(0x48, 0x3), /* L1D_PEND_MISS */
	INTEL_EVENT_CONSTRAINT(0x4e, 0x3), /* L1D_PREFETCH */
	INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
	INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_westmere_event_constraints[] =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	/* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
	INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
	INTEL_EVENT_CONSTRAINT(0x60, 0x1), /* OFFCORE_REQUESTS_OUTSTANDING */
	INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
	INTEL_EVENT_CONSTRAINT(0xb3, 0x1), /* SNOOPQ_REQUEST_OUTSTANDING */
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_gen_event_constraints[] =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	/* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
	EVENT_CONSTRAINT_END
};

static u64 intel_pmu_event_map(int hw_event)
{
	return intel_perfmon_event_map[hw_event];
}

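/*
 * In the hw_cache_event_ids tables below, -1 marks an op/result combination
 * the hardware cannot count at all, while 0 marks one for which no suitable
 * event is wired up.
 */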
static __initconst const u64 westmere_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
		[ C(RESULT_MISS) ] = 0x0151, /* L1D.REPL */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
		[ C(RESULT_MISS) ] = 0x0251, /* L1D.M_REPL */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
		[ C(RESULT_MISS) ] = 0x024e, /* L1D_PREFETCH.MISS */
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
		[ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
 [ C(LL ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0324, /* L2_RQSTS.LOADS */
		[ C(RESULT_MISS) ] = 0x0224, /* L2_RQSTS.LD_MISS */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0c24, /* L2_RQSTS.RFOS */
		[ C(RESULT_MISS) ] = 0x0824, /* L2_RQSTS.RFO_MISS */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f2e, /* LLC Reference */
		[ C(RESULT_MISS) ] = 0x412e, /* LLC Misses */
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
		[ C(RESULT_MISS) ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
		[ C(RESULT_MISS) ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS) ] = 0x0185, /* ITLB_MISSES.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
		[ C(RESULT_MISS) ] = 0x03e8, /* BPU_CLEARS.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
};

static __initconst const u64 nehalem_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI */
		[ C(RESULT_MISS) ] = 0x0140, /* L1D_CACHE_LD.I_STATE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI */
		[ C(RESULT_MISS) ] = 0x0141, /* L1D_CACHE_ST.I_STATE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
		[ C(RESULT_MISS) ] = 0x024e, /* L1D_PREFETCH.MISS */
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
		[ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
 [ C(LL ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0324, /* L2_RQSTS.LOADS */
		[ C(RESULT_MISS) ] = 0x0224, /* L2_RQSTS.LD_MISS */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0c24, /* L2_RQSTS.RFOS */
		[ C(RESULT_MISS) ] = 0x0824, /* L2_RQSTS.RFO_MISS */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f2e, /* LLC Reference */
		[ C(RESULT_MISS) ] = 0x412e, /* LLC Misses */
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
		[ C(RESULT_MISS) ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
		[ C(RESULT_MISS) ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS) ] = 0x20c8, /* ITLB_MISS_RETIRED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
		[ C(RESULT_MISS) ] = 0x03e8, /* BPU_CLEARS.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
};

static __initconst const u64 core2_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI */
		[ C(RESULT_MISS) ] = 0x0140, /* L1D_CACHE_LD.I_STATE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI */
		[ C(RESULT_MISS) ] = 0x0141, /* L1D_CACHE_ST.I_STATE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x104e, /* L1D_PREFETCH.REQUESTS */
		[ C(RESULT_MISS) ] = 0,
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0080, /* L1I.READS */
		[ C(RESULT_MISS) ] = 0x0081, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS) ] = 0,
	},
 },
 [ C(LL ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
		[ C(RESULT_MISS) ] = 0x4129, /* L2_LD.ISTATE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
		[ C(RESULT_MISS) ] = 0x412A, /* L2_ST.ISTATE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS) ] = 0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
		[ C(RESULT_MISS) ] = 0x0208, /* DTLB_MISSES.MISS_LD */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
		[ C(RESULT_MISS) ] = 0x0808, /* DTLB_MISSES.MISS_ST */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS) ] = 0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS) ] = 0x1282, /* ITLBMISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
		[ C(RESULT_MISS) ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
};

static __initconst const u64 atom_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE.LD */
		[ C(RESULT_MISS) ] = 0,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE.ST */
		[ C(RESULT_MISS) ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0,
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
		[ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS) ] = 0,
	},
 },
 [ C(LL ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
		[ C(RESULT_MISS) ] = 0x4129, /* L2_LD.ISTATE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
		[ C(RESULT_MISS) ] = 0x412A, /* L2_ST.ISTATE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS) ] = 0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE_LD.MESI (alias) */
		[ C(RESULT_MISS) ] = 0x0508, /* DTLB_MISSES.MISS_LD */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE_ST.MESI (alias) */
		[ C(RESULT_MISS) ] = 0x0608, /* DTLB_MISSES.MISS_ST */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS) ] = 0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS) ] = 0x0282, /* ITLB.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
		[ C(RESULT_MISS) ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
};

static void intel_pmu_disable_all(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

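	/*
	 * A single write of 0 to MSR_CORE_PERF_GLOBAL_CTRL gates every fixed
	 * and general-purpose counter at once.
	 */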
	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);

	if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask))
		intel_pmu_disable_bts();

	intel_pmu_pebs_disable_all();
	intel_pmu_lbr_disable_all();
}

static void intel_pmu_enable_all(int added)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	intel_pmu_pebs_enable_all();
	intel_pmu_lbr_enable_all();
	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl);

	if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
		struct perf_event *event =
			cpuc->events[X86_PMC_IDX_FIXED_BTS];

		if (WARN_ON_ONCE(!event))
			return;

		intel_pmu_enable_bts(event->hw.config);
	}
}

/*
 * Workaround for:
 *   Intel Errata AAK100 (model 26)
 *   Intel Errata AAP53  (model 30)
 *   Intel Errata BD53   (model 44)
 *
 * The official story:
 *   These chips need to be 'reset' when adding counters by programming the
 *   magic three (non-counting) events 0x4300B5, 0x4300D2, and 0x4300B1 either
 *   in sequence on the same PMC or on different PMCs.
 *
 * In practice it appears some of these events do in fact count, and
 * we need to program all 4 events.
 */
static void intel_pmu_nhm_workaround(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	static const unsigned long nhm_magic[4] = {
		0x4300B5,
		0x4300D2,
		0x4300B1,
		0x4300B1
	};
	struct perf_event *event;
	int i;

	/*
	 * The errata requires the following steps:
	 * 1) Clear MSR_IA32_PEBS_ENABLE and MSR_CORE_PERF_GLOBAL_CTRL;
	 * 2) Configure 4 PERFEVTSELx with the magic events and clear
	 *    the corresponding PMCx;
	 * 3) set bit0~bit3 of MSR_CORE_PERF_GLOBAL_CTRL;
	 * 4) Clear MSR_CORE_PERF_GLOBAL_CTRL;
	 * 5) Clear 4 pairs of PERFEVTSELx and PMCx;
	 */

	/*
	 * The real steps we choose are a little different from above.
	 * A) To reduce MSR operations, we don't run step 1) as those MSRs
	 *    are already cleared before this function is called;
	 * B) Call x86_perf_event_update to save PMCx before configuring
	 *    PERFEVTSELx with the magic number;
	 * C) With step 5), we do clear only when the PERFEVTSELx is
	 *    not used currently.
	 * D) Call x86_perf_event_set_period to restore PMCx;
	 */

	/* We always operate on 4 pairs of PERF counters */
	for (i = 0; i < 4; i++) {
		event = cpuc->events[i];
		if (event)
			x86_perf_event_update(event);
	}

	for (i = 0; i < 4; i++) {
		wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, nhm_magic[i]);
		wrmsrl(MSR_ARCH_PERFMON_PERFCTR0 + i, 0x0);
	}

	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0xf);
	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0x0);

	for (i = 0; i < 4; i++) {
		event = cpuc->events[i];

		if (event) {
			x86_perf_event_set_period(event);
			__x86_pmu_enable_event(&event->hw,
					ARCH_PERFMON_EVENTSEL_ENABLE);
		} else
			wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, 0x0);
	}
}

static void intel_pmu_nhm_enable_all(int added)
{
	if (added)
		intel_pmu_nhm_workaround();
	intel_pmu_enable_all(added);
}

static inline u64 intel_pmu_get_status(void)
{
	u64 status;

	rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);

	return status;
}

static inline void intel_pmu_ack_status(u64 ack)
{
	wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
}

static void intel_pmu_disable_fixed(struct hw_perf_event *hwc)
{
	int idx = hwc->idx - X86_PMC_IDX_FIXED;
	u64 ctrl_val, mask;

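	/*
	 * Each fixed counter owns a 4-bit control field in
	 * MSR_ARCH_PERFMON_FIXED_CTR_CTRL (hwc->config_base here); clearing
	 * that nibble stops the counter.
	 */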
	mask = 0xfULL << (idx * 4);

	rdmsrl(hwc->config_base, ctrl_val);
	ctrl_val &= ~mask;
	wrmsrl(hwc->config_base, ctrl_val);
}

static void intel_pmu_disable_event(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (unlikely(hwc->idx == X86_PMC_IDX_FIXED_BTS)) {
		intel_pmu_disable_bts();
		intel_pmu_drain_bts_buffer();
		return;
	}

	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
		intel_pmu_disable_fixed(hwc);
		return;
	}

	x86_pmu_disable_event(event);

	if (unlikely(event->attr.precise_ip))
		intel_pmu_pebs_disable(event);
}

static void intel_pmu_enable_fixed(struct hw_perf_event *hwc)
{
	int idx = hwc->idx - X86_PMC_IDX_FIXED;
	u64 ctrl_val, bits, mask;

	/*
	 * Enable IRQ generation (0x8),
	 * and enable ring-3 counting (0x2) and ring-0 counting (0x1)
	 * if requested:
	 */
	bits = 0x8ULL;
	if (hwc->config & ARCH_PERFMON_EVENTSEL_USR)
		bits |= 0x2;
	if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
		bits |= 0x1;

	/*
	 * ANY bit is supported in v3 and up
	 */
	if (x86_pmu.version > 2 && hwc->config & ARCH_PERFMON_EVENTSEL_ANY)
		bits |= 0x4;

	bits <<= (idx * 4);
	mask = 0xfULL << (idx * 4);

	rdmsrl(hwc->config_base, ctrl_val);
	ctrl_val &= ~mask;
	ctrl_val |= bits;
	wrmsrl(hwc->config_base, ctrl_val);
}

static void intel_pmu_enable_event(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (unlikely(hwc->idx == X86_PMC_IDX_FIXED_BTS)) {
		if (!__get_cpu_var(cpu_hw_events).enabled)
			return;

		intel_pmu_enable_bts(hwc->config);
		return;
	}

	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
		intel_pmu_enable_fixed(hwc);
		return;
	}

	if (unlikely(event->attr.precise_ip))
		intel_pmu_pebs_enable(event);

	__x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
}

/*
 * Save and restart an expired event. Called by NMI contexts,
 * so it has to be careful about preempting normal event ops:
 */
static int intel_pmu_save_and_restart(struct perf_event *event)
{
	x86_perf_event_update(event);
	return x86_perf_event_set_period(event);
}

static void intel_pmu_reset(void)
{
	struct debug_store *ds = __get_cpu_var(cpu_hw_events).ds;
	unsigned long flags;
	int idx;

	if (!x86_pmu.num_counters)
		return;

	local_irq_save(flags);

	printk("clearing PMU state on CPU#%d\n", smp_processor_id());

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		checking_wrmsrl(x86_pmu.eventsel + idx, 0ull);
		checking_wrmsrl(x86_pmu.perfctr + idx, 0ull);
	}
	for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++)
		checking_wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);

	if (ds)
		ds->bts_index = ds->bts_buffer_base;

	local_irq_restore(flags);
}

/*
 * This handler is triggered by the local APIC, so the APIC IRQ handling
 * rules apply:
 */
static int intel_pmu_handle_irq(struct pt_regs *regs)
{
	struct perf_sample_data data;
	struct cpu_hw_events *cpuc;
	int bit, loops;
	u64 status;
	int handled = 0;

	perf_sample_data_init(&data, 0);

	cpuc = &__get_cpu_var(cpu_hw_events);

	intel_pmu_disable_all();
	intel_pmu_drain_bts_buffer();
	status = intel_pmu_get_status();
	if (!status) {
		intel_pmu_enable_all(0);
		return 0;
	}

	loops = 0;
again:
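	/*
	 * Writing the bits back to GLOBAL_OVF_CTRL acks (clears) them in
	 * GLOBAL_STATUS, so overflows that arrive while we handle this batch
	 * show up in the re-read at the bottom of the loop.
	 */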
	intel_pmu_ack_status(status);
	if (++loops > 100) {
		WARN_ONCE(1, "perfevents: irq loop stuck!\n");
		perf_event_print_debug();
		intel_pmu_reset();
		goto done;
	}

	inc_irq_stat(apic_perf_irqs);

	intel_pmu_lbr_read();

	/*
	 * PEBS overflow sets bit 62 in the global status register
	 */
	if (__test_and_clear_bit(62, (unsigned long *)&status)) {
		handled++;
		x86_pmu.drain_pebs(regs);
	}

	for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
		struct perf_event *event = cpuc->events[bit];

		handled++;

		if (!test_bit(bit, cpuc->active_mask))
			continue;

		if (!intel_pmu_save_and_restart(event))
			continue;

		data.period = event->hw.last_period;

		if (perf_event_overflow(event, 1, &data, regs))
			x86_pmu_stop(event, 0);
	}

	/*
	 * Repeat if there is more work to be done:
	 */
	status = intel_pmu_get_status();
	if (status)
		goto again;

done:
	intel_pmu_enable_all(0);
	return handled;
}

static struct event_constraint *
intel_bts_constraints(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	unsigned int hw_event, bts_event;

	hw_event = hwc->config & INTEL_ARCH_EVENT_MASK;
	bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS);

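	/*
	 * A branch-instructions event requested with a sample period of 1 is
	 * what the core code uses to ask for BTS, so route it to the BTS
	 * pseudo-counter constraint.
	 */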
	if (unlikely(hw_event == bts_event && hwc->sample_period == 1))
		return &bts_constraint;

	return NULL;
}

static struct event_constraint *
intel_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
{
	struct event_constraint *c;

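	/* BTS and PEBS constraints take precedence over the per-model tables. */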
	c = intel_bts_constraints(event);
	if (c)
		return c;

	c = intel_pebs_constraints(event);
	if (c)
		return c;

	return x86_get_event_constraints(cpuc, event);
}

static int intel_pmu_hw_config(struct perf_event *event)
{
	int ret = x86_pmu_hw_config(event);

	if (ret)
		return ret;

	if (event->attr.type != PERF_TYPE_RAW)
		return 0;

	if (!(event->attr.config & ARCH_PERFMON_EVENTSEL_ANY))
		return 0;

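	/*
	 * The ANY bit makes a counter count for both hyperthreads of a core,
	 * which can expose sibling activity, so it is only honoured for
	 * privileged users and only exists from arch perfmon v3 on.
	 */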
	if (x86_pmu.version < 3)
		return -EINVAL;

	if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
		return -EACCES;

	event->hw.config |= ARCH_PERFMON_EVENTSEL_ANY;

	return 0;
}

static __initconst const struct x86_pmu core_pmu = {
	.name			= "core",
	.handle_irq		= x86_pmu_handle_irq,
	.disable_all		= x86_pmu_disable_all,
	.enable_all		= x86_pmu_enable_all,
	.enable			= x86_pmu_enable_event,
	.disable		= x86_pmu_disable_event,
	.hw_config		= x86_pmu_hw_config,
	.schedule_events	= x86_schedule_events,
	.eventsel		= MSR_ARCH_PERFMON_EVENTSEL0,
	.perfctr		= MSR_ARCH_PERFMON_PERFCTR0,
	.event_map		= intel_pmu_event_map,
	.max_events		= ARRAY_SIZE(intel_perfmon_event_map),
	.apic			= 1,
	/*
	 * Intel PMCs cannot be accessed sanely above 32 bit width,
	 * so we install an artificial 1<<31 period regardless of
	 * the generic event period:
	 */
	.max_period		= (1ULL << 31) - 1,
	.get_event_constraints	= intel_get_event_constraints,
	.event_constraints	= intel_core_event_constraints,
};

static void intel_pmu_cpu_starting(int cpu)
{
	init_debug_store_on_cpu(cpu);
	/*
	 * Deal with CPUs that don't clear their LBRs on power-up.
	 */
	intel_pmu_lbr_reset();
}

static void intel_pmu_cpu_dying(int cpu)
{
	fini_debug_store_on_cpu(cpu);
}

static __initconst const struct x86_pmu intel_pmu = {
	.name			= "Intel",
	.handle_irq		= intel_pmu_handle_irq,
	.disable_all		= intel_pmu_disable_all,
	.enable_all		= intel_pmu_enable_all,
	.enable			= intel_pmu_enable_event,
	.disable		= intel_pmu_disable_event,
	.hw_config		= intel_pmu_hw_config,
	.schedule_events	= x86_schedule_events,
	.eventsel		= MSR_ARCH_PERFMON_EVENTSEL0,
	.perfctr		= MSR_ARCH_PERFMON_PERFCTR0,
	.event_map		= intel_pmu_event_map,
	.max_events		= ARRAY_SIZE(intel_perfmon_event_map),
	.apic			= 1,
	/*
	 * Intel PMCs cannot be accessed sanely above 32 bit width,
	 * so we install an artificial 1<<31 period regardless of
	 * the generic event period:
	 */
	.max_period		= (1ULL << 31) - 1,
	.get_event_constraints	= intel_get_event_constraints,

	.cpu_starting		= intel_pmu_cpu_starting,
	.cpu_dying		= intel_pmu_cpu_dying,
};

static void intel_clovertown_quirks(void)
{
	/*
	 * PEBS is unreliable due to:
	 *
	 *   AJ67  - PEBS may experience CPL leaks
	 *   AJ68  - PEBS PMI may be delayed by one event
	 *   AJ69  - GLOBAL_STATUS[62] will only be set when DEBUGCTL[12]
	 *   AJ106 - FREEZE_LBRS_ON_PMI doesn't work in combination with PEBS
	 *
	 * AJ67 could be worked around by restricting the OS/USR flags.
	 * AJ69 could be worked around by setting PMU_FREEZE_ON_PMI.
	 *
	 * AJ106 could possibly be worked around by not allowing LBR
	 *       usage from PEBS, including the fixup.
	 * AJ68  could possibly be worked around by always programming
	 *       a pebs_event_reset[0] value and coping with the lost events.
	 *
	 * But taken together it might just make sense to not enable PEBS on
	 * these chips.
	 */
	printk(KERN_WARNING "PEBS disabled due to CPU errata.\n");
	x86_pmu.pebs = 0;
	x86_pmu.pebs_constraints = NULL;
}

static __init int intel_pmu_init(void)
{
	union cpuid10_edx edx;
	union cpuid10_eax eax;
	unsigned int unused;
	unsigned int ebx;
	int version;

	if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
		switch (boot_cpu_data.x86) {
		case 0x6:
			return p6_pmu_init();
		case 0xf:
			return p4_pmu_init();
		}
		return -ENODEV;
	}

	/*
	 * Check whether the Architectural PerfMon supports
	 * Branch Misses Retired hw_event or not.
	 */
	cpuid(10, &eax.full, &ebx, &unused, &edx.full);
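	/*
	 * CPUID leaf 0xa: eax describes the version and counter geometry,
	 * ebx flags architectural events that are unavailable, and edx
	 * describes the fixed-function counters.
	 */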
	if (eax.split.mask_length <= ARCH_PERFMON_BRANCH_MISSES_RETIRED)
		return -ENODEV;

	version = eax.split.version_id;
	if (version < 2)
		x86_pmu = core_pmu;
	else
		x86_pmu = intel_pmu;

	x86_pmu.version			= version;
	x86_pmu.num_counters		= eax.split.num_counters;
	x86_pmu.cntval_bits		= eax.split.bit_width;
	x86_pmu.cntval_mask		= (1ULL << eax.split.bit_width) - 1;

	/*
	 * Quirk: v2 perfmon does not report fixed-purpose events, so
	 * assume at least 3 events:
	 */
	if (version > 1)
		x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3);

	/*
	 * v2 and above have a perf capabilities MSR
	 */
	if (version > 1) {
		u64 capabilities;

		rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
		x86_pmu.intel_cap.capabilities = capabilities;
	}

	intel_ds_init();

	/*
	 * Install the hw-cache-events table:
	 */
	switch (boot_cpu_data.x86_model) {
	case 14: /* 65 nm core solo/duo, "Yonah" */
		pr_cont("Core events, ");
		break;

	case 15: /* original 65 nm celeron/pentium/core2/xeon, "Merom"/"Conroe" */
		x86_pmu.quirks = intel_clovertown_quirks;
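		/* fall through: these models share the Core2 tables below */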
	case 22: /* single-core 65 nm celeron/core2solo "Merom-L"/"Conroe-L" */
	case 23: /* current 45 nm celeron/core2/xeon "Penryn"/"Wolfdale" */
	case 29: /* six-core 45 nm xeon "Dunnington" */
		memcpy(hw_cache_event_ids, core2_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));

		intel_pmu_lbr_init_core();

		x86_pmu.event_constraints = intel_core2_event_constraints;
		pr_cont("Core2 events, ");
		break;

	case 26: /* 45 nm nehalem, "Bloomfield" */
	case 30: /* 45 nm nehalem, "Lynnfield" */
	case 46: /* 45 nm nehalem-ex, "Beckton" */
		memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));

		intel_pmu_lbr_init_nhm();

		x86_pmu.event_constraints = intel_nehalem_event_constraints;
		x86_pmu.enable_all = intel_pmu_nhm_enable_all;
		pr_cont("Nehalem events, ");
		break;

	case 28: /* Atom */
		memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));

		intel_pmu_lbr_init_atom();

		x86_pmu.event_constraints = intel_gen_event_constraints;
		pr_cont("Atom events, ");
		break;

	case 37: /* 32 nm nehalem, "Clarkdale" */
	case 44: /* 32 nm nehalem, "Gulftown" */
		memcpy(hw_cache_event_ids, westmere_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));

		intel_pmu_lbr_init_nhm();

		x86_pmu.event_constraints = intel_westmere_event_constraints;
		x86_pmu.enable_all = intel_pmu_nhm_enable_all;
		pr_cont("Westmere events, ");
		break;

	default:
		/*
		 * default constraints for v2 and up
		 */
		x86_pmu.event_constraints = intel_gen_event_constraints;
		pr_cont("generic architected perfmon, ");
	}
	return 0;
}

#else /* CONFIG_CPU_SUP_INTEL */

static int intel_pmu_init(void)
{
	return 0;
}

#endif /* CONFIG_CPU_SUP_INTEL */