/*
 * Per core/cpu state
 *
 * Used to coordinate shared registers between HT threads or
 * among events on a single PMU.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/stddef.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/export.h>

#include <asm/hardirq.h>
#include <asm/apic.h>

#include "perf_event.h"

/*
 * Intel PerfMon, used on Core and later.
 */
static u64 intel_perfmon_event_map[PERF_COUNT_HW_MAX] __read_mostly =
{
	[PERF_COUNT_HW_CPU_CYCLES]		= 0x003c,
	[PERF_COUNT_HW_INSTRUCTIONS]		= 0x00c0,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= 0x4f2e,
	[PERF_COUNT_HW_CACHE_MISSES]		= 0x412e,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x00c4,
	[PERF_COUNT_HW_BRANCH_MISSES]		= 0x00c5,
	[PERF_COUNT_HW_BUS_CYCLES]		= 0x013c,
	[PERF_COUNT_HW_REF_CPU_CYCLES]		= 0x0300, /* pseudo-encoding */
};
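
/*
 * Each entry above is a raw PERFEVTSEL encoding: the low byte is the event
 * select and the next byte the unit mask. E.g. 0x412e is event 0x2e
 * (LONGEST_LAT_CACHE) with umask 0x41, i.e. last-level cache misses.
 */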

static struct event_constraint intel_core_event_constraints[] __read_mostly =
{
	INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
	INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
	INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
	INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
	INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
	INTEL_EVENT_CONSTRAINT(0xc1, 0x1), /* FP_COMP_INSTR_RET */
	EVENT_CONSTRAINT_END
};
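
/*
 * The second argument to INTEL_EVENT_CONSTRAINT() is a counter bitmask:
 * 0x1 restricts the event to general-purpose counter 0, 0x2 to counter 1,
 * and so on. FIXED_EVENT_CONSTRAINT() below instead pins an event to one
 * of the fixed-purpose counters.
 */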

static struct event_constraint intel_core2_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */
	INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
	INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
	INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
	INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
	INTEL_EVENT_CONSTRAINT(0x18, 0x1), /* IDLE_DURING_DIV */
	INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
	INTEL_EVENT_CONSTRAINT(0xa1, 0x1), /* RS_UOPS_DISPATCH_CYCLES */
	INTEL_EVENT_CONSTRAINT(0xc9, 0x1), /* ITLB_MISS_RETIRED (T30-9) */
	INTEL_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED */
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_nehalem_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_EVENT_CONSTRAINT(0x40, 0x3), /* L1D_CACHE_LD */
	INTEL_EVENT_CONSTRAINT(0x41, 0x3), /* L1D_CACHE_ST */
	INTEL_EVENT_CONSTRAINT(0x42, 0x3), /* L1D_CACHE_LOCK */
	INTEL_EVENT_CONSTRAINT(0x43, 0x3), /* L1D_ALL_REF */
	INTEL_EVENT_CONSTRAINT(0x48, 0x3), /* L1D_PEND_MISS */
	INTEL_EVENT_CONSTRAINT(0x4e, 0x3), /* L1D_PREFETCH */
	INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
	INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
	EVENT_CONSTRAINT_END
};

static struct extra_reg intel_nehalem_extra_regs[] __read_mostly =
{
	INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x100b),
	EVENT_EXTRA_END
};
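
/*
 * An extra_reg entry says: events with this event-select code (0xb7 above,
 * OFFCORE_RESPONSE) also need an auxiliary MSR (here MSR_OFFCORE_RSP_0)
 * programmed from the event's attr::config1; the third argument is the
 * mask of valid bits in that MSR.
 */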

static struct event_constraint intel_westmere_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
	INTEL_EVENT_CONSTRAINT(0x60, 0x1), /* OFFCORE_REQUESTS_OUTSTANDING */
	INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
	INTEL_EVENT_CONSTRAINT(0xb3, 0x1), /* SNOOPQ_REQUEST_OUTSTANDING */
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_snb_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_DISPATCH */
	INTEL_UEVENT_CONSTRAINT(0x05a3, 0xf), /* CYCLE_ACTIVITY.STALLS_L2_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x02a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x06a3, 0x4), /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
	INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.PENDING */
	INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
	INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_ivb_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_UEVENT_CONSTRAINT(0x0148, 0x4), /* L1D_PEND_MISS.PENDING */
	INTEL_UEVENT_CONSTRAINT(0x0279, 0xf), /* IDQ.EMPTY */
	INTEL_UEVENT_CONSTRAINT(0x019c, 0xf), /* IDQ_UOPS_NOT_DELIVERED.CORE */
	INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_EXECUTE */
	INTEL_UEVENT_CONSTRAINT(0x05a3, 0xf), /* CYCLE_ACTIVITY.STALLS_L2_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x06a3, 0xf), /* CYCLE_ACTIVITY.STALLS_LDM_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x08a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4), /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
	/*
	 * Errata BV98 -- MEM_*_RETIRED events can leak between counters of SMT
	 * siblings; disable these events because they can corrupt unrelated
	 * counters.
	 */
	INTEL_EVENT_CONSTRAINT(0xd0, 0x0), /* MEM_UOPS_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xd1, 0x0), /* MEM_LOAD_UOPS_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xd2, 0x0), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xd3, 0x0), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */
	EVENT_CONSTRAINT_END
};
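
/*
 * Note the 0x0 counter masks above: an empty mask means no counter may
 * host the event, so the scheduler rejects it outright -- the errata
 * workaround effectively blacklists those event codes.
 */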

static struct extra_reg intel_westmere_extra_regs[] __read_mostly =
{
	INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
	INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0xffff, RSP_1),
	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x100b),
	EVENT_EXTRA_END
};

static struct event_constraint intel_v1_event_constraints[] __read_mostly =
{
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_gen_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	EVENT_CONSTRAINT_END
};

static struct extra_reg intel_snb_extra_regs[] __read_mostly = {
	INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3f807f8fffull, RSP_0),
	INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3f807f8fffull, RSP_1),
	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
	EVENT_EXTRA_END
};

static struct extra_reg intel_snbep_extra_regs[] __read_mostly = {
	INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0),
	INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1),
	EVENT_EXTRA_END
};

EVENT_ATTR_STR(mem-loads,	mem_ld_nhm,	"event=0x0b,umask=0x10,ldlat=3");
EVENT_ATTR_STR(mem-loads,	mem_ld_snb,	"event=0xcd,umask=0x1,ldlat=3");
EVENT_ATTR_STR(mem-stores,	mem_st_snb,	"event=0xcd,umask=0x2");

struct attribute *nhm_events_attrs[] = {
	EVENT_PTR(mem_ld_nhm),
	NULL,
};

struct attribute *snb_events_attrs[] = {
	EVENT_PTR(mem_ld_snb),
	EVENT_PTR(mem_st_snb),
	NULL,
};
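
/*
 * These show up as named event aliases under /sys/devices/cpu/events/,
 * so the event/umask/ldlat string above can be requested by name, e.g.
 * (typical usage):
 *
 *	perf stat -e cpu/mem-loads/ -a -- sleep 1
 */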

static u64 intel_pmu_event_map(int hw_event)
{
	return intel_perfmon_event_map[hw_event];
}

#define SNB_DMND_DATA_RD	(1ULL << 0)
#define SNB_DMND_RFO		(1ULL << 1)
#define SNB_DMND_IFETCH		(1ULL << 2)
#define SNB_DMND_WB		(1ULL << 3)
#define SNB_PF_DATA_RD		(1ULL << 4)
#define SNB_PF_RFO		(1ULL << 5)
#define SNB_PF_IFETCH		(1ULL << 6)
#define SNB_LLC_DATA_RD		(1ULL << 7)
#define SNB_LLC_RFO		(1ULL << 8)
#define SNB_LLC_IFETCH		(1ULL << 9)
#define SNB_BUS_LOCKS		(1ULL << 10)
#define SNB_STRM_ST		(1ULL << 11)
#define SNB_OTHER		(1ULL << 15)
#define SNB_RESP_ANY		(1ULL << 16)
#define SNB_NO_SUPP		(1ULL << 17)
#define SNB_LLC_HITM		(1ULL << 18)
#define SNB_LLC_HITE		(1ULL << 19)
#define SNB_LLC_HITS		(1ULL << 20)
#define SNB_LLC_HITF		(1ULL << 21)
#define SNB_LOCAL		(1ULL << 22)
#define SNB_REMOTE		(0xffULL << 23)
#define SNB_SNP_NONE		(1ULL << 31)
#define SNB_SNP_NOT_NEEDED	(1ULL << 32)
#define SNB_SNP_MISS		(1ULL << 33)
#define SNB_NO_FWD		(1ULL << 34)
#define SNB_SNP_FWD		(1ULL << 35)
#define SNB_HITM		(1ULL << 36)
#define SNB_NON_DRAM		(1ULL << 37)

#define SNB_DMND_READ		(SNB_DMND_DATA_RD|SNB_LLC_DATA_RD)
#define SNB_DMND_WRITE		(SNB_DMND_RFO|SNB_LLC_RFO)
#define SNB_DMND_PREFETCH	(SNB_PF_DATA_RD|SNB_PF_RFO)

#define SNB_SNP_ANY		(SNB_SNP_NONE|SNB_SNP_NOT_NEEDED| \
				 SNB_SNP_MISS|SNB_NO_FWD|SNB_SNP_FWD| \
				 SNB_HITM)

#define SNB_DRAM_ANY		(SNB_LOCAL|SNB_REMOTE|SNB_SNP_ANY)
#define SNB_DRAM_REMOTE		(SNB_REMOTE|SNB_SNP_ANY)

#define SNB_L3_ACCESS		SNB_RESP_ANY
#define SNB_L3_MISS		(SNB_DRAM_ANY|SNB_NON_DRAM)
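
/*
 * An OFFCORE_RESPONSE value combines request bits with response bits.
 * For example, the LL/OP_READ/RESULT_MISS entry below is
 *
 *	SNB_DMND_READ|SNB_L3_MISS
 *
 * i.e. demand data reads satisfied by local or remote DRAM or by a
 * non-DRAM source -- everything except an L3 hit.
 */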

static __initconst const u64 snb_hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = SNB_DMND_READ|SNB_L3_ACCESS,
		[ C(RESULT_MISS)   ] = SNB_DMND_READ|SNB_L3_MISS,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = SNB_DMND_WRITE|SNB_L3_ACCESS,
		[ C(RESULT_MISS)   ] = SNB_DMND_WRITE|SNB_L3_MISS,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = SNB_DMND_PREFETCH|SNB_L3_ACCESS,
		[ C(RESULT_MISS)   ] = SNB_DMND_PREFETCH|SNB_L3_MISS,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = SNB_DMND_READ|SNB_DRAM_ANY,
		[ C(RESULT_MISS)   ] = SNB_DMND_READ|SNB_DRAM_REMOTE,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = SNB_DMND_WRITE|SNB_DRAM_ANY,
		[ C(RESULT_MISS)   ] = SNB_DMND_WRITE|SNB_DRAM_REMOTE,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = SNB_DMND_PREFETCH|SNB_DRAM_ANY,
		[ C(RESULT_MISS)   ] = SNB_DMND_PREFETCH|SNB_DRAM_REMOTE,
	},
 },
};

static __initconst const u64 snb_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0xf1d0, /* MEM_UOP_RETIRED.LOADS */
		[ C(RESULT_MISS)   ] = 0x0151, /* L1D.REPLACEMENT */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0xf2d0, /* MEM_UOP_RETIRED.STORES */
		[ C(RESULT_MISS)   ] = 0x0851, /* L1D.ALL_M_REPLACEMENT */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x024e, /* HW_PRE_REQ.DL1_MISS */
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0280, /* ICACHE.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		/* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_WRITE) ] = {
		/* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		/* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_UOP_RETIRED.ALL_LOADS */
		[ C(RESULT_MISS)   ] = 0x0108, /* DTLB_LOAD_MISSES.CAUSES_A_WALK */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_UOP_RETIRED.ALL_STORES */
		[ C(RESULT_MISS)   ] = 0x0149, /* DTLB_STORE_MISSES.MISS_CAUSES_A_WALK */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x1085, /* ITLB_MISSES.STLB_HIT */
		[ C(RESULT_MISS)   ] = 0x0185, /* ITLB_MISSES.CAUSES_A_WALK */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
		[ C(RESULT_MISS)   ] = 0x00c5, /* BR_MISP_RETIRED.ALL_BRANCHES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
 },

};

static __initconst const u64 westmere_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
		[ C(RESULT_MISS)   ] = 0x0151, /* L1D.REPL */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
		[ C(RESULT_MISS)   ] = 0x0251, /* L1D.M_REPL */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
		[ C(RESULT_MISS)   ] = 0x024e, /* L1D_PREFETCH.MISS */
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
		[ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		/* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	/*
	 * Use RFO, not WRITEBACK, because a write miss would typically occur
	 * on RFO.
	 */
	[ C(OP_WRITE) ] = {
		/* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		/* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
		[ C(RESULT_MISS)   ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
		[ C(RESULT_MISS)   ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS)   ] = 0x0185, /* ITLB_MISSES.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
		[ C(RESULT_MISS)   ] = 0x03e8, /* BPU_CLEARS.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
 },
};

/*
 * Nehalem/Westmere MSR_OFFCORE_RESPONSE bits;
 * See IA32 SDM Vol 3B 30.6.1.3
 */

#define NHM_DMND_DATA_RD	(1 << 0)
#define NHM_DMND_RFO		(1 << 1)
#define NHM_DMND_IFETCH		(1 << 2)
#define NHM_DMND_WB		(1 << 3)
#define NHM_PF_DATA_RD		(1 << 4)
#define NHM_PF_DATA_RFO		(1 << 5)
#define NHM_PF_IFETCH		(1 << 6)
#define NHM_OFFCORE_OTHER	(1 << 7)
#define NHM_UNCORE_HIT		(1 << 8)
#define NHM_OTHER_CORE_HIT_SNP	(1 << 9)
#define NHM_OTHER_CORE_HITM	(1 << 10)
				/* reserved */
#define NHM_REMOTE_CACHE_FWD	(1 << 12)
#define NHM_REMOTE_DRAM		(1 << 13)
#define NHM_LOCAL_DRAM		(1 << 14)
#define NHM_NON_DRAM		(1 << 15)

#define NHM_LOCAL		(NHM_LOCAL_DRAM|NHM_REMOTE_CACHE_FWD)
#define NHM_REMOTE		(NHM_REMOTE_DRAM)

#define NHM_DMND_READ		(NHM_DMND_DATA_RD)
#define NHM_DMND_WRITE		(NHM_DMND_RFO|NHM_DMND_WB)
#define NHM_DMND_PREFETCH	(NHM_PF_DATA_RD|NHM_PF_DATA_RFO)

#define NHM_L3_HIT	(NHM_UNCORE_HIT|NHM_OTHER_CORE_HIT_SNP|NHM_OTHER_CORE_HITM)
#define NHM_L3_MISS	(NHM_NON_DRAM|NHM_LOCAL_DRAM|NHM_REMOTE_DRAM|NHM_REMOTE_CACHE_FWD)
#define NHM_L3_ACCESS	(NHM_L3_HIT|NHM_L3_MISS)
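
/*
 * Same scheme as the SNB bits above: a request-type mask in the low bits
 * combined with a response mask, e.g. NHM_DMND_WRITE|NHM_L3_MISS counts
 * demand RFOs and writebacks that missed the L3.
 */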

static __initconst const u64 nehalem_hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_L3_ACCESS,
		[ C(RESULT_MISS)   ] = NHM_DMND_READ|NHM_L3_MISS,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_L3_ACCESS,
		[ C(RESULT_MISS)   ] = NHM_DMND_WRITE|NHM_L3_MISS,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_L3_ACCESS,
		[ C(RESULT_MISS)   ] = NHM_DMND_PREFETCH|NHM_L3_MISS,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_LOCAL|NHM_REMOTE,
		[ C(RESULT_MISS)   ] = NHM_DMND_READ|NHM_REMOTE,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_LOCAL|NHM_REMOTE,
		[ C(RESULT_MISS)   ] = NHM_DMND_WRITE|NHM_REMOTE,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_LOCAL|NHM_REMOTE,
		[ C(RESULT_MISS)   ] = NHM_DMND_PREFETCH|NHM_REMOTE,
	},
 },
};

static __initconst const u64 nehalem_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
		[ C(RESULT_MISS)   ] = 0x0151, /* L1D.REPL */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
		[ C(RESULT_MISS)   ] = 0x0251, /* L1D.M_REPL */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
		[ C(RESULT_MISS)   ] = 0x024e, /* L1D_PREFETCH.MISS */
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
		[ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		/* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	/*
	 * Use RFO, not WRITEBACK, because a write miss would typically occur
	 * on RFO.
	 */
	[ C(OP_WRITE) ] = {
		/* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		/* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS)   ] = 0x20c8, /* ITLB_MISS_RETIRED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
		[ C(RESULT_MISS)   ] = 0x03e8, /* BPU_CLEARS.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
 },
};

static __initconst const u64 core2_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI */
		[ C(RESULT_MISS)   ] = 0x0140, /* L1D_CACHE_LD.I_STATE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI */
		[ C(RESULT_MISS)   ] = 0x0141, /* L1D_CACHE_ST.I_STATE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x104e, /* L1D_PREFETCH.REQUESTS */
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0080, /* L1I.READS */
		[ C(RESULT_MISS)   ] = 0x0081, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
		[ C(RESULT_MISS)   ] = 0x4129, /* L2_LD.ISTATE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
		[ C(RESULT_MISS)   ] = 0x412A, /* L2_ST.ISTATE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x0208, /* DTLB_MISSES.MISS_LD */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x0808, /* DTLB_MISSES.MISS_ST */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS)   ] = 0x1282, /* ITLBMISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
		[ C(RESULT_MISS)   ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
};

static __initconst const u64 atom_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE.LD */
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE.ST */
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
		[ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
		[ C(RESULT_MISS)   ] = 0x4129, /* L2_LD.ISTATE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
		[ C(RESULT_MISS)   ] = 0x412A, /* L2_ST.ISTATE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE_LD.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x0508, /* DTLB_MISSES.MISS_LD */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE_ST.MESI (alias) */
		[ C(RESULT_MISS)   ] = 0x0608, /* DTLB_MISSES.MISS_ST */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS)   ] = 0x0282, /* ITLB.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
		[ C(RESULT_MISS)   ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
};

static inline bool intel_pmu_needs_lbr_smpl(struct perf_event *event)
{
	/* user explicitly requested branch sampling */
	if (has_branch_stack(event))
		return true;

	/* implicit branch sampling to correct PEBS skid */
	if (x86_pmu.intel_cap.pebs_trap && event->attr.precise_ip > 1)
		return true;

	return false;
}

static void intel_pmu_disable_all(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);

	if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask))
		intel_pmu_disable_bts();

	intel_pmu_pebs_disable_all();
	intel_pmu_lbr_disable_all();
}

static void intel_pmu_enable_all(int added)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	intel_pmu_pebs_enable_all();
	intel_pmu_lbr_enable_all();
	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL,
		x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask);

	if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
		struct perf_event *event =
			cpuc->events[INTEL_PMC_IDX_FIXED_BTS];

		if (WARN_ON_ONCE(!event))
			return;

		intel_pmu_enable_bts(event->hw.config);
	}
}

/*
 * Workaround for:
 *   Intel Errata AAK100 (model 26)
 *   Intel Errata AAP53  (model 30)
 *   Intel Errata BD53   (model 44)
 *
 * The official story:
 *   These chips need to be 'reset' when adding counters by programming the
 *   magic three (non-counting) events 0x4300B5, 0x4300D2, and 0x4300B1 either
 *   in sequence on the same PMC or on different PMCs.
 *
 * In practice it appears some of these events do in fact count, and
 * we need to program all 4 events.
 */
static void intel_pmu_nhm_workaround(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	static const unsigned long nhm_magic[4] = {
		0x4300B5,
		0x4300D2,
		0x4300B1,
		0x4300B1
	};
	struct perf_event *event;
	int i;

	/*
	 * The errata requires the following steps:
	 * 1) Clear MSR_IA32_PEBS_ENABLE and MSR_CORE_PERF_GLOBAL_CTRL;
	 * 2) Configure 4 PERFEVTSELx with the magic events and clear
	 *    the corresponding PMCx;
	 * 3) set bit0~bit3 of MSR_CORE_PERF_GLOBAL_CTRL;
	 * 4) Clear MSR_CORE_PERF_GLOBAL_CTRL;
	 * 5) Clear 4 pairs of PERFEVTSELx and PMCx;
	 */

	/*
	 * The real steps we choose are a little different from above.
	 * A) To reduce MSR operations, we don't run step 1) as those
	 *    MSRs are already cleared before this function is called;
	 * B) Call x86_perf_event_update to save PMCx before configuring
	 *    PERFEVTSELx with the magic number;
	 * C) With step 5), we only clear a PERFEVTSELx when it is
	 *    not currently in use;
	 * D) Call x86_perf_event_set_period to restore PMCx;
	 */

	/* We always operate on 4 pairs of PERF counters */
	for (i = 0; i < 4; i++) {
		event = cpuc->events[i];
		if (event)
			x86_perf_event_update(event);
	}

	for (i = 0; i < 4; i++) {
		wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, nhm_magic[i]);
		wrmsrl(MSR_ARCH_PERFMON_PERFCTR0 + i, 0x0);
	}

	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0xf);
	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0x0);

	for (i = 0; i < 4; i++) {
		event = cpuc->events[i];

		if (event) {
			x86_perf_event_set_period(event);
			__x86_pmu_enable_event(&event->hw,
					ARCH_PERFMON_EVENTSEL_ENABLE);
		} else
			wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, 0x0);
	}
}

static void intel_pmu_nhm_enable_all(int added)
{
	if (added)
		intel_pmu_nhm_workaround();
	intel_pmu_enable_all(added);
}

static inline u64 intel_pmu_get_status(void)
{
	u64 status;

	rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);

	return status;
}

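/*
 * Writing a bit back to MSR_CORE_PERF_GLOBAL_OVF_CTRL clears the
 * corresponding overflow bit in MSR_CORE_PERF_GLOBAL_STATUS, so the
 * read-handle-ack cycle in the NMI handler below works per bit.
 */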
static inline void intel_pmu_ack_status(u64 ack)
{
	wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
}

static void intel_pmu_disable_fixed(struct hw_perf_event *hwc)
{
	int idx = hwc->idx - INTEL_PMC_IDX_FIXED;
	u64 ctrl_val, mask;

	mask = 0xfULL << (idx * 4);

	rdmsrl(hwc->config_base, ctrl_val);
	ctrl_val &= ~mask;
	wrmsrl(hwc->config_base, ctrl_val);
}

static void intel_pmu_disable_event(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	if (unlikely(hwc->idx == INTEL_PMC_IDX_FIXED_BTS)) {
		intel_pmu_disable_bts();
		intel_pmu_drain_bts_buffer();
		return;
	}

	cpuc->intel_ctrl_guest_mask &= ~(1ull << hwc->idx);
	cpuc->intel_ctrl_host_mask &= ~(1ull << hwc->idx);

	/*
	 * The LBR must be disabled before touching the actual event,
	 * because any event may be combined with the LBR.
	 */
	if (intel_pmu_needs_lbr_smpl(event))
		intel_pmu_lbr_disable(event);

	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
		intel_pmu_disable_fixed(hwc);
		return;
	}

	x86_pmu_disable_event(event);

	if (unlikely(event->attr.precise_ip))
		intel_pmu_pebs_disable(event);
}

static void intel_pmu_enable_fixed(struct hw_perf_event *hwc)
{
	int idx = hwc->idx - INTEL_PMC_IDX_FIXED;
	u64 ctrl_val, bits, mask;

	/*
	 * Enable IRQ generation (0x8),
	 * and enable ring-3 counting (0x2) and ring-0 counting (0x1)
	 * if requested:
	 */
	bits = 0x8ULL;
	if (hwc->config & ARCH_PERFMON_EVENTSEL_USR)
		bits |= 0x2;
	if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
		bits |= 0x1;

	/*
	 * ANY bit is supported in v3 and up
	 */
	if (x86_pmu.version > 2 && hwc->config & ARCH_PERFMON_EVENTSEL_ANY)
		bits |= 0x4;

	bits <<= (idx * 4);
	mask = 0xfULL << (idx * 4);

	rdmsrl(hwc->config_base, ctrl_val);
	ctrl_val &= ~mask;
	ctrl_val |= bits;
	wrmsrl(hwc->config_base, ctrl_val);
}

static void intel_pmu_enable_event(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	if (unlikely(hwc->idx == INTEL_PMC_IDX_FIXED_BTS)) {
		if (!__this_cpu_read(cpu_hw_events.enabled))
			return;

		intel_pmu_enable_bts(hwc->config);
		return;
	}
	/*
	 * The LBR must be enabled before the actual event,
	 * because any event may be combined with the LBR.
	 */
	if (intel_pmu_needs_lbr_smpl(event))
		intel_pmu_lbr_enable(event);

	if (event->attr.exclude_host)
		cpuc->intel_ctrl_guest_mask |= (1ull << hwc->idx);
	if (event->attr.exclude_guest)
		cpuc->intel_ctrl_host_mask |= (1ull << hwc->idx);

	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
		intel_pmu_enable_fixed(hwc);
		return;
	}

	if (unlikely(event->attr.precise_ip))
		intel_pmu_pebs_enable(event);

	__x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
}

/*
 * Save and restart an expired event. Called by NMI contexts,
 * so it has to be careful about preempting normal event ops:
 */
int intel_pmu_save_and_restart(struct perf_event *event)
{
	x86_perf_event_update(event);
	return x86_perf_event_set_period(event);
}

static void intel_pmu_reset(void)
{
	struct debug_store *ds = __this_cpu_read(cpu_hw_events.ds);
	unsigned long flags;
	int idx;

	if (!x86_pmu.num_counters)
		return;

	local_irq_save(flags);

	pr_info("clearing PMU state on CPU#%d\n", smp_processor_id());

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		wrmsrl_safe(x86_pmu_config_addr(idx), 0ull);
		wrmsrl_safe(x86_pmu_event_addr(idx), 0ull);
	}
	for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++)
		wrmsrl_safe(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);

	if (ds)
		ds->bts_index = ds->bts_buffer_base;

	local_irq_restore(flags);
}

/*
 * This handler is triggered by the local APIC, so the APIC IRQ handling
 * rules apply:
 */
static int intel_pmu_handle_irq(struct pt_regs *regs)
{
	struct perf_sample_data data;
	struct cpu_hw_events *cpuc;
	int bit, loops;
	u64 status;
	int handled;

	cpuc = &__get_cpu_var(cpu_hw_events);

	/*
	 * Some chipsets need to unmask the LVTPC in a particular spot
	 * inside the nmi handler.  As a result, the unmasking was pushed
	 * into all the nmi handlers.
	 *
	 * This handler doesn't seem to have any issues with the unmasking
	 * so it was left at the top.
	 */
	apic_write(APIC_LVTPC, APIC_DM_NMI);

	intel_pmu_disable_all();
	handled = intel_pmu_drain_bts_buffer();
	status = intel_pmu_get_status();
	if (!status) {
		intel_pmu_enable_all(0);
		return handled;
	}

	loops = 0;
again:
	intel_pmu_ack_status(status);
	if (++loops > 100) {
		WARN_ONCE(1, "perfevents: irq loop stuck!\n");
		perf_event_print_debug();
		intel_pmu_reset();
		goto done;
	}

	inc_irq_stat(apic_perf_irqs);

	intel_pmu_lbr_read();

	/*
	 * PEBS overflow sets bit 62 in the global status register
	 */
	if (__test_and_clear_bit(62, (unsigned long *)&status)) {
		handled++;
		x86_pmu.drain_pebs(regs);
	}

	for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
		struct perf_event *event = cpuc->events[bit];

		handled++;

		if (!test_bit(bit, cpuc->active_mask))
			continue;

		if (!intel_pmu_save_and_restart(event))
			continue;

		perf_sample_data_init(&data, 0, event->hw.last_period);

		if (has_branch_stack(event))
			data.br_stack = &cpuc->lbr_stack;

		if (perf_event_overflow(event, &data, regs))
			x86_pmu_stop(event, 0);
	}

	/*
	 * Repeat if there is more work to be done:
	 */
	status = intel_pmu_get_status();
	if (status)
		goto again;

done:
	intel_pmu_enable_all(0);
	return handled;
}

static struct event_constraint *
intel_bts_constraints(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	unsigned int hw_event, bts_event;

	if (event->attr.freq)
		return NULL;

	hw_event = hwc->config & INTEL_ARCH_EVENT_MASK;
	bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS);

	if (unlikely(hw_event == bts_event && hwc->sample_period == 1))
		return &bts_constraint;

	return NULL;
}

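/*
 * OFFCORE_RESPONSE events 0xb7 and 0xbb are identical except for which
 * MSR_OFFCORE_RSP_x they use. When the wanted reg is already taken with a
 * different config, intel_alt_er() retargets the event to the alternate
 * reg and intel_fixup_er() rewrites the event code (0x01b7 <-> 0x01bb)
 * to match.
 */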
static int intel_alt_er(int idx)
{
	if (!(x86_pmu.er_flags & ERF_HAS_RSP_1))
		return idx;

	if (idx == EXTRA_REG_RSP_0)
		return EXTRA_REG_RSP_1;

	if (idx == EXTRA_REG_RSP_1)
		return EXTRA_REG_RSP_0;

	return idx;
}

static void intel_fixup_er(struct perf_event *event, int idx)
{
	event->hw.extra_reg.idx = idx;

	if (idx == EXTRA_REG_RSP_0) {
		event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
		event->hw.config |= 0x01b7;
		event->hw.extra_reg.reg = MSR_OFFCORE_RSP_0;
	} else if (idx == EXTRA_REG_RSP_1) {
		event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
		event->hw.config |= 0x01bb;
		event->hw.extra_reg.reg = MSR_OFFCORE_RSP_1;
	}
}

/*
 * manage allocation of shared extra msr for certain events
 *
 * sharing can be:
 * per-cpu: to be shared between the various events on a single PMU
 * per-core: per-cpu + shared by HT threads
 */
static struct event_constraint *
__intel_shared_reg_get_constraints(struct cpu_hw_events *cpuc,
				   struct perf_event *event,
				   struct hw_perf_event_extra *reg)
{
	struct event_constraint *c = &emptyconstraint;
	struct er_account *era;
	unsigned long flags;
	int idx = reg->idx;

	/*
	 * reg->alloc can be set due to existing state, so for fake cpuc we
	 * need to ignore this, otherwise we might fail to allocate proper fake
	 * state for this extra reg constraint. Also see the comment below.
	 */
	if (reg->alloc && !cpuc->is_fake)
		return NULL; /* call x86_get_event_constraints() */

again:
	era = &cpuc->shared_regs->regs[idx];
	/*
	 * we use spin_lock_irqsave() to avoid lockdep issues when
	 * passing a fake cpuc
	 */
	raw_spin_lock_irqsave(&era->lock, flags);

	if (!atomic_read(&era->ref) || era->config == reg->config) {

		/*
		 * If it's a fake cpuc -- as per validate_{group,event}() we
		 * shouldn't touch event state and we can avoid doing so
		 * since both will only call get_event_constraints() once
		 * on each event, this avoids the need for reg->alloc.
		 *
		 * Not doing the ER fixup will only result in era->reg being
		 * wrong, but since we won't actually try and program hardware
		 * this isn't a problem either.
		 */
		if (!cpuc->is_fake) {
			if (idx != reg->idx)
				intel_fixup_er(event, idx);

			/*
			 * x86_schedule_events() can call get_event_constraints()
			 * multiple times on events in the case of incremental
			 * scheduling. reg->alloc ensures we only do the ER
			 * allocation once.
			 */
			reg->alloc = 1;
		}

		/* lock in msr value */
		era->config = reg->config;
		era->reg = reg->reg;

		/* one more user */
		atomic_inc(&era->ref);

		/*
		 * need to call x86_get_event_constraints()
		 * to check if associated event has constraints
		 */
		c = NULL;
	} else {
		idx = intel_alt_er(idx);
		if (idx != reg->idx) {
			raw_spin_unlock_irqrestore(&era->lock, flags);
			goto again;
		}
	}
	raw_spin_unlock_irqrestore(&era->lock, flags);

	return c;
}

static void
__intel_shared_reg_put_constraints(struct cpu_hw_events *cpuc,
				   struct hw_perf_event_extra *reg)
{
	struct er_account *era;

	/*
	 * Only put the constraint if the extra reg was actually allocated.
	 * This also takes care of events which do not use an extra shared
	 * reg.
	 *
	 * Also, if this is a fake cpuc we shouldn't touch any event state
	 * (reg->alloc) and we don't care about leaving inconsistent cpuc state
	 * either since it'll be thrown out.
	 */
	if (!reg->alloc || cpuc->is_fake)
		return;

	era = &cpuc->shared_regs->regs[reg->idx];

	/* one fewer user */
	atomic_dec(&era->ref);

	/* allocate again next time */
	reg->alloc = 0;
}

1395
1396static struct event_constraint *
1397intel_shared_regs_constraints(struct cpu_hw_events *cpuc,
1398 struct perf_event *event)
1399{
Stephane Eranianb36817e2012-02-09 23:20:53 +01001400 struct event_constraint *c = NULL, *d;
1401 struct hw_perf_event_extra *xreg, *breg;
Stephane Eranianefc9f052011-06-06 16:57:03 +02001402
Stephane Eranianb36817e2012-02-09 23:20:53 +01001403 xreg = &event->hw.extra_reg;
1404 if (xreg->idx != EXTRA_REG_NONE) {
1405 c = __intel_shared_reg_get_constraints(cpuc, event, xreg);
1406 if (c == &emptyconstraint)
1407 return c;
1408 }
1409 breg = &event->hw.branch_reg;
1410 if (breg->idx != EXTRA_REG_NONE) {
1411 d = __intel_shared_reg_get_constraints(cpuc, event, breg);
1412 if (d == &emptyconstraint) {
1413 __intel_shared_reg_put_constraints(cpuc, xreg);
1414 c = d;
1415 }
1416 }
Stephane Eranianefc9f052011-06-06 16:57:03 +02001417 return c;
Andi Kleena7e3ed12011-03-03 10:34:47 +08001418}
1419
Kevin Winchesterde0428a2011-08-30 20:41:05 -03001420struct event_constraint *
1421x86_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
1422{
1423 struct event_constraint *c;
1424
1425 if (x86_pmu.event_constraints) {
1426 for_each_event_constraint(c, x86_pmu.event_constraints) {
Stephane Eranian9fac2cf2013-01-24 16:10:27 +01001427 if ((event->hw.config & c->cmask) == c->code) {
1428 /* hw.flags zeroed at initialization */
1429 event->hw.flags |= c->flags;
Kevin Winchesterde0428a2011-08-30 20:41:05 -03001430 return c;
Stephane Eranian9fac2cf2013-01-24 16:10:27 +01001431 }
Kevin Winchesterde0428a2011-08-30 20:41:05 -03001432 }
1433 }
1434
1435 return &unconstrained;
1436}
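/*
 * Editor's sketch (hypothetical helper, not part of the original file):
 * the match above only compares the bits selected by the constraint's
 * cmask. For INTEL_EVENT_CONSTRAINT(0xc5, 0x1) the cmask is
 * ARCH_PERFMON_EVENTSEL_EVENT, so any umask/flag variant of event 0xc5
 * matches and is then restricted to counter 0 by the 0x1 index mask.
 */
static inline bool example_constraint_matches(u64 hw_config,
					      struct event_constraint *c)
{
	/* mirrors the test in x86_get_event_constraints() above */
	return (hw_config & c->cmask) == c->code;
}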
1437
Andi Kleena7e3ed12011-03-03 10:34:47 +08001438static struct event_constraint *
Peter Zijlstraf22f54f2010-02-26 12:05:05 +01001439intel_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
1440{
1441 struct event_constraint *c;
1442
Peter Zijlstraca037702010-03-02 19:52:12 +01001443 c = intel_bts_constraints(event);
1444 if (c)
1445 return c;
1446
1447 c = intel_pebs_constraints(event);
Peter Zijlstraf22f54f2010-02-26 12:05:05 +01001448 if (c)
1449 return c;
1450
Stephane Eranianefc9f052011-06-06 16:57:03 +02001451 c = intel_shared_regs_constraints(cpuc, event);
Andi Kleena7e3ed12011-03-03 10:34:47 +08001452 if (c)
1453 return c;
1454
Peter Zijlstraf22f54f2010-02-26 12:05:05 +01001455 return x86_get_event_constraints(cpuc, event);
1456}
1457
Stephane Eranianefc9f052011-06-06 16:57:03 +02001458static void
1459intel_put_shared_regs_event_constraints(struct cpu_hw_events *cpuc,
1460 struct perf_event *event)
1461{
1462 struct hw_perf_event_extra *reg;
1463
1464 reg = &event->hw.extra_reg;
1465 if (reg->idx != EXTRA_REG_NONE)
1466 __intel_shared_reg_put_constraints(cpuc, reg);
Stephane Eranianb36817e2012-02-09 23:20:53 +01001467
1468 reg = &event->hw.branch_reg;
1469 if (reg->idx != EXTRA_REG_NONE)
1470 __intel_shared_reg_put_constraints(cpuc, reg);
Stephane Eranianefc9f052011-06-06 16:57:03 +02001471}
1472
Andi Kleena7e3ed12011-03-03 10:34:47 +08001473static void intel_put_event_constraints(struct cpu_hw_events *cpuc,
1474 struct perf_event *event)
1475{
Stephane Eranian9fac2cf2013-01-24 16:10:27 +01001476 event->hw.flags = 0;
Stephane Eranianefc9f052011-06-06 16:57:03 +02001477 intel_put_shared_regs_event_constraints(cpuc, event);
Andi Kleena7e3ed12011-03-03 10:34:47 +08001478}
1479
Peter Zijlstra0780c922012-06-05 10:26:43 +02001480static void intel_pebs_aliases_core2(struct perf_event *event)
Peter Zijlstrab4cdc5c2010-03-30 17:00:06 +02001481{
Peter Zijlstra0780c922012-06-05 10:26:43 +02001482 if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
Peter Zijlstra7639dae2010-12-14 21:26:40 +01001483 /*
1484 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
1485 * (0x003c) so that we can use it with PEBS.
1486 *
1487 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
1488 * PEBS capable. However we can use INST_RETIRED.ANY_P
1489 * (0x00c0), which is a PEBS capable event, to get the same
1490 * count.
1491 *
1492		 * INST_RETIRED.ANY_P counts the number of cycles that retire
1493		 * CNTMASK instructions. By setting CNTMASK to a value (16)
1494		 * larger than the maximum number of instructions that can be
1495		 * retired per cycle (4) and then inverting the condition, we
1496		 * count all cycles that retire 16 or fewer instructions, which
1497 * is every cycle.
1498 *
1499 * Thereby we gain a PEBS capable cycle counter.
1500 */
Peter Zijlstraf9b4eeb2012-03-12 12:44:35 +01001501 u64 alt_config = X86_CONFIG(.event=0xc0, .inv=1, .cmask=16);
1502
Peter Zijlstra0780c922012-06-05 10:26:43 +02001503 alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
1504 event->hw.config = alt_config;
1505 }
1506}
1507
1508static void intel_pebs_aliases_snb(struct perf_event *event)
1509{
1510 if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
1511 /*
1512 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
1513 * (0x003c) so that we can use it with PEBS.
1514 *
1515 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
1516 * PEBS capable. However we can use UOPS_RETIRED.ALL
1517 * (0x01c2), which is a PEBS capable event, to get the same
1518 * count.
1519 *
1520		 * UOPS_RETIRED.ALL counts the number of cycles that retire
1521		 * CNTMASK micro-ops. By setting CNTMASK to a value (16)
1522		 * larger than the maximum number of micro-ops that can be
1523		 * retired per cycle (4) and then inverting the condition, we
1524		 * count all cycles that retire 16 or fewer micro-ops, which
1525 * is every cycle.
1526 *
1527 * Thereby we gain a PEBS capable cycle counter.
1528 */
1529 u64 alt_config = X86_CONFIG(.event=0xc2, .umask=0x01, .inv=1, .cmask=16);
Peter Zijlstra7639dae2010-12-14 21:26:40 +01001530
1531 alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
1532 event->hw.config = alt_config;
1533 }
Peter Zijlstra0780c922012-06-05 10:26:43 +02001534}
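/*
 * Worked encodings for the two aliases above (editor's note): with the
 * eventsel layout used by the format attributes further down (event bits
 * 0-7, umask 8-15, inv bit 23, cmask bits 24-31), the substituted
 * configs come out as
 *
 *   core2: X86_CONFIG(.event=0xc0, .inv=1, .cmask=16)
 *            = 0xc0 | (1 << 23) | (16 << 24)               = 0x108000c0
 *   snb:   X86_CONFIG(.event=0xc2, .umask=0x01, .inv=1, .cmask=16)
 *            = 0xc2 | (0x01 << 8) | (1 << 23) | (16 << 24) = 0x108001c2
 *
 * before the non-RAW bits of the user's original config are OR-ed back in.
 */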
1535
1536static int intel_pmu_hw_config(struct perf_event *event)
1537{
1538 int ret = x86_pmu_hw_config(event);
1539
1540 if (ret)
1541 return ret;
1542
1543 if (event->attr.precise_ip && x86_pmu.pebs_aliases)
1544 x86_pmu.pebs_aliases(event);
Peter Zijlstra7639dae2010-12-14 21:26:40 +01001545
Stephane Eranian60ce0fb2012-02-09 23:20:57 +01001546 if (intel_pmu_needs_lbr_smpl(event)) {
1547 ret = intel_pmu_setup_lbr_filter(event);
1548 if (ret)
1549 return ret;
1550 }
1551
Peter Zijlstrab4cdc5c2010-03-30 17:00:06 +02001552 if (event->attr.type != PERF_TYPE_RAW)
1553 return 0;
1554
1555 if (!(event->attr.config & ARCH_PERFMON_EVENTSEL_ANY))
1556 return 0;
1557
1558 if (x86_pmu.version < 3)
1559 return -EINVAL;
1560
1561 if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
1562 return -EACCES;
1563
1564 event->hw.config |= ARCH_PERFMON_EVENTSEL_ANY;
1565
1566 return 0;
1567}
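/*
 * Example (editor's note): a raw event with the any-thread bit set, e.g.
 * "perf stat -e r20003c" (event 0x3c | ARCH_PERFMON_EVENTSEL_ANY), gets
 * past the checks above only on architectural perfmon v3+ and, when
 * perf_paranoid_cpu() is in effect, only with CAP_SYS_ADMIN.
 */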
1568
Gleb Natapov144d31e2011-10-05 14:01:21 +02001569struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr)
1570{
1571 if (x86_pmu.guest_get_msrs)
1572 return x86_pmu.guest_get_msrs(nr);
1573 *nr = 0;
1574 return NULL;
1575}
1576EXPORT_SYMBOL_GPL(perf_guest_get_msrs);
1577
1578static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr)
1579{
1580 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1581 struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs;
1582
1583 arr[0].msr = MSR_CORE_PERF_GLOBAL_CTRL;
1584 arr[0].host = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask;
1585 arr[0].guest = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_host_mask;
Gleb Natapov26a4f3c2012-08-09 11:52:34 +03001586 /*
1587	 * If a PMU counter has PEBS enabled, it is not enough to disable the
1588	 * counter on guest entry, since a PEBS memory write can overshoot the
1589	 * guest entry and corrupt guest memory. Disabling PEBS solves the problem.
1590 */
1591 arr[1].msr = MSR_IA32_PEBS_ENABLE;
1592 arr[1].host = cpuc->pebs_enabled;
1593 arr[1].guest = 0;
Gleb Natapov144d31e2011-10-05 14:01:21 +02001594
Gleb Natapov26a4f3c2012-08-09 11:52:34 +03001595 *nr = 2;
Gleb Natapov144d31e2011-10-05 14:01:21 +02001596 return arr;
1597}
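/*
 * Usage sketch (editor's addition; the example_* name is hypothetical):
 * a hypervisor consumes the array returned above by loading the .guest
 * values around VM-entry and restoring the .host values on VM-exit, in
 * practice via the VMX atomic MSR-switch lists rather than raw wrmsrl().
 */
static void __maybe_unused example_switch_to_guest_perf_msrs(void)
{
	int i, nr;
	struct perf_guest_switch_msr *msrs = perf_guest_get_msrs(&nr);

	if (!msrs)
		return;

	for (i = 0; i < nr; i++)
		wrmsrl(msrs[i].msr, msrs[i].guest); /* write .host on VM-exit */
}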
1598
1599static struct perf_guest_switch_msr *core_guest_get_msrs(int *nr)
1600{
1601 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1602 struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs;
1603 int idx;
1604
1605 for (idx = 0; idx < x86_pmu.num_counters; idx++) {
1606 struct perf_event *event = cpuc->events[idx];
1607
1608 arr[idx].msr = x86_pmu_config_addr(idx);
1609 arr[idx].host = arr[idx].guest = 0;
1610
1611 if (!test_bit(idx, cpuc->active_mask))
1612 continue;
1613
1614 arr[idx].host = arr[idx].guest =
1615 event->hw.config | ARCH_PERFMON_EVENTSEL_ENABLE;
1616
1617 if (event->attr.exclude_host)
1618 arr[idx].host &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
1619 else if (event->attr.exclude_guest)
1620 arr[idx].guest &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
1621 }
1622
1623 *nr = x86_pmu.num_counters;
1624 return arr;
1625}
1626
1627static void core_pmu_enable_event(struct perf_event *event)
1628{
1629 if (!event->attr.exclude_host)
1630 x86_pmu_enable_event(event);
1631}
1632
1633static void core_pmu_enable_all(int added)
1634{
1635 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1636 int idx;
1637
1638 for (idx = 0; idx < x86_pmu.num_counters; idx++) {
1639 struct hw_perf_event *hwc = &cpuc->events[idx]->hw;
1640
1641 if (!test_bit(idx, cpuc->active_mask) ||
1642 cpuc->events[idx]->attr.exclude_host)
1643 continue;
1644
1645 __x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
1646 }
1647}
1648
Jiri Olsa641cc932012-03-15 20:09:14 +01001649PMU_FORMAT_ATTR(event, "config:0-7" );
1650PMU_FORMAT_ATTR(umask, "config:8-15" );
1651PMU_FORMAT_ATTR(edge, "config:18" );
1652PMU_FORMAT_ATTR(pc, "config:19" );
1653PMU_FORMAT_ATTR(any, "config:21" ); /* v3 + */
1654PMU_FORMAT_ATTR(inv, "config:23" );
1655PMU_FORMAT_ATTR(cmask, "config:24-31" );
1656
1657static struct attribute *intel_arch_formats_attr[] = {
1658 &format_attr_event.attr,
1659 &format_attr_umask.attr,
1660 &format_attr_edge.attr,
1661 &format_attr_pc.attr,
1662 &format_attr_inv.attr,
1663 &format_attr_cmask.attr,
1664 NULL,
1665};
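/*
 * Editor's note: these attributes are exported under
 * /sys/bus/event_source/devices/cpu/format/ and let the perf tool build
 * raw configs symbolically, e.g.
 *
 *   perf stat -e cpu/event=0xc0,umask=0x00,cmask=1,inv=1/ ...
 *
 * which assembles config = (1 << 24) | (1 << 23) | 0xc0 = 0x018000c0
 * from the bit ranges above.
 */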
1666
Jiri Olsa0bf79d42012-10-10 14:53:14 +02001667ssize_t intel_event_sysfs_show(char *page, u64 config)
1668{
1669 u64 event = (config & ARCH_PERFMON_EVENTSEL_EVENT);
1670
1671 return x86_event_sysfs_show(page, config, event);
1672}
1673
Peter Zijlstracaaa8be2010-03-29 13:09:53 +02001674static __initconst const struct x86_pmu core_pmu = {
Peter Zijlstraf22f54f2010-02-26 12:05:05 +01001675 .name = "core",
1676 .handle_irq = x86_pmu_handle_irq,
1677 .disable_all = x86_pmu_disable_all,
Gleb Natapov144d31e2011-10-05 14:01:21 +02001678 .enable_all = core_pmu_enable_all,
1679 .enable = core_pmu_enable_event,
Peter Zijlstraf22f54f2010-02-26 12:05:05 +01001680 .disable = x86_pmu_disable_event,
Peter Zijlstrab4cdc5c2010-03-30 17:00:06 +02001681 .hw_config = x86_pmu_hw_config,
Cyrill Gorcunova0727382010-03-11 19:54:39 +03001682 .schedule_events = x86_schedule_events,
Peter Zijlstraf22f54f2010-02-26 12:05:05 +01001683 .eventsel = MSR_ARCH_PERFMON_EVENTSEL0,
1684 .perfctr = MSR_ARCH_PERFMON_PERFCTR0,
1685 .event_map = intel_pmu_event_map,
Peter Zijlstraf22f54f2010-02-26 12:05:05 +01001686 .max_events = ARRAY_SIZE(intel_perfmon_event_map),
1687 .apic = 1,
1688 /*
1689	 * Intel PMCs cannot be accessed sanely above 32-bit width,
1690 * so we install an artificial 1<<31 period regardless of
1691 * the generic event period:
1692 */
1693 .max_period = (1ULL << 31) - 1,
1694 .get_event_constraints = intel_get_event_constraints,
Andi Kleena7e3ed12011-03-03 10:34:47 +08001695 .put_event_constraints = intel_put_event_constraints,
Peter Zijlstraf22f54f2010-02-26 12:05:05 +01001696 .event_constraints = intel_core_event_constraints,
Gleb Natapov144d31e2011-10-05 14:01:21 +02001697 .guest_get_msrs = core_guest_get_msrs,
Jiri Olsa641cc932012-03-15 20:09:14 +01001698 .format_attrs = intel_arch_formats_attr,
Jiri Olsa0bf79d42012-10-10 14:53:14 +02001699 .events_sysfs_show = intel_event_sysfs_show,
Peter Zijlstraf22f54f2010-02-26 12:05:05 +01001700};
1701
Kevin Winchesterde0428a2011-08-30 20:41:05 -03001702struct intel_shared_regs *allocate_shared_regs(int cpu)
Stephane Eranianefc9f052011-06-06 16:57:03 +02001703{
1704 struct intel_shared_regs *regs;
1705 int i;
1706
1707 regs = kzalloc_node(sizeof(struct intel_shared_regs),
1708 GFP_KERNEL, cpu_to_node(cpu));
1709 if (regs) {
1710 /*
1711 * initialize the locks to keep lockdep happy
1712 */
1713 for (i = 0; i < EXTRA_REG_MAX; i++)
1714 raw_spin_lock_init(&regs->regs[i].lock);
1715
1716 regs->core_id = -1;
1717 }
1718 return regs;
1719}
1720
Andi Kleena7e3ed12011-03-03 10:34:47 +08001721static int intel_pmu_cpu_prepare(int cpu)
1722{
1723 struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
1724
Stephane Eranianb36817e2012-02-09 23:20:53 +01001725 if (!(x86_pmu.extra_regs || x86_pmu.lbr_sel_map))
Lin Ming69092622011-03-03 10:34:50 +08001726 return NOTIFY_OK;
1727
Stephane Eranianefc9f052011-06-06 16:57:03 +02001728 cpuc->shared_regs = allocate_shared_regs(cpu);
1729 if (!cpuc->shared_regs)
Andi Kleena7e3ed12011-03-03 10:34:47 +08001730 return NOTIFY_BAD;
1731
Andi Kleena7e3ed12011-03-03 10:34:47 +08001732 return NOTIFY_OK;
1733}
1734
Peter Zijlstra74846d32010-03-05 13:49:35 +01001735static void intel_pmu_cpu_starting(int cpu)
1736{
Andi Kleena7e3ed12011-03-03 10:34:47 +08001737 struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
1738 int core_id = topology_core_id(cpu);
1739 int i;
1740
Lin Ming69092622011-03-03 10:34:50 +08001741 init_debug_store_on_cpu(cpu);
1742 /*
1743 * Deal with CPUs that don't clear their LBRs on power-up.
1744 */
1745 intel_pmu_lbr_reset();
1746
Stephane Eranianb36817e2012-02-09 23:20:53 +01001747 cpuc->lbr_sel = NULL;
1748
1749 if (!cpuc->shared_regs)
Lin Ming69092622011-03-03 10:34:50 +08001750 return;
1751
Stephane Eranianb36817e2012-02-09 23:20:53 +01001752 if (!(x86_pmu.er_flags & ERF_NO_HT_SHARING)) {
1753 for_each_cpu(i, topology_thread_cpumask(cpu)) {
1754 struct intel_shared_regs *pc;
Andi Kleena7e3ed12011-03-03 10:34:47 +08001755
Stephane Eranianb36817e2012-02-09 23:20:53 +01001756 pc = per_cpu(cpu_hw_events, i).shared_regs;
1757 if (pc && pc->core_id == core_id) {
1758 cpuc->kfree_on_online = cpuc->shared_regs;
1759 cpuc->shared_regs = pc;
1760 break;
1761 }
Andi Kleena7e3ed12011-03-03 10:34:47 +08001762 }
Stephane Eranianb36817e2012-02-09 23:20:53 +01001763 cpuc->shared_regs->core_id = core_id;
1764 cpuc->shared_regs->refcnt++;
Andi Kleena7e3ed12011-03-03 10:34:47 +08001765 }
1766
Stephane Eranianb36817e2012-02-09 23:20:53 +01001767 if (x86_pmu.lbr_sel_map)
1768 cpuc->lbr_sel = &cpuc->shared_regs->regs[EXTRA_REG_LBR];
Peter Zijlstra74846d32010-03-05 13:49:35 +01001769}
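/*
 * Illustrative walk-through (editor's addition): if CPU 5 comes online
 * and its HT sibling CPU 1 has already registered shared_regs for the
 * same core_id, the loop above adopts the sibling's structure, bumps its
 * refcnt, and parks the freshly allocated (now redundant) copy on
 * kfree_on_online so it is freed once the hotplug sequence completes.
 */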
1770
1771static void intel_pmu_cpu_dying(int cpu)
1772{
Andi Kleena7e3ed12011-03-03 10:34:47 +08001773 struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
Stephane Eranianefc9f052011-06-06 16:57:03 +02001774 struct intel_shared_regs *pc;
Andi Kleena7e3ed12011-03-03 10:34:47 +08001775
Stephane Eranianefc9f052011-06-06 16:57:03 +02001776 pc = cpuc->shared_regs;
Andi Kleena7e3ed12011-03-03 10:34:47 +08001777 if (pc) {
1778 if (pc->core_id == -1 || --pc->refcnt == 0)
1779 kfree(pc);
Stephane Eranianefc9f052011-06-06 16:57:03 +02001780 cpuc->shared_regs = NULL;
Andi Kleena7e3ed12011-03-03 10:34:47 +08001781 }
1782
Peter Zijlstra74846d32010-03-05 13:49:35 +01001783 fini_debug_store_on_cpu(cpu);
1784}
1785
Stephane Eraniand010b332012-02-09 23:21:00 +01001786static void intel_pmu_flush_branch_stack(void)
1787{
1788 /*
1789	 * Intel LBR does not tag entries with the
1790	 * PID of the current task, so we need to
1791	 * flush it on context switch.
1792	 * For now, we simply reset it.
1793 */
1794 if (x86_pmu.lbr_nr)
1795 intel_pmu_lbr_reset();
1796}
1797
Jiri Olsa641cc932012-03-15 20:09:14 +01001798PMU_FORMAT_ATTR(offcore_rsp, "config1:0-63");
1799
Stephane Eraniana63fcab2013-01-24 16:10:33 +01001800PMU_FORMAT_ATTR(ldlat, "config1:0-15");
1801
Jiri Olsa641cc932012-03-15 20:09:14 +01001802static struct attribute *intel_arch3_formats_attr[] = {
1803 &format_attr_event.attr,
1804 &format_attr_umask.attr,
1805 &format_attr_edge.attr,
1806 &format_attr_pc.attr,
1807 &format_attr_any.attr,
1808 &format_attr_inv.attr,
1809 &format_attr_cmask.attr,
1810
1811 &format_attr_offcore_rsp.attr, /* XXX do NHM/WSM + SNB breakout */
Stephane Eraniana63fcab2013-01-24 16:10:33 +01001812 &format_attr_ldlat.attr, /* PEBS load latency */
Jiri Olsa641cc932012-03-15 20:09:14 +01001813 NULL,
1814};
1815
Peter Zijlstracaaa8be2010-03-29 13:09:53 +02001816static __initconst const struct x86_pmu intel_pmu = {
Peter Zijlstraf22f54f2010-02-26 12:05:05 +01001817 .name = "Intel",
1818 .handle_irq = intel_pmu_handle_irq,
1819 .disable_all = intel_pmu_disable_all,
1820 .enable_all = intel_pmu_enable_all,
1821 .enable = intel_pmu_enable_event,
1822 .disable = intel_pmu_disable_event,
Peter Zijlstrab4cdc5c2010-03-30 17:00:06 +02001823 .hw_config = intel_pmu_hw_config,
Cyrill Gorcunova0727382010-03-11 19:54:39 +03001824 .schedule_events = x86_schedule_events,
Peter Zijlstraf22f54f2010-02-26 12:05:05 +01001825 .eventsel = MSR_ARCH_PERFMON_EVENTSEL0,
1826 .perfctr = MSR_ARCH_PERFMON_PERFCTR0,
1827 .event_map = intel_pmu_event_map,
Peter Zijlstraf22f54f2010-02-26 12:05:05 +01001828 .max_events = ARRAY_SIZE(intel_perfmon_event_map),
1829 .apic = 1,
1830 /*
1831	 * Intel PMCs cannot be accessed sanely above 32-bit width,
1832 * so we install an artificial 1<<31 period regardless of
1833 * the generic event period:
1834 */
1835 .max_period = (1ULL << 31) - 1,
Peter Zijlstra3f6da392010-03-05 13:01:18 +01001836 .get_event_constraints = intel_get_event_constraints,
Andi Kleena7e3ed12011-03-03 10:34:47 +08001837 .put_event_constraints = intel_put_event_constraints,
Peter Zijlstra0780c922012-06-05 10:26:43 +02001838 .pebs_aliases = intel_pebs_aliases_core2,
Peter Zijlstra3f6da392010-03-05 13:01:18 +01001839
Jiri Olsa641cc932012-03-15 20:09:14 +01001840 .format_attrs = intel_arch3_formats_attr,
Jiri Olsa0bf79d42012-10-10 14:53:14 +02001841 .events_sysfs_show = intel_event_sysfs_show,
Jiri Olsa641cc932012-03-15 20:09:14 +01001842
Andi Kleena7e3ed12011-03-03 10:34:47 +08001843 .cpu_prepare = intel_pmu_cpu_prepare,
Peter Zijlstra74846d32010-03-05 13:49:35 +01001844 .cpu_starting = intel_pmu_cpu_starting,
1845 .cpu_dying = intel_pmu_cpu_dying,
Gleb Natapov144d31e2011-10-05 14:01:21 +02001846 .guest_get_msrs = intel_guest_get_msrs,
Stephane Eraniand010b332012-02-09 23:21:00 +01001847 .flush_branch_stack = intel_pmu_flush_branch_stack,
Peter Zijlstraf22f54f2010-02-26 12:05:05 +01001848};
1849
Peter Zijlstrac1d6f422011-12-06 14:07:15 +01001850static __init void intel_clovertown_quirk(void)
Peter Zijlstra3c447802010-03-04 21:49:01 +01001851{
1852 /*
1853 * PEBS is unreliable due to:
1854 *
1855 * AJ67 - PEBS may experience CPL leaks
1856 * AJ68 - PEBS PMI may be delayed by one event
1857 * AJ69 - GLOBAL_STATUS[62] will only be set when DEBUGCTL[12]
1858 * AJ106 - FREEZE_LBRS_ON_PMI doesn't work in combination with PEBS
1859 *
1860 * AJ67 could be worked around by restricting the OS/USR flags.
1861 * AJ69 could be worked around by setting PMU_FREEZE_ON_PMI.
1862 *
1863 * AJ106 could possibly be worked around by not allowing LBR
1864 * usage from PEBS, including the fixup.
1865 * AJ68 could possibly be worked around by always programming
Ingo Molnarec75a712011-04-27 11:51:41 +02001866 * a pebs_event_reset[0] value and coping with the lost events.
Peter Zijlstra3c447802010-03-04 21:49:01 +01001867 *
1868 * But taken together it might just make sense to not enable PEBS on
1869 * these chips.
1870 */
Joe Perchesc767a542012-05-21 19:50:07 -07001871 pr_warn("PEBS disabled due to CPU errata\n");
Peter Zijlstra3c447802010-03-04 21:49:01 +01001872 x86_pmu.pebs = 0;
1873 x86_pmu.pebs_constraints = NULL;
1874}
1875
Peter Zijlstrac93dc842012-06-08 14:50:50 +02001876static int intel_snb_pebs_broken(int cpu)
1877{
1878 u32 rev = UINT_MAX; /* default to broken for unknown models */
1879
1880 switch (cpu_data(cpu).x86_model) {
1881 case 42: /* SNB */
1882 rev = 0x28;
1883 break;
1884
1885 case 45: /* SNB-EP */
1886 switch (cpu_data(cpu).x86_mask) {
1887 case 6: rev = 0x618; break;
1888 case 7: rev = 0x70c; break;
1889 }
1890 }
1891
1892 return (cpu_data(cpu).microcode < rev);
1893}
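/*
 * Editor's note: the running revision compared against here is visible
 * from userspace as the "microcode" field in /proc/cpuinfo; e.g. a
 * model-42 SandyBridge reporting less than 0x28 is treated as broken.
 */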
1894
1895static void intel_snb_check_microcode(void)
1896{
1897 int pebs_broken = 0;
1898 int cpu;
1899
1900 get_online_cpus();
1901 for_each_online_cpu(cpu) {
1902 if ((pebs_broken = intel_snb_pebs_broken(cpu)))
1903 break;
1904 }
1905 put_online_cpus();
1906
1907 if (pebs_broken == x86_pmu.pebs_broken)
1908 return;
1909
1910 /*
1911	 * Serialized by the microcode lock.
1912 */
1913 if (x86_pmu.pebs_broken) {
1914 pr_info("PEBS enabled due to microcode update\n");
1915 x86_pmu.pebs_broken = 0;
1916 } else {
1917 pr_info("PEBS disabled due to CPU errata, please upgrade microcode\n");
1918 x86_pmu.pebs_broken = 1;
1919 }
1920}
1921
Peter Zijlstrac1d6f422011-12-06 14:07:15 +01001922static __init void intel_sandybridge_quirk(void)
Peter Zijlstra6a600a82011-11-15 10:51:15 +01001923{
Peter Zijlstrac93dc842012-06-08 14:50:50 +02001924 x86_pmu.check_microcode = intel_snb_check_microcode;
1925 intel_snb_check_microcode();
Peter Zijlstra6a600a82011-11-15 10:51:15 +01001926}
1927
Peter Zijlstrac1d6f422011-12-06 14:07:15 +01001928static const struct { int id; char *name; } intel_arch_events_map[] __initconst = {
1929 { PERF_COUNT_HW_CPU_CYCLES, "cpu cycles" },
1930 { PERF_COUNT_HW_INSTRUCTIONS, "instructions" },
1931 { PERF_COUNT_HW_BUS_CYCLES, "bus cycles" },
1932 { PERF_COUNT_HW_CACHE_REFERENCES, "cache references" },
1933 { PERF_COUNT_HW_CACHE_MISSES, "cache misses" },
1934 { PERF_COUNT_HW_BRANCH_INSTRUCTIONS, "branch instructions" },
1935 { PERF_COUNT_HW_BRANCH_MISSES, "branch misses" },
Gleb Natapovffb871b2011-11-10 14:57:26 +02001936};
1937
Peter Zijlstrac1d6f422011-12-06 14:07:15 +01001938static __init void intel_arch_events_quirk(void)
1939{
1940 int bit;
1941
1942	/* disable events reported as not present by cpuid */
1943 for_each_set_bit(bit, x86_pmu.events_mask, ARRAY_SIZE(intel_arch_events_map)) {
1944 intel_perfmon_event_map[intel_arch_events_map[bit].id] = 0;
Joe Perchesc767a542012-05-21 19:50:07 -07001945 pr_warn("CPUID marked event: \'%s\' unavailable\n",
1946 intel_arch_events_map[bit].name);
Peter Zijlstrac1d6f422011-12-06 14:07:15 +01001947 }
1948}
1949
1950static __init void intel_nehalem_quirk(void)
1951{
1952 union cpuid10_ebx ebx;
1953
1954 ebx.full = x86_pmu.events_maskl;
1955 if (ebx.split.no_branch_misses_retired) {
1956 /*
1957		 * Erratum AAJ80 detected, we work around it by using
1958 * the BR_MISP_EXEC.ANY event. This will over-count
1959 * branch-misses, but it's still much better than the
1960 * architectural event which is often completely bogus:
1961 */
1962 intel_perfmon_event_map[PERF_COUNT_HW_BRANCH_MISSES] = 0x7f89;
1963 ebx.split.no_branch_misses_retired = 0;
1964 x86_pmu.events_maskl = ebx.full;
Joe Perchesc767a542012-05-21 19:50:07 -07001965 pr_info("CPU erratum AAJ80 worked around\n");
Peter Zijlstrac1d6f422011-12-06 14:07:15 +01001966 }
1967}
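/*
 * Editor's note: decoding the substitute 0x7f89 against the format bits
 * above gives event select 0x89 (BR_MISP_EXEC) with umask 0x7f, i.e.
 * all branch-misprediction sub-events combined.
 */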
1968
Kevin Winchesterde0428a2011-08-30 20:41:05 -03001969__init int intel_pmu_init(void)
Peter Zijlstraf22f54f2010-02-26 12:05:05 +01001970{
1971 union cpuid10_edx edx;
1972 union cpuid10_eax eax;
Gleb Natapovffb871b2011-11-10 14:57:26 +02001973 union cpuid10_ebx ebx;
Robert Richtera1eac7a2012-06-20 20:46:34 +02001974 struct event_constraint *c;
Peter Zijlstraf22f54f2010-02-26 12:05:05 +01001975 unsigned int unused;
Peter Zijlstraf22f54f2010-02-26 12:05:05 +01001976 int version;
1977
1978 if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
Cyrill Gorcunova0727382010-03-11 19:54:39 +03001979 switch (boot_cpu_data.x86) {
1980 case 0x6:
1981 return p6_pmu_init();
Vince Weavere717bf42012-09-26 14:12:52 -04001982 case 0xb:
1983 return knc_pmu_init();
Cyrill Gorcunova0727382010-03-11 19:54:39 +03001984 case 0xf:
1985 return p4_pmu_init();
1986 }
Peter Zijlstraf22f54f2010-02-26 12:05:05 +01001987 return -ENODEV;
Peter Zijlstraf22f54f2010-02-26 12:05:05 +01001988 }
1989
1990 /*
1991	 * Check whether the Architectural PerfMon supports
1992	 * the Branch Misses Retired hw_event.
1993 */
Gleb Natapovffb871b2011-11-10 14:57:26 +02001994 cpuid(10, &eax.full, &ebx.full, &unused, &edx.full);
1995 if (eax.split.mask_length < ARCH_PERFMON_EVENTS_COUNT)
Peter Zijlstraf22f54f2010-02-26 12:05:05 +01001996 return -ENODEV;
1997
1998 version = eax.split.version_id;
1999 if (version < 2)
2000 x86_pmu = core_pmu;
2001 else
2002 x86_pmu = intel_pmu;
2003
2004 x86_pmu.version = version;
Robert Richter948b1bb2010-03-29 18:36:50 +02002005 x86_pmu.num_counters = eax.split.num_counters;
2006 x86_pmu.cntval_bits = eax.split.bit_width;
2007 x86_pmu.cntval_mask = (1ULL << eax.split.bit_width) - 1;
Peter Zijlstraf22f54f2010-02-26 12:05:05 +01002008
Peter Zijlstrac1d6f422011-12-06 14:07:15 +01002009 x86_pmu.events_maskl = ebx.full;
2010 x86_pmu.events_mask_len = eax.split.mask_length;
2011
Andi Kleen70ab7002012-06-05 17:56:48 -07002012 x86_pmu.max_pebs_events = min_t(unsigned, MAX_PEBS_EVENTS, x86_pmu.num_counters);
2013
Peter Zijlstraf22f54f2010-02-26 12:05:05 +01002014 /*
2015 * Quirk: v2 perfmon does not report fixed-purpose events, so
2016 * assume at least 3 events:
2017 */
2018 if (version > 1)
Robert Richter948b1bb2010-03-29 18:36:50 +02002019 x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3);
Peter Zijlstraf22f54f2010-02-26 12:05:05 +01002020
Peter Zijlstra8db909a2010-03-03 17:07:40 +01002021 /*
2022 * v2 and above have a perf capabilities MSR
2023 */
2024 if (version > 1) {
2025 u64 capabilities;
2026
2027 rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
2028 x86_pmu.intel_cap.capabilities = capabilities;
2029 }
2030
Peter Zijlstraca037702010-03-02 19:52:12 +01002031 intel_ds_init();
2032
Peter Zijlstrac1d6f422011-12-06 14:07:15 +01002033 x86_add_quirk(intel_arch_events_quirk); /* Install first, so it runs last */
2034
Peter Zijlstraf22f54f2010-02-26 12:05:05 +01002035 /*
2036 * Install the hw-cache-events table:
2037 */
2038 switch (boot_cpu_data.x86_model) {
2039 case 14: /* 65 nm core solo/duo, "Yonah" */
2040 pr_cont("Core events, ");
2041 break;
2042
2043 case 15: /* original 65 nm celeron/pentium/core2/xeon, "Merom"/"Conroe" */
Peter Zijlstrac1d6f422011-12-06 14:07:15 +01002044 x86_add_quirk(intel_clovertown_quirk);
Peter Zijlstraf22f54f2010-02-26 12:05:05 +01002045 case 22: /* single-core 65 nm celeron/core2solo "Merom-L"/"Conroe-L" */
2046 case 23: /* current 45 nm celeron/core2/xeon "Penryn"/"Wolfdale" */
2047 case 29: /* six-core 45 nm xeon "Dunnington" */
2048 memcpy(hw_cache_event_ids, core2_hw_cache_event_ids,
2049 sizeof(hw_cache_event_ids));
2050
Peter Zijlstracaff2be2010-03-03 12:02:30 +01002051 intel_pmu_lbr_init_core();
2052
Peter Zijlstraf22f54f2010-02-26 12:05:05 +01002053 x86_pmu.event_constraints = intel_core2_event_constraints;
Stephane Eranian17e31622011-03-02 17:05:01 +02002054 x86_pmu.pebs_constraints = intel_core2_pebs_event_constraints;
Peter Zijlstraf22f54f2010-02-26 12:05:05 +01002055 pr_cont("Core2 events, ");
2056 break;
2057
2058 case 26: /* 45 nm nehalem, "Bloomfield" */
2059 case 30: /* 45 nm nehalem, "Lynnfield" */
Vince Weaver134fbad2010-04-06 10:01:19 -04002060 case 46: /* 45 nm nehalem-ex, "Beckton" */
Peter Zijlstraf22f54f2010-02-26 12:05:05 +01002061 memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
2062 sizeof(hw_cache_event_ids));
Andi Kleene994d7d2011-03-03 10:34:48 +08002063 memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
2064 sizeof(hw_cache_extra_regs));
Peter Zijlstraf22f54f2010-02-26 12:05:05 +01002065
Peter Zijlstracaff2be2010-03-03 12:02:30 +01002066 intel_pmu_lbr_init_nhm();
2067
Peter Zijlstraf22f54f2010-02-26 12:05:05 +01002068 x86_pmu.event_constraints = intel_nehalem_event_constraints;
Stephane Eranian17e31622011-03-02 17:05:01 +02002069 x86_pmu.pebs_constraints = intel_nehalem_pebs_event_constraints;
Peter Zijlstra11164cd2010-03-26 14:08:44 +01002070 x86_pmu.enable_all = intel_pmu_nhm_enable_all;
Andi Kleena7e3ed12011-03-03 10:34:47 +08002071 x86_pmu.extra_regs = intel_nehalem_extra_regs;
Ingo Molnarec75a712011-04-27 11:51:41 +02002072
Stephane Eranianf20093e2013-01-24 16:10:32 +01002073 x86_pmu.cpu_events = nhm_events_attrs;
2074
Ingo Molnar91fc4cc2011-04-29 14:17:19 +02002075 /* UOPS_ISSUED.STALLED_CYCLES */
Peter Zijlstraf9b4eeb2012-03-12 12:44:35 +01002076 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
2077 X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
Ingo Molnar91fc4cc2011-04-29 14:17:19 +02002078 /* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */
Peter Zijlstraf9b4eeb2012-03-12 12:44:35 +01002079 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
2080 X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1);
Ingo Molnar94403f82011-04-24 08:18:31 +02002081
Peter Zijlstrac1d6f422011-12-06 14:07:15 +01002082 x86_add_quirk(intel_nehalem_quirk);
Ingo Molnarec75a712011-04-27 11:51:41 +02002083
Peter Zijlstra11164cd2010-03-26 14:08:44 +01002084 pr_cont("Nehalem events, ");
Peter Zijlstraf22f54f2010-02-26 12:05:05 +01002085 break;
Peter Zijlstracaff2be2010-03-03 12:02:30 +01002086
Peter Zijlstrab622d642010-02-01 15:36:30 +01002087 case 28: /* Atom */
ShuoX Liu0927b482012-12-29 00:48:44 +08002088 case 38: /* Lincroft */
2089 case 39: /* Penwell */
2090 case 53: /* Cloverview */
2091 case 54: /* Cedarview */
Peter Zijlstraf22f54f2010-02-26 12:05:05 +01002092 memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
2093 sizeof(hw_cache_event_ids));
2094
Peter Zijlstracaff2be2010-03-03 12:02:30 +01002095 intel_pmu_lbr_init_atom();
2096
Peter Zijlstraf22f54f2010-02-26 12:05:05 +01002097 x86_pmu.event_constraints = intel_gen_event_constraints;
Stephane Eranian17e31622011-03-02 17:05:01 +02002098 x86_pmu.pebs_constraints = intel_atom_pebs_event_constraints;
Peter Zijlstraf22f54f2010-02-26 12:05:05 +01002099 pr_cont("Atom events, ");
2100 break;
2101
2102 case 37: /* 32 nm nehalem, "Clarkdale" */
2103 case 44: /* 32 nm nehalem, "Gulftown" */
Andi Kleenb2508e82011-04-21 16:48:35 -07002104 case 47: /* 32 nm Xeon E7 */
Peter Zijlstraf22f54f2010-02-26 12:05:05 +01002105 memcpy(hw_cache_event_ids, westmere_hw_cache_event_ids,
2106 sizeof(hw_cache_event_ids));
Andi Kleene994d7d2011-03-03 10:34:48 +08002107 memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
2108 sizeof(hw_cache_extra_regs));
Peter Zijlstraf22f54f2010-02-26 12:05:05 +01002109
Peter Zijlstracaff2be2010-03-03 12:02:30 +01002110 intel_pmu_lbr_init_nhm();
2111
Peter Zijlstraf22f54f2010-02-26 12:05:05 +01002112 x86_pmu.event_constraints = intel_westmere_event_constraints;
Peter Zijlstra40b91cd2010-03-29 16:37:17 +02002113 x86_pmu.enable_all = intel_pmu_nhm_enable_all;
Stephane Eranian17e31622011-03-02 17:05:01 +02002114 x86_pmu.pebs_constraints = intel_westmere_pebs_event_constraints;
Andi Kleena7e3ed12011-03-03 10:34:47 +08002115 x86_pmu.extra_regs = intel_westmere_extra_regs;
Peter Zijlstrab79e8942011-05-23 11:08:15 +02002116 x86_pmu.er_flags |= ERF_HAS_RSP_1;
Ingo Molnar30112032011-04-30 09:14:54 +02002117
Stephane Eranianf20093e2013-01-24 16:10:32 +01002118 x86_pmu.cpu_events = nhm_events_attrs;
2119
Ingo Molnar30112032011-04-30 09:14:54 +02002120 /* UOPS_ISSUED.STALLED_CYCLES */
Peter Zijlstraf9b4eeb2012-03-12 12:44:35 +01002121 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
2122 X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
Ingo Molnar30112032011-04-30 09:14:54 +02002123 /* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */
Peter Zijlstraf9b4eeb2012-03-12 12:44:35 +01002124 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
2125 X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1);
Ingo Molnar30112032011-04-30 09:14:54 +02002126
Peter Zijlstraf22f54f2010-02-26 12:05:05 +01002127 pr_cont("Westmere events, ");
2128 break;
Peter Zijlstrab622d642010-02-01 15:36:30 +01002129
Lin Mingb06b3d42011-03-02 21:27:04 +08002130 case 42: /* SandyBridge */
Youquan Songa34668f2011-08-02 14:01:35 +08002131	case 45: /* SandyBridge, "Romley-EP" */
Peter Zijlstra47a88632012-06-05 10:26:43 +02002132 x86_add_quirk(intel_sandybridge_quirk);
Lin Mingb06b3d42011-03-02 21:27:04 +08002133 memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
2134 sizeof(hw_cache_event_ids));
Yan, Zheng74e65432012-07-17 17:27:55 +08002135 memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs,
2136 sizeof(hw_cache_extra_regs));
Lin Mingb06b3d42011-03-02 21:27:04 +08002137
Stephane Eranianc5cc2cd2012-02-09 23:20:55 +01002138 intel_pmu_lbr_init_snb();
Lin Mingb06b3d42011-03-02 21:27:04 +08002139
2140 x86_pmu.event_constraints = intel_snb_event_constraints;
Kevin Winchesterde0428a2011-08-30 20:41:05 -03002141 x86_pmu.pebs_constraints = intel_snb_pebs_event_constraints;
Peter Zijlstra0780c922012-06-05 10:26:43 +02002142 x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
Stephane Eranianf1923822013-04-16 13:51:43 +02002143 if (boot_cpu_data.x86_model == 45)
2144 x86_pmu.extra_regs = intel_snbep_extra_regs;
2145 else
2146 x86_pmu.extra_regs = intel_snb_extra_regs;
Stephane Eranianee89cbc2011-06-06 16:57:12 +02002147 /* all extra regs are per-cpu when HT is on */
Peter Zijlstrab79e8942011-05-23 11:08:15 +02002148 x86_pmu.er_flags |= ERF_HAS_RSP_1;
2149 x86_pmu.er_flags |= ERF_NO_HT_SHARING;
Lin Minge04d1b22011-05-06 07:14:02 +00002150
Stephane Eranianf20093e2013-01-24 16:10:32 +01002151 x86_pmu.cpu_events = snb_events_attrs;
2152
Lin Minge04d1b22011-05-06 07:14:02 +00002153 /* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */
Peter Zijlstraf9b4eeb2012-03-12 12:44:35 +01002154 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
2155 X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
Lin Minge04d1b22011-05-06 07:14:02 +00002156 /* UOPS_DISPATCHED.THREAD,c=1,i=1 to count stall cycles*/
Peter Zijlstraf9b4eeb2012-03-12 12:44:35 +01002157 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
2158 X86_CONFIG(.event=0xb1, .umask=0x01, .inv=1, .cmask=1);
Lin Minge04d1b22011-05-06 07:14:02 +00002159
Lin Mingb06b3d42011-03-02 21:27:04 +08002160 pr_cont("SandyBridge events, ");
2161 break;
Stephane Eranian20a36e32012-09-11 01:07:01 +02002162 case 58: /* IvyBridge */
Youquan Song923d8692012-12-18 12:20:23 -05002163 case 62: /* IvyBridge EP */
Stephane Eranian20a36e32012-09-11 01:07:01 +02002164 memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
2165 sizeof(hw_cache_event_ids));
2166 memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs,
2167 sizeof(hw_cache_extra_regs));
2168
2169 intel_pmu_lbr_init_snb();
2170
Stephane Eranian69943182013-02-20 11:15:12 +01002171 x86_pmu.event_constraints = intel_ivb_event_constraints;
Stephane Eranian20a36e32012-09-11 01:07:01 +02002172 x86_pmu.pebs_constraints = intel_ivb_pebs_event_constraints;
2173 x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
Stephane Eranianf1923822013-04-16 13:51:43 +02002174 if (boot_cpu_data.x86_model == 62)
2175 x86_pmu.extra_regs = intel_snbep_extra_regs;
2176 else
2177 x86_pmu.extra_regs = intel_snb_extra_regs;
Stephane Eranian20a36e32012-09-11 01:07:01 +02002178 /* all extra regs are per-cpu when HT is on */
2179 x86_pmu.er_flags |= ERF_HAS_RSP_1;
2180 x86_pmu.er_flags |= ERF_NO_HT_SHARING;
2181
Stephane Eranianf20093e2013-01-24 16:10:32 +01002182 x86_pmu.cpu_events = snb_events_attrs;
2183
Stephane Eranian20a36e32012-09-11 01:07:01 +02002184 /* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */
2185 intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
2186 X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
2187
2188 pr_cont("IvyBridge events, ");
2189 break;
2190
Peter Zijlstraf22f54f2010-02-26 12:05:05 +01002192 default:
Avi Kivity0af3ac12011-06-29 18:42:36 +03002193 switch (x86_pmu.version) {
2194 case 1:
2195 x86_pmu.event_constraints = intel_v1_event_constraints;
2196 pr_cont("generic architected perfmon v1, ");
2197 break;
2198 default:
2199 /*
2200 * default constraints for v2 and up
2201 */
2202 x86_pmu.event_constraints = intel_gen_event_constraints;
2203 pr_cont("generic architected perfmon, ");
2204 break;
2205 }
Peter Zijlstraf22f54f2010-02-26 12:05:05 +01002206 }
Gleb Natapovffb871b2011-11-10 14:57:26 +02002207
Robert Richtera1eac7a2012-06-20 20:46:34 +02002208 if (x86_pmu.num_counters > INTEL_PMC_MAX_GENERIC) {
2209 WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
2210 x86_pmu.num_counters, INTEL_PMC_MAX_GENERIC);
2211 x86_pmu.num_counters = INTEL_PMC_MAX_GENERIC;
2212 }
2213 x86_pmu.intel_ctrl = (1 << x86_pmu.num_counters) - 1;
2214
2215 if (x86_pmu.num_counters_fixed > INTEL_PMC_MAX_FIXED) {
2216 WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
2217 x86_pmu.num_counters_fixed, INTEL_PMC_MAX_FIXED);
2218 x86_pmu.num_counters_fixed = INTEL_PMC_MAX_FIXED;
2219 }
2220
2221 x86_pmu.intel_ctrl |=
2222 ((1LL << x86_pmu.num_counters_fixed)-1) << INTEL_PMC_IDX_FIXED;
2223
2224 if (x86_pmu.event_constraints) {
2225 /*
2226		 * the event on fixed counter 2 (REF_CYCLES) only works on this
2227		 * counter, so do not extend its mask to generic counters
2228 */
2229 for_each_event_constraint(c, x86_pmu.event_constraints) {
2230 if (c->cmask != X86_RAW_EVENT_MASK
2231 || c->idxmsk64 == INTEL_PMC_MSK_FIXED_REF_CYCLES) {
2232 continue;
2233 }
2234
2235 c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1;
2236 c->weight += x86_pmu.num_counters;
2237 }
2238 }
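	/*
	 * Editor's example, assuming FIXED_EVENT_CONSTRAINT() uses
	 * X86_RAW_EVENT_MASK as its cmask: FIXED_EVENT_CONSTRAINT(0x00c0, 0)
	 * starts out with idxmsk64 = 1ULL << 32 (fixed counter 0), and the
	 * loop above widens it to also cover generic counters
	 * 0..num_counters-1; only the REF_CYCLES constraint (fixed counter
	 * 2) is skipped, because that event counts nowhere else.
	 */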
2239
Peter Zijlstraf22f54f2010-02-26 12:05:05 +01002240 return 0;
2241}