#include <linux/perf_event.h>
#include <linux/types.h>

#include <asm/perf_event.h>
#include <asm/msr.h>
#include <asm/insn.h>

#include "../perf_event.h"

enum {
	LBR_FORMAT_32		= 0x00,
	LBR_FORMAT_LIP		= 0x01,
	LBR_FORMAT_EIP		= 0x02,
	LBR_FORMAT_EIP_FLAGS	= 0x03,
	LBR_FORMAT_EIP_FLAGS2	= 0x04,
	LBR_FORMAT_INFO		= 0x05,
	LBR_FORMAT_TIME		= 0x06,
	LBR_FORMAT_MAX_KNOWN	= LBR_FORMAT_TIME,
};

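/*
 * Per-format decode hints for the raw MSR_LAST_BRANCH_FROM_x value:
 * LBR_EIP_FLAGS means bit 63 carries the mispredict flag, LBR_TSX means
 * bits 61:62 carry the transaction state (see the LBR_FROM_FLAG_*
 * definitions below).
 */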
static enum {
	LBR_EIP_FLAGS		= 1,
	LBR_TSX			= 2,
} lbr_desc[LBR_FORMAT_MAX_KNOWN + 1] = {
	[LBR_FORMAT_EIP_FLAGS]  = LBR_EIP_FLAGS,
	[LBR_FORMAT_EIP_FLAGS2] = LBR_EIP_FLAGS | LBR_TSX,
};

/*
 * Intel LBR_SELECT bits
 * Intel Vol3a, April 2011, Section 16.7 Table 16-10
 *
 * Hardware branch filter (not available on all CPUs)
 */
#define LBR_KERNEL_BIT		0 /* do not capture at ring0 */
#define LBR_USER_BIT		1 /* do not capture at ring > 0 */
#define LBR_JCC_BIT		2 /* do not capture conditional branches */
#define LBR_REL_CALL_BIT	3 /* do not capture relative calls */
#define LBR_IND_CALL_BIT	4 /* do not capture indirect calls */
#define LBR_RETURN_BIT		5 /* do not capture near returns */
#define LBR_IND_JMP_BIT		6 /* do not capture indirect jumps */
#define LBR_REL_JMP_BIT		7 /* do not capture relative jumps */
#define LBR_FAR_BIT		8 /* do not capture far branches */
#define LBR_CALL_STACK_BIT	9 /* enable call stack */

/*
 * Following bit only exists in Linux; we mask it out before writing it to
 * the actual MSR. But it helps the constraint perf code to understand
 * that this is a separate configuration.
 */
#define LBR_NO_INFO_BIT	       63 /* don't read LBR_INFO. */

#define LBR_KERNEL	(1 << LBR_KERNEL_BIT)
#define LBR_USER	(1 << LBR_USER_BIT)
#define LBR_JCC		(1 << LBR_JCC_BIT)
#define LBR_REL_CALL	(1 << LBR_REL_CALL_BIT)
#define LBR_IND_CALL	(1 << LBR_IND_CALL_BIT)
#define LBR_RETURN	(1 << LBR_RETURN_BIT)
#define LBR_REL_JMP	(1 << LBR_REL_JMP_BIT)
#define LBR_IND_JMP	(1 << LBR_IND_JMP_BIT)
#define LBR_FAR		(1 << LBR_FAR_BIT)
#define LBR_CALL_STACK	(1 << LBR_CALL_STACK_BIT)
#define LBR_NO_INFO	(1ULL << LBR_NO_INFO_BIT)

#define LBR_PLM (LBR_KERNEL | LBR_USER)

#define LBR_SEL_MASK	0x3ff	/* valid bits in LBR_SELECT */
#define LBR_NOT_SUPP	-1	/* LBR filter not supported */
#define LBR_IGN		0	/* ignored */

#define LBR_ANY		 \
	(LBR_JCC	|\
	 LBR_REL_CALL	|\
	 LBR_IND_CALL	|\
	 LBR_RETURN	|\
	 LBR_REL_JMP	|\
	 LBR_IND_JMP	|\
	 LBR_FAR)

#define LBR_FROM_FLAG_MISPRED	BIT_ULL(63)
#define LBR_FROM_FLAG_IN_TX	BIT_ULL(62)
#define LBR_FROM_FLAG_ABORT	BIT_ULL(61)

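/*
 * Bits 59:60 sit just below the TSX flag bits and are guaranteed to be
 * part of the sign extension of the branch address, so they can be
 * copied upward to rebuild bits 61:62 (see the sign-extension quirk
 * below).
 */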
#define LBR_FROM_SIGNEXT_2MSB	(BIT_ULL(60) | BIT_ULL(59))

/*
 * x86 control flow change classification
 * x86 control flow changes include branches, interrupts, traps, faults
 */
enum {
	X86_BR_NONE		= 0,      /* unknown */

	X86_BR_USER		= 1 << 0, /* branch target is user */
	X86_BR_KERNEL		= 1 << 1, /* branch target is kernel */

	X86_BR_CALL		= 1 << 2, /* call */
	X86_BR_RET		= 1 << 3, /* return */
	X86_BR_SYSCALL		= 1 << 4, /* syscall */
	X86_BR_SYSRET		= 1 << 5, /* syscall return */
	X86_BR_INT		= 1 << 6, /* sw interrupt */
	X86_BR_IRET		= 1 << 7, /* return from interrupt */
	X86_BR_JCC		= 1 << 8, /* conditional */
	X86_BR_JMP		= 1 << 9, /* jump */
	X86_BR_IRQ		= 1 << 10,/* hw interrupt or trap or fault */
	X86_BR_IND_CALL		= 1 << 11,/* indirect calls */
	X86_BR_ABORT		= 1 << 12,/* transaction abort */
	X86_BR_IN_TX		= 1 << 13,/* in transaction */
	X86_BR_NO_TX		= 1 << 14,/* not in transaction */
	X86_BR_ZERO_CALL	= 1 << 15,/* zero length call */
	X86_BR_CALL_STACK	= 1 << 16,/* call stack */
	X86_BR_IND_JMP		= 1 << 17,/* indirect jump */
};

#define X86_BR_PLM (X86_BR_USER | X86_BR_KERNEL)
#define X86_BR_ANYTX (X86_BR_NO_TX | X86_BR_IN_TX)

#define X86_BR_ANY       \
	(X86_BR_CALL    |\
	 X86_BR_RET     |\
	 X86_BR_SYSCALL |\
	 X86_BR_SYSRET  |\
	 X86_BR_INT     |\
	 X86_BR_IRET    |\
	 X86_BR_JCC     |\
	 X86_BR_JMP     |\
	 X86_BR_IRQ     |\
	 X86_BR_ABORT   |\
	 X86_BR_IND_CALL |\
	 X86_BR_IND_JMP  |\
	 X86_BR_ZERO_CALL)

#define X86_BR_ALL (X86_BR_PLM | X86_BR_ANY)

#define X86_BR_ANY_CALL		 \
	(X86_BR_CALL		|\
	 X86_BR_IND_CALL	|\
	 X86_BR_ZERO_CALL	|\
	 X86_BR_SYSCALL		|\
	 X86_BR_IRQ		|\
	 X86_BR_INT)

static void intel_pmu_lbr_filter(struct cpu_hw_events *cpuc);

/*
 * We only support LBR implementations that have FREEZE_LBRS_ON_PMI,
 * otherwise it becomes near impossible to get a reliable stack.
 */

static void __intel_pmu_lbr_enable(bool pmi)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	u64 debugctl, lbr_select = 0, orig_debugctl;

	/*
	 * No need to unfreeze manually, as v4 can do that as part
	 * of the GLOBAL_STATUS ack.
	 */
	if (pmi && x86_pmu.version >= 4)
		return;

	/*
	 * No need to reprogram LBR_SELECT in a PMI, as it
	 * did not change.
	 */
	if (cpuc->lbr_sel)
		lbr_select = cpuc->lbr_sel->config & x86_pmu.lbr_sel_mask;
	if (!pmi && cpuc->lbr_sel)
		wrmsrl(MSR_LBR_SELECT, lbr_select);

	rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
	orig_debugctl = debugctl;
	debugctl |= DEBUGCTLMSR_LBR;
	/*
	 * LBR callstack does not work well with FREEZE_LBRS_ON_PMI.
	 * If FREEZE_LBRS_ON_PMI is set, PMI near call/return instructions
	 * may cause superfluous increase/decrease of LBR_TOS.
	 */
	if (!(lbr_select & LBR_CALL_STACK))
		debugctl |= DEBUGCTLMSR_FREEZE_LBRS_ON_PMI;
	if (orig_debugctl != debugctl)
		wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
}

static void __intel_pmu_lbr_disable(void)
{
	u64 debugctl;

	rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
	debugctl &= ~(DEBUGCTLMSR_LBR | DEBUGCTLMSR_FREEZE_LBRS_ON_PMI);
	wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
}

static void intel_pmu_lbr_reset_32(void)
{
	int i;

	for (i = 0; i < x86_pmu.lbr_nr; i++)
		wrmsrl(x86_pmu.lbr_from + i, 0);
}

static void intel_pmu_lbr_reset_64(void)
{
	int i;

	for (i = 0; i < x86_pmu.lbr_nr; i++) {
		wrmsrl(x86_pmu.lbr_from + i, 0);
		wrmsrl(x86_pmu.lbr_to   + i, 0);
		if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
			wrmsrl(MSR_LBR_INFO_0 + i, 0);
	}
}

void intel_pmu_lbr_reset(void)
{
	if (!x86_pmu.lbr_nr)
		return;

	if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_32)
		intel_pmu_lbr_reset_32();
	else
		intel_pmu_lbr_reset_64();
}

/*
 * TOS = most recently recorded branch
 */
static inline u64 intel_pmu_lbr_tos(void)
{
	u64 tos;

	rdmsrl(x86_pmu.lbr_tos, tos);
	return tos;
}

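/*
 * Per-task LBR context-switch state: LBR_NONE means nothing usable is
 * saved in the task context, LBR_VALID means the entries were captured
 * at sched-out time and can be written back on sched-in.
 */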
enum {
	LBR_NONE,
	LBR_VALID,
};

/*
 * For formats with LBR_TSX flags (e.g. LBR_FORMAT_EIP_FLAGS2), bits 61:62 in
 * MSR_LAST_BRANCH_FROM_x are the TSX flags when TSX is supported, but when
 * TSX is not supported they have no consistent behavior:
 *
 *   - For wrmsr(), bits 61:62 are considered part of the sign extension.
 *   - For HW updates (branch captures) bits 61:62 are always OFF and are not
 *     part of the sign extension.
 *
 * Therefore, if:
 *
 *   1) LBR has TSX format
 *   2) CPU has no TSX support enabled
 *
 * ... then any value passed to wrmsr() must be sign extended to 63 bits and any
 * value from rdmsr() must be converted to have a 61 bits sign extension,
 * ignoring the TSX flags.
 */
static inline bool lbr_from_signext_quirk_needed(void)
{
	int lbr_format = x86_pmu.intel_cap.lbr_format;
	bool tsx_support = boot_cpu_has(X86_FEATURE_HLE) ||
			   boot_cpu_has(X86_FEATURE_RTM);

	return !tsx_support && (lbr_desc[lbr_format] & LBR_TSX);
}

DEFINE_STATIC_KEY_FALSE(lbr_from_quirk_key);

/* If quirk is enabled, ensure sign extension is 63 bits: */
inline u64 lbr_from_signext_quirk_wr(u64 val)
{
	if (static_branch_unlikely(&lbr_from_quirk_key)) {
		/*
		 * Sign extend into bits 61:62 while preserving bit 63.
		 *
		 * Quirk is enabled when TSX is disabled. Therefore TSX
		 * bits in val are always OFF and must be changed to be
		 * sign extension bits. Since bits 59:60 are guaranteed
		 * to be part of the sign extension bits, we can just
		 * copy them to 61:62.
		 */
		val |= (LBR_FROM_SIGNEXT_2MSB & val) << 2;
	}
	return val;
}

/*
 * If quirk is needed, ensure sign extension is 61 bits:
 */
u64 lbr_from_signext_quirk_rd(u64 val)
{
	if (static_branch_unlikely(&lbr_from_quirk_key)) {
		/*
		 * Quirk is on when TSX is not enabled. Therefore TSX
		 * flags must be read as OFF.
		 */
		val &= ~(LBR_FROM_FLAG_IN_TX | LBR_FROM_FLAG_ABORT);
	}
	return val;
}

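/*
 * Round trip with the quirk enabled: a kernel address read back from the
 * FROM MSR has bits 61:62 clear while bits 59:60 still carry the sign.
 * lbr_from_signext_quirk_wr() copies bits 59:60 into 61:62 so the value
 * satisfies wrmsrl()'s 63-bit sign-extension expectation, and
 * lbr_from_signext_quirk_rd() clears bits 61:62 so callers never mistake
 * stale sign bits for TSX flags.
 */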
static inline void wrlbr_from(unsigned int idx, u64 val)
{
	val = lbr_from_signext_quirk_wr(val);
	wrmsrl(x86_pmu.lbr_from + idx, val);
}

static inline void wrlbr_to(unsigned int idx, u64 val)
{
	wrmsrl(x86_pmu.lbr_to + idx, val);
}

static inline u64 rdlbr_from(unsigned int idx)
{
	u64 val;

	rdmsrl(x86_pmu.lbr_from + idx, val);

	return lbr_from_signext_quirk_rd(val);
}

static inline u64 rdlbr_to(unsigned int idx)
{
	u64 val;

	rdmsrl(x86_pmu.lbr_to + idx, val);

	return val;
}

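/*
 * Write a saved stack back into the LBR MSRs, walking back from the
 * saved top-of-stack. The LBR is a ring buffer, so the i-th most recent
 * entry lives at index (tos - i) & (lbr_nr - 1).
 */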
static void __intel_pmu_lbr_restore(struct x86_perf_task_context *task_ctx)
{
	int i;
	unsigned lbr_idx, mask;
	u64 tos;

	if (task_ctx->lbr_callstack_users == 0 ||
	    task_ctx->lbr_stack_state == LBR_NONE) {
		intel_pmu_lbr_reset();
		return;
	}

	mask = x86_pmu.lbr_nr - 1;
	tos = task_ctx->tos;
	for (i = 0; i < task_ctx->valid_lbrs; i++) {
		lbr_idx = (tos - i) & mask;
		wrlbr_from(lbr_idx, task_ctx->lbr_from[i]);
		wrlbr_to(lbr_idx, task_ctx->lbr_to[i]);

		if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
			wrmsrl(MSR_LBR_INFO_0 + lbr_idx, task_ctx->lbr_info[i]);
	}

	for (; i < x86_pmu.lbr_nr; i++) {
		lbr_idx = (tos - i) & mask;
		wrlbr_from(lbr_idx, 0);
		wrlbr_to(lbr_idx, 0);
		if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
			wrmsrl(MSR_LBR_INFO_0 + lbr_idx, 0);
	}

	wrmsrl(x86_pmu.lbr_tos, tos);
	task_ctx->lbr_stack_state = LBR_NONE;
}

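/*
 * Snapshot the live LBR stack into the task context at sched-out. The
 * walk stops at the first all-zero FROM entry; only the valid_lbrs
 * entries captured here are written back by __intel_pmu_lbr_restore(),
 * which clears the rest so no stale entries from another task survive.
 */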
static void __intel_pmu_lbr_save(struct x86_perf_task_context *task_ctx)
{
	unsigned lbr_idx, mask;
	u64 tos, from;
	int i;

	if (task_ctx->lbr_callstack_users == 0) {
		task_ctx->lbr_stack_state = LBR_NONE;
		return;
	}

	mask = x86_pmu.lbr_nr - 1;
	tos = intel_pmu_lbr_tos();
	for (i = 0; i < x86_pmu.lbr_nr; i++) {
		lbr_idx = (tos - i) & mask;
		from = rdlbr_from(lbr_idx);
		if (!from)
			break;
		task_ctx->lbr_from[i] = from;
		task_ctx->lbr_to[i]   = rdlbr_to(lbr_idx);
		if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
			rdmsrl(MSR_LBR_INFO_0 + lbr_idx, task_ctx->lbr_info[i]);
	}
	task_ctx->valid_lbrs = i;
	task_ctx->tos = tos;
	task_ctx->lbr_stack_state = LBR_VALID;
}

void intel_pmu_lbr_sched_task(struct perf_event_context *ctx, bool sched_in)
{
	struct x86_perf_task_context *task_ctx;

	/*
	 * If LBR callstack feature is enabled and the stack was saved when
	 * the task was scheduled out, restore the stack. Otherwise flush
	 * the LBR stack.
	 */
	task_ctx = ctx ? ctx->task_ctx_data : NULL;
	if (task_ctx) {
		if (sched_in)
			__intel_pmu_lbr_restore(task_ctx);
		else
			__intel_pmu_lbr_save(task_ctx);
		return;
	}

	/*
	 * Since a context switch can flip the address space and LBR entries
	 * are not tagged with an identifier, we need to wipe the LBR, even for
	 * per-cpu events. You simply cannot resolve the branches from the old
	 * address space.
	 */
	if (sched_in)
		intel_pmu_lbr_reset();
}

static inline bool branch_user_callstack(unsigned br_sel)
{
	return (br_sel & X86_BR_USER) && (br_sel & X86_BR_CALL_STACK);
}

void intel_pmu_lbr_add(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct x86_perf_task_context *task_ctx;

	if (!x86_pmu.lbr_nr)
		return;

	cpuc->br_sel = event->hw.branch_reg.reg;

	if (branch_user_callstack(cpuc->br_sel) && event->ctx->task_ctx_data) {
		task_ctx = event->ctx->task_ctx_data;
		task_ctx->lbr_callstack_users++;
	}

	/*
	 * Request pmu::sched_task() callback, which will fire inside the
	 * regular perf event scheduling, so that call will:
	 *
	 *  - restore or wipe; when LBR-callstack,
	 *  - wipe; otherwise,
	 *
	 * when this is from __perf_event_task_sched_in().
	 *
	 * However, if this is from perf_install_in_context(), no such callback
	 * will follow and we'll need to reset the LBR here if this is the
	 * first LBR event.
	 *
	 * The problem is, we cannot tell these cases apart... but we can
	 * exclude the biggest chunk of cases by looking at
	 * event->total_time_running. An event that has accrued runtime cannot
	 * be 'new'. Conversely, a new event can get installed through the
	 * context switch path for the first time.
	 */
	perf_sched_cb_inc(event->ctx->pmu);
	if (!cpuc->lbr_users++ && !event->total_time_running)
		intel_pmu_lbr_reset();
}

void intel_pmu_lbr_del(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct x86_perf_task_context *task_ctx;

	if (!x86_pmu.lbr_nr)
		return;

	if (branch_user_callstack(cpuc->br_sel) &&
	    event->ctx->task_ctx_data) {
		task_ctx = event->ctx->task_ctx_data;
		task_ctx->lbr_callstack_users--;
	}

	cpuc->lbr_users--;
	WARN_ON_ONCE(cpuc->lbr_users < 0);
	perf_sched_cb_dec(event->ctx->pmu);
}

void intel_pmu_lbr_enable_all(bool pmi)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	if (cpuc->lbr_users)
		__intel_pmu_lbr_enable(pmi);
}

void intel_pmu_lbr_disable_all(void)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	if (cpuc->lbr_users)
		__intel_pmu_lbr_disable();
}

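/*
 * In the 32-bit format each LBR entry is a single MSR packing a 32-bit
 * FROM address in the low half and a 32-bit TO address in the high half,
 * with no flag bits, hence the union below and the zeroed
 * prediction/TSX/cycle fields.
 */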
static void intel_pmu_lbr_read_32(struct cpu_hw_events *cpuc)
{
	unsigned long mask = x86_pmu.lbr_nr - 1;
	u64 tos = intel_pmu_lbr_tos();
	int i;

	for (i = 0; i < x86_pmu.lbr_nr; i++) {
		unsigned long lbr_idx = (tos - i) & mask;
		union {
			struct {
				u32 from;
				u32 to;
			};
			u64     lbr;
		} msr_lastbranch;

		rdmsrl(x86_pmu.lbr_from + lbr_idx, msr_lastbranch.lbr);

		cpuc->lbr_entries[i].from	= msr_lastbranch.from;
		cpuc->lbr_entries[i].to		= msr_lastbranch.to;
		cpuc->lbr_entries[i].mispred	= 0;
		cpuc->lbr_entries[i].predicted	= 0;
		cpuc->lbr_entries[i].in_tx	= 0;
		cpuc->lbr_entries[i].abort	= 0;
		cpuc->lbr_entries[i].cycles	= 0;
		cpuc->lbr_entries[i].reserved	= 0;
	}
	cpuc->lbr_stack.nr = i;
}

/*
 * Due to lack of segmentation in Linux the effective address (offset)
 * is the same as the linear address, allowing us to merge the LIP and EIP
 * LBR formats.
 */
static void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc)
{
	bool need_info = false, call_stack = false;
	unsigned long mask = x86_pmu.lbr_nr - 1;
	int lbr_format = x86_pmu.intel_cap.lbr_format;
	u64 tos = intel_pmu_lbr_tos();
	int i;
	int out = 0;
	int num = x86_pmu.lbr_nr;

	if (cpuc->lbr_sel) {
		need_info = !(cpuc->lbr_sel->config & LBR_NO_INFO);
		if (cpuc->lbr_sel->config & LBR_CALL_STACK)
			call_stack = true;
	}

	for (i = 0; i < num; i++) {
		unsigned long lbr_idx = (tos - i) & mask;
		u64 from, to, mis = 0, pred = 0, in_tx = 0, abort = 0;
		int skip = 0;
		u16 cycles = 0;
		int lbr_flags = lbr_desc[lbr_format];

		from = rdlbr_from(lbr_idx);
		to   = rdlbr_to(lbr_idx);

		/*
		 * Read LBR call stack entries
		 * until invalid entry (0s) is detected.
		 */
		if (call_stack && !from)
			break;

		if (lbr_format == LBR_FORMAT_INFO && need_info) {
			u64 info;

			rdmsrl(MSR_LBR_INFO_0 + lbr_idx, info);
			mis = !!(info & LBR_INFO_MISPRED);
			pred = !mis;
			in_tx = !!(info & LBR_INFO_IN_TX);
			abort = !!(info & LBR_INFO_ABORT);
			cycles = (info & LBR_INFO_CYCLES);
		}

		if (lbr_format == LBR_FORMAT_TIME) {
			mis = !!(from & LBR_FROM_FLAG_MISPRED);
			pred = !mis;
			skip = 1;
			cycles = ((to >> 48) & LBR_INFO_CYCLES);

			to = (u64)((((s64)to) << 16) >> 16);
		}

		if (lbr_flags & LBR_EIP_FLAGS) {
			mis = !!(from & LBR_FROM_FLAG_MISPRED);
			pred = !mis;
			skip = 1;
		}
		if (lbr_flags & LBR_TSX) {
			in_tx = !!(from & LBR_FROM_FLAG_IN_TX);
			abort = !!(from & LBR_FROM_FLAG_ABORT);
			skip = 3;
		}
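		/*
		 * Shift the flag bits out through the top, then
		 * arithmetic-shift back down: this strips the flags and
		 * restores the canonical sign extension of the source
		 * address in one go.
		 */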
		from = (u64)((((s64)from) << skip) >> skip);

		/*
		 * Some CPUs report duplicated abort records,
		 * with the second entry not having an abort bit set.
		 * Skip them here. This loop runs backwards,
		 * so we need to undo the previous record.
		 * If the abort just happened outside the window
		 * the extra entry cannot be removed.
		 */
		if (abort && x86_pmu.lbr_double_abort && out > 0)
			out--;

		cpuc->lbr_entries[out].from	 = from;
		cpuc->lbr_entries[out].to	 = to;
		cpuc->lbr_entries[out].mispred	 = mis;
		cpuc->lbr_entries[out].predicted = pred;
		cpuc->lbr_entries[out].in_tx	 = in_tx;
		cpuc->lbr_entries[out].abort	 = abort;
		cpuc->lbr_entries[out].cycles	 = cycles;
		cpuc->lbr_entries[out].reserved	 = 0;
		out++;
	}
	cpuc->lbr_stack.nr = out;
}

void intel_pmu_lbr_read(void)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	if (!cpuc->lbr_users)
		return;

	if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_32)
		intel_pmu_lbr_read_32(cpuc);
	else
		intel_pmu_lbr_read_64(cpuc);

	intel_pmu_lbr_filter(cpuc);
}

/*
 * SW filter is used:
 * - in case there is no HW filter
 * - in case the HW filter has errata or limitations
 */
static int intel_pmu_setup_sw_lbr_filter(struct perf_event *event)
{
	u64 br_type = event->attr.branch_sample_type;
	int mask = 0;

	if (br_type & PERF_SAMPLE_BRANCH_USER)
		mask |= X86_BR_USER;

	if (br_type & PERF_SAMPLE_BRANCH_KERNEL)
		mask |= X86_BR_KERNEL;

	/* we ignore BRANCH_HV here */

	if (br_type & PERF_SAMPLE_BRANCH_ANY)
		mask |= X86_BR_ANY;

	if (br_type & PERF_SAMPLE_BRANCH_ANY_CALL)
		mask |= X86_BR_ANY_CALL;

	if (br_type & PERF_SAMPLE_BRANCH_ANY_RETURN)
		mask |= X86_BR_RET | X86_BR_IRET | X86_BR_SYSRET;

	if (br_type & PERF_SAMPLE_BRANCH_IND_CALL)
		mask |= X86_BR_IND_CALL;

	if (br_type & PERF_SAMPLE_BRANCH_ABORT_TX)
		mask |= X86_BR_ABORT;

	if (br_type & PERF_SAMPLE_BRANCH_IN_TX)
		mask |= X86_BR_IN_TX;

	if (br_type & PERF_SAMPLE_BRANCH_NO_TX)
		mask |= X86_BR_NO_TX;

	if (br_type & PERF_SAMPLE_BRANCH_COND)
		mask |= X86_BR_JCC;

	if (br_type & PERF_SAMPLE_BRANCH_CALL_STACK) {
		if (!x86_pmu_has_lbr_callstack())
			return -EOPNOTSUPP;
		if (mask & ~(X86_BR_USER | X86_BR_KERNEL))
			return -EINVAL;
		mask |= X86_BR_CALL | X86_BR_IND_CALL | X86_BR_RET |
			X86_BR_CALL_STACK;
	}

	if (br_type & PERF_SAMPLE_BRANCH_IND_JUMP)
		mask |= X86_BR_IND_JMP;

	if (br_type & PERF_SAMPLE_BRANCH_CALL)
		mask |= X86_BR_CALL | X86_BR_ZERO_CALL;
	/*
	 * stash actual user request into reg, it may
	 * be used by fixup code for some CPU
	 */
	event->hw.branch_reg.reg = mask;
	return 0;
}

/*
 * setup the HW LBR filter
 * Used only when available, may not be enough to disambiguate
 * all branches, may need the help of the SW filter
 */
static int intel_pmu_setup_hw_lbr_filter(struct perf_event *event)
{
	struct hw_perf_event_extra *reg;
	u64 br_type = event->attr.branch_sample_type;
	u64 mask = 0, v;
	int i;

	for (i = 0; i < PERF_SAMPLE_BRANCH_MAX_SHIFT; i++) {
		if (!(br_type & (1ULL << i)))
			continue;

		v = x86_pmu.lbr_sel_map[i];
		if (v == LBR_NOT_SUPP)
			return -EOPNOTSUPP;

		if (v != LBR_IGN)
			mask |= v;
	}

	reg = &event->hw.branch_reg;
	reg->idx = EXTRA_REG_LBR;

	/*
	 * The first 9 bits (LBR_SEL_MASK) in LBR_SELECT operate
	 * in suppress mode. So LBR_SELECT should be set to
	 * (~mask & LBR_SEL_MASK) | (mask & ~LBR_SEL_MASK)
	 * But the 10th bit LBR_CALL_STACK does not operate
	 * in suppress mode.
	 */
	reg->config = mask ^ (x86_pmu.lbr_sel_mask & ~LBR_CALL_STACK);
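
	/*
	 * Example: a request for PERF_SAMPLE_BRANCH_USER |
	 * PERF_SAMPLE_BRANCH_ANY_CALL on SNB collects mask = LBR_USER |
	 * LBR_REL_CALL | LBR_IND_CALL | LBR_FAR; the XOR then clears
	 * those suppress bits and sets all the others, so only user-level
	 * calls are captured, while LBR_CALL_STACK (an enable bit) is
	 * never inverted.
	 */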

	if ((br_type & PERF_SAMPLE_BRANCH_NO_CYCLES) &&
	    (br_type & PERF_SAMPLE_BRANCH_NO_FLAGS) &&
	    (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO))
		reg->config |= LBR_NO_INFO;

	return 0;
}

int intel_pmu_setup_lbr_filter(struct perf_event *event)
{
	int ret = 0;

	/*
	 * no LBR on this PMU
	 */
	if (!x86_pmu.lbr_nr)
		return -EOPNOTSUPP;

	/*
	 * setup SW LBR filter
	 */
	ret = intel_pmu_setup_sw_lbr_filter(event);
	if (ret)
		return ret;

	/*
	 * setup HW LBR filter, if any
	 */
	if (x86_pmu.lbr_sel_map)
		ret = intel_pmu_setup_hw_lbr_filter(event);

	return ret;
}

/*
 * return the type of control flow change at address "from"
 * instruction is not necessarily a branch (in case of interrupt).
 *
 * The branch type returned also includes the priv level of the
 * target of the control flow change (X86_BR_USER, X86_BR_KERNEL).
 *
 * If a branch type is unknown OR the instruction cannot be
 * decoded (e.g., text page not present), then X86_BR_NONE is
 * returned.
 */
static int branch_type(unsigned long from, unsigned long to, int abort)
{
	struct insn insn;
	void *addr;
	int bytes_read, bytes_left;
	int ret = X86_BR_NONE;
	int ext, to_plm, from_plm;
	u8 buf[MAX_INSN_SIZE];
	int is64 = 0;

	to_plm = kernel_ip(to) ? X86_BR_KERNEL : X86_BR_USER;
	from_plm = kernel_ip(from) ? X86_BR_KERNEL : X86_BR_USER;

	/*
	 * maybe zero if lbr did not fill up after a reset by the time
	 * we get a PMU interrupt
	 */
	if (from == 0 || to == 0)
		return X86_BR_NONE;

	if (abort)
		return X86_BR_ABORT | to_plm;

	if (from_plm == X86_BR_USER) {
		/*
		 * can happen if measuring at the user level only
		 * and we interrupt in a kernel thread, e.g., idle.
		 */
		if (!current->mm)
			return X86_BR_NONE;

		/* may fail if text not present */
		bytes_left = copy_from_user_nmi(buf, (void __user *)from,
						MAX_INSN_SIZE);
		bytes_read = MAX_INSN_SIZE - bytes_left;
		if (!bytes_read)
			return X86_BR_NONE;

		addr = buf;
	} else {
		/*
		 * The LBR logs any address in the IP, even if the IP just
		 * faulted. This means userspace can control the from address.
		 * Ensure we don't blindly read any address by validating it is
		 * a known text address.
		 */
		if (kernel_text_address(from)) {
			addr = (void *)from;
			/*
			 * Assume we can get the maximum possible size
			 * when grabbing kernel data. This is not
			 * _strictly_ true since we could possibly be
			 * executing up next to a memory hole, but
			 * it is very unlikely to be a problem.
			 */
			bytes_read = MAX_INSN_SIZE;
		} else {
			return X86_BR_NONE;
		}
	}

	/*
	 * decoder needs to know the ABI especially
	 * on 64-bit systems running 32-bit apps
	 */
#ifdef CONFIG_X86_64
	is64 = kernel_ip((unsigned long)addr) || !test_thread_flag(TIF_IA32);
#endif
	insn_init(&insn, addr, bytes_read, is64);
	insn_get_opcode(&insn);
	if (!insn.opcode.got)
		return X86_BR_ABORT;

	switch (insn.opcode.bytes[0]) {
	case 0xf:
		switch (insn.opcode.bytes[1]) {
		case 0x05: /* syscall */
		case 0x34: /* sysenter */
			ret = X86_BR_SYSCALL;
			break;
		case 0x07: /* sysret */
		case 0x35: /* sysexit */
			ret = X86_BR_SYSRET;
			break;
		case 0x80 ... 0x8f: /* conditional */
			ret = X86_BR_JCC;
			break;
		default:
			ret = X86_BR_NONE;
		}
		break;
	case 0x70 ... 0x7f: /* conditional */
		ret = X86_BR_JCC;
		break;
	case 0xc2: /* near ret */
	case 0xc3: /* near ret */
	case 0xca: /* far ret */
	case 0xcb: /* far ret */
		ret = X86_BR_RET;
		break;
	case 0xcf: /* iret */
		ret = X86_BR_IRET;
		break;
	case 0xcc ... 0xce: /* int */
		ret = X86_BR_INT;
		break;
	case 0xe8: /* call near rel */
		insn_get_immediate(&insn);
		if (insn.immediate1.value == 0) {
			/* zero length call */
			ret = X86_BR_ZERO_CALL;
			break;
		}
	case 0x9a: /* call far absolute */
		ret = X86_BR_CALL;
		break;
	case 0xe0 ... 0xe3: /* loop jmp */
		ret = X86_BR_JCC;
		break;
	case 0xe9 ... 0xeb: /* jmp */
		ret = X86_BR_JMP;
		break;
	case 0xff: /* call near absolute, call far absolute ind */
		insn_get_modrm(&insn);
		ext = (insn.modrm.bytes[0] >> 3) & 0x7;
		switch (ext) {
		case 2: /* near ind call */
		case 3: /* far ind call */
			ret = X86_BR_IND_CALL;
			break;
		case 4:
		case 5:
			ret = X86_BR_IND_JMP;
			break;
		}
		break;
	default:
		ret = X86_BR_NONE;
	}
	/*
	 * interrupts, traps, faults (and thus ring transition) may
	 * occur on any instructions. Thus, to classify them correctly,
	 * we need to first look at the from and to priv levels. If they
	 * are different and to is in the kernel, then it indicates
	 * a ring transition. If the from instruction is not a ring
	 * transition instr (syscall, sysenter, int), then it means
	 * it was an irq, trap or fault.
	 *
	 * we have no way of detecting kernel to kernel faults.
	 */
	if (from_plm == X86_BR_USER && to_plm == X86_BR_KERNEL
	    && ret != X86_BR_SYSCALL && ret != X86_BR_INT)
		ret = X86_BR_IRQ;

	/*
	 * branch priv level determined by target as
	 * is done by HW when LBR_SELECT is implemented
	 */
	if (ret != X86_BR_NONE)
		ret |= to_plm;

	return ret;
}

/*
 * implement actual branch filter based on user demand.
 * Hardware may not exactly satisfy that request, thus
 * we need to inspect opcodes. Mismatched branches are
 * discarded. Therefore, the number of branches returned
 * in PERF_SAMPLE_BRANCH_STACK sample may vary.
 */
static void
intel_pmu_lbr_filter(struct cpu_hw_events *cpuc)
{
	u64 from, to;
	int br_sel = cpuc->br_sel;
	int i, j, type;
	bool compress = false;

	/* if sampling all branches, then nothing to filter */
	if ((br_sel & X86_BR_ALL) == X86_BR_ALL)
		return;

	for (i = 0; i < cpuc->lbr_stack.nr; i++) {

		from = cpuc->lbr_entries[i].from;
		to = cpuc->lbr_entries[i].to;

		type = branch_type(from, to, cpuc->lbr_entries[i].abort);
		if (type != X86_BR_NONE && (br_sel & X86_BR_ANYTX)) {
			if (cpuc->lbr_entries[i].in_tx)
				type |= X86_BR_IN_TX;
			else
				type |= X86_BR_NO_TX;
		}

		/* if type does not correspond, then discard */
		if (type == X86_BR_NONE || (br_sel & type) != type) {
			cpuc->lbr_entries[i].from = 0;
			compress = true;
		}
	}

	if (!compress)
		return;

	/* remove all entries with from=0 */
	for (i = 0; i < cpuc->lbr_stack.nr; ) {
		if (!cpuc->lbr_entries[i].from) {
			j = i;
			while (++j < cpuc->lbr_stack.nr)
				cpuc->lbr_entries[j-1] = cpuc->lbr_entries[j];
			cpuc->lbr_stack.nr--;
			if (!cpuc->lbr_entries[i].from)
				continue;
		}
		i++;
	}
}

/*
 * Map interface branch filters onto LBR filters
 */
static const int nhm_lbr_sel_map[PERF_SAMPLE_BRANCH_MAX_SHIFT] = {
	[PERF_SAMPLE_BRANCH_ANY_SHIFT]		= LBR_ANY,
	[PERF_SAMPLE_BRANCH_USER_SHIFT]		= LBR_USER,
	[PERF_SAMPLE_BRANCH_KERNEL_SHIFT]	= LBR_KERNEL,
	[PERF_SAMPLE_BRANCH_HV_SHIFT]		= LBR_IGN,
	[PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT]	= LBR_RETURN | LBR_REL_JMP
						| LBR_IND_JMP | LBR_FAR,
	/*
	 * NHM/WSM erratum: must include REL_JMP+IND_JMP to get CALL branches
	 */
	[PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT] =
	 LBR_REL_CALL | LBR_IND_CALL | LBR_REL_JMP | LBR_IND_JMP | LBR_FAR,
	/*
	 * NHM/WSM erratum: must include IND_JMP to capture IND_CALL
	 */
	[PERF_SAMPLE_BRANCH_IND_CALL_SHIFT]	= LBR_IND_CALL | LBR_IND_JMP,
	[PERF_SAMPLE_BRANCH_COND_SHIFT]		= LBR_JCC,
	[PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT]	= LBR_IND_JMP,
};

static const int snb_lbr_sel_map[PERF_SAMPLE_BRANCH_MAX_SHIFT] = {
	[PERF_SAMPLE_BRANCH_ANY_SHIFT]		= LBR_ANY,
	[PERF_SAMPLE_BRANCH_USER_SHIFT]		= LBR_USER,
	[PERF_SAMPLE_BRANCH_KERNEL_SHIFT]	= LBR_KERNEL,
	[PERF_SAMPLE_BRANCH_HV_SHIFT]		= LBR_IGN,
	[PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT]	= LBR_RETURN | LBR_FAR,
	[PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT]	= LBR_REL_CALL | LBR_IND_CALL
						| LBR_FAR,
	[PERF_SAMPLE_BRANCH_IND_CALL_SHIFT]	= LBR_IND_CALL,
	[PERF_SAMPLE_BRANCH_COND_SHIFT]		= LBR_JCC,
	[PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT]	= LBR_IND_JMP,
	[PERF_SAMPLE_BRANCH_CALL_SHIFT]		= LBR_REL_CALL,
};

static const int hsw_lbr_sel_map[PERF_SAMPLE_BRANCH_MAX_SHIFT] = {
	[PERF_SAMPLE_BRANCH_ANY_SHIFT]		= LBR_ANY,
	[PERF_SAMPLE_BRANCH_USER_SHIFT]		= LBR_USER,
	[PERF_SAMPLE_BRANCH_KERNEL_SHIFT]	= LBR_KERNEL,
	[PERF_SAMPLE_BRANCH_HV_SHIFT]		= LBR_IGN,
	[PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT]	= LBR_RETURN | LBR_FAR,
	[PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT]	= LBR_REL_CALL | LBR_IND_CALL
						| LBR_FAR,
	[PERF_SAMPLE_BRANCH_IND_CALL_SHIFT]	= LBR_IND_CALL,
	[PERF_SAMPLE_BRANCH_COND_SHIFT]		= LBR_JCC,
	[PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT]	= LBR_REL_CALL | LBR_IND_CALL
						| LBR_RETURN | LBR_CALL_STACK,
	[PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT]	= LBR_IND_JMP,
	[PERF_SAMPLE_BRANCH_CALL_SHIFT]		= LBR_REL_CALL,
};

/* core */
void __init intel_pmu_lbr_init_core(void)
{
	x86_pmu.lbr_nr	   = 4;
	x86_pmu.lbr_tos	   = MSR_LBR_TOS;
	x86_pmu.lbr_from   = MSR_LBR_CORE_FROM;
	x86_pmu.lbr_to	   = MSR_LBR_CORE_TO;

	/*
	 * SW branch filter usage:
	 * - compensate for lack of HW filter
	 */
}

/* nehalem/westmere */
void __init intel_pmu_lbr_init_nhm(void)
{
	x86_pmu.lbr_nr	   = 16;
	x86_pmu.lbr_tos	   = MSR_LBR_TOS;
	x86_pmu.lbr_from   = MSR_LBR_NHM_FROM;
	x86_pmu.lbr_to	   = MSR_LBR_NHM_TO;

	x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
	x86_pmu.lbr_sel_map  = nhm_lbr_sel_map;

	/*
	 * SW branch filter usage:
	 * - workaround LBR_SEL errata (see above)
	 * - support syscall, sysret capture.
	 *   That requires LBR_FAR but that means far
	 *   jmp need to be filtered out
	 */
}

/* sandy bridge */
void __init intel_pmu_lbr_init_snb(void)
{
	x86_pmu.lbr_nr	 = 16;
	x86_pmu.lbr_tos	 = MSR_LBR_TOS;
	x86_pmu.lbr_from = MSR_LBR_NHM_FROM;
	x86_pmu.lbr_to	 = MSR_LBR_NHM_TO;

	x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
	x86_pmu.lbr_sel_map  = snb_lbr_sel_map;

	/*
	 * SW branch filter usage:
	 * - support syscall, sysret capture.
	 *   That requires LBR_FAR but that means far
	 *   jmp need to be filtered out
	 */
}

/* haswell */
void intel_pmu_lbr_init_hsw(void)
{
	x86_pmu.lbr_nr	 = 16;
	x86_pmu.lbr_tos	 = MSR_LBR_TOS;
	x86_pmu.lbr_from = MSR_LBR_NHM_FROM;
	x86_pmu.lbr_to	 = MSR_LBR_NHM_TO;

	x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
	x86_pmu.lbr_sel_map  = hsw_lbr_sel_map;

	if (lbr_from_signext_quirk_needed())
		static_branch_enable(&lbr_from_quirk_key);
}

/* skylake */
__init void intel_pmu_lbr_init_skl(void)
{
	x86_pmu.lbr_nr	 = 32;
	x86_pmu.lbr_tos	 = MSR_LBR_TOS;
	x86_pmu.lbr_from = MSR_LBR_NHM_FROM;
	x86_pmu.lbr_to	 = MSR_LBR_NHM_TO;

	x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
	x86_pmu.lbr_sel_map  = hsw_lbr_sel_map;

	/*
	 * SW branch filter usage:
	 * - support syscall, sysret capture.
	 *   That requires LBR_FAR but that means far
	 *   jmp need to be filtered out
	 */
}

/* atom */
void __init intel_pmu_lbr_init_atom(void)
{
	/*
	 * only models starting at stepping 10 seem
	 * to have an operational LBR which can freeze
	 * on PMU interrupt
	 */
	if (boot_cpu_data.x86_model == 28
	    && boot_cpu_data.x86_stepping < 10) {
		pr_cont("LBR disabled due to erratum");
		return;
	}

	x86_pmu.lbr_nr	   = 8;
	x86_pmu.lbr_tos	   = MSR_LBR_TOS;
	x86_pmu.lbr_from   = MSR_LBR_CORE_FROM;
	x86_pmu.lbr_to	   = MSR_LBR_CORE_TO;

	/*
	 * SW branch filter usage:
	 * - compensate for lack of HW filter
	 */
}

/* slm */
void __init intel_pmu_lbr_init_slm(void)
{
	x86_pmu.lbr_nr	   = 8;
	x86_pmu.lbr_tos	   = MSR_LBR_TOS;
	x86_pmu.lbr_from   = MSR_LBR_CORE_FROM;
	x86_pmu.lbr_to	   = MSR_LBR_CORE_TO;

	x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
	x86_pmu.lbr_sel_map  = nhm_lbr_sel_map;

	/*
	 * SW branch filter usage:
	 * - compensate for lack of HW filter
	 */
	pr_cont("8-deep LBR, ");
}

/* Knights Landing */
void intel_pmu_lbr_init_knl(void)
{
	x86_pmu.lbr_nr	   = 8;
	x86_pmu.lbr_tos	   = MSR_LBR_TOS;
	x86_pmu.lbr_from   = MSR_LBR_NHM_FROM;
	x86_pmu.lbr_to	   = MSR_LBR_NHM_TO;

	x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
	x86_pmu.lbr_sel_map  = snb_lbr_sel_map;
}