#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/slab.h>

#include <asm/perf_event.h>
#include <asm/insn.h>

#include "../perf_event.h"

/* The size of a BTS record in bytes: */
#define BTS_RECORD_SIZE		24

#define BTS_BUFFER_SIZE		(PAGE_SIZE << 4)
#define PEBS_BUFFER_SIZE	(PAGE_SIZE << 4)
#define PEBS_FIXUP_SIZE		PAGE_SIZE
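
/*
 * Capacity, assuming the usual 4K PAGE_SIZE: both buffers are 64K, so
 * BTS holds 64K / 24 = 2730 records, while PEBS holds 64K divided by
 * the format-dependent record size (see the pebs_record_* structs
 * below).
 */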

/*
 * pebs_record_32 for p4 and core not supported

struct pebs_record_32 {
	u32 flags, ip;
	u32 ax, bx, cx, dx;
	u32 si, di, bp, sp;
};

 */

union intel_x86_pebs_dse {
	u64 val;
	struct {
		unsigned int ld_dse:4;
		unsigned int ld_stlb_miss:1;
		unsigned int ld_locked:1;
		unsigned int ld_reserved:26;
	};
	struct {
		unsigned int st_l1d_hit:1;
		unsigned int st_reserved1:3;
		unsigned int st_stlb_miss:1;
		unsigned int st_locked:1;
		unsigned int st_reserved2:26;
	};
};
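
/*
 * Illustrative decode: a load-latency dse value of 0x13 splits into
 * ld_dse = 3 (indexing pebs_data_source[] below), ld_stlb_miss = 1 and
 * ld_locked = 0, i.e. an L2 hit whose address missed the second-level
 * TLB.
 */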

/*
 * Map PEBS Load Latency Data Source encodings to generic
 * memory data source information
 */
#define P(a, b) PERF_MEM_S(a, b)
#define OP_LH (P(OP, LOAD) | P(LVL, HIT))
#define LEVEL(x) P(LVLNUM, x)
#define REM P(REMOTE, REMOTE)
#define SNOOP_NONE_MISS (P(SNOOP, NONE) | P(SNOOP, MISS))

/* Version for Sandy Bridge and later */
static u64 pebs_data_source[] = {
	P(OP, LOAD) | P(LVL, MISS) | LEVEL(L3) | P(SNOOP, NA),/* 0x00:ukn L3 */
	OP_LH | P(LVL, L1)  | LEVEL(L1) | P(SNOOP, NONE),  /* 0x01: L1 local */
	OP_LH | P(LVL, LFB) | LEVEL(LFB) | P(SNOOP, NONE), /* 0x02: LFB hit */
	OP_LH | P(LVL, L2)  | LEVEL(L2) | P(SNOOP, NONE),  /* 0x03: L2 hit */
	OP_LH | P(LVL, L3)  | LEVEL(L3) | P(SNOOP, NONE),  /* 0x04: L3 hit */
	OP_LH | P(LVL, L3)  | LEVEL(L3) | P(SNOOP, MISS),  /* 0x05: L3 hit, snoop miss */
	OP_LH | P(LVL, L3)  | LEVEL(L3) | P(SNOOP, HIT),   /* 0x06: L3 hit, snoop hit */
	OP_LH | P(LVL, L3)  | LEVEL(L3) | P(SNOOP, HITM),  /* 0x07: L3 hit, snoop hitm */
	OP_LH | P(LVL, REM_CCE1) | REM | LEVEL(L3) | P(SNOOP, HIT),  /* 0x08: L3 miss snoop hit */
	OP_LH | P(LVL, REM_CCE1) | REM | LEVEL(L3) | P(SNOOP, HITM), /* 0x09: L3 miss snoop hitm*/
	OP_LH | P(LVL, LOC_RAM)  | LEVEL(RAM) | P(SNOOP, HIT),       /* 0x0a: L3 miss, shared */
	OP_LH | P(LVL, REM_RAM1) | REM | LEVEL(L3) | P(SNOOP, HIT),  /* 0x0b: L3 miss, shared */
	OP_LH | P(LVL, LOC_RAM)  | LEVEL(RAM) | SNOOP_NONE_MISS,     /* 0x0c: L3 miss, excl */
	OP_LH | P(LVL, REM_RAM1) | LEVEL(RAM) | REM | SNOOP_NONE_MISS, /* 0x0d: L3 miss, excl */
	OP_LH | P(LVL, IO)  | LEVEL(NA) | P(SNOOP, NONE), /* 0x0e: I/O */
	OP_LH | P(LVL, UNC) | LEVEL(NA) | P(SNOOP, NONE), /* 0x0f: uncached */
};

/* Patch up minor differences in the bits */
void __init intel_pmu_pebs_data_source_nhm(void)
{
	pebs_data_source[0x05] = OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOP, HIT);
	pebs_data_source[0x06] = OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOP, HITM);
	pebs_data_source[0x07] = OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOP, HITM);
}

void __init intel_pmu_pebs_data_source_skl(bool pmem)
{
	u64 pmem_or_l4 = pmem ? LEVEL(PMEM) : LEVEL(L4);

	pebs_data_source[0x08] = OP_LH | pmem_or_l4 | P(SNOOP, HIT);
	pebs_data_source[0x09] = OP_LH | pmem_or_l4 | REM | P(SNOOP, HIT);
	pebs_data_source[0x0b] = OP_LH | LEVEL(RAM) | REM | P(SNOOP, NONE);
	pebs_data_source[0x0c] = OP_LH | LEVEL(ANY_CACHE) | REM | P(SNOOPX, FWD);
	pebs_data_source[0x0d] = OP_LH | LEVEL(ANY_CACHE) | REM | P(SNOOP, HITM);
}
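
/*
 * Both fixups run once at PMU init time from the model-specific setup
 * in core.c, so pebs_data_source[] always matches the CPU we boot on.
 */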

static u64 precise_store_data(u64 status)
{
	union intel_x86_pebs_dse dse;
	u64 val = P(OP, STORE) | P(SNOOP, NA) | P(LVL, L1) | P(TLB, L2);

	dse.val = status;

	/*
	 * bit 4: TLB access
	 * 1 = store missed the 2nd level TLB
	 *
	 * so it either hit the walker or the OS
	 * otherwise hit 2nd level TLB
	 */
	if (dse.st_stlb_miss)
		val |= P(TLB, MISS);
	else
		val |= P(TLB, HIT);

	/*
	 * bit 0: hit L1 data cache
	 * if not set, then all we know is that
	 * it missed L1D
	 */
	if (dse.st_l1d_hit)
		val |= P(LVL, HIT);
	else
		val |= P(LVL, MISS);

	/*
	 * bit 5: Locked prefix
	 */
	if (dse.st_locked)
		val |= P(LOCK, LOCKED);

	return val;
}

static u64 precise_datala_hsw(struct perf_event *event, u64 status)
{
	union perf_mem_data_src dse;

	dse.val = PERF_MEM_NA;

	if (event->hw.flags & PERF_X86_EVENT_PEBS_ST_HSW)
		dse.mem_op = PERF_MEM_OP_STORE;
	else if (event->hw.flags & PERF_X86_EVENT_PEBS_LD_HSW)
		dse.mem_op = PERF_MEM_OP_LOAD;

	/*
	 * L1 info is only valid for the following events:
	 *
	 * MEM_UOPS_RETIRED.STLB_MISS_STORES
	 * MEM_UOPS_RETIRED.LOCK_STORES
	 * MEM_UOPS_RETIRED.SPLIT_STORES
	 * MEM_UOPS_RETIRED.ALL_STORES
	 */
	if (event->hw.flags & PERF_X86_EVENT_PEBS_ST_HSW) {
		if (status & 1)
			dse.mem_lvl = PERF_MEM_LVL_L1 | PERF_MEM_LVL_HIT;
		else
			dse.mem_lvl = PERF_MEM_LVL_L1 | PERF_MEM_LVL_MISS;
	}
	return dse.val;
}

static u64 load_latency_data(u64 status)
{
	union intel_x86_pebs_dse dse;
	u64 val;

	dse.val = status;

	/*
	 * use the mapping table for bits 0-3
	 */
	val = pebs_data_source[dse.ld_dse];

	/*
	 * Nehalem models do not support TLB or lock info
	 */
	if (x86_pmu.pebs_no_tlb) {
		val |= P(TLB, NA) | P(LOCK, NA);
		return val;
	}
	/*
	 * bit 4: TLB access
	 * 0 = did not miss 2nd level TLB
	 * 1 = missed 2nd level TLB
	 */
	if (dse.ld_stlb_miss)
		val |= P(TLB, MISS) | P(TLB, L2);
	else
		val |= P(TLB, HIT) | P(TLB, L1) | P(TLB, L2);

	/*
	 * bit 5: locked prefix
	 */
	if (dse.ld_locked)
		val |= P(LOCK, LOCKED);

	return val;
}
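
/*
 * Worked example (illustrative): status = 0x13 gives ld_dse = 3, so
 * val starts as the 0x03 table entry (load, L2 hit, no snoop); bit 4
 * is set, so P(TLB, MISS) | P(TLB, L2) is OR'ed in; bit 5 is clear,
 * so no lock info is added.
 */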

struct pebs_record_core {
	u64 flags, ip;
	u64 ax, bx, cx, dx;
	u64 si, di, bp, sp;
	u64 r8,  r9,  r10, r11;
	u64 r12, r13, r14, r15;
};

struct pebs_record_nhm {
	u64 flags, ip;
	u64 ax, bx, cx, dx;
	u64 si, di, bp, sp;
	u64 r8,  r9,  r10, r11;
	u64 r12, r13, r14, r15;
	u64 status, dla, dse, lat;
};

/*
 * Same as pebs_record_nhm, with two additional fields.
 */
struct pebs_record_hsw {
	u64 flags, ip;
	u64 ax, bx, cx, dx;
	u64 si, di, bp, sp;
	u64 r8,  r9,  r10, r11;
	u64 r12, r13, r14, r15;
	u64 status, dla, dse, lat;
	u64 real_ip, tsx_tuning;
};

union hsw_tsx_tuning {
	struct {
		u32 cycles_last_block     : 32,
		    hle_abort		  : 1,
		    rtm_abort		  : 1,
		    instruction_abort     : 1,
		    non_instruction_abort : 1,
		    retry		  : 1,
		    data_conflict	  : 1,
		    capacity_writes	  : 1,
		    capacity_reads	  : 1;
	};
	u64	value;
};

#define PEBS_HSW_TSX_FLAGS	0xff00000000ULL

/* Same as HSW, plus TSC */

struct pebs_record_skl {
	u64 flags, ip;
	u64 ax, bx, cx, dx;
	u64 si, di, bp, sp;
	u64 r8,  r9,  r10, r11;
	u64 r12, r13, r14, r15;
	u64 status, dla, dse, lat;
	u64 real_ip, tsx_tuning;
	u64 tsc;
};
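
/*
 * Record sizes implied by the layouts above: core = 144 bytes,
 * nhm = 176, hsw = 192, skl = 200. x86_pmu.pebs_record_size is set to
 * whichever matches the detected PEBS format.
 */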

void init_debug_store_on_cpu(int cpu)
{
	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;

	if (!ds)
		return;

	wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA,
		     (u32)((u64)(unsigned long)ds),
		     (u32)((u64)(unsigned long)ds >> 32));
}

void fini_debug_store_on_cpu(int cpu)
{
	if (!per_cpu(cpu_hw_events, cpu).ds)
		return;

	wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA, 0, 0);
}

static DEFINE_PER_CPU(void *, insn_buffer);

static int alloc_pebs_buffer(int cpu)
{
	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
	int node = cpu_to_node(cpu);
	int max;
	void *buffer, *ibuffer;

	if (!x86_pmu.pebs)
		return 0;

	buffer = kzalloc_node(x86_pmu.pebs_buffer_size, GFP_KERNEL, node);
	if (unlikely(!buffer))
		return -ENOMEM;

	/*
	 * HSW+ already provides us the eventing ip; no need to allocate this
	 * buffer then.
	 */
	if (x86_pmu.intel_cap.pebs_format < 2) {
		ibuffer = kzalloc_node(PEBS_FIXUP_SIZE, GFP_KERNEL, node);
		if (!ibuffer) {
			kfree(buffer);
			return -ENOMEM;
		}
		per_cpu(insn_buffer, cpu) = ibuffer;
	}

	max = x86_pmu.pebs_buffer_size / x86_pmu.pebs_record_size;

	ds->pebs_buffer_base = (u64)(unsigned long)buffer;
	ds->pebs_index = ds->pebs_buffer_base;
	ds->pebs_absolute_maximum = ds->pebs_buffer_base +
		max * x86_pmu.pebs_record_size;

	return 0;
}
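
/*
 * E.g. a 64K buffer of 192-byte HSW records gives max = 341;
 * pebs_absolute_maximum ends on a whole record, so the hardware never
 * writes a partial one.
 */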

static void release_pebs_buffer(int cpu)
{
	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;

	if (!ds || !x86_pmu.pebs)
		return;

	kfree(per_cpu(insn_buffer, cpu));
	per_cpu(insn_buffer, cpu) = NULL;

	kfree((void *)(unsigned long)ds->pebs_buffer_base);
	ds->pebs_buffer_base = 0;
}

static int alloc_bts_buffer(int cpu)
{
	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
	int node = cpu_to_node(cpu);
	int max, thresh;
	void *buffer;

	if (!x86_pmu.bts)
		return 0;

	buffer = kzalloc_node(BTS_BUFFER_SIZE, GFP_KERNEL | __GFP_NOWARN, node);
	if (unlikely(!buffer)) {
		WARN_ONCE(1, "%s: BTS buffer allocation failure\n", __func__);
		return -ENOMEM;
	}

	max = BTS_BUFFER_SIZE / BTS_RECORD_SIZE;
	thresh = max / 16;

	ds->bts_buffer_base = (u64)(unsigned long)buffer;
	ds->bts_index = ds->bts_buffer_base;
	ds->bts_absolute_maximum = ds->bts_buffer_base +
		max * BTS_RECORD_SIZE;
	ds->bts_interrupt_threshold = ds->bts_absolute_maximum -
		thresh * BTS_RECORD_SIZE;

	return 0;
}
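
/*
 * With 4K pages: max = 65536 / 24 = 2730 records and thresh = 170, so
 * the interrupt threshold sits 15/16 of the way into the buffer and
 * the PMI fires with roughly 170 record slots still free.
 */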

static void release_bts_buffer(int cpu)
{
	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;

	if (!ds || !x86_pmu.bts)
		return;

	kfree((void *)(unsigned long)ds->bts_buffer_base);
	ds->bts_buffer_base = 0;
}

static int alloc_ds_buffer(int cpu)
{
	int node = cpu_to_node(cpu);
	struct debug_store *ds;

	ds = kzalloc_node(sizeof(*ds), GFP_KERNEL, node);
	if (unlikely(!ds))
		return -ENOMEM;

	per_cpu(cpu_hw_events, cpu).ds = ds;

	return 0;
}

static void release_ds_buffer(int cpu)
{
	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;

	if (!ds)
		return;

	per_cpu(cpu_hw_events, cpu).ds = NULL;
	kfree(ds);
}

void release_ds_buffers(void)
{
	int cpu;

	if (!x86_pmu.bts && !x86_pmu.pebs)
		return;

	get_online_cpus();
	for_each_online_cpu(cpu)
		fini_debug_store_on_cpu(cpu);

	for_each_possible_cpu(cpu) {
		release_pebs_buffer(cpu);
		release_bts_buffer(cpu);
		release_ds_buffer(cpu);
	}
	put_online_cpus();
}

void reserve_ds_buffers(void)
{
	int bts_err = 0, pebs_err = 0;
	int cpu;

	x86_pmu.bts_active = 0;
	x86_pmu.pebs_active = 0;

	if (!x86_pmu.bts && !x86_pmu.pebs)
		return;

	if (!x86_pmu.bts)
		bts_err = 1;

	if (!x86_pmu.pebs)
		pebs_err = 1;

	get_online_cpus();

	for_each_possible_cpu(cpu) {
		if (alloc_ds_buffer(cpu)) {
			bts_err = 1;
			pebs_err = 1;
		}

		if (!bts_err && alloc_bts_buffer(cpu))
			bts_err = 1;

		if (!pebs_err && alloc_pebs_buffer(cpu))
			pebs_err = 1;

		if (bts_err && pebs_err)
			break;
	}

	if (bts_err) {
		for_each_possible_cpu(cpu)
			release_bts_buffer(cpu);
	}

	if (pebs_err) {
		for_each_possible_cpu(cpu)
			release_pebs_buffer(cpu);
	}

	if (bts_err && pebs_err) {
		for_each_possible_cpu(cpu)
			release_ds_buffer(cpu);
	} else {
		if (x86_pmu.bts && !bts_err)
			x86_pmu.bts_active = 1;

		if (x86_pmu.pebs && !pebs_err)
			x86_pmu.pebs_active = 1;

		for_each_online_cpu(cpu)
			init_debug_store_on_cpu(cpu);
	}

	put_online_cpus();
}

/*
 * BTS
 */

struct event_constraint bts_constraint =
	EVENT_CONSTRAINT(0, 1ULL << INTEL_PMC_IDX_FIXED_BTS, 0);

void intel_pmu_enable_bts(u64 config)
{
	unsigned long debugctlmsr;

	debugctlmsr = get_debugctlmsr();

	debugctlmsr |= DEBUGCTLMSR_TR;
	debugctlmsr |= DEBUGCTLMSR_BTS;
	if (config & ARCH_PERFMON_EVENTSEL_INT)
		debugctlmsr |= DEBUGCTLMSR_BTINT;

	if (!(config & ARCH_PERFMON_EVENTSEL_OS))
		debugctlmsr |= DEBUGCTLMSR_BTS_OFF_OS;

	if (!(config & ARCH_PERFMON_EVENTSEL_USR))
		debugctlmsr |= DEBUGCTLMSR_BTS_OFF_USR;

	update_debugctlmsr(debugctlmsr);
}

void intel_pmu_disable_bts(void)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	unsigned long debugctlmsr;

	if (!cpuc->ds)
		return;

	debugctlmsr = get_debugctlmsr();

	debugctlmsr &=
		~(DEBUGCTLMSR_TR | DEBUGCTLMSR_BTS | DEBUGCTLMSR_BTINT |
		  DEBUGCTLMSR_BTS_OFF_OS | DEBUGCTLMSR_BTS_OFF_USR);

	update_debugctlmsr(debugctlmsr);
}

int intel_pmu_drain_bts_buffer(void)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct debug_store *ds = cpuc->ds;
	struct bts_record {
		u64	from;
		u64	to;
		u64	flags;
	};
	struct perf_event *event = cpuc->events[INTEL_PMC_IDX_FIXED_BTS];
	struct bts_record *at, *base, *top;
	struct perf_output_handle handle;
	struct perf_event_header header;
	struct perf_sample_data data;
	unsigned long skip = 0;
	struct pt_regs regs;

	if (!event)
		return 0;

	if (!x86_pmu.bts_active)
		return 0;

	base = (struct bts_record *)(unsigned long)ds->bts_buffer_base;
	top  = (struct bts_record *)(unsigned long)ds->bts_index;

	if (top <= base)
		return 0;

	memset(&regs, 0, sizeof(regs));

	ds->bts_index = ds->bts_buffer_base;

	perf_sample_data_init(&data, 0, event->hw.last_period);

	/*
	 * BTS leaks kernel addresses in branches across the cpl boundary,
	 * such as traps or system calls, so unless the user is asking for
	 * kernel tracing (and right now it's not possible), we'd need to
	 * filter them out. But first we need to count how many of those we
	 * have in the current batch. This is an extra O(n) pass; however,
	 * it's much faster than the other one, especially considering that
	 * n <= 2560 (BTS_BUFFER_SIZE / BTS_RECORD_SIZE * 15/16; see
	 * alloc_bts_buffer()).
	 */
	for (at = base; at < top; at++) {
		/*
		 * Note that right now *this* BTS code only works if
		 * attr::exclude_kernel is set, but let's keep this extra
		 * check here in case that changes.
		 */
		if (event->attr.exclude_kernel &&
		    (kernel_ip(at->from) || kernel_ip(at->to)))
			skip++;
	}

	/*
	 * Prepare a generic sample, i.e. fill in the invariant fields.
	 * We will overwrite the from and to address before we output
	 * the sample.
	 */
	rcu_read_lock();
	perf_prepare_sample(&header, &data, event, &regs);

	if (perf_output_begin(&handle, event, header.size *
			      (top - base - skip)))
		goto unlock;

	for (at = base; at < top; at++) {
		/* Filter out any records that contain kernel addresses. */
		if (event->attr.exclude_kernel &&
		    (kernel_ip(at->from) || kernel_ip(at->to)))
			continue;

		data.ip		= at->from;
		data.addr	= at->to;

		perf_output_sample(&handle, &header, &data, event);
	}

	perf_output_end(&handle);

	/* There's new data available. */
	event->hw.interrupts++;
	event->pending_kill = POLL_IN;
unlock:
	rcu_read_unlock();
	return 1;
}

static inline void intel_pmu_drain_pebs_buffer(void)
{
	struct pt_regs regs;

	x86_pmu.drain_pebs(&regs);
}

/*
 * PEBS
 */
struct event_constraint intel_core2_pebs_event_constraints[] = {
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x00c0, 0x1), /* INST_RETIRED.ANY */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0xfec1, 0x1), /* X87_OPS_RETIRED.ANY */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x00c5, 0x1), /* BR_INST_RETIRED.MISPRED */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x1fc7, 0x1), /* SIMD_INST_RETIRED.ANY */
	INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0x1),    /* MEM_LOAD_RETIRED.* */
	/* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
	INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x01),
	EVENT_CONSTRAINT_END
};

struct event_constraint intel_atom_pebs_event_constraints[] = {
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x00c0, 0x1), /* INST_RETIRED.ANY */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x00c5, 0x1), /* MISPREDICTED_BRANCH_RETIRED */
	INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0x1),    /* MEM_LOAD_RETIRED.* */
	/* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
	INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x01),
	/* Allow all events as PEBS with no flags */
	INTEL_ALL_EVENT_CONSTRAINT(0, 0x1),
	EVENT_CONSTRAINT_END
};

struct event_constraint intel_slm_pebs_event_constraints[] = {
	/* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
	INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x1),
	/* Allow all events as PEBS with no flags */
	INTEL_ALL_EVENT_CONSTRAINT(0, 0x1),
	EVENT_CONSTRAINT_END
};

struct event_constraint intel_glm_pebs_event_constraints[] = {
	/* Allow all events as PEBS with no flags */
	INTEL_ALL_EVENT_CONSTRAINT(0, 0x1),
	EVENT_CONSTRAINT_END
};

struct event_constraint intel_glp_pebs_event_constraints[] = {
	/* Allow all events as PEBS with no flags */
	INTEL_ALL_EVENT_CONSTRAINT(0, 0xf),
	EVENT_CONSTRAINT_END
};

struct event_constraint intel_nehalem_pebs_event_constraints[] = {
	INTEL_PLD_CONSTRAINT(0x100b, 0xf),          /* MEM_INST_RETIRED.* */
	INTEL_FLAGS_EVENT_CONSTRAINT(0x0f, 0xf),    /* MEM_UNCORE_RETIRED.* */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x010c, 0xf), /* MEM_STORE_RETIRED.DTLB_MISS */
	INTEL_FLAGS_EVENT_CONSTRAINT(0xc0, 0xf),    /* INST_RETIRED.ANY */
	INTEL_EVENT_CONSTRAINT(0xc2, 0xf),          /* UOPS_RETIRED.* */
	INTEL_FLAGS_EVENT_CONSTRAINT(0xc4, 0xf),    /* BR_INST_RETIRED.* */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x02c5, 0xf), /* BR_MISP_RETIRED.NEAR_CALL */
	INTEL_FLAGS_EVENT_CONSTRAINT(0xc7, 0xf),    /* SSEX_UOPS_RETIRED.* */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x20c8, 0xf), /* ITLB_MISS_RETIRED */
	INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0xf),    /* MEM_LOAD_RETIRED.* */
	INTEL_FLAGS_EVENT_CONSTRAINT(0xf7, 0xf),    /* FP_ASSIST.* */
	/* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
	INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x0f),
	EVENT_CONSTRAINT_END
};

struct event_constraint intel_westmere_pebs_event_constraints[] = {
	INTEL_PLD_CONSTRAINT(0x100b, 0xf),          /* MEM_INST_RETIRED.* */
	INTEL_FLAGS_EVENT_CONSTRAINT(0x0f, 0xf),    /* MEM_UNCORE_RETIRED.* */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x010c, 0xf), /* MEM_STORE_RETIRED.DTLB_MISS */
	INTEL_FLAGS_EVENT_CONSTRAINT(0xc0, 0xf),    /* INSTR_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xc2, 0xf),          /* UOPS_RETIRED.* */
	INTEL_FLAGS_EVENT_CONSTRAINT(0xc4, 0xf),    /* BR_INST_RETIRED.* */
	INTEL_FLAGS_EVENT_CONSTRAINT(0xc5, 0xf),    /* BR_MISP_RETIRED.* */
	INTEL_FLAGS_EVENT_CONSTRAINT(0xc7, 0xf),    /* SSEX_UOPS_RETIRED.* */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x20c8, 0xf), /* ITLB_MISS_RETIRED */
	INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0xf),    /* MEM_LOAD_RETIRED.* */
	INTEL_FLAGS_EVENT_CONSTRAINT(0xf7, 0xf),    /* FP_ASSIST.* */
	/* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
	INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x0f),
	EVENT_CONSTRAINT_END
};

struct event_constraint intel_snb_pebs_event_constraints[] = {
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */
	INTEL_PLD_CONSTRAINT(0x01cd, 0x8),          /* MEM_TRANS_RETIRED.LAT_ABOVE_THR */
	INTEL_PST_CONSTRAINT(0x02cd, 0x8),          /* MEM_TRANS_RETIRED.PRECISE_STORES */
	/* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */
	INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c2, 0xf),
	INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf),        /* MEM_UOP_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf),        /* MEM_LOAD_UOPS_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf),        /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf),        /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */
	/* Allow all events as PEBS with no flags */
	INTEL_ALL_EVENT_CONSTRAINT(0, 0xf),
	EVENT_CONSTRAINT_END
};

struct event_constraint intel_ivb_pebs_event_constraints[] = {
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */
	INTEL_PLD_CONSTRAINT(0x01cd, 0x8),          /* MEM_TRANS_RETIRED.LAT_ABOVE_THR */
	INTEL_PST_CONSTRAINT(0x02cd, 0x8),          /* MEM_TRANS_RETIRED.PRECISE_STORES */
	/* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */
	INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c2, 0xf),
	/* INST_RETIRED.PREC_DIST, inv=1, cmask=16 (cycles:ppp). */
	INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c0, 0x2),
	INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf),        /* MEM_UOP_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf),        /* MEM_LOAD_UOPS_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf),        /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf),        /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */
	/* Allow all events as PEBS with no flags */
	INTEL_ALL_EVENT_CONSTRAINT(0, 0xf),
	EVENT_CONSTRAINT_END
};

struct event_constraint intel_hsw_pebs_event_constraints[] = {
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */
	INTEL_PLD_CONSTRAINT(0x01cd, 0xf),          /* MEM_TRANS_RETIRED.* */
	/* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */
	INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c2, 0xf),
	/* INST_RETIRED.PREC_DIST, inv=1, cmask=16 (cycles:ppp). */
	INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c0, 0x2),
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_NA(0x01c2, 0xf),  /* UOPS_RETIRED.ALL */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XLD(0x11d0, 0xf), /* MEM_UOPS_RETIRED.STLB_MISS_LOADS */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XLD(0x21d0, 0xf), /* MEM_UOPS_RETIRED.LOCK_LOADS */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XLD(0x41d0, 0xf), /* MEM_UOPS_RETIRED.SPLIT_LOADS */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XLD(0x81d0, 0xf), /* MEM_UOPS_RETIRED.ALL_LOADS */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XST(0x12d0, 0xf), /* MEM_UOPS_RETIRED.STLB_MISS_STORES */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XST(0x42d0, 0xf), /* MEM_UOPS_RETIRED.SPLIT_STORES */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XST(0x82d0, 0xf), /* MEM_UOPS_RETIRED.ALL_STORES */
	INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_XLD(0xd1, 0xf),    /* MEM_LOAD_UOPS_RETIRED.* */
	INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_XLD(0xd2, 0xf),    /* MEM_LOAD_UOPS_L3_HIT_RETIRED.* */
	INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_XLD(0xd3, 0xf),    /* MEM_LOAD_UOPS_L3_MISS_RETIRED.* */
	/* Allow all events as PEBS with no flags */
	INTEL_ALL_EVENT_CONSTRAINT(0, 0xf),
	EVENT_CONSTRAINT_END
};

struct event_constraint intel_bdw_pebs_event_constraints[] = {
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */
	INTEL_PLD_CONSTRAINT(0x01cd, 0xf),          /* MEM_TRANS_RETIRED.* */
	/* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */
	INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c2, 0xf),
	/* INST_RETIRED.PREC_DIST, inv=1, cmask=16 (cycles:ppp). */
	INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c0, 0x2),
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_NA(0x01c2, 0xf), /* UOPS_RETIRED.ALL */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x11d0, 0xf), /* MEM_UOPS_RETIRED.STLB_MISS_LOADS */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x21d0, 0xf), /* MEM_UOPS_RETIRED.LOCK_LOADS */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x41d0, 0xf), /* MEM_UOPS_RETIRED.SPLIT_LOADS */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x81d0, 0xf), /* MEM_UOPS_RETIRED.ALL_LOADS */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x12d0, 0xf), /* MEM_UOPS_RETIRED.STLB_MISS_STORES */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x42d0, 0xf), /* MEM_UOPS_RETIRED.SPLIT_STORES */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x82d0, 0xf), /* MEM_UOPS_RETIRED.ALL_STORES */
	INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(0xd1, 0xf),    /* MEM_LOAD_UOPS_RETIRED.* */
	INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(0xd2, 0xf),    /* MEM_LOAD_UOPS_L3_HIT_RETIRED.* */
	INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(0xd3, 0xf),    /* MEM_LOAD_UOPS_L3_MISS_RETIRED.* */
	/* Allow all events as PEBS with no flags */
	INTEL_ALL_EVENT_CONSTRAINT(0, 0xf),
	EVENT_CONSTRAINT_END
};

struct event_constraint intel_skl_pebs_event_constraints[] = {
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x1c0, 0x2),  /* INST_RETIRED.PREC_DIST */
	/* INST_RETIRED.PREC_DIST, inv=1, cmask=16 (cycles:ppp). */
	INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c0, 0x2),
	/* INST_RETIRED.TOTAL_CYCLES_PS (inv=1, cmask=16) (cycles:p). */
	INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x0f),
	INTEL_PLD_CONSTRAINT(0x1cd, 0xf),           /* MEM_TRANS_RETIRED.* */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x11d0, 0xf), /* MEM_INST_RETIRED.STLB_MISS_LOADS */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x12d0, 0xf), /* MEM_INST_RETIRED.STLB_MISS_STORES */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x21d0, 0xf), /* MEM_INST_RETIRED.LOCK_LOADS */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x22d0, 0xf), /* MEM_INST_RETIRED.LOCK_STORES */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x41d0, 0xf), /* MEM_INST_RETIRED.SPLIT_LOADS */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x42d0, 0xf), /* MEM_INST_RETIRED.SPLIT_STORES */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x81d0, 0xf), /* MEM_INST_RETIRED.ALL_LOADS */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x82d0, 0xf), /* MEM_INST_RETIRED.ALL_STORES */
	INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(0xd1, 0xf),    /* MEM_LOAD_RETIRED.* */
	INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(0xd2, 0xf),    /* MEM_LOAD_L3_HIT_RETIRED.* */
	INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(0xd3, 0xf),    /* MEM_LOAD_L3_MISS_RETIRED.* */
	/* Allow all events as PEBS with no flags */
	INTEL_ALL_EVENT_CONSTRAINT(0, 0xf),
	EVENT_CONSTRAINT_END
};

struct event_constraint *intel_pebs_constraints(struct perf_event *event)
{
	struct event_constraint *c;

	if (!event->attr.precise_ip)
		return NULL;

	if (x86_pmu.pebs_constraints) {
		for_each_event_constraint(c, x86_pmu.pebs_constraints) {
			if ((event->hw.config & c->cmask) == c->code) {
				event->hw.flags |= c->flags;
				return c;
			}
		}
	}

	return &emptyconstraint;
}

/*
 * We need the sched_task callback even for per-cpu events when we use
 * the large interrupt threshold, such that we can provide PID and TID
 * to PEBS samples.
 */
static inline bool pebs_needs_sched_cb(struct cpu_hw_events *cpuc)
{
	return cpuc->n_pebs && (cpuc->n_pebs == cpuc->n_large_pebs);
}

void intel_pmu_pebs_sched_task(struct perf_event_context *ctx, bool sched_in)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	if (!sched_in && pebs_needs_sched_cb(cpuc))
		intel_pmu_drain_pebs_buffer();
}

static inline void pebs_update_threshold(struct cpu_hw_events *cpuc)
{
	struct debug_store *ds = cpuc->ds;
	u64 threshold;

	if (cpuc->n_pebs == cpuc->n_large_pebs) {
		threshold = ds->pebs_absolute_maximum -
			x86_pmu.max_pebs_events * x86_pmu.pebs_record_size;
	} else {
		threshold = ds->pebs_buffer_base + x86_pmu.pebs_record_size;
	}

	ds->pebs_interrupt_threshold = threshold;
}
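
/*
 * Illustrative numbers, assuming a 64K buffer, 192-byte HSW records
 * and max_pebs_events = 4: large-PEBS mode arms the PMI four record
 * slots before the end, i.e. after ~337 records; otherwise the
 * threshold is one record past the base, so every record raises a PMI.
 */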

static void
pebs_update_state(bool needed_cb, struct cpu_hw_events *cpuc, struct pmu *pmu)
{
	/*
	 * Make sure we get updated with the first PEBS
	 * event. It will trigger also during removal, but
	 * that does not hurt:
	 */
	bool update = cpuc->n_pebs == 1;

	if (needed_cb != pebs_needs_sched_cb(cpuc)) {
		if (!needed_cb)
			perf_sched_cb_inc(pmu);
		else
			perf_sched_cb_dec(pmu);

		update = true;
	}

	if (update)
		pebs_update_threshold(cpuc);
}

void intel_pmu_pebs_add(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	bool needed_cb = pebs_needs_sched_cb(cpuc);

	cpuc->n_pebs++;
	if (hwc->flags & PERF_X86_EVENT_FREERUNNING)
		cpuc->n_large_pebs++;

	pebs_update_state(needed_cb, cpuc, event->ctx->pmu);
}

void intel_pmu_pebs_enable(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	struct debug_store *ds = cpuc->ds;

	hwc->config &= ~ARCH_PERFMON_EVENTSEL_INT;

	cpuc->pebs_enabled |= 1ULL << hwc->idx;

	if (event->hw.flags & PERF_X86_EVENT_PEBS_LDLAT)
		cpuc->pebs_enabled |= 1ULL << (hwc->idx + 32);
	else if (event->hw.flags & PERF_X86_EVENT_PEBS_ST)
		cpuc->pebs_enabled |= 1ULL << 63;

	/*
	 * Use auto-reload if possible to save an MSR write in the PMI.
	 * This must be done in pmu::start(), because PERF_EVENT_IOC_PERIOD.
	 */
	if (hwc->flags & PERF_X86_EVENT_AUTO_RELOAD) {
		ds->pebs_event_reset[hwc->idx] =
			(u64)(-hwc->sample_period) & x86_pmu.cntval_mask;
	} else {
		ds->pebs_event_reset[hwc->idx] = 0;
	}
}
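
/*
 * Resulting cpuc->pebs_enabled layout, as programmed above: bit i
 * enables PEBS on counter i, bit i+32 requests load-latency data for
 * counter i, and bit 63 enables precise stores.
 */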

void intel_pmu_pebs_del(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	bool needed_cb = pebs_needs_sched_cb(cpuc);

	cpuc->n_pebs--;
	if (hwc->flags & PERF_X86_EVENT_FREERUNNING)
		cpuc->n_large_pebs--;

	pebs_update_state(needed_cb, cpuc, event->ctx->pmu);
}

void intel_pmu_pebs_disable(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;

	if (cpuc->n_pebs == cpuc->n_large_pebs)
		intel_pmu_drain_pebs_buffer();

	cpuc->pebs_enabled &= ~(1ULL << hwc->idx);

	if (event->hw.flags & PERF_X86_EVENT_PEBS_LDLAT)
		cpuc->pebs_enabled &= ~(1ULL << (hwc->idx + 32));
	else if (event->hw.flags & PERF_X86_EVENT_PEBS_ST)
		cpuc->pebs_enabled &= ~(1ULL << 63);

	if (cpuc->enabled)
		wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled);

	hwc->config |= ARCH_PERFMON_EVENTSEL_INT;
}

void intel_pmu_pebs_enable_all(void)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	if (cpuc->pebs_enabled)
		wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled);
}

void intel_pmu_pebs_disable_all(void)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	if (cpuc->pebs_enabled)
		wrmsrl(MSR_IA32_PEBS_ENABLE, 0);
}

static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	unsigned long from = cpuc->lbr_entries[0].from;
	unsigned long old_to, to = cpuc->lbr_entries[0].to;
	unsigned long ip = regs->ip;
	int is_64bit = 0;
	void *kaddr;
	int size;

	/*
	 * We don't need to fixup if the PEBS assist is fault like
	 */
	if (!x86_pmu.intel_cap.pebs_trap)
		return 1;

	/*
	 * No LBR entry, no basic block, no rewinding
	 */
	if (!cpuc->lbr_stack.nr || !from || !to)
		return 0;

	/*
	 * Basic blocks should never cross user/kernel boundaries
	 */
	if (kernel_ip(ip) != kernel_ip(to))
		return 0;

	/*
	 * unsigned math, either ip is before the start (impossible) or
	 * the basic block is larger than 1 page (sanity)
	 */
	if ((ip - to) > PEBS_FIXUP_SIZE)
		return 0;

	/*
	 * We sampled a branch insn, rewind using the LBR stack
	 */
	if (ip == to) {
		set_linear_ip(regs, from);
		return 1;
	}

	size = ip - to;
	if (!kernel_ip(ip)) {
		int bytes;
		u8 *buf = this_cpu_read(insn_buffer);

		/* 'size' must fit our buffer, see above */
		bytes = copy_from_user_nmi(buf, (void __user *)to, size);
		if (bytes != 0)
			return 0;

		kaddr = buf;
	} else {
		kaddr = (void *)to;
	}

	do {
		struct insn insn;

		old_to = to;

#ifdef CONFIG_X86_64
		is_64bit = kernel_ip(to) || !test_thread_flag(TIF_IA32);
#endif
		insn_init(&insn, kaddr, size, is_64bit);
		insn_get_length(&insn);
		/*
		 * Make sure there was not a problem decoding the
		 * instruction and getting the length. This is
		 * doubly important because we have an infinite
		 * loop if insn.length=0.
		 */
		if (!insn.length)
			break;

		to += insn.length;
		kaddr += insn.length;
		size -= insn.length;
	} while (to < ip);

	if (to == ip) {
		set_linear_ip(regs, old_to);
		return 1;
	}

	/*
	 * Even though we decoded the basic block, the instruction stream
	 * never matched the given IP, either the TO or the IP got corrupted.
	 */
	return 0;
}
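
/*
 * In short: with trap-like PEBS the reported IP is the instruction
 * *after* the sampled one. Starting from the last LBR branch target we
 * decode forward until we reach the reported IP; the previously
 * decoded instruction (old_to) is then the actual sampled instruction,
 * giving PERF_EFLAGS_EXACT samples without hardware assistance.
 */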

static inline u64 intel_hsw_weight(struct pebs_record_skl *pebs)
{
	if (pebs->tsx_tuning) {
		union hsw_tsx_tuning tsx = { .value = pebs->tsx_tuning };
		return tsx.cycles_last_block;
	}
	return 0;
}

static inline u64 intel_hsw_transaction(struct pebs_record_skl *pebs)
{
	u64 txn = (pebs->tsx_tuning & PEBS_HSW_TSX_FLAGS) >> 32;

	/* For RTM XABORTs also log the abort code from AX */
	if ((txn & PERF_TXN_TRANSACTION) && (pebs->ax & 1))
		txn |= ((pebs->ax >> 24) & 0xff) << PERF_TXN_ABORT_SHIFT;
	return txn;
}
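
/*
 * PEBS_HSW_TSX_FLAGS (0xff00000000) selects the eight abort-status
 * bits of hsw_tsx_tuning. On an RTM XABORT the imm8 abort code is
 * reported in AX[31:24] with AX[0] set, which is what the check above
 * relies on.
 */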

static void setup_pebs_sample_data(struct perf_event *event,
				   struct pt_regs *iregs, void *__pebs,
				   struct perf_sample_data *data,
				   struct pt_regs *regs)
{
#define PERF_X86_EVENT_PEBS_HSW_PREC \
		(PERF_X86_EVENT_PEBS_ST_HSW | \
		 PERF_X86_EVENT_PEBS_LD_HSW | \
		 PERF_X86_EVENT_PEBS_NA_HSW)
	/*
	 * We cast to the biggest pebs_record but are careful not to
	 * unconditionally access the 'extra' entries.
	 */
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct pebs_record_skl *pebs = __pebs;
	u64 sample_type;
	int fll, fst, dsrc;
	int fl = event->hw.flags;

	if (pebs == NULL)
		return;

	sample_type = event->attr.sample_type;
	dsrc = sample_type & PERF_SAMPLE_DATA_SRC;

	fll = fl & PERF_X86_EVENT_PEBS_LDLAT;
	fst = fl & (PERF_X86_EVENT_PEBS_ST | PERF_X86_EVENT_PEBS_HSW_PREC);

	perf_sample_data_init(data, 0, event->hw.last_period);

	data->period = event->hw.last_period;

	/*
	 * Use latency for weight (only avail with PEBS-LL)
	 */
	if (fll && (sample_type & PERF_SAMPLE_WEIGHT))
		data->weight = pebs->lat;

	/*
	 * data.data_src encodes the data source
	 */
	if (dsrc) {
		u64 val = PERF_MEM_NA;
		if (fll)
			val = load_latency_data(pebs->dse);
		else if (fst && (fl & PERF_X86_EVENT_PEBS_HSW_PREC))
			val = precise_datala_hsw(event, pebs->dse);
		else if (fst)
			val = precise_store_data(pebs->dse);
		data->data_src.val = val;
	}

	/*
	 * We use the interrupt regs as a base because the PEBS record does not
	 * contain a full regs set, specifically it seems to lack segment
	 * descriptors, which get used by things like user_mode().
	 *
	 * In the simple case fix up only the IP for PERF_SAMPLE_IP.
	 *
	 * We must however always use BP,SP from iregs for the unwinder to stay
	 * sane; the record BP,SP can point into thin air when the record is
	 * from a previous PMI context or an (I)RET happened between the record
	 * and PMI.
	 */
	*regs = *iregs;
	regs->flags = pebs->flags;
	set_linear_ip(regs, pebs->ip);

	if (sample_type & PERF_SAMPLE_REGS_INTR) {
		regs->ax = pebs->ax;
		regs->bx = pebs->bx;
		regs->cx = pebs->cx;
		regs->dx = pebs->dx;
		regs->si = pebs->si;
		regs->di = pebs->di;

		/*
		 * Per the above; only set BP,SP if we don't need callchains.
		 *
		 * XXX: does this make sense?
		 */
		if (!(sample_type & PERF_SAMPLE_CALLCHAIN)) {
			regs->bp = pebs->bp;
			regs->sp = pebs->sp;
		}

		/*
		 * Preserve PERF_EFLAGS_VM from set_linear_ip().
		 */
		regs->flags = pebs->flags | (regs->flags & PERF_EFLAGS_VM);
#ifndef CONFIG_X86_32
		regs->r8 = pebs->r8;
		regs->r9 = pebs->r9;
		regs->r10 = pebs->r10;
		regs->r11 = pebs->r11;
		regs->r12 = pebs->r12;
		regs->r13 = pebs->r13;
		regs->r14 = pebs->r14;
		regs->r15 = pebs->r15;
#endif
	}

	if (event->attr.precise_ip > 1 && x86_pmu.intel_cap.pebs_format >= 2) {
		regs->ip = pebs->real_ip;
		regs->flags |= PERF_EFLAGS_EXACT;
	} else if (event->attr.precise_ip > 1 && intel_pmu_pebs_fixup_ip(regs))
		regs->flags |= PERF_EFLAGS_EXACT;
	else
		regs->flags &= ~PERF_EFLAGS_EXACT;

	if ((sample_type & (PERF_SAMPLE_ADDR | PERF_SAMPLE_PHYS_ADDR)) &&
	    x86_pmu.intel_cap.pebs_format >= 1)
		data->addr = pebs->dla;

	if (x86_pmu.intel_cap.pebs_format >= 2) {
		/* Only set the TSX weight when no memory weight. */
		if ((sample_type & PERF_SAMPLE_WEIGHT) && !fll)
			data->weight = intel_hsw_weight(pebs);

		if (sample_type & PERF_SAMPLE_TRANSACTION)
			data->txn = intel_hsw_transaction(pebs);
	}

	/*
	 * v3 supplies an accurate time stamp, so we use that
	 * for the time stamp.
	 *
	 * We can only do this for the default trace clock.
	 */
	if (x86_pmu.intel_cap.pebs_format >= 3 &&
	    event->attr.use_clockid == 0)
		data->time = native_sched_clock_from_tsc(pebs->tsc);

	if (has_branch_stack(event))
		data->br_stack = &cpuc->lbr_stack;
}

static inline void *
get_next_pebs_record_by_bit(void *base, void *top, int bit)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	void *at;
	u64 pebs_status;

	/*
	 * fmt0 does not have a status bitfield (does not use
	 * the pebs_record_nhm format)
	 */
	if (x86_pmu.intel_cap.pebs_format < 1)
		return base;

	if (base == NULL)
		return NULL;

	for (at = base; at < top; at += x86_pmu.pebs_record_size) {
		struct pebs_record_nhm *p = at;

		if (test_bit(bit, (unsigned long *)&p->status)) {
			/* PEBS v3 has accurate status bits */
			if (x86_pmu.intel_cap.pebs_format >= 3)
				return at;

			if (p->status == (1 << bit))
				return at;

			/* clear non-PEBS bit and re-check */
			pebs_status = p->status & cpuc->pebs_enabled;
			pebs_status &= PEBS_COUNTER_MASK;
			if (pebs_status == (1 << bit))
				return at;
		}
	}
	return NULL;
}
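
/*
 * Before PEBS v3 the status field mirrors GLOBAL_STATUS at record time
 * rather than naming the counter that produced the record, so spurious
 * bits can be set; hence the progressively weaker checks above before
 * a record is attributed to 'bit'.
 */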
Yan, Zheng43cf7632015-05-06 15:33:48 -04001253static void __intel_pmu_pebs_event(struct perf_event *event,
Yan, Zheng21509082015-05-06 15:33:49 -04001254 struct pt_regs *iregs,
1255 void *base, void *top,
1256 int bit, int count)
Yan, Zheng43cf7632015-05-06 15:33:48 -04001257{
1258 struct perf_sample_data data;
1259 struct pt_regs regs;
Yan, Zheng21509082015-05-06 15:33:49 -04001260 void *at = get_next_pebs_record_by_bit(base, top, bit);
Yan, Zheng43cf7632015-05-06 15:33:48 -04001261
Yan, Zheng21509082015-05-06 15:33:49 -04001262 if (!intel_pmu_save_and_restart(event) &&
1263 !(event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD))
Yan, Zheng43cf7632015-05-06 15:33:48 -04001264 return;
1265
Peter Zijlstraa3d86542015-05-12 15:18:18 +02001266 while (count > 1) {
1267 setup_pebs_sample_data(event, iregs, at, &data, &regs);
1268 perf_event_output(event, &data, &regs);
1269 at += x86_pmu.pebs_record_size;
1270 at = get_next_pebs_record_by_bit(at, top, bit);
1271 count--;
Yan, Zheng21509082015-05-06 15:33:49 -04001272 }
Stephane Eranian60ce0fb2012-02-09 23:20:57 +01001273
Yan, Zheng21509082015-05-06 15:33:49 -04001274 setup_pebs_sample_data(event, iregs, at, &data, &regs);
1275
 1276	/*
 1277	 * All but the last record were output above. The last one goes
 1278	 * through the overflow handler so that throttling can take effect.
 1279	 */
1280 if (perf_event_overflow(event, &data, &regs)) {
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02001281 x86_pmu_stop(event, 0);
Yan, Zheng21509082015-05-06 15:33:49 -04001282 return;
1283 }
1284
Peter Zijlstra2b0b5c62010-04-08 23:03:20 +02001285}
1286
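/*
 * fmt0 drain path: the records carry no status bitfield and only one
 * counter (PMC0) can be in PEBS mode, so every record in the buffer
 * is attributed to cpuc->events[0].
 */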
Peter Zijlstraca037702010-03-02 19:52:12 +01001287static void intel_pmu_drain_pebs_core(struct pt_regs *iregs)
1288{
Christoph Lameter89cbc762014-08-17 12:30:40 -05001289 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
Peter Zijlstraca037702010-03-02 19:52:12 +01001290 struct debug_store *ds = cpuc->ds;
1291 struct perf_event *event = cpuc->events[0]; /* PMC0 only */
1292 struct pebs_record_core *at, *top;
Peter Zijlstraca037702010-03-02 19:52:12 +01001293 int n;
1294
Peter Zijlstra6809b6e2010-10-19 14:22:50 +02001295 if (!x86_pmu.pebs_active)
Peter Zijlstraca037702010-03-02 19:52:12 +01001296 return;
1297
Peter Zijlstraca037702010-03-02 19:52:12 +01001298 at = (struct pebs_record_core *)(unsigned long)ds->pebs_buffer_base;
1299 top = (struct pebs_record_core *)(unsigned long)ds->pebs_index;
1300
Peter Zijlstrad80c7502010-03-09 11:41:02 +01001301 /*
1302 * Whatever else happens, drain the thing
1303 */
1304 ds->pebs_index = ds->pebs_buffer_base;
1305
1306 if (!test_bit(0, cpuc->active_mask))
Peter Zijlstra8f4aebd2010-03-06 13:26:11 +01001307 return;
Peter Zijlstraca037702010-03-02 19:52:12 +01001308
Peter Zijlstrad80c7502010-03-09 11:41:02 +01001309 WARN_ON_ONCE(!event);
1310
Peter Zijlstraab608342010-04-08 23:03:20 +02001311 if (!event->attr.precise_ip)
Peter Zijlstrad80c7502010-03-09 11:41:02 +01001312 return;
1313
Stephane Eranian1424a092015-12-03 23:33:18 +01001314 n = top - at;
Peter Zijlstrad80c7502010-03-09 11:41:02 +01001315 if (n <= 0)
1316 return;
Peter Zijlstraca037702010-03-02 19:52:12 +01001317
Yan, Zheng21509082015-05-06 15:33:49 -04001318 __intel_pmu_pebs_event(event, iregs, at, top, 0, n);
Peter Zijlstraca037702010-03-02 19:52:12 +01001319}
1320
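/*
 * fmt1+ drain path: walk the buffer once to attribute each record to
 * a counter bit (counting unresolvable collisions as errors), then
 * emit the accumulated samples and losses per event.
 */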
Peter Zijlstrad2beea42013-09-12 13:00:47 +02001321static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
Peter Zijlstraca037702010-03-02 19:52:12 +01001322{
Christoph Lameter89cbc762014-08-17 12:30:40 -05001323 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
Peter Zijlstraca037702010-03-02 19:52:12 +01001324 struct debug_store *ds = cpuc->ds;
Yan, Zheng21509082015-05-06 15:33:49 -04001325 struct perf_event *event;
1326 void *base, *at, *top;
Yan, Zheng21509082015-05-06 15:33:49 -04001327 short counts[MAX_PEBS_EVENTS] = {};
Kan Liangf38b0db2015-05-10 15:13:14 -04001328 short error[MAX_PEBS_EVENTS] = {};
Peter Zijlstraa3d86542015-05-12 15:18:18 +02001329 int bit, i;
Peter Zijlstrad2beea42013-09-12 13:00:47 +02001330
1331 if (!x86_pmu.pebs_active)
1332 return;
1333
Yan, Zheng21509082015-05-06 15:33:49 -04001334 base = (struct pebs_record_nhm *)(unsigned long)ds->pebs_buffer_base;
Peter Zijlstrad2beea42013-09-12 13:00:47 +02001335 top = (struct pebs_record_nhm *)(unsigned long)ds->pebs_index;
Peter Zijlstraca037702010-03-02 19:52:12 +01001336
Peter Zijlstraca037702010-03-02 19:52:12 +01001337 ds->pebs_index = ds->pebs_buffer_base;
1338
Yan, Zheng21509082015-05-06 15:33:49 -04001339 if (unlikely(base >= top))
Peter Zijlstrad2beea42013-09-12 13:00:47 +02001340 return;
1341
Yan, Zheng21509082015-05-06 15:33:49 -04001342 for (at = base; at < top; at += x86_pmu.pebs_record_size) {
Andi Kleen130768b2013-06-17 17:36:47 -07001343 struct pebs_record_nhm *p = at;
Peter Zijlstra75f80852015-07-15 14:35:46 +02001344 u64 pebs_status;
Peter Zijlstraca037702010-03-02 19:52:12 +01001345
Peter Zijlstra8ef9b842016-09-07 14:42:55 +02001346 pebs_status = p->status & cpuc->pebs_enabled;
1347 pebs_status &= (1ULL << x86_pmu.max_pebs_events) - 1;
1348
1349 /* PEBS v3 has more accurate status bits */
Peter Zijlstraa3d86542015-05-12 15:18:18 +02001350 if (x86_pmu.intel_cap.pebs_format >= 3) {
Peter Zijlstra8ef9b842016-09-07 14:42:55 +02001351 for_each_set_bit(bit, (unsigned long *)&pebs_status,
1352 x86_pmu.max_pebs_events)
Peter Zijlstraa3d86542015-05-12 15:18:18 +02001353 counts[bit]++;
1354
1355 continue;
1356 }
1357
Andi Kleen01330d72015-12-03 13:22:20 -08001358 /*
1359 * On some CPUs the PEBS status can be zero when PEBS is
1360 * racing with clearing of GLOBAL_STATUS.
1361 *
1362 * Normally we would drop that record, but in the
1363 * case when there is only a single active PEBS event
1364 * we can assume it's for that event.
1365 */
1366 if (!pebs_status && cpuc->pebs_enabled &&
1367 !(cpuc->pebs_enabled & (cpuc->pebs_enabled-1)))
1368 pebs_status = cpuc->pebs_enabled;
1369
Peter Zijlstra75f80852015-07-15 14:35:46 +02001370 bit = find_first_bit((unsigned long *)&pebs_status,
Yan, Zheng21509082015-05-06 15:33:49 -04001371 x86_pmu.max_pebs_events);
Andi Kleen957ea1f2015-12-03 13:22:19 -08001372 if (bit >= x86_pmu.max_pebs_events)
Peter Zijlstraca037702010-03-02 19:52:12 +01001373 continue;
Peter Zijlstra75f80852015-07-15 14:35:46 +02001374
Yan, Zheng21509082015-05-06 15:33:49 -04001375		/*
 1376		 * The PEBS hardware does not deal well with events that
 1377		 * occur close to each other, causing multiple status bits
 1378		 * to be set. This should happen rarely.
 1379		 *
 1380		 * If these events include one PEBS event and multiple
 1381		 * non-PEBS events, the PEBS record is unaffected and
 1382		 * is handled normally. (slow path)
 1383		 *
 1384		 * If these events include two or more PEBS events, their
 1385		 * records can be collapsed into a single one, and it is
 1386		 * not possible to reconstruct all the events that caused
 1387		 * the record. This is called a collision. If a collision
 1388		 * happened, the record is dropped.
Yan, Zheng21509082015-05-06 15:33:49 -04001389		 */
Peter Zijlstra75f80852015-07-15 14:35:46 +02001390 if (p->status != (1ULL << bit)) {
1391 for_each_set_bit(i, (unsigned long *)&pebs_status,
1392 x86_pmu.max_pebs_events)
1393 error[i]++;
1394 continue;
Yan, Zheng21509082015-05-06 15:33:49 -04001395 }
Peter Zijlstra75f80852015-07-15 14:35:46 +02001396
Yan, Zheng21509082015-05-06 15:33:49 -04001397 counts[bit]++;
1398 }
1399
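	/* Second pass: per counter, log dropped records and emit samples. */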
1400 for (bit = 0; bit < x86_pmu.max_pebs_events; bit++) {
Kan Liangf38b0db2015-05-10 15:13:14 -04001401 if ((counts[bit] == 0) && (error[bit] == 0))
Yan, Zheng21509082015-05-06 15:33:49 -04001402 continue;
Peter Zijlstra75f80852015-07-15 14:35:46 +02001403
Yan, Zheng21509082015-05-06 15:33:49 -04001404 event = cpuc->events[bit];
Peter Zijlstra8ef9b842016-09-07 14:42:55 +02001405 if (WARN_ON_ONCE(!event))
1406 continue;
1407
1408 if (WARN_ON_ONCE(!event->attr.precise_ip))
1409 continue;
Yan, Zheng21509082015-05-06 15:33:49 -04001410
Kan Liangf38b0db2015-05-10 15:13:14 -04001411		/* log the number of dropped samples */
Jiri Olsa475113d2016-12-28 14:31:03 +01001412 if (error[bit]) {
Kan Liangf38b0db2015-05-10 15:13:14 -04001413 perf_log_lost_samples(event, error[bit]);
1414
Jiri Olsa475113d2016-12-28 14:31:03 +01001415 if (perf_event_account_interrupt(event))
1416 x86_pmu_stop(event, 0);
1417 }
1418
Kan Liangf38b0db2015-05-10 15:13:14 -04001419 if (counts[bit]) {
1420 __intel_pmu_pebs_event(event, iregs, base,
1421 top, bit, counts[bit]);
1422 }
Peter Zijlstraca037702010-03-02 19:52:12 +01001423 }
Peter Zijlstraca037702010-03-02 19:52:12 +01001424}
1425
1426/*
1427 * BTS, PEBS probe and setup
1428 */
1429
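/*
 * Pick the PEBS record layout and drain handler matching the format
 * reported by the CPU; an unknown format disables PEBS entirely.
 */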
Mathias Krause066ce642014-08-26 18:49:45 +02001430void __init intel_ds_init(void)
Peter Zijlstraca037702010-03-02 19:52:12 +01001431{
1432 /*
 1433	 * No support for 32-bit formats
1434 */
1435 if (!boot_cpu_has(X86_FEATURE_DTES64))
1436 return;
1437
1438 x86_pmu.bts = boot_cpu_has(X86_FEATURE_BTS);
1439 x86_pmu.pebs = boot_cpu_has(X86_FEATURE_PEBS);
Jiri Olsae72daf32016-03-01 20:03:52 +01001440 x86_pmu.pebs_buffer_size = PEBS_BUFFER_SIZE;
Peter Zijlstraca037702010-03-02 19:52:12 +01001441 if (x86_pmu.pebs) {
Peter Zijlstra8db909a2010-03-03 17:07:40 +01001442 char pebs_type = x86_pmu.intel_cap.pebs_trap ? '+' : '-';
1443 int format = x86_pmu.intel_cap.pebs_format;
Peter Zijlstraca037702010-03-02 19:52:12 +01001444
1445 switch (format) {
1446 case 0:
Chen Yucong1b74dde2016-02-02 11:45:02 +08001447 pr_cont("PEBS fmt0%c, ", pebs_type);
Peter Zijlstraca037702010-03-02 19:52:12 +01001448 x86_pmu.pebs_record_size = sizeof(struct pebs_record_core);
Jiri Olsae72daf32016-03-01 20:03:52 +01001449 /*
1450 * Using >PAGE_SIZE buffers makes the WRMSR to
1451 * PERF_GLOBAL_CTRL in intel_pmu_enable_all()
1452 * mysteriously hang on Core2.
1453 *
1454 * As a workaround, we don't do this.
1455 */
1456 x86_pmu.pebs_buffer_size = PAGE_SIZE;
Peter Zijlstraca037702010-03-02 19:52:12 +01001457 x86_pmu.drain_pebs = intel_pmu_drain_pebs_core;
Peter Zijlstraca037702010-03-02 19:52:12 +01001458 break;
1459
1460 case 1:
Chen Yucong1b74dde2016-02-02 11:45:02 +08001461 pr_cont("PEBS fmt1%c, ", pebs_type);
Peter Zijlstraca037702010-03-02 19:52:12 +01001462 x86_pmu.pebs_record_size = sizeof(struct pebs_record_nhm);
1463 x86_pmu.drain_pebs = intel_pmu_drain_pebs_nhm;
Peter Zijlstraca037702010-03-02 19:52:12 +01001464 break;
1465
Andi Kleen130768b2013-06-17 17:36:47 -07001466 case 2:
1467 pr_cont("PEBS fmt2%c, ", pebs_type);
1468 x86_pmu.pebs_record_size = sizeof(struct pebs_record_hsw);
Peter Zijlstrad2beea42013-09-12 13:00:47 +02001469 x86_pmu.drain_pebs = intel_pmu_drain_pebs_nhm;
Andi Kleen130768b2013-06-17 17:36:47 -07001470 break;
1471
Andi Kleen2f7ebf22015-05-10 12:22:40 -07001472 case 3:
1473 pr_cont("PEBS fmt3%c, ", pebs_type);
1474 x86_pmu.pebs_record_size =
1475 sizeof(struct pebs_record_skl);
1476 x86_pmu.drain_pebs = intel_pmu_drain_pebs_nhm;
Andi Kleena7b58d22015-05-27 21:13:14 -07001477 x86_pmu.free_running_flags |= PERF_SAMPLE_TIME;
Andi Kleen2f7ebf22015-05-10 12:22:40 -07001478 break;
1479
Peter Zijlstraca037702010-03-02 19:52:12 +01001480 default:
Chen Yucong1b74dde2016-02-02 11:45:02 +08001481 pr_cont("no PEBS fmt%d%c, ", format, pebs_type);
Peter Zijlstraca037702010-03-02 19:52:12 +01001482 x86_pmu.pebs = 0;
Peter Zijlstraca037702010-03-02 19:52:12 +01001483 }
1484 }
1485}
Stephane Eranian1d9d8632013-03-15 14:26:07 +01001486
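/*
 * Reprogram MSR_IA32_DS_AREA with this CPU's debug store address,
 * restoring it after the MSR contents were lost (e.g. across a
 * suspend/resume cycle).
 */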
1487void perf_restore_debug_store(void)
1488{
Linus Torvalds2a6e06b2013-03-17 15:44:43 -07001489 struct debug_store *ds = __this_cpu_read(cpu_hw_events.ds);
1490
Stephane Eranian1d9d8632013-03-15 14:26:07 +01001491 if (!x86_pmu.bts && !x86_pmu.pebs)
1492 return;
1493
Linus Torvalds2a6e06b2013-03-17 15:44:43 -07001494 wrmsrl(MSR_IA32_DS_AREA, (unsigned long)ds);
Stephane Eranian1d9d8632013-03-15 14:26:07 +01001495}