/*
 * Performance counter support for POWER8 processors.
 *
 * Copyright 2009 Paul Mackerras, IBM Corporation.
 * Copyright 2013 Michael Ellerman, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/perf_event.h>
#include <asm/firmware.h>


/*
 * Some power8 event codes.
 */
#define PM_CYC				0x0001e
#define PM_GCT_NOSLOT_CYC		0x100f8
#define PM_CMPLU_STALL			0x4000a
#define PM_INST_CMPL			0x00002
#define PM_BRU_FIN			0x10068
#define PM_BR_MPRED_CMPL		0x400f6


/*
 * Raw event encoding for POWER8:
 *
 *        60        56        52        48        44        40        36        32
 * | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - |
 *   |                                 [      thresh_cmp     ]   [  thresh_ctl   ]
 *   |                                                                   |
 *   *- EBB (Linux)                     thresh start/stop OR FAB match -*
 *
 *        28        24        20        16        12         8         4         0
 * | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - |
 *   [   ] [  sample ]   [cache]   [ pmc ]   [unit ]   c     m   [    pmcxsel    ]
 *     |        |           |                          |     |
 *     |        |           |                          |     *- mark
 *     |        |           *- L1/L2/L3 cache_sel      |
 *     |        |                                      |
 *     |        *- sampling mode for marked events     *- combine
 *     |
 *     *- thresh_sel
 *
 * Below uses IBM bit numbering.
 *
 * MMCR1[x:y] = unit    (PMCxUNIT)
 * MMCR1[x]   = combine (PMCxCOMB)
 *
 * if pmc == 3 and unit == 0 and pmcxsel[0:6] == 0b0101011
 *	# PM_MRK_FAB_RSP_MATCH
 *	MMCR1[20:27] = thresh_ctl   (FAB_CRESP_MATCH / FAB_TYPE_MATCH)
 * else if pmc == 4 and unit == 0xf and pmcxsel[0:6] == 0b0101001
 *	# PM_MRK_FAB_RSP_MATCH_CYC
 *	MMCR1[20:27] = thresh_ctl   (FAB_CRESP_MATCH / FAB_TYPE_MATCH)
 * else
 *	MMCRA[48:55] = thresh_ctl   (THRESH START/END)
 *
 * if thresh_sel:
 *	MMCRA[45:47] = thresh_sel
 *
 * if thresh_cmp:
 *	MMCRA[22:24] = thresh_cmp[0:2]
 *	MMCRA[25:31] = thresh_cmp[3:9]
 *
 * if unit == 6 or unit == 7
 *	MMCRC[53:55] = cache_sel[1:3]	(L2EVENT_SEL)
 * else if unit == 8 or unit == 9:
 *	if cache_sel[0] == 0:	# L3 bank
 *		MMCRC[47:49] = cache_sel[1:3]	(L3EVENT_SEL0)
 *	else if cache_sel[0] == 1:
 *		MMCRC[50:51] = cache_sel[2:3]	(L3EVENT_SEL1)
 * else if cache_sel[1]:	# L1 event
 *	MMCR1[16] = cache_sel[2]
 *	MMCR1[17] = cache_sel[3]
 *
 * if mark:
 *	MMCRA[63]    = 1		(SAMPLE_ENABLE)
 *	MMCRA[57:59] = sample[0:2]	(RAND_SAMP_ELIG)
 *	MMCRA[61:62] = sample[3:4]	(RAND_SAMP_MODE)
 *
 */

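/*
 * Worked example of the encoding above: PM_BR_MPRED_CMPL (0x400f6)
 * decodes as pmcxsel = 0xf6 with pmc = 4, and all other fields zero.
 */
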
#define EVENT_EBB_MASK		1ull
#define EVENT_THR_CMP_SHIFT	40	/* Threshold CMP value */
#define EVENT_THR_CMP_MASK	0x3ff
#define EVENT_THR_CTL_SHIFT	32	/* Threshold control value (start/stop) */
#define EVENT_THR_CTL_MASK	0xffull
#define EVENT_THR_SEL_SHIFT	29	/* Threshold select value */
#define EVENT_THR_SEL_MASK	0x7
#define EVENT_THRESH_SHIFT	29	/* All threshold bits */
#define EVENT_THRESH_MASK	0x1fffffull
#define EVENT_SAMPLE_SHIFT	24	/* Sampling mode & eligibility */
#define EVENT_SAMPLE_MASK	0x1f
#define EVENT_CACHE_SEL_SHIFT	20	/* L2/L3 cache select */
#define EVENT_CACHE_SEL_MASK	0xf
#define EVENT_IS_L1		(4 << EVENT_CACHE_SEL_SHIFT)
#define EVENT_PMC_SHIFT		16	/* PMC number (1-based) */
#define EVENT_PMC_MASK		0xf
#define EVENT_UNIT_SHIFT	12	/* Unit */
#define EVENT_UNIT_MASK		0xf
#define EVENT_COMBINE_SHIFT	11	/* Combine bit */
#define EVENT_COMBINE_MASK	0x1
#define EVENT_MARKED_SHIFT	8	/* Marked bit */
#define EVENT_MARKED_MASK	0x1
#define EVENT_IS_MARKED		(EVENT_MARKED_MASK << EVENT_MARKED_SHIFT)
#define EVENT_PSEL_MASK		0xff	/* PMCxSEL value */

#define EVENT_VALID_MASK	\
	((EVENT_THRESH_MASK    << EVENT_THRESH_SHIFT)		| \
	 (EVENT_SAMPLE_MASK    << EVENT_SAMPLE_SHIFT)		| \
	 (EVENT_CACHE_SEL_MASK << EVENT_CACHE_SEL_SHIFT)	| \
	 (EVENT_PMC_MASK       << EVENT_PMC_SHIFT)		| \
	 (EVENT_UNIT_MASK      << EVENT_UNIT_SHIFT)		| \
	 (EVENT_COMBINE_MASK   << EVENT_COMBINE_SHIFT)		| \
	 (EVENT_MARKED_MASK    << EVENT_MARKED_SHIFT)		| \
	 (EVENT_EBB_MASK       << PERF_EVENT_CONFIG_EBB_SHIFT)	| \
	  EVENT_PSEL_MASK)

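/*
 * Illustrative sketch (an editorial example, not used elsewhere in this
 * file): a raw event is only encodable if every set bit falls inside
 * EVENT_VALID_MASK, which is the first check power8_get_constraint()
 * performs below.
 */
static inline bool power8_event_is_encodable(u64 event)
{
	/* Any bit outside the defined fields makes the event invalid */
	return (event & ~EVENT_VALID_MASK) == 0;
}
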
/* MMCRA IFM bits - POWER8 */
#define POWER8_MMCRA_IFM1	0x0000000040000000UL
#define POWER8_MMCRA_IFM2	0x0000000080000000UL
#define POWER8_MMCRA_IFM3	0x00000000C0000000UL

#define ONLY_PLM \
	(PERF_SAMPLE_BRANCH_USER	|\
	 PERF_SAMPLE_BRANCH_KERNEL	|\
	 PERF_SAMPLE_BRANCH_HV)

/*
 * Layout of constraint bits:
 *
 *        60        56        52        48        44        40        36        32
 * | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - |
 *   [   fab_match   ]         [       thresh_cmp      ] [   thresh_ctl    ] [   ]
 *                                                                               |
 *                                                                  thresh_sel -*
 *
 *        28        24        20        16        12         8         4         0
 * | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - |
 *                   |  [   ]  [ sample  ]   [     ]   [6] [5]   [4] [3]   [2] [1]
 *              EBB -*    |                     |
 *                        |                     |    Count of events for each PMC.
 *      L1 I/D qualifier -*                     |    p1, p2, p3, p4, p5, p6.
 *                     nc - number of counters -*
 *
 * The PMC fields P1..P6, and NC, are adder fields. As we accumulate constraints
 * we want the low bit of each field to be added to any existing value.
 *
 * Everything else is a value field.
 */

#define CNST_FAB_MATCH_VAL(v)	(((v) & EVENT_THR_CTL_MASK) << 56)
#define CNST_FAB_MATCH_MASK	CNST_FAB_MATCH_VAL(EVENT_THR_CTL_MASK)

/* We just throw all the threshold bits into the constraint */
#define CNST_THRESH_VAL(v)	(((v) & EVENT_THRESH_MASK) << 32)
#define CNST_THRESH_MASK	CNST_THRESH_VAL(EVENT_THRESH_MASK)

#define CNST_EBB_VAL(v)		(((v) & EVENT_EBB_MASK) << 24)
#define CNST_EBB_MASK		CNST_EBB_VAL(EVENT_EBB_MASK)

#define CNST_L1_QUAL_VAL(v)	(((v) & 3) << 22)
#define CNST_L1_QUAL_MASK	CNST_L1_QUAL_VAL(3)

#define CNST_SAMPLE_VAL(v)	(((v) & EVENT_SAMPLE_MASK) << 16)
#define CNST_SAMPLE_MASK	CNST_SAMPLE_VAL(EVENT_SAMPLE_MASK)

/*
 * For NC we are counting up to 4 events. This requires three bits, and we need
 * the fifth event to overflow and set the 4th bit. To achieve that we bias the
 * fields by 3 in test_adder.
 */
#define CNST_NC_SHIFT		12
#define CNST_NC_VAL		(1 << CNST_NC_SHIFT)
#define CNST_NC_MASK		(8 << CNST_NC_SHIFT)
#define POWER8_TEST_ADDER	(3 << CNST_NC_SHIFT)

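/*
 * Worked example: with the bias of 3, four events sum to 3 + 4 = 7
 * (0b0111) and the masked bit 15 (CNST_NC_MASK) stays clear; a fifth
 * event makes the sum 8 (0b1000), setting the masked bit and failing
 * the constraint check.
 */
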
/*
 * For the per-PMC fields we have two bits. The low bit is added, so if two
 * events ask for the same PMC the sum will overflow, setting the high bit,
 * indicating an error. So our mask sets the high bit.
 */
#define CNST_PMC_SHIFT(pmc)	((pmc - 1) * 2)
#define CNST_PMC_VAL(pmc)	(1 << CNST_PMC_SHIFT(pmc))
#define CNST_PMC_MASK(pmc)	(2 << CNST_PMC_SHIFT(pmc))

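/*
 * e.g. two events both requesting PMC1: each contributes CNST_PMC_VAL(1)
 * = 1, the bits sum to 2 == CNST_PMC_MASK(1), the masked high bit is set
 * and the conflict is detected.
 */
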
/* Our add_fields is defined as: */
#define POWER8_ADD_FIELDS	\
	CNST_PMC_VAL(1) | CNST_PMC_VAL(2) | CNST_PMC_VAL(3) | \
	CNST_PMC_VAL(4) | CNST_PMC_VAL(5) | CNST_PMC_VAL(6) | CNST_NC_VAL


/* Bits in MMCR1 for POWER8 */
#define MMCR1_UNIT_SHIFT(pmc)		(60 - (4 * ((pmc) - 1)))
#define MMCR1_COMBINE_SHIFT(pmc)	(35 - ((pmc) - 1))
#define MMCR1_PMCSEL_SHIFT(pmc)		(24 - (((pmc) - 1)) * 8)
#define MMCR1_DC_QUAL_SHIFT		47
#define MMCR1_IC_QUAL_SHIFT		46

/* Bits in MMCRA for POWER8 */
#define MMCRA_SAMP_MODE_SHIFT		1
#define MMCRA_SAMP_ELIG_SHIFT		4
#define MMCRA_THR_CTL_SHIFT		8
#define MMCRA_THR_SEL_SHIFT		16
#define MMCRA_THR_CMP_SHIFT		32
#define MMCRA_SDAR_MODE_TLB		(1ull << 42)


static inline bool event_is_fab_match(u64 event)
{
	/* Only check pmc, unit and pmcxsel, ignore the edge bit (0) */
	event &= 0xff0fe;
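	/* e.g. 0x30056 survives the mask: pmc = 3, unit = 0, pmcxsel = 0x56 */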

	/* PM_MRK_FAB_RSP_MATCH & PM_MRK_FAB_RSP_MATCH_CYC */
	return (event == 0x30056 || event == 0x4f052);
}

static int power8_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp)
{
	unsigned int unit, pmc, cache, ebb;
	unsigned long mask, value;

	mask = value = 0;

	if (event & ~EVENT_VALID_MASK)
		return -1;

	pmc   = (event >> EVENT_PMC_SHIFT)       & EVENT_PMC_MASK;
	unit  = (event >> EVENT_UNIT_SHIFT)      & EVENT_UNIT_MASK;
	cache = (event >> EVENT_CACHE_SEL_SHIFT) & EVENT_CACHE_SEL_MASK;
	ebb   = (event >> PERF_EVENT_CONFIG_EBB_SHIFT) & EVENT_EBB_MASK;

	/* Clear the EBB bit in the event, so event checks work below */
	event &= ~(EVENT_EBB_MASK << PERF_EVENT_CONFIG_EBB_SHIFT);

	if (pmc) {
		if (pmc > 6)
			return -1;

		mask  |= CNST_PMC_MASK(pmc);
		value |= CNST_PMC_VAL(pmc);

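		/*
		 * PMC5 and PMC6 can only count PM_RUN_INST_CMPL (0x500fa)
		 * and PM_RUN_CYC (0x600f4) respectively.
		 */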
		if (pmc >= 5 && event != 0x500fa && event != 0x600f4)
			return -1;
	}

	if (pmc <= 4) {
		/*
		 * Add to number of counters in use. Note this includes events with
		 * a PMC of 0 - they still need a PMC, it's just assigned later.
		 * Don't count events on PMC 5 & 6, there is only one valid event
		 * on each of those counters, and they are handled above.
		 */
		mask  |= CNST_NC_MASK;
		value |= CNST_NC_VAL;
	}

	if (unit >= 6 && unit <= 9) {
		/*
		 * L2/L3 events contain a cache selector field, which is
		 * supposed to be programmed into MMCRC. However MMCRC is only
		 * HV writable, and there is no API for guest kernels to modify
		 * it. The solution is for the hypervisor to initialise the
		 * field to zeroes, and for us to only ever allow events that
		 * have a cache selector of zero.
		 */
		if (cache)
			return -1;

	} else if (event & EVENT_IS_L1) {
		mask  |= CNST_L1_QUAL_MASK;
		value |= CNST_L1_QUAL_VAL(cache);
	}

	if (event & EVENT_IS_MARKED) {
		mask  |= CNST_SAMPLE_MASK;
		value |= CNST_SAMPLE_VAL(event >> EVENT_SAMPLE_SHIFT);
	}

	/*
	 * Special case for PM_MRK_FAB_RSP_MATCH and PM_MRK_FAB_RSP_MATCH_CYC:
	 * the threshold control bits are used for the match value.
	 */
	if (event_is_fab_match(event)) {
		mask  |= CNST_FAB_MATCH_MASK;
		value |= CNST_FAB_MATCH_VAL(event >> EVENT_THR_CTL_SHIFT);
	} else {
		/*
		 * Check the mantissa upper two bits are not zero, unless the
		 * exponent is also zero. See the THRESH_CMP_MANTISSA doc.
		 */
		unsigned int cmp, exp;

		cmp = (event >> EVENT_THR_CMP_SHIFT) & EVENT_THR_CMP_MASK;
		exp = cmp >> 7;

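		/*
		 * cmp[0:2] is the exponent and cmp[3:9] the mantissa; 0x60
		 * selects the mantissa's upper two bits.
		 */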
		if (exp && (cmp & 0x60) == 0)
			return -1;

		mask  |= CNST_THRESH_MASK;
		value |= CNST_THRESH_VAL(event >> EVENT_THRESH_SHIFT);
	}

	if (!pmc && ebb)
		/* EBB events must specify the PMC */
		return -1;

	/*
	 * All events must agree on EBB, either all request it or none.
	 * EBB events are pinned & exclusive, so this should never actually
	 * hit, but we leave it as a fallback in case.
	 */
	mask  |= CNST_EBB_MASK;
	value |= CNST_EBB_VAL(ebb);

	*maskp = mask;
	*valp = value;

	return 0;
}

static int power8_compute_mmcr(u64 event[], int n_ev,
			       unsigned int hwc[], unsigned long mmcr[])
{
	unsigned long mmcra, mmcr1, unit, combine, psel, cache, val;
	unsigned int pmc, pmc_inuse;
	int i;

	pmc_inuse = 0;

	/* First pass to count resource use */
	for (i = 0; i < n_ev; ++i) {
		pmc = (event[i] >> EVENT_PMC_SHIFT) & EVENT_PMC_MASK;
		if (pmc)
			pmc_inuse |= 1 << pmc;
	}

	/* In continuous sampling mode, update SDAR on TLB miss */
	mmcra = MMCRA_SDAR_MODE_TLB;
	mmcr1 = 0;

	/* Second pass: assign PMCs, set all MMCR1 fields */
	for (i = 0; i < n_ev; ++i) {
		pmc     = (event[i] >> EVENT_PMC_SHIFT) & EVENT_PMC_MASK;
		unit    = (event[i] >> EVENT_UNIT_SHIFT) & EVENT_UNIT_MASK;
		combine = (event[i] >> EVENT_COMBINE_SHIFT) & EVENT_COMBINE_MASK;
		psel    =  event[i] & EVENT_PSEL_MASK;

		if (!pmc) {
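			/* The event can go on any PMC; take the first free one */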
			for (pmc = 1; pmc <= 4; ++pmc) {
				if (!(pmc_inuse & (1 << pmc)))
					break;
			}

			pmc_inuse |= 1 << pmc;
		}

		if (pmc <= 4) {
			mmcr1 |= unit << MMCR1_UNIT_SHIFT(pmc);
			mmcr1 |= combine << MMCR1_COMBINE_SHIFT(pmc);
			mmcr1 |= psel << MMCR1_PMCSEL_SHIFT(pmc);
		}

		if (event[i] & EVENT_IS_L1) {
			cache = event[i] >> EVENT_CACHE_SEL_SHIFT;
			mmcr1 |= (cache & 1) << MMCR1_IC_QUAL_SHIFT;
			cache >>= 1;
			mmcr1 |= (cache & 1) << MMCR1_DC_QUAL_SHIFT;
		}

375
376 if (event[i] & EVENT_IS_MARKED) {
377 mmcra |= MMCRA_SAMPLE_ENABLE;
378
379 val = (event[i] >> EVENT_SAMPLE_SHIFT) & EVENT_SAMPLE_MASK;
380 if (val) {
381 mmcra |= (val & 3) << MMCRA_SAMP_MODE_SHIFT;
382 mmcra |= (val >> 2) << MMCRA_SAMP_ELIG_SHIFT;
383 }
384 }
385
		/*
		 * For PM_MRK_FAB_RSP_MATCH and PM_MRK_FAB_RSP_MATCH_CYC,
		 * the threshold bits are used for the match value.
		 */
		if (event_is_fab_match(event[i])) {
			mmcr1 |= (event[i] >> EVENT_THR_CTL_SHIFT) &
				  EVENT_THR_CTL_MASK;
		} else {
			val = (event[i] >> EVENT_THR_CTL_SHIFT) & EVENT_THR_CTL_MASK;
			mmcra |= val << MMCRA_THR_CTL_SHIFT;
			val = (event[i] >> EVENT_THR_SEL_SHIFT) & EVENT_THR_SEL_MASK;
			mmcra |= val << MMCRA_THR_SEL_SHIFT;
			val = (event[i] >> EVENT_THR_CMP_SHIFT) & EVENT_THR_CMP_MASK;
			mmcra |= val << MMCRA_THR_CMP_SHIFT;
		}

		hwc[i] = pmc - 1;
	}

	/* Return MMCRx values */
	mmcr[0] = 0;

	/* pmc_inuse is 1-based */
	if (pmc_inuse & 2)
		mmcr[0] = MMCR0_PMC1CE;

	if (pmc_inuse & 0x7c)
		mmcr[0] |= MMCR0_PMCjCE;

	/* If we're not using PMC 5 or 6, freeze them */
	if (!(pmc_inuse & 0x60))
		mmcr[0] |= MMCR0_FC56;

	mmcr[1] = mmcr1;
	mmcr[2] = mmcra;

	return 0;
}

#define MAX_ALT	2

/* Table of alternatives, sorted by column 0 */
static const unsigned int event_alternatives[][MAX_ALT] = {
	{ 0x10134, 0x301e2 },		/* PM_MRK_ST_CMPL */
	{ 0x10138, 0x40138 },		/* PM_BR_MRK_2PATH */
	{ 0x18082, 0x3e05e },		/* PM_L3_CO_MEPF */
	{ 0x1d14e, 0x401e8 },		/* PM_MRK_DATA_FROM_L2MISS */
	{ 0x1e054, 0x4000a },		/* PM_CMPLU_STALL */
	{ 0x20036, 0x40036 },		/* PM_BR_2PATH */
	{ 0x200f2, 0x300f2 },		/* PM_INST_DISP */
	{ 0x200f4, 0x600f4 },		/* PM_RUN_CYC */
	{ 0x2013c, 0x3012e },		/* PM_MRK_FILT_MATCH */
	{ 0x3e054, 0x400f0 },		/* PM_LD_MISS_L1 */
	{ 0x400fa, 0x500fa },		/* PM_RUN_INST_CMPL */
};

/*
 * Scan the alternatives table for a match and return the
 * index into the alternatives table if found, else -1.
 */
static int find_alternative(u64 event)
{
	int i, j;

	for (i = 0; i < ARRAY_SIZE(event_alternatives); ++i) {
		if (event < event_alternatives[i][0])
			break;

		for (j = 0; j < MAX_ALT && event_alternatives[i][j]; ++j)
			if (event == event_alternatives[i][j])
				return i;
	}

	return -1;
}

static int power8_get_alternatives(u64 event, unsigned int flags, u64 alt[])
{
	int i, j, num_alt = 0;
	u64 alt_event;

	alt[num_alt++] = event;

	i = find_alternative(event);
	if (i >= 0) {
		/* Filter out the original event, it's already in alt[0] */
		for (j = 0; j < MAX_ALT; ++j) {
			alt_event = event_alternatives[i][j];
			if (alt_event && alt_event != event)
				alt[num_alt++] = alt_event;
		}
	}

	if (flags & PPMU_ONLY_COUNT_RUN) {
		/*
		 * We're only counting in RUN state, so PM_CYC is equivalent
		 * to PM_RUN_CYC and PM_INST_CMPL to PM_RUN_INST_CMPL.
		 */
		j = num_alt;
		for (i = 0; i < num_alt; ++i) {
			switch (alt[i]) {
			case 0x1e:			/* PM_CYC */
				alt[j++] = 0x600f4;	/* PM_RUN_CYC */
				break;
			case 0x600f4:			/* PM_RUN_CYC */
				alt[j++] = 0x1e;
				break;
			case 0x2:			/* PM_PPC_CMPL */
				alt[j++] = 0x500fa;	/* PM_RUN_INST_CMPL */
				break;
			case 0x500fa:			/* PM_RUN_INST_CMPL */
				alt[j++] = 0x2;		/* PM_PPC_CMPL */
				break;
			}
		}
		num_alt = j;
	}

	return num_alt;
}


static void power8_disable_pmc(unsigned int pmc, unsigned long mmcr[])
{
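	/* pmc is 0-based here; PMC5 and PMC6 have no PMCSEL field to clear */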
	if (pmc <= 3)
		mmcr[1] &= ~(0xffUL << MMCR1_PMCSEL_SHIFT(pmc + 1));
}

PMU_FORMAT_ATTR(event,		"config:0-49");
PMU_FORMAT_ATTR(pmcxsel,	"config:0-7");
PMU_FORMAT_ATTR(mark,		"config:8");
PMU_FORMAT_ATTR(combine,	"config:11");
PMU_FORMAT_ATTR(unit,		"config:12-15");
PMU_FORMAT_ATTR(pmc,		"config:16-19");
PMU_FORMAT_ATTR(cache_sel,	"config:20-23");
PMU_FORMAT_ATTR(sample_mode,	"config:24-28");
PMU_FORMAT_ATTR(thresh_sel,	"config:29-31");
PMU_FORMAT_ATTR(thresh_stop,	"config:32-35");
PMU_FORMAT_ATTR(thresh_start,	"config:36-39");
PMU_FORMAT_ATTR(thresh_cmp,	"config:40-49");

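/*
 * Example (illustrative): these format strings let userspace compose a
 * raw config field by field, e.g. pmc = 1 and pmcxsel = 0xf8 pack to
 * config = (1 << 16) | 0xf8, which is PM_GCT_NOSLOT_CYC above.
 */
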
static struct attribute *power8_pmu_format_attr[] = {
	&format_attr_event.attr,
	&format_attr_pmcxsel.attr,
	&format_attr_mark.attr,
	&format_attr_combine.attr,
	&format_attr_unit.attr,
	&format_attr_pmc.attr,
	&format_attr_cache_sel.attr,
	&format_attr_sample_mode.attr,
	&format_attr_thresh_sel.attr,
	&format_attr_thresh_stop.attr,
	&format_attr_thresh_start.attr,
	&format_attr_thresh_cmp.attr,
	NULL,
};

static struct attribute_group power8_pmu_format_group = {
	.name = "format",
	.attrs = power8_pmu_format_attr,
};

static const struct attribute_group *power8_pmu_attr_groups[] = {
	&power8_pmu_format_group,
	NULL,
};

static int power8_generic_events[] = {
	[PERF_COUNT_HW_CPU_CYCLES] =			PM_CYC,
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =	PM_GCT_NOSLOT_CYC,
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =	PM_CMPLU_STALL,
	[PERF_COUNT_HW_INSTRUCTIONS] =			PM_INST_CMPL,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] =		PM_BRU_FIN,
	[PERF_COUNT_HW_BRANCH_MISSES] =			PM_BR_MPRED_CMPL,
};

static u64 power8_bhrb_filter_map(u64 branch_sample_type)
{
	u64 pmu_bhrb_filter = 0;

	/*
	 * BHRB and regular PMU events share the same privilege state
	 * filter configuration. BHRB is always recorded along with a
	 * regular PMU event. As the privilege state filter is handled
	 * in the basic PMC configuration of the accompanying regular
	 * PMU event, we ignore any separate BHRB specific request.
	 */

	/* No branch filter requested */
	if (branch_sample_type & PERF_SAMPLE_BRANCH_ANY)
		return pmu_bhrb_filter;

	/* Invalid branch filter options - HW does not support */
	if (branch_sample_type & PERF_SAMPLE_BRANCH_ANY_RETURN)
		return -1;

	if (branch_sample_type & PERF_SAMPLE_BRANCH_IND_CALL)
		return -1;

	if (branch_sample_type & PERF_SAMPLE_BRANCH_ANY_CALL) {
		pmu_bhrb_filter |= POWER8_MMCRA_IFM1;
		return pmu_bhrb_filter;
	}

	/* Everything else is unsupported */
	return -1;
}

static void power8_config_bhrb(u64 pmu_bhrb_filter)
{
	/* Enable BHRB filter in PMU */
	mtspr(SPRN_MMCRA, (mfspr(SPRN_MMCRA) | pmu_bhrb_filter));
}

static struct power_pmu power8_pmu = {
	.name			= "POWER8",
	.n_counter		= 6,
	.max_alternatives	= MAX_ALT + 1,
	.add_fields		= POWER8_ADD_FIELDS,
	.test_adder		= POWER8_TEST_ADDER,
	.compute_mmcr		= power8_compute_mmcr,
	.config_bhrb		= power8_config_bhrb,
	.bhrb_filter_map	= power8_bhrb_filter_map,
	.get_constraint		= power8_get_constraint,
	.get_alternatives	= power8_get_alternatives,
	.disable_pmc		= power8_disable_pmc,
	.flags			= PPMU_HAS_SSLOT | PPMU_HAS_SIER | PPMU_BHRB | PPMU_EBB,
	.n_generic		= ARRAY_SIZE(power8_generic_events),
	.generic_events		= power8_generic_events,
	.attr_groups		= power8_pmu_attr_groups,
	.bhrb_nr		= 32,
};

static int __init init_power8_pmu(void)
{
	int rc;

	if (!cur_cpu_spec->oprofile_cpu_type ||
	    strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power8"))
		return -ENODEV;

	rc = register_power_pmu(&power8_pmu);
	if (rc)
		return rc;

	/* Tell userspace that EBB is supported */
	cur_cpu_spec->cpu_user_features2 |= PPC_FEATURE2_EBB;

	return 0;
}
early_initcall(init_power8_pmu);