/*
 * Common Performance counter support functions for PowerISA v2.07 processors.
 *
 * Copyright 2009 Paul Mackerras, IBM Corporation.
 * Copyright 2013 Michael Ellerman, IBM Corporation.
 * Copyright 2016 Madhavan Srinivasan, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include "isa207-common.h"

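/*
 * Format attributes exported to userspace via sysfs, e.g. under
 * /sys/bus/event_source/devices/cpu/format/.  They tell the perf tool
 * how to pack each named field into perf_event_attr.config, so raw
 * events can be requested by field name, e.g.
 * "perf stat -e cpu/pmc=1,pmcxsel=0xf2/".  Note that "event" spans
 * bits 0-49 and thus overlaps all of the individual sub-fields.
 */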
PMU_FORMAT_ATTR(event, "config:0-49");
PMU_FORMAT_ATTR(pmcxsel, "config:0-7");
PMU_FORMAT_ATTR(mark, "config:8");
PMU_FORMAT_ATTR(combine, "config:11");
PMU_FORMAT_ATTR(unit, "config:12-15");
PMU_FORMAT_ATTR(pmc, "config:16-19");
PMU_FORMAT_ATTR(cache_sel, "config:20-23");
PMU_FORMAT_ATTR(sample_mode, "config:24-28");
PMU_FORMAT_ATTR(thresh_sel, "config:29-31");
PMU_FORMAT_ATTR(thresh_stop, "config:32-35");
PMU_FORMAT_ATTR(thresh_start, "config:36-39");
PMU_FORMAT_ATTR(thresh_cmp, "config:40-49");

struct attribute *isa207_pmu_format_attr[] = {
	&format_attr_event.attr,
	&format_attr_pmcxsel.attr,
	&format_attr_mark.attr,
	&format_attr_combine.attr,
	&format_attr_unit.attr,
	&format_attr_pmc.attr,
	&format_attr_cache_sel.attr,
	&format_attr_sample_mode.attr,
	&format_attr_thresh_sel.attr,
	&format_attr_thresh_stop.attr,
	&format_attr_thresh_start.attr,
	&format_attr_thresh_cmp.attr,
	NULL,
};

struct attribute_group isa207_pmu_format_group = {
	.name = "format",
	.attrs = isa207_pmu_format_attr,
};

static inline bool event_is_fab_match(u64 event)
{
	/* Only check pmc, unit and pmcxsel, ignore the edge bit (0) */
	event &= 0xff0fe;

	/* PM_MRK_FAB_RSP_MATCH & PM_MRK_FAB_RSP_MATCH_CYC */
	return (event == 0x30056 || event == 0x4f052);
}

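/*
 * Compute the constraint (mask, value) pair for an event.  The generic
 * scheduler (power_check_constraints() in core-book3s.c) combines the
 * pairs of all candidate events and rejects any set whose values
 * conflict under the accumulated masks, i.e. events that need
 * incompatible settings of the same field cannot be scheduled together.
 * Returns -1 if the event code itself is invalid.
 */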
int isa207_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp)
{
	unsigned int unit, pmc, cache, ebb;
	unsigned long mask, value;

	mask = value = 0;

	if (event & ~EVENT_VALID_MASK)
		return -1;

	pmc   = (event >> EVENT_PMC_SHIFT)       & EVENT_PMC_MASK;
	unit  = (event >> EVENT_UNIT_SHIFT)      & EVENT_UNIT_MASK;
	cache = (event >> EVENT_CACHE_SEL_SHIFT) & EVENT_CACHE_SEL_MASK;
	ebb   = (event >> EVENT_EBB_SHIFT)       & EVENT_EBB_MASK;

	if (pmc) {
		u64 base_event;

		if (pmc > 6)
			return -1;

		/* Ignore Linux defined bits when checking event below */
		base_event = event & ~EVENT_LINUX_MASK;

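		/*
		 * PMC5 and PMC6 are fixed-function counters: the only
		 * valid events there are PM_RUN_INST_CMPL (0x500fa) and
		 * PM_RUN_CYC (0x600f4) respectively.
		 */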
		if (pmc >= 5 && base_event != 0x500fa &&
				base_event != 0x600f4)
			return -1;

		mask  |= CNST_PMC_MASK(pmc);
		value |= CNST_PMC_VAL(pmc);
	}

	if (pmc <= 4) {
		/*
		 * Add to number of counters in use. Note this includes events with
		 * a PMC of 0 - they still need a PMC, it's just assigned later.
		 * Don't count events on PMC 5 & 6, there is only one valid event
		 * on each of those counters, and they are handled above.
		 */
		mask  |= CNST_NC_MASK;
		value |= CNST_NC_VAL;
	}

	if (unit >= 6 && unit <= 9) {
		/*
		 * L2/L3 events contain a cache selector field, which is
		 * supposed to be programmed into MMCRC. However, MMCRC is only
		 * HV writable, and there is no API for guest kernels to modify
		 * it. The solution is for the hypervisor to initialise the
		 * field to zeroes, and for us to only ever allow events that
		 * have a cache selector of zero. The bank selector (bit 3) is
		 * irrelevant, as long as the rest of the value is 0.
		 */
		if (cache & 0x7)
			return -1;

	} else if (event & EVENT_IS_L1) {
		mask  |= CNST_L1_QUAL_MASK;
		value |= CNST_L1_QUAL_VAL(cache);
	}

	if (event & EVENT_IS_MARKED) {
		mask  |= CNST_SAMPLE_MASK;
		value |= CNST_SAMPLE_VAL(event >> EVENT_SAMPLE_SHIFT);
	}

	/*
	 * Special case for PM_MRK_FAB_RSP_MATCH and PM_MRK_FAB_RSP_MATCH_CYC:
	 * the threshold control bits are used for the match value.
	 */
	if (event_is_fab_match(event)) {
		mask  |= CNST_FAB_MATCH_MASK;
		value |= CNST_FAB_MATCH_VAL(event >> EVENT_THR_CTL_SHIFT);
	} else {
		/*
		 * Check the mantissa upper two bits are not zero, unless the
		 * exponent is also zero. See the THRESH_CMP_MANTISSA doc.
		 */
		unsigned int cmp, exp;

		cmp = (event >> EVENT_THR_CMP_SHIFT) & EVENT_THR_CMP_MASK;
		exp = cmp >> 7;

		if (exp && (cmp & 0x60) == 0)
			return -1;

		mask  |= CNST_THRESH_MASK;
		value |= CNST_THRESH_VAL(event >> EVENT_THRESH_SHIFT);
	}

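	/*
	 * EBB (Event-Based Branch) events branch directly to a userspace
	 * handler on counter overflow; BHRB is the Branch History Rolling
	 * Buffer such events may additionally want enabled.
	 */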
	if (!pmc && ebb)
		/* EBB events must specify the PMC */
		return -1;

	if (event & EVENT_WANTS_BHRB) {
		if (!ebb)
			/* Only EBB events can request BHRB */
			return -1;

		mask  |= CNST_IFM_MASK;
		value |= CNST_IFM_VAL(event >> EVENT_IFM_SHIFT);
	}

	/*
	 * All events must agree on EBB, either all request it or none.
	 * EBB events are pinned & exclusive, so this should never actually
	 * hit, but we leave it as a fallback in case.
	 */
	mask  |= CNST_EBB_MASK;
	value |= CNST_EBB_VAL(ebb);

	*maskp = mask;
	*valp = value;

	return 0;
}

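/*
 * Translate the events into hardware counter assignments (hwc[]) and
 * control register contents, returned via mmcr[]: mmcr[0] holds MMCR0,
 * mmcr[1] MMCR1, mmcr[2] MMCRA and mmcr[3] MMCR2.
 */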
int isa207_compute_mmcr(u64 event[], int n_ev,
			unsigned int hwc[], unsigned long mmcr[],
			struct perf_event *pevents[])
{
	unsigned long mmcra, mmcr1, mmcr2, unit, combine, psel, cache, val;
	unsigned int pmc, pmc_inuse;
	int i;

	pmc_inuse = 0;

	/* First pass to count resource use */
	for (i = 0; i < n_ev; ++i) {
		pmc = (event[i] >> EVENT_PMC_SHIFT) & EVENT_PMC_MASK;
		if (pmc)
			pmc_inuse |= 1 << pmc;
	}

	/* In continuous sampling mode, update SDAR on TLB miss */
	mmcra = MMCRA_SDAR_MODE_TLB;
	mmcr1 = mmcr2 = 0;

	/* Second pass: assign PMCs, set all MMCR1 fields */
	for (i = 0; i < n_ev; ++i) {
		pmc     = (event[i] >> EVENT_PMC_SHIFT) & EVENT_PMC_MASK;
		unit    = (event[i] >> EVENT_UNIT_SHIFT) & EVENT_UNIT_MASK;
		combine = (event[i] >> EVENT_COMBINE_SHIFT) & EVENT_COMBINE_MASK;
		psel    =  event[i] & EVENT_PSEL_MASK;

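		/*
		 * The event didn't specify a PMC, so take the lowest
		 * numbered free one from PMC1-4.  The constraint check
		 * (CNST_NC above) has already ensured one is available.
		 */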
		if (!pmc) {
			for (pmc = 1; pmc <= 4; ++pmc) {
				if (!(pmc_inuse & (1 << pmc)))
					break;
			}

			pmc_inuse |= 1 << pmc;
		}

		if (pmc <= 4) {
			mmcr1 |= unit << MMCR1_UNIT_SHIFT(pmc);
			mmcr1 |= combine << MMCR1_COMBINE_SHIFT(pmc);
			mmcr1 |= psel << MMCR1_PMCSEL_SHIFT(pmc);
		}

		if (event[i] & EVENT_IS_L1) {
			cache = event[i] >> EVENT_CACHE_SEL_SHIFT;
			mmcr1 |= (cache & 1) << MMCR1_IC_QUAL_SHIFT;
			cache >>= 1;
			mmcr1 |= (cache & 1) << MMCR1_DC_QUAL_SHIFT;
		}

		if (event[i] & EVENT_IS_MARKED) {
			mmcra |= MMCRA_SAMPLE_ENABLE;

			val = (event[i] >> EVENT_SAMPLE_SHIFT) & EVENT_SAMPLE_MASK;
			if (val) {
				mmcra |= (val & 3) << MMCRA_SAMP_MODE_SHIFT;
				mmcra |= (val >> 2) << MMCRA_SAMP_ELIG_SHIFT;
			}
		}

		/*
		 * For PM_MRK_FAB_RSP_MATCH and PM_MRK_FAB_RSP_MATCH_CYC,
		 * the threshold bits are used for the match value.
		 */
		if (event_is_fab_match(event[i])) {
			mmcr1 |= ((event[i] >> EVENT_THR_CTL_SHIFT) &
				  EVENT_THR_CTL_MASK) << MMCR1_FAB_SHIFT;
		} else {
			val = (event[i] >> EVENT_THR_CTL_SHIFT) & EVENT_THR_CTL_MASK;
			mmcra |= val << MMCRA_THR_CTL_SHIFT;
			val = (event[i] >> EVENT_THR_SEL_SHIFT) & EVENT_THR_SEL_MASK;
			mmcra |= val << MMCRA_THR_SEL_SHIFT;
			val = (event[i] >> EVENT_THR_CMP_SHIFT) & EVENT_THR_CMP_MASK;
			mmcra |= val << MMCRA_THR_CMP_SHIFT;
		}

		if (event[i] & EVENT_WANTS_BHRB) {
			val = (event[i] >> EVENT_IFM_SHIFT) & EVENT_IFM_MASK;
			mmcra |= val << MMCRA_IFM_SHIFT;
		}

		if (pevents[i]->attr.exclude_user)
			mmcr2 |= MMCR2_FCP(pmc);

		if (pevents[i]->attr.exclude_hv)
			mmcr2 |= MMCR2_FCH(pmc);

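		/*
		 * When the kernel runs in hypervisor mode (e.g. bare-metal
		 * powernv), kernel code executes in HV state, so excluding
		 * the kernel means freezing the counter in HV state rather
		 * than in supervisor (OS) state.
		 */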
		if (pevents[i]->attr.exclude_kernel) {
			if (cpu_has_feature(CPU_FTR_HVMODE))
				mmcr2 |= MMCR2_FCH(pmc);
			else
				mmcr2 |= MMCR2_FCS(pmc);
		}

		hwc[i] = pmc - 1;
	}

	/* Return MMCRx values */
	mmcr[0] = 0;

	/* pmc_inuse is 1-based */
	if (pmc_inuse & 2)
		mmcr[0] = MMCR0_PMC1CE;

	if (pmc_inuse & 0x7c)
		mmcr[0] |= MMCR0_PMCjCE;

	/* If we're not using PMC 5 or 6, freeze them */
	if (!(pmc_inuse & 0x60))
		mmcr[0] |= MMCR0_FC56;

	mmcr[1] = mmcr1;
	mmcr[2] = mmcra;
	mmcr[3] = mmcr2;

	return 0;
}

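/*
 * Stop a counter by clearing its event selector in MMCR1.  pmc here is
 * zero-based, hence the +1 below; only PMC1-4 are programmable, PMC5/6
 * are instead frozen via MMCR0_FC56.
 */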
void isa207_disable_pmc(unsigned int pmc, unsigned long mmcr[])
{
	if (pmc <= 3)
		mmcr[1] &= ~(0xffUL << MMCR1_PMCSEL_SHIFT(pmc + 1));
}
297}