/*
 * Cell Broadband Engine Performance Monitor
 *
 * (C) Copyright IBM Corporation 2001,2006
 *
 * Author:
 *    David Erb (djerb@us.ibm.com)
 *    Kevin Corry (kevcorry@us.ibm.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/types.h>
#include <asm/io.h>
#include <asm/machdep.h>
#include <asm/reg.h>
#include <asm/spu.h>

#include "cbe_regs.h"
#include "interrupt.h"

/*
 * When writing to write-only mmio addresses, save a shadow copy. All of the
 * registers are 32-bit, but stored in the upper-half of a 64-bit field in
 * pmd_regs.
 */

#define WRITE_WO_MMIO(reg, x)					\
	do {							\
		u32 _x = (x);					\
		struct cbe_pmd_regs __iomem *pmd_regs;		\
		struct cbe_pmd_shadow_regs *shadow_regs;	\
		pmd_regs = cbe_get_cpu_pmd_regs(cpu);		\
		shadow_regs = cbe_get_cpu_pmd_shadow_regs(cpu);	\
		out_be64(&(pmd_regs->reg), (((u64)_x) << 32));	\
		shadow_regs->reg = _x;				\
	} while (0)

#define READ_SHADOW_REG(val, reg)				\
	do {							\
		struct cbe_pmd_shadow_regs *shadow_regs;	\
		shadow_regs = cbe_get_cpu_pmd_shadow_regs(cpu);	\
		(val) = shadow_regs->reg;			\
	} while (0)

#define READ_MMIO_UPPER32(val, reg)				\
	do {							\
		struct cbe_pmd_regs __iomem *pmd_regs;		\
		pmd_regs = cbe_get_cpu_pmd_regs(cpu);		\
		(val) = (u32)(in_be64(&pmd_regs->reg) >> 32);	\
	} while (0)
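
/*
 * Illustrative sketch (not part of the original file): all three macros
 * expand code that references a local variable named 'cpu', so they can
 * only be used inside a function that has one in scope, e.g.:
 *
 *	u32 read_pm_interval(u32 cpu)		// hypothetical helper
 *	{
 *		u32 val;
 *		READ_SHADOW_REG(val, pm_interval);
 *		return val;
 *	}
 */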

/*
 * Physical counter registers.
 * Each physical counter can act as one 32-bit counter or two 16-bit counters.
 */

u32 cbe_read_phys_ctr(u32 cpu, u32 phys_ctr)
{
	u32 val_in_latch, val = 0;

	if (phys_ctr < NR_PHYS_CTRS) {
		READ_SHADOW_REG(val_in_latch, counter_value_in_latch);

		/* Read the latch or the actual counter, whichever is newer. */
		if (val_in_latch & (1 << phys_ctr)) {
			READ_SHADOW_REG(val, pm_ctr[phys_ctr]);
		} else {
			READ_MMIO_UPPER32(val, pm_ctr[phys_ctr]);
		}
	}

	return val;
}
EXPORT_SYMBOL_GPL(cbe_read_phys_ctr);

void cbe_write_phys_ctr(u32 cpu, u32 phys_ctr, u32 val)
{
	struct cbe_pmd_shadow_regs *shadow_regs;
	u32 pm_ctrl;

	if (phys_ctr < NR_PHYS_CTRS) {
		/* Writing to a counter only writes to a hardware latch.
		 * The new value is not propagated to the actual counter
		 * until the performance monitor is enabled.
		 */
		WRITE_WO_MMIO(pm_ctr[phys_ctr], val);

		pm_ctrl = cbe_read_pm(cpu, pm_control);
		if (pm_ctrl & CBE_PM_ENABLE_PERF_MON) {
			/* The counters are already active, so we need to
			 * rewrite the pm_control register to "re-enable"
			 * the PMU.
			 */
			cbe_write_pm(cpu, pm_control, pm_ctrl);
		} else {
			shadow_regs = cbe_get_cpu_pmd_shadow_regs(cpu);
			shadow_regs->counter_value_in_latch |= (1 << phys_ctr);
		}
	}
}
EXPORT_SYMBOL_GPL(cbe_write_phys_ctr);
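
/*
 * Usage sketch (illustrative, not from the original file): a value written
 * while the PMU is disabled only reaches the hardware latch and is marked
 * in counter_value_in_latch; it is committed to the real counter when the
 * PMU is next enabled:
 *
 *	cbe_write_phys_ctr(cpu, 0, 0);	// value waits in the latch
 *	cbe_enable_pm(cpu);		// pending latch writes are committed
 */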

/*
 * "Logical" counter registers.
 * These will read/write 16-bits or 32-bits depending on the
 * current size of the counter. Counters 4 - 7 are always 16-bit.
 */

u32 cbe_read_ctr(u32 cpu, u32 ctr)
{
	u32 val;
	u32 phys_ctr = ctr & (NR_PHYS_CTRS - 1);

	val = cbe_read_phys_ctr(cpu, phys_ctr);

	if (cbe_get_ctr_size(cpu, phys_ctr) == 16)
		val = (ctr < NR_PHYS_CTRS) ? (val >> 16) : (val & 0xffff);

	return val;
}
EXPORT_SYMBOL_GPL(cbe_read_ctr);

void cbe_write_ctr(u32 cpu, u32 ctr, u32 val)
{
	u32 phys_ctr;
	u32 phys_val;

	phys_ctr = ctr & (NR_PHYS_CTRS - 1);

	if (cbe_get_ctr_size(cpu, phys_ctr) == 16) {
		phys_val = cbe_read_phys_ctr(cpu, phys_ctr);

		if (ctr < NR_PHYS_CTRS)
			val = (val << 16) | (phys_val & 0xffff);
		else
			val = (val & 0xffff) | (phys_val & 0xffff0000);
	}

	cbe_write_phys_ctr(cpu, phys_ctr, val);
}
EXPORT_SYMBOL_GPL(cbe_write_ctr);
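
/*
 * Mapping example (illustrative; assumes NR_PHYS_CTRS is 4, as implied by
 * "Counters 4 - 7" above): when a physical counter is split into two 16-bit
 * halves, logical counter N (N < 4) is the upper 16 bits of physical
 * counter N, and logical counter N + 4 is the lower 16 bits:
 *
 *	u32 upper = cbe_read_ctr(cpu, 1);	// bits 31:16 of phys ctr 1
 *	u32 lower = cbe_read_ctr(cpu, 5);	// bits 15:0  of phys ctr 1
 */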

/*
 * Counter-control registers.
 * Each "logical" counter has a corresponding control register.
 */

u32 cbe_read_pm07_control(u32 cpu, u32 ctr)
{
	u32 pm07_control = 0;

	if (ctr < NR_CTRS)
		READ_SHADOW_REG(pm07_control, pm07_control[ctr]);

	return pm07_control;
}
EXPORT_SYMBOL_GPL(cbe_read_pm07_control);

void cbe_write_pm07_control(u32 cpu, u32 ctr, u32 val)
{
	if (ctr < NR_CTRS)
		WRITE_WO_MMIO(pm07_control[ctr], val);
}
EXPORT_SYMBOL_GPL(cbe_write_pm07_control);

/*
 * Other PMU control registers. Most of these are write-only, so reads
 * return the shadow copy; trace_address and pm_status are read back
 * directly from the hardware.
 */

u32 cbe_read_pm(u32 cpu, enum pm_reg_name reg)
{
	u32 val = 0;

	switch (reg) {
	case group_control:
		READ_SHADOW_REG(val, group_control);
		break;

	case debug_bus_control:
		READ_SHADOW_REG(val, debug_bus_control);
		break;

	case trace_address:
		READ_MMIO_UPPER32(val, trace_address);
		break;

	case ext_tr_timer:
		READ_SHADOW_REG(val, ext_tr_timer);
		break;

	case pm_status:
		READ_MMIO_UPPER32(val, pm_status);
		break;

	case pm_control:
		READ_SHADOW_REG(val, pm_control);
		break;

	case pm_interval:
		READ_SHADOW_REG(val, pm_interval);
		break;

	case pm_start_stop:
		READ_SHADOW_REG(val, pm_start_stop);
		break;
	}

	return val;
}
EXPORT_SYMBOL_GPL(cbe_read_pm);

void cbe_write_pm(u32 cpu, enum pm_reg_name reg, u32 val)
{
	switch (reg) {
	case group_control:
		WRITE_WO_MMIO(group_control, val);
		break;

	case debug_bus_control:
		WRITE_WO_MMIO(debug_bus_control, val);
		break;

	case trace_address:
		WRITE_WO_MMIO(trace_address, val);
		break;

	case ext_tr_timer:
		WRITE_WO_MMIO(ext_tr_timer, val);
		break;

	case pm_status:
		WRITE_WO_MMIO(pm_status, val);
		break;

	case pm_control:
		WRITE_WO_MMIO(pm_control, val);
		break;

	case pm_interval:
		WRITE_WO_MMIO(pm_interval, val);
		break;

	case pm_start_stop:
		WRITE_WO_MMIO(pm_start_stop, val);
		break;
	}
}
EXPORT_SYMBOL_GPL(cbe_write_pm);

/*
 * Get/set the size of a physical counter to either 16 or 32 bits.
 */

u32 cbe_get_ctr_size(u32 cpu, u32 phys_ctr)
{
	u32 pm_ctrl, size = 0;

	if (phys_ctr < NR_PHYS_CTRS) {
		pm_ctrl = cbe_read_pm(cpu, pm_control);
		size = (pm_ctrl & CBE_PM_16BIT_CTR(phys_ctr)) ? 16 : 32;
	}

	return size;
}
EXPORT_SYMBOL_GPL(cbe_get_ctr_size);

void cbe_set_ctr_size(u32 cpu, u32 phys_ctr, u32 ctr_size)
{
	u32 pm_ctrl;

	if (phys_ctr < NR_PHYS_CTRS) {
		pm_ctrl = cbe_read_pm(cpu, pm_control);
		switch (ctr_size) {
		case 16:
			pm_ctrl |= CBE_PM_16BIT_CTR(phys_ctr);
			break;

		case 32:
			pm_ctrl &= ~CBE_PM_16BIT_CTR(phys_ctr);
			break;
		}
		cbe_write_pm(cpu, pm_control, pm_ctrl);
	}
}
EXPORT_SYMBOL_GPL(cbe_set_ctr_size);

/*
 * Enable/disable the entire performance monitoring unit.
 * When we enable the PMU, all pending writes to counters get committed.
 */

void cbe_enable_pm(u32 cpu)
{
	struct cbe_pmd_shadow_regs *shadow_regs;
	u32 pm_ctrl;

	shadow_regs = cbe_get_cpu_pmd_shadow_regs(cpu);
	shadow_regs->counter_value_in_latch = 0;

	pm_ctrl = cbe_read_pm(cpu, pm_control) | CBE_PM_ENABLE_PERF_MON;
	cbe_write_pm(cpu, pm_control, pm_ctrl);
}
EXPORT_SYMBOL_GPL(cbe_enable_pm);

void cbe_disable_pm(u32 cpu)
{
	u32 pm_ctrl;
	pm_ctrl = cbe_read_pm(cpu, pm_control) & ~CBE_PM_ENABLE_PERF_MON;
	cbe_write_pm(cpu, pm_control, pm_ctrl);
}
EXPORT_SYMBOL_GPL(cbe_disable_pm);
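
/*
 * Putting the pieces together (an illustrative sketch only; the event-select
 * value 0x1234 and the choice of counter 0 are arbitrary examples, not taken
 * from this file):
 *
 *	cbe_set_ctr_size(cpu, 0, 32);		// use one 32-bit counter
 *	cbe_write_pm07_control(cpu, 0, 0x1234);	// hypothetical event select
 *	cbe_write_phys_ctr(cpu, 0, 0);		// value waits in the latch
 *	cbe_enable_pm(cpu);			// commit latch, start counting
 *	...
 *	cbe_disable_pm(cpu);
 *	pr_debug("counter 0: %u\n", cbe_read_phys_ctr(cpu, 0));
 */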

/*
 * Reading from the trace_buffer.
 * The trace buffer is two 64-bit registers. Reading from
 * the second half automatically increments the trace_address.
 */

void cbe_read_trace_buffer(u32 cpu, u64 *buf)
{
	struct cbe_pmd_regs __iomem *pmd_regs = cbe_get_cpu_pmd_regs(cpu);

	*buf++ = in_be64(&pmd_regs->trace_buffer_0_63);
	*buf++ = in_be64(&pmd_regs->trace_buffer_64_127);
}
EXPORT_SYMBOL_GPL(cbe_read_trace_buffer);
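
/*
 * Example (illustrative): each call transfers one 128-bit trace entry, so
 * callers pass a two-element u64 buffer and rely on the automatic
 * trace_address increment to step through successive entries:
 *
 *	u64 entry[2];
 *	cbe_read_trace_buffer(cpu, entry);
 */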