/*
 * Cell Broadband Engine OProfile Support
 *
 * (C) Copyright IBM Corporation 2006
 *
 * Author: David Erb (djerb@us.ibm.com)
 * Modifications:
 *	   Carl Love <carll@us.ibm.com>
 *	   Maynard Johnson <maynardj@us.ibm.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/kthread.h>
#include <linux/oprofile.h>
#include <linux/percpu.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/timer.h>
#include <asm/cell-pmu.h>
#include <asm/cputable.h>
#include <asm/firmware.h>
#include <asm/io.h>
#include <asm/oprofile_impl.h>
#include <asm/processor.h>
#include <asm/prom.h>
#include <asm/ptrace.h>
#include <asm/reg.h>
#include <asm/rtas.h>
#include <asm/system.h>
#include <asm/cell-regs.h>

#include "../platforms/cell/interrupt.h"
#include "cell/pr_util.h"

static void cell_global_stop_spu(void);

/*
 * spu_cycle_reset is the number of cycles between samples.
 * This variable is used for SPU profiling and should ONLY be set
 * at the beginning of cell_reg_setup; otherwise, it's read-only.
 */
static unsigned int spu_cycle_reset;

#define NUM_SPUS_PER_NODE    8
#define SPU_CYCLES_EVENT_NUM 2	/* event number for SPU_CYCLES */

#define PPU_CYCLES_EVENT_NUM 1	/* event number for CYCLES */
#define PPU_CYCLES_GRP_NUM   1	/* special group number for identifying
				 * PPU_CYCLES event
				 */
#define CBE_COUNT_ALL_CYCLES 0x42800000	/* PPU cycle event specifier */

#define NUM_THREADS 2		/* number of physical threads in
				 * physical processor
				 */
#define NUM_TRACE_BUS_WORDS 4
#define NUM_INPUT_BUS_WORDS 2

#define MAX_SPU_COUNT 0xFFFFFF	/* maximum 24 bit LFSR value */

struct pmc_cntrl_data {
	unsigned long vcntr;
	unsigned long evnts;
	unsigned long masks;
	unsigned long enabled;
};

/*
 * ibm,cbe-perftools rtas parameters
 */
struct pm_signal {
	u16 cpu;		/* Processor to modify */
	u16 sub_unit;		/* hw subunit this applies to (if applicable) */
	short int signal_group;	/* Signal Group to Enable/Disable */
	u8 bus_word;		/* Enable/Disable on this Trace/Trigger/Event
				 * Bus Word(s) (bitmask)
				 */
	u8 bit;			/* Trigger/Event bit (if applicable) */
};

/*
 * rtas call arguments
 */
enum {
	SUBFUNC_RESET = 1,
	SUBFUNC_ACTIVATE = 2,
	SUBFUNC_DEACTIVATE = 3,

	PASSTHRU_IGNORE = 0,
	PASSTHRU_ENABLE = 1,
	PASSTHRU_DISABLE = 2,
};

struct pm_cntrl {
	u16 enable;
	u16 stop_at_max;
	u16 trace_mode;
	u16 freeze;
	u16 count_mode;
};

static struct {
	u32 group_control;
	u32 debug_bus_control;
	struct pm_cntrl pm_cntrl;
	u32 pm07_cntrl[NR_PHYS_CTRS];
} pm_regs;

#define GET_SUB_UNIT(x) ((x & 0x0000f000) >> 12)
#define GET_BUS_WORD(x) ((x & 0x000000f0) >> 4)
#define GET_BUS_TYPE(x) ((x & 0x00000300) >> 8)
#define GET_POLARITY(x) ((x & 0x00000002) >> 1)
#define GET_COUNT_CYCLES(x) (x & 0x00000001)
#define GET_INPUT_CONTROL(x) ((x & 0x00000004) >> 2)
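
/*
 * Illustrative decode of the unit_mask fields above (the mask value is
 * made up, not taken from the event tables): unit_mask = 0x00002035
 * gives sub_unit = 0x2, bus_word = 0x3, bus_type = 0, polarity = 0,
 * count_cycles = 1 and input_control = 1.
 */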
123
Maynard Johnson18f21902006-11-20 18:45:16 +0100124static DEFINE_PER_CPU(unsigned long[NR_PHYS_CTRS], pmc_values);
125
126static struct pmc_cntrl_data pmc_cntrl[NUM_THREADS][NR_PHYS_CTRS];
127
Bob Nelson14748552007-07-20 21:39:53 +0200128/*
129 * The CELL profiling code makes rtas calls to setup the debug bus to
130 * route the performance signals. Additionally, SPU profiling requires
131 * a second rtas call to setup the hardware to capture the SPU PCs.
132 * The EIO error value is returned if the token lookups or the rtas
133 * call fail. The EIO error number is the best choice of the existing
134 * error numbers. The probability of rtas related error is very low. But
135 * by returning EIO and printing additional information to dmsg the user
136 * will know that OProfile did not start and dmesg will tell them why.
137 * OProfile does not support returning errors on Stop. Not a huge issue
138 * since failure to reset the debug bus or stop the SPU PC collection is
139 * not a fatel issue. Chances are if the Stop failed, Start doesn't work
140 * either.
141 */
142
143/*
144 * Interpetation of hdw_thread:
Maynard Johnson18f21902006-11-20 18:45:16 +0100145 * 0 - even virtual cpus 0, 2, 4,...
146 * 1 - odd virtual cpus 1, 3, 5, ...
Bob Nelson14748552007-07-20 21:39:53 +0200147 *
148 * FIXME: this is strictly wrong, we need to clean this up in a number
149 * of places. It works for now. -arnd
Maynard Johnson18f21902006-11-20 18:45:16 +0100150 */
151static u32 hdw_thread;
152
153static u32 virt_cntr_inter_mask;
154static struct timer_list timer_virt_cntr;
155
Bob Nelson14748552007-07-20 21:39:53 +0200156/*
157 * pm_signal needs to be global since it is initialized in
Maynard Johnson18f21902006-11-20 18:45:16 +0100158 * cell_reg_setup at the time when the necessary information
159 * is available.
160 */
161static struct pm_signal pm_signal[NR_PHYS_CTRS];
Bob Nelson14748552007-07-20 21:39:53 +0200162static int pm_rtas_token; /* token for debug bus setup call */
163static int spu_rtas_token; /* token for SPU cycle profiling */
Maynard Johnson18f21902006-11-20 18:45:16 +0100164
165static u32 reset_value[NR_PHYS_CTRS];
166static int num_counters;
167static int oprofile_running;
Thomas Gleixner057b1842007-04-29 16:10:39 +0000168static DEFINE_SPINLOCK(virt_cntr_lock);
Maynard Johnson18f21902006-11-20 18:45:16 +0100169
170static u32 ctr_enabled;
171
Carl Lovebcb63e22007-02-13 22:02:02 +0100172static unsigned char trace_bus[NUM_TRACE_BUS_WORDS];
173static unsigned char input_bus[NUM_INPUT_BUS_WORDS];
Maynard Johnson18f21902006-11-20 18:45:16 +0100174
175/*
176 * Firmware interface functions
177 */
178static int
179rtas_ibm_cbe_perftools(int subfunc, int passthru,
180 void *address, unsigned long length)
181{
182 u64 paddr = __pa(address);
183
Bob Nelson14748552007-07-20 21:39:53 +0200184 return rtas_call(pm_rtas_token, 5, 1, NULL, subfunc,
185 passthru, paddr >> 32, paddr & 0xffffffff, length);
Maynard Johnson18f21902006-11-20 18:45:16 +0100186}
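
/*
 * Worked example (the address value is made up): rtas takes 32 bit
 * arguments, so a 64 bit physical address such as 0x0000000123456780
 * is passed as the pair (paddr >> 32) = 0x1 and
 * (paddr & 0xffffffff) = 0x23456780.
 */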

static void pm_rtas_reset_signals(u32 node)
{
	int ret;
	struct pm_signal pm_signal_local;

	/*
	 * The debug bus is being set to the passthru disable state.
	 * However, the FW still expects at least one legal signal routing
	 * entry or it will return an error on the arguments. If we don't
	 * supply a valid entry, we must ignore all return values. Ignoring
	 * all return values means we might miss an error we should be
	 * concerned about.
	 */

	/* fw expects physical cpu #. */
	pm_signal_local.cpu = node;
	pm_signal_local.signal_group = 21;
	pm_signal_local.bus_word = 1;
	pm_signal_local.sub_unit = 0;
	pm_signal_local.bit = 0;

	ret = rtas_ibm_cbe_perftools(SUBFUNC_RESET, PASSTHRU_DISABLE,
				     &pm_signal_local,
				     sizeof(struct pm_signal));

	if (unlikely(ret))
		/*
		 * Not a fatal error. For OProfile stop, the oprofile
		 * functions do not support returning an error for
		 * failure to stop OProfile.
		 */
		printk(KERN_WARNING "%s: rtas returned: %d\n",
		       __FUNCTION__, ret);
}

static int pm_rtas_activate_signals(u32 node, u32 count)
{
	int ret;
	int i, j;
	struct pm_signal pm_signal_local[NR_PHYS_CTRS];

	/*
	 * There is no debug setup required for the cycles event.
	 * Note that only events in the same group can be used.
	 * Otherwise, there will be conflicts in correctly routing
	 * the signals on the debug bus. It is the responsibility
	 * of the OProfile user tool to check that the events are in
	 * the same group.
	 */
	i = 0;
	for (j = 0; j < count; j++) {
		if (pm_signal[j].signal_group != PPU_CYCLES_GRP_NUM) {

			/* fw expects physical cpu # */
			pm_signal_local[i].cpu = node;
			pm_signal_local[i].signal_group
				= pm_signal[j].signal_group;
			pm_signal_local[i].bus_word = pm_signal[j].bus_word;
			pm_signal_local[i].sub_unit = pm_signal[j].sub_unit;
			pm_signal_local[i].bit = pm_signal[j].bit;
			i++;
		}
	}

	if (i != 0) {
		ret = rtas_ibm_cbe_perftools(SUBFUNC_ACTIVATE, PASSTHRU_ENABLE,
					     pm_signal_local,
					     i * sizeof(struct pm_signal));

		if (unlikely(ret)) {
			printk(KERN_WARNING "%s: rtas returned: %d\n",
			       __FUNCTION__, ret);
			return -EIO;
		}
	}

	return 0;
}
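
/*
 * Illustrative case (the values are made up): with count = 2, where
 * pm_signal[0] was set up for PPU_CYCLES (signal_group ==
 * PPU_CYCLES_GRP_NUM) and pm_signal[1] for a signal group 21 event,
 * only the group 21 entry is copied, so i == 1 and a single
 * struct pm_signal is handed to the firmware.
 */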

/*
 * PM Signal functions
 */
static void set_pm_event(u32 ctr, int event, u32 unit_mask)
{
	struct pm_signal *p;
	u32 signal_bit;
	u32 bus_word, bus_type, count_cycles, polarity, input_control;
	int j, i;

	if (event == PPU_CYCLES_EVENT_NUM) {
		/* Special Event: Count all cpu cycles */
		pm_regs.pm07_cntrl[ctr] = CBE_COUNT_ALL_CYCLES;
		p = &(pm_signal[ctr]);
		p->signal_group = PPU_CYCLES_GRP_NUM;
		p->bus_word = 1;
		p->sub_unit = 0;
		p->bit = 0;
		goto out;
	} else {
		pm_regs.pm07_cntrl[ctr] = 0;
	}

	bus_word = GET_BUS_WORD(unit_mask);
	bus_type = GET_BUS_TYPE(unit_mask);
	count_cycles = GET_COUNT_CYCLES(unit_mask);
	polarity = GET_POLARITY(unit_mask);
	input_control = GET_INPUT_CONTROL(unit_mask);
	signal_bit = (event % 100);

	p = &(pm_signal[ctr]);

	p->signal_group = event / 100;
	p->bus_word = bus_word;
	p->sub_unit = GET_SUB_UNIT(unit_mask);

	pm_regs.pm07_cntrl[ctr] = 0;
	pm_regs.pm07_cntrl[ctr] |= PM07_CTR_COUNT_CYCLES(count_cycles);
	pm_regs.pm07_cntrl[ctr] |= PM07_CTR_POLARITY(polarity);
	pm_regs.pm07_cntrl[ctr] |= PM07_CTR_INPUT_CONTROL(input_control);

	/*
	 * Some of the islands' signal selection is based on 64 bit words.
	 * The debug bus words are 32 bits; the input words to the performance
	 * counters are defined as 32 bits. We need to convert the 64 bit
	 * island specification to the appropriate 32 bit input and bus word
	 * for the performance counter event selection. See the CELL
	 * Performance monitoring signals manual and the Perf cntr hardware
	 * descriptions for the details.
	 */
	if (input_control == 0) {
		if (signal_bit > 31) {
			signal_bit -= 32;
			if (bus_word == 0x3)
				bus_word = 0x2;
			else if (bus_word == 0xc)
				bus_word = 0x8;
		}

		if ((bus_type == 0) && p->signal_group >= 60)
			bus_type = 2;
		if ((bus_type == 1) && p->signal_group >= 50)
			bus_type = 0;

		pm_regs.pm07_cntrl[ctr] |= PM07_CTR_INPUT_MUX(signal_bit);
	} else {
		pm_regs.pm07_cntrl[ctr] = 0;
		p->bit = signal_bit;
	}

	for (i = 0; i < NUM_TRACE_BUS_WORDS; i++) {
		if (bus_word & (1 << i)) {
			pm_regs.debug_bus_control |=
				(bus_type << (31 - (2 * i) + 1));

			for (j = 0; j < NUM_INPUT_BUS_WORDS; j++) {
				if (input_bus[j] == 0xff) {
					input_bus[j] = i;
					pm_regs.group_control |=
						(i << (31 - i));

					break;
				}
			}
		}
	}
out:
	;
}
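
/*
 * Worked example of the event encoding used above (the event number is
 * illustrative): event 2119 selects signal_group = 2119 / 100 = 21 and
 * signal_bit = 2119 % 100 = 19, i.e. the event number encodes the debug
 * bus signal group in the hundreds digits and the bit within the group
 * in the lower two digits.
 */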

static void write_pm_cntrl(int cpu)
{
	/*
	 * Oprofile will use 32 bit counters, set bits 7:10 to 0.
	 * pm_regs.pm_cntrl is a global
	 */

	u32 val = 0;
	if (pm_regs.pm_cntrl.enable == 1)
		val |= CBE_PM_ENABLE_PERF_MON;

	if (pm_regs.pm_cntrl.stop_at_max == 1)
		val |= CBE_PM_STOP_AT_MAX;

	if (pm_regs.pm_cntrl.trace_mode == 1)
		val |= CBE_PM_TRACE_MODE_SET(pm_regs.pm_cntrl.trace_mode);

	if (pm_regs.pm_cntrl.freeze == 1)
		val |= CBE_PM_FREEZE_ALL_CTRS;

	/*
	 * Routine set_count_mode must be called previously to set
	 * the count mode based on the user selection of user and kernel.
	 */
	val |= CBE_PM_COUNT_MODE_SET(pm_regs.pm_cntrl.count_mode);
	cbe_write_pm(cpu, pm_control, val);
}

static inline void
set_count_mode(u32 kernel, u32 user)
{
	/*
	 * The user must specify user and kernel if they want them. If
	 * neither is specified, OProfile will count in hypervisor mode.
	 * pm_regs.pm_cntrl is a global
	 */
	if (kernel) {
		if (user)
			pm_regs.pm_cntrl.count_mode = CBE_COUNT_ALL_MODES;
		else
			pm_regs.pm_cntrl.count_mode =
				CBE_COUNT_SUPERVISOR_MODE;
	} else {
		if (user)
			pm_regs.pm_cntrl.count_mode = CBE_COUNT_PROBLEM_MODE;
		else
			pm_regs.pm_cntrl.count_mode =
				CBE_COUNT_HYPERVISOR_MODE;
	}
}
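
/*
 * For reference, the resulting mapping is:
 *
 *	kernel	user	count_mode
 *	   1	   1	CBE_COUNT_ALL_MODES
 *	   1	   0	CBE_COUNT_SUPERVISOR_MODE
 *	   0	   1	CBE_COUNT_PROBLEM_MODE
 *	   0	   0	CBE_COUNT_HYPERVISOR_MODE
 */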

static inline void enable_ctr(u32 cpu, u32 ctr, u32 *pm07_cntrl)
{
	pm07_cntrl[ctr] |= CBE_PM_CTR_ENABLE;
	cbe_write_pm07_control(cpu, ctr, pm07_cntrl[ctr]);
}

/*
 * Oprofile is expected to collect data on all CPUs simultaneously.
 * However, there is one set of performance counters per node. There are
 * two hardware threads or virtual CPUs on each node. Hence, OProfile must
 * multiplex in time the performance counter collection on the two virtual
 * CPUs. The multiplexing of the performance counters is done by this
 * virtual counter routine.
 *
 * The pmc_values used below is defined as 'per-cpu' but its use is
 * more akin to 'per-node'. We need to store two sets of counter
 * values per node -- one for the previous run and one for the next.
 * The per-cpu[NR_PHYS_CTRS] gives us the storage we need. Each odd/even
 * pair of per-cpu arrays is used for storing the previous and next
 * pmc values for a given node.
 * NOTE: We use the per-cpu variable to improve cache performance.
 *
 * This routine alternates loading the virtual counters for the two
 * virtual CPUs.
 */
static void cell_virtual_cntr(unsigned long data)
{
	int i, prev_hdw_thread, next_hdw_thread;
	u32 cpu;
	unsigned long flags;

	/*
	 * Make sure that the interrupt handler and the virt counter are
	 * not both playing with the counters on the same node.
	 */

	spin_lock_irqsave(&virt_cntr_lock, flags);

	prev_hdw_thread = hdw_thread;

	/* switch the cpu handling the interrupts */
	hdw_thread = 1 ^ hdw_thread;
	next_hdw_thread = hdw_thread;

	/*
	 * There are some per thread events. Must do the
	 * set event for the thread that is being started.
	 */
	for (i = 0; i < num_counters; i++)
		set_pm_event(i,
			     pmc_cntrl[next_hdw_thread][i].evnts,
			     pmc_cntrl[next_hdw_thread][i].masks);

	/*
	 * The following is done only once per node, but
	 * we need cpu #, not node #, to pass to the cbe_xxx functions.
	 */
	for_each_online_cpu(cpu) {
		if (cbe_get_hw_thread_id(cpu))
			continue;

		/*
		 * stop counters, save counter values, restore counts
		 * for previous thread
		 */
		cbe_disable_pm(cpu);
		cbe_disable_pm_interrupts(cpu);
		for (i = 0; i < num_counters; i++) {
			per_cpu(pmc_values, cpu + prev_hdw_thread)[i]
				= cbe_read_ctr(cpu, i);

			if (per_cpu(pmc_values, cpu + next_hdw_thread)[i]
			    == 0xFFFFFFFF)
				/* If the cntr value is 0xffffffff, we must
				 * reset that to 0xfffffff0 when the current
				 * thread is restarted. This will generate a
				 * new interrupt and make sure that we never
				 * restore the counters to the max value. If
				 * the counters were restored to the max value,
				 * they do not increment and no interrupts are
				 * generated. Hence no more samples will be
				 * collected on that cpu.
				 */
				cbe_write_ctr(cpu, i, 0xFFFFFFF0);
			else
				cbe_write_ctr(cpu, i,
					      per_cpu(pmc_values,
						      cpu +
						      next_hdw_thread)[i]);
		}

		/*
		 * Switch to the other thread. Change the interrupt
		 * and control regs to be scheduled on the CPU
		 * corresponding to the thread to execute.
		 */
		for (i = 0; i < num_counters; i++) {
			if (pmc_cntrl[next_hdw_thread][i].enabled) {
				/*
				 * There are some per thread events.
				 * Must do the set event, enable_cntr
				 * for each cpu.
				 */
				enable_ctr(cpu, i,
					   pm_regs.pm07_cntrl);
			} else {
				cbe_write_pm07_control(cpu, i, 0);
			}
		}

		/* Enable interrupts on the CPU thread that is starting */
		cbe_enable_pm_interrupts(cpu, next_hdw_thread,
					 virt_cntr_inter_mask);
		cbe_enable_pm(cpu);
	}

	spin_unlock_irqrestore(&virt_cntr_lock, flags);

	mod_timer(&timer_virt_cntr, jiffies + HZ / 10);
}

static void start_virt_cntrs(void)
{
	init_timer(&timer_virt_cntr);
	timer_virt_cntr.function = cell_virtual_cntr;
	timer_virt_cntr.data = 0UL;
	timer_virt_cntr.expires = jiffies + HZ / 10;
	add_timer(&timer_virt_cntr);
}
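
/*
 * Worked timing example: HZ / 10 jiffies is 100 ms regardless of the
 * configured HZ (e.g. HZ = 250 gives 25 jiffies), so each hardware
 * thread is sampled for roughly 100 ms out of every 200 ms.
 */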

/* This function is called once for all cpus combined */
static int cell_reg_setup(struct op_counter_config *ctr,
			  struct op_system_config *sys, int num_ctrs)
{
	int i, j, cpu;

	spu_cycle_reset = 0;

	if (ctr[0].event == SPU_CYCLES_EVENT_NUM) {
		spu_cycle_reset = ctr[0].count;

		/*
		 * Each node will need to make the rtas call to start
		 * and stop SPU profiling. Get the token once and store it.
		 */
		spu_rtas_token = rtas_token("ibm,cbe-spu-perftools");

		if (unlikely(spu_rtas_token == RTAS_UNKNOWN_SERVICE)) {
			printk(KERN_ERR
			       "%s: rtas token ibm,cbe-spu-perftools unknown\n",
			       __FUNCTION__);
			return -EIO;
		}
	}

	pm_rtas_token = rtas_token("ibm,cbe-perftools");

	/*
	 * For all events except PPU CYCLES, each node will need to make
	 * the rtas cbe-perftools call to set up and reset the debug bus.
	 * Make the token lookup call once and store it in the global
	 * variable pm_rtas_token.
	 */
	if (unlikely(pm_rtas_token == RTAS_UNKNOWN_SERVICE)) {
		printk(KERN_ERR
		       "%s: rtas token ibm,cbe-perftools unknown\n",
		       __FUNCTION__);
		return -EIO;
	}

	num_counters = num_ctrs;

	pm_regs.group_control = 0;
	pm_regs.debug_bus_control = 0;

	/* setup the pm_control register */
	memset(&pm_regs.pm_cntrl, 0, sizeof(struct pm_cntrl));
	pm_regs.pm_cntrl.stop_at_max = 1;
	pm_regs.pm_cntrl.trace_mode = 0;
	pm_regs.pm_cntrl.freeze = 1;

	set_count_mode(sys->enable_kernel, sys->enable_user);

	/* Setup the thread 0 events */
	for (i = 0; i < num_ctrs; ++i) {

		pmc_cntrl[0][i].evnts = ctr[i].event;
		pmc_cntrl[0][i].masks = ctr[i].unit_mask;
		pmc_cntrl[0][i].enabled = ctr[i].enabled;
		pmc_cntrl[0][i].vcntr = i;

		for_each_possible_cpu(j)
			per_cpu(pmc_values, j)[i] = 0;
	}

	/*
	 * Setup the thread 1 events, map the thread 0 event to the
	 * equivalent thread 1 event.
	 */
	for (i = 0; i < num_ctrs; ++i) {
		if ((ctr[i].event >= 2100) && (ctr[i].event <= 2111))
			pmc_cntrl[1][i].evnts = ctr[i].event + 19;
		else if (ctr[i].event == 2203)
			pmc_cntrl[1][i].evnts = ctr[i].event;
		else if ((ctr[i].event >= 2200) && (ctr[i].event <= 2215))
			pmc_cntrl[1][i].evnts = ctr[i].event + 16;
		else
			pmc_cntrl[1][i].evnts = ctr[i].event;

		pmc_cntrl[1][i].masks = ctr[i].unit_mask;
		pmc_cntrl[1][i].enabled = ctr[i].enabled;
		pmc_cntrl[1][i].vcntr = i;
	}

	for (i = 0; i < NUM_TRACE_BUS_WORDS; i++)
		trace_bus[i] = 0xff;

	for (i = 0; i < NUM_INPUT_BUS_WORDS; i++)
		input_bus[i] = 0xff;

	/*
	 * Our counters count up, and "count" refers to
	 * how much before the next interrupt, and we interrupt
	 * on overflow. So we calculate the starting value
	 * which will give us "count" until overflow.
	 * Then we set the events on the enabled counters.
	 */
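	/*
	 * Worked example (the count is made up): for ctr[i].count =
	 * 100000 (0x186A0), reset_value[i] = 0xFFFFFFFF - 0x186A0 =
	 * 0xFFFE795F, so the counter overflows, and interrupts, after
	 * roughly 100000 events.
	 */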
	for (i = 0; i < num_counters; ++i) {
		/* start with virtual counter set 0 */
		if (pmc_cntrl[0][i].enabled) {
			/* Using 32bit counters, reset max - count */
			reset_value[i] = 0xFFFFFFFF - ctr[i].count;
			set_pm_event(i,
				     pmc_cntrl[0][i].evnts,
				     pmc_cntrl[0][i].masks);

			/* global, used by cell_cpu_setup */
			ctr_enabled |= (1 << i);
		}
	}

	/* initialize the previous counts for the virtual cntrs */
	for_each_online_cpu(cpu)
		for (i = 0; i < num_counters; ++i) {
			per_cpu(pmc_values, cpu)[i] = reset_value[i];
		}

	return 0;
}


/* This function is called once for each cpu */
static int cell_cpu_setup(struct op_counter_config *cntr)
{
	u32 cpu = smp_processor_id();
	u32 num_enabled = 0;
	int i;

	if (spu_cycle_reset)
		return 0;

	/* There is one performance monitor per processor chip (i.e. node),
	 * so we only need to perform this function once per node.
	 */
	if (cbe_get_hw_thread_id(cpu))
		return 0;

	/* Stop all counters */
	cbe_disable_pm(cpu);
	cbe_disable_pm_interrupts(cpu);

	cbe_write_pm(cpu, pm_interval, 0);
	cbe_write_pm(cpu, pm_start_stop, 0);
	cbe_write_pm(cpu, group_control, pm_regs.group_control);
	cbe_write_pm(cpu, debug_bus_control, pm_regs.debug_bus_control);
	write_pm_cntrl(cpu);

	for (i = 0; i < num_counters; ++i) {
		if (ctr_enabled & (1 << i)) {
			pm_signal[num_enabled].cpu = cbe_cpu_to_node(cpu);
			num_enabled++;
		}
	}

	/*
	 * The pm_rtas_activate_signals will return -EIO if the FW
	 * call failed.
	 */
	return pm_rtas_activate_signals(cbe_cpu_to_node(cpu), num_enabled);
}

#define ENTRIES	303
#define MAXLFSR	0xFFFFFF

/* precomputed table of 24 bit LFSR values */
static int initial_lfsr[] = {
 8221349, 12579195, 5379618, 10097839, 7512963, 7519310, 3955098, 10753424,
 15507573, 7458917, 285419, 2641121, 9780088, 3915503, 6668768, 1548716,
 4885000, 8774424, 9650099, 2044357, 2304411, 9326253, 10332526, 4421547,
 3440748, 10179459, 13332843, 10375561, 1313462, 8375100, 5198480, 6071392,
 9341783, 1526887, 3985002, 1439429, 13923762, 7010104, 11969769, 4547026,
 2040072, 4025602, 3437678, 7939992, 11444177, 4496094, 9803157, 10745556,
 3671780, 4257846, 5662259, 13196905, 3237343, 12077182, 16222879, 7587769,
 14706824, 2184640, 12591135, 10420257, 7406075, 3648978, 11042541, 15906893,
 11914928, 4732944, 10695697, 12928164, 11980531, 4430912, 11939291, 2917017,
 6119256, 4172004, 9373765, 8410071, 14788383, 5047459, 5474428, 1737756,
 15967514, 13351758, 6691285, 8034329, 2856544, 14394753, 11310160, 12149558,
 7487528, 7542781, 15668898, 12525138, 12790975, 3707933, 9106617, 1965401,
 16219109, 12801644, 2443203, 4909502, 8762329, 3120803, 6360315, 9309720,
 15164599, 10844842, 4456529, 6667610, 14924259, 884312, 6234963, 3326042,
 15973422, 13919464, 5272099, 6414643, 3909029, 2764324, 5237926, 4774955,
 10445906, 4955302, 5203726, 10798229, 11443419, 2303395, 333836, 9646934,
 3464726, 4159182, 568492, 995747, 10318756, 13299332, 4836017, 8237783,
 3878992, 2581665, 11394667, 5672745, 14412947, 3159169, 9094251, 16467278,
 8671392, 15230076, 4843545, 7009238, 15504095, 1494895, 9627886, 14485051,
 8304291, 252817, 12421642, 16085736, 4774072, 2456177, 4160695, 15409741,
 4902868, 5793091, 13162925, 16039714, 782255, 11347835, 14884586, 366972,
 16308990, 11913488, 13390465, 2958444, 10340278, 1177858, 1319431, 10426302,
 2868597, 126119, 5784857, 5245324, 10903900, 16436004, 3389013, 1742384,
 14674502, 10279218, 8536112, 10364279, 6877778, 14051163, 1025130, 6072469,
 1988305, 8354440, 8216060, 16342977, 13112639, 3976679, 5913576, 8816697,
 6879995, 14043764, 3339515, 9364420, 15808858, 12261651, 2141560, 5636398,
 10345425, 10414756, 781725, 6155650, 4746914, 5078683, 7469001, 6799140,
 10156444, 9667150, 10116470, 4133858, 2121972, 1124204, 1003577, 1611214,
 14304602, 16221850, 13878465, 13577744, 3629235, 8772583, 10881308, 2410386,
 7300044, 5378855, 9301235, 12755149, 4977682, 8083074, 10327581, 6395087,
 9155434, 15501696, 7514362, 14520507, 15808945, 3244584, 4741962, 9658130,
 14336147, 8654727, 7969093, 15759799, 14029445, 5038459, 9894848, 8659300,
 13699287, 8834306, 10712885, 14753895, 10410465, 3373251, 309501, 9561475,
 5526688, 14647426, 14209836, 5339224, 207299, 14069911, 8722990, 2290950,
 3258216, 12505185, 6007317, 9218111, 14661019, 10537428, 11731949, 9027003,
 6641507, 9490160, 200241, 9720425, 16277895, 10816638, 1554761, 10431375,
 7467528, 6790302, 3429078, 14633753, 14428997, 11463204, 3576212, 2003426,
 6123687, 820520, 9992513, 15784513, 5778891, 6428165, 8388607
};

/*
 * The hardware uses an LFSR counting sequence to determine when to capture
 * the SPU PCs. An LFSR sequence is like a pseudo-random number sequence
 * where each number occurs once in the sequence, but the sequence is not in
 * numerical order. The SPU PC capture is done when the LFSR sequence reaches
 * the last value in the sequence. Hence the user-specified value N
 * corresponds to the LFSR number that is N from the end of the sequence.
 *
 * To avoid the time to compute the LFSR, a lookup table is used. The 24 bit
 * LFSR sequence is broken into four ranges. The spacing of the precomputed
 * values is adjusted in each range so the error between the user-specified
 * number (N) of events between samples and the actual number of events based
 * on the precomputed value will be less than about 6.2%. Note, if the user
 * specifies N < 2^16, the LFSR value that is 2^16 from the end will be used.
 * This is to prevent the loss of samples because the trace buffer is full.
 *
 *	User specified N		   Step between		Index in
 *					   precomputed values	precomputed
 *								table
 *	0 to 2^16-1			   ----			0
 *	2^16 to 2^16+2^19-1		   2^12			1 to 128
 *	2^16+2^19 to 2^16+2^19+2^22-1	   2^15			129 to 256
 *	2^16+2^19+2^22 to 2^24-1	   2^18			257 to 302
 *
 *
 * For example, the LFSR values in the second range are computed for 2^16,
 * 2^16+2^12, ... , 2^19-2^16, 2^19 and stored in the table at indices
 * 1, 2,..., 127, 128.
 *
 * The 24 bit LFSR value for the nth number in the sequence can be
 * calculated using the following code:
 *
 * #define size 24
 * int calculate_lfsr(int n)
 * {
 *	int i;
 *	unsigned int newlfsr0;
 *	unsigned int lfsr = 0xFFFFFF;
 *	unsigned int howmany = n;
 *
 *	for (i = 2; i < howmany + 2; i++) {
 *		newlfsr0 = (((lfsr >> (size - 1 - 0)) & 1) ^
 *			    ((lfsr >> (size - 1 - 1)) & 1) ^
 *			    (((lfsr >> (size - 1 - 6)) & 1) ^
 *			     ((lfsr >> (size - 1 - 23)) & 1)));
 *
 *		lfsr >>= 1;
 *		lfsr = lfsr | (newlfsr0 << (size - 1));
 *	}
 *	return lfsr;
 * }
 */

#define V2_16 (0x1 << 16)
#define V2_19 (0x1 << 19)
#define V2_22 (0x1 << 22)

static int calculate_lfsr(int n)
{
	/*
	 * The ranges and steps are in powers of 2 so the calculations
	 * can be done using shifts rather than divides.
	 */
	int index;

	if ((n >> 16) == 0)
		index = 0;
	else if (((n - V2_16) >> 19) == 0)
		index = ((n - V2_16) >> 12) + 1;
	else if (((n - V2_16 - V2_19) >> 22) == 0)
		index = ((n - V2_16 - V2_19) >> 15) + 1 + 128;
	else if (((n - V2_16 - V2_19 - V2_22) >> 24) == 0)
		index = ((n - V2_16 - V2_19 - V2_22) >> 18) + 1 + 256;
	else
		index = ENTRIES - 1;

	/* make sure index is valid */
	if ((index >= ENTRIES) || (index < 0))
		index = ENTRIES - 1;

	return initial_lfsr[index];
}
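
/*
 * Worked example (n is made up): n = 100000 falls in the second range
 * since ((100000 - V2_16) >> 19) == 0, giving
 * index = ((100000 - 65536) >> 12) + 1 = 9. Index 9 holds the value
 * precomputed for 2^16 + 8*2^12 = 98304 events, an error of about 1.7%
 * from the requested 100000, within the 6.2% bound described above.
 */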

static int pm_rtas_activate_spu_profiling(u32 node)
{
	int ret, i;
	struct pm_signal pm_signal_local[NR_PHYS_CTRS];

	/*
	 * Set up the rtas call to configure the debug bus to
	 * route the SPU PCs. Setup the pm_signal for each SPU
	 */
	for (i = 0; i < NUM_SPUS_PER_NODE; i++) {
		pm_signal_local[i].cpu = node;
		pm_signal_local[i].signal_group = 41;
		/* spu i on word (i/2) */
		pm_signal_local[i].bus_word = 1 << i / 2;
		/* spu i */
		pm_signal_local[i].sub_unit = i;
		pm_signal_local[i].bit = 63;
	}

	ret = rtas_ibm_cbe_perftools(SUBFUNC_ACTIVATE,
				     PASSTHRU_ENABLE, pm_signal_local,
				     (NUM_SPUS_PER_NODE
				      * sizeof(struct pm_signal)));

	if (unlikely(ret)) {
		printk(KERN_WARNING "%s: rtas returned: %d\n",
		       __FUNCTION__, ret);
		return -EIO;
	}

	return 0;
}
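
/*
 * For example, SPU 5 (sub_unit = 5) is routed on bus_word
 * 1 << (5 / 2) = 0x4, i.e. trace bus word 2, so SPUs 4 and 5 share a
 * word, matching the "spu i on word (i/2)" comment above.
 */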

#ifdef CONFIG_CPU_FREQ
static int
oprof_cpufreq_notify(struct notifier_block *nb, unsigned long val, void *data)
{
	int ret = 0;
	struct cpufreq_freqs *frq = data;

	if ((val == CPUFREQ_PRECHANGE && frq->old < frq->new) ||
	    (val == CPUFREQ_POSTCHANGE && frq->old > frq->new) ||
	    (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE))
		set_spu_profiling_frequency(frq->new, spu_cycle_reset);
	return ret;
}

static struct notifier_block cpu_freq_notifier_block = {
	.notifier_call = oprof_cpufreq_notify
};
#endif

static int cell_global_start_spu(struct op_counter_config *ctr)
{
	int subfunc;
	unsigned int lfsr_value;
	int cpu;
	int ret;
	int rtas_error;
	unsigned int cpu_khzfreq = 0;

	/* The SPU profiling uses time-based profiling based on
	 * cpu frequency, so if configured with the CPU_FREQ
	 * option, we should detect frequency changes and react
	 * accordingly.
	 */
#ifdef CONFIG_CPU_FREQ
	ret = cpufreq_register_notifier(&cpu_freq_notifier_block,
					CPUFREQ_TRANSITION_NOTIFIER);
	if (ret < 0)
		/* this is not a fatal error */
		printk(KERN_ERR "CPU freq change registration failed: %d\n",
		       ret);
	else
		cpu_khzfreq = cpufreq_quick_get(smp_processor_id());
#endif

	set_spu_profiling_frequency(cpu_khzfreq, spu_cycle_reset);

	for_each_online_cpu(cpu) {
		if (cbe_get_hw_thread_id(cpu))
			continue;

		/*
		 * Setup SPU cycle-based profiling.
		 * Set perf_mon_control bit 0 to a zero before
		 * enabling spu collection hardware.
		 */
		cbe_write_pm(cpu, pm_control, 0);

		if (spu_cycle_reset > MAX_SPU_COUNT)
			/* use largest possible value */
			lfsr_value = calculate_lfsr(MAX_SPU_COUNT - 1);
		else
			lfsr_value = calculate_lfsr(spu_cycle_reset);

		/* must use a non-zero value. Zero disables data collection. */
		if (lfsr_value == 0)
			lfsr_value = calculate_lfsr(1);

		lfsr_value = lfsr_value << 8; /* shift lfsr to correct
					       * register location
					       */

		/* debug bus setup */
		ret = pm_rtas_activate_spu_profiling(cbe_cpu_to_node(cpu));

		if (unlikely(ret)) {
			rtas_error = ret;
			goto out;
		}

		subfunc = 2;	/* 2 - activate SPU tracing, 3 - deactivate */

		/* start profiling */
		ret = rtas_call(spu_rtas_token, 3, 1, NULL, subfunc,
				cbe_cpu_to_node(cpu), lfsr_value);

		if (unlikely(ret != 0)) {
			printk(KERN_ERR
			       "%s: rtas call ibm,cbe-spu-perftools failed, return = %d\n",
			       __FUNCTION__, ret);
			rtas_error = -EIO;
			goto out;
		}
	}

	rtas_error = start_spu_profiling(spu_cycle_reset);
	if (rtas_error)
		goto out_stop;

	oprofile_running = 1;
	return 0;

out_stop:
	cell_global_stop_spu();		/* clean up the PMU/debug bus */
out:
	return rtas_error;
}

static int cell_global_start_ppu(struct op_counter_config *ctr)
{
	u32 cpu, i;
	u32 interrupt_mask = 0;

	/* This routine gets called once for the system.
	 * There is one performance monitor per node, so we
	 * only need to perform this function once per node.
	 */
	for_each_online_cpu(cpu) {
		if (cbe_get_hw_thread_id(cpu))
			continue;

		interrupt_mask = 0;

		for (i = 0; i < num_counters; ++i) {
			if (ctr_enabled & (1 << i)) {
				cbe_write_ctr(cpu, i, reset_value[i]);
				enable_ctr(cpu, i, pm_regs.pm07_cntrl);
				interrupt_mask |=
					CBE_PM_CTR_OVERFLOW_INTR(i);
			} else {
				/* Disable counter */
				cbe_write_pm07_control(cpu, i, 0);
			}
		}

		cbe_get_and_clear_pm_interrupts(cpu);
		cbe_enable_pm_interrupts(cpu, hdw_thread, interrupt_mask);
		cbe_enable_pm(cpu);
	}

	virt_cntr_inter_mask = interrupt_mask;
	oprofile_running = 1;
	smp_wmb();

	/*
	 * NOTE: start_virt_cntrs will result in cell_virtual_cntr() being
	 * executed which manipulates the PMU. We start the "virtual counter"
	 * here so that we do not need to synchronize access to the PMU in
	 * the above for-loop.
	 */
	start_virt_cntrs();

	return 0;
}

static int cell_global_start(struct op_counter_config *ctr)
{
	if (spu_cycle_reset)
		return cell_global_start_spu(ctr);
	else
		return cell_global_start_ppu(ctr);
}

/*
 * Note the generic OProfile stop calls do not support returning
 * an error on stop. Hence, we will not return an error if the FW
 * calls fail on stop. Failure to reset the debug bus is not an issue.
 * Failure to disable the SPU profiling is not an issue. The FW calls
 * to enable the performance counters and debug bus will work even if
 * the hardware was not cleanly reset.
 */
static void cell_global_stop_spu(void)
{
	int subfunc, rtn_value;
	unsigned int lfsr_value;
	int cpu;

	oprofile_running = 0;

#ifdef CONFIG_CPU_FREQ
	cpufreq_unregister_notifier(&cpu_freq_notifier_block,
				    CPUFREQ_TRANSITION_NOTIFIER);
#endif

	for_each_online_cpu(cpu) {
		if (cbe_get_hw_thread_id(cpu))
			continue;

		subfunc = 3;	/*
				 * 2 - activate SPU tracing,
				 * 3 - deactivate
				 */
		lfsr_value = 0x8f100000;

		rtn_value = rtas_call(spu_rtas_token, 3, 1, NULL,
				      subfunc, cbe_cpu_to_node(cpu),
				      lfsr_value);

		if (unlikely(rtn_value != 0)) {
			printk(KERN_ERR
			       "%s: rtas call ibm,cbe-spu-perftools failed, return = %d\n",
			       __FUNCTION__, rtn_value);
		}

		/* Deactivate the signals */
		pm_rtas_reset_signals(cbe_cpu_to_node(cpu));
	}

	stop_spu_profiling();
}

static void cell_global_stop_ppu(void)
{
	int cpu;

	/*
	 * This routine will be called once for the system.
	 * There is one performance monitor per node, so we
	 * only need to perform this function once per node.
	 */
	del_timer_sync(&timer_virt_cntr);
	oprofile_running = 0;
	smp_wmb();

	for_each_online_cpu(cpu) {
		if (cbe_get_hw_thread_id(cpu))
			continue;

		cbe_sync_irq(cbe_cpu_to_node(cpu));
		/* Stop the counters */
		cbe_disable_pm(cpu);

		/* Deactivate the signals */
		pm_rtas_reset_signals(cbe_cpu_to_node(cpu));

		/* Deactivate interrupts */
		cbe_disable_pm_interrupts(cpu);
	}
}

static void cell_global_stop(void)
{
	if (spu_cycle_reset)
		cell_global_stop_spu();
	else
		cell_global_stop_ppu();
}

static void cell_handle_interrupt(struct pt_regs *regs,
				  struct op_counter_config *ctr)
{
	u32 cpu;
	u64 pc;
	int is_kernel;
	unsigned long flags = 0;
	u32 interrupt_mask;
	int i;

	cpu = smp_processor_id();

	/*
	 * Need to make sure the interrupt handler and the virt counter
	 * routine are not running at the same time. See the
	 * cell_virtual_cntr() routine for additional comments.
	 */
	spin_lock_irqsave(&virt_cntr_lock, flags);

	/*
	 * Need to disable and reenable the performance counters
	 * to get the desired behavior from the hardware. This
	 * is hardware specific.
	 */

	cbe_disable_pm(cpu);

	interrupt_mask = cbe_get_and_clear_pm_interrupts(cpu);

	/*
	 * If the interrupt mask has been cleared, then the virt cntr
	 * has cleared the interrupt. When the thread that generated
	 * the interrupt is restored, the data count will be restored to
	 * 0xFFFFFFF0 to cause the interrupt to be regenerated.
	 */

	if ((oprofile_running == 1) && (interrupt_mask != 0)) {
		pc = regs->nip;
		is_kernel = is_kernel_addr(pc);

		for (i = 0; i < num_counters; ++i) {
			if ((interrupt_mask & CBE_PM_CTR_OVERFLOW_INTR(i))
			    && ctr[i].enabled) {
				oprofile_add_pc(pc, is_kernel, i);
				cbe_write_ctr(cpu, i, reset_value[i]);
			}
		}

		/*
		 * The counters were frozen by the interrupt.
		 * Reenable the interrupt and restart the counters.
		 * If there was a race between the interrupt handler and
		 * the virtual counter routine, the virtual counter
		 * routine may have cleared the interrupts. Hence we must
		 * use the virt_cntr_inter_mask to re-enable the interrupts.
		 */
		cbe_enable_pm_interrupts(cpu, hdw_thread,
					 virt_cntr_inter_mask);

		/*
		 * The writes to the various performance counters only go
		 * to a latch. The new values (interrupt setting bits, reset
		 * counter value etc.) are not copied to the actual registers
		 * until the performance monitor is enabled. In order to get
		 * this to work as desired, the performance monitor needs to
		 * be disabled while writing to the latches. This is a
		 * HW design issue.
		 */
		cbe_enable_pm(cpu);
	}
	spin_unlock_irqrestore(&virt_cntr_lock, flags);
}
1181
Bob Nelson14748552007-07-20 21:39:53 +02001182/*
1183 * This function is called from the generic OProfile
1184 * driver. When profiling PPUs, we need to do the
1185 * generic sync start; otherwise, do spu_sync_start.
1186 */
1187static int cell_sync_start(void)
1188{
1189 if (spu_cycle_reset)
1190 return spu_sync_start();
1191 else
1192 return DO_GENERIC_SYNC;
1193}
1194
1195static int cell_sync_stop(void)
1196{
1197 if (spu_cycle_reset)
1198 return spu_sync_stop();
1199 else
1200 return 1;
1201}
1202
Maynard Johnson18f21902006-11-20 18:45:16 +01001203struct op_powerpc_model op_model_cell = {
1204 .reg_setup = cell_reg_setup,
1205 .cpu_setup = cell_cpu_setup,
1206 .global_start = cell_global_start,
1207 .global_stop = cell_global_stop,
Bob Nelson14748552007-07-20 21:39:53 +02001208 .sync_start = cell_sync_start,
1209 .sync_stop = cell_sync_stop,
Maynard Johnson18f21902006-11-20 18:45:16 +01001210 .handle_interrupt = cell_handle_interrupt,
1211};