/*
 * Cell Broadband Engine OProfile Support
 *
 * (C) Copyright IBM Corporation 2006
 *
 * Author: David Erb (djerb@us.ibm.com)
 * Modifications:
 *	Carl Love <carll@us.ibm.com>
 *	Maynard Johnson <maynardj@us.ibm.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/kthread.h>
#include <linux/oprofile.h>
#include <linux/percpu.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/timer.h>
#include <asm/cell-pmu.h>
#include <asm/cputable.h>
#include <asm/firmware.h>
#include <asm/io.h>
#include <asm/oprofile_impl.h>
#include <asm/processor.h>
#include <asm/prom.h>
#include <asm/ptrace.h>
#include <asm/reg.h>
#include <asm/rtas.h>
#include <asm/system.h>
#include <asm/cell-regs.h>

#include "../platforms/cell/interrupt.h"
#include "cell/pr_util.h"

static void cell_global_stop_spu(void);

/*
 * spu_cycle_reset is the number of cycles between samples.
 * This variable is used for SPU profiling and should ONLY be set
 * at the beginning of cell_reg_setup; otherwise, it's read-only.
 */
static unsigned int spu_cycle_reset;

#define NUM_SPUS_PER_NODE    8
#define SPU_CYCLES_EVENT_NUM 2	/* event number for SPU_CYCLES */

#define PPU_CYCLES_EVENT_NUM 1	/* event number for CYCLES */
#define PPU_CYCLES_GRP_NUM   1	/* special group number for identifying
				 * PPU_CYCLES event
				 */
#define CBE_COUNT_ALL_CYCLES 0x42800000 /* PPU cycle event specifier */

#define NUM_THREADS 2		/* number of physical threads in
				 * physical processor
				 */
#define NUM_DEBUG_BUS_WORDS 4
#define NUM_INPUT_BUS_WORDS 2

#define MAX_SPU_COUNT 0xFFFFFF	/* maximum 24 bit LFSR value */

struct pmc_cntrl_data {
	unsigned long vcntr;
	unsigned long evnts;
	unsigned long masks;
	unsigned long enabled;
};

/*
 * ibm,cbe-perftools rtas parameters
 */
struct pm_signal {
	u16 cpu;		/* Processor to modify */
	u16 sub_unit;		/* hw subunit this applies to (if applicable) */
	short int signal_group;	/* Signal Group to Enable/Disable */
	u8 bus_word;		/* Enable/Disable on this Trace/Trigger/Event
				 * Bus Word(s) (bitmask)
				 */
	u8 bit;			/* Trigger/Event bit (if applicable) */
};

/*
 * rtas call arguments
 */
enum {
	SUBFUNC_RESET = 1,
	SUBFUNC_ACTIVATE = 2,
	SUBFUNC_DEACTIVATE = 3,

	PASSTHRU_IGNORE = 0,
	PASSTHRU_ENABLE = 1,
	PASSTHRU_DISABLE = 2,
};

struct pm_cntrl {
	u16 enable;
	u16 stop_at_max;
	u16 trace_mode;
	u16 freeze;
	u16 count_mode;
};

static struct {
	u32 group_control;
	u32 debug_bus_control;
	struct pm_cntrl pm_cntrl;
	u32 pm07_cntrl[NR_PHYS_CTRS];
} pm_regs;

#define GET_SUB_UNIT(x) ((x & 0x0000f000) >> 12)
#define GET_BUS_WORD(x) ((x & 0x000000f0) >> 4)
#define GET_BUS_TYPE(x) ((x & 0x00000300) >> 8)
#define GET_POLARITY(x) ((x & 0x00000002) >> 1)
#define GET_COUNT_CYCLES(x) (x & 0x00000001)
#define GET_INPUT_CONTROL(x) ((x & 0x00000004) >> 2)
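
/*
 * Illustrative decoding (not from the original source): for a
 * hypothetical unit_mask of 0x1023, the macros above would yield
 * sub_unit = 1, bus_word = 2, bus_type = 0, input_control = 0,
 * polarity = 1 and count_cycles = 1.
 */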

static DEFINE_PER_CPU(unsigned long[NR_PHYS_CTRS], pmc_values);

static struct pmc_cntrl_data pmc_cntrl[NUM_THREADS][NR_PHYS_CTRS];

/*
 * The CELL profiling code makes rtas calls to set up the debug bus to
 * route the performance signals. Additionally, SPU profiling requires
 * a second rtas call to set up the hardware to capture the SPU PCs.
 * The EIO error value is returned if the token lookups or the rtas
 * call fail. The EIO error number is the best choice of the existing
 * error numbers. The probability of an rtas-related error is very low,
 * but by returning EIO and printing additional information to dmesg
 * the user will know that OProfile did not start, and dmesg will tell
 * them why. OProfile does not support returning errors on Stop. This
 * is not a huge issue, since failure to reset the debug bus or stop
 * the SPU PC collection is not a fatal issue. Chances are if the Stop
 * failed, Start doesn't work either.
 */

/*
 * Interpretation of hdw_thread:
 * 0 - even virtual cpus 0, 2, 4,...
 * 1 - odd virtual cpus 1, 3, 5, ...
 *
 * FIXME: this is strictly wrong, we need to clean this up in a number
 * of places. It works for now. -arnd
 */
static u32 hdw_thread;

static u32 virt_cntr_inter_mask;
static struct timer_list timer_virt_cntr;

/*
 * pm_signal needs to be global since it is initialized in
 * cell_reg_setup at the time when the necessary information
 * is available.
 */
static struct pm_signal pm_signal[NR_PHYS_CTRS];
static int pm_rtas_token;	/* token for debug bus setup call */
static int spu_rtas_token;	/* token for SPU cycle profiling */

static u32 reset_value[NR_PHYS_CTRS];
static int num_counters;
static int oprofile_running;
static DEFINE_SPINLOCK(virt_cntr_lock);

static u32 ctr_enabled;

static unsigned char input_bus[NUM_INPUT_BUS_WORDS];

/*
 * Firmware interface functions
 */
static int
rtas_ibm_cbe_perftools(int subfunc, int passthru,
		       void *address, unsigned long length)
{
	u64 paddr = __pa(address);

	return rtas_call(pm_rtas_token, 5, 1, NULL, subfunc,
			 passthru, paddr >> 32, paddr & 0xffffffff, length);
}

static void pm_rtas_reset_signals(u32 node)
{
	int ret;
	struct pm_signal pm_signal_local;

	/*
	 * The debug bus is being set to the passthru disable state.
	 * However, the FW still expects at least one legal signal routing
	 * entry or it will return an error on the arguments. If we don't
	 * supply a valid entry, we must ignore all return values. Ignoring
	 * all return values means we might miss an error we should be
	 * concerned about.
	 */

	/* fw expects physical cpu #. */
	pm_signal_local.cpu = node;
	pm_signal_local.signal_group = 21;
	pm_signal_local.bus_word = 1;
	pm_signal_local.sub_unit = 0;
	pm_signal_local.bit = 0;

	ret = rtas_ibm_cbe_perftools(SUBFUNC_RESET, PASSTHRU_DISABLE,
				     &pm_signal_local,
				     sizeof(struct pm_signal));

	if (unlikely(ret))
		/*
		 * Not a fatal error. For OProfile stop, the oprofile
		 * functions do not support returning an error for
		 * failure to stop OProfile.
		 */
		printk(KERN_WARNING "%s: rtas returned: %d\n",
		       __FUNCTION__, ret);
}

static int pm_rtas_activate_signals(u32 node, u32 count)
{
	int ret;
	int i, j;
	struct pm_signal pm_signal_local[NR_PHYS_CTRS];

	/*
	 * There is no debug setup required for the cycles event.
	 * Note that only events in the same group can be used.
	 * Otherwise, there will be conflicts in correctly routing
	 * the signals on the debug bus. It is the responsibility
	 * of the OProfile user tool to check the events are in
	 * the same group.
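	 *
	 * (Illustrative note, not from the original source: since the
	 * group is encoded as event/100, hypothetical events 2119 and
	 * 2120 both belong to signal group 21 and may be profiled
	 * together, while an event from group 22 may not be mixed in.)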
	 */
	i = 0;
	for (j = 0; j < count; j++) {
		if (pm_signal[j].signal_group != PPU_CYCLES_GRP_NUM) {

			/* fw expects physical cpu # */
			pm_signal_local[i].cpu = node;
			pm_signal_local[i].signal_group
				= pm_signal[j].signal_group;
			pm_signal_local[i].bus_word = pm_signal[j].bus_word;
			pm_signal_local[i].sub_unit = pm_signal[j].sub_unit;
			pm_signal_local[i].bit = pm_signal[j].bit;
			i++;
		}
	}

	if (i != 0) {
		ret = rtas_ibm_cbe_perftools(SUBFUNC_ACTIVATE, PASSTHRU_ENABLE,
					     pm_signal_local,
					     i * sizeof(struct pm_signal));

		if (unlikely(ret)) {
			printk(KERN_WARNING "%s: rtas returned: %d\n",
			       __FUNCTION__, ret);
			return -EIO;
		}
	}

	return 0;
}

/*
 * PM Signal functions
 */
static void set_pm_event(u32 ctr, int event, u32 unit_mask)
{
	struct pm_signal *p;
	u32 signal_bit;
	u32 bus_word, bus_type, count_cycles, polarity, input_control;
	int j, i;

	if (event == PPU_CYCLES_EVENT_NUM) {
		/* Special Event: Count all cpu cycles */
		pm_regs.pm07_cntrl[ctr] = CBE_COUNT_ALL_CYCLES;
		p = &(pm_signal[ctr]);
		p->signal_group = PPU_CYCLES_GRP_NUM;
		p->bus_word = 1;
		p->sub_unit = 0;
		p->bit = 0;
		goto out;
	} else {
		pm_regs.pm07_cntrl[ctr] = 0;
	}

	bus_word = GET_BUS_WORD(unit_mask);
	bus_type = GET_BUS_TYPE(unit_mask);
	count_cycles = GET_COUNT_CYCLES(unit_mask);
	polarity = GET_POLARITY(unit_mask);
	input_control = GET_INPUT_CONTROL(unit_mask);
	signal_bit = (event % 100);

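	/*
	 * Illustrative example (not from the original source): a
	 * hypothetical event number 2119 encodes signal group 21
	 * (event / 100) and signal bit 19 (event % 100).
	 */
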
	p = &(pm_signal[ctr]);

	p->signal_group = event / 100;
	p->bus_word = bus_word;
	p->sub_unit = GET_SUB_UNIT(unit_mask);

	pm_regs.pm07_cntrl[ctr] = 0;
	pm_regs.pm07_cntrl[ctr] |= PM07_CTR_COUNT_CYCLES(count_cycles);
	pm_regs.pm07_cntrl[ctr] |= PM07_CTR_POLARITY(polarity);
	pm_regs.pm07_cntrl[ctr] |= PM07_CTR_INPUT_CONTROL(input_control);

	/*
	 * Some of the islands signal selection is based on 64 bit words.
	 * The debug bus words are 32 bits, the input words to the performance
	 * counters are defined as 32 bits. Need to convert the 64 bit island
	 * specification to the appropriate 32 input bit and bus word for the
	 * performance counter event selection. See the CELL Performance
	 * monitoring signals manual and the Perf cntr hardware descriptions
	 * for the details.
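	 *
	 * (Illustrative note, not from the original source: with
	 * input_control == 0, a signal bit of 45 becomes input bit 13
	 * (45 - 32), and a bus_word mask of 0x3 is narrowed to 0x2 and
	 * 0xc to 0x8, as done below.)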
	 */
	if (input_control == 0) {
		if (signal_bit > 31) {
			signal_bit -= 32;
			if (bus_word == 0x3)
				bus_word = 0x2;
			else if (bus_word == 0xc)
				bus_word = 0x8;
		}

		if ((bus_type == 0) && p->signal_group >= 60)
			bus_type = 2;
		if ((bus_type == 1) && p->signal_group >= 50)
			bus_type = 0;

		pm_regs.pm07_cntrl[ctr] |= PM07_CTR_INPUT_MUX(signal_bit);
	} else {
		pm_regs.pm07_cntrl[ctr] = 0;
		p->bit = signal_bit;
	}

	for (i = 0; i < NUM_DEBUG_BUS_WORDS; i++) {
		if (bus_word & (1 << i)) {
			pm_regs.debug_bus_control |=
				(bus_type << (30 - (2 * i)));

			for (j = 0; j < NUM_INPUT_BUS_WORDS; j++) {
				if (input_bus[j] == 0xff) {
					input_bus[j] = i;
					pm_regs.group_control |=
						(i << (30 - (2 * j)));

					break;
				}
			}
		}
	}
out:
	;
}

static void write_pm_cntrl(int cpu)
{
	/*
	 * Oprofile will use 32 bit counters, set bits 7:10 to 0.
	 * pm_regs.pm_cntrl is a global.
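	 *
	 * (Illustrative note, not from the original source: if enable
	 * and freeze are both 1 while stop_at_max and trace_mode are 0,
	 * val below ends up as CBE_PM_ENABLE_PERF_MON |
	 * CBE_PM_FREEZE_ALL_CTRS plus the count-mode bits.)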
	 */

	u32 val = 0;
	if (pm_regs.pm_cntrl.enable == 1)
		val |= CBE_PM_ENABLE_PERF_MON;

	if (pm_regs.pm_cntrl.stop_at_max == 1)
		val |= CBE_PM_STOP_AT_MAX;

	if (pm_regs.pm_cntrl.trace_mode == 1)
		val |= CBE_PM_TRACE_MODE_SET(pm_regs.pm_cntrl.trace_mode);

	if (pm_regs.pm_cntrl.freeze == 1)
		val |= CBE_PM_FREEZE_ALL_CTRS;

	/*
	 * Routine set_count_mode must be called previously to set
	 * the count mode based on the user selection of user and kernel.
	 */
	val |= CBE_PM_COUNT_MODE_SET(pm_regs.pm_cntrl.count_mode);
	cbe_write_pm(cpu, pm_control, val);
}

static inline void
set_count_mode(u32 kernel, u32 user)
{
	/*
	 * The user must specify user and kernel if they want them. If
	 * neither is specified, OProfile will count in hypervisor mode.
	 * pm_regs.pm_cntrl is a global.
	 */
	if (kernel) {
		if (user)
			pm_regs.pm_cntrl.count_mode = CBE_COUNT_ALL_MODES;
		else
			pm_regs.pm_cntrl.count_mode =
				CBE_COUNT_SUPERVISOR_MODE;
	} else {
		if (user)
			pm_regs.pm_cntrl.count_mode = CBE_COUNT_PROBLEM_MODE;
		else
			pm_regs.pm_cntrl.count_mode =
				CBE_COUNT_HYPERVISOR_MODE;
	}
}

static inline void enable_ctr(u32 cpu, u32 ctr, u32 *pm07_cntrl)
{
	pm07_cntrl[ctr] |= CBE_PM_CTR_ENABLE;
	cbe_write_pm07_control(cpu, ctr, pm07_cntrl[ctr]);
}

/*
 * Oprofile is expected to collect data on all CPUs simultaneously.
 * However, there is one set of performance counters per node. There are
 * two hardware threads or virtual CPUs on each node. Hence, OProfile must
 * multiplex in time the performance counter collection on the two virtual
 * CPUs. The multiplexing of the performance counters is done by this
 * virtual counter routine.
 *
 * The pmc_values used below is defined as 'per-cpu' but its use is
 * more akin to 'per-node'. We need to store two sets of counter
 * values per node -- one for the previous run and one for the next.
 * The per-cpu[NR_PHYS_CTRS] gives us the storage we need. Each odd/even
 * pair of per-cpu arrays is used for storing the previous and next
 * pmc values for a given node.
 * NOTE: We use the per-cpu variable to improve cache performance.
 *
 * This routine will alternate loading the virtual counters for
 * the two virtual CPUs.
 */
static void cell_virtual_cntr(unsigned long data)
{
	int i, prev_hdw_thread, next_hdw_thread;
	u32 cpu;
	unsigned long flags;

	/*
	 * Make sure that the interrupt handler and the virt counter are
	 * not both playing with the counters on the same node.
	 */

	spin_lock_irqsave(&virt_cntr_lock, flags);

	prev_hdw_thread = hdw_thread;

	/* switch the cpu handling the interrupts */
	hdw_thread = 1 ^ hdw_thread;
	next_hdw_thread = hdw_thread;

	pm_regs.group_control = 0;
	pm_regs.debug_bus_control = 0;

	for (i = 0; i < NUM_INPUT_BUS_WORDS; i++)
		input_bus[i] = 0xff;

	/*
	 * There are some per thread events. Must do the
	 * set event for the thread that is being started.
	 */
	for (i = 0; i < num_counters; i++)
		set_pm_event(i,
			     pmc_cntrl[next_hdw_thread][i].evnts,
			     pmc_cntrl[next_hdw_thread][i].masks);

	/*
	 * The following is done only once per each node, but
	 * we need cpu #, not node #, to pass to the cbe_xxx functions.
	 */
	for_each_online_cpu(cpu) {
		if (cbe_get_hw_thread_id(cpu))
			continue;

		/*
		 * stop counters, save counter values, restore counts
		 * for previous thread
		 */
		cbe_disable_pm(cpu);
		cbe_disable_pm_interrupts(cpu);
		for (i = 0; i < num_counters; i++) {
			per_cpu(pmc_values, cpu + prev_hdw_thread)[i]
				= cbe_read_ctr(cpu, i);

			if (per_cpu(pmc_values, cpu + next_hdw_thread)[i]
			    == 0xFFFFFFFF)
				/* If the cntr value is 0xffffffff, we must
				 * reset that to 0xfffffff0 when the current
				 * thread is restarted. This will generate a
				 * new interrupt and make sure that we never
				 * restore the counters to the max value. If
				 * the counters were restored to the max value,
				 * they do not increment and no interrupts are
				 * generated. Hence no more samples will be
				 * collected on that cpu.
				 */
				cbe_write_ctr(cpu, i, 0xFFFFFFF0);
			else
				cbe_write_ctr(cpu, i,
					      per_cpu(pmc_values,
						      cpu +
						      next_hdw_thread)[i]);
		}

		/*
		 * Switch to the other thread. Change the interrupt
		 * and control regs to be scheduled on the CPU
		 * corresponding to the thread to execute.
		 */
		for (i = 0; i < num_counters; i++) {
			if (pmc_cntrl[next_hdw_thread][i].enabled) {
				/*
				 * There are some per thread events.
				 * Must do the set event, enable_cntr
				 * for each cpu.
				 */
				enable_ctr(cpu, i,
					   pm_regs.pm07_cntrl);
			} else {
				cbe_write_pm07_control(cpu, i, 0);
			}
		}

		/* Enable interrupts on the CPU thread that is starting */
		cbe_enable_pm_interrupts(cpu, next_hdw_thread,
					 virt_cntr_inter_mask);
		cbe_enable_pm(cpu);
	}

	spin_unlock_irqrestore(&virt_cntr_lock, flags);

	mod_timer(&timer_virt_cntr, jiffies + HZ / 10);
}

static void start_virt_cntrs(void)
{
	init_timer(&timer_virt_cntr);
	timer_virt_cntr.function = cell_virtual_cntr;
	timer_virt_cntr.data = 0UL;
	timer_virt_cntr.expires = jiffies + HZ / 10;
	add_timer(&timer_virt_cntr);
}

/* This function is called once for all cpus combined */
static int cell_reg_setup(struct op_counter_config *ctr,
			  struct op_system_config *sys, int num_ctrs)
{
	int i, j, cpu;

	spu_cycle_reset = 0;

	if (ctr[0].event == SPU_CYCLES_EVENT_NUM) {
		spu_cycle_reset = ctr[0].count;

		/*
		 * Each node will need to make the rtas call to start
		 * and stop SPU profiling. Get the token once and store it.
		 */
		spu_rtas_token = rtas_token("ibm,cbe-spu-perftools");

		if (unlikely(spu_rtas_token == RTAS_UNKNOWN_SERVICE)) {
			printk(KERN_ERR
			       "%s: rtas token ibm,cbe-spu-perftools unknown\n",
			       __FUNCTION__);
			return -EIO;
		}
	}

	pm_rtas_token = rtas_token("ibm,cbe-perftools");

	/*
	 * For all events except PPU CYCLES, each node will need to make
	 * the rtas cbe-perftools call to set up and reset the debug bus.
	 * Make the token lookup call once and store it in the global
	 * variable pm_rtas_token.
	 */
	if (unlikely(pm_rtas_token == RTAS_UNKNOWN_SERVICE)) {
		printk(KERN_ERR
		       "%s: rtas token ibm,cbe-perftools unknown\n",
		       __FUNCTION__);
		return -EIO;
	}

	num_counters = num_ctrs;

	pm_regs.group_control = 0;
	pm_regs.debug_bus_control = 0;

	/* setup the pm_control register */
	memset(&pm_regs.pm_cntrl, 0, sizeof(struct pm_cntrl));
	pm_regs.pm_cntrl.stop_at_max = 1;
	pm_regs.pm_cntrl.trace_mode = 0;
	pm_regs.pm_cntrl.freeze = 1;

	set_count_mode(sys->enable_kernel, sys->enable_user);

	/* Setup the thread 0 events */
	for (i = 0; i < num_ctrs; ++i) {

		pmc_cntrl[0][i].evnts = ctr[i].event;
		pmc_cntrl[0][i].masks = ctr[i].unit_mask;
		pmc_cntrl[0][i].enabled = ctr[i].enabled;
		pmc_cntrl[0][i].vcntr = i;

		for_each_possible_cpu(j)
			per_cpu(pmc_values, j)[i] = 0;
	}

	/*
	 * Setup the thread 1 events, map the thread 0 event to the
	 * equivalent thread 1 event.
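	 *
	 * (Illustrative note, not from the original source: under the
	 * mapping below, thread 0 event 2100 maps to thread 1 event 2119,
	 * event 2200 maps to 2216, and event 2203 is used unchanged by
	 * both threads.)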
	 */
	for (i = 0; i < num_ctrs; ++i) {
		if ((ctr[i].event >= 2100) && (ctr[i].event <= 2111))
			pmc_cntrl[1][i].evnts = ctr[i].event + 19;
		else if (ctr[i].event == 2203)
			pmc_cntrl[1][i].evnts = ctr[i].event;
		else if ((ctr[i].event >= 2200) && (ctr[i].event <= 2215))
			pmc_cntrl[1][i].evnts = ctr[i].event + 16;
		else
			pmc_cntrl[1][i].evnts = ctr[i].event;

		pmc_cntrl[1][i].masks = ctr[i].unit_mask;
		pmc_cntrl[1][i].enabled = ctr[i].enabled;
		pmc_cntrl[1][i].vcntr = i;
	}

	for (i = 0; i < NUM_INPUT_BUS_WORDS; i++)
		input_bus[i] = 0xff;

	/*
	 * Our counters count up, and "count" refers to how many events
	 * remain before the next interrupt, and we interrupt on
	 * overflow. So we calculate the starting value which will
	 * give us "count" until overflow.
	 * Then we set the events on the enabled counters.
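	 *
	 * (Worked example, not from the original source: a requested
	 * count of 100,000 (0x186A0) gives reset_value =
	 * 0xFFFFFFFF - 0x186A0 = 0xFFFE795F.)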
	 */
	for (i = 0; i < num_counters; ++i) {
		/* start with virtual counter set 0 */
		if (pmc_cntrl[0][i].enabled) {
			/* Using 32bit counters, reset max - count */
			reset_value[i] = 0xFFFFFFFF - ctr[i].count;
			set_pm_event(i,
				     pmc_cntrl[0][i].evnts,
				     pmc_cntrl[0][i].masks);

			/* global, used by cell_cpu_setup */
			ctr_enabled |= (1 << i);
		}
	}

	/* initialize the previous counts for the virtual cntrs */
	for_each_online_cpu(cpu)
		for (i = 0; i < num_counters; ++i) {
			per_cpu(pmc_values, cpu)[i] = reset_value[i];
		}

	return 0;
}

/* This function is called once for each cpu */
static int cell_cpu_setup(struct op_counter_config *cntr)
{
	u32 cpu = smp_processor_id();
	u32 num_enabled = 0;
	int i;

	if (spu_cycle_reset)
		return 0;

	/* There is one performance monitor per processor chip (i.e. node),
	 * so we only need to perform this function once per node.
	 */
	if (cbe_get_hw_thread_id(cpu))
		return 0;

	/* Stop all counters */
	cbe_disable_pm(cpu);
	cbe_disable_pm_interrupts(cpu);

	cbe_write_pm(cpu, pm_interval, 0);
	cbe_write_pm(cpu, pm_start_stop, 0);
	cbe_write_pm(cpu, group_control, pm_regs.group_control);
	cbe_write_pm(cpu, debug_bus_control, pm_regs.debug_bus_control);
	write_pm_cntrl(cpu);

	for (i = 0; i < num_counters; ++i) {
		if (ctr_enabled & (1 << i)) {
			pm_signal[num_enabled].cpu = cbe_cpu_to_node(cpu);
			num_enabled++;
		}
	}

	/*
	 * The pm_rtas_activate_signals will return -EIO if the FW
	 * call failed.
	 */
	return pm_rtas_activate_signals(cbe_cpu_to_node(cpu), num_enabled);
}

#define ENTRIES	303
#define MAXLFSR	0xFFFFFF

/* precomputed table of 24 bit LFSR values */
static int initial_lfsr[] = {
 8221349, 12579195, 5379618, 10097839, 7512963, 7519310, 3955098, 10753424,
 15507573, 7458917, 285419, 2641121, 9780088, 3915503, 6668768, 1548716,
 4885000, 8774424, 9650099, 2044357, 2304411, 9326253, 10332526, 4421547,
 3440748, 10179459, 13332843, 10375561, 1313462, 8375100, 5198480, 6071392,
 9341783, 1526887, 3985002, 1439429, 13923762, 7010104, 11969769, 4547026,
 2040072, 4025602, 3437678, 7939992, 11444177, 4496094, 9803157, 10745556,
 3671780, 4257846, 5662259, 13196905, 3237343, 12077182, 16222879, 7587769,
 14706824, 2184640, 12591135, 10420257, 7406075, 3648978, 11042541, 15906893,
 11914928, 4732944, 10695697, 12928164, 11980531, 4430912, 11939291, 2917017,
 6119256, 4172004, 9373765, 8410071, 14788383, 5047459, 5474428, 1737756,
 15967514, 13351758, 6691285, 8034329, 2856544, 14394753, 11310160, 12149558,
 7487528, 7542781, 15668898, 12525138, 12790975, 3707933, 9106617, 1965401,
 16219109, 12801644, 2443203, 4909502, 8762329, 3120803, 6360315, 9309720,
 15164599, 10844842, 4456529, 6667610, 14924259, 884312, 6234963, 3326042,
 15973422, 13919464, 5272099, 6414643, 3909029, 2764324, 5237926, 4774955,
 10445906, 4955302, 5203726, 10798229, 11443419, 2303395, 333836, 9646934,
 3464726, 4159182, 568492, 995747, 10318756, 13299332, 4836017, 8237783,
 3878992, 2581665, 11394667, 5672745, 14412947, 3159169, 9094251, 16467278,
 8671392, 15230076, 4843545, 7009238, 15504095, 1494895, 9627886, 14485051,
 8304291, 252817, 12421642, 16085736, 4774072, 2456177, 4160695, 15409741,
 4902868, 5793091, 13162925, 16039714, 782255, 11347835, 14884586, 366972,
 16308990, 11913488, 13390465, 2958444, 10340278, 1177858, 1319431, 10426302,
 2868597, 126119, 5784857, 5245324, 10903900, 16436004, 3389013, 1742384,
 14674502, 10279218, 8536112, 10364279, 6877778, 14051163, 1025130, 6072469,
 1988305, 8354440, 8216060, 16342977, 13112639, 3976679, 5913576, 8816697,
 6879995, 14043764, 3339515, 9364420, 15808858, 12261651, 2141560, 5636398,
 10345425, 10414756, 781725, 6155650, 4746914, 5078683, 7469001, 6799140,
 10156444, 9667150, 10116470, 4133858, 2121972, 1124204, 1003577, 1611214,
 14304602, 16221850, 13878465, 13577744, 3629235, 8772583, 10881308, 2410386,
 7300044, 5378855, 9301235, 12755149, 4977682, 8083074, 10327581, 6395087,
 9155434, 15501696, 7514362, 14520507, 15808945, 3244584, 4741962, 9658130,
 14336147, 8654727, 7969093, 15759799, 14029445, 5038459, 9894848, 8659300,
 13699287, 8834306, 10712885, 14753895, 10410465, 3373251, 309501, 9561475,
 5526688, 14647426, 14209836, 5339224, 207299, 14069911, 8722990, 2290950,
 3258216, 12505185, 6007317, 9218111, 14661019, 10537428, 11731949, 9027003,
 6641507, 9490160, 200241, 9720425, 16277895, 10816638, 1554761, 10431375,
 7467528, 6790302, 3429078, 14633753, 14428997, 11463204, 3576212, 2003426,
 6123687, 820520, 9992513, 15784513, 5778891, 6428165, 8388607
};

/*
 * The hardware uses an LFSR counting sequence to determine when to capture
 * the SPU PCs. An LFSR sequence is like a pseudo random number sequence
 * where each number occurs once in the sequence but the sequence is not in
 * numerical order. The SPU PC capture is done when the LFSR sequence reaches
 * the last value in the sequence. Hence the user specified value N
 * corresponds to the LFSR number that is N from the end of the sequence.
 *
 * To avoid the time to compute the LFSR, a lookup table is used. The 24 bit
 * LFSR sequence is broken into four ranges. The spacing of the precomputed
 * values is adjusted in each range so the error between the user specified
 * number (N) of events between samples and the actual number of events based
 * on the precomputed value will be less than about 6.2%. Note, if the user
 * specifies N < 2^16, the LFSR value that is 2^16 from the end will be used.
 * This is to prevent the loss of samples because the trace buffer is full.
 *
 *	User specified N		Step between		Index in
 *					precomputed values	precomputed
 *								table
 *	0 to 2^16-1			----			0
 *	2^16 to 2^16+2^19-1		2^12			1 to 128
 *	2^16+2^19 to 2^16+2^19+2^22-1	2^15			129 to 256
 *	2^16+2^19+2^22 to 2^24-1	2^18			257 to 302
 *
 *
 * For example, the LFSR values in the second range are computed for 2^16,
 * 2^16+2^12, ... , 2^19-2^16, 2^19 and stored in the table at indices
 * 1, 2,..., 127, 128.
 *
 * The 24 bit LFSR value for the nth number in the sequence can be
 * calculated using the following code:
 *
 * #define size 24
 * int calculate_lfsr(int n)
 * {
 *	int i;
 *	unsigned int newlfsr0;
 *	unsigned int lfsr = 0xFFFFFF;
 *	unsigned int howmany = n;
 *
 *	for (i = 2; i < howmany + 2; i++) {
 *		newlfsr0 = (((lfsr >> (size - 1 - 0)) & 1) ^
 *			    ((lfsr >> (size - 1 - 1)) & 1) ^
 *			    (((lfsr >> (size - 1 - 6)) & 1) ^
 *			     ((lfsr >> (size - 1 - 23)) & 1)));
 *
 *		lfsr >>= 1;
 *		lfsr = lfsr | (newlfsr0 << (size - 1));
 *	}
 *	return lfsr;
 * }
 */

#define V2_16	(0x1 << 16)
#define V2_19	(0x1 << 19)
#define V2_22	(0x1 << 22)

static int calculate_lfsr(int n)
{
	/*
	 * The ranges and steps are in powers of 2 so the calculations
	 * can be done using shifts rather than divide.
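	 *
	 * (Worked example, not from the original source: for
	 * n = 2^16 + 5*2^12, the second range applies and
	 * index = ((n - V2_16) >> 12) + 1 = 6.)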
	 */
	int index;

	if ((n >> 16) == 0)
		index = 0;
	else if (((n - V2_16) >> 19) == 0)
		index = ((n - V2_16) >> 12) + 1;
	else if (((n - V2_16 - V2_19) >> 22) == 0)
		index = ((n - V2_16 - V2_19) >> 15) + 1 + 128;
	else if (((n - V2_16 - V2_19 - V2_22) >> 24) == 0)
		index = ((n - V2_16 - V2_19 - V2_22) >> 18) + 1 + 256;
	else
		index = ENTRIES-1;

	/* make sure index is valid */
	if ((index >= ENTRIES) || (index < 0))
		index = ENTRIES-1;

	return initial_lfsr[index];
}

static int pm_rtas_activate_spu_profiling(u32 node)
{
	int ret, i;
	struct pm_signal pm_signal_local[NR_PHYS_CTRS];

	/*
	 * Set up the rtas call to configure the debug bus to
	 * route the SPU PCs. Set up the pm_signal for each SPU.
	 */
	for (i = 0; i < NUM_SPUS_PER_NODE; i++) {
		pm_signal_local[i].cpu = node;
		pm_signal_local[i].signal_group = 41;
		/* spu i on word (i/2) */
		pm_signal_local[i].bus_word = 1 << i / 2;
		/* spu i */
		pm_signal_local[i].sub_unit = i;
		pm_signal_local[i].bit = 63;
	}

	ret = rtas_ibm_cbe_perftools(SUBFUNC_ACTIVATE,
				     PASSTHRU_ENABLE, pm_signal_local,
				     (NUM_SPUS_PER_NODE
				      * sizeof(struct pm_signal)));

	if (unlikely(ret)) {
		printk(KERN_WARNING "%s: rtas returned: %d\n",
		       __FUNCTION__, ret);
		return -EIO;
	}

	return 0;
}

#ifdef CONFIG_CPU_FREQ
static int
oprof_cpufreq_notify(struct notifier_block *nb, unsigned long val, void *data)
{
	int ret = 0;
	struct cpufreq_freqs *frq = data;
	if ((val == CPUFREQ_PRECHANGE && frq->old < frq->new) ||
	    (val == CPUFREQ_POSTCHANGE && frq->old > frq->new) ||
	    (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE))
		set_spu_profiling_frequency(frq->new, spu_cycle_reset);
	return ret;
}

static struct notifier_block cpu_freq_notifier_block = {
	.notifier_call = oprof_cpufreq_notify
};
#endif

static int cell_global_start_spu(struct op_counter_config *ctr)
{
	int subfunc;
	unsigned int lfsr_value;
	int cpu;
	int ret;
	int rtas_error;
	unsigned int cpu_khzfreq = 0;

	/* The SPU profiling uses time-based profiling based on
	 * cpu frequency, so if configured with the CPU_FREQ
	 * option, we should detect frequency changes and react
	 * accordingly.
	 */
#ifdef CONFIG_CPU_FREQ
	ret = cpufreq_register_notifier(&cpu_freq_notifier_block,
					CPUFREQ_TRANSITION_NOTIFIER);
	if (ret < 0)
		/* this is not a fatal error */
		printk(KERN_ERR "CPU freq change registration failed: %d\n",
		       ret);

	else
		cpu_khzfreq = cpufreq_quick_get(smp_processor_id());
#endif

	set_spu_profiling_frequency(cpu_khzfreq, spu_cycle_reset);

	for_each_online_cpu(cpu) {
		if (cbe_get_hw_thread_id(cpu))
			continue;

		/*
		 * Setup SPU cycle-based profiling.
		 * Set perf_mon_control bit 0 to a zero before
		 * enabling spu collection hardware.
		 */
		cbe_write_pm(cpu, pm_control, 0);

		if (spu_cycle_reset > MAX_SPU_COUNT)
			/* use largest possible value */
			lfsr_value = calculate_lfsr(MAX_SPU_COUNT-1);
		else
			lfsr_value = calculate_lfsr(spu_cycle_reset);

		/* must use a non-zero value. Zero disables data collection. */
		if (lfsr_value == 0)
			lfsr_value = calculate_lfsr(1);

		lfsr_value = lfsr_value << 8; /* shift lfsr to correct
					       * register location
					       */

		/* debug bus setup */
		ret = pm_rtas_activate_spu_profiling(cbe_cpu_to_node(cpu));

		if (unlikely(ret)) {
			rtas_error = ret;
			goto out;
		}

		subfunc = 2;	/* 2 - activate SPU tracing, 3 - deactivate */

		/* start profiling */
		ret = rtas_call(spu_rtas_token, 3, 1, NULL, subfunc,
				cbe_cpu_to_node(cpu), lfsr_value);

		if (unlikely(ret != 0)) {
			printk(KERN_ERR
			       "%s: rtas call ibm,cbe-spu-perftools failed, return = %d\n",
			       __FUNCTION__, ret);
			rtas_error = -EIO;
			goto out;
		}
	}

	rtas_error = start_spu_profiling(spu_cycle_reset);
	if (rtas_error)
		goto out_stop;

	oprofile_running = 1;
	return 0;

out_stop:
	cell_global_stop_spu();		/* clean up the PMU/debug bus */
out:
	return rtas_error;
}

static int cell_global_start_ppu(struct op_counter_config *ctr)
{
	u32 cpu, i;
	u32 interrupt_mask = 0;

	/* This routine gets called once for the system.
	 * There is one performance monitor per node, so we
	 * only need to perform this function once per node.
	 */
	for_each_online_cpu(cpu) {
		if (cbe_get_hw_thread_id(cpu))
			continue;

		interrupt_mask = 0;

		for (i = 0; i < num_counters; ++i) {
			if (ctr_enabled & (1 << i)) {
				cbe_write_ctr(cpu, i, reset_value[i]);
				enable_ctr(cpu, i, pm_regs.pm07_cntrl);
				interrupt_mask |=
					CBE_PM_CTR_OVERFLOW_INTR(i);
			} else {
				/* Disable counter */
				cbe_write_pm07_control(cpu, i, 0);
			}
		}

		cbe_get_and_clear_pm_interrupts(cpu);
		cbe_enable_pm_interrupts(cpu, hdw_thread, interrupt_mask);
		cbe_enable_pm(cpu);
	}

	virt_cntr_inter_mask = interrupt_mask;
	oprofile_running = 1;
	smp_wmb();

	/*
	 * NOTE: start_virt_cntrs will result in cell_virtual_cntr() being
	 * executed which manipulates the PMU. We start the "virtual counter"
	 * here so that we do not need to synchronize access to the PMU in
	 * the above for-loop.
	 */
	start_virt_cntrs();

	return 0;
}

static int cell_global_start(struct op_counter_config *ctr)
{
	if (spu_cycle_reset)
		return cell_global_start_spu(ctr);
	else
		return cell_global_start_ppu(ctr);
}

/*
 * Note the generic OProfile stop calls do not support returning
 * an error on stop. Hence, we will not return an error if the FW
 * calls fail on stop. Failure to reset the debug bus is not an issue.
 * Failure to disable the SPU profiling is not an issue. The FW calls
 * to enable the performance counters and debug bus will work even if
 * the hardware was not cleanly reset.
 */
static void cell_global_stop_spu(void)
{
	int subfunc, rtn_value;
	unsigned int lfsr_value;
	int cpu;

	oprofile_running = 0;

#ifdef CONFIG_CPU_FREQ
	cpufreq_unregister_notifier(&cpu_freq_notifier_block,
				    CPUFREQ_TRANSITION_NOTIFIER);
#endif

	for_each_online_cpu(cpu) {
		if (cbe_get_hw_thread_id(cpu))
			continue;

		subfunc = 3;	/*
				 * 2 - activate SPU tracing,
				 * 3 - deactivate
				 */
		lfsr_value = 0x8f100000;

		rtn_value = rtas_call(spu_rtas_token, 3, 1, NULL,
				      subfunc, cbe_cpu_to_node(cpu),
				      lfsr_value);

		if (unlikely(rtn_value != 0)) {
			printk(KERN_ERR
			       "%s: rtas call ibm,cbe-spu-perftools failed, return = %d\n",
			       __FUNCTION__, rtn_value);
		}

		/* Deactivate the signals */
		pm_rtas_reset_signals(cbe_cpu_to_node(cpu));
	}

	stop_spu_profiling();
}

static void cell_global_stop_ppu(void)
{
	int cpu;

	/*
	 * This routine will be called once for the system.
	 * There is one performance monitor per node, so we
	 * only need to perform this function once per node.
	 */
	del_timer_sync(&timer_virt_cntr);
	oprofile_running = 0;
	smp_wmb();

	for_each_online_cpu(cpu) {
		if (cbe_get_hw_thread_id(cpu))
			continue;

		cbe_sync_irq(cbe_cpu_to_node(cpu));
		/* Stop the counters */
		cbe_disable_pm(cpu);

		/* Deactivate the signals */
		pm_rtas_reset_signals(cbe_cpu_to_node(cpu));

		/* Deactivate interrupts */
		cbe_disable_pm_interrupts(cpu);
	}
}

static void cell_global_stop(void)
{
	if (spu_cycle_reset)
		cell_global_stop_spu();
	else
		cell_global_stop_ppu();
}

static void cell_handle_interrupt(struct pt_regs *regs,
				  struct op_counter_config *ctr)
{
	u32 cpu;
	u64 pc;
	int is_kernel;
	unsigned long flags = 0;
	u32 interrupt_mask;
	int i;

	cpu = smp_processor_id();

	/*
	 * Need to make sure the interrupt handler and the virt counter
	 * routine are not running at the same time. See the
	 * cell_virtual_cntr() routine for additional comments.
	 */
	spin_lock_irqsave(&virt_cntr_lock, flags);

	/*
	 * Need to disable and reenable the performance counters
	 * to get the desired behavior from the hardware. This
	 * is hardware specific.
	 */

	cbe_disable_pm(cpu);

	interrupt_mask = cbe_get_and_clear_pm_interrupts(cpu);

	/*
	 * If the interrupt mask has been cleared, then the virt cntr
	 * has cleared the interrupt. When the thread that generated
	 * the interrupt is restored, the data count will be restored to
	 * 0xFFFFFFF0 to cause the interrupt to be regenerated.
	 */

	if ((oprofile_running == 1) && (interrupt_mask != 0)) {
		pc = regs->nip;
		is_kernel = is_kernel_addr(pc);

		for (i = 0; i < num_counters; ++i) {
			if ((interrupt_mask & CBE_PM_CTR_OVERFLOW_INTR(i))
			    && ctr[i].enabled) {
				oprofile_add_ext_sample(pc, regs, i, is_kernel);
				cbe_write_ctr(cpu, i, reset_value[i]);
			}
		}

		/*
		 * The counters were frozen by the interrupt.
		 * Reenable the interrupt and restart the counters.
		 * There may have been a race between the interrupt
		 * handler and the virtual counter routine; the virtual
		 * counter routine may have cleared the interrupts. Hence
		 * we must use the virt_cntr_inter_mask to re-enable the
		 * interrupts.
		 */
		cbe_enable_pm_interrupts(cpu, hdw_thread,
					 virt_cntr_inter_mask);

		/*
		 * The writes to the various performance counters only write
		 * to a latch. The new values (interrupt setting bits, reset
		 * counter value etc.) are not copied to the actual registers
		 * until the performance monitor is enabled. In order to get
		 * this to work as desired, the performance monitor needs to
		 * be disabled while writing to the latches. This is a
		 * HW design issue.
		 */
		cbe_enable_pm(cpu);
	}
	spin_unlock_irqrestore(&virt_cntr_lock, flags);
}

/*
 * This function is called from the generic OProfile
 * driver. When profiling PPUs, we need to do the
 * generic sync start; otherwise, do spu_sync_start.
 */
static int cell_sync_start(void)
{
	if (spu_cycle_reset)
		return spu_sync_start();
	else
		return DO_GENERIC_SYNC;
}

static int cell_sync_stop(void)
{
	if (spu_cycle_reset)
		return spu_sync_stop();
	else
		return 1;
}

struct op_powerpc_model op_model_cell = {
	.reg_setup = cell_reg_setup,
	.cpu_setup = cell_cpu_setup,
	.global_start = cell_global_start,
	.global_stop = cell_global_stop,
	.sync_start = cell_sync_start,
	.sync_stop = cell_sync_stop,
	.handle_interrupt = cell_handle_interrupt,
};