/*
 * Low-level SPU handling
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
 *
 * Author: Arnd Bergmann <arndb@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#undef DEBUG

#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/mutex.h>
#include <linux/linux_logo.h>
#include <asm/spu.h>
#include <asm/spu_priv1.h>
#include <asm/xmon.h>
#include <asm/prom.h>

const struct spu_management_ops *spu_management_ops;
EXPORT_SYMBOL_GPL(spu_management_ops);

const struct spu_priv1_ops *spu_priv1_ops;
EXPORT_SYMBOL_GPL(spu_priv1_ops);

struct cbe_spu_info cbe_spu_info[MAX_NUMNODES];
EXPORT_SYMBOL_GPL(cbe_spu_info);

/*
 * Protects cbe_spu_info and spu->number.
 */
static DEFINE_SPINLOCK(spu_lock);

/*
 * List of all spus in the system.
 *
 * This list is iterated by callers from irq context and callers that
 * want to sleep.  Thus modifications need to be done with both
 * spu_full_list_lock and spu_full_list_mutex held, while iterating
 * through it requires either of these locks.
 *
 * In addition spu_full_list_lock protects all assignments to
 * spu->mm.
 */
static LIST_HEAD(spu_full_list);
static DEFINE_SPINLOCK(spu_full_list_lock);
static DEFINE_MUTEX(spu_full_list_mutex);

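/* Invalidate all SLB entries on one SPE. The write to
 * SLB_Invalidate_All is only done while translation is enabled
 * (MFC_STATE1_RELOCATE_MASK set in SR1).
 */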
void spu_invalidate_slbs(struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	if (spu_mfc_sr1_get(spu) & MFC_STATE1_RELOCATE_MASK)
		out_be64(&priv2->slb_invalidate_all_W, 0UL);
}
EXPORT_SYMBOL_GPL(spu_invalidate_slbs);

/* This is called by the MM core when a segment size is changed, to
 * request a flush of all the SPEs using a given mm
 */
void spu_flush_all_slbs(struct mm_struct *mm)
{
	struct spu *spu;
	unsigned long flags;

	spin_lock_irqsave(&spu_full_list_lock, flags);
	list_for_each_entry(spu, &spu_full_list, full_list) {
		if (spu->mm == mm)
			spu_invalidate_slbs(spu);
	}
	spin_unlock_irqrestore(&spu_full_list_lock, flags);
}

/* The hack below stinks... try to do something better one of
 * these days... Does it even work properly with NR_CPUS == 1 ?
 */
static inline void mm_needs_global_tlbie(struct mm_struct *mm)
{
	int nr = (NR_CPUS > 1) ? NR_CPUS : NR_CPUS + 1;

	/* Global TLBIE broadcast required with SPEs. */
	__cpus_setall(&mm->cpu_vm_mask, nr);
}

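/* Switch the mm that SPE translations are resolved against. The
 * assignment is done under spu_full_list_lock so spu_flush_all_slbs()
 * sees a consistent spu->mm, and the new mm is marked as needing
 * global TLB invalidations.
 */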
void spu_associate_mm(struct spu *spu, struct mm_struct *mm)
{
	unsigned long flags;

	spin_lock_irqsave(&spu_full_list_lock, flags);
	spu->mm = mm;
	spin_unlock_irqrestore(&spu_full_list_lock, flags);
	if (mm)
		mm_needs_global_tlbie(mm);
}
EXPORT_SYMBOL_GPL(spu_associate_mm);

static int __spu_trap_invalid_dma(struct spu *spu)
{
	pr_debug("%s\n", __FUNCTION__);
	spu->dma_callback(spu, SPE_EVENT_INVALID_DMA);
	return 0;
}

static int __spu_trap_dma_align(struct spu *spu)
{
	pr_debug("%s\n", __FUNCTION__);
	spu->dma_callback(spu, SPE_EVENT_DMA_ALIGNMENT);
	return 0;
}

static int __spu_trap_error(struct spu *spu)
{
	pr_debug("%s\n", __FUNCTION__);
	spu->dma_callback(spu, SPE_EVENT_SPE_ERROR);
	return 0;
}

static void spu_restart_dma(struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	if (!test_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags))
		out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESTART_DMA_COMMAND);
}

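/* Handle an SLB miss reported by the MFC: build an SLB entry for the
 * faulting effective address, write it into the next replacement slot
 * (round-robin over the eight entries), and restart the suspended DMA.
 */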
static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	struct mm_struct *mm = spu->mm;
	u64 esid, vsid, llp;
	int psize;

	pr_debug("%s\n", __FUNCTION__);

	if (test_bit(SPU_CONTEXT_SWITCH_ACTIVE, &spu->flags)) {
		/* SLBs are pre-loaded for context switch, so
		 * we should never get here!
		 */
		printk("%s: invalid access during switch!\n", __func__);
		return 1;
	}
	esid = (ea & ESID_MASK) | SLB_ESID_V;

	switch(REGION_ID(ea)) {
	case USER_REGION_ID:
#ifdef CONFIG_PPC_MM_SLICES
		psize = get_slice_psize(mm, ea);
#else
		psize = mm->context.user_psize;
#endif
		vsid = (get_vsid(mm->context.id, ea) << SLB_VSID_SHIFT) |
				SLB_VSID_USER;
		break;
	case VMALLOC_REGION_ID:
		if (ea < VMALLOC_END)
			psize = mmu_vmalloc_psize;
		else
			psize = mmu_io_psize;
		vsid = (get_kernel_vsid(ea) << SLB_VSID_SHIFT) |
			SLB_VSID_KERNEL;
		break;
	case KERNEL_REGION_ID:
		psize = mmu_linear_psize;
		vsid = (get_kernel_vsid(ea) << SLB_VSID_SHIFT) |
			SLB_VSID_KERNEL;
		break;
	default:
		/* Future: support kernel segments so that drivers
		 * can use SPUs.
		 */
		pr_debug("invalid region access at %016lx\n", ea);
		return 1;
	}
	llp = mmu_psize_defs[psize].sllp;

	out_be64(&priv2->slb_index_W, spu->slb_replace);
	out_be64(&priv2->slb_vsid_RW, vsid | llp);
	out_be64(&priv2->slb_esid_RW, esid);

	spu->slb_replace++;
	if (spu->slb_replace >= 8)
		spu->slb_replace = 0;

	spu_restart_dma(spu);
	spu->stats.slb_flt++;
	return 0;
}

extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap); //XXX
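/* Handle a mapping (hash) fault from the MFC. Kernel-side hash faults
 * are resolved immediately via hash_page(); everything else is saved
 * in spu->dar/dsisr and handed to the stop callback for resolution
 * from process context.
 */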
static int __spu_trap_data_map(struct spu *spu, unsigned long ea, u64 dsisr)
{
	pr_debug("%s, %lx, %lx\n", __FUNCTION__, dsisr, ea);

	/* Handle kernel space hash faults immediately.
	   User hash faults need to be deferred to process context. */
	if ((dsisr & MFC_DSISR_PTE_NOT_FOUND)
	    && REGION_ID(ea) != USER_REGION_ID
	    && hash_page(ea, _PAGE_PRESENT, 0x300) == 0) {
		spu_restart_dma(spu);
		return 0;
	}

	if (test_bit(SPU_CONTEXT_SWITCH_ACTIVE, &spu->flags)) {
		printk("%s: invalid access during switch!\n", __func__);
		return 1;
	}

	spu->dar = ea;
	spu->dsisr = dsisr;
	mb();
	spu->stop_callback(spu);
	return 0;
}

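/* Class 0 interrupts signal SPE errors (DMA alignment, invalid DMA,
 * SPU error). The hard irq handler only marks the class as pending;
 * the status bits are handled later in spu_irq_class_0_bottom().
 */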
static irqreturn_t
spu_irq_class_0(int irq, void *data)
{
	struct spu *spu;

	spu = data;
	spu->class_0_pending = 1;
	spu->stop_callback(spu);

	return IRQ_HANDLED;
}

int
spu_irq_class_0_bottom(struct spu *spu)
{
	unsigned long stat, mask;
	unsigned long flags;

	spu->class_0_pending = 0;

	spin_lock_irqsave(&spu->register_lock, flags);
	mask = spu_int_mask_get(spu, 0);
	stat = spu_int_stat_get(spu, 0);

	stat &= mask;

	if (stat & 1) /* invalid DMA alignment */
		__spu_trap_dma_align(spu);

	if (stat & 2) /* invalid MFC DMA */
		__spu_trap_invalid_dma(spu);

	if (stat & 4) /* error on SPU */
		__spu_trap_error(spu);

	spu_int_stat_clear(spu, 0, stat);
	spin_unlock_irqrestore(&spu->register_lock, flags);

	return (stat & 0x7) ? -EIO : 0;
}
EXPORT_SYMBOL_GPL(spu_irq_class_0_bottom);

static irqreturn_t
spu_irq_class_1(int irq, void *data)
{
	struct spu *spu;
	unsigned long stat, mask, dar, dsisr;

	spu = data;

	/* atomically read & clear class1 status. */
	spin_lock(&spu->register_lock);
	mask = spu_int_mask_get(spu, 1);
	stat = spu_int_stat_get(spu, 1) & mask;
	dar = spu_mfc_dar_get(spu);
	dsisr = spu_mfc_dsisr_get(spu);
	if (stat & 2) /* mapping fault */
		spu_mfc_dsisr_set(spu, 0ul);
	spu_int_stat_clear(spu, 1, stat);
	spin_unlock(&spu->register_lock);
	pr_debug("%s: %lx %lx %lx %lx\n", __FUNCTION__, mask, stat,
			dar, dsisr);

	if (stat & 1) /* segment fault */
		__spu_trap_data_seg(spu, dar);

	if (stat & 2) { /* mapping fault */
		__spu_trap_data_map(spu, dar, dsisr);
	}

	if (stat & 4) /* ls compare & suspend on get */
		;

	if (stat & 8) /* ls compare & suspend on put */
		;

	return stat ? IRQ_HANDLED : IRQ_NONE;
}

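/* Class 2 interrupts cover the mailboxes, SPU stop-and-signal, SPU
 * halt and DMA tag group completion; each event is dispatched to the
 * callback installed by the SPU's owner.
 */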
static irqreturn_t
spu_irq_class_2(int irq, void *data)
{
	struct spu *spu;
	unsigned long stat;
	unsigned long mask;

	spu = data;
	spin_lock(&spu->register_lock);
	stat = spu_int_stat_get(spu, 2);
	mask = spu_int_mask_get(spu, 2);
	/* ignore interrupts we're not waiting for */
	stat &= mask;
	/*
	 * mailbox interrupts (0x1 and 0x10) are level triggered.
	 * mask them now before acknowledging.
	 */
	if (stat & 0x11)
		spu_int_mask_and(spu, 2, ~(stat & 0x11));
	/* acknowledge all interrupts before the callbacks */
	spu_int_stat_clear(spu, 2, stat);
	spin_unlock(&spu->register_lock);

	pr_debug("class 2 interrupt %d, %lx, %lx\n", irq, stat, mask);

	if (stat & 1)  /* PPC core mailbox */
		spu->ibox_callback(spu);

	if (stat & 2) /* SPU stop-and-signal */
		spu->stop_callback(spu);

	if (stat & 4) /* SPU halted */
		spu->stop_callback(spu);

	if (stat & 8) /* DMA tag group complete */
		spu->mfc_callback(spu);

	if (stat & 0x10) /* SPU mailbox threshold */
		spu->wbox_callback(spu);

	spu->stats.class2_intr++;
	return stat ? IRQ_HANDLED : IRQ_NONE;
}

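/* Request the three per-SPE interrupt lines, one per interrupt class.
 * Lines that are not wired up (NO_IRQ) are skipped.
 */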
static int spu_request_irqs(struct spu *spu)
{
	int ret = 0;

	if (spu->irqs[0] != NO_IRQ) {
		snprintf(spu->irq_c0, sizeof (spu->irq_c0), "spe%02d.0",
			 spu->number);
		ret = request_irq(spu->irqs[0], spu_irq_class_0,
				  IRQF_DISABLED,
				  spu->irq_c0, spu);
		if (ret)
			goto bail0;
	}
	if (spu->irqs[1] != NO_IRQ) {
		snprintf(spu->irq_c1, sizeof (spu->irq_c1), "spe%02d.1",
			 spu->number);
		ret = request_irq(spu->irqs[1], spu_irq_class_1,
				  IRQF_DISABLED,
				  spu->irq_c1, spu);
		if (ret)
			goto bail1;
	}
	if (spu->irqs[2] != NO_IRQ) {
		snprintf(spu->irq_c2, sizeof (spu->irq_c2), "spe%02d.2",
			 spu->number);
		ret = request_irq(spu->irqs[2], spu_irq_class_2,
				  IRQF_DISABLED,
				  spu->irq_c2, spu);
		if (ret)
			goto bail2;
	}
	return 0;

bail2:
	if (spu->irqs[1] != NO_IRQ)
		free_irq(spu->irqs[1], spu);
bail1:
	if (spu->irqs[0] != NO_IRQ)
		free_irq(spu->irqs[0], spu);
bail0:
	return ret;
}

static void spu_free_irqs(struct spu *spu)
{
	if (spu->irqs[0] != NO_IRQ)
		free_irq(spu->irqs[0], spu);
	if (spu->irqs[1] != NO_IRQ)
		free_irq(spu->irqs[1], spu);
	if (spu->irqs[2] != NO_IRQ)
		free_irq(spu->irqs[2], spu);
}

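/* Bring the SPE channels into a known state: clear the data of the
 * channels in zero_list and set the channel counts in count_list to
 * their initial values.
 */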
void spu_init_channels(struct spu *spu)
{
	static const struct {
		unsigned channel;
		unsigned count;
	} zero_list[] = {
		{ 0x00, 1, }, { 0x01, 1, }, { 0x03, 1, }, { 0x04, 1, },
		{ 0x18, 1, }, { 0x19, 1, }, { 0x1b, 1, }, { 0x1d, 1, },
	}, count_list[] = {
		{ 0x00, 0, }, { 0x03, 0, }, { 0x04, 0, }, { 0x15, 16, },
		{ 0x17, 1, }, { 0x18, 0, }, { 0x19, 0, }, { 0x1b, 0, },
		{ 0x1c, 1, }, { 0x1d, 0, }, { 0x1e, 1, },
	};
	struct spu_priv2 __iomem *priv2;
	int i;

	priv2 = spu->priv2;

	/* initialize all channel data to zero */
	for (i = 0; i < ARRAY_SIZE(zero_list); i++) {
		int count;

		out_be64(&priv2->spu_chnlcntptr_RW, zero_list[i].channel);
		for (count = 0; count < zero_list[i].count; count++)
			out_be64(&priv2->spu_chnldata_RW, 0);
	}

	/* initialize channel counts to meaningful values */
	for (i = 0; i < ARRAY_SIZE(count_list); i++) {
		out_be64(&priv2->spu_chnlcntptr_RW, count_list[i].channel);
		out_be64(&priv2->spu_chnlcnt_RW, count_list[i].count);
	}
}
EXPORT_SYMBOL_GPL(spu_init_channels);

static int spu_shutdown(struct sys_device *sysdev)
{
	struct spu *spu = container_of(sysdev, struct spu, sysdev);

	spu_free_irqs(spu);
	spu_destroy_spu(spu);
	return 0;
}

struct sysdev_class spu_sysdev_class = {
	set_kset_name("spu"),
	.shutdown = spu_shutdown,
};

int spu_add_sysdev_attr(struct sysdev_attribute *attr)
{
	struct spu *spu;

	mutex_lock(&spu_full_list_mutex);
	list_for_each_entry(spu, &spu_full_list, full_list)
		sysdev_create_file(&spu->sysdev, attr);
	mutex_unlock(&spu_full_list_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(spu_add_sysdev_attr);

int spu_add_sysdev_attr_group(struct attribute_group *attrs)
{
	struct spu *spu;

	mutex_lock(&spu_full_list_mutex);
	list_for_each_entry(spu, &spu_full_list, full_list)
		sysfs_create_group(&spu->sysdev.kobj, attrs);
	mutex_unlock(&spu_full_list_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(spu_add_sysdev_attr_group);


void spu_remove_sysdev_attr(struct sysdev_attribute *attr)
{
	struct spu *spu;

	mutex_lock(&spu_full_list_mutex);
	list_for_each_entry(spu, &spu_full_list, full_list)
		sysdev_remove_file(&spu->sysdev, attr);
	mutex_unlock(&spu_full_list_mutex);
}
EXPORT_SYMBOL_GPL(spu_remove_sysdev_attr);

void spu_remove_sysdev_attr_group(struct attribute_group *attrs)
{
	struct spu *spu;

	mutex_lock(&spu_full_list_mutex);
	list_for_each_entry(spu, &spu_full_list, full_list)
		sysfs_remove_group(&spu->sysdev.kobj, attrs);
	mutex_unlock(&spu_full_list_mutex);
}
EXPORT_SYMBOL_GPL(spu_remove_sysdev_attr_group);

static int spu_create_sysdev(struct spu *spu)
{
	int ret;

	spu->sysdev.id = spu->number;
	spu->sysdev.cls = &spu_sysdev_class;
	ret = sysdev_register(&spu->sysdev);
	if (ret) {
		printk(KERN_ERR "Can't register SPU %d with sysfs\n",
				spu->number);
		return ret;
	}

	sysfs_add_device_to_node(&spu->sysdev, spu->node);

	return 0;
}

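/* Set up one physical SPE: allocate and number the spu structure, let
 * the platform code create it, request the interrupts, register the
 * sysdev and add the spu to the per-node and global lists.
 */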
static int __init create_spu(void *data)
{
	struct spu *spu;
	int ret;
	static int number;
	unsigned long flags;
	struct timespec ts;

	ret = -ENOMEM;
	spu = kzalloc(sizeof (*spu), GFP_KERNEL);
	if (!spu)
		goto out;

	spu->alloc_state = SPU_FREE;

	spin_lock_init(&spu->register_lock);
	spin_lock(&spu_lock);
	spu->number = number++;
	spin_unlock(&spu_lock);

	ret = spu_create_spu(spu, data);

	if (ret)
		goto out_free;

	spu_mfc_sdr_setup(spu);
	spu_mfc_sr1_set(spu, 0x33);
	ret = spu_request_irqs(spu);
	if (ret)
		goto out_destroy;

	ret = spu_create_sysdev(spu);
	if (ret)
		goto out_free_irqs;

	mutex_lock(&cbe_spu_info[spu->node].list_mutex);
	list_add(&spu->cbe_list, &cbe_spu_info[spu->node].spus);
	cbe_spu_info[spu->node].n_spus++;
	mutex_unlock(&cbe_spu_info[spu->node].list_mutex);

	mutex_lock(&spu_full_list_mutex);
	spin_lock_irqsave(&spu_full_list_lock, flags);
	list_add(&spu->full_list, &spu_full_list);
	spin_unlock_irqrestore(&spu_full_list_lock, flags);
	mutex_unlock(&spu_full_list_mutex);

	spu->stats.util_state = SPU_UTIL_IDLE_LOADED;
	ktime_get_ts(&ts);
	spu->stats.tstamp = timespec_to_ns(&ts);

	INIT_LIST_HEAD(&spu->aff_list);

	goto out;

out_free_irqs:
	spu_free_irqs(spu);
out_destroy:
	spu_destroy_spu(spu);
out_free:
	kfree(spu);
out:
	return ret;
}

static const char *spu_state_names[] = {
	"user", "system", "iowait", "idle"
};

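/* Return the time in milliseconds that the SPU has spent in the given
 * utilization state, including the time since the last state change
 * if that state is still current.
 */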
static unsigned long long spu_acct_time(struct spu *spu,
		enum spu_utilization_state state)
{
	struct timespec ts;
	unsigned long long time = spu->stats.times[state];

	/*
	 * If the spu is idle or the context is stopped, utilization
	 * statistics are not updated.  Apply the time delta from the
	 * last recorded state of the spu.
	 */
	if (spu->stats.util_state == state) {
		ktime_get_ts(&ts);
		time += timespec_to_ns(&ts) - spu->stats.tstamp;
	}

	return time / NSEC_PER_MSEC;
}

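/* sysfs "stat" attribute: one line containing the current utilization
 * state, the per-state times and the fault, context switch and
 * interrupt counters.
 */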
static ssize_t spu_stat_show(struct sys_device *sysdev, char *buf)
{
	struct spu *spu = container_of(sysdev, struct spu, sysdev);

	return sprintf(buf, "%s %llu %llu %llu %llu "
		"%llu %llu %llu %llu %llu %llu %llu %llu\n",
		spu_state_names[spu->stats.util_state],
		spu_acct_time(spu, SPU_UTIL_USER),
		spu_acct_time(spu, SPU_UTIL_SYSTEM),
		spu_acct_time(spu, SPU_UTIL_IOWAIT),
		spu_acct_time(spu, SPU_UTIL_IDLE_LOADED),
		spu->stats.vol_ctx_switch,
		spu->stats.invol_ctx_switch,
		spu->stats.slb_flt,
		spu->stats.hash_flt,
		spu->stats.min_flt,
		spu->stats.maj_flt,
		spu->stats.class2_intr,
		spu->stats.libassist);
}

static SYSDEV_ATTR(stat, 0644, spu_stat_show, NULL);

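/* Module init: register the sysdev class, enumerate and create the
 * SPEs through the platform management ops, register them with xmon
 * and crash dump support, and add the stat attribute.
 */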
static int __init init_spu_base(void)
{
	int i, ret = 0;

	for (i = 0; i < MAX_NUMNODES; i++) {
		mutex_init(&cbe_spu_info[i].list_mutex);
		INIT_LIST_HEAD(&cbe_spu_info[i].spus);
	}

	if (!spu_management_ops)
		goto out;

	/* create sysdev class for spus */
	ret = sysdev_class_register(&spu_sysdev_class);
	if (ret)
		goto out;

	ret = spu_enumerate_spus(create_spu);

	if (ret < 0) {
		printk(KERN_WARNING "%s: Error initializing spus\n",
			__FUNCTION__);
		goto out_unregister_sysdev_class;
	}

	if (ret > 0) {
		/*
		 * We cannot put the forward declaration in
		 * <linux/linux_logo.h> because it causes section type
		 * conflicts between const and __initdata with different
		 * compiler versions.
		 */
		extern const struct linux_logo logo_spe_clut224;

		fb_append_extra_logo(&logo_spe_clut224, ret);
	}

	mutex_lock(&spu_full_list_mutex);
	xmon_register_spus(&spu_full_list);
	crash_register_spus(&spu_full_list);
	mutex_unlock(&spu_full_list_mutex);
	spu_add_sysdev_attr(&attr_stat);

	spu_init_affinity();

	return 0;

 out_unregister_sysdev_class:
	sysdev_class_unregister(&spu_sysdev_class);
 out:
	return ret;
}
module_init(init_spu_base);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>");