/*
 * Low-level SPU handling
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
 *
 * Author: Arnd Bergmann <arndb@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#undef DEBUG

#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/wait.h>

#include <asm/io.h>
#include <asm/prom.h>
#include <asm/semaphore.h>
#include <asm/spu.h>
#include <asm/mmu_context.h>

#include "interrupt.h"

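/*
 * Each SPE raises three classes of interrupts: class 0 for error
 * conditions, class 1 for address translation faults raised by the
 * MFC (SLB misses and hash-table misses), and class 2 for mailbox,
 * stop-and-signal, halt and DMA tag-group completion events. The
 * __spu_trap_* helpers below are the per-event actions dispatched
 * from those handlers.
 */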
static int __spu_trap_invalid_dma(struct spu *spu)
{
	pr_debug("%s\n", __func__);
	force_sig(SIGBUS, /* info, */ current);
	return 0;
}

static int __spu_trap_dma_align(struct spu *spu)
{
	pr_debug("%s\n", __func__);
	force_sig(SIGBUS, /* info, */ current);
	return 0;
}

static int __spu_trap_error(struct spu *spu)
{
	pr_debug("%s\n", __func__);
	force_sig(SIGILL, /* info, */ current);
	return 0;
}

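/*
 * Restart MFC command queue processing after a fault has been
 * resolved. Skipped while a context switch is pending, as the
 * switch code saves and restores the MFC state itself.
 */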
static void spu_restart_dma(struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	if (!test_bit(SPU_CONTEXT_SWITCH_PENDING_nr, &spu->flags))
		out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESTART_DMA_COMMAND);
}

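/*
 * Handle an SLB miss raised by the MFC: build and install a new SLB
 * entry for the faulting effective address, using a simple round-robin
 * replacement policy over the eight SLB slots, then restart the DMA
 * that faulted. Only user-region addresses are handled here.
 */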
static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	struct mm_struct *mm = spu->mm;
	u64 esid, vsid;

	pr_debug("%s\n", __func__);

	if (test_bit(SPU_CONTEXT_SWITCH_ACTIVE_nr, &spu->flags)) {
		/* SLBs are pre-loaded for context switch, so
		 * we should never get here!
		 */
		printk(KERN_ERR "%s: invalid access during switch!\n", __func__);
		return 1;
	}
	if (!mm || (REGION_ID(ea) != USER_REGION_ID)) {
		/* Future: support kernel segments so that drivers
		 * can use SPUs.
		 */
		pr_debug("invalid region access at %016lx\n", ea);
		return 1;
	}

	esid = (ea & ESID_MASK) | SLB_ESID_V;
	vsid = (get_vsid(mm->context.id, ea) << SLB_VSID_SHIFT) | SLB_VSID_USER;
	if (in_hugepage_area(mm->context, ea))
		vsid |= SLB_VSID_L;

	out_be64(&priv2->slb_index_W, spu->slb_replace);
	out_be64(&priv2->slb_vsid_RW, vsid);
	out_be64(&priv2->slb_esid_RW, esid);

	spu->slb_replace++;
	if (spu->slb_replace >= 8)
		spu->slb_replace = 0;

	spu_restart_dma(spu);

	return 0;
}

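/*
 * Handle a hash-table miss from the MFC. Kernel-space misses can be
 * resolved directly via hash_page(); user-space misses are recorded
 * in spu->dar/dsisr and deferred to the owning thread through the
 * stop callback, so the fault can be serviced in process context.
 */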
extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap); //XXX
static int __spu_trap_data_map(struct spu *spu, unsigned long ea, u64 dsisr)
{
	pr_debug("%s\n", __func__);

	/* Handle kernel space hash faults immediately.
	   User hash faults need to be deferred to process context. */
	if ((dsisr & MFC_DSISR_PTE_NOT_FOUND)
	    && REGION_ID(ea) != USER_REGION_ID
	    && hash_page(ea, _PAGE_PRESENT, 0x300) == 0) {
		spu_restart_dma(spu);
		return 0;
	}

	if (test_bit(SPU_CONTEXT_SWITCH_ACTIVE_nr, &spu->flags)) {
		printk(KERN_ERR "%s: invalid access during switch!\n", __func__);
		return 1;
	}

	spu->dar = ea;
	spu->dsisr = dsisr;
	mb();
	if (spu->stop_callback)
		spu->stop_callback(spu);
	return 0;
}

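/*
 * Mailbox data has become available to the PPE side. Notify the
 * registered reader and mask this source in the class 2 interrupt
 * mask until it is explicitly re-enabled.
 */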
static int __spu_trap_mailbox(struct spu *spu)
{
	if (spu->ibox_callback)
		spu->ibox_callback(spu);

	/* atomically disable SPU mailbox interrupts */
	spin_lock(&spu->register_lock);
	out_be64(&spu->priv1->int_mask_class2_RW,
		 in_be64(&spu->priv1->int_mask_class2_RW) & ~0x1);
	spin_unlock(&spu->register_lock);
	return 0;
}

static int __spu_trap_stop(struct spu *spu)
{
	pr_debug("%s\n", __func__);
	spu->stop_code = in_be32(&spu->problem->spu_status_R);
	if (spu->stop_callback)
		spu->stop_callback(spu);
	return 0;
}

static int __spu_trap_halt(struct spu *spu)
{
	pr_debug("%s\n", __func__);
	spu->stop_code = in_be32(&spu->problem->spu_status_R);
	if (spu->stop_callback)
		spu->stop_callback(spu);
	return 0;
}

static int __spu_trap_tag_group(struct spu *spu)
{
	pr_debug("%s\n", __func__);
	/* wake_up(&spu->dma_wq); */
	return 0;
}

static int __spu_trap_spubox(struct spu *spu)
{
	if (spu->wbox_callback)
		spu->wbox_callback(spu);

	/* atomically disable SPU mailbox interrupts */
	spin_lock(&spu->register_lock);
	out_be64(&spu->priv1->int_mask_class2_RW,
		 in_be64(&spu->priv1->int_mask_class2_RW) & ~0x10);
	spin_unlock(&spu->register_lock);
	return 0;
}

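/*
 * Class 0 (error) interrupt. The top half only records that an error
 * is pending and notifies the owner; spu_irq_class_0_bottom reads and
 * acknowledges the status register and dispatches the per-error traps.
 */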
static irqreturn_t
spu_irq_class_0(int irq, void *data, struct pt_regs *regs)
{
	struct spu *spu;

	spu = data;
	spu->class_0_pending = 1;
	if (spu->stop_callback)
		spu->stop_callback(spu);

	return IRQ_HANDLED;
}

int
spu_irq_class_0_bottom(struct spu *spu)
{
	unsigned long stat, mask;

	spu->class_0_pending = 0;

	mask = in_be64(&spu->priv1->int_mask_class0_RW);
	stat = in_be64(&spu->priv1->int_stat_class0_RW);

	stat &= mask;

	if (stat & 1) /* invalid MFC DMA */
		__spu_trap_invalid_dma(spu);

	if (stat & 2) /* invalid DMA alignment */
		__spu_trap_dma_align(spu);

	if (stat & 4) /* error on SPU */
		__spu_trap_error(spu);

	out_be64(&spu->priv1->int_stat_class0_RW, stat);

	return (stat & 0x7) ? -EIO : 0;
}
EXPORT_SYMBOL_GPL(spu_irq_class_0_bottom);

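/*
 * Class 1 (translation) interrupt. Status, DAR and DSISR are read and
 * the status cleared atomically under register_lock; the hardware
 * DSISR is cleared only for mapping faults, whose state is kept in
 * spu->dar/dsisr for spu_irq_class_1_bottom. Segment faults are fixed
 * up immediately, mapping faults are deferred to process context.
 */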
static irqreturn_t
spu_irq_class_1(int irq, void *data, struct pt_regs *regs)
{
	struct spu *spu;
	unsigned long stat, mask, dar, dsisr;

	spu = data;

	/* atomically read & clear class1 status. */
	spin_lock(&spu->register_lock);
	mask = in_be64(&spu->priv1->int_mask_class1_RW);
	stat = in_be64(&spu->priv1->int_stat_class1_RW) & mask;
	dar = in_be64(&spu->priv1->mfc_dar_RW);
	dsisr = in_be64(&spu->priv1->mfc_dsisr_RW);
	if (stat & 2) /* mapping fault */
		out_be64(&spu->priv1->mfc_dsisr_RW, 0UL);
	out_be64(&spu->priv1->int_stat_class1_RW, stat);
	spin_unlock(&spu->register_lock);

	if (stat & 1) /* segment fault */
		__spu_trap_data_seg(spu, dar);

	if (stat & 2) { /* mapping fault */
		__spu_trap_data_map(spu, dar, dsisr);
	}

	if (stat & 4) /* ls compare & suspend on get */
		;

	if (stat & 8) /* ls compare & suspend on put */
		;

	return stat ? IRQ_HANDLED : IRQ_NONE;
}
EXPORT_SYMBOL_GPL(spu_irq_class_1_bottom);

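/*
 * Class 2 (application) interrupt: mailbox events, SPU stop-and-signal,
 * SPU halt, DMA tag-group completion and the mailbox threshold. Only
 * unmasked status bits are acted upon, then acknowledged.
 */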
static irqreturn_t
spu_irq_class_2(int irq, void *data, struct pt_regs *regs)
{
	struct spu *spu;
	unsigned long stat;
	unsigned long mask;

	spu = data;
	stat = in_be64(&spu->priv1->int_stat_class2_RW);
	mask = in_be64(&spu->priv1->int_mask_class2_RW);

	pr_debug("class 2 interrupt %d, %lx, %lx\n", irq, stat, mask);

	stat &= mask;

	if (stat & 1) /* PPC core mailbox */
		__spu_trap_mailbox(spu);

	if (stat & 2) /* SPU stop-and-signal */
		__spu_trap_stop(spu);

	if (stat & 4) /* SPU halted */
		__spu_trap_halt(spu);

	if (stat & 8) /* DMA tag group complete */
		__spu_trap_tag_group(spu);

	if (stat & 0x10) /* SPU mailbox threshold */
		__spu_trap_spubox(spu);

	out_be64(&spu->priv1->int_stat_class2_RW, stat);
	return stat ? IRQ_HANDLED : IRQ_NONE;
}

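/*
 * SPE interrupt lines live behind the Cell internal interrupt
 * controller: the base for a node is IIC_NODE_STRIDE * node +
 * IIC_SPE_OFFSET, the three classes are IIC_CLASS_STRIDE apart,
 * and the SPE's "isrc" value selects the unit within each class.
 */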
static int
spu_request_irqs(struct spu *spu)
{
	int ret;
	int irq_base;

	irq_base = IIC_NODE_STRIDE * spu->node + IIC_SPE_OFFSET;

	snprintf(spu->irq_c0, sizeof (spu->irq_c0), "spe%02d.0", spu->number);
	ret = request_irq(irq_base + spu->isrc,
			  spu_irq_class_0, 0, spu->irq_c0, spu);
	if (ret)
		goto out;
	out_be64(&spu->priv1->int_mask_class0_RW, 0x7);

	snprintf(spu->irq_c1, sizeof (spu->irq_c1), "spe%02d.1", spu->number);
	ret = request_irq(irq_base + IIC_CLASS_STRIDE + spu->isrc,
			  spu_irq_class_1, 0, spu->irq_c1, spu);
	if (ret)
		goto out1;
	out_be64(&spu->priv1->int_mask_class1_RW, 0x3);

	snprintf(spu->irq_c2, sizeof (spu->irq_c2), "spe%02d.2", spu->number);
	ret = request_irq(irq_base + 2*IIC_CLASS_STRIDE + spu->isrc,
			  spu_irq_class_2, 0, spu->irq_c2, spu);
	if (ret)
		goto out2;
	out_be64(&spu->priv1->int_mask_class2_RW, 0xe);
	goto out;

out2:
	free_irq(irq_base + IIC_CLASS_STRIDE + spu->isrc, spu);
out1:
	free_irq(irq_base + spu->isrc, spu);
out:
	return ret;
}

static void
spu_free_irqs(struct spu *spu)
{
	int irq_base;

	irq_base = IIC_NODE_STRIDE * spu->node + IIC_SPE_OFFSET;

	free_irq(irq_base + spu->isrc, spu);
	free_irq(irq_base + IIC_CLASS_STRIDE + spu->isrc, spu);
	free_irq(irq_base + 2*IIC_CLASS_STRIDE + spu->isrc, spu);
}

static LIST_HEAD(spu_list);
static DECLARE_MUTEX(spu_mutex);

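/*
 * Reset SPU channel state before handing out an SPU: select each
 * channel via spu_chnlcntptr_RW, drain stale data words, then program
 * sensible initial channel counts (e.g. 16 free slots for channel
 * 0x15, the MFC command queue).
 */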
static void spu_init_channels(struct spu *spu)
{
	static const struct {
		unsigned channel;
		unsigned count;
	} zero_list[] = {
		{ 0x00, 1, }, { 0x01, 1, }, { 0x03, 1, }, { 0x04, 1, },
		{ 0x18, 1, }, { 0x19, 1, }, { 0x1b, 1, }, { 0x1d, 1, },
	}, count_list[] = {
		{ 0x00, 0, }, { 0x03, 0, }, { 0x04, 0, }, { 0x15, 16, },
		{ 0x17, 1, }, { 0x18, 0, }, { 0x19, 0, }, { 0x1b, 0, },
		{ 0x1c, 1, }, { 0x1d, 0, }, { 0x1e, 1, },
	};
	struct spu_priv2 *priv2;
	int i;

	priv2 = spu->priv2;

	/* initialize all channel data to zero */
	for (i = 0; i < ARRAY_SIZE(zero_list); i++) {
		int count;

		out_be64(&priv2->spu_chnlcntptr_RW, zero_list[i].channel);
		for (count = 0; count < zero_list[i].count; count++)
			out_be64(&priv2->spu_chnldata_RW, 0);
	}

	/* initialize channel counts to meaningful values */
	for (i = 0; i < ARRAY_SIZE(count_list); i++) {
		out_be64(&priv2->spu_chnlcntptr_RW, count_list[i].channel);
		out_be64(&priv2->spu_chnlcnt_RW, count_list[i].count);
	}
}

static void spu_init_regs(struct spu *spu)
{
	out_be64(&spu->priv1->int_mask_class0_RW, 0x7);
	out_be64(&spu->priv1->int_mask_class1_RW, 0x3);
	out_be64(&spu->priv1->int_mask_class2_RW, 0xe);
}

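/*
 * Take one idle SPU off the free list and prepare it for use.
 * Callers pair this with spu_free(), roughly:
 *
 *	struct spu *spu = spu_alloc();
 *	if (spu) {
 *		... load and run a context ...
 *		spu_free(spu);
 *	}
 */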
struct spu *spu_alloc(void)
{
	struct spu *spu;

	down(&spu_mutex);
	if (!list_empty(&spu_list)) {
		spu = list_entry(spu_list.next, struct spu, list);
		list_del_init(&spu->list);
		pr_debug("Got SPU %x %d\n", spu->isrc, spu->number);
	} else {
		pr_debug("No SPU left\n");
		spu = NULL;
	}
	up(&spu_mutex);

	if (spu) {
		spu_init_channels(spu);
		spu_init_regs(spu);
	}

	return spu;
}
EXPORT_SYMBOL_GPL(spu_alloc);

void spu_free(struct spu *spu)
{
	down(&spu_mutex);
	list_add_tail(&spu->list, &spu_list);
	up(&spu_mutex);
}
EXPORT_SYMBOL_GPL(spu_free);

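/*
 * Resolve a deferred MFC fault against the owning process' mm. This is
 * a trimmed-down copy of the PowerPC do_page_fault() path: look up the
 * VMA for the faulting address, check access permissions against the
 * DSISR, and let handle_mm_fault() bring the page in.
 */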
static int spu_handle_mm_fault(struct spu *spu)
{
	struct mm_struct *mm = spu->mm;
	struct vm_area_struct *vma;
	u64 ea, dsisr, is_write;
	int ret;

	ea = spu->dar;
	dsisr = spu->dsisr;
#if 0
	if (!IS_VALID_EA(ea)) {
		return -EFAULT;
	}
#endif /* XXX */
	if (mm == NULL) {
		return -EFAULT;
	}
	if (mm->pgd == NULL) {
		return -EFAULT;
	}

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, ea);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= ea)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
#if 0
	if (expand_stack(vma, ea))
		goto bad_area;
#endif /* XXX */
good_area:
	is_write = dsisr & MFC_DSISR_ACCESS_PUT;
	if (is_write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	} else {
		if (dsisr & MFC_DSISR_ACCESS_DENIED)
			goto bad_area;
		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto bad_area;
	}
	ret = 0;
	switch (handle_mm_fault(mm, vma, ea, is_write)) {
	case VM_FAULT_MINOR:
		current->min_flt++;
		break;
	case VM_FAULT_MAJOR:
		current->maj_flt++;
		break;
	case VM_FAULT_SIGBUS:
		ret = -EFAULT;
		goto bad_area;
	case VM_FAULT_OOM:
		ret = -ENOMEM;
		goto bad_area;
	default:
		BUG();
	}
	up_read(&mm->mmap_sem);
	return ret;

bad_area:
	up_read(&mm->mmap_sem);
	return -EFAULT;
}

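/*
 * Process-context bottom half for class 1 mapping faults: first try to
 * resolve the fault through the hash table, then fall back to the full
 * mm fault path above. On success the stalled DMA is restarted,
 * otherwise the fault is converted into an invalid-DMA signal.
 */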
int spu_irq_class_1_bottom(struct spu *spu)
{
	u64 ea, dsisr, access, error = 0UL;
	int ret = 0;

	ea = spu->dar;
	dsisr = spu->dsisr;
	if (dsisr & MFC_DSISR_PTE_NOT_FOUND) {
		access = (_PAGE_PRESENT | _PAGE_USER);
		access |= (dsisr & MFC_DSISR_ACCESS_PUT) ? _PAGE_RW : 0UL;
		if (hash_page(ea, access, 0x300) != 0)
			error |= CLASS1_ENABLE_STORAGE_FAULT_INTR;
	}
	if ((error & CLASS1_ENABLE_STORAGE_FAULT_INTR) ||
	    (dsisr & MFC_DSISR_ACCESS_DENIED)) {
		if ((ret = spu_handle_mm_fault(spu)) != 0)
			error |= CLASS1_ENABLE_STORAGE_FAULT_INTR;
		else
			error &= ~CLASS1_ENABLE_STORAGE_FAULT_INTR;
	}
	spu->dar = 0UL;
	spu->dsisr = 0UL;
	if (!error) {
		spu_restart_dma(spu);
	} else {
		__spu_trap_invalid_dma(spu);
	}
	return ret;
}

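/*
 * SPE resources are described in the device tree as packed
 * { u64 address; u32 len } properties. Map one of them and return the
 * resulting MMIO pointer, or NULL if the property is missing or
 * malformed.
 */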
static void __iomem * __init map_spe_prop(struct device_node *n,
					  const char *name)
{
	struct address_prop {
		unsigned long address;
		unsigned int len;
	} __attribute__((packed)) *prop;

	void *p;
	int proplen;

	p = get_property(n, name, &proplen);
	if (proplen != sizeof (struct address_prop))
		return NULL;

	prop = p;

	return ioremap(prop->address, prop->len);
}

static void spu_unmap(struct spu *spu)
{
	iounmap(spu->priv2);
	iounmap(spu->priv1);
	iounmap(spu->problem);
	iounmap((u8 __iomem *)spu->local_store);
}

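/*
 * Wire up a struct spu from the device tree: read the interrupt source
 * and local-store address properties, then map the local store and the
 * problem, priv1 and priv2 register areas.
 */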
static int __init spu_map_device(struct spu *spu, struct device_node *spe)
{
	char *prop;
	int ret;

	ret = -ENODEV;
	prop = get_property(spe, "isrc", NULL);
	if (!prop)
		goto out;
	spu->isrc = *(unsigned int *)prop;

	spu->name = get_property(spe, "name", NULL);
	if (!spu->name)
		goto out;

	prop = get_property(spe, "local-store", NULL);
	if (!prop)
		goto out;
	spu->local_store_phys = *(unsigned long *)prop;

	/* we use local store as ram, not io memory */
	spu->local_store = (void __force *)map_spe_prop(spe, "local-store");
	if (!spu->local_store)
		goto out;

	spu->problem = map_spe_prop(spe, "problem");
	if (!spu->problem)
		goto out_unmap;

	spu->priv1 = map_spe_prop(spe, "priv1");
	if (!spu->priv1)
		goto out_unmap;

	spu->priv2 = map_spe_prop(spe, "priv2");
	if (!spu->priv2)
		goto out_unmap;
	ret = 0;
	goto out;

out_unmap:
	spu_unmap(spu);
out:
	return ret;
}

static int __init find_spu_node_id(struct device_node *spe)
{
	unsigned int *id;
	struct device_node *cpu;

	cpu = spe->parent->parent;
	id = (unsigned int *)get_property(cpu, "node-id", NULL);

	return id ? *id : 0;
}

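/*
 * Probe one SPE: map its registers, initialize the software state,
 * point the MFC at the kernel hash page table, request the interrupt
 * lines and add the new SPU to the global free list.
 */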
static int __init create_spu(struct device_node *spe)
{
	struct spu *spu;
	int ret;
	static int number;

	ret = -ENOMEM;
	spu = kmalloc(sizeof (*spu), GFP_KERNEL);
	if (!spu)
		goto out;

	ret = spu_map_device(spu, spe);
	if (ret)
		goto out_free;

	spu->node = find_spu_node_id(spe);
	spu->stop_code = 0;
	spu->slb_replace = 0;
	spu->mm = NULL;
	spu->ctx = NULL;
	spu->rq = NULL;
	spu->pid = 0;
	spu->class_0_pending = 0;
	spu->flags = 0UL;
	spu->dar = 0UL;
	spu->dsisr = 0UL;
	spin_lock_init(&spu->register_lock);

	out_be64(&spu->priv1->mfc_sdr_RW, mfspr(SPRN_SDR1));
	out_be64(&spu->priv1->mfc_sr1_RW, 0x33);

	spu->ibox_callback = NULL;
	spu->wbox_callback = NULL;
	spu->stop_callback = NULL;

	down(&spu_mutex);
	spu->number = number++;
	ret = spu_request_irqs(spu);
	if (ret)
		goto out_unmap;

	list_add(&spu->list, &spu_list);
	up(&spu_mutex);

	pr_debug("Using SPE %s %02x %p %p %p %p %d\n",
		 spu->name, spu->isrc, spu->local_store,
		 spu->problem, spu->priv1, spu->priv2, spu->number);
	goto out;

out_unmap:
	up(&spu_mutex);
	spu_unmap(spu);
out_free:
	kfree(spu);
out:
	return ret;
}

static void destroy_spu(struct spu *spu)
{
	list_del_init(&spu->list);

	spu_free_irqs(spu);
	spu_unmap(spu);
	kfree(spu);
}

static void cleanup_spu_base(void)
{
	struct spu *spu, *tmp;
	down(&spu_mutex);
	list_for_each_entry_safe(spu, tmp, &spu_list, list)
		destroy_spu(spu);
	up(&spu_mutex);
}
module_exit(cleanup_spu_base);

static int __init init_spu_base(void)
{
	struct device_node *node;
	int ret;

	ret = -ENODEV;
	for (node = of_find_node_by_type(NULL, "spe");
	     node; node = of_find_node_by_type(node, "spe")) {
		ret = create_spu(node);
		if (ret) {
			printk(KERN_WARNING "%s: Error initializing %s\n",
				__func__, node->name);
			cleanup_spu_base();
			break;
		}
	}
	/* in some old firmware versions, the spe is called 'spc', so we
	   look for that as well */
	for (node = of_find_node_by_type(NULL, "spc");
	     node; node = of_find_node_by_type(node, "spc")) {
		ret = create_spu(node);
		if (ret) {
			printk(KERN_WARNING "%s: Error initializing %s\n",
				__func__, node->name);
			cleanup_spu_base();
			break;
		}
	}
	return ret;
}
module_init(init_spu_base);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>");