/*
 * Low-level SPU handling
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
 *
 * Author: Arnd Bergmann <arndb@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#define DEBUG 1

#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/wait.h>

#include <asm/io.h>
#include <asm/prom.h>
#include <asm/semaphore.h>
#include <asm/spu.h>
#include <asm/mmu_context.h>

#include "interrupt.h"

static int __spu_trap_invalid_dma(struct spu *spu)
{
	pr_debug("%s\n", __FUNCTION__);
	force_sig(SIGBUS, /* info, */ current);
	return 0;
}

static int __spu_trap_dma_align(struct spu *spu)
{
	pr_debug("%s\n", __FUNCTION__);
	force_sig(SIGBUS, /* info, */ current);
	return 0;
}

static int __spu_trap_error(struct spu *spu)
{
	pr_debug("%s\n", __FUNCTION__);
	force_sig(SIGILL, /* info, */ current);
	return 0;
}

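/*
 * Restart a suspended MFC DMA operation after its fault has been
 * resolved, unless an SPU context switch is pending.
 */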
static void spu_restart_dma(struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	if (!test_bit(SPU_CONTEXT_SWITCH_PENDING_nr, &spu->flags))
		out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESTART_DMA_COMMAND);
}

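/*
 * Handle an SLB miss for an SPE-initiated access: software-load an SLB
 * entry for the faulting effective address, using round-robin
 * replacement across the eight SLB entries, then restart the suspended
 * DMA.  Only user-region addresses are mapped here.
 */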
static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
{
	struct spu_priv2 __iomem *priv2;
	struct mm_struct *mm;

	pr_debug("%s\n", __FUNCTION__);

	if (test_bit(SPU_CONTEXT_SWITCH_ACTIVE_nr, &spu->flags)) {
		printk(KERN_ERR "%s: invalid access during switch!\n", __func__);
		return 1;
	}

	if (REGION_ID(ea) != USER_REGION_ID) {
		pr_debug("invalid region access at %016lx\n", ea);
		return 1;
	}

	priv2 = spu->priv2;
	mm = spu->mm;

	if (spu->slb_replace >= 8)
		spu->slb_replace = 0;

	out_be64(&priv2->slb_index_W, spu->slb_replace);
	out_be64(&priv2->slb_vsid_RW,
		(get_vsid(mm->context.id, ea) << SLB_VSID_SHIFT)
		| SLB_VSID_USER);
	out_be64(&priv2->slb_esid_RW, (ea & ESID_MASK) | SLB_ESID_V);

	spu_restart_dma(spu);

	pr_debug("set slb %d context %lx, ea %016lx, vsid %016lx, esid %016lx\n",
		spu->slb_replace, mm->context.id, ea,
		(get_vsid(mm->context.id, ea) << SLB_VSID_SHIFT)| SLB_VSID_USER,
		(ea & ESID_MASK) | SLB_ESID_V);
	return 0;
}

extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap); //XXX
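/*
 * Handle an MFC data storage fault.  Hash faults on kernel addresses
 * are resolved right here through hash_page(); faults on user
 * addresses are deferred to process context by waking the thread
 * sleeping in spu_run(), which then calls spu_handle_pte_fault().
 */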
static int __spu_trap_data_map(struct spu *spu, unsigned long ea)
{
	unsigned long dsisr;
	struct spu_priv1 __iomem *priv1;

	pr_debug("%s\n", __FUNCTION__);
	priv1 = spu->priv1;
	dsisr = in_be64(&priv1->mfc_dsisr_RW);

	/* Handle kernel space hash faults immediately.
	   User hash faults need to be deferred to process context. */
	if ((dsisr & MFC_DSISR_PTE_NOT_FOUND)
	    && REGION_ID(ea) != USER_REGION_ID
	    && hash_page(ea, _PAGE_PRESENT, 0x300) == 0) {
		spu_restart_dma(spu);
		return 0;
	}

	if (test_bit(SPU_CONTEXT_SWITCH_ACTIVE_nr, &spu->flags)) {
		printk(KERN_ERR "%s: invalid access during switch!\n", __func__);
		return 1;
	}

	wake_up(&spu->stop_wq);
	return 0;
}

static int __spu_trap_mailbox(struct spu *spu)
{
	wake_up_all(&spu->ibox_wq);
	kill_fasync(&spu->ibox_fasync, SIGIO, POLLIN);

	/* atomically disable SPU mailbox interrupts */
	spin_lock(&spu->register_lock);
	out_be64(&spu->priv1->int_mask_class2_RW,
		 in_be64(&spu->priv1->int_mask_class2_RW) & ~0x1);
	spin_unlock(&spu->register_lock);
	return 0;
}

static int __spu_trap_stop(struct spu *spu)
{
	pr_debug("%s\n", __FUNCTION__);
	spu->stop_code = in_be32(&spu->problem->spu_status_R);
	wake_up(&spu->stop_wq);
	return 0;
}

static int __spu_trap_halt(struct spu *spu)
{
	pr_debug("%s\n", __FUNCTION__);
	spu->stop_code = in_be32(&spu->problem->spu_status_R);
	wake_up(&spu->stop_wq);
	return 0;
}

static int __spu_trap_tag_group(struct spu *spu)
{
	pr_debug("%s\n", __FUNCTION__);
	/* wake_up(&spu->dma_wq); */
	return 0;
}

static int __spu_trap_spubox(struct spu *spu)
{
	wake_up_all(&spu->wbox_wq);
	kill_fasync(&spu->wbox_fasync, SIGIO, POLLOUT);

	/* atomically disable SPU mailbox interrupts */
	spin_lock(&spu->register_lock);
	out_be64(&spu->priv1->int_mask_class2_RW,
		 in_be64(&spu->priv1->int_mask_class2_RW) & ~0x10);
	spin_unlock(&spu->register_lock);
	return 0;
}

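/*
 * Class 0 interrupts signal MFC and SPU errors (invalid DMA command,
 * DMA alignment error, SPU error).  The hard irq handler only records
 * that an error is pending and wakes spu_run();
 * spu_irq_class_0_bottom() then delivers the signals from process
 * context.
 */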
static irqreturn_t
spu_irq_class_0(int irq, void *data, struct pt_regs *regs)
{
	struct spu *spu;

	spu = data;
	spu->class_0_pending = 1;
	wake_up(&spu->stop_wq);

	return IRQ_HANDLED;
}

static int
spu_irq_class_0_bottom(struct spu *spu)
{
	unsigned long stat;

	spu->class_0_pending = 0;

	stat = in_be64(&spu->priv1->int_stat_class0_RW);

	if (stat & 1) /* invalid MFC DMA */
		__spu_trap_invalid_dma(spu);

	if (stat & 2) /* invalid DMA alignment */
		__spu_trap_dma_align(spu);

	if (stat & 4) /* error on SPU */
		__spu_trap_error(spu);

	out_be64(&spu->priv1->int_stat_class0_RW, stat);
	return 0;
}

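/*
 * Class 1 interrupts signal address translation faults (segment
 * misses and mapping faults) for SPE-initiated transfers.
 */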
static irqreturn_t
spu_irq_class_1(int irq, void *data, struct pt_regs *regs)
{
	struct spu *spu;
	unsigned long stat, dar;

	spu = data;
	stat = in_be64(&spu->priv1->int_stat_class1_RW);
	dar = in_be64(&spu->priv1->mfc_dar_RW);

	if (stat & 1) /* segment fault */
		__spu_trap_data_seg(spu, dar);

	if (stat & 2) { /* mapping fault */
		__spu_trap_data_map(spu, dar);
	}

	if (stat & 4) /* ls compare & suspend on get */
		;

	if (stat & 8) /* ls compare & suspend on put */
		;

	out_be64(&spu->priv1->int_stat_class1_RW, stat);
	return stat ? IRQ_HANDLED : IRQ_NONE;
}

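/*
 * Class 2 interrupts cover mailbox traffic, SPU stop-and-signal, SPU
 * halt, and DMA tag group completion.
 */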
static irqreturn_t
spu_irq_class_2(int irq, void *data, struct pt_regs *regs)
{
	struct spu *spu;
	unsigned long stat;

	spu = data;
	stat = in_be64(&spu->priv1->int_stat_class2_RW);

	pr_debug("class 2 interrupt %d, %lx, %lx\n", irq, stat,
		in_be64(&spu->priv1->int_mask_class2_RW));

	if (stat & 1) /* PPC core mailbox */
		__spu_trap_mailbox(spu);

	if (stat & 2) /* SPU stop-and-signal */
		__spu_trap_stop(spu);

	if (stat & 4) /* SPU halted */
		__spu_trap_halt(spu);

	if (stat & 8) /* DMA tag group complete */
		__spu_trap_tag_group(spu);

	if (stat & 0x10) /* SPU mailbox threshold */
		__spu_trap_spubox(spu);

	out_be64(&spu->priv1->int_stat_class2_RW, stat);
	return stat ? IRQ_HANDLED : IRQ_NONE;
}

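/*
 * Each SPE owns one interrupt line per class.  The lines sit at a
 * fixed per-class stride within the node's interrupt space, offset by
 * the SPE's interrupt source number (isrc).
 */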
static int
spu_request_irqs(struct spu *spu)
{
	int ret;
	int irq_base;

	irq_base = IIC_NODE_STRIDE * spu->node + IIC_SPE_OFFSET;

	snprintf(spu->irq_c0, sizeof (spu->irq_c0), "spe%02d.0", spu->number);
	ret = request_irq(irq_base + spu->isrc,
		 spu_irq_class_0, 0, spu->irq_c0, spu);
	if (ret)
		goto out;
	out_be64(&spu->priv1->int_mask_class0_RW, 0x7);

	snprintf(spu->irq_c1, sizeof (spu->irq_c1), "spe%02d.1", spu->number);
	ret = request_irq(irq_base + IIC_CLASS_STRIDE + spu->isrc,
		 spu_irq_class_1, 0, spu->irq_c1, spu);
	if (ret)
		goto out1;
	out_be64(&spu->priv1->int_mask_class1_RW, 0x3);

	snprintf(spu->irq_c2, sizeof (spu->irq_c2), "spe%02d.2", spu->number);
	ret = request_irq(irq_base + 2*IIC_CLASS_STRIDE + spu->isrc,
		 spu_irq_class_2, 0, spu->irq_c2, spu);
	if (ret)
		goto out2;
	out_be64(&spu->priv1->int_mask_class2_RW, 0xe);
	goto out;

out2:
	free_irq(irq_base + IIC_CLASS_STRIDE + spu->isrc, spu);
out1:
	free_irq(irq_base + spu->isrc, spu);
out:
	return ret;
}

static void
spu_free_irqs(struct spu *spu)
{
	int irq_base;

	irq_base = IIC_NODE_STRIDE * spu->node + IIC_SPE_OFFSET;

	free_irq(irq_base + spu->isrc, spu);
	free_irq(irq_base + IIC_CLASS_STRIDE + spu->isrc, spu);
	free_irq(irq_base + 2*IIC_CLASS_STRIDE + spu->isrc, spu);
}

static LIST_HEAD(spu_list);
static DECLARE_MUTEX(spu_mutex);

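/*
 * Bring the SPU channels into a known state: clear the data of the
 * channels in zero_list, then program the channel counts in count_list
 * to their initial values.
 */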
static void spu_init_channels(struct spu *spu)
{
	static const struct {
		unsigned channel;
		unsigned count;
	} zero_list[] = {
		{ 0x00, 1, }, { 0x01, 1, }, { 0x03, 1, }, { 0x04, 1, },
		{ 0x18, 1, }, { 0x19, 1, }, { 0x1b, 1, }, { 0x1d, 1, },
	}, count_list[] = {
		{ 0x00, 0, }, { 0x03, 0, }, { 0x04, 0, }, { 0x15, 16, },
		{ 0x17, 1, }, { 0x18, 0, }, { 0x19, 0, }, { 0x1b, 0, },
		{ 0x1c, 1, }, { 0x1d, 0, }, { 0x1e, 1, },
	};
	struct spu_priv2 __iomem *priv2;
	int i;

	priv2 = spu->priv2;

	/* initialize all channel data to zero */
	for (i = 0; i < ARRAY_SIZE(zero_list); i++) {
		int count;

		out_be64(&priv2->spu_chnlcntptr_RW, zero_list[i].channel);
		for (count = 0; count < zero_list[i].count; count++)
			out_be64(&priv2->spu_chnldata_RW, 0);
	}

	/* initialize channel counts to meaningful values */
	for (i = 0; i < ARRAY_SIZE(count_list); i++) {
		out_be64(&priv2->spu_chnlcntptr_RW, count_list[i].channel);
		out_be64(&priv2->spu_chnlcnt_RW, count_list[i].count);
	}
}

static void spu_init_regs(struct spu *spu)
{
	out_be64(&spu->priv1->int_mask_class0_RW, 0x7);
	out_be64(&spu->priv1->int_mask_class1_RW, 0x3);
	out_be64(&spu->priv1->int_mask_class2_RW, 0xe);
}

struct spu *spu_alloc(void)
{
	struct spu *spu;

	down(&spu_mutex);
	if (!list_empty(&spu_list)) {
		spu = list_entry(spu_list.next, struct spu, list);
		list_del_init(&spu->list);
		pr_debug("Got SPU %x %d\n", spu->isrc, spu->number);
	} else {
		pr_debug("No SPU left\n");
		spu = NULL;
	}
	up(&spu_mutex);

	if (spu) {
		spu_init_channels(spu);
		spu_init_regs(spu);
	}

	return spu;
}
EXPORT_SYMBOL(spu_alloc);

void spu_free(struct spu *spu)
{
	down(&spu_mutex);
	spu->ibox_fasync = NULL;
	spu->wbox_fasync = NULL;
	list_add_tail(&spu->list, &spu_list);
	up(&spu_mutex);
}
EXPORT_SYMBOL(spu_free);

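/*
 * Resolve an MFC fault against the owning process' address space,
 * mirroring the main powerpc do_page_fault() path: look up the VMA,
 * check access permissions, and let handle_mm_fault() do the real
 * work.
 */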
static int spu_handle_mm_fault(struct spu *spu)
{
	struct spu_priv1 __iomem *priv1;
	struct mm_struct *mm = spu->mm;
	struct vm_area_struct *vma;
	u64 ea, dsisr, is_write;
	int ret;

	priv1 = spu->priv1;
	ea = in_be64(&priv1->mfc_dar_RW);
	dsisr = in_be64(&priv1->mfc_dsisr_RW);
#if 0
	if (!IS_VALID_EA(ea)) {
		return -EFAULT;
	}
#endif /* XXX */
	if (mm == NULL) {
		return -EFAULT;
	}
	if (mm->pgd == NULL) {
		return -EFAULT;
	}

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, ea);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= ea)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
#if 0
	if (expand_stack(vma, ea))
		goto bad_area;
#endif /* XXX */
good_area:
	is_write = dsisr & MFC_DSISR_ACCESS_PUT;
	if (is_write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	} else {
		if (dsisr & MFC_DSISR_ACCESS_DENIED)
			goto bad_area;
		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto bad_area;
	}
	ret = 0;
	switch (handle_mm_fault(mm, vma, ea, is_write)) {
	case VM_FAULT_MINOR:
		current->min_flt++;
		break;
	case VM_FAULT_MAJOR:
		current->maj_flt++;
		break;
	case VM_FAULT_SIGBUS:
		ret = -EFAULT;
		goto bad_area;
	case VM_FAULT_OOM:
		ret = -ENOMEM;
		goto bad_area;
	default:
		BUG();
	}
	up_read(&mm->mmap_sem);
	return ret;

bad_area:
	up_read(&mm->mmap_sem);
	return -EFAULT;
}

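/*
 * Called from spu_run() in process context: try to resolve the fault
 * through the hash table first, fall back to full VMA-based fault
 * handling, and restart the suspended DMA once the fault is resolved.
 */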
static int spu_handle_pte_fault(struct spu *spu)
{
	struct spu_priv1 __iomem *priv1;
	u64 ea, dsisr, access, error = 0UL;
	int ret = 0;

	priv1 = spu->priv1;
	ea = in_be64(&priv1->mfc_dar_RW);
	dsisr = in_be64(&priv1->mfc_dsisr_RW);
	access = (_PAGE_PRESENT | _PAGE_USER);
	if (dsisr & MFC_DSISR_PTE_NOT_FOUND) {
		if (hash_page(ea, access, 0x300) != 0)
			error |= CLASS1_ENABLE_STORAGE_FAULT_INTR;
	}
	if ((error & CLASS1_ENABLE_STORAGE_FAULT_INTR) ||
	    (dsisr & MFC_DSISR_ACCESS_DENIED)) {
		if ((ret = spu_handle_mm_fault(spu)) != 0)
			error |= CLASS1_ENABLE_STORAGE_FAULT_INTR;
		else
			error &= ~CLASS1_ENABLE_STORAGE_FAULT_INTR;
	}
	if (!error)
		spu_restart_dma(spu);

	return ret;
}

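/*
 * Run the SPU on behalf of the current thread until it stops or halts,
 * servicing page faults and class 0 errors as they come in.  Returns
 * -EAGAIN on stop-and-signal, -EIO on halt, and -ERESTARTSYS when
 * interrupted by a signal or a ptrace breakpoint.
 */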
int spu_run(struct spu *spu)
{
	struct spu_problem __iomem *prob;
	struct spu_priv1 __iomem *priv1;
	struct spu_priv2 __iomem *priv2;
	unsigned long status;
	int ret;

	prob = spu->problem;
	priv1 = spu->priv1;
	priv2 = spu->priv2;

	/* Let SPU run. */
	spu->mm = current->mm;
	eieio();
	out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_RUNNABLE);

	do {
		ret = wait_event_interruptible(spu->stop_wq,
			  (!((status = in_be32(&prob->spu_status_R)) & 0x1))
			  || (in_be64(&priv1->mfc_dsisr_RW) & MFC_DSISR_PTE_NOT_FOUND)
			  || spu->class_0_pending);

		if (status & SPU_STATUS_STOPPED_BY_STOP)
			ret = -EAGAIN;
		else if (status & SPU_STATUS_STOPPED_BY_HALT)
			ret = -EIO;
		else if (in_be64(&priv1->mfc_dsisr_RW) & MFC_DSISR_PTE_NOT_FOUND)
			ret = spu_handle_pte_fault(spu);

		if (spu->class_0_pending)
			spu_irq_class_0_bottom(spu);

		if (!ret && signal_pending(current))
			ret = -ERESTARTSYS;

	} while (!ret);

	/* Ensure SPU is stopped. */
	out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP);
	eieio();
	while (in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING)
		cpu_relax();

	out_be64(&priv2->slb_invalidate_all_W, 0);
	out_be64(&priv1->tlb_invalidate_entry_W, 0UL);
	eieio();

	spu->mm = NULL;

	/* Check for SPU breakpoint. */
	if (unlikely(current->ptrace & PT_PTRACED)) {
		status = in_be32(&prob->spu_status_R);

		if ((status & SPU_STATUS_STOPPED_BY_STOP)
		    && status >> SPU_STOP_STATUS_SHIFT == 0x3fff) {
			force_sig(SIGTRAP, current);
			ret = -ERESTARTSYS;
		}
	}

	return ret;
}
EXPORT_SYMBOL(spu_run);

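/*
 * Map one of the SPE's register areas: the named device tree property
 * holds a packed (address, length) pair describing the region to
 * ioremap.
 */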
static void __iomem * __init map_spe_prop(struct device_node *n,
		const char *name)
{
	struct address_prop {
		unsigned long address;
		unsigned int len;
	} __attribute__((packed)) *prop;

	void *p;
	int proplen;

	p = get_property(n, name, &proplen);
	if (proplen != sizeof (struct address_prop))
		return NULL;

	prop = p;

	return ioremap(prop->address, prop->len);
}

static void spu_unmap(struct spu *spu)
{
	iounmap(spu->priv2);
	iounmap(spu->priv1);
	iounmap(spu->problem);
	iounmap((u8 __iomem *)spu->local_store);
}

static int __init spu_map_device(struct spu *spu, struct device_node *spe)
{
	char *prop;
	int ret;

	ret = -ENODEV;
	prop = get_property(spe, "isrc", NULL);
	if (!prop)
		goto out;
	spu->isrc = *(unsigned int *)prop;

	spu->name = get_property(spe, "name", NULL);
	if (!spu->name)
		goto out;

	prop = get_property(spe, "local-store", NULL);
	if (!prop)
		goto out;
	spu->local_store_phys = *(unsigned long *)prop;

	/* we use local store as ram, not io memory */
	spu->local_store = (void __force *)map_spe_prop(spe, "local-store");
	if (!spu->local_store)
		goto out;

	spu->problem = map_spe_prop(spe, "problem");
	if (!spu->problem)
		goto out_unmap;

	spu->priv1 = map_spe_prop(spe, "priv1");
	if (!spu->priv1)
		goto out_unmap;

	spu->priv2 = map_spe_prop(spe, "priv2");
	if (!spu->priv2)
		goto out_unmap;
	ret = 0;
	goto out;

out_unmap:
	spu_unmap(spu);
out:
	return ret;
}

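/* The "node-id" property lives on the grandparent CPU node of the SPE. */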
static int __init find_spu_node_id(struct device_node *spe)
{
	unsigned int *id;
	struct device_node *cpu;

	cpu = spe->parent->parent;
	id = (unsigned int *)get_property(cpu, "node-id", NULL);

	return id ? *id : 0;
}

static int __init create_spu(struct device_node *spe)
{
	struct spu *spu;
	int ret;
	static int number;

	ret = -ENOMEM;
	spu = kmalloc(sizeof (*spu), GFP_KERNEL);
	if (!spu)
		goto out;

	ret = spu_map_device(spu, spe);
	if (ret)
		goto out_free;

	spu->node = find_spu_node_id(spe);
	spu->stop_code = 0;
	spu->slb_replace = 0;
	spu->mm = NULL;
	spu->class_0_pending = 0;
	spu->flags = 0UL;
	spin_lock_init(&spu->register_lock);

	out_be64(&spu->priv1->mfc_sdr_RW, mfspr(SPRN_SDR1));
	out_be64(&spu->priv1->mfc_sr1_RW, 0x33);

	init_waitqueue_head(&spu->stop_wq);
	init_waitqueue_head(&spu->wbox_wq);
	init_waitqueue_head(&spu->ibox_wq);

	spu->ibox_fasync = NULL;
	spu->wbox_fasync = NULL;

	down(&spu_mutex);
	spu->number = number++;
	ret = spu_request_irqs(spu);
	if (ret)
		goto out_unmap;

	list_add(&spu->list, &spu_list);
	up(&spu_mutex);

	pr_debug("Using SPE %s %02x %p %p %p %p %d\n",
		spu->name, spu->isrc, spu->local_store,
		spu->problem, spu->priv1, spu->priv2, spu->number);
	goto out;

out_unmap:
	up(&spu_mutex);
	spu_unmap(spu);
out_free:
	kfree(spu);
out:
	return ret;
}

static void destroy_spu(struct spu *spu)
{
	list_del_init(&spu->list);

	spu_free_irqs(spu);
	spu_unmap(spu);
	kfree(spu);
}

static void cleanup_spu_base(void)
{
	struct spu *spu, *tmp;
	down(&spu_mutex);
	list_for_each_entry_safe(spu, tmp, &spu_list, list)
		destroy_spu(spu);
	up(&spu_mutex);
}
module_exit(cleanup_spu_base);

static int __init init_spu_base(void)
{
	struct device_node *node;
	int ret;

	ret = -ENODEV;
	for (node = of_find_node_by_type(NULL, "spe");
			node; node = of_find_node_by_type(node, "spe")) {
		ret = create_spu(node);
		if (ret) {
			printk(KERN_WARNING "%s: Error initializing %s\n",
				__FUNCTION__, node->name);
			cleanup_spu_base();
			break;
		}
	}
	/* in some old firmware versions, the spe is called 'spc', so we
	   look for that as well */
	for (node = of_find_node_by_type(NULL, "spc");
			node; node = of_find_node_by_type(node, "spc")) {
		ret = create_spu(node);
		if (ret) {
			printk(KERN_WARNING "%s: Error initializing %s\n",
				__FUNCTION__, node->name);
			cleanup_spu_base();
			break;
		}
	}
	return ret;
}
module_init(init_spu_base);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>");