/*
 * Low-level SPU handling
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
 *
 * Author: Arnd Bergmann <arndb@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#undef DEBUG

#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/poll.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/wait.h>

#include <asm/firmware.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <linux/mutex.h>
#include <asm/spu.h>
#include <asm/spu_priv1.h>
#include <asm/mmu_context.h>
#include <asm/xmon.h>

#include "interrupt.h"

const struct spu_priv1_ops *spu_priv1_ops;

EXPORT_SYMBOL_GPL(spu_priv1_ops);

static int __spu_trap_invalid_dma(struct spu *spu)
{
	pr_debug("%s\n", __FUNCTION__);
	spu->dma_callback(spu, SPE_EVENT_INVALID_DMA);
	return 0;
}

static int __spu_trap_dma_align(struct spu *spu)
{
	pr_debug("%s\n", __FUNCTION__);
	spu->dma_callback(spu, SPE_EVENT_DMA_ALIGNMENT);
	return 0;
}

static int __spu_trap_error(struct spu *spu)
{
	pr_debug("%s\n", __FUNCTION__);
	spu->dma_callback(spu, SPE_EVENT_SPE_ERROR);
	return 0;
}

static void spu_restart_dma(struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	if (!test_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags))
		out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESTART_DMA_COMMAND);
}

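/*
 * SLB miss from the SPE side: build and install a new SLB entry for
 * the faulting effective address, then let the MFC retry its DMA.
 */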
static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	struct mm_struct *mm = spu->mm;
	u64 esid, vsid, llp;

	pr_debug("%s\n", __FUNCTION__);

	if (test_bit(SPU_CONTEXT_SWITCH_ACTIVE, &spu->flags)) {
		/* SLBs are pre-loaded for context switch, so
		 * we should never get here!
		 */
		printk("%s: invalid access during switch!\n", __func__);
		return 1;
	}
	esid = (ea & ESID_MASK) | SLB_ESID_V;

	switch(REGION_ID(ea)) {
	case USER_REGION_ID:
#ifdef CONFIG_HUGETLB_PAGE
		if (in_hugepage_area(mm->context, ea))
			llp = mmu_psize_defs[mmu_huge_psize].sllp;
		else
#endif
			llp = mmu_psize_defs[mmu_virtual_psize].sllp;
		vsid = (get_vsid(mm->context.id, ea) << SLB_VSID_SHIFT) |
				SLB_VSID_USER | llp;
		break;
	case VMALLOC_REGION_ID:
		llp = mmu_psize_defs[mmu_virtual_psize].sllp;
		vsid = (get_kernel_vsid(ea) << SLB_VSID_SHIFT) |
			SLB_VSID_KERNEL | llp;
		break;
	case KERNEL_REGION_ID:
		llp = mmu_psize_defs[mmu_linear_psize].sllp;
		vsid = (get_kernel_vsid(ea) << SLB_VSID_SHIFT) |
			SLB_VSID_KERNEL | llp;
		break;
	default:
		/* Future: support kernel segments so that drivers
		 * can use SPUs.
		 */
		pr_debug("invalid region access at %016lx\n", ea);
		return 1;
	}

	out_be64(&priv2->slb_index_W, spu->slb_replace);
	out_be64(&priv2->slb_vsid_RW, vsid);
	out_be64(&priv2->slb_esid_RW, esid);

	spu->slb_replace++;
	if (spu->slb_replace >= 8)
		spu->slb_replace = 0;

	spu_restart_dma(spu);

	return 0;
}

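/*
 * Hash table miss from the SPE side: kernel-space faults are resolved
 * right here via hash_page(); user-space faults are recorded in
 * spu->dar/dsisr and deferred to process context through the stop
 * callback (see spu_irq_class_1_bottom further down).
 */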
extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap); //XXX
static int __spu_trap_data_map(struct spu *spu, unsigned long ea, u64 dsisr)
{
	pr_debug("%s, %lx, %lx\n", __FUNCTION__, dsisr, ea);

	/* Handle kernel space hash faults immediately.
	   User hash faults need to be deferred to process context. */
	if ((dsisr & MFC_DSISR_PTE_NOT_FOUND)
	    && REGION_ID(ea) != USER_REGION_ID
	    && hash_page(ea, _PAGE_PRESENT, 0x300) == 0) {
		spu_restart_dma(spu);
		return 0;
	}

	if (test_bit(SPU_CONTEXT_SWITCH_ACTIVE, &spu->flags)) {
		printk("%s: invalid access during switch!\n", __func__);
		return 1;
	}

	spu->dar = ea;
	spu->dsisr = dsisr;
	mb();
	spu->stop_callback(spu);
	return 0;
}

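/*
 * Class 0 interrupts signal SPU error conditions (DMA alignment,
 * invalid MFC DMA, SPU error).  The hard irq handler only flags the
 * event; spu_irq_class_0_bottom() decodes and dispatches it later.
 */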
static irqreturn_t
spu_irq_class_0(int irq, void *data)
{
	struct spu *spu;

	spu = data;
	spu->class_0_pending = 1;
	spu->stop_callback(spu);

	return IRQ_HANDLED;
}

int
spu_irq_class_0_bottom(struct spu *spu)
{
	unsigned long stat, mask;

	spu->class_0_pending = 0;

	mask = spu_int_mask_get(spu, 0);
	stat = spu_int_stat_get(spu, 0);

	stat &= mask;

	if (stat & 1) /* invalid DMA alignment */
		__spu_trap_dma_align(spu);

	if (stat & 2) /* invalid MFC DMA */
		__spu_trap_invalid_dma(spu);

	if (stat & 4) /* error on SPU */
		__spu_trap_error(spu);

	spu_int_stat_clear(spu, 0, stat);

	return (stat & 0x7) ? -EIO : 0;
}
EXPORT_SYMBOL_GPL(spu_irq_class_0_bottom);

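/*
 * Class 1 interrupts are the MFC translation faults: segment misses
 * are fixed up directly, page table misses are handed off to
 * __spu_trap_data_map().
 */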
static irqreturn_t
spu_irq_class_1(int irq, void *data)
{
	struct spu *spu;
	unsigned long stat, mask, dar, dsisr;

	spu = data;

	/* atomically read & clear class1 status. */
	spin_lock(&spu->register_lock);
	mask  = spu_int_mask_get(spu, 1);
	stat  = spu_int_stat_get(spu, 1) & mask;
	dar   = spu_mfc_dar_get(spu);
	dsisr = spu_mfc_dsisr_get(spu);
	if (stat & 2) /* mapping fault */
		spu_mfc_dsisr_set(spu, 0ul);
	spu_int_stat_clear(spu, 1, stat);
	spin_unlock(&spu->register_lock);
	pr_debug("%s: %lx %lx %lx %lx\n", __FUNCTION__, mask, stat,
			dar, dsisr);

	if (stat & 1) /* segment fault */
		__spu_trap_data_seg(spu, dar);

	if (stat & 2) { /* mapping fault */
		__spu_trap_data_map(spu, dar, dsisr);
	}

	if (stat & 4) /* ls compare & suspend on get */
		;

	if (stat & 8) /* ls compare & suspend on put */
		;

	return stat ? IRQ_HANDLED : IRQ_NONE;
}
EXPORT_SYMBOL_GPL(spu_irq_class_1_bottom);

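/*
 * Class 2 interrupts cover mailbox traffic, stop-and-signal, SPU halt
 * and DMA tag-group completion; each source is forwarded to the
 * callback registered by the owning context.
 */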
static irqreturn_t
spu_irq_class_2(int irq, void *data)
{
	struct spu *spu;
	unsigned long stat;
	unsigned long mask;

	spu = data;
	spin_lock(&spu->register_lock);
	stat = spu_int_stat_get(spu, 2);
	mask = spu_int_mask_get(spu, 2);
	/* ignore interrupts we're not waiting for */
	stat &= mask;
	/*
	 * mailbox interrupts (0x1 and 0x10) are level triggered.
	 * mask them now before acknowledging.
	 */
	if (stat & 0x11)
		spu_int_mask_and(spu, 2, ~(stat & 0x11));
	/* acknowledge all interrupts before the callbacks */
	spu_int_stat_clear(spu, 2, stat);
	spin_unlock(&spu->register_lock);

	pr_debug("class 2 interrupt %d, %lx, %lx\n", irq, stat, mask);

	if (stat & 1)  /* PPC core mailbox */
		spu->ibox_callback(spu);

	if (stat & 2) /* SPU stop-and-signal */
		spu->stop_callback(spu);

	if (stat & 4) /* SPU halted */
		spu->stop_callback(spu);

	if (stat & 8) /* DMA tag group complete */
		spu->mfc_callback(spu);

	if (stat & 0x10) /* SPU mailbox threshold */
		spu->wbox_callback(spu);

	return stat ? IRQ_HANDLED : IRQ_NONE;
}

static int spu_request_irqs(struct spu *spu)
{
	int ret = 0;

	if (spu->irqs[0] != NO_IRQ) {
		snprintf(spu->irq_c0, sizeof (spu->irq_c0), "spe%02d.0",
			 spu->number);
		ret = request_irq(spu->irqs[0], spu_irq_class_0,
				  IRQF_DISABLED,
				  spu->irq_c0, spu);
		if (ret)
			goto bail0;
	}
	if (spu->irqs[1] != NO_IRQ) {
		snprintf(spu->irq_c1, sizeof (spu->irq_c1), "spe%02d.1",
			 spu->number);
		ret = request_irq(spu->irqs[1], spu_irq_class_1,
				  IRQF_DISABLED,
				  spu->irq_c1, spu);
		if (ret)
			goto bail1;
	}
	if (spu->irqs[2] != NO_IRQ) {
		snprintf(spu->irq_c2, sizeof (spu->irq_c2), "spe%02d.2",
			 spu->number);
		ret = request_irq(spu->irqs[2], spu_irq_class_2,
				  IRQF_DISABLED,
				  spu->irq_c2, spu);
		if (ret)
			goto bail2;
	}
	return 0;

bail2:
	if (spu->irqs[1] != NO_IRQ)
		free_irq(spu->irqs[1], spu);
bail1:
	if (spu->irqs[0] != NO_IRQ)
		free_irq(spu->irqs[0], spu);
bail0:
	return ret;
}

static void spu_free_irqs(struct spu *spu)
{
	if (spu->irqs[0] != NO_IRQ)
		free_irq(spu->irqs[0], spu);
	if (spu->irqs[1] != NO_IRQ)
		free_irq(spu->irqs[1], spu);
	if (spu->irqs[2] != NO_IRQ)
		free_irq(spu->irqs[2], spu);
}

static struct list_head spu_list[MAX_NUMNODES];
static LIST_HEAD(spu_full_list);
static DEFINE_MUTEX(spu_mutex);

static void spu_init_channels(struct spu *spu)
{
	static const struct {
		unsigned channel;
		unsigned count;
	} zero_list[] = {
		{ 0x00, 1, }, { 0x01, 1, }, { 0x03, 1, }, { 0x04, 1, },
		{ 0x18, 1, }, { 0x19, 1, }, { 0x1b, 1, }, { 0x1d, 1, },
	}, count_list[] = {
		{ 0x00, 0, }, { 0x03, 0, }, { 0x04, 0, }, { 0x15, 16, },
		{ 0x17, 1, }, { 0x18, 0, }, { 0x19, 0, }, { 0x1b, 0, },
		{ 0x1c, 1, }, { 0x1d, 0, }, { 0x1e, 1, },
	};
	struct spu_priv2 __iomem *priv2;
	int i;

	priv2 = spu->priv2;

	/* initialize all channel data to zero */
	for (i = 0; i < ARRAY_SIZE(zero_list); i++) {
		int count;

		out_be64(&priv2->spu_chnlcntptr_RW, zero_list[i].channel);
		for (count = 0; count < zero_list[i].count; count++)
			out_be64(&priv2->spu_chnldata_RW, 0);
	}

	/* initialize channel counts to meaningful values */
	for (i = 0; i < ARRAY_SIZE(count_list); i++) {
		out_be64(&priv2->spu_chnlcntptr_RW, count_list[i].channel);
		out_be64(&priv2->spu_chnlcnt_RW, count_list[i].count);
	}
}

struct spu *spu_alloc_node(int node)
{
	struct spu *spu = NULL;

	mutex_lock(&spu_mutex);
	if (!list_empty(&spu_list[node])) {
		spu = list_entry(spu_list[node].next, struct spu, list);
		list_del_init(&spu->list);
		pr_debug("Got SPU %d %d\n", spu->number, spu->node);
		spu_init_channels(spu);
	}
	mutex_unlock(&spu_mutex);

	return spu;
}
EXPORT_SYMBOL_GPL(spu_alloc_node);

struct spu *spu_alloc(void)
{
	struct spu *spu = NULL;
	int node;

	for (node = 0; node < MAX_NUMNODES; node++) {
		spu = spu_alloc_node(node);
		if (spu)
			break;
	}

	return spu;
}

void spu_free(struct spu *spu)
{
	mutex_lock(&spu_mutex);
	list_add_tail(&spu->list, &spu_list[spu->node]);
	mutex_unlock(&spu_mutex);
}
EXPORT_SYMBOL_GPL(spu_free);

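/*
 * Resolve a deferred MFC page fault in process context by walking the
 * owning mm's VMAs, much like the generic page fault path does.
 */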
static int spu_handle_mm_fault(struct spu *spu)
{
	struct mm_struct *mm = spu->mm;
	struct vm_area_struct *vma;
	u64 ea, dsisr, is_write;
	int ret;

	ea = spu->dar;
	dsisr = spu->dsisr;
#if 0
	if (!IS_VALID_EA(ea)) {
		return -EFAULT;
	}
#endif /* XXX */
	if (mm == NULL) {
		return -EFAULT;
	}
	if (mm->pgd == NULL) {
		return -EFAULT;
	}

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, ea);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= ea)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
#if 0
	if (expand_stack(vma, ea))
		goto bad_area;
#endif /* XXX */
good_area:
	is_write = dsisr & MFC_DSISR_ACCESS_PUT;
	if (is_write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	} else {
		if (dsisr & MFC_DSISR_ACCESS_DENIED)
			goto bad_area;
		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto bad_area;
	}
	ret = 0;
	switch (handle_mm_fault(mm, vma, ea, is_write)) {
	case VM_FAULT_MINOR:
		current->min_flt++;
		break;
	case VM_FAULT_MAJOR:
		current->maj_flt++;
		break;
	case VM_FAULT_SIGBUS:
		ret = -EFAULT;
		goto bad_area;
	case VM_FAULT_OOM:
		ret = -ENOMEM;
		goto bad_area;
	default:
		BUG();
	}
	up_read(&mm->mmap_sem);
	return ret;

bad_area:
	up_read(&mm->mmap_sem);
	return -EFAULT;
}

int spu_irq_class_1_bottom(struct spu *spu)
{
	u64 ea, dsisr, access, error = 0UL;
	int ret = 0;

	ea = spu->dar;
	dsisr = spu->dsisr;
	if (dsisr & (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED)) {
		u64 flags;

		access = (_PAGE_PRESENT | _PAGE_USER);
		access |= (dsisr & MFC_DSISR_ACCESS_PUT) ? _PAGE_RW : 0UL;
		local_irq_save(flags);
		if (hash_page(ea, access, 0x300) != 0)
			error |= CLASS1_ENABLE_STORAGE_FAULT_INTR;
		local_irq_restore(flags);
	}
	if (error & CLASS1_ENABLE_STORAGE_FAULT_INTR) {
		if ((ret = spu_handle_mm_fault(spu)) != 0)
			error |= CLASS1_ENABLE_STORAGE_FAULT_INTR;
		else
			error &= ~CLASS1_ENABLE_STORAGE_FAULT_INTR;
	}
	spu->dar = 0UL;
	spu->dsisr = 0UL;
	if (!error) {
		spu_restart_dma(spu);
	} else {
		__spu_trap_invalid_dma(spu);
	}
	return ret;
}

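/*
 * Legacy device-tree layout: each SPE resource is described by a raw
 * { address, len } property, registered with the memory hotplug code
 * and mapped with ioremap().
 */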
static int __init find_spu_node_id(struct device_node *spe)
{
	const unsigned int *id;
	struct device_node *cpu;
	cpu = spe->parent->parent;
	id = get_property(cpu, "node-id", NULL);
	return id ? *id : 0;
}

static int __init cell_spuprop_present(struct spu *spu, struct device_node *spe,
		const char *prop)
{
	static DEFINE_MUTEX(add_spumem_mutex);

	const struct address_prop {
		unsigned long address;
		unsigned int len;
	} __attribute__((packed)) *p;
	int proplen;

	unsigned long start_pfn, nr_pages;
	struct pglist_data *pgdata;
	struct zone *zone;
	int ret;

	p = get_property(spe, prop, &proplen);
	WARN_ON(proplen != sizeof (*p));

	start_pfn = p->address >> PAGE_SHIFT;
	nr_pages = ((unsigned long)p->len + PAGE_SIZE - 1) >> PAGE_SHIFT;

	pgdata = NODE_DATA(spu->nid);
	zone = pgdata->node_zones;

	/* XXX rethink locking here */
	mutex_lock(&add_spumem_mutex);
	ret = __add_pages(zone, start_pfn, nr_pages);
	mutex_unlock(&add_spumem_mutex);

	return ret;
}

static void __iomem * __init map_spe_prop(struct spu *spu,
		struct device_node *n, const char *name)
{
	const struct address_prop {
		unsigned long address;
		unsigned int len;
	} __attribute__((packed)) *prop;

	const void *p;
	int proplen;
	void __iomem *ret = NULL;
	int err = 0;

	p = get_property(n, name, &proplen);
	if (proplen != sizeof (struct address_prop))
		return NULL;

	prop = p;

	err = cell_spuprop_present(spu, n, name);
	if (err && (err != -EEXIST))
		goto out;

	ret = ioremap(prop->address, prop->len);

 out:
	return ret;
}

static void spu_unmap(struct spu *spu)
{
	iounmap(spu->priv2);
	iounmap(spu->priv1);
	iounmap(spu->problem);
	iounmap((__force u8 __iomem *)spu->local_store);
}

/* This function shall be abstracted for HV platforms */
static int __init spu_map_interrupts_old(struct spu *spu, struct device_node *np)
{
	unsigned int isrc;
	const u32 *tmp;

	/* Get the interrupt source unit from the device-tree */
	tmp = get_property(np, "isrc", NULL);
	if (!tmp)
		return -ENODEV;
	isrc = tmp[0];

	/* Add the node number */
	isrc |= spu->node << IIC_IRQ_NODE_SHIFT;

	/* Now map interrupts of all 3 classes */
	spu->irqs[0] = irq_create_mapping(NULL, IIC_IRQ_CLASS_0 | isrc);
	spu->irqs[1] = irq_create_mapping(NULL, IIC_IRQ_CLASS_1 | isrc);
	spu->irqs[2] = irq_create_mapping(NULL, IIC_IRQ_CLASS_2 | isrc);

	/* Right now, we only fail if class 2 failed */
	return spu->irqs[2] == NO_IRQ ? -EINVAL : 0;
}

static int __init spu_map_device_old(struct spu *spu, struct device_node *node)
{
	const char *prop;
	int ret;

	ret = -ENODEV;
	spu->name = get_property(node, "name", NULL);
	if (!spu->name)
		goto out;

	prop = get_property(node, "local-store", NULL);
	if (!prop)
		goto out;
	spu->local_store_phys = *(unsigned long *)prop;

	/* we use local store as ram, not io memory */
	spu->local_store = (void __force *)
		map_spe_prop(spu, node, "local-store");
	if (!spu->local_store)
		goto out;

	prop = get_property(node, "problem", NULL);
	if (!prop)
		goto out_unmap;
	spu->problem_phys = *(unsigned long *)prop;

	spu->problem = map_spe_prop(spu, node, "problem");
	if (!spu->problem)
		goto out_unmap;

	spu->priv1 = map_spe_prop(spu, node, "priv1");
	/* priv1 is not available on a hypervisor */

	spu->priv2 = map_spe_prop(spu, node, "priv2");
	if (!spu->priv2)
		goto out_unmap;
	ret = 0;
	goto out;

out_unmap:
	spu_unmap(spu);
out:
	return ret;
}

static int __init spu_map_interrupts(struct spu *spu, struct device_node *np)
{
	struct of_irq oirq;
	int ret;
	int i;

	for (i=0; i < 3; i++) {
		ret = of_irq_map_one(np, i, &oirq);
		if (ret) {
			pr_debug("spu_new: failed to get irq %d\n", i);
			goto err;
		}
		ret = -EINVAL;
		pr_debug("  irq %d no 0x%x on %s\n", i, oirq.specifier[0],
			 oirq.controller->full_name);
		spu->irqs[i] = irq_create_of_mapping(oirq.controller,
					oirq.specifier, oirq.size);
		if (spu->irqs[i] == NO_IRQ) {
			pr_debug("spu_new: failed to map it !\n");
			goto err;
		}
	}
	return 0;

err:
	pr_debug("failed to map irq %x for spu %s\n", *oirq.specifier, spu->name);
	for (; i >= 0; i--) {
		if (spu->irqs[i] != NO_IRQ)
			irq_dispose_mapping(spu->irqs[i]);
	}
	return ret;
}

static int spu_map_resource(struct device_node *node, int nr,
		void __iomem** virt, unsigned long *phys)
{
	struct resource resource = { };
	int ret;

	ret = of_address_to_resource(node, nr, &resource);
	if (ret)
		goto out;

	if (phys)
		*phys = resource.start;
	*virt = ioremap(resource.start, resource.end - resource.start);
	if (!*virt)
		ret = -EINVAL;

out:
	return ret;
}

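/*
 * New-style device-tree layout: local store, problem state, priv2 and
 * (on non-LPAR systems) priv1 are standard address resources 0 to 3.
 */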
static int __init spu_map_device(struct spu *spu, struct device_node *node)
{
	int ret = -ENODEV;
	spu->name = get_property(node, "name", NULL);
	if (!spu->name)
		goto out;

	ret = spu_map_resource(node, 0, (void __iomem**)&spu->local_store,
					&spu->local_store_phys);
	if (ret) {
		pr_debug("spu_new: failed to map %s resource 0\n",
			 node->full_name);
		goto out;
	}
	ret = spu_map_resource(node, 1, (void __iomem**)&spu->problem,
					&spu->problem_phys);
	if (ret) {
		pr_debug("spu_new: failed to map %s resource 1\n",
			 node->full_name);
		goto out_unmap;
	}
	ret = spu_map_resource(node, 2, (void __iomem**)&spu->priv2,
					NULL);
	if (ret) {
		pr_debug("spu_new: failed to map %s resource 2\n",
			 node->full_name);
		goto out_unmap;
	}

	if (!firmware_has_feature(FW_FEATURE_LPAR))
		ret = spu_map_resource(node, 3, (void __iomem**)&spu->priv1,
					NULL);
	if (ret) {
		pr_debug("spu_new: failed to map %s resource 3\n",
			 node->full_name);
		goto out_unmap;
	}
	pr_debug("spu_new: %s maps:\n", node->full_name);
	pr_debug("  local store   : 0x%016lx -> 0x%p\n",
		 spu->local_store_phys, spu->local_store);
	pr_debug("  problem state : 0x%016lx -> 0x%p\n",
		 spu->problem_phys, spu->problem);
	pr_debug("  priv2         : 0x%p\n", spu->priv2);
	pr_debug("  priv1         : 0x%p\n", spu->priv1);

	return 0;

out_unmap:
	spu_unmap(spu);
out:
	pr_debug("failed to map spe %s: %d\n", spu->name, ret);
	return ret;
}

struct sysdev_class spu_sysdev_class = {
	set_kset_name("spu")
};

int spu_add_sysdev_attr(struct sysdev_attribute *attr)
{
	struct spu *spu;
	mutex_lock(&spu_mutex);

	list_for_each_entry(spu, &spu_full_list, full_list)
		sysdev_create_file(&spu->sysdev, attr);

	mutex_unlock(&spu_mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(spu_add_sysdev_attr);

int spu_add_sysdev_attr_group(struct attribute_group *attrs)
{
	struct spu *spu;
	mutex_lock(&spu_mutex);

	list_for_each_entry(spu, &spu_full_list, full_list)
		sysfs_create_group(&spu->sysdev.kobj, attrs);

	mutex_unlock(&spu_mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(spu_add_sysdev_attr_group);


void spu_remove_sysdev_attr(struct sysdev_attribute *attr)
{
	struct spu *spu;
	mutex_lock(&spu_mutex);

	list_for_each_entry(spu, &spu_full_list, full_list)
		sysdev_remove_file(&spu->sysdev, attr);

	mutex_unlock(&spu_mutex);
}
EXPORT_SYMBOL_GPL(spu_remove_sysdev_attr);

void spu_remove_sysdev_attr_group(struct attribute_group *attrs)
{
	struct spu *spu;
	mutex_lock(&spu_mutex);

	list_for_each_entry(spu, &spu_full_list, full_list)
		sysfs_remove_group(&spu->sysdev.kobj, attrs);

	mutex_unlock(&spu_mutex);
}
EXPORT_SYMBOL_GPL(spu_remove_sysdev_attr_group);

static int spu_create_sysdev(struct spu *spu)
{
	int ret;

	spu->sysdev.id = spu->number;
	spu->sysdev.cls = &spu_sysdev_class;
	ret = sysdev_register(&spu->sysdev);
	if (ret) {
		printk(KERN_ERR "Can't register SPU %d with sysfs\n",
				spu->number);
		return ret;
	}

	sysfs_add_device_to_node(&spu->sysdev, spu->node);

	return 0;
}

static void spu_destroy_sysdev(struct spu *spu)
{
	sysfs_remove_device_from_node(&spu->sysdev, spu->node);
	sysdev_unregister(&spu->sysdev);
}

static int __init create_spu(struct device_node *spe)
{
	struct spu *spu;
	int ret;
	static int number;

	ret = -ENOMEM;
	spu = kzalloc(sizeof (*spu), GFP_KERNEL);
	if (!spu)
		goto out;

	spu->node = find_spu_node_id(spe);
	if (spu->node >= MAX_NUMNODES) {
		printk(KERN_WARNING "SPE %s on node %d ignored,"
		       " node number too big\n", spe->full_name, spu->node);
		printk(KERN_WARNING "Check if CONFIG_NUMA is enabled.\n");
		return -ENODEV;
	}
	spu->nid = of_node_to_nid(spe);
	if (spu->nid == -1)
		spu->nid = 0;

	ret = spu_map_device(spu, spe);
	/* try old method */
	if (ret)
		ret = spu_map_device_old(spu, spe);
	if (ret)
		goto out_free;

	ret = spu_map_interrupts(spu, spe);
	if (ret)
		ret = spu_map_interrupts_old(spu, spe);
	if (ret)
		goto out_unmap;
	spin_lock_init(&spu->register_lock);
	spu_mfc_sdr_setup(spu);
	spu_mfc_sr1_set(spu, 0x33);
	mutex_lock(&spu_mutex);

	spu->number = number++;
	ret = spu_request_irqs(spu);
	if (ret)
		goto out_unlock;

	ret = spu_create_sysdev(spu);
	if (ret)
		goto out_free_irqs;

	list_add(&spu->list, &spu_list[spu->node]);
	list_add(&spu->full_list, &spu_full_list);
	spu->devnode = of_node_get(spe);

	mutex_unlock(&spu_mutex);

	pr_debug("Using SPE %s %p %p %p %p %d\n",
		 spu->name, spu->local_store,
		 spu->problem, spu->priv1, spu->priv2, spu->number);
	goto out;

out_free_irqs:
	spu_free_irqs(spu);
out_unlock:
	mutex_unlock(&spu_mutex);
out_unmap:
	spu_unmap(spu);
out_free:
	kfree(spu);
out:
	return ret;
}

static void destroy_spu(struct spu *spu)
{
	list_del_init(&spu->list);
	list_del_init(&spu->full_list);

	of_node_put(spu->devnode);

	spu_destroy_sysdev(spu);
	spu_free_irqs(spu);
	spu_unmap(spu);
	kfree(spu);
}

static void cleanup_spu_base(void)
{
	struct spu *spu, *tmp;
	int node;

	mutex_lock(&spu_mutex);
	for (node = 0; node < MAX_NUMNODES; node++) {
		list_for_each_entry_safe(spu, tmp, &spu_list[node], list)
			destroy_spu(spu);
	}
	mutex_unlock(&spu_mutex);
	sysdev_class_unregister(&spu_sysdev_class);
}
module_exit(cleanup_spu_base);

static int __init init_spu_base(void)
{
	struct device_node *node;
	int i, ret;

	/* create sysdev class for spus */
	ret = sysdev_class_register(&spu_sysdev_class);
	if (ret)
		return ret;

	for (i = 0; i < MAX_NUMNODES; i++)
		INIT_LIST_HEAD(&spu_list[i]);

	ret = -ENODEV;
	for (node = of_find_node_by_type(NULL, "spe");
			node; node = of_find_node_by_type(node, "spe")) {
		ret = create_spu(node);
		if (ret) {
			printk(KERN_WARNING "%s: Error initializing %s\n",
				__FUNCTION__, node->name);
			cleanup_spu_base();
			break;
		}
	}

	xmon_register_spus(&spu_full_list);

	return ret;
}
module_init(init_spu_base);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>");