/*
 * Low-level SPU handling
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
 *
 * Author: Arnd Bergmann <arndb@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#undef DEBUG

#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/wait.h>

#include <asm/io.h>
#include <asm/prom.h>
#include <asm/spu.h>
#include <asm/mmu_context.h>

#include "interrupt.h"

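/*
 * Low-level trap handlers, invoked by the class 0/1/2 interrupt
 * handlers below.  DMA and SPU errors are forwarded to the current
 * task as signals; translation misses raised by the MFC are resolved
 * or deferred by the data_seg/data_map handlers.
 */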
static int __spu_trap_invalid_dma(struct spu *spu)
{
	pr_debug("%s\n", __func__);
	force_sig(SIGBUS, /* info, */ current);
	return 0;
}

static int __spu_trap_dma_align(struct spu *spu)
{
	pr_debug("%s\n", __func__);
	force_sig(SIGBUS, /* info, */ current);
	return 0;
}

static int __spu_trap_error(struct spu *spu)
{
	pr_debug("%s\n", __func__);
	force_sig(SIGILL, /* info, */ current);
	return 0;
}

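/*
 * Restart the suspended MFC DMA queue, unless a context switch is
 * pending; in that case the switch code is responsible for the
 * queue state.
 */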
static void spu_restart_dma(struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	if (!test_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags))
		out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESTART_DMA_COMMAND);
}

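/*
 * SLB miss from the SPE: build a matching SLB entry and install it in
 * one of the eight slots, replaced round-robin.  The SLB is preloaded
 * during context switch, so a miss at that point is an error.
 */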
static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	struct mm_struct *mm = spu->mm;
	u64 esid, vsid;

	pr_debug("%s\n", __func__);

	if (test_bit(SPU_CONTEXT_SWITCH_ACTIVE, &spu->flags)) {
		/* SLBs are pre-loaded for context switch, so
		 * we should never get here!
		 */
		printk(KERN_ERR "%s: invalid access during switch!\n", __func__);
		return 1;
	}
	if (!mm || (REGION_ID(ea) != USER_REGION_ID)) {
		/* Future: support kernel segments so that drivers
		 * can use SPUs.
		 */
		pr_debug("invalid region access at %016lx\n", ea);
		return 1;
	}

	esid = (ea & ESID_MASK) | SLB_ESID_V;
	vsid = (get_vsid(mm->context.id, ea) << SLB_VSID_SHIFT) | SLB_VSID_USER;
	if (in_hugepage_area(mm->context, ea))
		vsid |= SLB_VSID_L;

	out_be64(&priv2->slb_index_W, spu->slb_replace);
	out_be64(&priv2->slb_vsid_RW, vsid);
	out_be64(&priv2->slb_esid_RW, esid);

	spu->slb_replace++;
	if (spu->slb_replace >= 8)
		spu->slb_replace = 0;

	spu_restart_dma(spu);

	return 0;
}

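/*
 * Hash table miss from the SPE: kernel space faults are resolved
 * immediately through hash_page(), user space faults are recorded and
 * deferred to process context via the stop callback.
 */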
extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap); //XXX
static int __spu_trap_data_map(struct spu *spu, unsigned long ea, u64 dsisr)
{
	pr_debug("%s, %lx, %lx\n", __func__, dsisr, ea);

	/* Handle kernel space hash faults immediately.
	   User hash faults need to be deferred to process context. */
	if ((dsisr & MFC_DSISR_PTE_NOT_FOUND)
	    && REGION_ID(ea) != USER_REGION_ID
	    && hash_page(ea, _PAGE_PRESENT, 0x300) == 0) {
		spu_restart_dma(spu);
		return 0;
	}

	if (test_bit(SPU_CONTEXT_SWITCH_ACTIVE, &spu->flags)) {
		printk(KERN_ERR "%s: invalid access during switch!\n", __func__);
		return 1;
	}

	spu->dar = ea;
	spu->dsisr = dsisr;
	mb();
	if (spu->stop_callback)
		spu->stop_callback(spu);
	return 0;
}

static int __spu_trap_mailbox(struct spu *spu)
{
	if (spu->ibox_callback)
		spu->ibox_callback(spu);

	/* atomically disable SPU mailbox interrupts */
	spin_lock(&spu->register_lock);
	spu_int_mask_and(spu, 2, ~0x1);
	spin_unlock(&spu->register_lock);
	return 0;
}

static int __spu_trap_stop(struct spu *spu)
{
	pr_debug("%s\n", __func__);
	spu->stop_code = in_be32(&spu->problem->spu_status_R);
	if (spu->stop_callback)
		spu->stop_callback(spu);
	return 0;
}

static int __spu_trap_halt(struct spu *spu)
{
	pr_debug("%s\n", __func__);
	spu->stop_code = in_be32(&spu->problem->spu_status_R);
	if (spu->stop_callback)
		spu->stop_callback(spu);
	return 0;
}

static int __spu_trap_tag_group(struct spu *spu)
{
	pr_debug("%s\n", __func__);
	if (spu->mfc_callback)
		spu->mfc_callback(spu);
	return 0;
}

static int __spu_trap_spubox(struct spu *spu)
{
	if (spu->wbox_callback)
		spu->wbox_callback(spu);

	/* atomically disable SPU mailbox interrupts */
	spin_lock(&spu->register_lock);
	spu_int_mask_and(spu, 2, ~0x10);
	spin_unlock(&spu->register_lock);
	return 0;
}

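/*
 * Class 0 interrupts report errors: invalid MFC DMA, DMA alignment
 * and SPU errors.  The top half only latches the event; the exported
 * bottom half decodes and clears the status.
 */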
static irqreturn_t
spu_irq_class_0(int irq, void *data, struct pt_regs *regs)
{
	struct spu *spu;

	spu = data;
	spu->class_0_pending = 1;
	if (spu->stop_callback)
		spu->stop_callback(spu);

	return IRQ_HANDLED;
}

int
spu_irq_class_0_bottom(struct spu *spu)
{
	unsigned long stat, mask;

	spu->class_0_pending = 0;

	mask = spu_int_mask_get(spu, 0);
	stat = spu_int_stat_get(spu, 0);

	stat &= mask;

	if (stat & 1) /* invalid MFC DMA */
		__spu_trap_invalid_dma(spu);

	if (stat & 2) /* invalid DMA alignment */
		__spu_trap_dma_align(spu);

	if (stat & 4) /* error on SPU */
		__spu_trap_error(spu);

	spu_int_stat_clear(spu, 0, stat);

	return (stat & 0x7) ? -EIO : 0;
}
EXPORT_SYMBOL_GPL(spu_irq_class_0_bottom);

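/*
 * Class 1 interrupts report address translation faults raised by the
 * MFC: segment misses are fixed up on the spot, mapping faults are
 * deferred via __spu_trap_data_map().
 */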
static irqreturn_t
spu_irq_class_1(int irq, void *data, struct pt_regs *regs)
{
	struct spu *spu;
	unsigned long stat, mask, dar, dsisr;

	spu = data;

	/* atomically read & clear class1 status. */
	spin_lock(&spu->register_lock);
	mask = spu_int_mask_get(spu, 1);
	stat = spu_int_stat_get(spu, 1) & mask;
	dar = spu_mfc_dar_get(spu);
	dsisr = spu_mfc_dsisr_get(spu);
	if (stat & 2) /* mapping fault */
		spu_mfc_dsisr_set(spu, 0ul);
	spu_int_stat_clear(spu, 1, stat);
	spin_unlock(&spu->register_lock);
	pr_debug("%s: %lx %lx %lx %lx\n", __func__, mask, stat,
			dar, dsisr);

	if (stat & 1) /* segment fault */
		__spu_trap_data_seg(spu, dar);

	if (stat & 2) /* mapping fault */
		__spu_trap_data_map(spu, dar, dsisr);

	if (stat & 4) /* ls compare & suspend on get */
		;

	if (stat & 8) /* ls compare & suspend on put */
		;

	return stat ? IRQ_HANDLED : IRQ_NONE;
}

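/*
 * Class 2 interrupts report SPU-side events: mailbox traffic,
 * stop-and-signal, halt and MFC tag group completion.
 */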
static irqreturn_t
spu_irq_class_2(int irq, void *data, struct pt_regs *regs)
{
	struct spu *spu;
	unsigned long stat;
	unsigned long mask;

	spu = data;
	stat = spu_int_stat_get(spu, 2);
	mask = spu_int_mask_get(spu, 2);

	pr_debug("class 2 interrupt %d, %lx, %lx\n", irq, stat, mask);

	stat &= mask;

	if (stat & 1) /* PPC core mailbox */
		__spu_trap_mailbox(spu);

	if (stat & 2) /* SPU stop-and-signal */
		__spu_trap_stop(spu);

	if (stat & 4) /* SPU halted */
		__spu_trap_halt(spu);

	if (stat & 8) /* DMA tag group complete */
		__spu_trap_tag_group(spu);

	if (stat & 0x10) /* SPU mailbox threshold */
		__spu_trap_spubox(spu);

	spu_int_stat_clear(spu, 2, stat);
	return stat ? IRQ_HANDLED : IRQ_NONE;
}

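/*
 * Every SPE owns three interrupt lines, one per class, at a fixed
 * stride from a per-node base; register a handler for each.
 */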
static int
spu_request_irqs(struct spu *spu)
{
	int ret;
	int irq_base;

	irq_base = IIC_NODE_STRIDE * spu->node + IIC_SPE_OFFSET;

	snprintf(spu->irq_c0, sizeof (spu->irq_c0), "spe%02d.0", spu->number);
	ret = request_irq(irq_base + spu->isrc,
		 spu_irq_class_0, SA_INTERRUPT, spu->irq_c0, spu);
	if (ret)
		goto out;

	snprintf(spu->irq_c1, sizeof (spu->irq_c1), "spe%02d.1", spu->number);
	ret = request_irq(irq_base + IIC_CLASS_STRIDE + spu->isrc,
		 spu_irq_class_1, SA_INTERRUPT, spu->irq_c1, spu);
	if (ret)
		goto out1;

	snprintf(spu->irq_c2, sizeof (spu->irq_c2), "spe%02d.2", spu->number);
	ret = request_irq(irq_base + 2*IIC_CLASS_STRIDE + spu->isrc,
		 spu_irq_class_2, SA_INTERRUPT, spu->irq_c2, spu);
	if (ret)
		goto out2;
	goto out;

out2:
	free_irq(irq_base + IIC_CLASS_STRIDE + spu->isrc, spu);
out1:
	free_irq(irq_base + spu->isrc, spu);
out:
	return ret;
}

static void
spu_free_irqs(struct spu *spu)
{
	int irq_base;

	irq_base = IIC_NODE_STRIDE * spu->node + IIC_SPE_OFFSET;

	free_irq(irq_base + spu->isrc, spu);
	free_irq(irq_base + IIC_CLASS_STRIDE + spu->isrc, spu);
	free_irq(irq_base + 2*IIC_CLASS_STRIDE + spu->isrc, spu);
}

static LIST_HEAD(spu_list);
static DEFINE_MUTEX(spu_mutex);

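/*
 * Put the SPU channels into a known state: spu_chnlcntptr_RW selects
 * a channel, whose data and count can then be written through the
 * adjacent priv2 registers.
 */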
static void spu_init_channels(struct spu *spu)
{
	static const struct {
		unsigned channel;
		unsigned count;
	} zero_list[] = {
		{ 0x00, 1, }, { 0x01, 1, }, { 0x03, 1, }, { 0x04, 1, },
		{ 0x18, 1, }, { 0x19, 1, }, { 0x1b, 1, }, { 0x1d, 1, },
	}, count_list[] = {
		{ 0x00, 0, }, { 0x03, 0, }, { 0x04, 0, }, { 0x15, 16, },
		{ 0x17, 1, }, { 0x18, 0, }, { 0x19, 0, }, { 0x1b, 0, },
		{ 0x1c, 1, }, { 0x1d, 0, }, { 0x1e, 1, },
	};
	struct spu_priv2 __iomem *priv2;
	int i;

	priv2 = spu->priv2;

	/* initialize all channel data to zero */
	for (i = 0; i < ARRAY_SIZE(zero_list); i++) {
		int count;

		out_be64(&priv2->spu_chnlcntptr_RW, zero_list[i].channel);
		for (count = 0; count < zero_list[i].count; count++)
			out_be64(&priv2->spu_chnldata_RW, 0);
	}

	/* initialize channel counts to meaningful values */
	for (i = 0; i < ARRAY_SIZE(count_list); i++) {
		out_be64(&priv2->spu_chnlcntptr_RW, count_list[i].channel);
		out_be64(&priv2->spu_chnlcnt_RW, count_list[i].count);
	}
}

struct spu *spu_alloc(void)
{
	struct spu *spu;

	mutex_lock(&spu_mutex);
	if (!list_empty(&spu_list)) {
		spu = list_entry(spu_list.next, struct spu, list);
		list_del_init(&spu->list);
		pr_debug("Got SPU %x %d\n", spu->isrc, spu->number);
	} else {
		pr_debug("No SPU left\n");
		spu = NULL;
	}
	mutex_unlock(&spu_mutex);

	if (spu)
		spu_init_channels(spu);

	return spu;
}
EXPORT_SYMBOL_GPL(spu_alloc);

void spu_free(struct spu *spu)
{
	mutex_lock(&spu_mutex);
	list_add_tail(&spu->list, &spu_list);
	mutex_unlock(&spu_mutex);
}
EXPORT_SYMBOL_GPL(spu_free);

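/*
 * Resolve a deferred MFC fault through the normal Linux memory
 * management, translating the MFC DSISR bits into an access type
 * for handle_mm_fault().
 */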
static int spu_handle_mm_fault(struct spu *spu)
{
	struct mm_struct *mm = spu->mm;
	struct vm_area_struct *vma;
	u64 ea, dsisr, is_write;
	int ret;

	ea = spu->dar;
	dsisr = spu->dsisr;
#if 0
	if (!IS_VALID_EA(ea)) {
		return -EFAULT;
	}
#endif /* XXX */
	if (mm == NULL) {
		return -EFAULT;
	}
	if (mm->pgd == NULL) {
		return -EFAULT;
	}

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, ea);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= ea)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
#if 0
	if (expand_stack(vma, ea))
		goto bad_area;
#endif /* XXX */
good_area:
	is_write = dsisr & MFC_DSISR_ACCESS_PUT;
	if (is_write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	} else {
		if (dsisr & MFC_DSISR_ACCESS_DENIED)
			goto bad_area;
		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto bad_area;
	}
	ret = 0;
	switch (handle_mm_fault(mm, vma, ea, is_write)) {
	case VM_FAULT_MINOR:
		current->min_flt++;
		break;
	case VM_FAULT_MAJOR:
		current->maj_flt++;
		break;
	case VM_FAULT_SIGBUS:
		ret = -EFAULT;
		goto bad_area;
	case VM_FAULT_OOM:
		ret = -ENOMEM;
		goto bad_area;
	default:
		BUG();
	}
	up_read(&mm->mmap_sem);
	return ret;

bad_area:
	up_read(&mm->mmap_sem);
	return -EFAULT;
}

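/*
 * Bottom half for class 1 faults, called from process context.
 * A fast hash_page() fixup is tried first; if the PTE is missing,
 * fall back to the full fault path above.  Unresolvable faults are
 * reported as an invalid DMA.
 */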
int spu_irq_class_1_bottom(struct spu *spu)
{
	u64 ea, dsisr, access, error = 0UL;
	int ret = 0;

	ea = spu->dar;
	dsisr = spu->dsisr;
	if (dsisr & (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED)) {
		u64 flags;

		access = (_PAGE_PRESENT | _PAGE_USER);
		access |= (dsisr & MFC_DSISR_ACCESS_PUT) ? _PAGE_RW : 0UL;
		local_irq_save(flags);
		if (hash_page(ea, access, 0x300) != 0)
			error |= CLASS1_ENABLE_STORAGE_FAULT_INTR;
		local_irq_restore(flags);
	}
	if (error & CLASS1_ENABLE_STORAGE_FAULT_INTR) {
		if ((ret = spu_handle_mm_fault(spu)) != 0)
			error |= CLASS1_ENABLE_STORAGE_FAULT_INTR;
		else
			error &= ~CLASS1_ENABLE_STORAGE_FAULT_INTR;
	}
	spu->dar = 0UL;
	spu->dsisr = 0UL;
	if (!error) {
		spu_restart_dma(spu);
	} else {
		__spu_trap_invalid_dma(spu);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(spu_irq_class_1_bottom);

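/*
 * Direct all interrupt classes of this SPU to one CPU by replicating
 * its IIC target id across the fields of the interrupt routing
 * register.
 */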
void spu_irq_setaffinity(struct spu *spu, int cpu)
{
	u64 target = iic_get_target_id(cpu);
	u64 route = target << 48 | target << 32 | target << 16;
	spu_int_route_set(spu, route);
}
EXPORT_SYMBOL_GPL(spu_irq_setaffinity);

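/*
 * Map one of the SPE register areas, described in the device tree
 * as a property containing a packed (address, length) pair.
 */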
static void __iomem * __init map_spe_prop(struct device_node *n,
		const char *name)
{
	struct address_prop {
		unsigned long address;
		unsigned int len;
	} __attribute__((packed)) *prop;

	void *p;
	int proplen;

	p = get_property(n, name, &proplen);
	if (!p || proplen != sizeof (struct address_prop))
		return NULL;

	prop = p;

	return ioremap(prop->address, prop->len);
}

static void spu_unmap(struct spu *spu)
{
	iounmap(spu->priv2);
	iounmap(spu->priv1);
	iounmap(spu->problem);
	iounmap((u8 __iomem *)spu->local_store);
}

static int __init spu_map_device(struct spu *spu, struct device_node *spe)
{
	char *prop;
	int ret;

	ret = -ENODEV;
	prop = get_property(spe, "isrc", NULL);
	if (!prop)
		goto out;
	spu->isrc = *(unsigned int *)prop;

	spu->name = get_property(spe, "name", NULL);
	if (!spu->name)
		goto out;

	prop = get_property(spe, "local-store", NULL);
	if (!prop)
		goto out;
	spu->local_store_phys = *(unsigned long *)prop;

	/* we use local store as ram, not io memory */
	spu->local_store = (void __force *)map_spe_prop(spe, "local-store");
	if (!spu->local_store)
		goto out;

	prop = get_property(spe, "problem", NULL);
	if (!prop)
		goto out_unmap;
	spu->problem_phys = *(unsigned long *)prop;

	spu->problem = map_spe_prop(spe, "problem");
	if (!spu->problem)
		goto out_unmap;

	spu->priv1 = map_spe_prop(spe, "priv1");
	/* priv1 is not available on a hypervisor */

	spu->priv2 = map_spe_prop(spe, "priv2");
	if (!spu->priv2)
		goto out_unmap;
	ret = 0;
	goto out;

out_unmap:
	spu_unmap(spu);
out:
	return ret;
}

static int __init find_spu_node_id(struct device_node *spe)
{
	unsigned int *id;
	struct device_node *cpu;

	cpu = spe->parent->parent;
	id = (unsigned int *)get_property(cpu, "node-id", NULL);

	return id ? *id : 0;
}

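/*
 * Set up one SPU from its device tree node: map its registers,
 * initialize MFC state, request its interrupts and put it on the
 * list of available SPUs.
 */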
static int __init create_spu(struct device_node *spe)
{
	struct spu *spu;
	int ret;
	static int number;

	ret = -ENOMEM;
	spu = kmalloc(sizeof (*spu), GFP_KERNEL);
	if (!spu)
		goto out;

	ret = spu_map_device(spu, spe);
	if (ret)
		goto out_free;

	spu->node = find_spu_node_id(spe);
	spu->stop_code = 0;
	spu->slb_replace = 0;
	spu->mm = NULL;
	spu->ctx = NULL;
	spu->rq = NULL;
	spu->pid = 0;
	spu->class_0_pending = 0;
	spu->flags = 0UL;
	spu->dar = 0UL;
	spu->dsisr = 0UL;
	spin_lock_init(&spu->register_lock);

	spu_mfc_sdr_set(spu, mfspr(SPRN_SDR1));
	spu_mfc_sr1_set(spu, 0x33);

	spu->ibox_callback = NULL;
	spu->wbox_callback = NULL;
	spu->stop_callback = NULL;
	spu->mfc_callback = NULL;

	mutex_lock(&spu_mutex);
	spu->number = number++;
	ret = spu_request_irqs(spu);
	if (ret)
		goto out_unmap;

	list_add(&spu->list, &spu_list);
	mutex_unlock(&spu_mutex);

	pr_debug("Using SPE %s %02x %p %p %p %p %d\n",
		spu->name, spu->isrc, spu->local_store,
		spu->problem, spu->priv1, spu->priv2, spu->number);
	goto out;

out_unmap:
	mutex_unlock(&spu_mutex);
	spu_unmap(spu);
out_free:
	kfree(spu);
out:
	return ret;
}

static void destroy_spu(struct spu *spu)
{
	list_del_init(&spu->list);

	spu_free_irqs(spu);
	spu_unmap(spu);
	kfree(spu);
}

static void cleanup_spu_base(void)
{
	struct spu *spu, *tmp;
	mutex_lock(&spu_mutex);
	list_for_each_entry_safe(spu, tmp, &spu_list, list)
		destroy_spu(spu);
	mutex_unlock(&spu_mutex);
}
module_exit(cleanup_spu_base);

static int __init init_spu_base(void)
{
	struct device_node *node;
	int ret;

	ret = -ENODEV;
	for (node = of_find_node_by_type(NULL, "spe");
			node; node = of_find_node_by_type(node, "spe")) {
		ret = create_spu(node);
		if (ret) {
			printk(KERN_WARNING "%s: Error initializing %s\n",
				__func__, node->name);
			cleanup_spu_base();
			break;
		}
	}
	/* in some old firmware versions, the spe is called 'spc', so we
	   look for that as well */
	for (node = of_find_node_by_type(NULL, "spc");
			node; node = of_find_node_by_type(node, "spc")) {
		ret = create_spu(node);
		if (ret) {
			printk(KERN_WARNING "%s: Error initializing %s\n",
				__func__, node->name);
			cleanup_spu_base();
			break;
		}
	}
	return ret;
}
module_init(init_spu_base);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>");