/*
 * Low-level SPU handling
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
 *
 * Author: Arnd Bergmann <arndb@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#undef DEBUG

#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/wait.h>

#include <asm/io.h>
#include <asm/prom.h>
#include <asm/spu.h>
#include <asm/mmu_context.h>

#include "interrupt.h"

static int __spu_trap_invalid_dma(struct spu *spu)
{
	pr_debug("%s\n", __FUNCTION__);
	force_sig(SIGBUS, /* info, */ current);
	return 0;
}

static int __spu_trap_dma_align(struct spu *spu)
{
	pr_debug("%s\n", __FUNCTION__);
	force_sig(SIGBUS, /* info, */ current);
	return 0;
}

static int __spu_trap_error(struct spu *spu)
{
	pr_debug("%s\n", __FUNCTION__);
	force_sig(SIGILL, /* info, */ current);
	return 0;
}

static void spu_restart_dma(struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	if (!test_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags))
		out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESTART_DMA_COMMAND);
}

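/*
 * SLB miss handling for an SPE: the MFC's SLB is reloaded by software.
 * On a segment fault we build a new ESID/VSID pair from the faulting
 * address and the owning mm, write it into one of the eight SLB
 * entries, and pick victim entries in simple round-robin order through
 * spu->slb_replace.
 */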
static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	struct mm_struct *mm = spu->mm;
	u64 esid, vsid;

	pr_debug("%s\n", __FUNCTION__);

	if (test_bit(SPU_CONTEXT_SWITCH_ACTIVE, &spu->flags)) {
		/* SLBs are pre-loaded for context switch, so
		 * we should never get here!
		 */
		printk(KERN_ERR "%s: invalid access during switch!\n", __func__);
		return 1;
	}
	if (!mm || (REGION_ID(ea) != USER_REGION_ID)) {
		/* Future: support kernel segments so that drivers
		 * can use SPUs.
		 */
		pr_debug("invalid region access at %016lx\n", ea);
		return 1;
	}

	esid = (ea & ESID_MASK) | SLB_ESID_V;
	vsid = (get_vsid(mm->context.id, ea) << SLB_VSID_SHIFT) | SLB_VSID_USER;
	if (in_hugepage_area(mm->context, ea))
		vsid |= SLB_VSID_L;

	out_be64(&priv2->slb_index_W, spu->slb_replace);
	out_be64(&priv2->slb_vsid_RW, vsid);
	out_be64(&priv2->slb_esid_RW, esid);

	spu->slb_replace++;
	if (spu->slb_replace >= 8)
		spu->slb_replace = 0;

	spu_restart_dma(spu);

	return 0;
}

extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap); //XXX
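/*
 * MFC mapping faults: hash faults on kernel addresses can be resolved
 * right here by refilling the hash table.  Faults on user addresses
 * may need to sleep, so they are only recorded in spu->dar/dsisr and
 * handed to the owner's stop_callback for resolution in process
 * context (see spu_irq_class_1_bottom further down).
 */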
static int __spu_trap_data_map(struct spu *spu, unsigned long ea, u64 dsisr)
{
	pr_debug("%s, %lx, %lx\n", __FUNCTION__, dsisr, ea);

	/* Handle kernel space hash faults immediately.
	   User hash faults need to be deferred to process context. */
	if ((dsisr & MFC_DSISR_PTE_NOT_FOUND)
	    && REGION_ID(ea) != USER_REGION_ID
	    && hash_page(ea, _PAGE_PRESENT, 0x300) == 0) {
		spu_restart_dma(spu);
		return 0;
	}

	if (test_bit(SPU_CONTEXT_SWITCH_ACTIVE, &spu->flags)) {
		printk(KERN_ERR "%s: invalid access during switch!\n", __func__);
		return 1;
	}

	spu->dar = ea;
	spu->dsisr = dsisr;
	mb();
	if (spu->stop_callback)
		spu->stop_callback(spu);
	return 0;
}

static int __spu_trap_mailbox(struct spu *spu)
{
	if (spu->ibox_callback)
		spu->ibox_callback(spu);

	/* atomically disable SPU mailbox interrupts */
	spin_lock(&spu->register_lock);
	spu_int_mask_and(spu, 2, ~0x1);
	spin_unlock(&spu->register_lock);
	return 0;
}

static int __spu_trap_stop(struct spu *spu)
{
	pr_debug("%s\n", __FUNCTION__);
	spu->stop_code = in_be32(&spu->problem->spu_status_R);
	if (spu->stop_callback)
		spu->stop_callback(spu);
	return 0;
}

static int __spu_trap_halt(struct spu *spu)
{
	pr_debug("%s\n", __FUNCTION__);
	spu->stop_code = in_be32(&spu->problem->spu_status_R);
	if (spu->stop_callback)
		spu->stop_callback(spu);
	return 0;
}

static int __spu_trap_tag_group(struct spu *spu)
{
	pr_debug("%s\n", __FUNCTION__);
	spu->mfc_callback(spu);
	return 0;
}

static int __spu_trap_spubox(struct spu *spu)
{
	if (spu->wbox_callback)
		spu->wbox_callback(spu);

	/* atomically disable SPU mailbox interrupts */
	spin_lock(&spu->register_lock);
	spu_int_mask_and(spu, 2, ~0x10);
	spin_unlock(&spu->register_lock);
	return 0;
}

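/*
 * Each SPE raises its interrupts on three separate lines, one per
 * interrupt class:
 *
 *   class 0 - error conditions (invalid MFC DMA, DMA alignment,
 *             SPU error)
 *   class 1 - MFC translation faults (segment and mapping faults)
 *   class 2 - SPU events (mailboxes, stop-and-signal, halt,
 *             DMA tag group complete)
 *
 * The handlers below decode the per-class interrupt status registers
 * and dispatch to the __spu_trap_* helpers above.
 */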
static irqreturn_t
spu_irq_class_0(int irq, void *data, struct pt_regs *regs)
{
	struct spu *spu;

	spu = data;
	spu->class_0_pending = 1;
	if (spu->stop_callback)
		spu->stop_callback(spu);

	return IRQ_HANDLED;
}

int
spu_irq_class_0_bottom(struct spu *spu)
{
	unsigned long stat, mask;

	spu->class_0_pending = 0;

	mask = spu_int_mask_get(spu, 0);
	stat = spu_int_stat_get(spu, 0);

	stat &= mask;

	if (stat & 1) /* invalid MFC DMA */
		__spu_trap_invalid_dma(spu);

	if (stat & 2) /* invalid DMA alignment */
		__spu_trap_dma_align(spu);

	if (stat & 4) /* error on SPU */
		__spu_trap_error(spu);

	spu_int_stat_clear(spu, 0, stat);

	return (stat & 0x7) ? -EIO : 0;
}
EXPORT_SYMBOL_GPL(spu_irq_class_0_bottom);

static irqreturn_t
spu_irq_class_1(int irq, void *data, struct pt_regs *regs)
{
	struct spu *spu;
	unsigned long stat, mask, dar, dsisr;

	spu = data;

	/* atomically read & clear class1 status. */
	spin_lock(&spu->register_lock);
	mask = spu_int_mask_get(spu, 1);
	stat = spu_int_stat_get(spu, 1) & mask;
	dar = spu_mfc_dar_get(spu);
	dsisr = spu_mfc_dsisr_get(spu);
	if (stat & 2) /* mapping fault */
		spu_mfc_dsisr_set(spu, 0ul);
	spu_int_stat_clear(spu, 1, stat);
	spin_unlock(&spu->register_lock);
	pr_debug("%s: %lx %lx %lx %lx\n", __FUNCTION__, mask, stat,
			dar, dsisr);

	if (stat & 1) /* segment fault */
		__spu_trap_data_seg(spu, dar);

	if (stat & 2) /* mapping fault */
		__spu_trap_data_map(spu, dar, dsisr);

	if (stat & 4) /* ls compare & suspend on get */
		;

	if (stat & 8) /* ls compare & suspend on put */
		;

	return stat ? IRQ_HANDLED : IRQ_NONE;
}

static irqreturn_t
spu_irq_class_2(int irq, void *data, struct pt_regs *regs)
{
	struct spu *spu;
	unsigned long stat;
	unsigned long mask;

	spu = data;
	stat = spu_int_stat_get(spu, 2);
	mask = spu_int_mask_get(spu, 2);

	pr_debug("class 2 interrupt %d, %lx, %lx\n", irq, stat, mask);

	stat &= mask;

	if (stat & 1) /* PPC core mailbox */
		__spu_trap_mailbox(spu);

	if (stat & 2) /* SPU stop-and-signal */
		__spu_trap_stop(spu);

	if (stat & 4) /* SPU halted */
		__spu_trap_halt(spu);

	if (stat & 8) /* DMA tag group complete */
		__spu_trap_tag_group(spu);

	if (stat & 0x10) /* SPU mailbox threshold */
		__spu_trap_spubox(spu);

	spu_int_stat_clear(spu, 2, stat);
	return stat ? IRQ_HANDLED : IRQ_NONE;
}

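/*
 * IRQ numbers follow the layout of the Cell internal interrupt
 * controller: a node's sources start at IIC_NODE_STRIDE * node +
 * IIC_SPE_OFFSET, the three interrupt classes are IIC_CLASS_STRIDE
 * apart, and the SPE's interrupt source id (isrc) selects the line
 * within a class.
 */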
static int
spu_request_irqs(struct spu *spu)
{
	int ret;
	int irq_base;

	irq_base = IIC_NODE_STRIDE * spu->node + IIC_SPE_OFFSET;

	snprintf(spu->irq_c0, sizeof (spu->irq_c0), "spe%02d.0", spu->number);
	ret = request_irq(irq_base + spu->isrc,
		 spu_irq_class_0, SA_INTERRUPT, spu->irq_c0, spu);
	if (ret)
		goto out;

	snprintf(spu->irq_c1, sizeof (spu->irq_c1), "spe%02d.1", spu->number);
	ret = request_irq(irq_base + IIC_CLASS_STRIDE + spu->isrc,
		 spu_irq_class_1, SA_INTERRUPT, spu->irq_c1, spu);
	if (ret)
		goto out1;

	snprintf(spu->irq_c2, sizeof (spu->irq_c2), "spe%02d.2", spu->number);
	ret = request_irq(irq_base + 2*IIC_CLASS_STRIDE + spu->isrc,
		 spu_irq_class_2, SA_INTERRUPT, spu->irq_c2, spu);
	if (ret)
		goto out2;
	goto out;

out2:
	free_irq(irq_base + IIC_CLASS_STRIDE + spu->isrc, spu);
out1:
	free_irq(irq_base + spu->isrc, spu);
out:
	return ret;
}

static void
spu_free_irqs(struct spu *spu)
{
	int irq_base;

	irq_base = IIC_NODE_STRIDE * spu->node + IIC_SPE_OFFSET;

	free_irq(irq_base + spu->isrc, spu);
	free_irq(irq_base + IIC_CLASS_STRIDE + spu->isrc, spu);
	free_irq(irq_base + 2*IIC_CLASS_STRIDE + spu->isrc, spu);
}

static LIST_HEAD(spu_list);
static DEFINE_MUTEX(spu_mutex);

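/*
 * Bring the SPU's channel state to a known configuration: the channels
 * in zero_list get their channel data cleared, and the channels in
 * count_list get their channel counts set to initial values (channel
 * 0x15 receives a count of 16, presumably the depth of the MFC command
 * queue).
 */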
static void spu_init_channels(struct spu *spu)
{
	static const struct {
		unsigned channel;
		unsigned count;
	} zero_list[] = {
		{ 0x00, 1, }, { 0x01, 1, }, { 0x03, 1, }, { 0x04, 1, },
		{ 0x18, 1, }, { 0x19, 1, }, { 0x1b, 1, }, { 0x1d, 1, },
	}, count_list[] = {
		{ 0x00, 0, }, { 0x03, 0, }, { 0x04, 0, }, { 0x15, 16, },
		{ 0x17, 1, }, { 0x18, 0, }, { 0x19, 0, }, { 0x1b, 0, },
		{ 0x1c, 1, }, { 0x1d, 0, }, { 0x1e, 1, },
	};
	struct spu_priv2 __iomem *priv2;
	int i;

	priv2 = spu->priv2;

	/* initialize all channel data to zero */
	for (i = 0; i < ARRAY_SIZE(zero_list); i++) {
		int count;

		out_be64(&priv2->spu_chnlcntptr_RW, zero_list[i].channel);
		for (count = 0; count < zero_list[i].count; count++)
			out_be64(&priv2->spu_chnldata_RW, 0);
	}

	/* initialize channel counts to meaningful values */
	for (i = 0; i < ARRAY_SIZE(count_list); i++) {
		out_be64(&priv2->spu_chnlcntptr_RW, count_list[i].channel);
		out_be64(&priv2->spu_chnlcnt_RW, count_list[i].count);
	}
}

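/*
 * SPUs are handed out from a simple free list protected by spu_mutex.
 * A minimal usage sketch for a (hypothetical) caller:
 *
 *	struct spu *spu = spu_alloc();
 *	if (!spu)
 *		return -EBUSY;	.. all SPUs are in use ..
 *	.. set up callbacks, run code on the SPU ..
 *	spu_free(spu);
 */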
struct spu *spu_alloc(void)
{
	struct spu *spu;

	mutex_lock(&spu_mutex);
	if (!list_empty(&spu_list)) {
		spu = list_entry(spu_list.next, struct spu, list);
		list_del_init(&spu->list);
		pr_debug("Got SPU %x %d\n", spu->isrc, spu->number);
	} else {
		pr_debug("No SPU left\n");
		spu = NULL;
	}
	mutex_unlock(&spu_mutex);

	if (spu)
		spu_init_channels(spu);

	return spu;
}
EXPORT_SYMBOL_GPL(spu_alloc);

void spu_free(struct spu *spu)
{
	mutex_lock(&spu_mutex);
	list_add_tail(&spu->list, &spu_list);
	mutex_unlock(&spu_mutex);
}
EXPORT_SYMBOL_GPL(spu_free);

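/*
 * Resolve an MFC fault against the owning process' address space,
 * mirroring the generic powerpc page fault path: look up the VMA,
 * check access permissions against the MFC's DSISR bits, then let
 * handle_mm_fault() fill in the page tables.
 */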
static int spu_handle_mm_fault(struct spu *spu)
{
	struct mm_struct *mm = spu->mm;
	struct vm_area_struct *vma;
	u64 ea, dsisr, is_write;
	int ret;

	ea = spu->dar;
	dsisr = spu->dsisr;
#if 0
	if (!IS_VALID_EA(ea)) {
		return -EFAULT;
	}
#endif /* XXX */
	if (mm == NULL) {
		return -EFAULT;
	}
	if (mm->pgd == NULL) {
		return -EFAULT;
	}

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, ea);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= ea)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
#if 0
	if (expand_stack(vma, ea))
		goto bad_area;
#endif /* XXX */
good_area:
	is_write = dsisr & MFC_DSISR_ACCESS_PUT;
	if (is_write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	} else {
		if (dsisr & MFC_DSISR_ACCESS_DENIED)
			goto bad_area;
		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto bad_area;
	}
	ret = 0;
	switch (handle_mm_fault(mm, vma, ea, is_write)) {
	case VM_FAULT_MINOR:
		current->min_flt++;
		break;
	case VM_FAULT_MAJOR:
		current->maj_flt++;
		break;
	case VM_FAULT_SIGBUS:
		ret = -EFAULT;
		goto bad_area;
	case VM_FAULT_OOM:
		ret = -ENOMEM;
		goto bad_area;
	default:
		BUG();
	}
	up_read(&mm->mmap_sem);
	return ret;

bad_area:
	up_read(&mm->mmap_sem);
	return -EFAULT;
}

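/*
 * Bottom half for class 1 interrupts, entered with spu->dar/dsisr
 * filled in by __spu_trap_data_map().  First retry the access through
 * hash_page() with interrupts disabled around the hash table update;
 * only when that fails fall back to the full spu_handle_mm_fault()
 * path.  On success the MFC DMA is restarted, otherwise the fault is
 * escalated as an invalid DMA.
 */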
int spu_irq_class_1_bottom(struct spu *spu)
{
	u64 ea, dsisr, access, error = 0UL;
	int ret = 0;

	ea = spu->dar;
	dsisr = spu->dsisr;
	if (dsisr & (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED)) {
		u64 flags;

		access = (_PAGE_PRESENT | _PAGE_USER);
		access |= (dsisr & MFC_DSISR_ACCESS_PUT) ? _PAGE_RW : 0UL;
		local_irq_save(flags);
		if (hash_page(ea, access, 0x300) != 0)
			error |= CLASS1_ENABLE_STORAGE_FAULT_INTR;
		local_irq_restore(flags);
	}
	if (error & CLASS1_ENABLE_STORAGE_FAULT_INTR) {
		if ((ret = spu_handle_mm_fault(spu)) != 0)
			error |= CLASS1_ENABLE_STORAGE_FAULT_INTR;
		else
			error &= ~CLASS1_ENABLE_STORAGE_FAULT_INTR;
	}
	spu->dar = 0UL;
	spu->dsisr = 0UL;
	if (!error) {
		spu_restart_dma(spu);
	} else {
		__spu_trap_invalid_dma(spu);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(spu_irq_class_1_bottom);

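/*
 * Route this SPE's interrupts to the given CPU.  The target id is
 * replicated into bits 48, 32 and 16 of the routing register, which
 * appear to be the destination fields for the three interrupt classes.
 */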
void spu_irq_setaffinity(struct spu *spu, int cpu)
{
	u64 target = iic_get_target_id(cpu);
	u64 route = target << 48 | target << 32 | target << 16;
	spu_int_route_set(spu, route);
}
EXPORT_SYMBOL_GPL(spu_irq_setaffinity);

static int __init find_spu_node_id(struct device_node *spe)
{
	unsigned int *id;
	struct device_node *cpu;
	cpu = spe->parent->parent;
	id = (unsigned int *)get_property(cpu, "node-id", NULL);
	return id ? *id : 0;
}

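/*
 * The SPE register areas live at real addresses outside the regular
 * memory map.  Register the range described by a device tree property
 * with the memory hotplug code via __add_pages(), presumably so the
 * area is covered by struct pages; -EEXIST from here just means the
 * range has been added before.
 */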
static int __init cell_spuprop_present(struct spu *spu, struct device_node *spe,
		const char *prop)
{
	static DEFINE_MUTEX(add_spumem_mutex);

	struct address_prop {
		unsigned long address;
		unsigned int len;
	} __attribute__((packed)) *p;
	int proplen;

	unsigned long start_pfn, nr_pages;
	struct pglist_data *pgdata;
	struct zone *zone;
	int ret;

	p = (void *)get_property(spe, prop, &proplen);
	WARN_ON(proplen != sizeof (*p));

	start_pfn = p->address >> PAGE_SHIFT;
	nr_pages = ((unsigned long)p->len + PAGE_SIZE - 1) >> PAGE_SHIFT;

	pgdata = NODE_DATA(spu->nid);
	zone = pgdata->node_zones;

	/* XXX rethink locking here */
	mutex_lock(&add_spumem_mutex);
	ret = __add_pages(zone, start_pfn, nr_pages);
	mutex_unlock(&add_spumem_mutex);

	return ret;
}

static void __iomem * __init map_spe_prop(struct spu *spu,
		struct device_node *n, const char *name)
{
	struct address_prop {
		unsigned long address;
		unsigned int len;
	} __attribute__((packed)) *prop;

	void *p;
	int proplen;
	void *ret = NULL;
	int err = 0;

	p = get_property(n, name, &proplen);
	if (proplen != sizeof (struct address_prop))
		return NULL;

	prop = p;

	err = cell_spuprop_present(spu, n, name);
	if (err && (err != -EEXIST))
		goto out;

	ret = ioremap(prop->address, prop->len);

out:
	return ret;
}

static void spu_unmap(struct spu *spu)
{
	iounmap(spu->priv2);
	iounmap(spu->priv1);
	iounmap(spu->problem);
	iounmap((u8 __iomem *)spu->local_store);
}

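/*
 * Map the per-SPE register areas described by the device tree node:
 * "local-store", the problem state area ("problem"), and the
 * privileged "priv1" and "priv2" areas, along with the interrupt
 * source id ("isrc") and the SPE's name.  priv1 may legitimately be
 * missing when a hypervisor keeps it to itself.
 */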
static int __init spu_map_device(struct spu *spu, struct device_node *node)
{
	char *prop;
	int ret;

	ret = -ENODEV;
	prop = get_property(node, "isrc", NULL);
	if (!prop)
		goto out;
	spu->isrc = *(unsigned int *)prop;

	spu->name = get_property(node, "name", NULL);
	if (!spu->name)
		goto out;

	prop = get_property(node, "local-store", NULL);
	if (!prop)
		goto out;
	spu->local_store_phys = *(unsigned long *)prop;

	/* we use local store as ram, not io memory */
	spu->local_store = (void __force *)
		map_spe_prop(spu, node, "local-store");
	if (!spu->local_store)
		goto out;

	prop = get_property(node, "problem", NULL);
	if (!prop)
		goto out_unmap;
	spu->problem_phys = *(unsigned long *)prop;

	spu->problem = map_spe_prop(spu, node, "problem");
	if (!spu->problem)
		goto out_unmap;

	spu->priv1 = map_spe_prop(spu, node, "priv1");
	/* priv1 is not available on a hypervisor */

	spu->priv2 = map_spe_prop(spu, node, "priv2");
	if (!spu->priv2)
		goto out_unmap;
	ret = 0;
	goto out;

out_unmap:
	spu_unmap(spu);
out:
	return ret;
}

struct sysdev_class spu_sysdev_class = {
	set_kset_name("spu")
};

static ssize_t spu_show_isrc(struct sys_device *sysdev, char *buf)
{
	struct spu *spu = container_of(sysdev, struct spu, sysdev);
	return sprintf(buf, "%d\n", spu->isrc);
}
static SYSDEV_ATTR(isrc, 0400, spu_show_isrc, NULL);

extern int attach_sysdev_to_node(struct sys_device *dev, int nid);

static int spu_create_sysdev(struct spu *spu)
{
	int ret;

	spu->sysdev.id = spu->number;
	spu->sysdev.cls = &spu_sysdev_class;
	ret = sysdev_register(&spu->sysdev);
	if (ret) {
		printk(KERN_ERR "Can't register SPU %d with sysfs\n",
				spu->number);
		return ret;
	}

	sysdev_create_file(&spu->sysdev, &attr_isrc);
	sysfs_add_device_to_node(&spu->sysdev, spu->nid);

	return 0;
}

static void spu_destroy_sysdev(struct spu *spu)
{
	sysdev_remove_file(&spu->sysdev, &attr_isrc);
	sysfs_remove_device_from_node(&spu->sysdev, spu->nid);
	sysdev_unregister(&spu->sysdev);
}

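/*
 * Probe one SPE from its device tree node: map its register areas,
 * determine its node ids, initialize the software state, request the
 * three interrupt lines and register the sysdev before adding the SPU
 * to the global free list.
 */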
static int __init create_spu(struct device_node *spe)
{
	struct spu *spu;
	int ret;
	static int number;

	ret = -ENOMEM;
	spu = kmalloc(sizeof (*spu), GFP_KERNEL);
	if (!spu)
		goto out;

	ret = spu_map_device(spu, spe);
	if (ret)
		goto out_free;

	spu->node = find_spu_node_id(spe);
	spu->nid = of_node_to_nid(spe);
	if (spu->nid == -1)
		spu->nid = 0;

	spu->stop_code = 0;
	spu->slb_replace = 0;
	spu->mm = NULL;
	spu->ctx = NULL;
	spu->rq = NULL;
	spu->pid = 0;
	spu->class_0_pending = 0;
	spu->flags = 0UL;
	spu->dar = 0UL;
	spu->dsisr = 0UL;
	spin_lock_init(&spu->register_lock);

	spu_mfc_sdr_set(spu, mfspr(SPRN_SDR1));
	spu_mfc_sr1_set(spu, 0x33);

	spu->ibox_callback = NULL;
	spu->wbox_callback = NULL;
	spu->stop_callback = NULL;
	spu->mfc_callback = NULL;

	mutex_lock(&spu_mutex);
	spu->number = number++;
	ret = spu_request_irqs(spu);
	if (ret)
		goto out_unmap;

	ret = spu_create_sysdev(spu);
	if (ret)
		goto out_free_irqs;

	list_add(&spu->list, &spu_list);
	mutex_unlock(&spu_mutex);

	pr_debug("Using SPE %s %02x %p %p %p %p %d\n",
		spu->name, spu->isrc, spu->local_store,
		spu->problem, spu->priv1, spu->priv2, spu->number);
	goto out;

out_free_irqs:
	spu_free_irqs(spu);

out_unmap:
	mutex_unlock(&spu_mutex);
	spu_unmap(spu);
out_free:
	kfree(spu);
out:
	return ret;
}

static void destroy_spu(struct spu *spu)
{
	list_del_init(&spu->list);

	spu_destroy_sysdev(spu);
	spu_free_irqs(spu);
	spu_unmap(spu);
	kfree(spu);
}

static void cleanup_spu_base(void)
{
	struct spu *spu, *tmp;
	mutex_lock(&spu_mutex);
	list_for_each_entry_safe(spu, tmp, &spu_list, list)
		destroy_spu(spu);
	mutex_unlock(&spu_mutex);
	sysdev_class_unregister(&spu_sysdev_class);
}
module_exit(cleanup_spu_base);

static int __init init_spu_base(void)
{
	struct device_node *node;
	int ret;

	/* create sysdev class for spus */
	ret = sysdev_class_register(&spu_sysdev_class);
	if (ret)
		return ret;

	ret = -ENODEV;
	for (node = of_find_node_by_type(NULL, "spe");
			node; node = of_find_node_by_type(node, "spe")) {
		ret = create_spu(node);
		if (ret) {
			printk(KERN_WARNING "%s: Error initializing %s\n",
				__FUNCTION__, node->name);
			cleanup_spu_base();
			break;
		}
	}
	/* in some old firmware versions, the spe is called 'spc', so we
	   look for that as well */
	for (node = of_find_node_by_type(NULL, "spc");
			node; node = of_find_node_by_type(node, "spc")) {
		ret = create_spu(node);
		if (ret) {
			printk(KERN_WARNING "%s: Error initializing %s\n",
				__FUNCTION__, node->name);
			cleanup_spu_base();
			break;
		}
	}
	return ret;
}
module_init(init_spu_base);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>");