/*
 * Low-level SPU handling
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
 *
 * Author: Arnd Bergmann <arndb@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#undef DEBUG

#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/wait.h>

#include <asm/io.h>
#include <asm/prom.h>
#include <asm/spu.h>
#include <asm/spu_priv1.h>
#include <asm/mmu_context.h>

#include "interrupt.h"

const struct spu_priv1_ops *spu_priv1_ops;

EXPORT_SYMBOL_GPL(spu_priv1_ops);

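/*
 * The __spu_trap_* helpers below run from the interrupt handlers and turn
 * SPU-side events into signals or callbacks for the thread that owns the
 * SPU context.
 */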
static int __spu_trap_invalid_dma(struct spu *spu)
{
	pr_debug("%s\n", __FUNCTION__);
	force_sig(SIGBUS, /* info, */ current);
	return 0;
}

static int __spu_trap_dma_align(struct spu *spu)
{
	pr_debug("%s\n", __FUNCTION__);
	force_sig(SIGBUS, /* info, */ current);
	return 0;
}

static int __spu_trap_error(struct spu *spu)
{
	pr_debug("%s\n", __FUNCTION__);
	force_sig(SIGILL, /* info, */ current);
	return 0;
}

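/*
 * Restart the MFC DMA command queue after a fault has been resolved,
 * unless a context switch is pending and will restore the queue state
 * itself.
 */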
static void spu_restart_dma(struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	if (!test_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags))
		out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESTART_DMA_COMMAND);
}

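/*
 * A class 1 segment fault means the SPU-side SLB has no entry covering
 * the faulting effective address: build an ESID/VSID pair for the user
 * region and write it into one of the eight SLB slots, chosen in simple
 * round-robin order via spu->slb_replace.
 */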
static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	struct mm_struct *mm = spu->mm;
	u64 esid, vsid, llp;

	pr_debug("%s\n", __FUNCTION__);

	if (test_bit(SPU_CONTEXT_SWITCH_ACTIVE, &spu->flags)) {
		/* SLBs are pre-loaded for context switch, so
		 * we should never get here!
		 */
		printk(KERN_ERR "%s: invalid access during switch!\n", __func__);
		return 1;
	}
	if (!mm || (REGION_ID(ea) != USER_REGION_ID)) {
		/* Future: support kernel segments so that drivers
		 * can use SPUs.
		 */
		pr_debug("invalid region access at %016lx\n", ea);
		return 1;
	}

	esid = (ea & ESID_MASK) | SLB_ESID_V;
#ifdef CONFIG_HUGETLB_PAGE
	if (in_hugepage_area(mm->context, ea))
		llp = mmu_psize_defs[mmu_huge_psize].sllp;
	else
#endif
		llp = mmu_psize_defs[mmu_virtual_psize].sllp;
	vsid = (get_vsid(mm->context.id, ea) << SLB_VSID_SHIFT) |
			SLB_VSID_USER | llp;

	out_be64(&priv2->slb_index_W, spu->slb_replace);
	out_be64(&priv2->slb_vsid_RW, vsid);
	out_be64(&priv2->slb_esid_RW, esid);

	spu->slb_replace++;
	if (spu->slb_replace >= 8)
		spu->slb_replace = 0;

	spu_restart_dma(spu);

	return 0;
}

extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap); //XXX
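/*
 * Class 1 mapping faults: a missing hash-table entry for a kernel address
 * can be fixed up right here via hash_page(), but user-space faults are
 * only recorded in dar/dsisr and handed to the owning thread through
 * stop_callback, to be resolved later in process context.
 */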
static int __spu_trap_data_map(struct spu *spu, unsigned long ea, u64 dsisr)
{
	pr_debug("%s, %lx, %lx\n", __FUNCTION__, dsisr, ea);

	/* Handle kernel space hash faults immediately.
	   User hash faults need to be deferred to process context. */
	if ((dsisr & MFC_DSISR_PTE_NOT_FOUND)
	    && REGION_ID(ea) != USER_REGION_ID
	    && hash_page(ea, _PAGE_PRESENT, 0x300) == 0) {
		spu_restart_dma(spu);
		return 0;
	}

	if (test_bit(SPU_CONTEXT_SWITCH_ACTIVE, &spu->flags)) {
		printk(KERN_ERR "%s: invalid access during switch!\n", __func__);
		return 1;
	}

	spu->dar = ea;
	spu->dsisr = dsisr;
	mb();
	if (spu->stop_callback)
		spu->stop_callback(spu);
	return 0;
}

static int __spu_trap_mailbox(struct spu *spu)
{
	if (spu->ibox_callback)
		spu->ibox_callback(spu);

	/* atomically disable SPU mailbox interrupts */
	spin_lock(&spu->register_lock);
	spu_int_mask_and(spu, 2, ~0x1);
	spin_unlock(&spu->register_lock);
	return 0;
}

static int __spu_trap_stop(struct spu *spu)
{
	pr_debug("%s\n", __FUNCTION__);
	spu->stop_code = in_be32(&spu->problem->spu_status_R);
	if (spu->stop_callback)
		spu->stop_callback(spu);
	return 0;
}

static int __spu_trap_halt(struct spu *spu)
{
	pr_debug("%s\n", __FUNCTION__);
	spu->stop_code = in_be32(&spu->problem->spu_status_R);
	if (spu->stop_callback)
		spu->stop_callback(spu);
	return 0;
}

static int __spu_trap_tag_group(struct spu *spu)
{
	pr_debug("%s\n", __FUNCTION__);
	spu->mfc_callback(spu);
	return 0;
}

static int __spu_trap_spubox(struct spu *spu)
{
	if (spu->wbox_callback)
		spu->wbox_callback(spu);

	/* atomically disable SPU mailbox threshold interrupts */
	spin_lock(&spu->register_lock);
	spu_int_mask_and(spu, 2, ~0x10);
	spin_unlock(&spu->register_lock);
	return 0;
}

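/*
 * Each SPE raises three classes of interrupts: class 0 for errors
 * (invalid DMA, DMA alignment, SPU error), class 1 for address
 * translation faults, and class 2 for mailbox, stop-and-signal, halt
 * and tag-group events.
 */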
static irqreturn_t
spu_irq_class_0(int irq, void *data, struct pt_regs *regs)
{
	struct spu *spu;

	spu = data;
	spu->class_0_pending = 1;
	if (spu->stop_callback)
		spu->stop_callback(spu);

	return IRQ_HANDLED;
}

int
spu_irq_class_0_bottom(struct spu *spu)
{
	unsigned long stat, mask;

	spu->class_0_pending = 0;

	mask = spu_int_mask_get(spu, 0);
	stat = spu_int_stat_get(spu, 0);

	stat &= mask;

	if (stat & 1) /* invalid MFC DMA */
		__spu_trap_invalid_dma(spu);

	if (stat & 2) /* invalid DMA alignment */
		__spu_trap_dma_align(spu);

	if (stat & 4) /* error on SPU */
		__spu_trap_error(spu);

	spu_int_stat_clear(spu, 0, stat);

	return (stat & 0x7) ? -EIO : 0;
}
EXPORT_SYMBOL_GPL(spu_irq_class_0_bottom);

static irqreturn_t
spu_irq_class_1(int irq, void *data, struct pt_regs *regs)
{
	struct spu *spu;
	unsigned long stat, mask, dar, dsisr;

	spu = data;

	/* atomically read & clear class1 status. */
	spin_lock(&spu->register_lock);
	mask = spu_int_mask_get(spu, 1);
	stat = spu_int_stat_get(spu, 1) & mask;
	dar = spu_mfc_dar_get(spu);
	dsisr = spu_mfc_dsisr_get(spu);
	if (stat & 2) /* mapping fault */
		spu_mfc_dsisr_set(spu, 0ul);
	spu_int_stat_clear(spu, 1, stat);
	spin_unlock(&spu->register_lock);
	pr_debug("%s: %lx %lx %lx %lx\n", __FUNCTION__, mask, stat,
			dar, dsisr);

	if (stat & 1) /* segment fault */
		__spu_trap_data_seg(spu, dar);

	if (stat & 2) /* mapping fault */
		__spu_trap_data_map(spu, dar, dsisr);

	if (stat & 4) /* ls compare & suspend on get */
		;

	if (stat & 8) /* ls compare & suspend on put */
		;

	return stat ? IRQ_HANDLED : IRQ_NONE;
}
EXPORT_SYMBOL_GPL(spu_irq_class_1_bottom);

static irqreturn_t
spu_irq_class_2(int irq, void *data, struct pt_regs *regs)
{
	struct spu *spu;
	unsigned long stat;
	unsigned long mask;

	spu = data;
	stat = spu_int_stat_get(spu, 2);
	mask = spu_int_mask_get(spu, 2);

	pr_debug("class 2 interrupt %d, %lx, %lx\n", irq, stat, mask);

	stat &= mask;

	if (stat & 1) /* PPC core mailbox */
		__spu_trap_mailbox(spu);

	if (stat & 2) /* SPU stop-and-signal */
		__spu_trap_stop(spu);

	if (stat & 4) /* SPU halted */
		__spu_trap_halt(spu);

	if (stat & 8) /* DMA tag group complete */
		__spu_trap_tag_group(spu);

	if (stat & 0x10) /* SPU mailbox threshold */
		__spu_trap_spubox(spu);

	spu_int_stat_clear(spu, 2, stat);
	return stat ? IRQ_HANDLED : IRQ_NONE;
}

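/*
 * Interrupt numbers follow the IIC layout: one block of IIC_NODE_STRIDE
 * sources per node, the three classes spaced IIC_CLASS_STRIDE apart, and
 * the per-SPE offset taken from the "isrc" firmware property.
 */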
static int
spu_request_irqs(struct spu *spu)
{
	int ret;
	int irq_base;

	irq_base = IIC_NODE_STRIDE * spu->node + IIC_SPE_OFFSET;

	snprintf(spu->irq_c0, sizeof (spu->irq_c0), "spe%02d.0", spu->number);
	ret = request_irq(irq_base + spu->isrc,
			spu_irq_class_0, SA_INTERRUPT, spu->irq_c0, spu);
	if (ret)
		goto out;

	snprintf(spu->irq_c1, sizeof (spu->irq_c1), "spe%02d.1", spu->number);
	ret = request_irq(irq_base + IIC_CLASS_STRIDE + spu->isrc,
			spu_irq_class_1, SA_INTERRUPT, spu->irq_c1, spu);
	if (ret)
		goto out1;

	snprintf(spu->irq_c2, sizeof (spu->irq_c2), "spe%02d.2", spu->number);
	ret = request_irq(irq_base + 2*IIC_CLASS_STRIDE + spu->isrc,
			spu_irq_class_2, SA_INTERRUPT, spu->irq_c2, spu);
	if (ret)
		goto out2;
	goto out;

out2:
	free_irq(irq_base + IIC_CLASS_STRIDE + spu->isrc, spu);
out1:
	free_irq(irq_base + spu->isrc, spu);
out:
	return ret;
}

static void
spu_free_irqs(struct spu *spu)
{
	int irq_base;

	irq_base = IIC_NODE_STRIDE * spu->node + IIC_SPE_OFFSET;

	free_irq(irq_base + spu->isrc, spu);
	free_irq(irq_base + IIC_CLASS_STRIDE + spu->isrc, spu);
	free_irq(irq_base + 2*IIC_CLASS_STRIDE + spu->isrc, spu);
}

static LIST_HEAD(spu_list);
static DEFINE_MUTEX(spu_mutex);

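/*
 * Drain the architected SPU channels listed in zero_list and reset the
 * channel counts in count_list, so a freshly allocated SPU starts out in
 * a known-clean state.
 */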
static void spu_init_channels(struct spu *spu)
{
	static const struct {
		unsigned channel;
		unsigned count;
	} zero_list[] = {
		{ 0x00, 1, }, { 0x01, 1, }, { 0x03, 1, }, { 0x04, 1, },
		{ 0x18, 1, }, { 0x19, 1, }, { 0x1b, 1, }, { 0x1d, 1, },
	}, count_list[] = {
		{ 0x00, 0, }, { 0x03, 0, }, { 0x04, 0, }, { 0x15, 16, },
		{ 0x17, 1, }, { 0x18, 0, }, { 0x19, 0, }, { 0x1b, 0, },
		{ 0x1c, 1, }, { 0x1d, 0, }, { 0x1e, 1, },
	};
	struct spu_priv2 __iomem *priv2;
	int i;

	priv2 = spu->priv2;

	/* initialize all channel data to zero */
	for (i = 0; i < ARRAY_SIZE(zero_list); i++) {
		int count;

		out_be64(&priv2->spu_chnlcntptr_RW, zero_list[i].channel);
		for (count = 0; count < zero_list[i].count; count++)
			out_be64(&priv2->spu_chnldata_RW, 0);
	}

	/* initialize channel counts to meaningful values */
	for (i = 0; i < ARRAY_SIZE(count_list); i++) {
		out_be64(&priv2->spu_chnlcntptr_RW, count_list[i].channel);
		out_be64(&priv2->spu_chnlcnt_RW, count_list[i].count);
	}
}

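/*
 * SPUs are handed out whole from a global free list.  A minimal usage
 * sketch (hypothetical caller; spufs is the real in-tree user):
 *
 *	struct spu *spu = spu_alloc();
 *	if (!spu)
 *		return -ENOSPC;
 *	... attach a context, run it, field the callbacks ...
 *	spu_free(spu);
 */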
struct spu *spu_alloc(void)
{
	struct spu *spu;

	mutex_lock(&spu_mutex);
	if (!list_empty(&spu_list)) {
		spu = list_entry(spu_list.next, struct spu, list);
		list_del_init(&spu->list);
		pr_debug("Got SPU %x %d\n", spu->isrc, spu->number);
	} else {
		pr_debug("No SPU left\n");
		spu = NULL;
	}
	mutex_unlock(&spu_mutex);

	if (spu)
		spu_init_channels(spu);

	return spu;
}
EXPORT_SYMBOL_GPL(spu_alloc);

void spu_free(struct spu *spu)
{
	mutex_lock(&spu_mutex);
	list_add_tail(&spu->list, &spu_list);
	mutex_unlock(&spu_mutex);
}
EXPORT_SYMBOL_GPL(spu_free);

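/*
 * Resolve a deferred MFC fault against the owning process' mm.  This
 * mirrors the generic powerpc page fault path: look up the VMA, check
 * access permissions against dsisr, then let handle_mm_fault() put the
 * page in place.
 */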
static int spu_handle_mm_fault(struct spu *spu)
{
	struct mm_struct *mm = spu->mm;
	struct vm_area_struct *vma;
	u64 ea, dsisr, is_write;
	int ret;

	ea = spu->dar;
	dsisr = spu->dsisr;
#if 0
	if (!IS_VALID_EA(ea)) {
		return -EFAULT;
	}
#endif /* XXX */
	if (mm == NULL) {
		return -EFAULT;
	}
	if (mm->pgd == NULL) {
		return -EFAULT;
	}

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, ea);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= ea)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
#if 0
	if (expand_stack(vma, ea))
		goto bad_area;
#endif /* XXX */
good_area:
	is_write = dsisr & MFC_DSISR_ACCESS_PUT;
	if (is_write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	} else {
		if (dsisr & MFC_DSISR_ACCESS_DENIED)
			goto bad_area;
		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto bad_area;
	}
	ret = 0;
	switch (handle_mm_fault(mm, vma, ea, is_write)) {
	case VM_FAULT_MINOR:
		current->min_flt++;
		break;
	case VM_FAULT_MAJOR:
		current->maj_flt++;
		break;
	case VM_FAULT_SIGBUS:
		ret = -EFAULT;
		goto bad_area;
	case VM_FAULT_OOM:
		ret = -ENOMEM;
		goto bad_area;
	default:
		BUG();
	}
	up_read(&mm->mmap_sem);
	return ret;

bad_area:
	up_read(&mm->mmap_sem);
	return -EFAULT;
}

int spu_irq_class_1_bottom(struct spu *spu)
{
	u64 ea, dsisr, access, error = 0UL;
	int ret = 0;

	ea = spu->dar;
	dsisr = spu->dsisr;
	if (dsisr & (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED)) {
		u64 flags;

		access = (_PAGE_PRESENT | _PAGE_USER);
		access |= (dsisr & MFC_DSISR_ACCESS_PUT) ? _PAGE_RW : 0UL;
		local_irq_save(flags);
		if (hash_page(ea, access, 0x300) != 0)
			error |= CLASS1_ENABLE_STORAGE_FAULT_INTR;
		local_irq_restore(flags);
	}
	if (error & CLASS1_ENABLE_STORAGE_FAULT_INTR) {
		if ((ret = spu_handle_mm_fault(spu)) != 0)
			error |= CLASS1_ENABLE_STORAGE_FAULT_INTR;
		else
			error &= ~CLASS1_ENABLE_STORAGE_FAULT_INTR;
	}
	spu->dar = 0UL;
	spu->dsisr = 0UL;
	if (!error) {
		spu_restart_dma(spu);
	} else {
		__spu_trap_invalid_dma(spu);
	}
	return ret;
}

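/*
 * Route all three interrupt classes of this SPE to the IIC target id of
 * the given cpu; the target is replicated into the three per-class
 * fields of the routing register.
 */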
void spu_irq_setaffinity(struct spu *spu, int cpu)
{
	u64 target = iic_get_target_id(cpu);
	u64 route = target << 48 | target << 32 | target << 16;
	spu_int_route_set(spu, route);
}
EXPORT_SYMBOL_GPL(spu_irq_setaffinity);

static int __init find_spu_node_id(struct device_node *spe)
{
	unsigned int *id;
	struct device_node *cpu;
	cpu = spe->parent->parent;
	id = (unsigned int *)get_property(cpu, "node-id", NULL);
	return id ? *id : 0;
}

static int __init cell_spuprop_present(struct spu *spu, struct device_node *spe,
		const char *prop)
{
	static DEFINE_MUTEX(add_spumem_mutex);

	struct address_prop {
		unsigned long address;
		unsigned int len;
	} __attribute__((packed)) *p;
	int proplen;

	unsigned long start_pfn, nr_pages;
	struct pglist_data *pgdata;
	struct zone *zone;
	int ret;

	p = (void *)get_property(spe, prop, &proplen);
	WARN_ON(proplen != sizeof (*p));

	start_pfn = p->address >> PAGE_SHIFT;
	nr_pages = ((unsigned long)p->len + PAGE_SIZE - 1) >> PAGE_SHIFT;

	pgdata = NODE_DATA(spu->nid);
	zone = pgdata->node_zones;

	/* XXX rethink locking here */
	mutex_lock(&add_spumem_mutex);
	ret = __add_pages(zone, start_pfn, nr_pages);
	mutex_unlock(&add_spumem_mutex);

	return ret;
}

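/*
 * Map a register area described by an (address, len) property of the SPE
 * node.  The range is first registered as memory via cell_spuprop_present()
 * (giving it struct page backing, since the local store is used as RAM)
 * and then ioremapped.
 */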
static void __iomem * __init map_spe_prop(struct spu *spu,
		struct device_node *n, const char *name)
{
	struct address_prop {
		unsigned long address;
		unsigned int len;
	} __attribute__((packed)) *prop;

	void *p;
	int proplen;
	void *ret = NULL;
	int err = 0;

	p = get_property(n, name, &proplen);
	if (!p || proplen != sizeof (struct address_prop))
		return NULL;

	prop = p;

	err = cell_spuprop_present(spu, n, name);
	if (err && (err != -EEXIST))
		goto out;

	ret = ioremap(prop->address, prop->len);

out:
	return ret;
}

static void spu_unmap(struct spu *spu)
{
	iounmap(spu->priv2);
	iounmap(spu->priv1);
	iounmap(spu->problem);
	iounmap((u8 __iomem *)spu->local_store);
}

static int __init spu_map_device(struct spu *spu, struct device_node *node)
{
	char *prop;
	int ret;

	ret = -ENODEV;
	prop = get_property(node, "isrc", NULL);
	if (!prop)
		goto out;
	spu->isrc = *(unsigned int *)prop;

	spu->name = get_property(node, "name", NULL);
	if (!spu->name)
		goto out;

	prop = get_property(node, "local-store", NULL);
	if (!prop)
		goto out;
	spu->local_store_phys = *(unsigned long *)prop;

	/* we use local store as ram, not io memory */
	spu->local_store = (void __force *)
		map_spe_prop(spu, node, "local-store");
	if (!spu->local_store)
		goto out;

	prop = get_property(node, "problem", NULL);
	if (!prop)
		goto out_unmap;
	spu->problem_phys = *(unsigned long *)prop;

	spu->problem = map_spe_prop(spu, node, "problem");
	if (!spu->problem)
		goto out_unmap;

	spu->priv1 = map_spe_prop(spu, node, "priv1");
	/* priv1 is not available on a hypervisor */

	spu->priv2 = map_spe_prop(spu, node, "priv2");
	if (!spu->priv2)
		goto out_unmap;
	ret = 0;
	goto out;

out_unmap:
	spu_unmap(spu);
out:
	return ret;
}


struct sysdev_class spu_sysdev_class = {
	set_kset_name("spu")
};

static ssize_t spu_show_isrc(struct sys_device *sysdev, char *buf)
{
	struct spu *spu = container_of(sysdev, struct spu, sysdev);
	return sprintf(buf, "%d\n", spu->isrc);
}
static SYSDEV_ATTR(isrc, 0400, spu_show_isrc, NULL);

extern int attach_sysdev_to_node(struct sys_device *dev, int nid);

static int spu_create_sysdev(struct spu *spu)
{
	int ret;

	spu->sysdev.id = spu->number;
	spu->sysdev.cls = &spu_sysdev_class;
	ret = sysdev_register(&spu->sysdev);
	if (ret) {
		printk(KERN_ERR "Can't register SPU %d with sysfs\n",
				spu->number);
		return ret;
	}

	sysdev_create_file(&spu->sysdev, &attr_isrc);
	sysfs_add_device_to_node(&spu->sysdev, spu->nid);

	return 0;
}

static void spu_destroy_sysdev(struct spu *spu)
{
	sysdev_remove_file(&spu->sysdev, &attr_isrc);
	sysfs_remove_device_from_node(&spu->sysdev, spu->nid);
	sysdev_unregister(&spu->sysdev);
}

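/*
 * Bring up a single SPE: map its register areas, request the three
 * interrupt classes, register it with sysfs, and put it on the global
 * free list under spu_mutex.
 */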
static int __init create_spu(struct device_node *spe)
{
	struct spu *spu;
	int ret;
	static int number;

	ret = -ENOMEM;
	spu = kzalloc(sizeof (*spu), GFP_KERNEL);
	if (!spu)
		goto out;

	ret = spu_map_device(spu, spe);
	if (ret)
		goto out_free;

	spu->node = find_spu_node_id(spe);
	spu->nid = of_node_to_nid(spe);
	if (spu->nid == -1)
		spu->nid = 0;
	spin_lock_init(&spu->register_lock);
	spu_mfc_sdr_set(spu, mfspr(SPRN_SDR1));
	spu_mfc_sr1_set(spu, 0x33);
	mutex_lock(&spu_mutex);

	spu->number = number++;
	ret = spu_request_irqs(spu);
	if (ret)
		goto out_unmap;

	ret = spu_create_sysdev(spu);
	if (ret)
		goto out_free_irqs;

	list_add(&spu->list, &spu_list);
	mutex_unlock(&spu_mutex);

	pr_debug("Using SPE %s %02x %p %p %p %p %d\n",
		spu->name, spu->isrc, spu->local_store,
		spu->problem, spu->priv1, spu->priv2, spu->number);
	goto out;

out_free_irqs:
	spu_free_irqs(spu);

out_unmap:
	mutex_unlock(&spu_mutex);
	spu_unmap(spu);
out_free:
	kfree(spu);
out:
	return ret;
}


static void destroy_spu(struct spu *spu)
{
	list_del_init(&spu->list);

	spu_destroy_sysdev(spu);
	spu_free_irqs(spu);
	spu_unmap(spu);
	kfree(spu);
}

static void cleanup_spu_base(void)
{
	struct spu *spu, *tmp;
	mutex_lock(&spu_mutex);
	list_for_each_entry_safe(spu, tmp, &spu_list, list)
		destroy_spu(spu);
	mutex_unlock(&spu_mutex);
	sysdev_class_unregister(&spu_sysdev_class);
}
module_exit(cleanup_spu_base);

static int __init init_spu_base(void)
{
	struct device_node *node;
	int ret;

	/* create sysdev class for spus */
	ret = sysdev_class_register(&spu_sysdev_class);
	if (ret)
		return ret;

	ret = -ENODEV;
	for (node = of_find_node_by_type(NULL, "spe");
			node; node = of_find_node_by_type(node, "spe")) {
		ret = create_spu(node);
		if (ret) {
			printk(KERN_WARNING "%s: Error initializing %s\n",
				__FUNCTION__, node->name);
			cleanup_spu_base();
			break;
		}
	}
	/* in some old firmware versions, the spe is called 'spc', so we
	   look for that as well */
	for (node = of_find_node_by_type(NULL, "spc");
			node; node = of_find_node_by_type(node, "spc")) {
		ret = create_spu(node);
		if (ret) {
			printk(KERN_WARNING "%s: Error initializing %s\n",
				__FUNCTION__, node->name);
			cleanup_spu_base();
			break;
		}
	}
	return ret;
}
module_init(init_spu_base);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>");