/*
 * Low-level SPU handling
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
 *
 * Author: Arnd Bergmann <arndb@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#undef DEBUG

#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/poll.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/wait.h>

#include <asm/firmware.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/spu.h>
#include <asm/spu_priv1.h>
#include <asm/mmu_context.h>

#include "interrupt.h"

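/*
 * Access to privileged (priv1) SPU registers goes through this ops
 * vector, so the same code can run both on raw hardware and under a
 * hypervisor, where the priv1 area is not directly accessible (see
 * spu_map_device() below).
 */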
const struct spu_priv1_ops *spu_priv1_ops;

EXPORT_SYMBOL_GPL(spu_priv1_ops);

static int __spu_trap_invalid_dma(struct spu *spu)
{
	pr_debug("%s\n", __FUNCTION__);
	spu->dma_callback(spu, SPE_EVENT_INVALID_DMA);
	return 0;
}

static int __spu_trap_dma_align(struct spu *spu)
{
	pr_debug("%s\n", __FUNCTION__);
	spu->dma_callback(spu, SPE_EVENT_DMA_ALIGNMENT);
	return 0;
}

static int __spu_trap_error(struct spu *spu)
{
	pr_debug("%s\n", __FUNCTION__);
	spu->dma_callback(spu, SPE_EVENT_SPE_ERROR);
	return 0;
}

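/*
 * Kick the MFC so that a suspended DMA queue resumes once a fault has
 * been resolved.  Skipped while a context switch is pending; the
 * save/restore sequence takes care of the MFC queue state itself.
 */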
static void spu_restart_dma(struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	if (!test_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags))
		out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESTART_DMA_COMMAND);
}

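/*
 * SLB miss on the SPE side: build a user-segment SLB entry by hand and
 * write it into one of the eight SPU SLB slots, replacing entries
 * round-robin via spu->slb_replace.
 */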
static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	struct mm_struct *mm = spu->mm;
	u64 esid, vsid, llp;

	pr_debug("%s\n", __FUNCTION__);

	if (test_bit(SPU_CONTEXT_SWITCH_ACTIVE, &spu->flags)) {
		/* SLBs are pre-loaded for context switch, so
		 * we should never get here!
		 */
		printk("%s: invalid access during switch!\n", __func__);
		return 1;
	}
	if (!mm || (REGION_ID(ea) != USER_REGION_ID)) {
		/* Future: support kernel segments so that drivers
		 * can use SPUs.
		 */
		pr_debug("invalid region access at %016lx\n", ea);
		return 1;
	}

	esid = (ea & ESID_MASK) | SLB_ESID_V;
#ifdef CONFIG_HUGETLB_PAGE
	if (in_hugepage_area(mm->context, ea))
		llp = mmu_psize_defs[mmu_huge_psize].sllp;
	else
#endif
		llp = mmu_psize_defs[mmu_virtual_psize].sllp;
	vsid = (get_vsid(mm->context.id, ea) << SLB_VSID_SHIFT) |
			SLB_VSID_USER | llp;

	out_be64(&priv2->slb_index_W, spu->slb_replace);
	out_be64(&priv2->slb_vsid_RW, vsid);
	out_be64(&priv2->slb_esid_RW, esid);

	spu->slb_replace++;
	if (spu->slb_replace >= 8)
		spu->slb_replace = 0;

	spu_restart_dma(spu);

	return 0;
}

extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap); //XXX
static int __spu_trap_data_map(struct spu *spu, unsigned long ea, u64 dsisr)
{
	pr_debug("%s, %lx, %lx\n", __FUNCTION__, dsisr, ea);

	/* Handle kernel space hash faults immediately.
	   User hash faults need to be deferred to process context. */
	if ((dsisr & MFC_DSISR_PTE_NOT_FOUND)
	    && REGION_ID(ea) != USER_REGION_ID
	    && hash_page(ea, _PAGE_PRESENT, 0x300) == 0) {
		spu_restart_dma(spu);
		return 0;
	}

	if (test_bit(SPU_CONTEXT_SWITCH_ACTIVE, &spu->flags)) {
		printk("%s: invalid access during switch!\n", __func__);
		return 1;
	}

	spu->dar = ea;
	spu->dsisr = dsisr;
	mb();
	spu->stop_callback(spu);
	return 0;
}

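/*
 * Each SPE exposes three interrupt classes:
 *   class 0: error conditions (DMA alignment, invalid MFC DMA, SPU error)
 *   class 1: translation faults (segment and mapping faults)
 *   class 2: mailbox, stop-and-signal, halt and DMA tag completion
 * The hard irq handlers below collect and acknowledge status, then defer
 * the heavy lifting to the registered callbacks or the *_bottom halves.
 */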
static irqreturn_t
spu_irq_class_0(int irq, void *data)
{
	struct spu *spu;

	spu = data;
	spu->class_0_pending = 1;
	spu->stop_callback(spu);

	return IRQ_HANDLED;
}

int
spu_irq_class_0_bottom(struct spu *spu)
{
	unsigned long stat, mask;

	spu->class_0_pending = 0;

	mask = spu_int_mask_get(spu, 0);
	stat = spu_int_stat_get(spu, 0);

	stat &= mask;

	if (stat & 1) /* invalid DMA alignment */
		__spu_trap_dma_align(spu);

	if (stat & 2) /* invalid MFC DMA */
		__spu_trap_invalid_dma(spu);

	if (stat & 4) /* error on SPU */
		__spu_trap_error(spu);

	spu_int_stat_clear(spu, 0, stat);

	return (stat & 0x7) ? -EIO : 0;
}
EXPORT_SYMBOL_GPL(spu_irq_class_0_bottom);

static irqreturn_t
spu_irq_class_1(int irq, void *data)
{
	struct spu *spu;
	unsigned long stat, mask, dar, dsisr;

	spu = data;

	/* atomically read & clear class1 status. */
	spin_lock(&spu->register_lock);
	mask = spu_int_mask_get(spu, 1);
	stat = spu_int_stat_get(spu, 1) & mask;
	dar = spu_mfc_dar_get(spu);
	dsisr = spu_mfc_dsisr_get(spu);
	if (stat & 2) /* mapping fault */
		spu_mfc_dsisr_set(spu, 0ul);
	spu_int_stat_clear(spu, 1, stat);
	spin_unlock(&spu->register_lock);
	pr_debug("%s: %lx %lx %lx %lx\n", __FUNCTION__, mask, stat,
			dar, dsisr);

	if (stat & 1) /* segment fault */
		__spu_trap_data_seg(spu, dar);

	if (stat & 2) { /* mapping fault */
		__spu_trap_data_map(spu, dar, dsisr);
	}

	if (stat & 4) /* ls compare & suspend on get */
		;

	if (stat & 8) /* ls compare & suspend on put */
		;

	return stat ? IRQ_HANDLED : IRQ_NONE;
}
EXPORT_SYMBOL_GPL(spu_irq_class_1_bottom);

static irqreturn_t
spu_irq_class_2(int irq, void *data)
{
	struct spu *spu;
	unsigned long stat;
	unsigned long mask;

	spu = data;
	spin_lock(&spu->register_lock);
	stat = spu_int_stat_get(spu, 2);
	mask = spu_int_mask_get(spu, 2);
	/* ignore interrupts we're not waiting for */
	stat &= mask;
	/*
	 * mailbox interrupts (0x1 and 0x10) are level triggered.
	 * mask them now before acknowledging.
	 */
	if (stat & 0x11)
		spu_int_mask_and(spu, 2, ~(stat & 0x11));
	/* acknowledge all interrupts before the callbacks */
	spu_int_stat_clear(spu, 2, stat);
	spin_unlock(&spu->register_lock);

	pr_debug("class 2 interrupt %d, %lx, %lx\n", irq, stat, mask);

	if (stat & 1)  /* PPC core mailbox */
		spu->ibox_callback(spu);

	if (stat & 2) /* SPU stop-and-signal */
		spu->stop_callback(spu);

	if (stat & 4) /* SPU halted */
		spu->stop_callback(spu);

	if (stat & 8) /* DMA tag group complete */
		spu->mfc_callback(spu);

	if (stat & 0x10) /* SPU mailbox threshold */
		spu->wbox_callback(spu);

	return stat ? IRQ_HANDLED : IRQ_NONE;
}

static int spu_request_irqs(struct spu *spu)
{
	int ret = 0;

	if (spu->irqs[0] != NO_IRQ) {
		snprintf(spu->irq_c0, sizeof (spu->irq_c0), "spe%02d.0",
			 spu->number);
		ret = request_irq(spu->irqs[0], spu_irq_class_0,
				  IRQF_DISABLED,
				  spu->irq_c0, spu);
		if (ret)
			goto bail0;
	}
	if (spu->irqs[1] != NO_IRQ) {
		snprintf(spu->irq_c1, sizeof (spu->irq_c1), "spe%02d.1",
			 spu->number);
		ret = request_irq(spu->irqs[1], spu_irq_class_1,
				  IRQF_DISABLED,
				  spu->irq_c1, spu);
		if (ret)
			goto bail1;
	}
	if (spu->irqs[2] != NO_IRQ) {
		snprintf(spu->irq_c2, sizeof (spu->irq_c2), "spe%02d.2",
			 spu->number);
		ret = request_irq(spu->irqs[2], spu_irq_class_2,
				  IRQF_DISABLED,
				  spu->irq_c2, spu);
		if (ret)
			goto bail2;
	}
	return 0;

bail2:
	if (spu->irqs[1] != NO_IRQ)
		free_irq(spu->irqs[1], spu);
bail1:
	if (spu->irqs[0] != NO_IRQ)
		free_irq(spu->irqs[0], spu);
bail0:
	return ret;
}

static void spu_free_irqs(struct spu *spu)
{
	if (spu->irqs[0] != NO_IRQ)
		free_irq(spu->irqs[0], spu);
	if (spu->irqs[1] != NO_IRQ)
		free_irq(spu->irqs[1], spu);
	if (spu->irqs[2] != NO_IRQ)
		free_irq(spu->irqs[2], spu);
}

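/* idle SPUs are kept on per-node free lists, protected by spu_mutex */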
static struct list_head spu_list[MAX_NUMNODES];
static DEFINE_MUTEX(spu_mutex);

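/*
 * Bring the SPU channels into a known state before handing the SPU to a
 * new user: priv2 provides an index register to select a channel, plus
 * data and count registers to write through to it.
 */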
static void spu_init_channels(struct spu *spu)
{
	static const struct {
		 unsigned channel;
		 unsigned count;
	} zero_list[] = {
		{ 0x00, 1, }, { 0x01, 1, }, { 0x03, 1, }, { 0x04, 1, },
		{ 0x18, 1, }, { 0x19, 1, }, { 0x1b, 1, }, { 0x1d, 1, },
	}, count_list[] = {
		{ 0x00, 0, }, { 0x03, 0, }, { 0x04, 0, }, { 0x15, 16, },
		{ 0x17, 1, }, { 0x18, 0, }, { 0x19, 0, }, { 0x1b, 0, },
		{ 0x1c, 1, }, { 0x1d, 0, }, { 0x1e, 1, },
	};
	struct spu_priv2 __iomem *priv2;
	int i;

	priv2 = spu->priv2;

	/* initialize all channel data to zero */
	for (i = 0; i < ARRAY_SIZE(zero_list); i++) {
		int count;

		out_be64(&priv2->spu_chnlcntptr_RW, zero_list[i].channel);
		for (count = 0; count < zero_list[i].count; count++)
			out_be64(&priv2->spu_chnldata_RW, 0);
	}

	/* initialize channel counts to meaningful values */
	for (i = 0; i < ARRAY_SIZE(count_list); i++) {
		out_be64(&priv2->spu_chnlcntptr_RW, count_list[i].channel);
		out_be64(&priv2->spu_chnlcnt_RW, count_list[i].count);
	}
}

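/*
 * spu_alloc_node - take an idle SPU off the free list of @node.
 * Returns NULL when the node has no idle SPU; the caller hands the
 * SPU back with spu_free().
 */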
struct spu *spu_alloc_node(int node)
{
	struct spu *spu = NULL;

	mutex_lock(&spu_mutex);
	if (!list_empty(&spu_list[node])) {
		spu = list_entry(spu_list[node].next, struct spu, list);
		list_del_init(&spu->list);
		pr_debug("Got SPU %d %d\n", spu->number, spu->node);
		spu_init_channels(spu);
	}
	mutex_unlock(&spu_mutex);

	return spu;
}
EXPORT_SYMBOL_GPL(spu_alloc_node);

struct spu *spu_alloc(void)
{
	struct spu *spu = NULL;
	int node;

	for (node = 0; node < MAX_NUMNODES; node++) {
		spu = spu_alloc_node(node);
		if (spu)
			break;
	}

	return spu;
}

void spu_free(struct spu *spu)
{
	mutex_lock(&spu_mutex);
	list_add_tail(&spu->list, &spu_list[spu->node]);
	mutex_unlock(&spu_mutex);
}
EXPORT_SYMBOL_GPL(spu_free);

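/*
 * Resolve a page fault on behalf of the SPU, using the DAR/DSISR values
 * latched by the class 1 handler.  This is in essence a trimmed-down
 * copy of the powerpc do_page_fault() path and runs in process context
 * from spu_irq_class_1_bottom().
 */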
static int spu_handle_mm_fault(struct spu *spu)
{
	struct mm_struct *mm = spu->mm;
	struct vm_area_struct *vma;
	u64 ea, dsisr, is_write;
	int ret;

	ea = spu->dar;
	dsisr = spu->dsisr;
#if 0
	if (!IS_VALID_EA(ea)) {
		return -EFAULT;
	}
#endif /* XXX */
	if (mm == NULL) {
		return -EFAULT;
	}
	if (mm->pgd == NULL) {
		return -EFAULT;
	}

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, ea);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= ea)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
#if 0
	if (expand_stack(vma, ea))
		goto bad_area;
#endif /* XXX */
good_area:
	is_write = dsisr & MFC_DSISR_ACCESS_PUT;
	if (is_write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	} else {
		if (dsisr & MFC_DSISR_ACCESS_DENIED)
			goto bad_area;
		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto bad_area;
	}
	ret = 0;
	switch (handle_mm_fault(mm, vma, ea, is_write)) {
	case VM_FAULT_MINOR:
		current->min_flt++;
		break;
	case VM_FAULT_MAJOR:
		current->maj_flt++;
		break;
	case VM_FAULT_SIGBUS:
		ret = -EFAULT;
		goto bad_area;
	case VM_FAULT_OOM:
		ret = -ENOMEM;
		goto bad_area;
	default:
		BUG();
	}
	up_read(&mm->mmap_sem);
	return ret;

bad_area:
	up_read(&mm->mmap_sem);
	return -EFAULT;
}

int spu_irq_class_1_bottom(struct spu *spu)
{
	u64 ea, dsisr, access, error = 0UL;
	int ret = 0;

	ea = spu->dar;
	dsisr = spu->dsisr;
	if (dsisr & (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED)) {
		u64 flags;

		access = (_PAGE_PRESENT | _PAGE_USER);
		access |= (dsisr & MFC_DSISR_ACCESS_PUT) ? _PAGE_RW : 0UL;
		local_irq_save(flags);
		if (hash_page(ea, access, 0x300) != 0)
			error |= CLASS1_ENABLE_STORAGE_FAULT_INTR;
		local_irq_restore(flags);
	}
	if (error & CLASS1_ENABLE_STORAGE_FAULT_INTR) {
		if ((ret = spu_handle_mm_fault(spu)) != 0)
			error |= CLASS1_ENABLE_STORAGE_FAULT_INTR;
		else
			error &= ~CLASS1_ENABLE_STORAGE_FAULT_INTR;
	}
	spu->dar = 0UL;
	spu->dsisr = 0UL;
	if (!error) {
		spu_restart_dma(spu);
	} else {
		__spu_trap_invalid_dma(spu);
	}
	return ret;
}

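/*
 * Device-tree probing: the SPE node's grandparent is the CPU node that
 * carries the "node-id" property identifying the BE chip.
 */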
static int __init find_spu_node_id(struct device_node *spe)
{
	const unsigned int *id;
	struct device_node *cpu;
	cpu = spe->parent->parent;
	id = get_property(cpu, "node-id", NULL);
	return id ? *id : 0;
}

static int __init cell_spuprop_present(struct spu *spu, struct device_node *spe,
		const char *prop)
{
	static DEFINE_MUTEX(add_spumem_mutex);

	const struct address_prop {
		unsigned long address;
		unsigned int len;
	} __attribute__((packed)) *p;
	int proplen;

	unsigned long start_pfn, nr_pages;
	struct pglist_data *pgdata;
	struct zone *zone;
	int ret;

	p = get_property(spe, prop, &proplen);
	WARN_ON(proplen != sizeof (*p));

	start_pfn = p->address >> PAGE_SHIFT;
	nr_pages = ((unsigned long)p->len + PAGE_SIZE - 1) >> PAGE_SHIFT;

	pgdata = NODE_DATA(spu->nid);
	zone = pgdata->node_zones;

	/* XXX rethink locking here */
	mutex_lock(&add_spumem_mutex);
	ret = __add_pages(zone, start_pfn, nr_pages);
	mutex_unlock(&add_spumem_mutex);

	return ret;
}

static void __iomem * __init map_spe_prop(struct spu *spu,
		struct device_node *n, const char *name)
{
	const struct address_prop {
		unsigned long address;
		unsigned int len;
	} __attribute__((packed)) *prop;

	const void *p;
	int proplen;
	void __iomem *ret = NULL;
	int err = 0;

	p = get_property(n, name, &proplen);
	if (proplen != sizeof (struct address_prop))
		return NULL;

	prop = p;

	err = cell_spuprop_present(spu, n, name);
	if (err && (err != -EEXIST))
		goto out;

	ret = ioremap(prop->address, prop->len);

 out:
	return ret;
}

static void spu_unmap(struct spu *spu)
{
	iounmap(spu->priv2);
	iounmap(spu->priv1);
	iounmap(spu->problem);
	iounmap((__force u8 __iomem *)spu->local_store);
}

/* This function shall be abstracted for HV platforms */
static int __init spu_map_interrupts_old(struct spu *spu, struct device_node *np)
{
	unsigned int isrc;
	const u32 *tmp;

	/* Get the interrupt source unit from the device-tree */
	tmp = get_property(np, "isrc", NULL);
	if (!tmp)
		return -ENODEV;
	isrc = tmp[0];

	/* Add the node number */
	isrc |= spu->node << IIC_IRQ_NODE_SHIFT;

	/* Now map interrupts of all 3 classes */
	spu->irqs[0] = irq_create_mapping(NULL, IIC_IRQ_CLASS_0 | isrc);
	spu->irqs[1] = irq_create_mapping(NULL, IIC_IRQ_CLASS_1 | isrc);
	spu->irqs[2] = irq_create_mapping(NULL, IIC_IRQ_CLASS_2 | isrc);

	/* Right now, we only fail if class 2 failed */
	return spu->irqs[2] == NO_IRQ ? -EINVAL : 0;
}

static int __init spu_map_device_old(struct spu *spu, struct device_node *node)
{
	const char *prop;
	int ret;

	ret = -ENODEV;
	spu->name = get_property(node, "name", NULL);
	if (!spu->name)
		goto out;

	prop = get_property(node, "local-store", NULL);
	if (!prop)
		goto out;
	spu->local_store_phys = *(unsigned long *)prop;

	/* we use local store as ram, not io memory */
	spu->local_store = (void __force *)
		map_spe_prop(spu, node, "local-store");
	if (!spu->local_store)
		goto out;

	prop = get_property(node, "problem", NULL);
	if (!prop)
		goto out_unmap;
	spu->problem_phys = *(unsigned long *)prop;

	spu->problem = map_spe_prop(spu, node, "problem");
	if (!spu->problem)
		goto out_unmap;

	spu->priv1 = map_spe_prop(spu, node, "priv1");
	/* priv1 is not available on a hypervisor */

	spu->priv2 = map_spe_prop(spu, node, "priv2");
	if (!spu->priv2)
		goto out_unmap;
	ret = 0;
	goto out;

out_unmap:
	spu_unmap(spu);
out:
	return ret;
}

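/*
 * Newer device trees describe SPEs with standard "interrupts" and "reg"
 * properties, handled below; the *_old variants above parse the original
 * firmware layout and are kept as a fallback (see create_spu()).
 */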
static int __init spu_map_interrupts(struct spu *spu, struct device_node *np)
{
	struct of_irq oirq;
	int ret;
	int i;

	for (i = 0; i < 3; i++) {
		ret = of_irq_map_one(np, i, &oirq);
		if (ret)
			goto err;

		ret = -EINVAL;
		spu->irqs[i] = irq_create_of_mapping(oirq.controller,
					oirq.specifier, oirq.size);
		if (spu->irqs[i] == NO_IRQ)
			goto err;
	}
	return 0;

err:
	pr_debug("failed to map irq %x for spu %s\n", *oirq.specifier, spu->name);
	for (; i >= 0; i--) {
		if (spu->irqs[i] != NO_IRQ)
			irq_dispose_mapping(spu->irqs[i]);
	}
	return ret;
}

static int spu_map_resource(struct device_node *node, int nr,
		void __iomem** virt, unsigned long *phys)
{
	struct resource resource = { };
	int ret;

	/* look up the nr'th address range of this node */
	ret = of_address_to_resource(node, nr, &resource);
	if (ret)
		goto out;

	if (phys)
		*phys = resource.start;
	*virt = ioremap(resource.start, resource.end - resource.start);
	if (!*virt)
		ret = -EINVAL;

out:
	return ret;
}

static int __init spu_map_device(struct spu *spu, struct device_node *node)
{
	int ret = -ENODEV;
	spu->name = get_property(node, "name", NULL);
	if (!spu->name)
		goto out;

	ret = spu_map_resource(node, 0, (void __iomem**)&spu->local_store,
			       &spu->local_store_phys);
	if (ret)
		goto out;
	ret = spu_map_resource(node, 1, (void __iomem**)&spu->problem,
			       &spu->problem_phys);
	if (ret)
		goto out_unmap;
	ret = spu_map_resource(node, 2, (void __iomem**)&spu->priv2,
			       NULL);
	if (ret)
		goto out_unmap;

	if (!firmware_has_feature(FW_FEATURE_LPAR))
		ret = spu_map_resource(node, 3, (void __iomem**)&spu->priv1,
				       NULL);
	if (ret)
		goto out_unmap;
	return 0;

out_unmap:
	spu_unmap(spu);
out:
	pr_debug("failed to map spe %s: %d\n", spu->name, ret);
	return ret;
}

struct sysdev_class spu_sysdev_class = {
	set_kset_name("spu")
};

static int spu_create_sysdev(struct spu *spu)
{
	int ret;

	spu->sysdev.id = spu->number;
	spu->sysdev.cls = &spu_sysdev_class;
	ret = sysdev_register(&spu->sysdev);
	if (ret) {
		printk(KERN_ERR "Can't register SPU %d with sysfs\n",
				spu->number);
		return ret;
	}

	sysfs_add_device_to_node(&spu->sysdev, spu->nid);

	return 0;
}

static void spu_destroy_sysdev(struct spu *spu)
{
	sysfs_remove_device_from_node(&spu->sysdev, spu->nid);
	sysdev_unregister(&spu->sysdev);
}

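/*
 * Probe one SPE: map its registers (new device-tree binding first, then
 * the old one), set up interrupts, register it with sysfs and put it on
 * the free list of its node.
 */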
static int __init create_spu(struct device_node *spe)
{
	struct spu *spu;
	int ret;
	static int number;

	ret = -ENOMEM;
	spu = kzalloc(sizeof (*spu), GFP_KERNEL);
	if (!spu)
		goto out;

	spu->node = find_spu_node_id(spe);
	if (spu->node >= MAX_NUMNODES) {
		printk(KERN_WARNING "SPE %s on node %d ignored,"
		       " node number too big\n", spe->full_name, spu->node);
		printk(KERN_WARNING "Check if CONFIG_NUMA is enabled.\n");
		/* don't leak the spu struct on this error path */
		ret = -ENODEV;
		goto out_free;
	}
	spu->nid = of_node_to_nid(spe);
	if (spu->nid == -1)
		spu->nid = 0;

	ret = spu_map_device(spu, spe);
	/* try old method */
	if (ret)
		ret = spu_map_device_old(spu, spe);
	if (ret)
		goto out_free;

	ret = spu_map_interrupts(spu, spe);
	if (ret)
		ret = spu_map_interrupts_old(spu, spe);
	if (ret)
		goto out_unmap;
	spin_lock_init(&spu->register_lock);
	spu_mfc_sdr_setup(spu);
	spu_mfc_sr1_set(spu, 0x33);
	mutex_lock(&spu_mutex);

	spu->number = number++;
	ret = spu_request_irqs(spu);
	if (ret)
		goto out_unlock;

	ret = spu_create_sysdev(spu);
	if (ret)
		goto out_free_irqs;

	list_add(&spu->list, &spu_list[spu->node]);
	mutex_unlock(&spu_mutex);

	pr_debug("Using SPE %s %p %p %p %p %d\n",
		spu->name, spu->local_store,
		spu->problem, spu->priv1, spu->priv2, spu->number);
	goto out;

out_free_irqs:
	spu_free_irqs(spu);
out_unlock:
	mutex_unlock(&spu_mutex);
out_unmap:
	spu_unmap(spu);
out_free:
	kfree(spu);
out:
	return ret;
}

static void destroy_spu(struct spu *spu)
{
	list_del_init(&spu->list);

	spu_destroy_sysdev(spu);
	spu_free_irqs(spu);
	spu_unmap(spu);
	kfree(spu);
}

static void cleanup_spu_base(void)
{
	struct spu *spu, *tmp;
	int node;

	mutex_lock(&spu_mutex);
	for (node = 0; node < MAX_NUMNODES; node++) {
		list_for_each_entry_safe(spu, tmp, &spu_list[node], list)
			destroy_spu(spu);
	}
	mutex_unlock(&spu_mutex);
	sysdev_class_unregister(&spu_sysdev_class);
}
module_exit(cleanup_spu_base);

static int __init init_spu_base(void)
{
	struct device_node *node;
	int i, ret;

	/* create sysdev class for spus */
	ret = sysdev_class_register(&spu_sysdev_class);
	if (ret)
		return ret;

	for (i = 0; i < MAX_NUMNODES; i++)
		INIT_LIST_HEAD(&spu_list[i]);

	ret = -ENODEV;
	for (node = of_find_node_by_type(NULL, "spe");
			node; node = of_find_node_by_type(node, "spe")) {
		ret = create_spu(node);
		if (ret) {
			printk(KERN_WARNING "%s: Error initializing %s\n",
				__FUNCTION__, node->name);
			cleanup_spu_base();
			break;
		}
	}
	return ret;
}
module_init(init_spu_base);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>");