/*
 * Low-level SPU handling
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
 *
 * Author: Arnd Bergmann <arndb@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#undef DEBUG

#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/mutex.h>
#include <asm/spu.h>
#include <asm/spu_priv1.h>
#include <asm/xmon.h>

const struct spu_management_ops *spu_management_ops;
const struct spu_priv1_ops *spu_priv1_ops;

static struct list_head spu_list[MAX_NUMNODES];
static LIST_HEAD(spu_full_list);
static DEFINE_MUTEX(spu_mutex);
static DEFINE_SPINLOCK(spu_list_lock);

EXPORT_SYMBOL_GPL(spu_priv1_ops);

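/*
 * Invalidate all SLB entries in the SPU's synergistic MMU.  This only
 * makes sense while translation is enabled, i.e. when the relocate bit
 * is set in MFC state register one.
 */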
void spu_invalidate_slbs(struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	if (spu_mfc_sr1_get(spu) & MFC_STATE1_RELOCATE_MASK)
		out_be64(&priv2->slb_invalidate_all_W, 0UL);
}
EXPORT_SYMBOL_GPL(spu_invalidate_slbs);

/* This is called by the MM core when a segment size is changed, to
 * request a flush of all the SPEs using a given mm
 */
void spu_flush_all_slbs(struct mm_struct *mm)
{
	struct spu *spu;
	unsigned long flags;

	spin_lock_irqsave(&spu_list_lock, flags);
	list_for_each_entry(spu, &spu_full_list, full_list) {
		if (spu->mm == mm)
			spu_invalidate_slbs(spu);
	}
	spin_unlock_irqrestore(&spu_list_lock, flags);
}

/* The hack below stinks... try to do something better one of
 * these days... Does it even work properly with NR_CPUS == 1 ?
 */
static inline void mm_needs_global_tlbie(struct mm_struct *mm)
{
	int nr = (NR_CPUS > 1) ? NR_CPUS : NR_CPUS + 1;

	/* Global TLBIE broadcast required with SPEs. */
	__cpus_setall(&mm->cpu_vm_mask, nr);
}

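/*
 * Attach an SPU to an mm, or detach it by passing a NULL mm.  The
 * assignment is done under spu_list_lock so spu_flush_all_slbs()
 * always sees a consistent spu->mm, and the mm is marked as needing
 * global TLB invalidation broadcasts while SPEs may hold translations.
 */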
void spu_associate_mm(struct spu *spu, struct mm_struct *mm)
{
	unsigned long flags;

	spin_lock_irqsave(&spu_list_lock, flags);
	spu->mm = mm;
	spin_unlock_irqrestore(&spu_list_lock, flags);
	if (mm)
		mm_needs_global_tlbie(mm);
}
EXPORT_SYMBOL_GPL(spu_associate_mm);

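/*
 * Class 0 error conditions (invalid DMA, DMA alignment, SPU error)
 * are forwarded to the owning context through its dma_callback hook;
 * these helpers just map the status bits to SPE_EVENT_* codes.
 */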
static int __spu_trap_invalid_dma(struct spu *spu)
{
	pr_debug("%s\n", __FUNCTION__);
	spu->dma_callback(spu, SPE_EVENT_INVALID_DMA);
	return 0;
}

static int __spu_trap_dma_align(struct spu *spu)
{
	pr_debug("%s\n", __FUNCTION__);
	spu->dma_callback(spu, SPE_EVENT_DMA_ALIGNMENT);
	return 0;
}

static int __spu_trap_error(struct spu *spu)
{
	pr_debug("%s\n", __FUNCTION__);
	spu->dma_callback(spu, SPE_EVENT_SPE_ERROR);
	return 0;
}

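/*
 * Restart a suspended MFC DMA queue, unless the queue was deliberately
 * parked for a pending context switch.
 */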
static void spu_restart_dma(struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	if (!test_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags))
		out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESTART_DMA_COMMAND);
}

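/*
 * Handle an SLB miss taken on an SPE-side access: build the ESID/VSID
 * pair for the faulting address, write it into one of the SPU's eight
 * SLB entries using round-robin replacement, and restart the DMA.
 */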
static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	struct mm_struct *mm = spu->mm;
	u64 esid, vsid, llp;
	int psize;

	pr_debug("%s\n", __FUNCTION__);

	if (test_bit(SPU_CONTEXT_SWITCH_ACTIVE, &spu->flags)) {
		/* SLBs are pre-loaded for context switch, so
		 * we should never get here!
		 */
		printk(KERN_ERR "%s: invalid access during switch!\n",
				__func__);
		return 1;
	}
	esid = (ea & ESID_MASK) | SLB_ESID_V;

	switch(REGION_ID(ea)) {
	case USER_REGION_ID:
#ifdef CONFIG_HUGETLB_PAGE
		if (in_hugepage_area(mm->context, ea))
			psize = mmu_huge_psize;
		else
#endif
			psize = mm->context.user_psize;
		vsid = (get_vsid(mm->context.id, ea) << SLB_VSID_SHIFT) |
				SLB_VSID_USER;
		break;
	case VMALLOC_REGION_ID:
		if (ea < VMALLOC_END)
			psize = mmu_vmalloc_psize;
		else
			psize = mmu_io_psize;
		vsid = (get_kernel_vsid(ea) << SLB_VSID_SHIFT) |
			SLB_VSID_KERNEL;
		break;
	case KERNEL_REGION_ID:
		psize = mmu_linear_psize;
		vsid = (get_kernel_vsid(ea) << SLB_VSID_SHIFT) |
			SLB_VSID_KERNEL;
		break;
	default:
		/* Future: support kernel segments so that drivers
		 * can use SPUs.
		 */
		pr_debug("invalid region access at %016lx\n", ea);
		return 1;
	}
	llp = mmu_psize_defs[psize].sllp;

	out_be64(&priv2->slb_index_W, spu->slb_replace);
	out_be64(&priv2->slb_vsid_RW, vsid | llp);
	out_be64(&priv2->slb_esid_RW, esid);

	spu->slb_replace++;
	if (spu->slb_replace >= 8)
		spu->slb_replace = 0;

	spu_restart_dma(spu);

	return 0;
}

extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap); //XXX
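/*
 * Handle a hash table miss on an MFC transfer.  Kernel-side faults are
 * hashed in right away; user faults are stashed in dar/dsisr and handed
 * to the context's stop_callback for resolution in process context via
 * spu_irq_class_1_bottom().
 */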
static int __spu_trap_data_map(struct spu *spu, unsigned long ea, u64 dsisr)
{
	pr_debug("%s, %lx, %lx\n", __FUNCTION__, dsisr, ea);

	/* Handle kernel space hash faults immediately.
	   User hash faults need to be deferred to process context. */
	if ((dsisr & MFC_DSISR_PTE_NOT_FOUND)
	    && REGION_ID(ea) != USER_REGION_ID
	    && hash_page(ea, _PAGE_PRESENT, 0x300) == 0) {
		spu_restart_dma(spu);
		return 0;
	}

	if (test_bit(SPU_CONTEXT_SWITCH_ACTIVE, &spu->flags)) {
		printk(KERN_ERR "%s: invalid access during switch!\n",
				__func__);
		return 1;
	}

	spu->dar = ea;
	spu->dsisr = dsisr;
	mb();
	spu->stop_callback(spu);
	return 0;
}

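/*
 * Class 0 interrupt: the SPU hit an error condition.  The hard irq
 * handler only latches the event and stops the context; the actual
 * decoding happens later in spu_irq_class_0_bottom().
 */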
static irqreturn_t
spu_irq_class_0(int irq, void *data)
{
	struct spu *spu;

	spu = data;
	spu->class_0_pending = 1;
	spu->stop_callback(spu);

	return IRQ_HANDLED;
}

int
spu_irq_class_0_bottom(struct spu *spu)
{
	unsigned long stat, mask;
	unsigned long flags;

	spu->class_0_pending = 0;

	spin_lock_irqsave(&spu->register_lock, flags);
	mask = spu_int_mask_get(spu, 0);
	stat = spu_int_stat_get(spu, 0);

	stat &= mask;

	if (stat & 1) /* invalid DMA alignment */
		__spu_trap_dma_align(spu);

	if (stat & 2) /* invalid MFC DMA */
		__spu_trap_invalid_dma(spu);

	if (stat & 4) /* error on SPU */
		__spu_trap_error(spu);

	spu_int_stat_clear(spu, 0, stat);
	spin_unlock_irqrestore(&spu->register_lock, flags);

	return (stat & 0x7) ? -EIO : 0;
}
EXPORT_SYMBOL_GPL(spu_irq_class_0_bottom);

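/*
 * Class 1 interrupt: translation faults on MFC transfers, i.e. SLB
 * segment misses and hash/mapping faults.  Status and fault registers
 * are read and cleared under register_lock so a subsequent fault
 * cannot clobber dar/dsisr while we look at them.
 */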
static irqreturn_t
spu_irq_class_1(int irq, void *data)
{
	struct spu *spu;
	unsigned long stat, mask, dar, dsisr;

	spu = data;

	/* atomically read & clear class1 status. */
	spin_lock(&spu->register_lock);
	mask  = spu_int_mask_get(spu, 1);
	stat  = spu_int_stat_get(spu, 1) & mask;
	dar   = spu_mfc_dar_get(spu);
	dsisr = spu_mfc_dsisr_get(spu);
	if (stat & 2) /* mapping fault */
		spu_mfc_dsisr_set(spu, 0ul);
	spu_int_stat_clear(spu, 1, stat);
	spin_unlock(&spu->register_lock);
	pr_debug("%s: %lx %lx %lx %lx\n", __FUNCTION__, mask, stat,
			dar, dsisr);

	if (stat & 1) /* segment fault */
		__spu_trap_data_seg(spu, dar);

	if (stat & 2) { /* mapping fault */
		__spu_trap_data_map(spu, dar, dsisr);
	}

	if (stat & 4) /* ls compare & suspend on get */
		;

	if (stat & 8) /* ls compare & suspend on put */
		;

	return stat ? IRQ_HANDLED : IRQ_NONE;
}
EXPORT_SYMBOL_GPL(spu_irq_class_1_bottom);

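/*
 * Class 2 interrupt: events raised by the SPU program itself, i.e.
 * mailbox traffic, stop-and-signal, halt and DMA tag-group completion,
 * fanned out to the corresponding context callbacks.
 */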
static irqreturn_t
spu_irq_class_2(int irq, void *data)
{
	struct spu *spu;
	unsigned long stat;
	unsigned long mask;

	spu = data;
	spin_lock(&spu->register_lock);
	stat = spu_int_stat_get(spu, 2);
	mask = spu_int_mask_get(spu, 2);
	/* ignore interrupts we're not waiting for */
	stat &= mask;
	/*
	 * mailbox interrupts (0x1 and 0x10) are level triggered.
	 * mask them now before acknowledging.
	 */
	if (stat & 0x11)
		spu_int_mask_and(spu, 2, ~(stat & 0x11));
	/* acknowledge all interrupts before the callbacks */
	spu_int_stat_clear(spu, 2, stat);
	spin_unlock(&spu->register_lock);

	pr_debug("class 2 interrupt %d, %lx, %lx\n", irq, stat, mask);

	if (stat & 1)  /* PPC core mailbox */
		spu->ibox_callback(spu);

	if (stat & 2) /* SPU stop-and-signal */
		spu->stop_callback(spu);

	if (stat & 4) /* SPU halted */
		spu->stop_callback(spu);

	if (stat & 8) /* DMA tag group complete */
		spu->mfc_callback(spu);

	if (stat & 0x10) /* SPU mailbox threshold */
		spu->wbox_callback(spu);

	return stat ? IRQ_HANDLED : IRQ_NONE;
}

static int spu_request_irqs(struct spu *spu)
{
	int ret = 0;

	if (spu->irqs[0] != NO_IRQ) {
		snprintf(spu->irq_c0, sizeof (spu->irq_c0), "spe%02d.0",
			 spu->number);
		ret = request_irq(spu->irqs[0], spu_irq_class_0,
				  IRQF_DISABLED,
				  spu->irq_c0, spu);
		if (ret)
			goto bail0;
	}
	if (spu->irqs[1] != NO_IRQ) {
		snprintf(spu->irq_c1, sizeof (spu->irq_c1), "spe%02d.1",
			 spu->number);
		ret = request_irq(spu->irqs[1], spu_irq_class_1,
				  IRQF_DISABLED,
				  spu->irq_c1, spu);
		if (ret)
			goto bail1;
	}
	if (spu->irqs[2] != NO_IRQ) {
		snprintf(spu->irq_c2, sizeof (spu->irq_c2), "spe%02d.2",
			 spu->number);
		ret = request_irq(spu->irqs[2], spu_irq_class_2,
				  IRQF_DISABLED,
				  spu->irq_c2, spu);
		if (ret)
			goto bail2;
	}
	return 0;

bail2:
	if (spu->irqs[1] != NO_IRQ)
		free_irq(spu->irqs[1], spu);
bail1:
	if (spu->irqs[0] != NO_IRQ)
		free_irq(spu->irqs[0], spu);
bail0:
	return ret;
}

static void spu_free_irqs(struct spu *spu)
{
	if (spu->irqs[0] != NO_IRQ)
		free_irq(spu->irqs[0], spu);
	if (spu->irqs[1] != NO_IRQ)
		free_irq(spu->irqs[1], spu);
	if (spu->irqs[2] != NO_IRQ)
		free_irq(spu->irqs[2], spu);
}

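/*
 * Bring the SPU's channel state into a known condition before handing
 * the SPU to a new user: drain stale channel data and reset the
 * channel counts to sane initial values.
 */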
static void spu_init_channels(struct spu *spu)
{
	static const struct {
		unsigned channel;
		unsigned count;
	} zero_list[] = {
		{ 0x00, 1, }, { 0x01, 1, }, { 0x03, 1, }, { 0x04, 1, },
		{ 0x18, 1, }, { 0x19, 1, }, { 0x1b, 1, }, { 0x1d, 1, },
	}, count_list[] = {
		{ 0x00, 0, }, { 0x03, 0, }, { 0x04, 0, }, { 0x15, 16, },
		{ 0x17, 1, }, { 0x18, 0, }, { 0x19, 0, }, { 0x1b, 0, },
		{ 0x1c, 1, }, { 0x1d, 0, }, { 0x1e, 1, },
	};
	struct spu_priv2 __iomem *priv2;
	int i;

	priv2 = spu->priv2;

	/* initialize all channel data to zero */
	for (i = 0; i < ARRAY_SIZE(zero_list); i++) {
		int count;

		out_be64(&priv2->spu_chnlcntptr_RW, zero_list[i].channel);
		for (count = 0; count < zero_list[i].count; count++)
			out_be64(&priv2->spu_chnldata_RW, 0);
	}

	/* initialize channel counts to meaningful values */
	for (i = 0; i < ARRAY_SIZE(count_list); i++) {
		out_be64(&priv2->spu_chnlcntptr_RW, count_list[i].channel);
		out_be64(&priv2->spu_chnlcnt_RW, count_list[i].count);
	}
}

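/*
 * Take an idle SPU off a node's free list, if one is available.  A
 * typical caller pattern, for illustration only:
 *
 *	struct spu *spu = spu_alloc();
 *	if (spu) {
 *		... load and run a context here ...
 *		spu_free(spu);
 *	}
 */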
struct spu *spu_alloc_node(int node)
{
	struct spu *spu = NULL;

	mutex_lock(&spu_mutex);
	if (!list_empty(&spu_list[node])) {
		spu = list_entry(spu_list[node].next, struct spu, list);
		list_del_init(&spu->list);
		pr_debug("Got SPU %d %d\n", spu->number, spu->node);
		spu_init_channels(spu);
	}
	mutex_unlock(&spu_mutex);

	return spu;
}
EXPORT_SYMBOL_GPL(spu_alloc_node);

struct spu *spu_alloc(void)
{
	struct spu *spu = NULL;
	int node;

	for (node = 0; node < MAX_NUMNODES; node++) {
		spu = spu_alloc_node(node);
		if (spu)
			break;
	}

	return spu;
}

void spu_free(struct spu *spu)
{
	mutex_lock(&spu_mutex);
	list_add_tail(&spu->list, &spu_list[spu->node]);
	mutex_unlock(&spu_mutex);
}
EXPORT_SYMBOL_GPL(spu_free);

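/*
 * Resolve a deferred user-space fault against the owning mm, closely
 * mirroring the main do_page_fault() path: look up the VMA, check the
 * access permissions, then let handle_mm_fault() bring the page in.
 */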
static int spu_handle_mm_fault(struct spu *spu)
{
	struct mm_struct *mm = spu->mm;
	struct vm_area_struct *vma;
	u64 ea, dsisr, is_write;
	int ret;

	ea = spu->dar;
	dsisr = spu->dsisr;
#if 0
	if (!IS_VALID_EA(ea)) {
		return -EFAULT;
	}
#endif /* XXX */
	if (mm == NULL) {
		return -EFAULT;
	}
	if (mm->pgd == NULL) {
		return -EFAULT;
	}

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, ea);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= ea)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
#if 0
	if (expand_stack(vma, ea))
		goto bad_area;
#endif /* XXX */
good_area:
	is_write = dsisr & MFC_DSISR_ACCESS_PUT;
	if (is_write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	} else {
		if (dsisr & MFC_DSISR_ACCESS_DENIED)
			goto bad_area;
		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto bad_area;
	}
	ret = 0;
	switch (handle_mm_fault(mm, vma, ea, is_write)) {
	case VM_FAULT_MINOR:
		current->min_flt++;
		break;
	case VM_FAULT_MAJOR:
		current->maj_flt++;
		break;
	case VM_FAULT_SIGBUS:
		ret = -EFAULT;
		goto bad_area;
	case VM_FAULT_OOM:
		ret = -ENOMEM;
		goto bad_area;
	default:
		BUG();
	}
	up_read(&mm->mmap_sem);
	return ret;

bad_area:
	up_read(&mm->mmap_sem);
	return -EFAULT;
}

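/*
 * Bottom half for class 1 interrupts, run from process context: try
 * the cheap hash table refill first, then fall back to the full VMA
 * walk in spu_handle_mm_fault().  On failure the fault is reported to
 * the context as a data storage event.
 */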
int spu_irq_class_1_bottom(struct spu *spu)
{
	u64 ea, dsisr, access, error = 0UL;
	int ret = 0;

	ea = spu->dar;
	dsisr = spu->dsisr;
	if (dsisr & (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED)) {
		u64 flags;

		access = (_PAGE_PRESENT | _PAGE_USER);
		access |= (dsisr & MFC_DSISR_ACCESS_PUT) ? _PAGE_RW : 0UL;
		local_irq_save(flags);
		if (hash_page(ea, access, 0x300) != 0)
			error |= CLASS1_ENABLE_STORAGE_FAULT_INTR;
		local_irq_restore(flags);
	}
	if (error & CLASS1_ENABLE_STORAGE_FAULT_INTR) {
		if ((ret = spu_handle_mm_fault(spu)) != 0)
			error |= CLASS1_ENABLE_STORAGE_FAULT_INTR;
		else
			error &= ~CLASS1_ENABLE_STORAGE_FAULT_INTR;
	}
	spu->dar = 0UL;
	spu->dsisr = 0UL;
	if (!error) {
		spu_restart_dma(spu);
	} else {
		spu->dma_callback(spu, SPE_EVENT_SPE_DATA_STORAGE);
	}
	return ret;
}

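/*
 * Every SPU is exposed as a sysdev.  The helpers below allow other
 * parts of the kernel to attach additional attributes or attribute
 * groups to all SPUs at once.
 */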
struct sysdev_class spu_sysdev_class = {
	set_kset_name("spu")
};

int spu_add_sysdev_attr(struct sysdev_attribute *attr)
{
	struct spu *spu;
	mutex_lock(&spu_mutex);

	list_for_each_entry(spu, &spu_full_list, full_list)
		sysdev_create_file(&spu->sysdev, attr);

	mutex_unlock(&spu_mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(spu_add_sysdev_attr);

int spu_add_sysdev_attr_group(struct attribute_group *attrs)
{
	struct spu *spu;
	mutex_lock(&spu_mutex);

	list_for_each_entry(spu, &spu_full_list, full_list)
		sysfs_create_group(&spu->sysdev.kobj, attrs);

	mutex_unlock(&spu_mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(spu_add_sysdev_attr_group);

void spu_remove_sysdev_attr(struct sysdev_attribute *attr)
{
	struct spu *spu;
	mutex_lock(&spu_mutex);

	list_for_each_entry(spu, &spu_full_list, full_list)
		sysdev_remove_file(&spu->sysdev, attr);

	mutex_unlock(&spu_mutex);
}
EXPORT_SYMBOL_GPL(spu_remove_sysdev_attr);

void spu_remove_sysdev_attr_group(struct attribute_group *attrs)
{
	struct spu *spu;
	mutex_lock(&spu_mutex);

	list_for_each_entry(spu, &spu_full_list, full_list)
		sysfs_remove_group(&spu->sysdev.kobj, attrs);

	mutex_unlock(&spu_mutex);
}
EXPORT_SYMBOL_GPL(spu_remove_sysdev_attr_group);

static int spu_create_sysdev(struct spu *spu)
{
	int ret;

	spu->sysdev.id = spu->number;
	spu->sysdev.cls = &spu_sysdev_class;
	ret = sysdev_register(&spu->sysdev);
	if (ret) {
		printk(KERN_ERR "Can't register SPU %d with sysfs\n",
				spu->number);
		return ret;
	}

	sysfs_add_device_to_node(&spu->sysdev, spu->node);

	return 0;
}

static void spu_destroy_sysdev(struct spu *spu)
{
	sysfs_remove_device_from_node(&spu->sysdev, spu->node);
	sysdev_unregister(&spu->sysdev);
}

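/*
 * Called once for each SPU found by the platform's spu_enumerate_spus()
 * callback.  All low-level discovery and resource mapping is delegated
 * to the platform's spu_management_ops via spu_create_spu().
 */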
static int __init create_spu(void *data)
{
	struct spu *spu;
	int ret;
	static int number;
	unsigned long flags;

	ret = -ENOMEM;
	spu = kzalloc(sizeof (*spu), GFP_KERNEL);
	if (!spu)
		goto out;

	spin_lock_init(&spu->register_lock);
	mutex_lock(&spu_mutex);
	spu->number = number++;
	mutex_unlock(&spu_mutex);

	ret = spu_create_spu(spu, data);

	if (ret)
		goto out_free;

	spu_mfc_sdr_setup(spu);
	spu_mfc_sr1_set(spu, 0x33);
	ret = spu_request_irqs(spu);
	if (ret)
		goto out_destroy;

	ret = spu_create_sysdev(spu);
	if (ret)
		goto out_free_irqs;

	mutex_lock(&spu_mutex);
	spin_lock_irqsave(&spu_list_lock, flags);
	list_add(&spu->list, &spu_list[spu->node]);
	list_add(&spu->full_list, &spu_full_list);
	spin_unlock_irqrestore(&spu_list_lock, flags);
	mutex_unlock(&spu_mutex);

	goto out;

out_free_irqs:
	spu_free_irqs(spu);
out_destroy:
	spu_destroy_spu(spu);
out_free:
	kfree(spu);
out:
	return ret;
}

static void destroy_spu(struct spu *spu)
{
	list_del_init(&spu->list);
	list_del_init(&spu->full_list);

	spu_destroy_sysdev(spu);
	spu_free_irqs(spu);
	spu_destroy_spu(spu);
	kfree(spu);
}

static void cleanup_spu_base(void)
{
	struct spu *spu, *tmp;
	int node;

	mutex_lock(&spu_mutex);
	for (node = 0; node < MAX_NUMNODES; node++) {
		list_for_each_entry_safe(spu, tmp, &spu_list[node], list)
			destroy_spu(spu);
	}
	mutex_unlock(&spu_mutex);
	sysdev_class_unregister(&spu_sysdev_class);
}
module_exit(cleanup_spu_base);

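/*
 * Bail out quietly on platforms without SPU management ops, i.e.
 * without SPEs; otherwise register the sysdev class and ask the
 * platform to enumerate its SPUs.
 */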
static int __init init_spu_base(void)
{
	int i, ret;

	if (!spu_management_ops)
		return 0;

	/* create sysdev class for spus */
	ret = sysdev_class_register(&spu_sysdev_class);
	if (ret)
		return ret;

	for (i = 0; i < MAX_NUMNODES; i++)
		INIT_LIST_HEAD(&spu_list[i]);

	ret = spu_enumerate_spus(create_spu);

	if (ret) {
		printk(KERN_WARNING "%s: Error initializing spus\n",
			__FUNCTION__);
		cleanup_spu_base();
		return ret;
	}

	xmon_register_spus(&spu_full_list);

	return ret;
}
module_init(init_spu_base);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>");