/*
 * Low-level SPU handling
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
 *
 * Author: Arnd Bergmann <arndb@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#undef DEBUG

#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/mutex.h>
#include <asm/spu.h>
#include <asm/spu_priv1.h>
#include <asm/xmon.h>

const struct spu_management_ops *spu_management_ops;
EXPORT_SYMBOL_GPL(spu_management_ops);

const struct spu_priv1_ops *spu_priv1_ops;

static struct list_head spu_list[MAX_NUMNODES];
static LIST_HEAD(spu_full_list);
static DEFINE_MUTEX(spu_mutex);
static DEFINE_SPINLOCK(spu_list_lock);

EXPORT_SYMBOL_GPL(spu_priv1_ops);

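/*
 * Invalidate the SPU's entire SLB, but only while MFC address translation
 * is enabled (SR1 "relocate" set); with translation off the SLB contents
 * are not in use.
 */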
void spu_invalidate_slbs(struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	if (spu_mfc_sr1_get(spu) & MFC_STATE1_RELOCATE_MASK)
		out_be64(&priv2->slb_invalidate_all_W, 0UL);
}
EXPORT_SYMBOL_GPL(spu_invalidate_slbs);

/* This is called by the MM core when a segment size is changed, to
 * request a flush of all the SPEs using a given mm
 */
void spu_flush_all_slbs(struct mm_struct *mm)
{
	struct spu *spu;
	unsigned long flags;

	spin_lock_irqsave(&spu_list_lock, flags);
	list_for_each_entry(spu, &spu_full_list, full_list) {
		if (spu->mm == mm)
			spu_invalidate_slbs(spu);
	}
	spin_unlock_irqrestore(&spu_list_lock, flags);
}

/* The hack below stinks... try to do something better one of
 * these days... Does it even work properly with NR_CPUS == 1 ?
 */
static inline void mm_needs_global_tlbie(struct mm_struct *mm)
{
	int nr = (NR_CPUS > 1) ? NR_CPUS : NR_CPUS + 1;

	/* Global TLBIE broadcast required with SPEs. */
	__cpus_setall(&mm->cpu_vm_mask, nr);
}

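/*
 * Attach an mm to an SPU (or detach with mm == NULL).  spu_list_lock
 * keeps the assignment coherent with spu_flush_all_slbs(), which walks
 * the SPU list under the same lock.  Marking all CPUs in cpu_vm_mask
 * forces global tlbie broadcasts, since the SPE's translations are not
 * covered by the regular per-CPU mask.
 */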
void spu_associate_mm(struct spu *spu, struct mm_struct *mm)
{
	unsigned long flags;

	spin_lock_irqsave(&spu_list_lock, flags);
	spu->mm = mm;
	spin_unlock_irqrestore(&spu_list_lock, flags);
	if (mm)
		mm_needs_global_tlbie(mm);
}
EXPORT_SYMBOL_GPL(spu_associate_mm);

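/*
 * Class 0 error conditions are forwarded to whoever owns the context
 * through the registered dma_callback.
 */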
static int __spu_trap_invalid_dma(struct spu *spu)
{
	pr_debug("%s\n", __func__);
	spu->dma_callback(spu, SPE_EVENT_INVALID_DMA);
	return 0;
}

static int __spu_trap_dma_align(struct spu *spu)
{
	pr_debug("%s\n", __func__);
	spu->dma_callback(spu, SPE_EVENT_DMA_ALIGNMENT);
	return 0;
}

static int __spu_trap_error(struct spu *spu)
{
	pr_debug("%s\n", __func__);
	spu->dma_callback(spu, SPE_EVENT_SPE_ERROR);
	return 0;
}

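/*
 * Kick the MFC DMA queue again after a fault has been resolved.  If a
 * context switch is in flight, leave the queue alone; the save/restore
 * sequence takes care of restarting it.
 */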
static void spu_restart_dma(struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	if (!test_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags))
		out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESTART_DMA_COMMAND);
}

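/*
 * Handle an SLB miss on an MFC access: build an SLB entry for the
 * faulting effective address and write it into one of the eight MFC SLB
 * slots, replacing entries round-robin via spu->slb_replace, then
 * restart the stalled DMA.
 */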
static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	struct mm_struct *mm = spu->mm;
	u64 esid, vsid, llp;
	int psize;

	pr_debug("%s\n", __func__);

	if (test_bit(SPU_CONTEXT_SWITCH_ACTIVE, &spu->flags)) {
		/* SLBs are pre-loaded for context switch, so
		 * we should never get here!
		 */
		printk(KERN_ERR "%s: invalid access during switch!\n", __func__);
		return 1;
	}
	esid = (ea & ESID_MASK) | SLB_ESID_V;

	switch(REGION_ID(ea)) {
	case USER_REGION_ID:
#ifdef CONFIG_HUGETLB_PAGE
		if (in_hugepage_area(mm->context, ea))
			psize = mmu_huge_psize;
		else
#endif
			psize = mm->context.user_psize;
		vsid = (get_vsid(mm->context.id, ea) << SLB_VSID_SHIFT) |
				SLB_VSID_USER;
		break;
	case VMALLOC_REGION_ID:
		if (ea < VMALLOC_END)
			psize = mmu_vmalloc_psize;
		else
			psize = mmu_io_psize;
		vsid = (get_kernel_vsid(ea) << SLB_VSID_SHIFT) |
			SLB_VSID_KERNEL;
		break;
	case KERNEL_REGION_ID:
		psize = mmu_linear_psize;
		vsid = (get_kernel_vsid(ea) << SLB_VSID_SHIFT) |
			SLB_VSID_KERNEL;
		break;
	default:
		/* Future: support kernel segments so that drivers
		 * can use SPUs.
		 */
		pr_debug("invalid region access at %016lx\n", ea);
		return 1;
	}
	llp = mmu_psize_defs[psize].sllp;

	out_be64(&priv2->slb_index_W, spu->slb_replace);
	out_be64(&priv2->slb_vsid_RW, vsid | llp);
	out_be64(&priv2->slb_esid_RW, esid);

	spu->slb_replace++;
	if (spu->slb_replace >= 8)
		spu->slb_replace = 0;

	spu_restart_dma(spu);

	return 0;
}

extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap); //XXX
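/*
 * Handle a hash-table miss on an MFC access.  Kernel-space faults are
 * resolved immediately via hash_page(); user-space faults are recorded
 * in spu->dar/dsisr and deferred to the controlling thread through
 * stop_callback(), since resolving them needs process context.
 */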
static int __spu_trap_data_map(struct spu *spu, unsigned long ea, u64 dsisr)
{
	pr_debug("%s, %lx, %lx\n", __func__, dsisr, ea);

	/* Handle kernel space hash faults immediately.
	   User hash faults need to be deferred to process context. */
	if ((dsisr & MFC_DSISR_PTE_NOT_FOUND)
	    && REGION_ID(ea) != USER_REGION_ID
	    && hash_page(ea, _PAGE_PRESENT, 0x300) == 0) {
		spu_restart_dma(spu);
		return 0;
	}

	if (test_bit(SPU_CONTEXT_SWITCH_ACTIVE, &spu->flags)) {
		printk(KERN_ERR "%s: invalid access during switch!\n", __func__);
		return 1;
	}

	spu->dar = ea;
	spu->dsisr = dsisr;
	mb();
	spu->stop_callback(spu);
	return 0;
}

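/*
 * Class 0 interrupts signal errors (DMA alignment, invalid MFC DMA,
 * SPU error).  The hard handler only latches the event and wakes the
 * context; spu_irq_class_0_bottom() does the actual decoding.
 */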
static irqreturn_t
spu_irq_class_0(int irq, void *data)
{
	struct spu *spu;

	spu = data;
	spu->class_0_pending = 1;
	spu->stop_callback(spu);

	return IRQ_HANDLED;
}

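/*
 * Bottom half of the class 0 handler, run from the owning context:
 * read and clear the latched status under register_lock and dispatch
 * each pending error condition.
 */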
int
spu_irq_class_0_bottom(struct spu *spu)
{
	unsigned long stat, mask;
	unsigned long flags;

	spu->class_0_pending = 0;

	spin_lock_irqsave(&spu->register_lock, flags);
	mask = spu_int_mask_get(spu, 0);
	stat = spu_int_stat_get(spu, 0);

	stat &= mask;

	if (stat & 1) /* invalid DMA alignment */
		__spu_trap_dma_align(spu);

	if (stat & 2) /* invalid MFC DMA */
		__spu_trap_invalid_dma(spu);

	if (stat & 4) /* error on SPU */
		__spu_trap_error(spu);

	spu_int_stat_clear(spu, 0, stat);
	spin_unlock_irqrestore(&spu->register_lock, flags);

	return (stat & 0x7) ? -EIO : 0;
}
EXPORT_SYMBOL_GPL(spu_irq_class_0_bottom);

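/*
 * Class 1 interrupts cover memory management: segment faults (SLB
 * misses) are fixed up right here, mapping faults (hash misses) go
 * through __spu_trap_data_map().  Status is read and cleared atomically
 * under register_lock before any handler runs.
 */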
static irqreturn_t
spu_irq_class_1(int irq, void *data)
{
	struct spu *spu;
	unsigned long stat, mask, dar, dsisr;

	spu = data;

	/* atomically read & clear class1 status. */
	spin_lock(&spu->register_lock);
	mask = spu_int_mask_get(spu, 1);
	stat = spu_int_stat_get(spu, 1) & mask;
	dar = spu_mfc_dar_get(spu);
	dsisr = spu_mfc_dsisr_get(spu);
	if (stat & 2) /* mapping fault */
		spu_mfc_dsisr_set(spu, 0ul);
	spu_int_stat_clear(spu, 1, stat);
	spin_unlock(&spu->register_lock);
	pr_debug("%s: %lx %lx %lx %lx\n", __func__, mask, stat,
			dar, dsisr);

	if (stat & 1) /* segment fault */
		__spu_trap_data_seg(spu, dar);

	if (stat & 2) { /* mapping fault */
		__spu_trap_data_map(spu, dar, dsisr);
	}

	if (stat & 4) /* ls compare & suspend on get */
		;

	if (stat & 8) /* ls compare & suspend on put */
		;

	return stat ? IRQ_HANDLED : IRQ_NONE;
}

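/*
 * Class 2 interrupts are the "application" class: mailboxes, SPU
 * stop-and-signal, halt and MFC tag-group completion.  The
 * level-triggered mailbox interrupts (0x1 and 0x10) must be masked
 * before being acknowledged, or they would fire again immediately.
 */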
static irqreturn_t
spu_irq_class_2(int irq, void *data)
{
	struct spu *spu;
	unsigned long stat;
	unsigned long mask;

	spu = data;
	spin_lock(&spu->register_lock);
	stat = spu_int_stat_get(spu, 2);
	mask = spu_int_mask_get(spu, 2);
	/* ignore interrupts we're not waiting for */
	stat &= mask;
	/*
	 * mailbox interrupts (0x1 and 0x10) are level triggered.
	 * mask them now before acknowledging.
	 */
	if (stat & 0x11)
		spu_int_mask_and(spu, 2, ~(stat & 0x11));
	/* acknowledge all interrupts before the callbacks */
	spu_int_stat_clear(spu, 2, stat);
	spin_unlock(&spu->register_lock);

	pr_debug("class 2 interrupt %d, %lx, %lx\n", irq, stat, mask);

	if (stat & 1)	/* PPC core mailbox */
		spu->ibox_callback(spu);

	if (stat & 2)	/* SPU stop-and-signal */
		spu->stop_callback(spu);

	if (stat & 4)	/* SPU halted */
		spu->stop_callback(spu);

	if (stat & 8)	/* DMA tag group complete */
		spu->mfc_callback(spu);

	if (stat & 0x10) /* SPU mailbox threshold */
		spu->wbox_callback(spu);

	return stat ? IRQ_HANDLED : IRQ_NONE;
}

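/*
 * Each SPE exposes one interrupt line per class.  Request all three,
 * unwinding the ones already acquired if any request fails.
 */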
static int spu_request_irqs(struct spu *spu)
{
	int ret = 0;

	if (spu->irqs[0] != NO_IRQ) {
		snprintf(spu->irq_c0, sizeof (spu->irq_c0), "spe%02d.0",
			 spu->number);
		ret = request_irq(spu->irqs[0], spu_irq_class_0,
				  IRQF_DISABLED,
				  spu->irq_c0, spu);
		if (ret)
			goto bail0;
	}
	if (spu->irqs[1] != NO_IRQ) {
		snprintf(spu->irq_c1, sizeof (spu->irq_c1), "spe%02d.1",
			 spu->number);
		ret = request_irq(spu->irqs[1], spu_irq_class_1,
				  IRQF_DISABLED,
				  spu->irq_c1, spu);
		if (ret)
			goto bail1;
	}
	if (spu->irqs[2] != NO_IRQ) {
		snprintf(spu->irq_c2, sizeof (spu->irq_c2), "spe%02d.2",
			 spu->number);
		ret = request_irq(spu->irqs[2], spu_irq_class_2,
				  IRQF_DISABLED,
				  spu->irq_c2, spu);
		if (ret)
			goto bail2;
	}
	return 0;

bail2:
	if (spu->irqs[1] != NO_IRQ)
		free_irq(spu->irqs[1], spu);
bail1:
	if (spu->irqs[0] != NO_IRQ)
		free_irq(spu->irqs[0], spu);
bail0:
	return ret;
}

static void spu_free_irqs(struct spu *spu)
{
	if (spu->irqs[0] != NO_IRQ)
		free_irq(spu->irqs[0], spu);
	if (spu->irqs[1] != NO_IRQ)
		free_irq(spu->irqs[1], spu);
	if (spu->irqs[2] != NO_IRQ)
		free_irq(spu->irqs[2], spu);
}

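/*
 * Bring the SPU's channels into a known state: select each channel
 * through spu_chnlcntptr_RW, zero out any stale channel data and set
 * the initial channel counts.
 */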
static void spu_init_channels(struct spu *spu)
{
	static const struct {
		unsigned channel;
		unsigned count;
	} zero_list[] = {
		{ 0x00, 1, }, { 0x01, 1, }, { 0x03, 1, }, { 0x04, 1, },
		{ 0x18, 1, }, { 0x19, 1, }, { 0x1b, 1, }, { 0x1d, 1, },
	}, count_list[] = {
		{ 0x00, 0, }, { 0x03, 0, }, { 0x04, 0, }, { 0x15, 16, },
		{ 0x17, 1, }, { 0x18, 0, }, { 0x19, 0, }, { 0x1b, 0, },
		{ 0x1c, 1, }, { 0x1d, 0, }, { 0x1e, 1, },
	};
	struct spu_priv2 __iomem *priv2;
	int i;

	priv2 = spu->priv2;

	/* initialize all channel data to zero */
	for (i = 0; i < ARRAY_SIZE(zero_list); i++) {
		int count;

		out_be64(&priv2->spu_chnlcntptr_RW, zero_list[i].channel);
		for (count = 0; count < zero_list[i].count; count++)
			out_be64(&priv2->spu_chnldata_RW, 0);
	}

	/* initialize channel counts to meaningful values */
	for (i = 0; i < ARRAY_SIZE(count_list); i++) {
		out_be64(&priv2->spu_chnlcntptr_RW, count_list[i].channel);
		out_be64(&priv2->spu_chnlcnt_RW, count_list[i].count);
	}
}

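/*
 * Take an idle SPU off the free list of the given NUMA node.  Channels
 * are reinitialized on allocation so each user starts from a clean
 * state.
 */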
struct spu *spu_alloc_node(int node)
{
	struct spu *spu = NULL;

	mutex_lock(&spu_mutex);
	if (!list_empty(&spu_list[node])) {
		spu = list_entry(spu_list[node].next, struct spu, list);
		list_del_init(&spu->list);
		pr_debug("Got SPU %d %d\n", spu->number, spu->node);
	}
	mutex_unlock(&spu_mutex);

	if (spu)
		spu_init_channels(spu);
	return spu;
}
EXPORT_SYMBOL_GPL(spu_alloc_node);

struct spu *spu_alloc(void)
{
	struct spu *spu = NULL;
	int node;

	for (node = 0; node < MAX_NUMNODES; node++) {
		spu = spu_alloc_node(node);
		if (spu)
			break;
	}

	return spu;
}

void spu_free(struct spu *spu)
{
	mutex_lock(&spu_mutex);
	list_add_tail(&spu->list, &spu_list[spu->node]);
	mutex_unlock(&spu_mutex);
}
EXPORT_SYMBOL_GPL(spu_free);

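/*
 * The sysdev helpers below apply an attribute (or attribute group) to
 * every SPU in the system, so callers can publish per-SPU sysfs files
 * without walking the list themselves.
 */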
struct sysdev_class spu_sysdev_class = {
	set_kset_name("spu")
};

int spu_add_sysdev_attr(struct sysdev_attribute *attr)
{
	struct spu *spu;
	mutex_lock(&spu_mutex);

	list_for_each_entry(spu, &spu_full_list, full_list)
		sysdev_create_file(&spu->sysdev, attr);

	mutex_unlock(&spu_mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(spu_add_sysdev_attr);

int spu_add_sysdev_attr_group(struct attribute_group *attrs)
{
	struct spu *spu;
	mutex_lock(&spu_mutex);

	list_for_each_entry(spu, &spu_full_list, full_list)
		sysfs_create_group(&spu->sysdev.kobj, attrs);

	mutex_unlock(&spu_mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(spu_add_sysdev_attr_group);

void spu_remove_sysdev_attr(struct sysdev_attribute *attr)
{
	struct spu *spu;
	mutex_lock(&spu_mutex);

	list_for_each_entry(spu, &spu_full_list, full_list)
		sysdev_remove_file(&spu->sysdev, attr);

	mutex_unlock(&spu_mutex);
}
EXPORT_SYMBOL_GPL(spu_remove_sysdev_attr);

void spu_remove_sysdev_attr_group(struct attribute_group *attrs)
{
	struct spu *spu;
	mutex_lock(&spu_mutex);

	list_for_each_entry(spu, &spu_full_list, full_list)
		sysfs_remove_group(&spu->sysdev.kobj, attrs);

	mutex_unlock(&spu_mutex);
}
EXPORT_SYMBOL_GPL(spu_remove_sysdev_attr_group);

static int spu_create_sysdev(struct spu *spu)
{
	int ret;

	spu->sysdev.id = spu->number;
	spu->sysdev.cls = &spu_sysdev_class;
	ret = sysdev_register(&spu->sysdev);
	if (ret) {
		printk(KERN_ERR "Can't register SPU %d with sysfs\n",
				spu->number);
		return ret;
	}

	sysfs_add_device_to_node(&spu->sysdev, spu->node);

	return 0;
}

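/*
 * Instantiate one SPU: allocate and number it, let the platform code
 * map its resources via spu_create_spu(), set up the MFC, request the
 * interrupt lines, register the sysdev and finally make the SPU
 * visible on the free and full lists.
 */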
static int __init create_spu(void *data)
{
	struct spu *spu;
	int ret;
	static int number;
	unsigned long flags;

	ret = -ENOMEM;
	spu = kzalloc(sizeof (*spu), GFP_KERNEL);
	if (!spu)
		goto out;

	spin_lock_init(&spu->register_lock);
	mutex_lock(&spu_mutex);
	spu->number = number++;
	mutex_unlock(&spu_mutex);

	ret = spu_create_spu(spu, data);

	if (ret)
		goto out_free;

	spu_mfc_sdr_setup(spu);
	spu_mfc_sr1_set(spu, 0x33);
	ret = spu_request_irqs(spu);
	if (ret)
		goto out_destroy;

	ret = spu_create_sysdev(spu);
	if (ret)
		goto out_free_irqs;

	mutex_lock(&spu_mutex);
	spin_lock_irqsave(&spu_list_lock, flags);
	list_add(&spu->list, &spu_list[spu->node]);
	list_add(&spu->full_list, &spu_full_list);
	spin_unlock_irqrestore(&spu_list_lock, flags);
	mutex_unlock(&spu_mutex);

	goto out;

out_free_irqs:
	spu_free_irqs(spu);
out_destroy:
	spu_destroy_spu(spu);
out_free:
	kfree(spu);
out:
	return ret;
}

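/*
 * Module init: register the sysdev class, have the platform code
 * enumerate and create all SPUs, and hand the full list to xmon for
 * debugging.
 */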
static int __init init_spu_base(void)
{
	int i, ret = 0;

	for (i = 0; i < MAX_NUMNODES; i++)
		INIT_LIST_HEAD(&spu_list[i]);

	if (!spu_management_ops)
		goto out;

	/* create sysdev class for spus */
	ret = sysdev_class_register(&spu_sysdev_class);
	if (ret)
		goto out;

	ret = spu_enumerate_spus(create_spu);

	if (ret) {
		printk(KERN_WARNING "%s: Error initializing spus\n",
			__func__);
		goto out_unregister_sysdev_class;
	}

	xmon_register_spus(&spu_full_list);

	return 0;

 out_unregister_sysdev_class:
	sysdev_class_unregister(&spu_sysdev_class);
 out:
	return ret;
}
module_init(init_spu_base);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>");