/*
 * Low-level SPU handling
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
 *
 * Author: Arnd Bergmann <arndb@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#undef DEBUG

#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/mutex.h>
#include <linux/linux_logo.h>
#include <linux/syscore_ops.h>
#include <asm/spu.h>
#include <asm/spu_priv1.h>
#include <asm/spu_csa.h>
#include <asm/xmon.h>
#include <asm/prom.h>
#include <asm/kexec.h>

const struct spu_management_ops *spu_management_ops;
EXPORT_SYMBOL_GPL(spu_management_ops);

const struct spu_priv1_ops *spu_priv1_ops;
EXPORT_SYMBOL_GPL(spu_priv1_ops);

struct cbe_spu_info cbe_spu_info[MAX_NUMNODES];
EXPORT_SYMBOL_GPL(cbe_spu_info);

/*
 * The spufs fault-handling code needs to call force_sig_info to raise signals
 * on DMA errors. Export it here to avoid general kernel-wide access to this
 * function.
 */
EXPORT_SYMBOL_GPL(force_sig_info);

/*
 * Protects cbe_spu_info and spu->number.
 */
static DEFINE_SPINLOCK(spu_lock);

/*
 * List of all spus in the system.
 *
 * This list is iterated by callers from irq context and callers that
 * want to sleep.  Thus modifications need to be done with both
 * spu_full_list_lock and spu_full_list_mutex held, while iterating
 * through it requires either of these locks.
 *
 * In addition spu_full_list_lock protects all assignments to
 * spu->mm.
 */
static LIST_HEAD(spu_full_list);
static DEFINE_SPINLOCK(spu_full_list_lock);
static DEFINE_MUTEX(spu_full_list_mutex);

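/*
 * Invalidate all SLB entries cached by one SPU's MFC. The invalidate
 * is only issued while translation is enabled (MFC_STATE1_RELOCATE_MASK),
 * and is done under register_lock so it cannot race with spu_load_slb().
 */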
void spu_invalidate_slbs(struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	unsigned long flags;

	spin_lock_irqsave(&spu->register_lock, flags);
	if (spu_mfc_sr1_get(spu) & MFC_STATE1_RELOCATE_MASK)
		out_be64(&priv2->slb_invalidate_all_W, 0UL);
	spin_unlock_irqrestore(&spu->register_lock, flags);
}
EXPORT_SYMBOL_GPL(spu_invalidate_slbs);

/* This is called by the MM core when a segment size is changed, to
 * request a flush of all the SPEs using a given mm
 */
void spu_flush_all_slbs(struct mm_struct *mm)
{
	struct spu *spu;
	unsigned long flags;

	spin_lock_irqsave(&spu_full_list_lock, flags);
	list_for_each_entry(spu, &spu_full_list, full_list) {
		if (spu->mm == mm)
			spu_invalidate_slbs(spu);
	}
	spin_unlock_irqrestore(&spu_full_list_lock, flags);
}

/* The hack below stinks... try to do something better one of
 * these days... Does it even work properly with NR_CPUS == 1 ?
 */
static inline void mm_needs_global_tlbie(struct mm_struct *mm)
{
	int nr = (NR_CPUS > 1) ? NR_CPUS : NR_CPUS + 1;

	/* Global TLBIE broadcast required with SPEs. */
	bitmap_fill(cpumask_bits(mm_cpumask(mm)), nr);
}

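/*
 * Bind an SPU to the mm of the process whose context it is running. The
 * assignment is done under spu_full_list_lock, which is what
 * spu_flush_all_slbs() relies on when it compares spu->mm.
 */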
118void spu_associate_mm(struct spu *spu, struct mm_struct *mm)
119{
120 unsigned long flags;
121
Christoph Hellwig24140592007-07-20 21:39:51 +0200122 spin_lock_irqsave(&spu_full_list_lock, flags);
Benjamin Herrenschmidt94b2a432007-03-10 00:05:37 +0100123 spu->mm = mm;
Christoph Hellwig24140592007-07-20 21:39:51 +0200124 spin_unlock_irqrestore(&spu_full_list_lock, flags);
Benjamin Herrenschmidt94b2a432007-03-10 00:05:37 +0100125 if (mm)
126 mm_needs_global_tlbie(mm);
127}
128EXPORT_SYMBOL_GPL(spu_associate_mm);
129
Jeremy Kerrf6eb7d7f2007-12-05 13:49:31 +1100130int spu_64k_pages_available(void)
131{
132 return mmu_psize_defs[MMU_PAGE_64K].shift != 0;
133}
134EXPORT_SYMBOL_GPL(spu_64k_pages_available);
135
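/*
 * Restart a suspended MFC DMA queue. If a context switch is in progress
 * the restart must not hit the hardware, so only note a pending fault
 * and leave the actual restart to the context-switch code.
 */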
static void spu_restart_dma(struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	if (!test_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags))
		out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESTART_DMA_COMMAND);
	else {
		set_bit(SPU_CONTEXT_FAULT_PENDING, &spu->flags);
		mb();
	}
}

static inline void spu_load_slb(struct spu *spu, int slbe, struct copro_slb *slb)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	pr_debug("%s: adding SLB[%d] 0x%016llx 0x%016llx\n",
			__func__, slbe, slb->vsid, slb->esid);

	out_be64(&priv2->slb_index_W, slbe);
	/* set invalid before writing vsid */
	out_be64(&priv2->slb_esid_RW, 0);
	/* now it's safe to write the vsid */
	out_be64(&priv2->slb_vsid_RW, slb->vsid);
	/* setting the new esid makes the entry valid again */
	out_be64(&priv2->slb_esid_RW, slb->esid);
}

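/*
 * SLB miss on an SPE-side access: compute the new SLB entry and load it
 * into the slot selected by the round-robin replacement index.
 */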
static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
{
	struct copro_slb slb;
	int ret;

	ret = copro_calculate_slb(spu->mm, ea, &slb);
	if (ret)
		return ret;

	spu_load_slb(spu, spu->slb_replace, &slb);

	spu->slb_replace++;
	if (spu->slb_replace >= 8)
		spu->slb_replace = 0;

	spu_restart_dma(spu);
	spu->stats.slb_flt++;
	return 0;
}

extern int hash_page(unsigned long ea, unsigned long access,
		     unsigned long trap, unsigned long dsisr); //XXX
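/*
 * Class 1 data storage fault: kernel hash faults are resolved here via
 * hash_page() (dropping register_lock around the call, since it may
 * sleep); anything else is handed to spufs through stop_callback() with
 * the faulting address and DSISR recorded in the spu struct.
 */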
static int __spu_trap_data_map(struct spu *spu, unsigned long ea, u64 dsisr)
{
	int ret;

	pr_debug("%s, %llx, %lx\n", __func__, dsisr, ea);

	/*
	 * Handle kernel space hash faults immediately. User hash
	 * faults need to be deferred to process context.
	 */
	if ((dsisr & MFC_DSISR_PTE_NOT_FOUND) &&
	    (REGION_ID(ea) != USER_REGION_ID)) {

		spin_unlock(&spu->register_lock);
		ret = hash_page(ea, _PAGE_PRESENT, 0x300, dsisr);
		spin_lock(&spu->register_lock);

		if (!ret) {
			spu_restart_dma(spu);
			return 0;
		}
	}

	spu->class_1_dar = ea;
	spu->class_1_dsisr = dsisr;

	spu->stop_callback(spu, 1);

	spu->class_1_dar = 0;
	spu->class_1_dsisr = 0;

	return 0;
}

static void __spu_kernel_slb(void *addr, struct copro_slb *slb)
{
	unsigned long ea = (unsigned long)addr;
	u64 llp;

	if (REGION_ID(ea) == KERNEL_REGION_ID)
		llp = mmu_psize_defs[mmu_linear_psize].sllp;
	else
		llp = mmu_psize_defs[mmu_virtual_psize].sllp;

	slb->vsid = (get_kernel_vsid(ea, MMU_SEGSIZE_256M) << SLB_VSID_SHIFT) |
		SLB_VSID_KERNEL | llp;
	slb->esid = (ea & ESID_MASK) | SLB_ESID_V;
}

/**
 * Given an array of @nr_slbs SLB entries, @slbs, return non-zero if the
 * address @new_addr is present.
 */
static inline int __slb_present(struct copro_slb *slbs, int nr_slbs,
		void *new_addr)
{
	unsigned long ea = (unsigned long)new_addr;
	int i;

	for (i = 0; i < nr_slbs; i++)
		if (!((slbs[i].esid ^ ea) & ESID_MASK))
			return 1;

	return 0;
}

/**
 * Setup the SPU kernel SLBs, in preparation for a context save/restore. We
 * need to map both the context save area, and the save/restore code.
 *
 * Because the lscsa and code may cross segment boundaries, we check to see
 * if mappings are required for the start and end of each range. We currently
 * assume that the mappings are smaller than one segment - if not, something
 * is seriously wrong.
 */
void spu_setup_kernel_slbs(struct spu *spu, struct spu_lscsa *lscsa,
		void *code, int code_size)
{
	struct copro_slb slbs[4];
	int i, nr_slbs = 0;
	/* start and end addresses of both mappings */
	void *addrs[] = {
		lscsa, (void *)lscsa + sizeof(*lscsa) - 1,
		code, code + code_size - 1
	};

	/* check the set of addresses, and create a new entry in the slbs array
	 * if there isn't already an SLB for that address */
	for (i = 0; i < ARRAY_SIZE(addrs); i++) {
		if (__slb_present(slbs, nr_slbs, addrs[i]))
			continue;

		__spu_kernel_slb(addrs[i], &slbs[nr_slbs]);
		nr_slbs++;
	}

	spin_lock_irq(&spu->register_lock);
	/* Add the set of SLBs */
	for (i = 0; i < nr_slbs; i++)
		spu_load_slb(spu, i, &slbs[i]);
	spin_unlock_irq(&spu->register_lock);
}
EXPORT_SYMBOL_GPL(spu_setup_kernel_slbs);

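/*
 * Class 0 interrupt: error conditions reported by the SPU or its MFC.
 * The handler latches the status into class_0_pending for spufs to
 * inspect, notifies it through the stop callback, and acknowledges the
 * interrupt.
 */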
static irqreturn_t
spu_irq_class_0(int irq, void *data)
{
	struct spu *spu;
	unsigned long stat, mask;

	spu = data;

	spin_lock(&spu->register_lock);
	mask = spu_int_mask_get(spu, 0);
	stat = spu_int_stat_get(spu, 0) & mask;

	spu->class_0_pending |= stat;
	spu->class_0_dar = spu_mfc_dar_get(spu);
	spu->stop_callback(spu, 0);
	spu->class_0_pending = 0;
	spu->class_0_dar = 0;

	spu_int_stat_clear(spu, 0, stat);
	spin_unlock(&spu->register_lock);

	return IRQ_HANDLED;
}

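/*
 * Class 1 interrupt: address translation faults (SLB segment faults and
 * storage faults) raised by the MFC on behalf of the SPU.
 */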
static irqreturn_t
spu_irq_class_1(int irq, void *data)
{
	struct spu *spu;
	unsigned long stat, mask, dar, dsisr;

	spu = data;

	/* atomically read & clear class1 status. */
	spin_lock(&spu->register_lock);
	mask  = spu_int_mask_get(spu, 1);
	stat  = spu_int_stat_get(spu, 1) & mask;
	dar   = spu_mfc_dar_get(spu);
	dsisr = spu_mfc_dsisr_get(spu);
	if (stat & CLASS1_STORAGE_FAULT_INTR)
		spu_mfc_dsisr_set(spu, 0ul);
	spu_int_stat_clear(spu, 1, stat);

	pr_debug("%s: %lx %lx %lx %lx\n", __func__, mask, stat,
			dar, dsisr);

	if (stat & CLASS1_SEGMENT_FAULT_INTR)
		__spu_trap_data_seg(spu, dar);

	if (stat & CLASS1_STORAGE_FAULT_INTR)
		__spu_trap_data_map(spu, dar, dsisr);

	if (stat & CLASS1_LS_COMPARE_SUSPEND_ON_GET_INTR)
		;

	if (stat & CLASS1_LS_COMPARE_SUSPEND_ON_PUT_INTR)
		;

	spu->class_1_dsisr = 0;
	spu->class_1_dar = 0;

	spin_unlock(&spu->register_lock);

	return stat ? IRQ_HANDLED : IRQ_NONE;
}

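/*
 * Class 2 interrupt: application-level events such as mailbox traffic,
 * SPU stop/halt and DMA tag-group completion, dispatched to the
 * corresponding spufs callbacks.
 */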
static irqreturn_t
spu_irq_class_2(int irq, void *data)
{
	struct spu *spu;
	unsigned long stat;
	unsigned long mask;
	const int mailbox_intrs =
		CLASS2_MAILBOX_THRESHOLD_INTR | CLASS2_MAILBOX_INTR;

	spu = data;
	spin_lock(&spu->register_lock);
	stat = spu_int_stat_get(spu, 2);
	mask = spu_int_mask_get(spu, 2);
	/* ignore interrupts we're not waiting for */
	stat &= mask;
	/* mailbox interrupts are level triggered. mask them now before
	 * acknowledging */
	if (stat & mailbox_intrs)
		spu_int_mask_and(spu, 2, ~(stat & mailbox_intrs));
	/* acknowledge all interrupts before the callbacks */
	spu_int_stat_clear(spu, 2, stat);

	pr_debug("class 2 interrupt %d, %lx, %lx\n", irq, stat, mask);

	if (stat & CLASS2_MAILBOX_INTR)
		spu->ibox_callback(spu);

	if (stat & CLASS2_SPU_STOP_INTR)
		spu->stop_callback(spu, 2);

	if (stat & CLASS2_SPU_HALT_INTR)
		spu->stop_callback(spu, 2);

	if (stat & CLASS2_SPU_DMA_TAG_GROUP_COMPLETE_INTR)
		spu->mfc_callback(spu);

	if (stat & CLASS2_MAILBOX_THRESHOLD_INTR)
		spu->wbox_callback(spu);

	spu->stats.class2_intr++;

	spin_unlock(&spu->register_lock);

	return stat ? IRQ_HANDLED : IRQ_NONE;
}

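/*
 * Register handlers for the three interrupt classes of one SPU,
 * unwinding any already-registered handlers on failure.
 */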
static int spu_request_irqs(struct spu *spu)
{
	int ret = 0;

	if (spu->irqs[0] != NO_IRQ) {
		snprintf(spu->irq_c0, sizeof (spu->irq_c0), "spe%02d.0",
			 spu->number);
		ret = request_irq(spu->irqs[0], spu_irq_class_0,
				  0, spu->irq_c0, spu);
		if (ret)
			goto bail0;
	}
	if (spu->irqs[1] != NO_IRQ) {
		snprintf(spu->irq_c1, sizeof (spu->irq_c1), "spe%02d.1",
			 spu->number);
		ret = request_irq(spu->irqs[1], spu_irq_class_1,
				  0, spu->irq_c1, spu);
		if (ret)
			goto bail1;
	}
	if (spu->irqs[2] != NO_IRQ) {
		snprintf(spu->irq_c2, sizeof (spu->irq_c2), "spe%02d.2",
			 spu->number);
		ret = request_irq(spu->irqs[2], spu_irq_class_2,
				  0, spu->irq_c2, spu);
		if (ret)
			goto bail2;
	}
	return 0;

bail2:
	if (spu->irqs[1] != NO_IRQ)
		free_irq(spu->irqs[1], spu);
bail1:
	if (spu->irqs[0] != NO_IRQ)
		free_irq(spu->irqs[0], spu);
bail0:
	return ret;
}

static void spu_free_irqs(struct spu *spu)
{
	if (spu->irqs[0] != NO_IRQ)
		free_irq(spu->irqs[0], spu);
	if (spu->irqs[1] != NO_IRQ)
		free_irq(spu->irqs[1], spu);
	if (spu->irqs[2] != NO_IRQ)
		free_irq(spu->irqs[2], spu);
}

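/*
 * Bring the SPU channels into a known state: drain any stale channel
 * data and set each channel count to a meaningful initial value.
 */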
void spu_init_channels(struct spu *spu)
{
	static const struct {
		unsigned channel;
		unsigned count;
	} zero_list[] = {
		{ 0x00, 1, }, { 0x01, 1, }, { 0x03, 1, }, { 0x04, 1, },
		{ 0x18, 1, }, { 0x19, 1, }, { 0x1b, 1, }, { 0x1d, 1, },
	}, count_list[] = {
		{ 0x00, 0, }, { 0x03, 0, }, { 0x04, 0, }, { 0x15, 16, },
		{ 0x17, 1, }, { 0x18, 0, }, { 0x19, 0, }, { 0x1b, 0, },
		{ 0x1c, 1, }, { 0x1d, 0, }, { 0x1e, 1, },
	};
	struct spu_priv2 __iomem *priv2;
	int i;

	priv2 = spu->priv2;

	/* initialize all channel data to zero */
	for (i = 0; i < ARRAY_SIZE(zero_list); i++) {
		int count;

		out_be64(&priv2->spu_chnlcntptr_RW, zero_list[i].channel);
		for (count = 0; count < zero_list[i].count; count++)
			out_be64(&priv2->spu_chnldata_RW, 0);
	}

	/* initialize channel counts to meaningful values */
	for (i = 0; i < ARRAY_SIZE(count_list); i++) {
		out_be64(&priv2->spu_chnlcntptr_RW, count_list[i].channel);
		out_be64(&priv2->spu_chnlcnt_RW, count_list[i].count);
	}
}
EXPORT_SYMBOL_GPL(spu_init_channels);

static struct bus_type spu_subsys = {
	.name = "spu",
	.dev_name = "spu",
};

int spu_add_dev_attr(struct device_attribute *attr)
{
	struct spu *spu;

	mutex_lock(&spu_full_list_mutex);
	list_for_each_entry(spu, &spu_full_list, full_list)
		device_create_file(&spu->dev, attr);
	mutex_unlock(&spu_full_list_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(spu_add_dev_attr);

int spu_add_dev_attr_group(struct attribute_group *attrs)
{
	struct spu *spu;
	int rc = 0;

	mutex_lock(&spu_full_list_mutex);
	list_for_each_entry(spu, &spu_full_list, full_list) {
		rc = sysfs_create_group(&spu->dev.kobj, attrs);

		/* we're in trouble here, but try unwinding anyway */
		if (rc) {
			printk(KERN_ERR "%s: can't create sysfs group '%s'\n",
					__func__, attrs->name);

			list_for_each_entry_continue_reverse(spu,
					&spu_full_list, full_list)
				sysfs_remove_group(&spu->dev.kobj, attrs);
			break;
		}
	}

	mutex_unlock(&spu_full_list_mutex);

	return rc;
}
EXPORT_SYMBOL_GPL(spu_add_dev_attr_group);


void spu_remove_dev_attr(struct device_attribute *attr)
{
	struct spu *spu;

	mutex_lock(&spu_full_list_mutex);
	list_for_each_entry(spu, &spu_full_list, full_list)
		device_remove_file(&spu->dev, attr);
	mutex_unlock(&spu_full_list_mutex);
}
EXPORT_SYMBOL_GPL(spu_remove_dev_attr);

void spu_remove_dev_attr_group(struct attribute_group *attrs)
{
	struct spu *spu;

	mutex_lock(&spu_full_list_mutex);
	list_for_each_entry(spu, &spu_full_list, full_list)
		sysfs_remove_group(&spu->dev.kobj, attrs);
	mutex_unlock(&spu_full_list_mutex);
}
EXPORT_SYMBOL_GPL(spu_remove_dev_attr_group);

static int spu_create_dev(struct spu *spu)
{
	int ret;

	spu->dev.id = spu->number;
	spu->dev.bus = &spu_subsys;
	ret = device_register(&spu->dev);
	if (ret) {
		printk(KERN_ERR "Can't register SPU %d with sysfs\n",
				spu->number);
		return ret;
	}

	sysfs_add_device_to_node(&spu->dev, spu->node);

	return 0;
}

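/*
 * Instantiate one SPU found by the platform enumeration callback:
 * allocate the spu struct, hook up interrupts, register the sysfs
 * device and add the SPU to the per-node and global lists.
 */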
static int __init create_spu(void *data)
{
	struct spu *spu;
	int ret;
	static int number;
	unsigned long flags;

	ret = -ENOMEM;
	spu = kzalloc(sizeof (*spu), GFP_KERNEL);
	if (!spu)
		goto out;

	spu->alloc_state = SPU_FREE;

	spin_lock_init(&spu->register_lock);
	spin_lock(&spu_lock);
	spu->number = number++;
	spin_unlock(&spu_lock);

	ret = spu_create_spu(spu, data);

	if (ret)
		goto out_free;

	spu_mfc_sdr_setup(spu);
	spu_mfc_sr1_set(spu, 0x33);
	ret = spu_request_irqs(spu);
	if (ret)
		goto out_destroy;

	ret = spu_create_dev(spu);
	if (ret)
		goto out_free_irqs;

	mutex_lock(&cbe_spu_info[spu->node].list_mutex);
	list_add(&spu->cbe_list, &cbe_spu_info[spu->node].spus);
	cbe_spu_info[spu->node].n_spus++;
	mutex_unlock(&cbe_spu_info[spu->node].list_mutex);

	mutex_lock(&spu_full_list_mutex);
	spin_lock_irqsave(&spu_full_list_lock, flags);
	list_add(&spu->full_list, &spu_full_list);
	spin_unlock_irqrestore(&spu_full_list_lock, flags);
	mutex_unlock(&spu_full_list_mutex);

	spu->stats.util_state = SPU_UTIL_IDLE_LOADED;
	spu->stats.tstamp = ktime_get_ns();

	INIT_LIST_HEAD(&spu->aff_list);

	goto out;

out_free_irqs:
	spu_free_irqs(spu);
out_destroy:
	spu_destroy_spu(spu);
out_free:
	kfree(spu);
out:
	return ret;
}

static const char *spu_state_names[] = {
	"user", "system", "iowait", "idle"
};

static unsigned long long spu_acct_time(struct spu *spu,
		enum spu_utilization_state state)
{
	unsigned long long time = spu->stats.times[state];

	/*
	 * If the spu is idle or the context is stopped, utilization
	 * statistics are not updated.  Apply the time delta from the
	 * last recorded state of the spu.
	 */
	if (spu->stats.util_state == state)
		time += ktime_get_ns() - spu->stats.tstamp;

	return time / NSEC_PER_MSEC;
}


static ssize_t spu_stat_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct spu *spu = container_of(dev, struct spu, dev);

	return sprintf(buf, "%s %llu %llu %llu %llu "
		"%llu %llu %llu %llu %llu %llu %llu %llu\n",
		spu_state_names[spu->stats.util_state],
		spu_acct_time(spu, SPU_UTIL_USER),
		spu_acct_time(spu, SPU_UTIL_SYSTEM),
		spu_acct_time(spu, SPU_UTIL_IOWAIT),
		spu_acct_time(spu, SPU_UTIL_IDLE_LOADED),
		spu->stats.vol_ctx_switch,
		spu->stats.invol_ctx_switch,
		spu->stats.slb_flt,
		spu->stats.hash_flt,
		spu->stats.min_flt,
		spu->stats.maj_flt,
		spu->stats.class2_intr,
		spu->stats.libassist);
}

static DEVICE_ATTR(stat, 0444, spu_stat_show, NULL);

#ifdef CONFIG_KEXEC

struct crash_spu_info {
	struct spu *spu;
	u32 saved_spu_runcntl_RW;
	u32 saved_spu_status_R;
	u32 saved_spu_npc_RW;
	u64 saved_mfc_sr1_RW;
	u64 saved_mfc_dar;
	u64 saved_mfc_dsisr;
};

#define CRASH_NUM_SPUS	16	/* Enough for current hardware */
static struct crash_spu_info crash_spu_info[CRASH_NUM_SPUS];

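/*
 * Crash-shutdown hook: save the run-control, status, NPC and MFC state
 * of every known SPU, then clear the master run-control bit to stop
 * them before the crash kernel takes over.
 */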
static void crash_kexec_stop_spus(void)
{
	struct spu *spu;
	int i;
	u64 tmp;

	for (i = 0; i < CRASH_NUM_SPUS; i++) {
		if (!crash_spu_info[i].spu)
			continue;

		spu = crash_spu_info[i].spu;

		crash_spu_info[i].saved_spu_runcntl_RW =
			in_be32(&spu->problem->spu_runcntl_RW);
		crash_spu_info[i].saved_spu_status_R =
			in_be32(&spu->problem->spu_status_R);
		crash_spu_info[i].saved_spu_npc_RW =
			in_be32(&spu->problem->spu_npc_RW);

		crash_spu_info[i].saved_mfc_dar = spu_mfc_dar_get(spu);
		crash_spu_info[i].saved_mfc_dsisr = spu_mfc_dsisr_get(spu);
		tmp = spu_mfc_sr1_get(spu);
		crash_spu_info[i].saved_mfc_sr1_RW = tmp;

		tmp &= ~MFC_STATE1_MASTER_RUN_CONTROL_MASK;
		spu_mfc_sr1_set(spu, tmp);

		__delay(200);
	}
}

static void crash_register_spus(struct list_head *list)
{
	struct spu *spu;
	int ret;

	list_for_each_entry(spu, list, full_list) {
		if (WARN_ON(spu->number >= CRASH_NUM_SPUS))
			continue;

		crash_spu_info[spu->number].spu = spu;
	}

	ret = crash_shutdown_register(&crash_kexec_stop_spus);
	if (ret)
		printk(KERN_ERR "Could not register SPU crash handler\n");
}

#else
static inline void crash_register_spus(struct list_head *list)
{
}
#endif

static void spu_shutdown(void)
{
	struct spu *spu;

	mutex_lock(&spu_full_list_mutex);
	list_for_each_entry(spu, &spu_full_list, full_list) {
		spu_free_irqs(spu);
		spu_destroy_spu(spu);
	}
	mutex_unlock(&spu_full_list_mutex);
}

static struct syscore_ops spu_syscore_ops = {
	.shutdown = spu_shutdown,
};

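/*
 * Module init: register the spu subsystem on the system bus, enumerate
 * and create all SPUs, and hook them up to xmon, kexec and the syscore
 * shutdown path.
 */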
static int __init init_spu_base(void)
{
	int i, ret = 0;

	for (i = 0; i < MAX_NUMNODES; i++) {
		mutex_init(&cbe_spu_info[i].list_mutex);
		INIT_LIST_HEAD(&cbe_spu_info[i].spus);
	}

	if (!spu_management_ops)
		goto out;

	/* create system subsystem for spus */
	ret = subsys_system_register(&spu_subsys, NULL);
	if (ret)
		goto out;

	ret = spu_enumerate_spus(create_spu);

	if (ret < 0) {
		printk(KERN_WARNING "%s: Error initializing spus\n",
			__func__);
		goto out_unregister_subsys;
	}

	if (ret > 0)
		fb_append_extra_logo(&logo_spe_clut224, ret);

	mutex_lock(&spu_full_list_mutex);
	xmon_register_spus(&spu_full_list);
	crash_register_spus(&spu_full_list);
	mutex_unlock(&spu_full_list_mutex);
	spu_add_dev_attr(&dev_attr_stat);
	register_syscore_ops(&spu_syscore_ops);

	spu_init_affinity();

	return 0;

 out_unregister_subsys:
	bus_unregister(&spu_subsys);
 out:
	return ret;
}
module_init(init_spu_base);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>");