/*
 * Low-level SPU handling
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
 *
 * Author: Arnd Bergmann <arndb@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#undef DEBUG

#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/mutex.h>
#include <linux/linux_logo.h>
#include <asm/spu.h>
#include <asm/spu_priv1.h>
#include <asm/spu_csa.h>
#include <asm/xmon.h>
#include <asm/prom.h>

const struct spu_management_ops *spu_management_ops;
EXPORT_SYMBOL_GPL(spu_management_ops);

const struct spu_priv1_ops *spu_priv1_ops;
EXPORT_SYMBOL_GPL(spu_priv1_ops);

struct cbe_spu_info cbe_spu_info[MAX_NUMNODES];
EXPORT_SYMBOL_GPL(cbe_spu_info);

/*
 * The spufs fault-handling code needs to call force_sig_info to raise signals
 * on DMA errors. Export it here to avoid general kernel-wide access to this
 * function.
 */
EXPORT_SYMBOL_GPL(force_sig_info);

/*
 * Protects cbe_spu_info and spu->number.
 */
static DEFINE_SPINLOCK(spu_lock);

/*
 * List of all spus in the system.
 *
 * This list is iterated by callers from irq context and callers that
 * want to sleep.  Thus modifications need to be done with both
 * spu_full_list_lock and spu_full_list_mutex held, while iterating
 * through it requires either of these locks.
 *
 * In addition spu_full_list_lock protects all assignments to
 * spu->mm.
 */
static LIST_HEAD(spu_full_list);
static DEFINE_SPINLOCK(spu_full_list_lock);
static DEFINE_MUTEX(spu_full_list_mutex);

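/*
 * Illustrative sketch (not part of the driver) of the locking rules
 * above; do_something_blocking/do_something_atomic are hypothetical
 * helpers:
 *
 *	mutex_lock(&spu_full_list_mutex);		sleeping caller
 *	list_for_each_entry(spu, &spu_full_list, full_list)
 *		do_something_blocking(spu);
 *	mutex_unlock(&spu_full_list_mutex);
 *
 *	spin_lock_irqsave(&spu_full_list_lock, flags);	atomic caller
 *	list_for_each_entry(spu, &spu_full_list, full_list)
 *		do_something_atomic(spu);
 *	spin_unlock_irqrestore(&spu_full_list_lock, flags);
 *
 * Writers must hold both locks before modifying the list.
 */
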
struct spu_slb {
	u64 esid, vsid;
};

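/*
 * Invalidate all SLB entries on an SPE. This is only done while
 * translation is enabled (MFC_STATE1_RELOCATE_MASK set), and is
 * serialized against the interrupt handlers by register_lock.
 */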
void spu_invalidate_slbs(struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	unsigned long flags;

	spin_lock_irqsave(&spu->register_lock, flags);
	if (spu_mfc_sr1_get(spu) & MFC_STATE1_RELOCATE_MASK)
		out_be64(&priv2->slb_invalidate_all_W, 0UL);
	spin_unlock_irqrestore(&spu->register_lock, flags);
}
EXPORT_SYMBOL_GPL(spu_invalidate_slbs);

/* This is called by the MM core when a segment size is changed, to
 * request a flush of all the SPEs using a given mm
 */
void spu_flush_all_slbs(struct mm_struct *mm)
{
	struct spu *spu;
	unsigned long flags;

	spin_lock_irqsave(&spu_full_list_lock, flags);
	list_for_each_entry(spu, &spu_full_list, full_list) {
		if (spu->mm == mm)
			spu_invalidate_slbs(spu);
	}
	spin_unlock_irqrestore(&spu_full_list_lock, flags);
}

/* The hack below stinks... try to do something better one of
 * these days... Does it even work properly with NR_CPUS == 1 ?
 */
static inline void mm_needs_global_tlbie(struct mm_struct *mm)
{
	int nr = (NR_CPUS > 1) ? NR_CPUS : NR_CPUS + 1;

	/* Global TLBIE broadcast required with SPEs. */
	__cpus_setall(&mm->cpu_vm_mask, nr);
}

void spu_associate_mm(struct spu *spu, struct mm_struct *mm)
{
	unsigned long flags;

	spin_lock_irqsave(&spu_full_list_lock, flags);
	spu->mm = mm;
	spin_unlock_irqrestore(&spu_full_list_lock, flags);
	if (mm)
		mm_needs_global_tlbie(mm);
}
EXPORT_SYMBOL_GPL(spu_associate_mm);

int spu_64k_pages_available(void)
{
	return mmu_psize_defs[MMU_PAGE_64K].shift != 0;
}
EXPORT_SYMBOL_GPL(spu_64k_pages_available);

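/*
 * Restart a suspended MFC DMA queue. If a context switch is pending,
 * only note the fault and leave it to the context-switch code to
 * restart the queue once the switch has completed.
 */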
static void spu_restart_dma(struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	if (!test_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags))
		out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESTART_DMA_COMMAND);
	else {
		set_bit(SPU_CONTEXT_FAULT_PENDING, &spu->flags);
		mb();
	}
}

static inline void spu_load_slb(struct spu *spu, int slbe, struct spu_slb *slb)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	pr_debug("%s: adding SLB[%d] 0x%016llx 0x%016llx\n",
			__func__, slbe, slb->vsid, slb->esid);

	out_be64(&priv2->slb_index_W, slbe);
	/* set invalid before writing vsid */
	out_be64(&priv2->slb_esid_RW, 0);
	/* now it's safe to write the vsid */
	out_be64(&priv2->slb_vsid_RW, slb->vsid);
	/* setting the new esid makes the entry valid again */
	out_be64(&priv2->slb_esid_RW, slb->esid);
}

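/*
 * Handle an SPE segment fault: build an SLB entry for the faulting
 * effective address and load it into one of the eight SLB slots,
 * replacing entries round-robin via spu->slb_replace. Returns 0 on
 * success, or 1 if the address falls in no supported region.
 */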
static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
{
	struct mm_struct *mm = spu->mm;
	struct spu_slb slb;
	int psize;

	pr_debug("%s\n", __func__);

	slb.esid = (ea & ESID_MASK) | SLB_ESID_V;

	switch(REGION_ID(ea)) {
	case USER_REGION_ID:
#ifdef CONFIG_PPC_MM_SLICES
		psize = get_slice_psize(mm, ea);
#else
		psize = mm->context.user_psize;
#endif
		slb.vsid = (get_vsid(mm->context.id, ea, MMU_SEGSIZE_256M)
				<< SLB_VSID_SHIFT) | SLB_VSID_USER;
		break;
	case VMALLOC_REGION_ID:
		if (ea < VMALLOC_END)
			psize = mmu_vmalloc_psize;
		else
			psize = mmu_io_psize;
		slb.vsid = (get_kernel_vsid(ea, MMU_SEGSIZE_256M)
				<< SLB_VSID_SHIFT) | SLB_VSID_KERNEL;
		break;
	case KERNEL_REGION_ID:
		psize = mmu_linear_psize;
		slb.vsid = (get_kernel_vsid(ea, MMU_SEGSIZE_256M)
				<< SLB_VSID_SHIFT) | SLB_VSID_KERNEL;
		break;
	default:
		/* Future: support kernel segments so that drivers
		 * can use SPUs.
		 */
		pr_debug("invalid region access at %016lx\n", ea);
		return 1;
	}
	slb.vsid |= mmu_psize_defs[psize].sllp;

	spu_load_slb(spu, spu->slb_replace, &slb);

	spu->slb_replace++;
	if (spu->slb_replace >= 8)
		spu->slb_replace = 0;

	spu_restart_dma(spu);
	spu->stats.slb_flt++;
	return 0;
}

extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap); //XXX
static int __spu_trap_data_map(struct spu *spu, unsigned long ea, u64 dsisr)
{
	int ret;

	pr_debug("%s, %llx, %lx\n", __func__, dsisr, ea);

	/*
	 * Handle kernel space hash faults immediately. User hash
	 * faults need to be deferred to process context.
	 */
	if ((dsisr & MFC_DSISR_PTE_NOT_FOUND) &&
	    (REGION_ID(ea) != USER_REGION_ID)) {

		spin_unlock(&spu->register_lock);
		ret = hash_page(ea, _PAGE_PRESENT, 0x300);
		spin_lock(&spu->register_lock);

		if (!ret) {
			spu_restart_dma(spu);
			return 0;
		}
	}

	spu->class_1_dar = ea;
	spu->class_1_dsisr = dsisr;

	spu->stop_callback(spu, 1);

	spu->class_1_dar = 0;
	spu->class_1_dsisr = 0;

	return 0;
}

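/*
 * Fill in an SLB entry for a kernel-space address, picking the page
 * size from the region the address lives in (linear mapping vs.
 * vmalloc space).
 */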
static void __spu_kernel_slb(void *addr, struct spu_slb *slb)
{
	unsigned long ea = (unsigned long)addr;
	u64 llp;

	if (REGION_ID(ea) == KERNEL_REGION_ID)
		llp = mmu_psize_defs[mmu_linear_psize].sllp;
	else
		llp = mmu_psize_defs[mmu_virtual_psize].sllp;

	slb->vsid = (get_kernel_vsid(ea, MMU_SEGSIZE_256M) << SLB_VSID_SHIFT) |
		SLB_VSID_KERNEL | llp;
	slb->esid = (ea & ESID_MASK) | SLB_ESID_V;
}

/**
 * Given an array of @nr_slbs SLB entries, @slbs, return non-zero if the
 * address @new_addr is present.
 */
static inline int __slb_present(struct spu_slb *slbs, int nr_slbs,
		void *new_addr)
{
	unsigned long ea = (unsigned long)new_addr;
	int i;

	for (i = 0; i < nr_slbs; i++)
		if (!((slbs[i].esid ^ ea) & ESID_MASK))
			return 1;

	return 0;
}

/**
 * Setup the SPU kernel SLBs, in preparation for a context save/restore. We
 * need to map both the context save area, and the save/restore code.
 *
 * Because the lscsa and code may cross segment boundaries, we check to see
 * if mappings are required for the start and end of each range. We currently
 * assume that the mappings are smaller than one segment - if not, something
 * is seriously wrong.
 */
void spu_setup_kernel_slbs(struct spu *spu, struct spu_lscsa *lscsa,
		void *code, int code_size)
{
	struct spu_slb slbs[4];
	int i, nr_slbs = 0;
	/* start and end addresses of both mappings */
	void *addrs[] = {
		lscsa, (void *)lscsa + sizeof(*lscsa) - 1,
		code, code + code_size - 1
	};

	/* check the set of addresses, and create a new entry in the slbs array
	 * if there isn't already an SLB for that address */
	for (i = 0; i < ARRAY_SIZE(addrs); i++) {
		if (__slb_present(slbs, nr_slbs, addrs[i]))
			continue;

		__spu_kernel_slb(addrs[i], &slbs[nr_slbs]);
		nr_slbs++;
	}

	spin_lock_irq(&spu->register_lock);
	/* Add the set of SLBs */
	for (i = 0; i < nr_slbs; i++)
		spu_load_slb(spu, i, &slbs[i]);
	spin_unlock_irq(&spu->register_lock);
}
EXPORT_SYMBOL_GPL(spu_setup_kernel_slbs);

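/*
 * SPE interrupts arrive in three classes: class 0 for errors, class 1
 * for translation (segment and storage) faults, and class 2 for
 * application events such as mailbox traffic, SPU stop/halt and DMA
 * tag-group completion. Class 0 status is latched in class_0_pending
 * and handed to the stop callback.
 */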
static irqreturn_t
spu_irq_class_0(int irq, void *data)
{
	struct spu *spu;
	unsigned long stat, mask;

	spu = data;

	spin_lock(&spu->register_lock);
	mask = spu_int_mask_get(spu, 0);
	stat = spu_int_stat_get(spu, 0) & mask;

	spu->class_0_pending |= stat;
	spu->class_0_dar = spu_mfc_dar_get(spu);
	spu->stop_callback(spu, 0);
	spu->class_0_pending = 0;
	spu->class_0_dar = 0;

	spu_int_stat_clear(spu, 0, stat);
	spin_unlock(&spu->register_lock);

	return IRQ_HANDLED;
}

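/*
 * Class 1 interrupts signal MFC translation faults. Segment faults are
 * fixed up directly by loading an SLB entry; storage (hash) faults are
 * resolved inline for kernel addresses or deferred to the owning
 * context through the stop callback.
 */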
static irqreturn_t
spu_irq_class_1(int irq, void *data)
{
	struct spu *spu;
	unsigned long stat, mask, dar, dsisr;

	spu = data;

	/* atomically read & clear class1 status. */
	spin_lock(&spu->register_lock);
	mask  = spu_int_mask_get(spu, 1);
	stat  = spu_int_stat_get(spu, 1) & mask;
	dar   = spu_mfc_dar_get(spu);
	dsisr = spu_mfc_dsisr_get(spu);
	if (stat & CLASS1_STORAGE_FAULT_INTR)
		spu_mfc_dsisr_set(spu, 0ul);
	spu_int_stat_clear(spu, 1, stat);

	pr_debug("%s: %lx %lx %lx %lx\n", __func__, mask, stat,
			dar, dsisr);

	if (stat & CLASS1_SEGMENT_FAULT_INTR)
		__spu_trap_data_seg(spu, dar);

	if (stat & CLASS1_STORAGE_FAULT_INTR)
		__spu_trap_data_map(spu, dar, dsisr);

	if (stat & CLASS1_LS_COMPARE_SUSPEND_ON_GET_INTR)
		;

	if (stat & CLASS1_LS_COMPARE_SUSPEND_ON_PUT_INTR)
		;

	spu->class_1_dsisr = 0;
	spu->class_1_dar = 0;

	spin_unlock(&spu->register_lock);

	return stat ? IRQ_HANDLED : IRQ_NONE;
}

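/*
 * Class 2 interrupts cover the application-visible events: mailbox
 * traffic, SPU stop/halt and MFC tag-group completion. Mailbox
 * interrupts are level triggered, so they are masked here before being
 * acknowledged; re-enabling them is left to the consumers of the
 * callbacks.
 */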
static irqreturn_t
spu_irq_class_2(int irq, void *data)
{
	struct spu *spu;
	unsigned long stat;
	unsigned long mask;
	const int mailbox_intrs =
		CLASS2_MAILBOX_THRESHOLD_INTR | CLASS2_MAILBOX_INTR;

	spu = data;
	spin_lock(&spu->register_lock);
	stat = spu_int_stat_get(spu, 2);
	mask = spu_int_mask_get(spu, 2);
	/* ignore interrupts we're not waiting for */
	stat &= mask;
	/* mailbox interrupts are level triggered. mask them now before
	 * acknowledging */
	if (stat & mailbox_intrs)
		spu_int_mask_and(spu, 2, ~(stat & mailbox_intrs));
	/* acknowledge all interrupts before the callbacks */
	spu_int_stat_clear(spu, 2, stat);

	pr_debug("class 2 interrupt %d, %lx, %lx\n", irq, stat, mask);

	if (stat & CLASS2_MAILBOX_INTR)
		spu->ibox_callback(spu);

	if (stat & CLASS2_SPU_STOP_INTR)
		spu->stop_callback(spu, 2);

	if (stat & CLASS2_SPU_HALT_INTR)
		spu->stop_callback(spu, 2);

	if (stat & CLASS2_SPU_DMA_TAG_GROUP_COMPLETE_INTR)
		spu->mfc_callback(spu);

	if (stat & CLASS2_MAILBOX_THRESHOLD_INTR)
		spu->wbox_callback(spu);

	spu->stats.class2_intr++;

	spin_unlock(&spu->register_lock);

	return stat ? IRQ_HANDLED : IRQ_NONE;
}

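/*
 * Request the three per-SPE interrupt lines, one per class, unwinding
 * any already-requested lines on failure.
 */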
static int spu_request_irqs(struct spu *spu)
{
	int ret = 0;

	if (spu->irqs[0] != NO_IRQ) {
		snprintf(spu->irq_c0, sizeof (spu->irq_c0), "spe%02d.0",
			 spu->number);
		ret = request_irq(spu->irqs[0], spu_irq_class_0,
				  IRQF_DISABLED,
				  spu->irq_c0, spu);
		if (ret)
			goto bail0;
	}
	if (spu->irqs[1] != NO_IRQ) {
		snprintf(spu->irq_c1, sizeof (spu->irq_c1), "spe%02d.1",
			 spu->number);
		ret = request_irq(spu->irqs[1], spu_irq_class_1,
				  IRQF_DISABLED,
				  spu->irq_c1, spu);
		if (ret)
			goto bail1;
	}
	if (spu->irqs[2] != NO_IRQ) {
		snprintf(spu->irq_c2, sizeof (spu->irq_c2), "spe%02d.2",
			 spu->number);
		ret = request_irq(spu->irqs[2], spu_irq_class_2,
				  IRQF_DISABLED,
				  spu->irq_c2, spu);
		if (ret)
			goto bail2;
	}
	return 0;

bail2:
	if (spu->irqs[1] != NO_IRQ)
		free_irq(spu->irqs[1], spu);
bail1:
	if (spu->irqs[0] != NO_IRQ)
		free_irq(spu->irqs[0], spu);
bail0:
	return ret;
}

static void spu_free_irqs(struct spu *spu)
{
	if (spu->irqs[0] != NO_IRQ)
		free_irq(spu->irqs[0], spu);
	if (spu->irqs[1] != NO_IRQ)
		free_irq(spu->irqs[1], spu);
	if (spu->irqs[2] != NO_IRQ)
		free_irq(spu->irqs[2], spu);
}

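/*
 * Bring the SPE channels into a known state: drain any stale channel
 * data and set each channel count to a sane initial value.
 */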
void spu_init_channels(struct spu *spu)
{
	static const struct {
		unsigned channel;
		unsigned count;
	} zero_list[] = {
		{ 0x00, 1, }, { 0x01, 1, }, { 0x03, 1, }, { 0x04, 1, },
		{ 0x18, 1, }, { 0x19, 1, }, { 0x1b, 1, }, { 0x1d, 1, },
	}, count_list[] = {
		{ 0x00, 0, }, { 0x03, 0, }, { 0x04, 0, }, { 0x15, 16, },
		{ 0x17, 1, }, { 0x18, 0, }, { 0x19, 0, }, { 0x1b, 0, },
		{ 0x1c, 1, }, { 0x1d, 0, }, { 0x1e, 1, },
	};
	struct spu_priv2 __iomem *priv2;
	int i;

	priv2 = spu->priv2;

	/* initialize all channel data to zero */
	for (i = 0; i < ARRAY_SIZE(zero_list); i++) {
		int count;

		out_be64(&priv2->spu_chnlcntptr_RW, zero_list[i].channel);
		for (count = 0; count < zero_list[i].count; count++)
			out_be64(&priv2->spu_chnldata_RW, 0);
	}

	/* initialize channel counts to meaningful values */
	for (i = 0; i < ARRAY_SIZE(count_list); i++) {
		out_be64(&priv2->spu_chnlcntptr_RW, count_list[i].channel);
		out_be64(&priv2->spu_chnlcnt_RW, count_list[i].count);
	}
}
EXPORT_SYMBOL_GPL(spu_init_channels);

static int spu_shutdown(struct sys_device *sysdev)
{
	struct spu *spu = container_of(sysdev, struct spu, sysdev);

	spu_free_irqs(spu);
	spu_destroy_spu(spu);
	return 0;
}

static struct sysdev_class spu_sysdev_class = {
	.name = "spu",
	.shutdown = spu_shutdown,
};

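/*
 * The helpers below add or remove a sysdev attribute (or attribute
 * group) on every SPU in the system, walking spu_full_list under
 * spu_full_list_mutex.
 */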
int spu_add_sysdev_attr(struct sysdev_attribute *attr)
{
	struct spu *spu;

	mutex_lock(&spu_full_list_mutex);
	list_for_each_entry(spu, &spu_full_list, full_list)
		sysdev_create_file(&spu->sysdev, attr);
	mutex_unlock(&spu_full_list_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(spu_add_sysdev_attr);

int spu_add_sysdev_attr_group(struct attribute_group *attrs)
{
	struct spu *spu;
	int rc = 0;

	mutex_lock(&spu_full_list_mutex);
	list_for_each_entry(spu, &spu_full_list, full_list) {
		rc = sysfs_create_group(&spu->sysdev.kobj, attrs);

		/* we're in trouble here, but try unwinding anyway */
		if (rc) {
			printk(KERN_ERR "%s: can't create sysfs group '%s'\n",
					__func__, attrs->name);

			list_for_each_entry_continue_reverse(spu,
					&spu_full_list, full_list)
				sysfs_remove_group(&spu->sysdev.kobj, attrs);
			break;
		}
	}

	mutex_unlock(&spu_full_list_mutex);

	return rc;
}
EXPORT_SYMBOL_GPL(spu_add_sysdev_attr_group);

void spu_remove_sysdev_attr(struct sysdev_attribute *attr)
{
	struct spu *spu;

	mutex_lock(&spu_full_list_mutex);
	list_for_each_entry(spu, &spu_full_list, full_list)
		sysdev_remove_file(&spu->sysdev, attr);
	mutex_unlock(&spu_full_list_mutex);
}
EXPORT_SYMBOL_GPL(spu_remove_sysdev_attr);

void spu_remove_sysdev_attr_group(struct attribute_group *attrs)
{
	struct spu *spu;

	mutex_lock(&spu_full_list_mutex);
	list_for_each_entry(spu, &spu_full_list, full_list)
		sysfs_remove_group(&spu->sysdev.kobj, attrs);
	mutex_unlock(&spu_full_list_mutex);
}
EXPORT_SYMBOL_GPL(spu_remove_sysdev_attr_group);

static int spu_create_sysdev(struct spu *spu)
{
	int ret;

	spu->sysdev.id = spu->number;
	spu->sysdev.cls = &spu_sysdev_class;
	ret = sysdev_register(&spu->sysdev);
	if (ret) {
		printk(KERN_ERR "Can't register SPU %d with sysfs\n",
				spu->number);
		return ret;
	}

	sysfs_add_device_to_node(&spu->sysdev, spu->node);

	return 0;
}

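/*
 * Instantiate one SPU from firmware data: allocate and number it, let
 * the platform code create it, set up the MFC registers and interrupt
 * lines, register it with sysfs, and link it into the per-node and
 * global lists.
 */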
static int __init create_spu(void *data)
{
	struct spu *spu;
	int ret;
	static int number;
	unsigned long flags;
	struct timespec ts;

	ret = -ENOMEM;
	spu = kzalloc(sizeof (*spu), GFP_KERNEL);
	if (!spu)
		goto out;

	spu->alloc_state = SPU_FREE;

	spin_lock_init(&spu->register_lock);
	spin_lock(&spu_lock);
	spu->number = number++;
	spin_unlock(&spu_lock);

	ret = spu_create_spu(spu, data);

	if (ret)
		goto out_free;

	spu_mfc_sdr_setup(spu);
	spu_mfc_sr1_set(spu, 0x33);
	ret = spu_request_irqs(spu);
	if (ret)
		goto out_destroy;

	ret = spu_create_sysdev(spu);
	if (ret)
		goto out_free_irqs;

	mutex_lock(&cbe_spu_info[spu->node].list_mutex);
	list_add(&spu->cbe_list, &cbe_spu_info[spu->node].spus);
	cbe_spu_info[spu->node].n_spus++;
	mutex_unlock(&cbe_spu_info[spu->node].list_mutex);

	mutex_lock(&spu_full_list_mutex);
	spin_lock_irqsave(&spu_full_list_lock, flags);
	list_add(&spu->full_list, &spu_full_list);
	spin_unlock_irqrestore(&spu_full_list_lock, flags);
	mutex_unlock(&spu_full_list_mutex);

	spu->stats.util_state = SPU_UTIL_IDLE_LOADED;
	ktime_get_ts(&ts);
	spu->stats.tstamp = timespec_to_ns(&ts);

	INIT_LIST_HEAD(&spu->aff_list);

	goto out;

out_free_irqs:
	spu_free_irqs(spu);
out_destroy:
	spu_destroy_spu(spu);
out_free:
	kfree(spu);
out:
	return ret;
}

static const char *spu_state_names[] = {
	"user", "system", "iowait", "idle"
};

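/*
 * Return the time in milliseconds the SPU has spent in @state,
 * including the still-accumulating delta if that is its current
 * utilization state.
 */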
static unsigned long long spu_acct_time(struct spu *spu,
		enum spu_utilization_state state)
{
	struct timespec ts;
	unsigned long long time = spu->stats.times[state];

	/*
	 * If the spu is idle or the context is stopped, utilization
	 * statistics are not updated.  Apply the time delta from the
	 * last recorded state of the spu.
	 */
	if (spu->stats.util_state == state) {
		ktime_get_ts(&ts);
		time += timespec_to_ns(&ts) - spu->stats.tstamp;
	}

	return time / NSEC_PER_MSEC;
}

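/*
 * Format the per-SPU statistics for the sysfs "stat" attribute: the
 * current state, the per-state times, and the context-switch, fault,
 * interrupt and libassist counters.
 */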
static ssize_t spu_stat_show(struct sys_device *sysdev,
			struct sysdev_attribute *attr, char *buf)
{
	struct spu *spu = container_of(sysdev, struct spu, sysdev);

	return sprintf(buf, "%s %llu %llu %llu %llu "
		"%llu %llu %llu %llu %llu %llu %llu %llu\n",
		spu_state_names[spu->stats.util_state],
		spu_acct_time(spu, SPU_UTIL_USER),
		spu_acct_time(spu, SPU_UTIL_SYSTEM),
		spu_acct_time(spu, SPU_UTIL_IOWAIT),
		spu_acct_time(spu, SPU_UTIL_IDLE_LOADED),
		spu->stats.vol_ctx_switch,
		spu->stats.invol_ctx_switch,
		spu->stats.slb_flt,
		spu->stats.hash_flt,
		spu->stats.min_flt,
		spu->stats.maj_flt,
		spu->stats.class2_intr,
		spu->stats.libassist);
}

static SYSDEV_ATTR(stat, 0644, spu_stat_show, NULL);

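/*
 * Probe and register all SPEs at boot: initialize the per-node lists,
 * register the sysdev class, enumerate the SPEs through the platform
 * management ops, and hook up xmon, crash dump and the stat attribute.
 */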
static int __init init_spu_base(void)
{
	int i, ret = 0;

	for (i = 0; i < MAX_NUMNODES; i++) {
		mutex_init(&cbe_spu_info[i].list_mutex);
		INIT_LIST_HEAD(&cbe_spu_info[i].spus);
	}

	if (!spu_management_ops)
		goto out;

	/* create sysdev class for spus */
	ret = sysdev_class_register(&spu_sysdev_class);
	if (ret)
		goto out;

	ret = spu_enumerate_spus(create_spu);

	if (ret < 0) {
		printk(KERN_WARNING "%s: Error initializing spus\n",
			__func__);
		goto out_unregister_sysdev_class;
	}

	if (ret > 0) {
		/*
		 * We cannot put the forward declaration in
		 * <linux/linux_logo.h> because of session type conflicts
		 * for const and __initdata with different compiler
		 * versions
		 */
		extern const struct linux_logo logo_spe_clut224;

		fb_append_extra_logo(&logo_spe_clut224, ret);
	}

	mutex_lock(&spu_full_list_mutex);
	xmon_register_spus(&spu_full_list);
	crash_register_spus(&spu_full_list);
	mutex_unlock(&spu_full_list_mutex);
	spu_add_sysdev_attr(&attr_stat);

	spu_init_affinity();

	return 0;

 out_unregister_sysdev_class:
	sysdev_class_unregister(&spu_sysdev_class);
 out:
	return ret;
}
module_init(init_spu_base);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>");