/*
 * Low-level SPU handling
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
 *
 * Author: Arnd Bergmann <arndb@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#undef DEBUG

#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/mutex.h>
#include <asm/spu.h>
#include <asm/spu_priv1.h>
#include <asm/xmon.h>

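/*
 * The platform backend (bare-metal Cell or a hypervisor-mediated one)
 * fills in these two ops tables at setup time: spu_management_ops for
 * SPU discovery and creation, spu_priv1_ops for access to the
 * privileged register set.
 */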
const struct spu_management_ops *spu_management_ops;
EXPORT_SYMBOL_GPL(spu_management_ops);

const struct spu_priv1_ops *spu_priv1_ops;
EXPORT_SYMBOL_GPL(spu_priv1_ops);

static struct list_head spu_list[MAX_NUMNODES];
static LIST_HEAD(spu_full_list);
static DEFINE_MUTEX(spu_mutex);
static DEFINE_SPINLOCK(spu_list_lock);

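/*
 * Invalidate all SLB entries cached by one SPE's MFC. Only meaningful
 * while the MFC has translation enabled, hence the check of the
 * relocate bit in MFC_SR1.
 */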
void spu_invalidate_slbs(struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	if (spu_mfc_sr1_get(spu) & MFC_STATE1_RELOCATE_MASK)
		out_be64(&priv2->slb_invalidate_all_W, 0UL);
}
EXPORT_SYMBOL_GPL(spu_invalidate_slbs);

/* This is called by the MM core when a segment size is changed, to
 * request a flush of all the SPEs using a given mm
 */
void spu_flush_all_slbs(struct mm_struct *mm)
{
	struct spu *spu;
	unsigned long flags;

	spin_lock_irqsave(&spu_list_lock, flags);
	list_for_each_entry(spu, &spu_full_list, full_list) {
		if (spu->mm == mm)
			spu_invalidate_slbs(spu);
	}
	spin_unlock_irqrestore(&spu_list_lock, flags);
}

/* The hack below stinks... try to do something better one of
 * these days... Does it even work properly with NR_CPUS == 1 ?
 */
static inline void mm_needs_global_tlbie(struct mm_struct *mm)
{
	int nr = (NR_CPUS > 1) ? NR_CPUS : NR_CPUS + 1;

	/* Global TLBIE broadcast required with SPEs. */
	__cpus_setall(&mm->cpu_vm_mask, nr);
}

void spu_associate_mm(struct spu *spu, struct mm_struct *mm)
{
	unsigned long flags;

	spin_lock_irqsave(&spu_list_lock, flags);
	spu->mm = mm;
	spin_unlock_irqrestore(&spu_list_lock, flags);
	if (mm)
		mm_needs_global_tlbie(mm);
}
EXPORT_SYMBOL_GPL(spu_associate_mm);

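/*
 * Class 0 (error) trap helpers: report the error condition to the
 * context owner through the registered dma_callback.
 */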
static int __spu_trap_invalid_dma(struct spu *spu)
{
	pr_debug("%s\n", __func__);
	spu->dma_callback(spu, SPE_EVENT_INVALID_DMA);
	return 0;
}

static int __spu_trap_dma_align(struct spu *spu)
{
	pr_debug("%s\n", __func__);
	spu->dma_callback(spu, SPE_EVENT_DMA_ALIGNMENT);
	return 0;
}

static int __spu_trap_error(struct spu *spu)
{
	pr_debug("%s\n", __func__);
	spu->dma_callback(spu, SPE_EVENT_SPE_ERROR);
	return 0;
}

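/*
 * Kick a suspended MFC DMA queue back into action, unless a context
 * switch is in flight; in that case the context switch code is
 * responsible for restarting the queue.
 */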
static void spu_restart_dma(struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	if (!test_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags))
		out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESTART_DMA_COMMAND);
}

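/*
 * Handle an SLB miss raised by the SPE: build an SLB entry for the
 * faulting effective address (user, vmalloc/IO or linear kernel
 * region) and write it into the MFC's eight-entry SLB, replacing
 * entries round-robin via spu->slb_replace.
 */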
static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	struct mm_struct *mm = spu->mm;
	u64 esid, vsid, llp;
	int psize;

	pr_debug("%s\n", __func__);

	if (test_bit(SPU_CONTEXT_SWITCH_ACTIVE, &spu->flags)) {
		/* SLBs are pre-loaded for context switch, so
		 * we should never get here!
		 */
		printk(KERN_ERR "%s: invalid access during switch!\n", __func__);
		return 1;
	}
	esid = (ea & ESID_MASK) | SLB_ESID_V;

	switch (REGION_ID(ea)) {
	case USER_REGION_ID:
#ifdef CONFIG_PPC_MM_SLICES
		psize = get_slice_psize(mm, ea);
#else
		psize = mm->context.user_psize;
#endif
		vsid = (get_vsid(mm->context.id, ea) << SLB_VSID_SHIFT) |
			SLB_VSID_USER;
		break;
	case VMALLOC_REGION_ID:
		if (ea < VMALLOC_END)
			psize = mmu_vmalloc_psize;
		else
			psize = mmu_io_psize;
		vsid = (get_kernel_vsid(ea) << SLB_VSID_SHIFT) |
			SLB_VSID_KERNEL;
		break;
	case KERNEL_REGION_ID:
		psize = mmu_linear_psize;
		vsid = (get_kernel_vsid(ea) << SLB_VSID_SHIFT) |
			SLB_VSID_KERNEL;
		break;
	default:
		/* Future: support kernel segments so that drivers
		 * can use SPUs.
		 */
		pr_debug("invalid region access at %016lx\n", ea);
		return 1;
	}
	llp = mmu_psize_defs[psize].sllp;

	out_be64(&priv2->slb_index_W, spu->slb_replace);
	out_be64(&priv2->slb_vsid_RW, vsid | llp);
	out_be64(&priv2->slb_esid_RW, esid);

	spu->slb_replace++;
	if (spu->slb_replace >= 8)
		spu->slb_replace = 0;

	spu_restart_dma(spu);

	return 0;
}

extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap); //XXX
static int __spu_trap_data_map(struct spu *spu, unsigned long ea, u64 dsisr)
{
	pr_debug("%s, %lx, %lx\n", __func__, dsisr, ea);

	/* Handle kernel space hash faults immediately.
	   User hash faults need to be deferred to process context. */
	if ((dsisr & MFC_DSISR_PTE_NOT_FOUND)
	    && REGION_ID(ea) != USER_REGION_ID
	    && hash_page(ea, _PAGE_PRESENT, 0x300) == 0) {
		spu_restart_dma(spu);
		return 0;
	}

	if (test_bit(SPU_CONTEXT_SWITCH_ACTIVE, &spu->flags)) {
		printk(KERN_ERR "%s: invalid access during switch!\n", __func__);
		return 1;
	}

	spu->dar = ea;
	spu->dsisr = dsisr;
	mb();
	spu->stop_callback(spu);
	return 0;
}

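/*
 * SPE interrupts come in three classes, each with its own interrupt
 * line and handler:
 *   class 0 - errors (DMA alignment, invalid MFC DMA, SPU error),
 *   class 1 - translation faults (SLB segment miss, mapping fault),
 *   class 2 - mailboxes, stop-and-signal, halt, DMA tag completion.
 */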
static irqreturn_t
spu_irq_class_0(int irq, void *data)
{
	struct spu *spu;

	spu = data;
	spu->class_0_pending = 1;
	spu->stop_callback(spu);

	return IRQ_HANDLED;
}

int
spu_irq_class_0_bottom(struct spu *spu)
{
	unsigned long stat, mask;
	unsigned long flags;

	spu->class_0_pending = 0;

	spin_lock_irqsave(&spu->register_lock, flags);
	mask = spu_int_mask_get(spu, 0);
	stat = spu_int_stat_get(spu, 0);

	stat &= mask;

	if (stat & 1) /* invalid DMA alignment */
		__spu_trap_dma_align(spu);

	if (stat & 2) /* invalid MFC DMA */
		__spu_trap_invalid_dma(spu);

	if (stat & 4) /* error on SPU */
		__spu_trap_error(spu);

	spu_int_stat_clear(spu, 0, stat);
	spin_unlock_irqrestore(&spu->register_lock, flags);

	return (stat & 0x7) ? -EIO : 0;
}
EXPORT_SYMBOL_GPL(spu_irq_class_0_bottom);

static irqreturn_t
spu_irq_class_1(int irq, void *data)
{
	struct spu *spu;
	unsigned long stat, mask, dar, dsisr;

	spu = data;

	/* atomically read & clear class1 status. */
	spin_lock(&spu->register_lock);
	mask = spu_int_mask_get(spu, 1);
	stat = spu_int_stat_get(spu, 1) & mask;
	dar = spu_mfc_dar_get(spu);
	dsisr = spu_mfc_dsisr_get(spu);
	if (stat & 2) /* mapping fault */
		spu_mfc_dsisr_set(spu, 0ul);
	spu_int_stat_clear(spu, 1, stat);
	spin_unlock(&spu->register_lock);
	pr_debug("%s: %lx %lx %lx %lx\n", __func__, mask, stat,
			dar, dsisr);

	if (stat & 1) /* segment fault */
		__spu_trap_data_seg(spu, dar);

	if (stat & 2) { /* mapping fault */
		__spu_trap_data_map(spu, dar, dsisr);
	}

	if (stat & 4) /* ls compare & suspend on get */
		;

	if (stat & 8) /* ls compare & suspend on put */
		;

	return stat ? IRQ_HANDLED : IRQ_NONE;
}

static irqreturn_t
spu_irq_class_2(int irq, void *data)
{
	struct spu *spu;
	unsigned long stat;
	unsigned long mask;

	spu = data;
	spin_lock(&spu->register_lock);
	stat = spu_int_stat_get(spu, 2);
	mask = spu_int_mask_get(spu, 2);
	/* ignore interrupts we're not waiting for */
	stat &= mask;
	/*
	 * mailbox interrupts (0x1 and 0x10) are level triggered.
	 * mask them now before acknowledging.
	 */
	if (stat & 0x11)
		spu_int_mask_and(spu, 2, ~(stat & 0x11));
	/* acknowledge all interrupts before the callbacks */
	spu_int_stat_clear(spu, 2, stat);
	spin_unlock(&spu->register_lock);

	pr_debug("class 2 interrupt %d, %lx, %lx\n", irq, stat, mask);

	if (stat & 1) /* PPC core mailbox */
		spu->ibox_callback(spu);

	if (stat & 2) /* SPU stop-and-signal */
		spu->stop_callback(spu);

	if (stat & 4) /* SPU halted */
		spu->stop_callback(spu);

	if (stat & 8) /* DMA tag group complete */
		spu->mfc_callback(spu);

	if (stat & 0x10) /* SPU mailbox threshold */
		spu->wbox_callback(spu);

	return stat ? IRQ_HANDLED : IRQ_NONE;
}

static int spu_request_irqs(struct spu *spu)
{
	int ret = 0;

	if (spu->irqs[0] != NO_IRQ) {
		snprintf(spu->irq_c0, sizeof (spu->irq_c0), "spe%02d.0",
			 spu->number);
		ret = request_irq(spu->irqs[0], spu_irq_class_0,
				  IRQF_DISABLED,
				  spu->irq_c0, spu);
		if (ret)
			goto bail0;
	}
	if (spu->irqs[1] != NO_IRQ) {
		snprintf(spu->irq_c1, sizeof (spu->irq_c1), "spe%02d.1",
			 spu->number);
		ret = request_irq(spu->irqs[1], spu_irq_class_1,
				  IRQF_DISABLED,
				  spu->irq_c1, spu);
		if (ret)
			goto bail1;
	}
	if (spu->irqs[2] != NO_IRQ) {
		snprintf(spu->irq_c2, sizeof (spu->irq_c2), "spe%02d.2",
			 spu->number);
		ret = request_irq(spu->irqs[2], spu_irq_class_2,
				  IRQF_DISABLED,
				  spu->irq_c2, spu);
		if (ret)
			goto bail2;
	}
	return 0;

bail2:
	if (spu->irqs[1] != NO_IRQ)
		free_irq(spu->irqs[1], spu);
bail1:
	if (spu->irqs[0] != NO_IRQ)
		free_irq(spu->irqs[0], spu);
bail0:
	return ret;
}

static void spu_free_irqs(struct spu *spu)
{
	if (spu->irqs[0] != NO_IRQ)
		free_irq(spu->irqs[0], spu);
	if (spu->irqs[1] != NO_IRQ)
		free_irq(spu->irqs[1], spu);
	if (spu->irqs[2] != NO_IRQ)
		free_irq(spu->irqs[2], spu);
}

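/*
 * Put the SPE channel state into a known initial condition: zero the
 * channel data where the architecture expects it, then set the channel
 * counts to meaningful start values.
 */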
static void spu_init_channels(struct spu *spu)
{
	static const struct {
		unsigned channel;
		unsigned count;
	} zero_list[] = {
		{ 0x00, 1, }, { 0x01, 1, }, { 0x03, 1, }, { 0x04, 1, },
		{ 0x18, 1, }, { 0x19, 1, }, { 0x1b, 1, }, { 0x1d, 1, },
	}, count_list[] = {
		{ 0x00, 0, }, { 0x03, 0, }, { 0x04, 0, }, { 0x15, 16, },
		{ 0x17, 1, }, { 0x18, 0, }, { 0x19, 0, }, { 0x1b, 0, },
		{ 0x1c, 1, }, { 0x1d, 0, }, { 0x1e, 1, },
	};
	struct spu_priv2 __iomem *priv2;
	int i;

	priv2 = spu->priv2;

	/* initialize all channel data to zero */
	for (i = 0; i < ARRAY_SIZE(zero_list); i++) {
		int count;

		out_be64(&priv2->spu_chnlcntptr_RW, zero_list[i].channel);
		for (count = 0; count < zero_list[i].count; count++)
			out_be64(&priv2->spu_chnldata_RW, 0);
	}

	/* initialize channel counts to meaningful values */
	for (i = 0; i < ARRAY_SIZE(count_list); i++) {
		out_be64(&priv2->spu_chnlcntptr_RW, count_list[i].channel);
		out_be64(&priv2->spu_chnlcnt_RW, count_list[i].count);
	}
}

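/*
 * Take a free SPU from the given NUMA node, if one is available.
 * Channels are reinitialized at allocation time rather than at free
 * time, so callers always receive an SPU in a clean state.
 */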
struct spu *spu_alloc_node(int node)
{
	struct spu *spu = NULL;

	mutex_lock(&spu_mutex);
	if (!list_empty(&spu_list[node])) {
		spu = list_entry(spu_list[node].next, struct spu, list);
		list_del_init(&spu->list);
		pr_debug("Got SPU %d %d\n", spu->number, spu->node);
	}
	mutex_unlock(&spu_mutex);

	if (spu)
		spu_init_channels(spu);
	return spu;
}
EXPORT_SYMBOL_GPL(spu_alloc_node);

struct spu *spu_alloc(void)
{
	struct spu *spu = NULL;
	int node;

	for (node = 0; node < MAX_NUMNODES; node++) {
		spu = spu_alloc_node(node);
		if (spu)
			break;
	}

	return spu;
}

void spu_free(struct spu *spu)
{
	mutex_lock(&spu_mutex);
	list_add_tail(&spu->list, &spu_list[spu->node]);
	mutex_unlock(&spu_mutex);
}
EXPORT_SYMBOL_GPL(spu_free);

static int spu_shutdown(struct sys_device *sysdev)
{
	struct spu *spu = container_of(sysdev, struct spu, sysdev);

	spu_free_irqs(spu);
	spu_destroy_spu(spu);
	return 0;
}

struct sysdev_class spu_sysdev_class = {
	set_kset_name("spu"),
	.shutdown = spu_shutdown,
};

int spu_add_sysdev_attr(struct sysdev_attribute *attr)
{
	struct spu *spu;

	mutex_lock(&spu_mutex);
	list_for_each_entry(spu, &spu_full_list, full_list)
		sysdev_create_file(&spu->sysdev, attr);
	mutex_unlock(&spu_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(spu_add_sysdev_attr);

int spu_add_sysdev_attr_group(struct attribute_group *attrs)
{
	struct spu *spu;

	mutex_lock(&spu_mutex);
	list_for_each_entry(spu, &spu_full_list, full_list)
		sysfs_create_group(&spu->sysdev.kobj, attrs);
	mutex_unlock(&spu_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(spu_add_sysdev_attr_group);

void spu_remove_sysdev_attr(struct sysdev_attribute *attr)
{
	struct spu *spu;

	mutex_lock(&spu_mutex);
	list_for_each_entry(spu, &spu_full_list, full_list)
		sysdev_remove_file(&spu->sysdev, attr);
	mutex_unlock(&spu_mutex);
}
EXPORT_SYMBOL_GPL(spu_remove_sysdev_attr);

void spu_remove_sysdev_attr_group(struct attribute_group *attrs)
{
	struct spu *spu;

	mutex_lock(&spu_mutex);
	list_for_each_entry(spu, &spu_full_list, full_list)
		sysfs_remove_group(&spu->sysdev.kobj, attrs);
	mutex_unlock(&spu_mutex);
}
EXPORT_SYMBOL_GPL(spu_remove_sysdev_attr_group);

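/*
 * Minimal usage sketch for the attribute hooks above (hypothetical;
 * "stats" and stats_show are made-up names, not part of this file):
 *
 *	static ssize_t stats_show(struct sys_device *sysdev, char *buf)
 *	{
 *		struct spu *spu = container_of(sysdev, struct spu, sysdev);
 *		return sprintf(buf, "%d\n", spu->class_0_pending);
 *	}
 *	static SYSDEV_ATTR(stats, 0444, stats_show, NULL);
 *	...
 *	spu_add_sysdev_attr(&attr_stats);
 *
 * A matching spu_remove_sysdev_attr(&attr_stats) takes the file away
 * from every SPU again.
 */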
static int spu_create_sysdev(struct spu *spu)
{
	int ret;

	spu->sysdev.id = spu->number;
	spu->sysdev.cls = &spu_sysdev_class;
	ret = sysdev_register(&spu->sysdev);
	if (ret) {
		printk(KERN_ERR "Can't register SPU %d with sysfs\n",
				spu->number);
		return ret;
	}

	sysfs_add_device_to_node(&spu->sysdev, spu->node);

	return 0;
}

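/*
 * Instantiate a single SPU: allocate and number it, have the platform
 * backend create its low-level state, program the MFC SDR/SR1
 * registers, hook up the three interrupt classes, register the sysdev
 * and finally add the SPU to the per-node and global lists.
 */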
static int __init create_spu(void *data)
{
	struct spu *spu;
	int ret;
	static int number;
	unsigned long flags;

	ret = -ENOMEM;
	spu = kzalloc(sizeof (*spu), GFP_KERNEL);
	if (!spu)
		goto out;

	spin_lock_init(&spu->register_lock);
	mutex_lock(&spu_mutex);
	spu->number = number++;
	mutex_unlock(&spu_mutex);

	ret = spu_create_spu(spu, data);
	if (ret)
		goto out_free;

	spu_mfc_sdr_setup(spu);
	spu_mfc_sr1_set(spu, 0x33);
	ret = spu_request_irqs(spu);
	if (ret)
		goto out_destroy;

	ret = spu_create_sysdev(spu);
	if (ret)
		goto out_free_irqs;

	mutex_lock(&spu_mutex);
	spin_lock_irqsave(&spu_list_lock, flags);
	list_add(&spu->list, &spu_list[spu->node]);
	list_add(&spu->full_list, &spu_full_list);
	spin_unlock_irqrestore(&spu_list_lock, flags);
	mutex_unlock(&spu_mutex);

	goto out;

out_free_irqs:
	spu_free_irqs(spu);
out_destroy:
	spu_destroy_spu(spu);
out_free:
	kfree(spu);
out:
	return ret;
}

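/*
 * Module init: prepare the per-node free lists, register the "spu"
 * sysdev class, then have the platform backend enumerate its SPUs,
 * invoking create_spu() once per SPU found.
 */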
static int __init init_spu_base(void)
{
	int i, ret = 0;

	for (i = 0; i < MAX_NUMNODES; i++)
		INIT_LIST_HEAD(&spu_list[i]);

	if (!spu_management_ops)
		goto out;

	/* create sysdev class for spus */
	ret = sysdev_class_register(&spu_sysdev_class);
	if (ret)
		goto out;

	ret = spu_enumerate_spus(create_spu);
	if (ret) {
		printk(KERN_WARNING "%s: Error initializing spus\n",
			__func__);
		goto out_unregister_sysdev_class;
	}

	xmon_register_spus(&spu_full_list);

	return 0;

out_unregister_sysdev_class:
	sysdev_class_unregister(&spu_sysdev_class);
out:
	return ret;
}
module_init(init_spu_base);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>");