/*
 * Low-level SPU handling
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
 *
 * Author: Arnd Bergmann <arndb@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#undef DEBUG

#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/wait.h>

#include <asm/io.h>
#include <asm/prom.h>
#include <asm/spu.h>
#include <asm/mmu_context.h>

#include "interrupt.h"

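/*
 * Class 0 error traps, called from the *_bottom handlers below: they
 * report fatal conditions (invalid MFC DMA, bad DMA alignment, SPU
 * error) by sending a signal to the current task, which is expected to
 * be the one controlling the SPU.
 */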
static int __spu_trap_invalid_dma(struct spu *spu)
{
	pr_debug("%s\n", __FUNCTION__);
	force_sig(SIGBUS, /* info, */ current);
	return 0;
}

static int __spu_trap_dma_align(struct spu *spu)
{
	pr_debug("%s\n", __FUNCTION__);
	force_sig(SIGBUS, /* info, */ current);
	return 0;
}

static int __spu_trap_error(struct spu *spu)
{
	pr_debug("%s\n", __FUNCTION__);
	force_sig(SIGILL, /* info, */ current);
	return 0;
}

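/*
 * Tell the MFC to resume processing its command queue after a fault
 * has been handled.  The restart is skipped while a context switch is
 * pending (SPU_CONTEXT_SWITCH_PENDING).
 */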
static void spu_restart_dma(struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	if (!test_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags))
		out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESTART_DMA_COMMAND);
}

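/*
 * SLB miss (segment fault) handling: construct an SLB entry for the
 * faulting effective address and write it into one of the eight SPU
 * SLB slots, which are recycled in simple round-robin order via
 * spu->slb_replace.  Only user-region addresses backed by a mm are
 * handled; everything else is reported back as an error.
 */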
static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	struct mm_struct *mm = spu->mm;
	u64 esid, vsid;

	pr_debug("%s\n", __FUNCTION__);

	if (test_bit(SPU_CONTEXT_SWITCH_ACTIVE, &spu->flags)) {
		/* SLBs are pre-loaded for context switch, so
		 * we should never get here!
		 */
		printk(KERN_ERR "%s: invalid access during switch!\n", __func__);
		return 1;
	}
	if (!mm || (REGION_ID(ea) != USER_REGION_ID)) {
		/* Future: support kernel segments so that drivers
		 * can use SPUs.
		 */
		pr_debug("invalid region access at %016lx\n", ea);
		return 1;
	}

	esid = (ea & ESID_MASK) | SLB_ESID_V;
	vsid = (get_vsid(mm->context.id, ea) << SLB_VSID_SHIFT) | SLB_VSID_USER;
	if (in_hugepage_area(mm->context, ea))
		vsid |= SLB_VSID_L;

	out_be64(&priv2->slb_index_W, spu->slb_replace);
	out_be64(&priv2->slb_vsid_RW, vsid);
	out_be64(&priv2->slb_esid_RW, esid);

	spu->slb_replace++;
	if (spu->slb_replace >= 8)
		spu->slb_replace = 0;

	spu_restart_dma(spu);

	return 0;
}

extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap); //XXX
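/*
 * Class 1 data storage (mapping) fault: hash faults on kernel addresses
 * are resolved immediately via hash_page() and the DMA queue is
 * restarted.  User-space faults are latched in spu->dar/dsisr and passed
 * to the stop_callback, so that they can be resolved later from process
 * context (see spu_irq_class_1_bottom below).
 */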
static int __spu_trap_data_map(struct spu *spu, unsigned long ea, u64 dsisr)
{
	pr_debug("%s, %lx, %lx\n", __FUNCTION__, dsisr, ea);

	/*
	 * Handle kernel space hash faults immediately.
	 * User hash faults need to be deferred to process context.
	 */
	if ((dsisr & MFC_DSISR_PTE_NOT_FOUND)
	    && REGION_ID(ea) != USER_REGION_ID
	    && hash_page(ea, _PAGE_PRESENT, 0x300) == 0) {
		spu_restart_dma(spu);
		return 0;
	}

	if (test_bit(SPU_CONTEXT_SWITCH_ACTIVE, &spu->flags)) {
		printk(KERN_ERR "%s: invalid access during switch!\n", __func__);
		return 1;
	}

	spu->dar = ea;
	spu->dsisr = dsisr;
	mb();
	if (spu->stop_callback)
		spu->stop_callback(spu);
	return 0;
}

static int __spu_trap_mailbox(struct spu *spu)
{
	if (spu->ibox_callback)
		spu->ibox_callback(spu);

	/* atomically disable SPU mailbox interrupts */
	spin_lock(&spu->register_lock);
	spu_int_mask_and(spu, 2, ~0x1);
	spin_unlock(&spu->register_lock);
	return 0;
}

static int __spu_trap_stop(struct spu *spu)
{
	pr_debug("%s\n", __FUNCTION__);
	spu->stop_code = in_be32(&spu->problem->spu_status_R);
	if (spu->stop_callback)
		spu->stop_callback(spu);
	return 0;
}

static int __spu_trap_halt(struct spu *spu)
{
	pr_debug("%s\n", __FUNCTION__);
	spu->stop_code = in_be32(&spu->problem->spu_status_R);
	if (spu->stop_callback)
		spu->stop_callback(spu);
	return 0;
}

static int __spu_trap_tag_group(struct spu *spu)
{
	pr_debug("%s\n", __FUNCTION__);
	spu->mfc_callback(spu);
	return 0;
}

static int __spu_trap_spubox(struct spu *spu)
{
	if (spu->wbox_callback)
		spu->wbox_callback(spu);

	/* atomically disable SPU mailbox threshold interrupts */
	spin_lock(&spu->register_lock);
	spu_int_mask_and(spu, 2, ~0x10);
	spin_unlock(&spu->register_lock);
	return 0;
}

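/*
 * Hard interrupt entry points.  Each SPE raises three interrupt classes:
 * class 0 for errors, class 1 for translation (storage) faults, and
 * class 2 for mailbox, stop-and-signal, halt and DMA completion events.
 * The class 0 and class 1 handlers mainly collect state and leave the
 * heavy lifting to the *_bottom functions.
 */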
static irqreturn_t
spu_irq_class_0(int irq, void *data, struct pt_regs *regs)
{
	struct spu *spu;

	spu = data;
	spu->class_0_pending = 1;
	if (spu->stop_callback)
		spu->stop_callback(spu);

	return IRQ_HANDLED;
}

int
spu_irq_class_0_bottom(struct spu *spu)
{
	unsigned long stat, mask;

	spu->class_0_pending = 0;

	mask = spu_int_mask_get(spu, 0);
	stat = spu_int_stat_get(spu, 0);

	stat &= mask;

	if (stat & 1) /* invalid MFC DMA */
		__spu_trap_invalid_dma(spu);

	if (stat & 2) /* invalid DMA alignment */
		__spu_trap_dma_align(spu);

	if (stat & 4) /* error on SPU */
		__spu_trap_error(spu);

	spu_int_stat_clear(spu, 0, stat);

	return (stat & 0x7) ? -EIO : 0;
}
EXPORT_SYMBOL_GPL(spu_irq_class_0_bottom);

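/*
 * Class 1 top half: read and clear the fault status atomically under
 * register_lock, latch DAR/DSISR while the fault is still current, and
 * dispatch to the trap helpers above.
 */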
static irqreturn_t
spu_irq_class_1(int irq, void *data, struct pt_regs *regs)
{
	struct spu *spu;
	unsigned long stat, mask, dar, dsisr;

	spu = data;

	/* atomically read & clear class1 status. */
	spin_lock(&spu->register_lock);
	mask = spu_int_mask_get(spu, 1);
	stat = spu_int_stat_get(spu, 1) & mask;
	dar = spu_mfc_dar_get(spu);
	dsisr = spu_mfc_dsisr_get(spu);
	if (stat & 2) /* mapping fault */
		spu_mfc_dsisr_set(spu, 0ul);
	spu_int_stat_clear(spu, 1, stat);
	spin_unlock(&spu->register_lock);
	pr_debug("%s: %lx %lx %lx %lx\n", __FUNCTION__, mask, stat,
			dar, dsisr);

	if (stat & 1) /* segment fault */
		__spu_trap_data_seg(spu, dar);

	if (stat & 2) { /* mapping fault */
		__spu_trap_data_map(spu, dar, dsisr);
	}

	if (stat & 4) /* ls compare & suspend on get */
		;

	if (stat & 8) /* ls compare & suspend on put */
		;

	return stat ? IRQ_HANDLED : IRQ_NONE;
}
EXPORT_SYMBOL_GPL(spu_irq_class_1_bottom);

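/*
 * Class 2 top half: dispatch mailbox, stop-and-signal, halt, DMA
 * tag-group completion and mailbox-threshold events bit by bit.
 */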
static irqreturn_t
spu_irq_class_2(int irq, void *data, struct pt_regs *regs)
{
	struct spu *spu;
	unsigned long stat;
	unsigned long mask;

	spu = data;
	stat = spu_int_stat_get(spu, 2);
	mask = spu_int_mask_get(spu, 2);

	pr_debug("class 2 interrupt %d, %lx, %lx\n", irq, stat, mask);

	stat &= mask;

	if (stat & 1) /* PPC core mailbox */
		__spu_trap_mailbox(spu);

	if (stat & 2) /* SPU stop-and-signal */
		__spu_trap_stop(spu);

	if (stat & 4) /* SPU halted */
		__spu_trap_halt(spu);

	if (stat & 8) /* DMA tag group complete */
		__spu_trap_tag_group(spu);

	if (stat & 0x10) /* SPU mailbox threshold */
		__spu_trap_spubox(spu);

	spu_int_stat_clear(spu, 2, stat);
	return stat ? IRQ_HANDLED : IRQ_NONE;
}

static int
spu_request_irqs(struct spu *spu)
{
	int ret;
	int irq_base;

	irq_base = IIC_NODE_STRIDE * spu->node + IIC_SPE_OFFSET;

	snprintf(spu->irq_c0, sizeof (spu->irq_c0), "spe%02d.0", spu->number);
	ret = request_irq(irq_base + spu->isrc,
		 spu_irq_class_0, SA_INTERRUPT, spu->irq_c0, spu);
	if (ret)
		goto out;

	snprintf(spu->irq_c1, sizeof (spu->irq_c1), "spe%02d.1", spu->number);
	ret = request_irq(irq_base + IIC_CLASS_STRIDE + spu->isrc,
		 spu_irq_class_1, SA_INTERRUPT, spu->irq_c1, spu);
	if (ret)
		goto out1;

	snprintf(spu->irq_c2, sizeof (spu->irq_c2), "spe%02d.2", spu->number);
	ret = request_irq(irq_base + 2*IIC_CLASS_STRIDE + spu->isrc,
		 spu_irq_class_2, SA_INTERRUPT, spu->irq_c2, spu);
	if (ret)
		goto out2;
	goto out;

out2:
	free_irq(irq_base + IIC_CLASS_STRIDE + spu->isrc, spu);
out1:
	free_irq(irq_base + spu->isrc, spu);
out:
	return ret;
}

static void
spu_free_irqs(struct spu *spu)
{
	int irq_base;

	irq_base = IIC_NODE_STRIDE * spu->node + IIC_SPE_OFFSET;

	free_irq(irq_base + spu->isrc, spu);
	free_irq(irq_base + IIC_CLASS_STRIDE + spu->isrc, spu);
	free_irq(irq_base + 2*IIC_CLASS_STRIDE + spu->isrc, spu);
}

static LIST_HEAD(spu_list);
static DEFINE_MUTEX(spu_mutex);

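/*
 * Reset the SPU channel state through the indirect channel-access
 * registers in priv2: clear the data of the channels in zero_list and
 * set the counts in count_list to known-good values.  Called whenever
 * an SPU is handed out by spu_alloc().
 */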
static void spu_init_channels(struct spu *spu)
{
	static const struct {
		unsigned channel;
		unsigned count;
	} zero_list[] = {
		{ 0x00, 1, }, { 0x01, 1, }, { 0x03, 1, }, { 0x04, 1, },
		{ 0x18, 1, }, { 0x19, 1, }, { 0x1b, 1, }, { 0x1d, 1, },
	}, count_list[] = {
		{ 0x00, 0, }, { 0x03, 0, }, { 0x04, 0, }, { 0x15, 16, },
		{ 0x17, 1, }, { 0x18, 0, }, { 0x19, 0, }, { 0x1b, 0, },
		{ 0x1c, 1, }, { 0x1d, 0, }, { 0x1e, 1, },
	};
	struct spu_priv2 __iomem *priv2;
	int i;

	priv2 = spu->priv2;

	/* initialize all channel data to zero */
	for (i = 0; i < ARRAY_SIZE(zero_list); i++) {
		int count;

		out_be64(&priv2->spu_chnlcntptr_RW, zero_list[i].channel);
		for (count = 0; count < zero_list[i].count; count++)
			out_be64(&priv2->spu_chnldata_RW, 0);
	}

	/* initialize channel counts to meaningful values */
	for (i = 0; i < ARRAY_SIZE(count_list); i++) {
		out_be64(&priv2->spu_chnlcntptr_RW, count_list[i].channel);
		out_be64(&priv2->spu_chnlcnt_RW, count_list[i].count);
	}
}

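/*
 * Trivial SPU allocator: all SPUs discovered at boot sit on spu_list,
 * protected by spu_mutex.  spu_alloc() takes one off the list and
 * reinitializes its channels; spu_free() simply puts it back.
 */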
struct spu *spu_alloc(void)
{
	struct spu *spu;

	mutex_lock(&spu_mutex);
	if (!list_empty(&spu_list)) {
		spu = list_entry(spu_list.next, struct spu, list);
		list_del_init(&spu->list);
		pr_debug("Got SPU %x %d\n", spu->isrc, spu->number);
	} else {
		pr_debug("No SPU left\n");
		spu = NULL;
	}
	mutex_unlock(&spu_mutex);

	if (spu)
		spu_init_channels(spu);

	return spu;
}
EXPORT_SYMBOL_GPL(spu_alloc);

void spu_free(struct spu *spu)
{
	mutex_lock(&spu_mutex);
	list_add_tail(&spu->list, &spu_list);
	mutex_unlock(&spu_mutex);
}
EXPORT_SYMBOL_GPL(spu_free);

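/*
 * Resolve an MFC fault against the owning process' address space, much
 * like the main powerpc page fault handler: find the VMA, check the
 * access permissions against DSISR, and let handle_mm_fault() do the
 * actual work.
 */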
static int spu_handle_mm_fault(struct spu *spu)
{
	struct mm_struct *mm = spu->mm;
	struct vm_area_struct *vma;
	u64 ea, dsisr, is_write;
	int ret;

	ea = spu->dar;
	dsisr = spu->dsisr;
#if 0
	if (!IS_VALID_EA(ea)) {
		return -EFAULT;
	}
#endif /* XXX */
	if (mm == NULL) {
		return -EFAULT;
	}
	if (mm->pgd == NULL) {
		return -EFAULT;
	}

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, ea);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= ea)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
#if 0
	if (expand_stack(vma, ea))
		goto bad_area;
#endif /* XXX */
good_area:
	is_write = dsisr & MFC_DSISR_ACCESS_PUT;
	if (is_write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	} else {
		if (dsisr & MFC_DSISR_ACCESS_DENIED)
			goto bad_area;
		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto bad_area;
	}
	ret = 0;
	switch (handle_mm_fault(mm, vma, ea, is_write)) {
	case VM_FAULT_MINOR:
		current->min_flt++;
		break;
	case VM_FAULT_MAJOR:
		current->maj_flt++;
		break;
	case VM_FAULT_SIGBUS:
		ret = -EFAULT;
		goto bad_area;
	case VM_FAULT_OOM:
		ret = -ENOMEM;
		goto bad_area;
	default:
		BUG();
	}
	up_read(&mm->mmap_sem);
	return ret;

bad_area:
	up_read(&mm->mmap_sem);
	return -EFAULT;
}

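/*
 * Process-context bottom half for class 1 faults: first try to resolve
 * the fault through the hash table, then fall back to the full mm fault
 * path above.  If that also fails, the fault is turned into an
 * invalid-DMA trap, which delivers SIGBUS to the current task.
 */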
int spu_irq_class_1_bottom(struct spu *spu)
{
	u64 ea, dsisr, access, error = 0UL;
	int ret = 0;

	ea = spu->dar;
	dsisr = spu->dsisr;
	if (dsisr & (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED)) {
		u64 flags;

		access = (_PAGE_PRESENT | _PAGE_USER);
		access |= (dsisr & MFC_DSISR_ACCESS_PUT) ? _PAGE_RW : 0UL;
		local_irq_save(flags);
		if (hash_page(ea, access, 0x300) != 0)
			error |= CLASS1_ENABLE_STORAGE_FAULT_INTR;
		local_irq_restore(flags);
	}
	if (error & CLASS1_ENABLE_STORAGE_FAULT_INTR) {
		if ((ret = spu_handle_mm_fault(spu)) != 0)
			error |= CLASS1_ENABLE_STORAGE_FAULT_INTR;
		else
			error &= ~CLASS1_ENABLE_STORAGE_FAULT_INTR;
	}
	spu->dar = 0UL;
	spu->dsisr = 0UL;
	if (!error) {
		spu_restart_dma(spu);
	} else {
		__spu_trap_invalid_dma(spu);
	}
	return ret;
}

void spu_irq_setaffinity(struct spu *spu, int cpu)
{
	u64 target = iic_get_target_id(cpu);
	u64 route = target << 48 | target << 32 | target << 16;
	spu_int_route_set(spu, route);
}
EXPORT_SYMBOL_GPL(spu_irq_setaffinity);

static int __init find_spu_node_id(struct device_node *spe)
{
	unsigned int *id;
	struct device_node *cpu;
	cpu = spe->parent->parent;
	id = (unsigned int *)get_property(cpu, "node-id", NULL);
	return id ? *id : 0;
}

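/*
 * Register the memory range described by an SPE address property (such
 * as the local store) with the memory hotplug code via __add_pages(),
 * so that struct pages exist for it.  -EEXIST from here is treated as
 * harmless by the caller.
 */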
static int __init cell_spuprop_present(struct device_node *spe,
		const char *prop)
{
	static DEFINE_MUTEX(add_spumem_mutex);

	struct address_prop {
		unsigned long address;
		unsigned int len;
	} __attribute__((packed)) *p;
	int proplen;

	unsigned long start_pfn, nr_pages;
	int node_id;
	struct pglist_data *pgdata;
	struct zone *zone;
	int ret;

	p = (void *)get_property(spe, prop, &proplen);
	WARN_ON(proplen != sizeof (*p));

	start_pfn = p->address >> PAGE_SHIFT;
	nr_pages = ((unsigned long)p->len + PAGE_SIZE - 1) >> PAGE_SHIFT;

	/*
	 * XXX need to get the correct NUMA node in here. This may
	 * be different from the spe::node_id property, e.g. when
	 * the host firmware is not NUMA aware.
	 */
	node_id = 0;

	pgdata = NODE_DATA(node_id);
	zone = pgdata->node_zones;

	/* XXX rethink locking here */
	mutex_lock(&add_spumem_mutex);
	ret = __add_pages(zone, start_pfn, nr_pages);
	mutex_unlock(&add_spumem_mutex);

	return ret;
}

static void __iomem * __init map_spe_prop(struct device_node *n,
		const char *name)
{
	struct address_prop {
		unsigned long address;
		unsigned int len;
	} __attribute__((packed)) *prop;

	void *p;
	int proplen;
	void *ret = NULL;
	int err = 0;

	p = get_property(n, name, &proplen);
	if (proplen != sizeof (struct address_prop))
		return NULL;

	prop = p;

	err = cell_spuprop_present(n, name);
	if (err && (err != -EEXIST))
		goto out;

	ret = ioremap(prop->address, prop->len);

out:
	return ret;
}

static void spu_unmap(struct spu *spu)
{
	iounmap(spu->priv2);
	iounmap(spu->priv1);
	iounmap(spu->problem);
	iounmap((u8 __iomem *)spu->local_store);
}

static int __init spu_map_device(struct spu *spu, struct device_node *spe)
{
	char *prop;
	int ret;

	ret = -ENODEV;
	prop = get_property(spe, "isrc", NULL);
	if (!prop)
		goto out;
	spu->isrc = *(unsigned int *)prop;

	spu->name = get_property(spe, "name", NULL);
	if (!spu->name)
		goto out;

	prop = get_property(spe, "local-store", NULL);
	if (!prop)
		goto out;
	spu->local_store_phys = *(unsigned long *)prop;

	/* we use local store as ram, not io memory */
	spu->local_store = (void __force *)map_spe_prop(spe, "local-store");
	if (!spu->local_store)
		goto out;

	prop = get_property(spe, "problem", NULL);
	if (!prop)
		goto out_unmap;
	spu->problem_phys = *(unsigned long *)prop;

	spu->problem = map_spe_prop(spe, "problem");
	if (!spu->problem)
		goto out_unmap;

	spu->priv1 = map_spe_prop(spe, "priv1");
	/* priv1 is not available on a hypervisor */

	spu->priv2 = map_spe_prop(spe, "priv2");
	if (!spu->priv2)
		goto out_unmap;
	ret = 0;
	goto out;

out_unmap:
	spu_unmap(spu);
out:
	return ret;
}

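/*
 * Instantiate one struct spu from a device tree node: map its register
 * areas, reset the software state, program the MFC SDR/SR1 registers
 * and request the three interrupt lines before putting the new SPU on
 * the global list.
 */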
static int __init create_spu(struct device_node *spe)
{
	struct spu *spu;
	int ret;
	static int number;

	ret = -ENOMEM;
	spu = kmalloc(sizeof (*spu), GFP_KERNEL);
	if (!spu)
		goto out;

	ret = spu_map_device(spu, spe);
	if (ret)
		goto out_free;

	spu->node = find_spu_node_id(spe);
	spu->stop_code = 0;
	spu->slb_replace = 0;
	spu->mm = NULL;
	spu->ctx = NULL;
	spu->rq = NULL;
	spu->pid = 0;
	spu->class_0_pending = 0;
	spu->flags = 0UL;
	spu->dar = 0UL;
	spu->dsisr = 0UL;
	spin_lock_init(&spu->register_lock);

	spu_mfc_sdr_set(spu, mfspr(SPRN_SDR1));
	spu_mfc_sr1_set(spu, 0x33);

	spu->ibox_callback = NULL;
	spu->wbox_callback = NULL;
	spu->stop_callback = NULL;
	spu->mfc_callback = NULL;

	mutex_lock(&spu_mutex);
	spu->number = number++;
	ret = spu_request_irqs(spu);
	if (ret)
		goto out_unmap;

	list_add(&spu->list, &spu_list);
	mutex_unlock(&spu_mutex);

	pr_debug("Using SPE %s %02x %p %p %p %p %d\n",
		spu->name, spu->isrc, spu->local_store,
		spu->problem, spu->priv1, spu->priv2, spu->number);
	goto out;

out_unmap:
	mutex_unlock(&spu_mutex);
	spu_unmap(spu);
out_free:
	kfree(spu);
out:
	return ret;
}

static void destroy_spu(struct spu *spu)
{
	list_del_init(&spu->list);

	spu_free_irqs(spu);
	spu_unmap(spu);
	kfree(spu);
}

static void cleanup_spu_base(void)
{
	struct spu *spu, *tmp;
	mutex_lock(&spu_mutex);
	list_for_each_entry_safe(spu, tmp, &spu_list, list)
		destroy_spu(spu);
	mutex_unlock(&spu_mutex);
}
module_exit(cleanup_spu_base);

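/*
 * Probe the device tree for SPEs.  Current firmware uses device nodes
 * of type "spe"; some older firmware calls them "spc", so both names
 * are tried.
 */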
static int __init init_spu_base(void)
{
	struct device_node *node;
	int ret;

	ret = -ENODEV;
	for (node = of_find_node_by_type(NULL, "spe");
			node; node = of_find_node_by_type(node, "spe")) {
		ret = create_spu(node);
		if (ret) {
			printk(KERN_WARNING "%s: Error initializing %s\n",
				__FUNCTION__, node->name);
			cleanup_spu_base();
			break;
		}
	}
	/* in some old firmware versions, the spe is called 'spc', so we
	   look for that as well */
	for (node = of_find_node_by_type(NULL, "spc");
			node; node = of_find_node_by_type(node, "spc")) {
		ret = create_spu(node);
		if (ret) {
			printk(KERN_WARNING "%s: Error initializing %s\n",
				__FUNCTION__, node->name);
			cleanup_spu_base();
			break;
		}
	}
	return ret;
}
module_init(init_spu_base);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>");