/*
 * Low-level SPU handling
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
 *
 * Author: Arnd Bergmann <arndb@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#undef DEBUG

#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/wait.h>

#include <asm/io.h>
#include <asm/prom.h>
#include <linux/mutex.h>
#include <asm/spu.h>
#include <asm/spu_priv1.h>
#include <asm/mmu_context.h>

#include "interrupt.h"

const struct spu_priv1_ops *spu_priv1_ops;

EXPORT_SYMBOL_GPL(spu_priv1_ops);

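/*
 * Class 0 error traps (invalid MFC DMA, DMA alignment, SPU error):
 * these conditions cannot be recovered from here, so the current
 * task gets a signal.
 */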
static int __spu_trap_invalid_dma(struct spu *spu)
{
	pr_debug("%s\n", __FUNCTION__);
	force_sig(SIGBUS, /* info, */ current);
	return 0;
}

static int __spu_trap_dma_align(struct spu *spu)
{
	pr_debug("%s\n", __FUNCTION__);
	force_sig(SIGBUS, /* info, */ current);
	return 0;
}

static int __spu_trap_error(struct spu *spu)
{
	pr_debug("%s\n", __FUNCTION__);
	force_sig(SIGILL, /* info, */ current);
	return 0;
}

static void spu_restart_dma(struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	if (!test_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags))
		out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESTART_DMA_COMMAND);
}

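/*
 * Handle an SLB miss in the MFC: build a user-space SLB entry for the
 * faulting effective address and write it into the SPU's SLB array,
 * replacing existing entries in simple round-robin order.
 */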
static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	struct mm_struct *mm = spu->mm;
	u64 esid, vsid, llp;

	pr_debug("%s\n", __FUNCTION__);

	if (test_bit(SPU_CONTEXT_SWITCH_ACTIVE, &spu->flags)) {
		/* SLBs are pre-loaded for context switch, so
		 * we should never get here!
		 */
		printk("%s: invalid access during switch!\n", __func__);
		return 1;
	}
	if (!mm || (REGION_ID(ea) != USER_REGION_ID)) {
		/* Future: support kernel segments so that drivers
		 * can use SPUs.
		 */
		pr_debug("invalid region access at %016lx\n", ea);
		return 1;
	}

	esid = (ea & ESID_MASK) | SLB_ESID_V;
#ifdef CONFIG_HUGETLB_PAGE
	if (in_hugepage_area(mm->context, ea))
		llp = mmu_psize_defs[mmu_huge_psize].sllp;
	else
#endif
		llp = mmu_psize_defs[mmu_virtual_psize].sllp;
	vsid = (get_vsid(mm->context.id, ea) << SLB_VSID_SHIFT) |
			SLB_VSID_USER | llp;

	out_be64(&priv2->slb_index_W, spu->slb_replace);
	out_be64(&priv2->slb_vsid_RW, vsid);
	out_be64(&priv2->slb_esid_RW, esid);

	spu->slb_replace++;
	if (spu->slb_replace >= 8)
		spu->slb_replace = 0;

	spu_restart_dma(spu);

	return 0;
}

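/*
 * Handle a hash-table miss in the MFC. Kernel-region faults are
 * resolved immediately via hash_page(); user-space faults are stored
 * in the spu structure and handed to the stop callback, to be
 * resolved later in process context by spu_irq_class_1_bottom().
 */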
extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap); //XXX
static int __spu_trap_data_map(struct spu *spu, unsigned long ea, u64 dsisr)
{
	pr_debug("%s, %lx, %lx\n", __FUNCTION__, dsisr, ea);

	/* Handle kernel space hash faults immediately.
	   User hash faults need to be deferred to process context. */
	if ((dsisr & MFC_DSISR_PTE_NOT_FOUND)
	    && REGION_ID(ea) != USER_REGION_ID
	    && hash_page(ea, _PAGE_PRESENT, 0x300) == 0) {
		spu_restart_dma(spu);
		return 0;
	}

	if (test_bit(SPU_CONTEXT_SWITCH_ACTIVE, &spu->flags)) {
		printk("%s: invalid access during switch!\n", __func__);
		return 1;
	}

	spu->dar = ea;
	spu->dsisr = dsisr;
	mb();
	spu->stop_callback(spu);
	return 0;
}

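/*
 * Interrupt handlers. Each SPE exposes three interrupt classes:
 * class 0 for errors, class 1 for MFC translation faults and
 * class 2 for application interrupts (mailboxes, stop-and-signal,
 * halt and DMA tag-group completion).
 */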
static irqreturn_t
spu_irq_class_0(int irq, void *data, struct pt_regs *regs)
{
	struct spu *spu;

	spu = data;
	spu->class_0_pending = 1;
	spu->stop_callback(spu);

	return IRQ_HANDLED;
}

int
spu_irq_class_0_bottom(struct spu *spu)
{
	unsigned long stat, mask;

	spu->class_0_pending = 0;

	mask = spu_int_mask_get(spu, 0);
	stat = spu_int_stat_get(spu, 0);

	stat &= mask;

	if (stat & 1) /* invalid MFC DMA */
		__spu_trap_invalid_dma(spu);

	if (stat & 2) /* invalid DMA alignment */
		__spu_trap_dma_align(spu);

	if (stat & 4) /* error on SPU */
		__spu_trap_error(spu);

	spu_int_stat_clear(spu, 0, stat);

	return (stat & 0x7) ? -EIO : 0;
}
EXPORT_SYMBOL_GPL(spu_irq_class_0_bottom);

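/*
 * Class 1 interrupts signal MFC translation faults. The status is
 * read and cleared atomically under register_lock so that the saved
 * fault state (DAR/DSISR) matches the interrupt being handled.
 */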
static irqreturn_t
spu_irq_class_1(int irq, void *data, struct pt_regs *regs)
{
	struct spu *spu;
	unsigned long stat, mask, dar, dsisr;

	spu = data;

	/* atomically read & clear class1 status. */
	spin_lock(&spu->register_lock);
	mask = spu_int_mask_get(spu, 1);
	stat = spu_int_stat_get(spu, 1) & mask;
	dar = spu_mfc_dar_get(spu);
	dsisr = spu_mfc_dsisr_get(spu);
	if (stat & 2) /* mapping fault */
		spu_mfc_dsisr_set(spu, 0ul);
	spu_int_stat_clear(spu, 1, stat);
	spin_unlock(&spu->register_lock);
	pr_debug("%s: %lx %lx %lx %lx\n", __FUNCTION__, mask, stat,
			dar, dsisr);

	if (stat & 1) /* segment fault */
		__spu_trap_data_seg(spu, dar);

	if (stat & 2) { /* mapping fault */
		__spu_trap_data_map(spu, dar, dsisr);
	}

	if (stat & 4) /* ls compare & suspend on get */
		;

	if (stat & 8) /* ls compare & suspend on put */
		;

	return stat ? IRQ_HANDLED : IRQ_NONE;
}
EXPORT_SYMBOL_GPL(spu_irq_class_1_bottom);

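/*
 * Class 2 interrupts carry the application-visible events: mailboxes,
 * SPU stop-and-signal, SPU halt, DMA tag-group completion and the
 * mailbox threshold. They are dispatched to the callbacks installed
 * by the owning context.
 */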
static irqreturn_t
spu_irq_class_2(int irq, void *data, struct pt_regs *regs)
{
	struct spu *spu;
	unsigned long stat;
	unsigned long mask;

	spu = data;
	spin_lock(&spu->register_lock);
	stat = spu_int_stat_get(spu, 2);
	mask = spu_int_mask_get(spu, 2);
	/* ignore interrupts we're not waiting for */
	stat &= mask;
	/*
	 * mailbox interrupts (0x1 and 0x10) are level triggered.
	 * mask them now before acknowledging.
	 */
	if (stat & 0x11)
		spu_int_mask_and(spu, 2, ~(stat & 0x11));
	/* acknowledge all interrupts before the callbacks */
	spu_int_stat_clear(spu, 2, stat);
	spin_unlock(&spu->register_lock);

	pr_debug("class 2 interrupt %d, %lx, %lx\n", irq, stat, mask);

	if (stat & 1) /* PPC core mailbox */
		spu->ibox_callback(spu);

	if (stat & 2) /* SPU stop-and-signal */
		spu->stop_callback(spu);

	if (stat & 4) /* SPU halted */
		spu->stop_callback(spu);

	if (stat & 8) /* DMA tag group complete */
		spu->mfc_callback(spu);

	if (stat & 0x10) /* SPU mailbox threshold */
		spu->wbox_callback(spu);

	return stat ? IRQ_HANDLED : IRQ_NONE;
}

static int
spu_request_irqs(struct spu *spu)
{
	int ret;
	int irq_base;

	irq_base = IIC_NODE_STRIDE * spu->node + IIC_SPE_OFFSET;

	snprintf(spu->irq_c0, sizeof (spu->irq_c0), "spe%02d.0", spu->number);
	ret = request_irq(irq_base + spu->isrc,
			spu_irq_class_0, SA_INTERRUPT, spu->irq_c0, spu);
	if (ret)
		goto out;

	snprintf(spu->irq_c1, sizeof (spu->irq_c1), "spe%02d.1", spu->number);
	ret = request_irq(irq_base + IIC_CLASS_STRIDE + spu->isrc,
			spu_irq_class_1, SA_INTERRUPT, spu->irq_c1, spu);
	if (ret)
		goto out1;

	snprintf(spu->irq_c2, sizeof (spu->irq_c2), "spe%02d.2", spu->number);
	ret = request_irq(irq_base + 2*IIC_CLASS_STRIDE + spu->isrc,
			spu_irq_class_2, SA_INTERRUPT, spu->irq_c2, spu);
	if (ret)
		goto out2;
	goto out;

out2:
	free_irq(irq_base + IIC_CLASS_STRIDE + spu->isrc, spu);
out1:
	free_irq(irq_base + spu->isrc, spu);
out:
	return ret;
}

static void
spu_free_irqs(struct spu *spu)
{
	int irq_base;

	irq_base = IIC_NODE_STRIDE * spu->node + IIC_SPE_OFFSET;

	free_irq(irq_base + spu->isrc, spu);
	free_irq(irq_base + IIC_CLASS_STRIDE + spu->isrc, spu);
	free_irq(irq_base + 2*IIC_CLASS_STRIDE + spu->isrc, spu);
}

static LIST_HEAD(spu_list);
static DEFINE_MUTEX(spu_mutex);

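/*
 * SPU channel state is accessed indirectly through priv2: writing a
 * channel index to spu_chnlcntptr_RW selects the channel, after which
 * spu_chnldata_RW and spu_chnlcnt_RW access its data and count.
 * zero_list/count_list below pair a channel index with the number of
 * data words to clear resp. the count value to program.
 */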
static void spu_init_channels(struct spu *spu)
{
	static const struct {
		unsigned channel;
		unsigned count;
	} zero_list[] = {
		{ 0x00, 1, }, { 0x01, 1, }, { 0x03, 1, }, { 0x04, 1, },
		{ 0x18, 1, }, { 0x19, 1, }, { 0x1b, 1, }, { 0x1d, 1, },
	}, count_list[] = {
		{ 0x00, 0, }, { 0x03, 0, }, { 0x04, 0, }, { 0x15, 16, },
		{ 0x17, 1, }, { 0x18, 0, }, { 0x19, 0, }, { 0x1b, 0, },
		{ 0x1c, 1, }, { 0x1d, 0, }, { 0x1e, 1, },
	};
	struct spu_priv2 __iomem *priv2;
	int i;

	priv2 = spu->priv2;

	/* initialize all channel data to zero */
	for (i = 0; i < ARRAY_SIZE(zero_list); i++) {
		int count;

		out_be64(&priv2->spu_chnlcntptr_RW, zero_list[i].channel);
		for (count = 0; count < zero_list[i].count; count++)
			out_be64(&priv2->spu_chnldata_RW, 0);
	}

	/* initialize channel counts to meaningful values */
	for (i = 0; i < ARRAY_SIZE(count_list); i++) {
		out_be64(&priv2->spu_chnlcntptr_RW, count_list[i].channel);
		out_be64(&priv2->spu_chnlcnt_RW, count_list[i].count);
	}
}

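/*
 * Simple allocator for physical SPUs: available SPUs sit on spu_list,
 * protected by spu_mutex. spu_alloc() hands out the first free SPU
 * with freshly initialized channels; spu_free() returns it to the list.
 */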
struct spu *spu_alloc(void)
{
	struct spu *spu;

	mutex_lock(&spu_mutex);
	if (!list_empty(&spu_list)) {
		spu = list_entry(spu_list.next, struct spu, list);
		list_del_init(&spu->list);
		pr_debug("Got SPU %x %d\n", spu->isrc, spu->number);
	} else {
		pr_debug("No SPU left\n");
		spu = NULL;
	}
	mutex_unlock(&spu_mutex);

	if (spu)
		spu_init_channels(spu);

	return spu;
}
EXPORT_SYMBOL_GPL(spu_alloc);

void spu_free(struct spu *spu)
{
	mutex_lock(&spu_mutex);
	list_add_tail(&spu->list, &spu_list);
	mutex_unlock(&spu_mutex);
}
EXPORT_SYMBOL_GPL(spu_free);

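/*
 * Resolve an MFC page fault on behalf of the SPU, using the mm that
 * is attached to the SPU context. This mirrors the generic page
 * fault path: look up the VMA, check access permissions, then let
 * handle_mm_fault() populate the page tables.
 */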
static int spu_handle_mm_fault(struct spu *spu)
{
	struct mm_struct *mm = spu->mm;
	struct vm_area_struct *vma;
	u64 ea, dsisr, is_write;
	int ret;

	ea = spu->dar;
	dsisr = spu->dsisr;
#if 0
	if (!IS_VALID_EA(ea)) {
		return -EFAULT;
	}
#endif /* XXX */
	if (mm == NULL) {
		return -EFAULT;
	}
	if (mm->pgd == NULL) {
		return -EFAULT;
	}

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, ea);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= ea)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
#if 0
	if (expand_stack(vma, ea))
		goto bad_area;
#endif /* XXX */
good_area:
	is_write = dsisr & MFC_DSISR_ACCESS_PUT;
	if (is_write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	} else {
		if (dsisr & MFC_DSISR_ACCESS_DENIED)
			goto bad_area;
		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto bad_area;
	}
	ret = 0;
	switch (handle_mm_fault(mm, vma, ea, is_write)) {
	case VM_FAULT_MINOR:
		current->min_flt++;
		break;
	case VM_FAULT_MAJOR:
		current->maj_flt++;
		break;
	case VM_FAULT_SIGBUS:
		ret = -EFAULT;
		goto bad_area;
	case VM_FAULT_OOM:
		ret = -ENOMEM;
		goto bad_area;
	default:
		BUG();
	}
	up_read(&mm->mmap_sem);
	return ret;

bad_area:
	up_read(&mm->mmap_sem);
	return -EFAULT;
}

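/*
 * Bottom half for class 1 (translation) faults, run in process
 * context. First try to fill the hash table directly; if that fails,
 * fall back to the full mm fault path above. On success the stalled
 * DMA is restarted, otherwise the fault is treated as an invalid DMA.
 */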
int spu_irq_class_1_bottom(struct spu *spu)
{
	u64 ea, dsisr, access, error = 0UL;
	int ret = 0;

	ea = spu->dar;
	dsisr = spu->dsisr;
	if (dsisr & (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED)) {
		u64 flags;

		access = (_PAGE_PRESENT | _PAGE_USER);
		access |= (dsisr & MFC_DSISR_ACCESS_PUT) ? _PAGE_RW : 0UL;
		local_irq_save(flags);
		if (hash_page(ea, access, 0x300) != 0)
			error |= CLASS1_ENABLE_STORAGE_FAULT_INTR;
		local_irq_restore(flags);
	}
	if (error & CLASS1_ENABLE_STORAGE_FAULT_INTR) {
		if ((ret = spu_handle_mm_fault(spu)) != 0)
			error |= CLASS1_ENABLE_STORAGE_FAULT_INTR;
		else
			error &= ~CLASS1_ENABLE_STORAGE_FAULT_INTR;
	}
	spu->dar = 0UL;
	spu->dsisr = 0UL;
	if (!error) {
		spu_restart_dma(spu);
	} else {
		__spu_trap_invalid_dma(spu);
	}
	return ret;
}

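/*
 * Device tree probing helpers. find_spu_node_id() reads the BE node
 * id from the SPE's grandparent node. Each SPE register area is
 * described as a packed (address, length) property; before an area
 * is ioremap()ed, cell_spuprop_present() adds its pages to the
 * node's memory map via __add_pages() so they have struct page
 * backing.
 */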
static int __init find_spu_node_id(struct device_node *spe)
{
	unsigned int *id;
	struct device_node *cpu;
	cpu = spe->parent->parent;
	id = (unsigned int *)get_property(cpu, "node-id", NULL);
	return id ? *id : 0;
}

static int __init cell_spuprop_present(struct spu *spu, struct device_node *spe,
		const char *prop)
{
	static DEFINE_MUTEX(add_spumem_mutex);

	struct address_prop {
		unsigned long address;
		unsigned int len;
	} __attribute__((packed)) *p;
	int proplen;

	unsigned long start_pfn, nr_pages;
	struct pglist_data *pgdata;
	struct zone *zone;
	int ret;

	p = (void *)get_property(spe, prop, &proplen);
	WARN_ON(proplen != sizeof (*p));

	start_pfn = p->address >> PAGE_SHIFT;
	nr_pages = ((unsigned long)p->len + PAGE_SIZE - 1) >> PAGE_SHIFT;

	pgdata = NODE_DATA(spu->nid);
	zone = pgdata->node_zones;

	/* XXX rethink locking here */
	mutex_lock(&add_spumem_mutex);
	ret = __add_pages(zone, start_pfn, nr_pages);
	mutex_unlock(&add_spumem_mutex);

	return ret;
}

static void __iomem * __init map_spe_prop(struct spu *spu,
		struct device_node *n, const char *name)
{
	struct address_prop {
		unsigned long address;
		unsigned int len;
	} __attribute__((packed)) *prop;

	void *p;
	int proplen;
	void *ret = NULL;
	int err = 0;

	p = get_property(n, name, &proplen);
	if (proplen != sizeof (struct address_prop))
		return NULL;

	prop = p;

	err = cell_spuprop_present(spu, n, name);
	if (err && (err != -EEXIST))
		goto out;

	ret = ioremap(prop->address, prop->len);

out:
	return ret;
}

static void spu_unmap(struct spu *spu)
{
	iounmap(spu->priv2);
	iounmap(spu->priv1);
	iounmap(spu->problem);
	iounmap((u8 __iomem *)spu->local_store);
}

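/*
 * Map all register areas of one SPE from the device tree: the
 * interrupt source ("isrc"), local store, problem state and the
 * priv1/priv2 privileged areas. priv1 may legitimately be absent
 * when running under a hypervisor.
 */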
static int __init spu_map_device(struct spu *spu, struct device_node *node)
{
	char *prop;
	int ret;

	ret = -ENODEV;
	prop = get_property(node, "isrc", NULL);
	if (!prop)
		goto out;
	spu->isrc = *(unsigned int *)prop;

	spu->name = get_property(node, "name", NULL);
	if (!spu->name)
		goto out;

	prop = get_property(node, "local-store", NULL);
	if (!prop)
		goto out;
	spu->local_store_phys = *(unsigned long *)prop;

	/* we use local store as ram, not io memory */
	spu->local_store = (void __force *)
		map_spe_prop(spu, node, "local-store");
	if (!spu->local_store)
		goto out;

	prop = get_property(node, "problem", NULL);
	if (!prop)
		goto out_unmap;
	spu->problem_phys = *(unsigned long *)prop;

	spu->problem = map_spe_prop(spu, node, "problem");
	if (!spu->problem)
		goto out_unmap;

	spu->priv1 = map_spe_prop(spu, node, "priv1");
	/* priv1 is not available on a hypervisor */

	spu->priv2 = map_spe_prop(spu, node, "priv2");
	if (!spu->priv2)
		goto out_unmap;
	ret = 0;
	goto out;

out_unmap:
	spu_unmap(spu);
out:
	return ret;
}

struct sysdev_class spu_sysdev_class = {
	set_kset_name("spu")
};

static ssize_t spu_show_isrc(struct sys_device *sysdev, char *buf)
{
	struct spu *spu = container_of(sysdev, struct spu, sysdev);
	return sprintf(buf, "%d\n", spu->isrc);
}
static SYSDEV_ATTR(isrc, 0400, spu_show_isrc, NULL);

extern int attach_sysdev_to_node(struct sys_device *dev, int nid);

static int spu_create_sysdev(struct spu *spu)
{
	int ret;

	spu->sysdev.id = spu->number;
	spu->sysdev.cls = &spu_sysdev_class;
	ret = sysdev_register(&spu->sysdev);
	if (ret) {
		printk(KERN_ERR "Can't register SPU %d with sysfs\n",
				spu->number);
		return ret;
	}

	sysdev_create_file(&spu->sysdev, &attr_isrc);
	sysfs_add_device_to_node(&spu->sysdev, spu->nid);

	return 0;
}

static void spu_destroy_sysdev(struct spu *spu)
{
	sysdev_remove_file(&spu->sysdev, &attr_isrc);
	sysfs_remove_device_from_node(&spu->sysdev, spu->nid);
	sysdev_unregister(&spu->sysdev);
}

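/*
 * Create one SPU instance: map its registers, assign a number, hook
 * up the three interrupt classes, register the sysdev and put it on
 * the list of available SPUs.
 */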
static int __init create_spu(struct device_node *spe)
{
	struct spu *spu;
	int ret;
	static int number;

	ret = -ENOMEM;
	spu = kzalloc(sizeof (*spu), GFP_KERNEL);
	if (!spu)
		goto out;

	ret = spu_map_device(spu, spe);
	if (ret)
		goto out_free;

	spu->node = find_spu_node_id(spe);
	spu->nid = of_node_to_nid(spe);
	if (spu->nid == -1)
		spu->nid = 0;
	spin_lock_init(&spu->register_lock);
	spu_mfc_sdr_set(spu, mfspr(SPRN_SDR1));
	spu_mfc_sr1_set(spu, 0x33);
	mutex_lock(&spu_mutex);

	spu->number = number++;
	ret = spu_request_irqs(spu);
	if (ret)
		goto out_unmap;

	ret = spu_create_sysdev(spu);
	if (ret)
		goto out_free_irqs;

	list_add(&spu->list, &spu_list);
	mutex_unlock(&spu_mutex);

	pr_debug("Using SPE %s %02x %p %p %p %p %d\n",
		spu->name, spu->isrc, spu->local_store,
		spu->problem, spu->priv1, spu->priv2, spu->number);
	goto out;

out_free_irqs:
	spu_free_irqs(spu);

out_unmap:
	mutex_unlock(&spu_mutex);
	spu_unmap(spu);
out_free:
	kfree(spu);
out:
	return ret;
}

static void destroy_spu(struct spu *spu)
{
	list_del_init(&spu->list);

	spu_destroy_sysdev(spu);
	spu_free_irqs(spu);
	spu_unmap(spu);
	kfree(spu);
}

static void cleanup_spu_base(void)
{
	struct spu *spu, *tmp;
	mutex_lock(&spu_mutex);
	list_for_each_entry_safe(spu, tmp, &spu_list, list)
		destroy_spu(spu);
	mutex_unlock(&spu_mutex);
	sysdev_class_unregister(&spu_sysdev_class);
}
module_exit(cleanup_spu_base);

static int __init init_spu_base(void)
{
	struct device_node *node;
	int ret;

	/* create sysdev class for spus */
	ret = sysdev_class_register(&spu_sysdev_class);
	if (ret)
		return ret;

	ret = -ENODEV;
	for (node = of_find_node_by_type(NULL, "spe");
			node; node = of_find_node_by_type(node, "spe")) {
		ret = create_spu(node);
		if (ret) {
			printk(KERN_WARNING "%s: Error initializing %s\n",
				__FUNCTION__, node->name);
			cleanup_spu_base();
			break;
		}
	}
	/* in some old firmware versions, the spe is called 'spc', so we
	   look for that as well */
	for (node = of_find_node_by_type(NULL, "spc");
			node; node = of_find_node_by_type(node, "spc")) {
		ret = create_spu(node);
		if (ret) {
			printk(KERN_WARNING "%s: Error initializing %s\n",
				__FUNCTION__, node->name);
			cleanup_spu_base();
			break;
		}
	}
	return ret;
}
module_init(init_spu_base);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>");