/*
 * Low-level SPU handling
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
 *
 * Author: Arnd Bergmann <arndb@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#undef DEBUG

#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/poll.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/wait.h>

#include <asm/firmware.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/spu.h>
#include <asm/spu_priv1.h>
#include <asm/mmu_context.h>
#include <asm/xmon.h>

#include "interrupt.h"

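/*
 * Access to the privileged (priv1) register area is routed through this
 * ops vector so that a hypervisor platform can supply its own backend;
 * on bare metal the registers are memory-mapped directly (spu_map_device()
 * below only maps priv1 when not running in an LPAR).
 */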
const struct spu_priv1_ops *spu_priv1_ops;
EXPORT_SYMBOL_GPL(spu_priv1_ops);

static int __spu_trap_invalid_dma(struct spu *spu)
{
	pr_debug("%s\n", __func__);
	spu->dma_callback(spu, SPE_EVENT_INVALID_DMA);
	return 0;
}

static int __spu_trap_dma_align(struct spu *spu)
{
	pr_debug("%s\n", __func__);
	spu->dma_callback(spu, SPE_EVENT_DMA_ALIGNMENT);
	return 0;
}

static int __spu_trap_error(struct spu *spu)
{
	pr_debug("%s\n", __func__);
	spu->dma_callback(spu, SPE_EVENT_SPE_ERROR);
	return 0;
}

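/*
 * Restart the MFC DMA queue after a fault has been resolved.  While
 * SPU_CONTEXT_SWITCH_PENDING is set the restart is skipped; the
 * context-switch code is presumably responsible for resuming DMA once
 * the switch has completed.
 */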
static void spu_restart_dma(struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	if (!test_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags))
		out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESTART_DMA_COMMAND);
}

static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	struct mm_struct *mm = spu->mm;
	u64 esid, vsid, llp;

	pr_debug("%s\n", __func__);

	if (test_bit(SPU_CONTEXT_SWITCH_ACTIVE, &spu->flags)) {
		/* SLBs are pre-loaded for context switch, so
		 * we should never get here!
		 */
		printk(KERN_ERR "%s: invalid access during switch!\n",
		       __func__);
		return 1;
	}
	esid = (ea & ESID_MASK) | SLB_ESID_V;

	switch (REGION_ID(ea)) {
	case USER_REGION_ID:
#ifdef CONFIG_HUGETLB_PAGE
		if (in_hugepage_area(mm->context, ea))
			llp = mmu_psize_defs[mmu_huge_psize].sllp;
		else
#endif
			llp = mmu_psize_defs[mmu_virtual_psize].sllp;
		vsid = (get_vsid(mm->context.id, ea) << SLB_VSID_SHIFT) |
			SLB_VSID_USER | llp;
		break;
	case VMALLOC_REGION_ID:
		llp = mmu_psize_defs[mmu_virtual_psize].sllp;
		vsid = (get_kernel_vsid(ea) << SLB_VSID_SHIFT) |
			SLB_VSID_KERNEL | llp;
		break;
	case KERNEL_REGION_ID:
		llp = mmu_psize_defs[mmu_linear_psize].sllp;
		vsid = (get_kernel_vsid(ea) << SLB_VSID_SHIFT) |
			SLB_VSID_KERNEL | llp;
		break;
	default:
		/* Future: support kernel segments so that drivers
		 * can use SPUs.
		 */
		pr_debug("invalid region access at %016lx\n", ea);
		return 1;
	}

	out_be64(&priv2->slb_index_W, spu->slb_replace);
	out_be64(&priv2->slb_vsid_RW, vsid);
	out_be64(&priv2->slb_esid_RW, esid);

	spu->slb_replace++;
	if (spu->slb_replace >= 8)
		spu->slb_replace = 0;

	spu_restart_dma(spu);

	return 0;
}

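/*
 * Note: the SLB fault handler above fills the SPU's SLB slots
 * round-robin via spu->slb_replace (the modulo-8 wrap suggests eight
 * slots), so a working set spanning more segments will simply keep
 * refaulting; there is no replacement policy beyond that.
 */
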
extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap); //XXX
static int __spu_trap_data_map(struct spu *spu, unsigned long ea, u64 dsisr)
{
	pr_debug("%s, %lx, %lx\n", __func__, dsisr, ea);

	/* Handle kernel space hash faults immediately.
	   User hash faults need to be deferred to process context. */
	if ((dsisr & MFC_DSISR_PTE_NOT_FOUND)
	    && REGION_ID(ea) != USER_REGION_ID
	    && hash_page(ea, _PAGE_PRESENT, 0x300) == 0) {
		spu_restart_dma(spu);
		return 0;
	}

	if (test_bit(SPU_CONTEXT_SWITCH_ACTIVE, &spu->flags)) {
		printk(KERN_ERR "%s: invalid access during switch!\n",
		       __func__);
		return 1;
	}

	spu->dar = ea;
	spu->dsisr = dsisr;
	mb();
	spu->stop_callback(spu);
	return 0;
}

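/*
 * Class 0 (error) interrupt status bits, as decoded below (taken from
 * the inline comments; see the CBEA interrupt registers for the
 * authoritative definitions):
 *	0x1	invalid DMA alignment
 *	0x2	invalid MFC DMA command
 *	0x4	SPU error
 */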
static irqreturn_t
spu_irq_class_0(int irq, void *data)
{
	struct spu *spu;

	spu = data;
	spu->class_0_pending = 1;
	spu->stop_callback(spu);

	return IRQ_HANDLED;
}

int
spu_irq_class_0_bottom(struct spu *spu)
{
	unsigned long stat, mask;

	spu->class_0_pending = 0;

	mask = spu_int_mask_get(spu, 0);
	stat = spu_int_stat_get(spu, 0);

	stat &= mask;

	if (stat & 1) /* invalid DMA alignment */
		__spu_trap_dma_align(spu);

	if (stat & 2) /* invalid MFC DMA */
		__spu_trap_invalid_dma(spu);

	if (stat & 4) /* error on SPU */
		__spu_trap_error(spu);

	spu_int_stat_clear(spu, 0, stat);

	return (stat & 0x7) ? -EIO : 0;
}
EXPORT_SYMBOL_GPL(spu_irq_class_0_bottom);

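/*
 * Class 1 (translation) interrupt status bits, as decoded below:
 *	0x1	MFC segment fault (SLB miss)
 *	0x2	MFC mapping fault (page table miss)
 *	0x4	LS compare & suspend on get
 *	0x8	LS compare & suspend on put
 */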
static irqreturn_t
spu_irq_class_1(int irq, void *data)
{
	struct spu *spu;
	unsigned long stat, mask, dar, dsisr;

	spu = data;

	/* atomically read & clear class1 status. */
	spin_lock(&spu->register_lock);
	mask = spu_int_mask_get(spu, 1);
	stat = spu_int_stat_get(spu, 1) & mask;
	dar = spu_mfc_dar_get(spu);
	dsisr = spu_mfc_dsisr_get(spu);
	if (stat & 2) /* mapping fault */
		spu_mfc_dsisr_set(spu, 0ul);
	spu_int_stat_clear(spu, 1, stat);
	spin_unlock(&spu->register_lock);
	pr_debug("%s: %lx %lx %lx %lx\n", __func__, mask, stat,
			dar, dsisr);

	if (stat & 1) /* segment fault */
		__spu_trap_data_seg(spu, dar);

	if (stat & 2) /* mapping fault */
		__spu_trap_data_map(spu, dar, dsisr);

	if (stat & 4) /* ls compare & suspend on get */
		;

	if (stat & 8) /* ls compare & suspend on put */
		;

	return stat ? IRQ_HANDLED : IRQ_NONE;
}

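/*
 * Class 2 (application) interrupt status bits, as decoded below:
 *	0x01	PPC core mailbox
 *	0x02	SPU stop-and-signal
 *	0x04	SPU halted
 *	0x08	DMA tag group complete
 *	0x10	SPU mailbox threshold
 */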
static irqreturn_t
spu_irq_class_2(int irq, void *data)
{
	struct spu *spu;
	unsigned long stat;
	unsigned long mask;

	spu = data;
	spin_lock(&spu->register_lock);
	stat = spu_int_stat_get(spu, 2);
	mask = spu_int_mask_get(spu, 2);
	/* ignore interrupts we're not waiting for */
	stat &= mask;
	/*
	 * mailbox interrupts (0x1 and 0x10) are level triggered.
	 * mask them now before acknowledging.
	 */
	if (stat & 0x11)
		spu_int_mask_and(spu, 2, ~(stat & 0x11));
	/* acknowledge all interrupts before the callbacks */
	spu_int_stat_clear(spu, 2, stat);
	spin_unlock(&spu->register_lock);

	pr_debug("class 2 interrupt %d, %lx, %lx\n", irq, stat, mask);

	if (stat & 1) /* PPC core mailbox */
		spu->ibox_callback(spu);

	if (stat & 2) /* SPU stop-and-signal */
		spu->stop_callback(spu);

	if (stat & 4) /* SPU halted */
		spu->stop_callback(spu);

	if (stat & 8) /* DMA tag group complete */
		spu->mfc_callback(spu);

	if (stat & 0x10) /* SPU mailbox threshold */
		spu->wbox_callback(spu);

	return stat ? IRQ_HANDLED : IRQ_NONE;
}

static int spu_request_irqs(struct spu *spu)
{
	int ret = 0;

	if (spu->irqs[0] != NO_IRQ) {
		snprintf(spu->irq_c0, sizeof (spu->irq_c0), "spe%02d.0",
			 spu->number);
		ret = request_irq(spu->irqs[0], spu_irq_class_0,
				  IRQF_DISABLED,
				  spu->irq_c0, spu);
		if (ret)
			goto bail0;
	}
	if (spu->irqs[1] != NO_IRQ) {
		snprintf(spu->irq_c1, sizeof (spu->irq_c1), "spe%02d.1",
			 spu->number);
		ret = request_irq(spu->irqs[1], spu_irq_class_1,
				  IRQF_DISABLED,
				  spu->irq_c1, spu);
		if (ret)
			goto bail1;
	}
	if (spu->irqs[2] != NO_IRQ) {
		snprintf(spu->irq_c2, sizeof (spu->irq_c2), "spe%02d.2",
			 spu->number);
		ret = request_irq(spu->irqs[2], spu_irq_class_2,
				  IRQF_DISABLED,
				  spu->irq_c2, spu);
		if (ret)
			goto bail2;
	}
	return 0;

bail2:
	if (spu->irqs[1] != NO_IRQ)
		free_irq(spu->irqs[1], spu);
bail1:
	if (spu->irqs[0] != NO_IRQ)
		free_irq(spu->irqs[0], spu);
bail0:
	return ret;
}

static void spu_free_irqs(struct spu *spu)
{
	if (spu->irqs[0] != NO_IRQ)
		free_irq(spu->irqs[0], spu);
	if (spu->irqs[1] != NO_IRQ)
		free_irq(spu->irqs[1], spu);
	if (spu->irqs[2] != NO_IRQ)
		free_irq(spu->irqs[2], spu);
}

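/*
 * Free SPUs live on per-node lists so that spu_alloc_node() can hand
 * out an SPU with the requested NUMA affinity, while spu_full_list
 * tracks every SPU for the sysdev attribute helpers below; both lists
 * are protected by spu_mutex.
 */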
static struct list_head spu_list[MAX_NUMNODES];
static LIST_HEAD(spu_full_list);
static DEFINE_MUTEX(spu_mutex);

static void spu_init_channels(struct spu *spu)
{
	static const struct {
		unsigned channel;
		unsigned count;
	} zero_list[] = {
		{ 0x00, 1, }, { 0x01, 1, }, { 0x03, 1, }, { 0x04, 1, },
		{ 0x18, 1, }, { 0x19, 1, }, { 0x1b, 1, }, { 0x1d, 1, },
	}, count_list[] = {
		{ 0x00, 0, }, { 0x03, 0, }, { 0x04, 0, }, { 0x15, 16, },
		{ 0x17, 1, }, { 0x18, 0, }, { 0x19, 0, }, { 0x1b, 0, },
		{ 0x1c, 1, }, { 0x1d, 0, }, { 0x1e, 1, },
	};
	struct spu_priv2 __iomem *priv2;
	int i;

	priv2 = spu->priv2;

	/* initialize all channel data to zero */
	for (i = 0; i < ARRAY_SIZE(zero_list); i++) {
		int count;

		out_be64(&priv2->spu_chnlcntptr_RW, zero_list[i].channel);
		for (count = 0; count < zero_list[i].count; count++)
			out_be64(&priv2->spu_chnldata_RW, 0);
	}

	/* initialize channel counts to meaningful values */
	for (i = 0; i < ARRAY_SIZE(count_list); i++) {
		out_be64(&priv2->spu_chnlcntptr_RW, count_list[i].channel);
		out_be64(&priv2->spu_chnlcnt_RW, count_list[i].count);
	}
}

struct spu *spu_alloc_node(int node)
{
	struct spu *spu = NULL;

	mutex_lock(&spu_mutex);
	if (!list_empty(&spu_list[node])) {
		spu = list_entry(spu_list[node].next, struct spu, list);
		list_del_init(&spu->list);
		pr_debug("Got SPU %d %d\n", spu->number, spu->node);
		spu_init_channels(spu);
	}
	mutex_unlock(&spu_mutex);

	return spu;
}
EXPORT_SYMBOL_GPL(spu_alloc_node);

struct spu *spu_alloc(void)
{
	struct spu *spu = NULL;
	int node;

	for (node = 0; node < MAX_NUMNODES; node++) {
		spu = spu_alloc_node(node);
		if (spu)
			break;
	}

	return spu;
}

void spu_free(struct spu *spu)
{
	mutex_lock(&spu_mutex);
	list_add_tail(&spu->list, &spu_list[spu->node]);
	mutex_unlock(&spu_mutex);
}
EXPORT_SYMBOL_GPL(spu_free);

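/*
 * Usage sketch for the allocator above (hypothetical caller; the spufs
 * scheduler is the intended in-tree consumer of this interface):
 *
 *	struct spu *spu = spu_alloc_node(node);	   .. or spu_alloc()
 *	if (!spu)
 *		return -ENOSPC;			   .. no idle SPU
 *	.. load a context onto spu and run it ..
 *	spu_free(spu);				   .. back on the free list
 */
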
static int spu_handle_mm_fault(struct spu *spu)
{
	struct mm_struct *mm = spu->mm;
	struct vm_area_struct *vma;
	u64 ea, dsisr, is_write;
	int ret;

	ea = spu->dar;
	dsisr = spu->dsisr;
#if 0
	if (!IS_VALID_EA(ea)) {
		return -EFAULT;
	}
#endif /* XXX */
	if (mm == NULL) {
		return -EFAULT;
	}
	if (mm->pgd == NULL) {
		return -EFAULT;
	}

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, ea);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= ea)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
#if 0
	if (expand_stack(vma, ea))
		goto bad_area;
#endif /* XXX */
good_area:
	is_write = dsisr & MFC_DSISR_ACCESS_PUT;
	if (is_write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	} else {
		if (dsisr & MFC_DSISR_ACCESS_DENIED)
			goto bad_area;
		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto bad_area;
	}
	ret = 0;
	switch (handle_mm_fault(mm, vma, ea, is_write)) {
	case VM_FAULT_MINOR:
		current->min_flt++;
		break;
	case VM_FAULT_MAJOR:
		current->maj_flt++;
		break;
	case VM_FAULT_SIGBUS:
		ret = -EFAULT;
		goto bad_area;
	case VM_FAULT_OOM:
		ret = -ENOMEM;
		goto bad_area;
	default:
		BUG();
	}
	up_read(&mm->mmap_sem);
	return ret;

bad_area:
	up_read(&mm->mmap_sem);
	return -EFAULT;
}

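/*
 * Second half of the class 1 fault handling.  spu_irq_class_1() above
 * only records dar/dsisr and kicks spu->stop_callback; this function is
 * expected to run in the context of the thread that owns spu->mm, where
 * handle_mm_fault() (via spu_handle_mm_fault() above) may safely sleep.
 */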
int spu_irq_class_1_bottom(struct spu *spu)
{
	u64 ea, dsisr, access, error = 0UL;
	int ret = 0;

	ea = spu->dar;
	dsisr = spu->dsisr;
	if (dsisr & (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED)) {
		u64 flags;

		access = (_PAGE_PRESENT | _PAGE_USER);
		access |= (dsisr & MFC_DSISR_ACCESS_PUT) ? _PAGE_RW : 0UL;
		local_irq_save(flags);
		if (hash_page(ea, access, 0x300) != 0)
			error |= CLASS1_ENABLE_STORAGE_FAULT_INTR;
		local_irq_restore(flags);
	}
	if (error & CLASS1_ENABLE_STORAGE_FAULT_INTR) {
		if ((ret = spu_handle_mm_fault(spu)) != 0)
			error |= CLASS1_ENABLE_STORAGE_FAULT_INTR;
		else
			error &= ~CLASS1_ENABLE_STORAGE_FAULT_INTR;
	}
	spu->dar = 0UL;
	spu->dsisr = 0UL;
	if (!error) {
		spu_restart_dma(spu);
	} else {
		__spu_trap_invalid_dma(spu);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(spu_irq_class_1_bottom);

static int __init find_spu_node_id(struct device_node *spe)
{
	const unsigned int *id;
	struct device_node *cpu;

	cpu = spe->parent->parent;
	id = get_property(cpu, "node-id", NULL);
	return id ? *id : 0;
}

static int __init cell_spuprop_present(struct spu *spu, struct device_node *spe,
		const char *prop)
{
	static DEFINE_MUTEX(add_spumem_mutex);

	const struct address_prop {
		unsigned long address;
		unsigned int len;
	} __attribute__((packed)) *p;
	int proplen;

	unsigned long start_pfn, nr_pages;
	struct pglist_data *pgdata;
	struct zone *zone;
	int ret;

	p = get_property(spe, prop, &proplen);
	WARN_ON(proplen != sizeof (*p));

	start_pfn = p->address >> PAGE_SHIFT;
	nr_pages = ((unsigned long)p->len + PAGE_SIZE - 1) >> PAGE_SHIFT;

	pgdata = NODE_DATA(spu->nid);
	zone = pgdata->node_zones;

	/* XXX rethink locking here */
	mutex_lock(&add_spumem_mutex);
	ret = __add_pages(zone, start_pfn, nr_pages);
	mutex_unlock(&add_spumem_mutex);

	return ret;
}

static void __iomem * __init map_spe_prop(struct spu *spu,
		struct device_node *n, const char *name)
{
	const struct address_prop {
		unsigned long address;
		unsigned int len;
	} __attribute__((packed)) *prop;

	const void *p;
	int proplen;
	void __iomem *ret = NULL;
	int err = 0;

	p = get_property(n, name, &proplen);
	if (proplen != sizeof (struct address_prop))
		return NULL;

	prop = p;

	err = cell_spuprop_present(spu, n, name);
	if (err && (err != -EEXIST))
		goto out;

	ret = ioremap(prop->address, prop->len);

out:
	return ret;
}

static void spu_unmap(struct spu *spu)
{
	iounmap(spu->priv2);
	iounmap(spu->priv1);
	iounmap(spu->problem);
	iounmap((__force u8 __iomem *)spu->local_store);
}

/* This function shall be abstracted for HV platforms */
static int __init spu_map_interrupts_old(struct spu *spu, struct device_node *np)
{
	unsigned int isrc;
	const u32 *tmp;

	/* Get the interrupt source unit from the device-tree */
	tmp = get_property(np, "isrc", NULL);
	if (!tmp)
		return -ENODEV;
	isrc = tmp[0];

	/* Add the node number */
	isrc |= spu->node << IIC_IRQ_NODE_SHIFT;

	/* Now map interrupts of all 3 classes */
	spu->irqs[0] = irq_create_mapping(NULL, IIC_IRQ_CLASS_0 | isrc);
	spu->irqs[1] = irq_create_mapping(NULL, IIC_IRQ_CLASS_1 | isrc);
	spu->irqs[2] = irq_create_mapping(NULL, IIC_IRQ_CLASS_2 | isrc);

	/* Right now, we only fail if class 2 failed */
	return spu->irqs[2] == NO_IRQ ? -EINVAL : 0;
}

static int __init spu_map_device_old(struct spu *spu, struct device_node *node)
{
	const char *prop;
	int ret;

	ret = -ENODEV;
	spu->name = get_property(node, "name", NULL);
	if (!spu->name)
		goto out;

	prop = get_property(node, "local-store", NULL);
	if (!prop)
		goto out;
	spu->local_store_phys = *(unsigned long *)prop;

	/* we use local store as ram, not io memory */
	spu->local_store = (void __force *)
		map_spe_prop(spu, node, "local-store");
	if (!spu->local_store)
		goto out;

	prop = get_property(node, "problem", NULL);
	if (!prop)
		goto out_unmap;
	spu->problem_phys = *(unsigned long *)prop;

	spu->problem = map_spe_prop(spu, node, "problem");
	if (!spu->problem)
		goto out_unmap;

	spu->priv1 = map_spe_prop(spu, node, "priv1");
	/* priv1 is not available on a hypervisor */

	spu->priv2 = map_spe_prop(spu, node, "priv2");
	if (!spu->priv2)
		goto out_unmap;
	ret = 0;
	goto out;

out_unmap:
	spu_unmap(spu);
out:
	return ret;
}

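/*
 * Newer device trees describe the three interrupt classes with standard
 * interrupt specifiers, which can be mapped generically below;
 * create_spu() falls back to the "isrc"-based spu_map_interrupts_old()
 * when this fails.
 */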
static int __init spu_map_interrupts(struct spu *spu, struct device_node *np)
{
	struct of_irq oirq;
	int ret;
	int i;

	for (i = 0; i < 3; i++) {
		ret = of_irq_map_one(np, i, &oirq);
		if (ret)
			goto err;

		ret = -EINVAL;
		spu->irqs[i] = irq_create_of_mapping(oirq.controller,
					oirq.specifier, oirq.size);
		if (spu->irqs[i] == NO_IRQ)
			goto err;
	}
	return 0;

err:
	/* oirq is not valid when of_irq_map_one() itself failed, so
	 * report the class index rather than the specifier */
	pr_debug("failed to map irq %d for spu %s\n", i, spu->name);
	for (; i >= 0; i--) {
		if (spu->irqs[i] != NO_IRQ)
			irq_dispose_mapping(spu->irqs[i]);
	}
	return ret;
}

static int spu_map_resource(struct device_node *node, int nr,
		void __iomem **virt, unsigned long *phys)
{
	struct resource resource = { };
	int ret;

	ret = of_address_to_resource(node, nr, &resource);
	if (ret)
		goto out;

	if (phys)
		*phys = resource.start;
	/* resource.end is inclusive, so map one byte past it */
	*virt = ioremap(resource.start, resource.end - resource.start + 1);
	if (!*virt)
		ret = -EINVAL;

out:
	return ret;
}

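/*
 * Device-tree "reg" resource layout assumed by the calls below:
 *	0  local store
 *	1  problem state area
 *	2  priv2 area
 *	3  priv1 area (skipped under a hypervisor, where priv1 access
 *	   goes through spu_priv1_ops instead)
 */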
static int __init spu_map_device(struct spu *spu, struct device_node *node)
{
	int ret = -ENODEV;

	spu->name = get_property(node, "name", NULL);
	if (!spu->name)
		goto out;

	ret = spu_map_resource(node, 0, (void __iomem **)&spu->local_store,
			       &spu->local_store_phys);
	if (ret)
		goto out;
	ret = spu_map_resource(node, 1, (void __iomem **)&spu->problem,
			       &spu->problem_phys);
	if (ret)
		goto out_unmap;
	ret = spu_map_resource(node, 2, (void __iomem **)&spu->priv2,
			       NULL);
	if (ret)
		goto out_unmap;

	if (!firmware_has_feature(FW_FEATURE_LPAR))
		ret = spu_map_resource(node, 3, (void __iomem **)&spu->priv1,
				       NULL);
	if (ret)
		goto out_unmap;
	return 0;

out_unmap:
	spu_unmap(spu);
out:
	pr_debug("failed to map spe %s: %d\n", spu->name, ret);
	return ret;
}

struct sysdev_class spu_sysdev_class = {
	set_kset_name("spu")
};

int spu_add_sysdev_attr(struct sysdev_attribute *attr)
{
	struct spu *spu;

	mutex_lock(&spu_mutex);
	list_for_each_entry(spu, &spu_full_list, full_list)
		sysdev_create_file(&spu->sysdev, attr);
	mutex_unlock(&spu_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(spu_add_sysdev_attr);

int spu_add_sysdev_attr_group(struct attribute_group *attrs)
{
	struct spu *spu;

	mutex_lock(&spu_mutex);
	list_for_each_entry(spu, &spu_full_list, full_list)
		sysfs_create_group(&spu->sysdev.kobj, attrs);
	mutex_unlock(&spu_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(spu_add_sysdev_attr_group);

void spu_remove_sysdev_attr(struct sysdev_attribute *attr)
{
	struct spu *spu;

	mutex_lock(&spu_mutex);
	list_for_each_entry(spu, &spu_full_list, full_list)
		sysdev_remove_file(&spu->sysdev, attr);
	mutex_unlock(&spu_mutex);
}
EXPORT_SYMBOL_GPL(spu_remove_sysdev_attr);

void spu_remove_sysdev_attr_group(struct attribute_group *attrs)
{
	struct spu *spu;

	mutex_lock(&spu_mutex);
	list_for_each_entry(spu, &spu_full_list, full_list)
		sysfs_remove_group(&spu->sysdev.kobj, attrs);
	mutex_unlock(&spu_mutex);
}
EXPORT_SYMBOL_GPL(spu_remove_sysdev_attr_group);

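/*
 * Usage sketch for the attribute helpers above (hypothetical attribute;
 * assumes the 2.6.19-era sysdev API):
 *
 *	static ssize_t foo_show(struct sys_device *sysdev, char *buf)
 *	{
 *		struct spu *spu = container_of(sysdev, struct spu, sysdev);
 *		return sprintf(buf, "%d\n", spu->number);
 *	}
 *	static SYSDEV_ATTR(foo, 0444, foo_show, NULL);
 *
 *	spu_add_sysdev_attr(&attr_foo);		.. e.g. from module init
 *	spu_remove_sysdev_attr(&attr_foo);	.. and from module exit
 */
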
static int spu_create_sysdev(struct spu *spu)
{
	int ret;

	spu->sysdev.id = spu->number;
	spu->sysdev.cls = &spu_sysdev_class;
	ret = sysdev_register(&spu->sysdev);
	if (ret) {
		printk(KERN_ERR "Can't register SPU %d with sysfs\n",
				spu->number);
		return ret;
	}

	sysfs_add_device_to_node(&spu->sysdev, spu->nid);

	return 0;
}

static void spu_destroy_sysdev(struct spu *spu)
{
	sysfs_remove_device_from_node(&spu->sysdev, spu->nid);
	sysdev_unregister(&spu->sysdev);
}

static int __init create_spu(struct device_node *spe)
{
	struct spu *spu;
	int ret;
	static int number;

	ret = -ENOMEM;
	spu = kzalloc(sizeof (*spu), GFP_KERNEL);
	if (!spu)
		goto out;

	ret = -ENODEV;
	spu->node = find_spu_node_id(spe);
	if (spu->node >= MAX_NUMNODES) {
		printk(KERN_WARNING "SPE %s on node %d ignored,"
		       " node number too big\n", spe->full_name, spu->node);
		printk(KERN_WARNING "Check if CONFIG_NUMA is enabled.\n");
		goto out_free;
	}
	spu->nid = of_node_to_nid(spe);
	if (spu->nid == -1)
		spu->nid = 0;

	ret = spu_map_device(spu, spe);
	/* try old method */
	if (ret)
		ret = spu_map_device_old(spu, spe);
	if (ret)
		goto out_free;

	ret = spu_map_interrupts(spu, spe);
	if (ret)
		ret = spu_map_interrupts_old(spu, spe);
	if (ret)
		goto out_unmap;
	spin_lock_init(&spu->register_lock);
	spu_mfc_sdr_setup(spu);
	spu_mfc_sr1_set(spu, 0x33);
	mutex_lock(&spu_mutex);

	spu->number = number++;
	ret = spu_request_irqs(spu);
	if (ret)
		goto out_unlock;

	ret = spu_create_sysdev(spu);
	if (ret)
		goto out_free_irqs;

	list_add(&spu->list, &spu_list[spu->node]);
	list_add(&spu->full_list, &spu_full_list);
	spu->devnode = of_node_get(spe);

	mutex_unlock(&spu_mutex);

	pr_debug("Using SPE %s %p %p %p %p %d\n",
		 spu->name, spu->local_store,
		 spu->problem, spu->priv1, spu->priv2, spu->number);
	goto out;

out_free_irqs:
	spu_free_irqs(spu);
out_unlock:
	mutex_unlock(&spu_mutex);
out_unmap:
	spu_unmap(spu);
out_free:
	kfree(spu);
out:
	return ret;
}

static void destroy_spu(struct spu *spu)
{
	list_del_init(&spu->list);
	list_del_init(&spu->full_list);

	of_node_put(spu->devnode);

	spu_destroy_sysdev(spu);
	spu_free_irqs(spu);
	spu_unmap(spu);
	kfree(spu);
}

static void cleanup_spu_base(void)
{
	struct spu *spu, *tmp;
	int node;

	mutex_lock(&spu_mutex);
	for (node = 0; node < MAX_NUMNODES; node++) {
		list_for_each_entry_safe(spu, tmp, &spu_list[node], list)
			destroy_spu(spu);
	}
	mutex_unlock(&spu_mutex);
	sysdev_class_unregister(&spu_sysdev_class);
}
module_exit(cleanup_spu_base);

static int __init init_spu_base(void)
{
	struct device_node *node;
	int i, ret;

	/* create sysdev class for spus */
	ret = sysdev_class_register(&spu_sysdev_class);
	if (ret)
		return ret;

	for (i = 0; i < MAX_NUMNODES; i++)
		INIT_LIST_HEAD(&spu_list[i]);

	ret = -ENODEV;
	for (node = of_find_node_by_type(NULL, "spe");
			node; node = of_find_node_by_type(node, "spe")) {
		ret = create_spu(node);
		if (ret) {
			printk(KERN_WARNING "%s: Error initializing %s\n",
				__func__, node->name);
			cleanup_spu_base();
			break;
		}
	}

	xmon_register_spus(&spu_full_list);

	return ret;
}
module_init(init_spu_base);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>");