/*
 * Low-level SPU handling
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
 *
 * Author: Arnd Bergmann <arndb@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#undef DEBUG

#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/mutex.h>
#include <linux/linux_logo.h>
#include <asm/spu.h>
#include <asm/spu_priv1.h>
#include <asm/xmon.h>
#include <asm/prom.h>

const struct spu_management_ops *spu_management_ops;
EXPORT_SYMBOL_GPL(spu_management_ops);

const struct spu_priv1_ops *spu_priv1_ops;
EXPORT_SYMBOL_GPL(spu_priv1_ops);

struct cbe_spu_info cbe_spu_info[MAX_NUMNODES];
EXPORT_SYMBOL_GPL(cbe_spu_info);

/*
 * Protects cbe_spu_info and spu->number.
 */
static DEFINE_SPINLOCK(spu_lock);

/*
 * List of all spus in the system.
 *
 * This list is iterated by callers from irq context and callers that
 * want to sleep.  Thus modifications need to be done with both
 * spu_full_list_lock and spu_full_list_mutex held, while iterating
 * through it requires either of these locks.
 *
 * In addition spu_full_list_lock protects all assignments to
 * spu->mm.
 */
static LIST_HEAD(spu_full_list);
static DEFINE_SPINLOCK(spu_full_list_lock);
static DEFINE_MUTEX(spu_full_list_mutex);

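/*
 * Invalidate all SLB entries in the SPE's MFC, provided address
 * translation is enabled (MFC_SR1[R] is set).
 */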
void spu_invalidate_slbs(struct spu *spu)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;

        if (spu_mfc_sr1_get(spu) & MFC_STATE1_RELOCATE_MASK)
                out_be64(&priv2->slb_invalidate_all_W, 0UL);
}
EXPORT_SYMBOL_GPL(spu_invalidate_slbs);

/* This is called by the MM core when a segment size is changed, to
 * request a flush of all the SPEs using a given mm
 */
void spu_flush_all_slbs(struct mm_struct *mm)
{
        struct spu *spu;
        unsigned long flags;

        spin_lock_irqsave(&spu_full_list_lock, flags);
        list_for_each_entry(spu, &spu_full_list, full_list) {
                if (spu->mm == mm)
                        spu_invalidate_slbs(spu);
        }
        spin_unlock_irqrestore(&spu_full_list_lock, flags);
}

/* The hack below stinks... try to do something better one of
 * these days... Does it even work properly with NR_CPUS == 1 ?
 */
static inline void mm_needs_global_tlbie(struct mm_struct *mm)
{
        int nr = (NR_CPUS > 1) ? NR_CPUS : NR_CPUS + 1;

        /* Global TLBIE broadcast required with SPEs. */
        __cpus_setall(&mm->cpu_vm_mask, nr);
}

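/*
 * Associate an SPU with a process address space.  The assignment of
 * spu->mm is done under spu_full_list_lock so that spu_flush_all_slbs()
 * always sees a consistent value while walking the full list.
 */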
void spu_associate_mm(struct spu *spu, struct mm_struct *mm)
{
        unsigned long flags;

        spin_lock_irqsave(&spu_full_list_lock, flags);
        spu->mm = mm;
        spin_unlock_irqrestore(&spu_full_list_lock, flags);
        if (mm)
                mm_needs_global_tlbie(mm);
}
EXPORT_SYMBOL_GPL(spu_associate_mm);

static int __spu_trap_invalid_dma(struct spu *spu)
{
        pr_debug("%s\n", __FUNCTION__);
        spu->dma_callback(spu, SPE_EVENT_INVALID_DMA);
        return 0;
}

static int __spu_trap_dma_align(struct spu *spu)
{
        pr_debug("%s\n", __FUNCTION__);
        spu->dma_callback(spu, SPE_EVENT_DMA_ALIGNMENT);
        return 0;
}

static int __spu_trap_error(struct spu *spu)
{
        pr_debug("%s\n", __FUNCTION__);
        spu->dma_callback(spu, SPE_EVENT_SPE_ERROR);
        return 0;
}

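/*
 * Restart the MFC command queue, unless a context save/restore is
 * pending (SPU_CONTEXT_SWITCH_PENDING), in which case the MFC is left
 * suspended.
 */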
static void spu_restart_dma(struct spu *spu)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;

        if (!test_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags))
                out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESTART_DMA_COMMAND);
}

static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;
        struct mm_struct *mm = spu->mm;
        u64 esid, vsid, llp;
        int psize;

        pr_debug("%s\n", __FUNCTION__);

        if (test_bit(SPU_CONTEXT_SWITCH_ACTIVE, &spu->flags)) {
                /* SLBs are pre-loaded for context switch, so
                 * we should never get here!
                 */
                printk("%s: invalid access during switch!\n", __func__);
                return 1;
        }
        esid = (ea & ESID_MASK) | SLB_ESID_V;

        switch(REGION_ID(ea)) {
        case USER_REGION_ID:
#ifdef CONFIG_PPC_MM_SLICES
                psize = get_slice_psize(mm, ea);
#else
                psize = mm->context.user_psize;
#endif
                vsid = (get_vsid(mm->context.id, ea) << SLB_VSID_SHIFT) |
                                SLB_VSID_USER;
                break;
        case VMALLOC_REGION_ID:
                if (ea < VMALLOC_END)
                        psize = mmu_vmalloc_psize;
                else
                        psize = mmu_io_psize;
                vsid = (get_kernel_vsid(ea) << SLB_VSID_SHIFT) |
                        SLB_VSID_KERNEL;
                break;
        case KERNEL_REGION_ID:
                psize = mmu_linear_psize;
                vsid = (get_kernel_vsid(ea) << SLB_VSID_SHIFT) |
                        SLB_VSID_KERNEL;
                break;
        default:
                /* Future: support kernel segments so that drivers
                 * can use SPUs.
                 */
                pr_debug("invalid region access at %016lx\n", ea);
                return 1;
        }
        llp = mmu_psize_defs[psize].sllp;

        out_be64(&priv2->slb_index_W, spu->slb_replace);
        out_be64(&priv2->slb_vsid_RW, vsid | llp);
        out_be64(&priv2->slb_esid_RW, esid);

        spu->slb_replace++;
        if (spu->slb_replace >= 8)
                spu->slb_replace = 0;

        spu_restart_dma(spu);
        spu->stats.slb_flt++;
        return 0;
}

extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap); //XXX
static int __spu_trap_data_map(struct spu *spu, unsigned long ea, u64 dsisr)
{
        pr_debug("%s, %lx, %lx\n", __FUNCTION__, dsisr, ea);

        /* Handle kernel space hash faults immediately.
           User hash faults need to be deferred to process context. */
        if ((dsisr & MFC_DSISR_PTE_NOT_FOUND)
            && REGION_ID(ea) != USER_REGION_ID
            && hash_page(ea, _PAGE_PRESENT, 0x300) == 0) {
                spu_restart_dma(spu);
                return 0;
        }

        if (test_bit(SPU_CONTEXT_SWITCH_ACTIVE, &spu->flags)) {
                printk("%s: invalid access during switch!\n", __func__);
                return 1;
        }

        spu->dar = ea;
        spu->dsisr = dsisr;
        mb();
        spu->stop_callback(spu);
        return 0;
}

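/*
 * Class 0 interrupts report SPU errors (DMA alignment, invalid DMA,
 * SPU error).  The hard irq handler only latches class_0_pending and
 * notifies the owner; spu_irq_class_0_bottom() does the decoding.
 */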
static irqreturn_t
spu_irq_class_0(int irq, void *data)
{
        struct spu *spu;

        spu = data;
        spu->class_0_pending = 1;
        spu->stop_callback(spu);

        return IRQ_HANDLED;
}

int
spu_irq_class_0_bottom(struct spu *spu)
{
        unsigned long stat, mask;
        unsigned long flags;

        spu->class_0_pending = 0;

        spin_lock_irqsave(&spu->register_lock, flags);
        mask = spu_int_mask_get(spu, 0);
        stat = spu_int_stat_get(spu, 0);

        stat &= mask;

        if (stat & 1) /* invalid DMA alignment */
                __spu_trap_dma_align(spu);

        if (stat & 2) /* invalid MFC DMA */
                __spu_trap_invalid_dma(spu);

        if (stat & 4) /* error on SPU */
                __spu_trap_error(spu);

        spu_int_stat_clear(spu, 0, stat);
        spin_unlock_irqrestore(&spu->register_lock, flags);

        return (stat & 0x7) ? -EIO : 0;
}
EXPORT_SYMBOL_GPL(spu_irq_class_0_bottom);

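/*
 * Class 1 interrupts are MFC translation faults: SLB segment misses are
 * fixed up directly, hash/page faults are handed to the owner via the
 * stop callback.
 */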
static irqreturn_t
spu_irq_class_1(int irq, void *data)
{
        struct spu *spu;
        unsigned long stat, mask, dar, dsisr;

        spu = data;

        /* atomically read & clear class1 status. */
        spin_lock(&spu->register_lock);
        mask = spu_int_mask_get(spu, 1);
        stat = spu_int_stat_get(spu, 1) & mask;
        dar = spu_mfc_dar_get(spu);
        dsisr = spu_mfc_dsisr_get(spu);
        if (stat & 2) /* mapping fault */
                spu_mfc_dsisr_set(spu, 0ul);
        spu_int_stat_clear(spu, 1, stat);
        spin_unlock(&spu->register_lock);
        pr_debug("%s: %lx %lx %lx %lx\n", __FUNCTION__, mask, stat,
                        dar, dsisr);

        if (stat & 1) /* segment fault */
                __spu_trap_data_seg(spu, dar);

        if (stat & 2) { /* mapping fault */
                __spu_trap_data_map(spu, dar, dsisr);
        }

        if (stat & 4) /* ls compare & suspend on get */
                ;

        if (stat & 8) /* ls compare & suspend on put */
                ;

        return stat ? IRQ_HANDLED : IRQ_NONE;
}

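/*
 * Class 2 interrupts cover mailbox traffic, SPU stop-and-signal, SPU
 * halt and DMA tag group completion; each event is forwarded to the
 * matching callback.
 */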
static irqreturn_t
spu_irq_class_2(int irq, void *data)
{
        struct spu *spu;
        unsigned long stat;
        unsigned long mask;

        spu = data;
        spin_lock(&spu->register_lock);
        stat = spu_int_stat_get(spu, 2);
        mask = spu_int_mask_get(spu, 2);
        /* ignore interrupts we're not waiting for */
        stat &= mask;
        /*
         * mailbox interrupts (0x1 and 0x10) are level triggered.
         * mask them now before acknowledging.
         */
        if (stat & 0x11)
                spu_int_mask_and(spu, 2, ~(stat & 0x11));
        /* acknowledge all interrupts before the callbacks */
        spu_int_stat_clear(spu, 2, stat);
        spin_unlock(&spu->register_lock);

        pr_debug("class 2 interrupt %d, %lx, %lx\n", irq, stat, mask);

        if (stat & 1)  /* PPC core mailbox */
                spu->ibox_callback(spu);

        if (stat & 2) /* SPU stop-and-signal */
                spu->stop_callback(spu);

        if (stat & 4) /* SPU halted */
                spu->stop_callback(spu);

        if (stat & 8) /* DMA tag group complete */
                spu->mfc_callback(spu);

        if (stat & 0x10) /* SPU mailbox threshold */
                spu->wbox_callback(spu);

        spu->stats.class2_intr++;
        return stat ? IRQ_HANDLED : IRQ_NONE;
}

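/*
 * Register handlers for the three interrupt classes.  Any class may be
 * absent (NO_IRQ) depending on how the platform wired up the SPE.
 */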
static int spu_request_irqs(struct spu *spu)
{
        int ret = 0;

        if (spu->irqs[0] != NO_IRQ) {
                snprintf(spu->irq_c0, sizeof (spu->irq_c0), "spe%02d.0",
                         spu->number);
                ret = request_irq(spu->irqs[0], spu_irq_class_0,
                                  IRQF_DISABLED,
                                  spu->irq_c0, spu);
                if (ret)
                        goto bail0;
        }
        if (spu->irqs[1] != NO_IRQ) {
                snprintf(spu->irq_c1, sizeof (spu->irq_c1), "spe%02d.1",
                         spu->number);
                ret = request_irq(spu->irqs[1], spu_irq_class_1,
                                  IRQF_DISABLED,
                                  spu->irq_c1, spu);
                if (ret)
                        goto bail1;
        }
        if (spu->irqs[2] != NO_IRQ) {
                snprintf(spu->irq_c2, sizeof (spu->irq_c2), "spe%02d.2",
                         spu->number);
                ret = request_irq(spu->irqs[2], spu_irq_class_2,
                                  IRQF_DISABLED,
                                  spu->irq_c2, spu);
                if (ret)
                        goto bail2;
        }
        return 0;

bail2:
        if (spu->irqs[1] != NO_IRQ)
                free_irq(spu->irqs[1], spu);
bail1:
        if (spu->irqs[0] != NO_IRQ)
                free_irq(spu->irqs[0], spu);
bail0:
        return ret;
}

static void spu_free_irqs(struct spu *spu)
{
        if (spu->irqs[0] != NO_IRQ)
                free_irq(spu->irqs[0], spu);
        if (spu->irqs[1] != NO_IRQ)
                free_irq(spu->irqs[1], spu);
        if (spu->irqs[2] != NO_IRQ)
                free_irq(spu->irqs[2], spu);
}

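/*
 * Bring the SPU channels into a sane initial state: clear the channel
 * data and set the channel counts through the priv2 channel index/data
 * registers.
 */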
void spu_init_channels(struct spu *spu)
{
        static const struct {
                unsigned channel;
                unsigned count;
        } zero_list[] = {
                { 0x00, 1, }, { 0x01, 1, }, { 0x03, 1, }, { 0x04, 1, },
                { 0x18, 1, }, { 0x19, 1, }, { 0x1b, 1, }, { 0x1d, 1, },
        }, count_list[] = {
                { 0x00, 0, }, { 0x03, 0, }, { 0x04, 0, }, { 0x15, 16, },
                { 0x17, 1, }, { 0x18, 0, }, { 0x19, 0, }, { 0x1b, 0, },
                { 0x1c, 1, }, { 0x1d, 0, }, { 0x1e, 1, },
        };
        struct spu_priv2 __iomem *priv2;
        int i;

        priv2 = spu->priv2;

        /* initialize all channel data to zero */
        for (i = 0; i < ARRAY_SIZE(zero_list); i++) {
                int count;

                out_be64(&priv2->spu_chnlcntptr_RW, zero_list[i].channel);
                for (count = 0; count < zero_list[i].count; count++)
                        out_be64(&priv2->spu_chnldata_RW, 0);
        }

        /* initialize channel counts to meaningful values */
        for (i = 0; i < ARRAY_SIZE(count_list); i++) {
                out_be64(&priv2->spu_chnlcntptr_RW, count_list[i].channel);
                out_be64(&priv2->spu_chnlcnt_RW, count_list[i].count);
        }
}
EXPORT_SYMBOL_GPL(spu_init_channels);

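/*
 * sysdev shutdown hook: release the SPU's interrupts and let the
 * platform code tear the SPU down.
 */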
static int spu_shutdown(struct sys_device *sysdev)
{
        struct spu *spu = container_of(sysdev, struct spu, sysdev);

        spu_free_irqs(spu);
        spu_destroy_spu(spu);
        return 0;
}

struct sysdev_class spu_sysdev_class = {
        set_kset_name("spu"),
        .shutdown = spu_shutdown,
};

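/*
 * Helpers to add or remove a sysdev attribute (or attribute group) on
 * every SPU currently known, holding spu_full_list_mutex so the walk
 * can sleep safely.
 */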
int spu_add_sysdev_attr(struct sysdev_attribute *attr)
{
        struct spu *spu;

        mutex_lock(&spu_full_list_mutex);
        list_for_each_entry(spu, &spu_full_list, full_list)
                sysdev_create_file(&spu->sysdev, attr);
        mutex_unlock(&spu_full_list_mutex);

        return 0;
}
EXPORT_SYMBOL_GPL(spu_add_sysdev_attr);

int spu_add_sysdev_attr_group(struct attribute_group *attrs)
{
        struct spu *spu;

        mutex_lock(&spu_full_list_mutex);
        list_for_each_entry(spu, &spu_full_list, full_list)
                sysfs_create_group(&spu->sysdev.kobj, attrs);
        mutex_unlock(&spu_full_list_mutex);

        return 0;
}
EXPORT_SYMBOL_GPL(spu_add_sysdev_attr_group);


void spu_remove_sysdev_attr(struct sysdev_attribute *attr)
{
        struct spu *spu;

        mutex_lock(&spu_full_list_mutex);
        list_for_each_entry(spu, &spu_full_list, full_list)
                sysdev_remove_file(&spu->sysdev, attr);
        mutex_unlock(&spu_full_list_mutex);
}
EXPORT_SYMBOL_GPL(spu_remove_sysdev_attr);

void spu_remove_sysdev_attr_group(struct attribute_group *attrs)
{
        struct spu *spu;

        mutex_lock(&spu_full_list_mutex);
        list_for_each_entry(spu, &spu_full_list, full_list)
                sysfs_remove_group(&spu->sysdev.kobj, attrs);
        mutex_unlock(&spu_full_list_mutex);
}
EXPORT_SYMBOL_GPL(spu_remove_sysdev_attr_group);

static int spu_create_sysdev(struct spu *spu)
{
        int ret;

        spu->sysdev.id = spu->number;
        spu->sysdev.cls = &spu_sysdev_class;
        ret = sysdev_register(&spu->sysdev);
        if (ret) {
                printk(KERN_ERR "Can't register SPU %d with sysfs\n",
                                spu->number);
                return ret;
        }

        sysfs_add_device_to_node(&spu->sysdev, spu->node);

        return 0;
}

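/*
 * Set up one SPU: allocate the structure, do the platform-specific
 * creation, hook up interrupts and sysfs, then publish it on the
 * per-node and global lists.
 */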
static int __init create_spu(void *data)
{
        struct spu *spu;
        int ret;
        static int number;
        unsigned long flags;
        struct timespec ts;

        ret = -ENOMEM;
        spu = kzalloc(sizeof (*spu), GFP_KERNEL);
        if (!spu)
                goto out;

        spu->alloc_state = SPU_FREE;

        spin_lock_init(&spu->register_lock);
        spin_lock(&spu_lock);
        spu->number = number++;
        spin_unlock(&spu_lock);

        ret = spu_create_spu(spu, data);

        if (ret)
                goto out_free;

        spu_mfc_sdr_setup(spu);
        spu_mfc_sr1_set(spu, 0x33);
        ret = spu_request_irqs(spu);
        if (ret)
                goto out_destroy;

        ret = spu_create_sysdev(spu);
        if (ret)
                goto out_free_irqs;

        mutex_lock(&cbe_spu_info[spu->node].list_mutex);
        list_add(&spu->cbe_list, &cbe_spu_info[spu->node].spus);
        cbe_spu_info[spu->node].n_spus++;
        mutex_unlock(&cbe_spu_info[spu->node].list_mutex);

        mutex_lock(&spu_full_list_mutex);
        spin_lock_irqsave(&spu_full_list_lock, flags);
        list_add(&spu->full_list, &spu_full_list);
        spin_unlock_irqrestore(&spu_full_list_lock, flags);
        mutex_unlock(&spu_full_list_mutex);

        spu->stats.util_state = SPU_UTIL_IDLE_LOADED;
        ktime_get_ts(&ts);
        spu->stats.tstamp = timespec_to_ns(&ts);

        INIT_LIST_HEAD(&spu->aff_list);

        goto out;

out_free_irqs:
        spu_free_irqs(spu);
out_destroy:
        spu_destroy_spu(spu);
out_free:
        kfree(spu);
out:
        return ret;
}

static const char *spu_state_names[] = {
        "user", "system", "iowait", "idle"
};

static unsigned long long spu_acct_time(struct spu *spu,
                enum spu_utilization_state state)
{
        struct timespec ts;
        unsigned long long time = spu->stats.times[state];

        /*
         * If the spu is idle or the context is stopped, utilization
         * statistics are not updated.  Apply the time delta from the
         * last recorded state of the spu.
         */
        if (spu->stats.util_state == state) {
                ktime_get_ts(&ts);
                time += timespec_to_ns(&ts) - spu->stats.tstamp;
        }

        return time / NSEC_PER_MSEC;
}

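/*
 * sysfs "stat" attribute: one line per SPU with the utilization state,
 * the accounted times in milliseconds and the fault/interrupt counters.
 */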
static ssize_t spu_stat_show(struct sys_device *sysdev, char *buf)
{
        struct spu *spu = container_of(sysdev, struct spu, sysdev);

        return sprintf(buf, "%s %llu %llu %llu %llu "
                "%llu %llu %llu %llu %llu %llu %llu %llu\n",
                spu_state_names[spu->stats.util_state],
                spu_acct_time(spu, SPU_UTIL_USER),
                spu_acct_time(spu, SPU_UTIL_SYSTEM),
                spu_acct_time(spu, SPU_UTIL_IOWAIT),
                spu_acct_time(spu, SPU_UTIL_IDLE_LOADED),
                spu->stats.vol_ctx_switch,
                spu->stats.invol_ctx_switch,
                spu->stats.slb_flt,
                spu->stats.hash_flt,
                spu->stats.min_flt,
                spu->stats.maj_flt,
                spu->stats.class2_intr,
                spu->stats.libassist);
}

static SYSDEV_ATTR(stat, 0644, spu_stat_show, NULL);

static int __init init_spu_base(void)
{
        int i, ret = 0;

        for (i = 0; i < MAX_NUMNODES; i++) {
                mutex_init(&cbe_spu_info[i].list_mutex);
                INIT_LIST_HEAD(&cbe_spu_info[i].spus);
        }

        if (!spu_management_ops)
                goto out;

        /* create sysdev class for spus */
        ret = sysdev_class_register(&spu_sysdev_class);
        if (ret)
                goto out;

        ret = spu_enumerate_spus(create_spu);

        if (ret < 0) {
                printk(KERN_WARNING "%s: Error initializing spus\n",
                        __FUNCTION__);
                goto out_unregister_sysdev_class;
        }

        if (ret > 0) {
                /*
                 * We cannot put the forward declaration in
                 * <linux/linux_logo.h> because of section type conflicts
                 * for const and __initdata with different compiler
                 * versions
                 */
                extern const struct linux_logo logo_spe_clut224;

                fb_append_extra_logo(&logo_spe_clut224, ret);
        }

        mutex_lock(&spu_full_list_mutex);
        xmon_register_spus(&spu_full_list);
        crash_register_spus(&spu_full_list);
        mutex_unlock(&spu_full_list_mutex);
        spu_add_sysdev_attr(&attr_stat);

        spu_init_affinity();

        return 0;

out_unregister_sysdev_class:
        sysdev_class_unregister(&spu_sysdev_class);
out:
        return ret;
}
module_init(init_spu_base);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>");