/*
 * Low-level SPU handling
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
 *
 * Author: Arnd Bergmann <arndb@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#undef DEBUG

#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/mutex.h>
#include <linux/linux_logo.h>
#include <linux/syscore_ops.h>
#include <asm/spu.h>
#include <asm/spu_priv1.h>
#include <asm/spu_csa.h>
#include <asm/xmon.h>
#include <asm/prom.h>
#include <asm/kexec.h>

const struct spu_management_ops *spu_management_ops;
EXPORT_SYMBOL_GPL(spu_management_ops);

const struct spu_priv1_ops *spu_priv1_ops;
EXPORT_SYMBOL_GPL(spu_priv1_ops);

struct cbe_spu_info cbe_spu_info[MAX_NUMNODES];
EXPORT_SYMBOL_GPL(cbe_spu_info);

/*
 * The spufs fault-handling code needs to call force_sig_info to raise signals
 * on DMA errors. Export it here to avoid general kernel-wide access to this
 * function.
 */
EXPORT_SYMBOL_GPL(force_sig_info);

/*
 * Protects cbe_spu_info and spu->number.
 */
static DEFINE_SPINLOCK(spu_lock);

/*
 * List of all spus in the system.
 *
 * This list is iterated by callers from irq context and callers that
 * want to sleep.  Thus modifications need to be done with both
 * spu_full_list_lock and spu_full_list_mutex held, while iterating
 * through it requires either of these locks.
 *
 * In addition spu_full_list_lock protects all assignments to
 * spu->mm.
 */
static LIST_HEAD(spu_full_list);
static DEFINE_SPINLOCK(spu_full_list_lock);
static DEFINE_MUTEX(spu_full_list_mutex);
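
/*
 * A sketch of the usage that follows from the rules above (compare
 * spu_flush_all_slbs() and spu_add_dev_attr() below):
 *
 *        spin_lock_irqsave(&spu_full_list_lock, flags);   <- atomic readers
 *        list_for_each_entry(spu, &spu_full_list, full_list)
 *                ...;
 *        spin_unlock_irqrestore(&spu_full_list_lock, flags);
 *
 *        mutex_lock(&spu_full_list_mutex);                <- sleeping readers
 *        list_for_each_entry(spu, &spu_full_list, full_list)
 *                ...;
 *        mutex_unlock(&spu_full_list_mutex);
 *
 * Writers take both, the mutex first and then the spinlock around the
 * actual list manipulation (see create_spu()).
 */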

void spu_invalidate_slbs(struct spu *spu)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;
        unsigned long flags;

        spin_lock_irqsave(&spu->register_lock, flags);
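        /*
         * The SLB only participates in translation while address
         * relocation is enabled in MFC SR1; with translation off there
         * is nothing to invalidate, so skip the MMIO write.
         */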
        if (spu_mfc_sr1_get(spu) & MFC_STATE1_RELOCATE_MASK)
                out_be64(&priv2->slb_invalidate_all_W, 0UL);
        spin_unlock_irqrestore(&spu->register_lock, flags);
}
EXPORT_SYMBOL_GPL(spu_invalidate_slbs);

/* This is called by the MM core when a segment size is changed, to
 * request a flush of all the SPEs using a given mm
 */
void spu_flush_all_slbs(struct mm_struct *mm)
{
        struct spu *spu;
        unsigned long flags;

        spin_lock_irqsave(&spu_full_list_lock, flags);
        list_for_each_entry(spu, &spu_full_list, full_list) {
                if (spu->mm == mm)
                        spu_invalidate_slbs(spu);
        }
        spin_unlock_irqrestore(&spu_full_list_lock, flags);
}

/* The hack below stinks... try to do something better one of
 * these days... Does it even work properly with NR_CPUS == 1?
 */
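/*
 * What the hack achieves: filling mm_cpumask(mm) makes the mm look
 * active on every possible CPU, so the powerpc TLB flush paths use the
 * broadcast tlbie instruction rather than the CPU-local tlbiel.  The
 * SPE MMUs snoop broadcast invalidations but are not CPUs themselves,
 * hence the forced over-approximation.  The NR_CPUS + 1 case
 * presumably exists so that even a UP kernel sees more than one "CPU"
 * set, which is what the question above is about.
 */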
static inline void mm_needs_global_tlbie(struct mm_struct *mm)
{
        int nr = (NR_CPUS > 1) ? NR_CPUS : NR_CPUS + 1;

        /* Global TLBIE broadcast required with SPEs. */
        bitmap_fill(cpumask_bits(mm_cpumask(mm)), nr);
}

void spu_associate_mm(struct spu *spu, struct mm_struct *mm)
{
        unsigned long flags;

        spin_lock_irqsave(&spu_full_list_lock, flags);
        spu->mm = mm;
        spin_unlock_irqrestore(&spu_full_list_lock, flags);
        if (mm)
                mm_needs_global_tlbie(mm);
}
EXPORT_SYMBOL_GPL(spu_associate_mm);

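/*
 * mmu_psize_defs[].shift is zero for page sizes the MMU does not
 * support, so a nonzero shift for MMU_PAGE_64K means the kernel can
 * hand out 64k mappings.
 */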
int spu_64k_pages_available(void)
{
        return mmu_psize_defs[MMU_PAGE_64K].shift != 0;
}
EXPORT_SYMBOL_GPL(spu_64k_pages_available);

static void spu_restart_dma(struct spu *spu)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;

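        /*
         * While a context save/restore is pending the MFC must stay
         * suspended, so only record the fault here; the context switch
         * code is expected to replay the restart once the switch
         * completes.
         */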
        if (!test_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags))
                out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESTART_DMA_COMMAND);
        else {
                set_bit(SPU_CONTEXT_FAULT_PENDING, &spu->flags);
                mb();
        }
}

static inline void spu_load_slb(struct spu *spu, int slbe, struct copro_slb *slb)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;

        pr_debug("%s: adding SLB[%d] 0x%016llx 0x%016llx\n",
                        __func__, slbe, slb->vsid, slb->esid);

        out_be64(&priv2->slb_index_W, slbe);
        /* set invalid before writing vsid */
        out_be64(&priv2->slb_esid_RW, 0);
        /* now it's safe to write the vsid */
        out_be64(&priv2->slb_vsid_RW, slb->vsid);
        /* setting the new esid makes the entry valid again */
        out_be64(&priv2->slb_esid_RW, slb->esid);
}

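/*
 * Handle an SPE segment fault: have the copro code calculate a new SLB
 * entry for @ea, install it in the next of the MFC's eight SLB slots
 * (simple round-robin replacement via spu->slb_replace), and restart
 * the DMA that faulted.
 */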
static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
{
        struct copro_slb slb;
        int ret;

        ret = copro_calculate_slb(spu->mm, ea, &slb);
        if (ret)
                return ret;

        spu_load_slb(spu, spu->slb_replace, &slb);

        spu->slb_replace++;
        if (spu->slb_replace >= 8)
                spu->slb_replace = 0;

        spu_restart_dma(spu);
        spu->stats.slb_flt++;
        return 0;
}

extern int hash_page(unsigned long ea, unsigned long access,
                     unsigned long trap, unsigned long dsisr); //XXX
static int __spu_trap_data_map(struct spu *spu, unsigned long ea, u64 dsisr)
{
        int ret;

        pr_debug("%s, %llx, %lx\n", __func__, dsisr, ea);

        /*
         * Handle kernel space hash faults immediately. User hash
         * faults need to be deferred to process context.
         */
        if ((dsisr & MFC_DSISR_PTE_NOT_FOUND) &&
            (REGION_ID(ea) != USER_REGION_ID)) {

                spin_unlock(&spu->register_lock);
                ret = hash_page(ea, _PAGE_PRESENT | _PAGE_READ, 0x300, dsisr);
                spin_lock(&spu->register_lock);

                if (!ret) {
                        spu_restart_dma(spu);
                        return 0;
                }
        }

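        /*
         * Defer the fault to process context: latch the fault address
         * and DSISR where the owning context can find them, and kick
         * the class 1 stop callback.
         */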
        spu->class_1_dar = ea;
        spu->class_1_dsisr = dsisr;

        spu->stop_callback(spu, 1);

        spu->class_1_dar = 0;
        spu->class_1_dsisr = 0;

        return 0;
}

static void __spu_kernel_slb(void *addr, struct copro_slb *slb)
{
        unsigned long ea = (unsigned long)addr;
        u64 llp;

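        /*
         * The linear mapping and the vmalloc/ioremap region may use
         * different page sizes; pick the SLB LP encoding to match.
         */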
        if (REGION_ID(ea) == KERNEL_REGION_ID)
                llp = mmu_psize_defs[mmu_linear_psize].sllp;
        else
                llp = mmu_psize_defs[mmu_virtual_psize].sllp;

        slb->vsid = (get_kernel_vsid(ea, MMU_SEGSIZE_256M) << SLB_VSID_SHIFT) |
                SLB_VSID_KERNEL | llp;
        slb->esid = (ea & ESID_MASK) | SLB_ESID_V;
}

/**
 * Given an array of @nr_slbs SLB entries, @slbs, return non-zero if the
 * address @new_addr is present.
 */
static inline int __slb_present(struct copro_slb *slbs, int nr_slbs,
                void *new_addr)
{
        unsigned long ea = (unsigned long)new_addr;
        int i;

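        /* Two addresses are covered by the same SLB entry iff their
         * ESIDs match, i.e. they fall in the same 256MB segment. */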
        for (i = 0; i < nr_slbs; i++)
                if (!((slbs[i].esid ^ ea) & ESID_MASK))
                        return 1;

        return 0;
}

/**
 * Setup the SPU kernel SLBs, in preparation for a context save/restore. We
 * need to map both the context save area, and the save/restore code.
 *
 * Because the lscsa and code may cross segment boundaries, we check to see
 * if mappings are required for the start and end of each range. We currently
 * assume that the mappings are smaller than one segment - if not, something
 * is seriously wrong.
 */
void spu_setup_kernel_slbs(struct spu *spu, struct spu_lscsa *lscsa,
                void *code, int code_size)
{
        struct copro_slb slbs[4];
        int i, nr_slbs = 0;
        /* start and end addresses of both mappings */
        void *addrs[] = {
                lscsa, (void *)lscsa + sizeof(*lscsa) - 1,
                code, code + code_size - 1
        };

        /* check the set of addresses, and create a new entry in the slbs array
         * if there isn't already an SLB for that address */
        for (i = 0; i < ARRAY_SIZE(addrs); i++) {
                if (__slb_present(slbs, nr_slbs, addrs[i]))
                        continue;

                __spu_kernel_slb(addrs[i], &slbs[nr_slbs]);
                nr_slbs++;
        }

        spin_lock_irq(&spu->register_lock);
        /* Add the set of SLBs */
        for (i = 0; i < nr_slbs; i++)
                spu_load_slb(spu, i, &slbs[i]);
        spin_unlock_irq(&spu->register_lock);
}
EXPORT_SYMBOL_GPL(spu_setup_kernel_slbs);

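/*
 * Class 0 covers the error interrupts (DMA alignment, invalid DMA
 * command, SPU error).  The handler only latches the status into
 * spu->class_0_pending for the context's stop callback to act on;
 * nothing is resolved here in interrupt context.
 */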
static irqreturn_t
spu_irq_class_0(int irq, void *data)
{
        struct spu *spu;
        unsigned long stat, mask;

        spu = data;

        spin_lock(&spu->register_lock);
        mask = spu_int_mask_get(spu, 0);
        stat = spu_int_stat_get(spu, 0) & mask;

        spu->class_0_pending |= stat;
        spu->class_0_dar = spu_mfc_dar_get(spu);
        spu->stop_callback(spu, 0);
        spu->class_0_pending = 0;
        spu->class_0_dar = 0;

        spu_int_stat_clear(spu, 0, stat);
        spin_unlock(&spu->register_lock);

        return IRQ_HANDLED;
}

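/*
 * Class 1 covers the MFC address translation interrupts: segment
 * faults are handled synchronously via __spu_trap_data_seg(), while
 * storage (page) faults are pushed to the owning context through
 * __spu_trap_data_map().
 */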
static irqreturn_t
spu_irq_class_1(int irq, void *data)
{
        struct spu *spu;
        unsigned long stat, mask, dar, dsisr;

        spu = data;

        /* atomically read & clear class1 status. */
        spin_lock(&spu->register_lock);
        mask = spu_int_mask_get(spu, 1);
        stat = spu_int_stat_get(spu, 1) & mask;
        dar = spu_mfc_dar_get(spu);
        dsisr = spu_mfc_dsisr_get(spu);
        if (stat & CLASS1_STORAGE_FAULT_INTR)
                spu_mfc_dsisr_set(spu, 0ul);
        spu_int_stat_clear(spu, 1, stat);

        pr_debug("%s: %lx %lx %lx %lx\n", __func__, mask, stat,
                        dar, dsisr);

        if (stat & CLASS1_SEGMENT_FAULT_INTR)
                __spu_trap_data_seg(spu, dar);

        if (stat & CLASS1_STORAGE_FAULT_INTR)
                __spu_trap_data_map(spu, dar, dsisr);

        if (stat & CLASS1_LS_COMPARE_SUSPEND_ON_GET_INTR)
                ;

        if (stat & CLASS1_LS_COMPARE_SUSPEND_ON_PUT_INTR)
                ;

        spu->class_1_dsisr = 0;
        spu->class_1_dar = 0;

        spin_unlock(&spu->register_lock);

        return stat ? IRQ_HANDLED : IRQ_NONE;
}

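/*
 * Class 2 covers the application interrupts: mailbox traffic, SPU stop
 * and halt, and DMA tag group completion.  These are simply forwarded
 * to the callbacks spufs registered for this spu.
 */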
static irqreturn_t
spu_irq_class_2(int irq, void *data)
{
        struct spu *spu;
        unsigned long stat;
        unsigned long mask;
        const int mailbox_intrs =
                CLASS2_MAILBOX_THRESHOLD_INTR | CLASS2_MAILBOX_INTR;

        spu = data;
        spin_lock(&spu->register_lock);
        stat = spu_int_stat_get(spu, 2);
        mask = spu_int_mask_get(spu, 2);
        /* ignore interrupts we're not waiting for */
        stat &= mask;
        /* mailbox interrupts are level triggered. mask them now before
         * acknowledging */
        if (stat & mailbox_intrs)
                spu_int_mask_and(spu, 2, ~(stat & mailbox_intrs));
        /* acknowledge all interrupts before the callbacks */
        spu_int_stat_clear(spu, 2, stat);

        pr_debug("class 2 interrupt %d, %lx, %lx\n", irq, stat, mask);

        if (stat & CLASS2_MAILBOX_INTR)
                spu->ibox_callback(spu);

        if (stat & CLASS2_SPU_STOP_INTR)
                spu->stop_callback(spu, 2);

        if (stat & CLASS2_SPU_HALT_INTR)
                spu->stop_callback(spu, 2);

        if (stat & CLASS2_SPU_DMA_TAG_GROUP_COMPLETE_INTR)
                spu->mfc_callback(spu);

        if (stat & CLASS2_MAILBOX_THRESHOLD_INTR)
                spu->wbox_callback(spu);

        spu->stats.class2_intr++;

        spin_unlock(&spu->register_lock);

        return stat ? IRQ_HANDLED : IRQ_NONE;
}

static int spu_request_irqs(struct spu *spu)
{
        int ret = 0;

        if (spu->irqs[0]) {
                snprintf(spu->irq_c0, sizeof (spu->irq_c0), "spe%02d.0",
                         spu->number);
                ret = request_irq(spu->irqs[0], spu_irq_class_0,
                                  0, spu->irq_c0, spu);
                if (ret)
                        goto bail0;
        }
        if (spu->irqs[1]) {
                snprintf(spu->irq_c1, sizeof (spu->irq_c1), "spe%02d.1",
                         spu->number);
                ret = request_irq(spu->irqs[1], spu_irq_class_1,
                                  0, spu->irq_c1, spu);
                if (ret)
                        goto bail1;
        }
        if (spu->irqs[2]) {
                snprintf(spu->irq_c2, sizeof (spu->irq_c2), "spe%02d.2",
                         spu->number);
                ret = request_irq(spu->irqs[2], spu_irq_class_2,
                                  0, spu->irq_c2, spu);
                if (ret)
                        goto bail2;
        }
        return 0;

bail2:
        if (spu->irqs[1])
                free_irq(spu->irqs[1], spu);
bail1:
        if (spu->irqs[0])
                free_irq(spu->irqs[0], spu);
bail0:
        return ret;
}

static void spu_free_irqs(struct spu *spu)
{
        if (spu->irqs[0])
                free_irq(spu->irqs[0], spu);
        if (spu->irqs[1])
                free_irq(spu->irqs[1], spu);
        if (spu->irqs[2])
                free_irq(spu->irqs[2], spu);
}

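/*
 * Reset the SPU's channel state to sane defaults through the
 * privileged channel-access registers: spu_chnlcntptr_RW selects a
 * channel, then spu_chnldata_RW/spu_chnlcnt_RW write its data and
 * count.  The channel numbers and counts in the tables below appear to
 * follow the CBEA defaults; for instance, the count of 16 on channel
 * 0x15 matches the 16-entry MFC command queue.
 */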
void spu_init_channels(struct spu *spu)
{
        static const struct {
                unsigned channel;
                unsigned count;
        } zero_list[] = {
                { 0x00, 1, }, { 0x01, 1, }, { 0x03, 1, }, { 0x04, 1, },
                { 0x18, 1, }, { 0x19, 1, }, { 0x1b, 1, }, { 0x1d, 1, },
        }, count_list[] = {
                { 0x00, 0, }, { 0x03, 0, }, { 0x04, 0, }, { 0x15, 16, },
                { 0x17, 1, }, { 0x18, 0, }, { 0x19, 0, }, { 0x1b, 0, },
                { 0x1c, 1, }, { 0x1d, 0, }, { 0x1e, 1, },
        };
        struct spu_priv2 __iomem *priv2;
        int i;

        priv2 = spu->priv2;

        /* initialize all channel data to zero */
        for (i = 0; i < ARRAY_SIZE(zero_list); i++) {
                int count;

                out_be64(&priv2->spu_chnlcntptr_RW, zero_list[i].channel);
                for (count = 0; count < zero_list[i].count; count++)
                        out_be64(&priv2->spu_chnldata_RW, 0);
        }

        /* initialize channel counts to meaningful values */
        for (i = 0; i < ARRAY_SIZE(count_list); i++) {
                out_be64(&priv2->spu_chnlcntptr_RW, count_list[i].channel);
                out_be64(&priv2->spu_chnlcnt_RW, count_list[i].count);
        }
}
EXPORT_SYMBOL_GPL(spu_init_channels);

static struct bus_type spu_subsys = {
        .name = "spu",
        .dev_name = "spu",
};

int spu_add_dev_attr(struct device_attribute *attr)
{
        struct spu *spu;

        mutex_lock(&spu_full_list_mutex);
        list_for_each_entry(spu, &spu_full_list, full_list)
                device_create_file(&spu->dev, attr);
        mutex_unlock(&spu_full_list_mutex);

        return 0;
}
EXPORT_SYMBOL_GPL(spu_add_dev_attr);

int spu_add_dev_attr_group(struct attribute_group *attrs)
{
        struct spu *spu;
        int rc = 0;

        mutex_lock(&spu_full_list_mutex);
        list_for_each_entry(spu, &spu_full_list, full_list) {
                rc = sysfs_create_group(&spu->dev.kobj, attrs);

                /* we're in trouble here, but try unwinding anyway */
                if (rc) {
                        printk(KERN_ERR "%s: can't create sysfs group '%s'\n",
                                        __func__, attrs->name);

                        list_for_each_entry_continue_reverse(spu,
                                        &spu_full_list, full_list)
                                sysfs_remove_group(&spu->dev.kobj, attrs);
                        break;
                }
        }

        mutex_unlock(&spu_full_list_mutex);

        return rc;
}
EXPORT_SYMBOL_GPL(spu_add_dev_attr_group);

void spu_remove_dev_attr(struct device_attribute *attr)
{
        struct spu *spu;

        mutex_lock(&spu_full_list_mutex);
        list_for_each_entry(spu, &spu_full_list, full_list)
                device_remove_file(&spu->dev, attr);
        mutex_unlock(&spu_full_list_mutex);
}
EXPORT_SYMBOL_GPL(spu_remove_dev_attr);

void spu_remove_dev_attr_group(struct attribute_group *attrs)
{
        struct spu *spu;

        mutex_lock(&spu_full_list_mutex);
        list_for_each_entry(spu, &spu_full_list, full_list)
                sysfs_remove_group(&spu->dev.kobj, attrs);
        mutex_unlock(&spu_full_list_mutex);
}
EXPORT_SYMBOL_GPL(spu_remove_dev_attr_group);

static int spu_create_dev(struct spu *spu)
{
        int ret;

        spu->dev.id = spu->number;
        spu->dev.bus = &spu_subsys;
        ret = device_register(&spu->dev);
        if (ret) {
                printk(KERN_ERR "Can't register SPU %d with sysfs\n",
                                spu->number);
                return ret;
        }

        sysfs_add_device_to_node(&spu->dev, spu->node);

        return 0;
}

static int __init create_spu(void *data)
{
        struct spu *spu;
        int ret;
        static int number;
        unsigned long flags;

        ret = -ENOMEM;
        spu = kzalloc(sizeof (*spu), GFP_KERNEL);
        if (!spu)
                goto out;

        spu->alloc_state = SPU_FREE;

        spin_lock_init(&spu->register_lock);
        spin_lock(&spu_lock);
        spu->number = number++;
        spin_unlock(&spu_lock);

        ret = spu_create_spu(spu, data);

        if (ret)
                goto out_free;

        spu_mfc_sdr_setup(spu);
        spu_mfc_sr1_set(spu, 0x33);
        ret = spu_request_irqs(spu);
        if (ret)
                goto out_destroy;

        ret = spu_create_dev(spu);
        if (ret)
                goto out_free_irqs;

        mutex_lock(&cbe_spu_info[spu->node].list_mutex);
        list_add(&spu->cbe_list, &cbe_spu_info[spu->node].spus);
        cbe_spu_info[spu->node].n_spus++;
        mutex_unlock(&cbe_spu_info[spu->node].list_mutex);

        mutex_lock(&spu_full_list_mutex);
        spin_lock_irqsave(&spu_full_list_lock, flags);
        list_add(&spu->full_list, &spu_full_list);
        spin_unlock_irqrestore(&spu_full_list_lock, flags);
        mutex_unlock(&spu_full_list_mutex);

        spu->stats.util_state = SPU_UTIL_IDLE_LOADED;
        spu->stats.tstamp = ktime_get_ns();

        INIT_LIST_HEAD(&spu->aff_list);

        goto out;

out_free_irqs:
        spu_free_irqs(spu);
out_destroy:
        spu_destroy_spu(spu);
out_free:
        kfree(spu);
out:
        return ret;
}

static const char *spu_state_names[] = {
        "user", "system", "iowait", "idle"
};

static unsigned long long spu_acct_time(struct spu *spu,
                enum spu_utilization_state state)
{
        unsigned long long time = spu->stats.times[state];

        /*
         * If the spu is idle or the context is stopped, utilization
         * statistics are not updated.  Apply the time delta from the
         * last recorded state of the spu.
         */
        if (spu->stats.util_state == state)
                time += ktime_get_ns() - spu->stats.tstamp;

        return time / NSEC_PER_MSEC;
}

static ssize_t spu_stat_show(struct device *dev,
                                struct device_attribute *attr, char *buf)
{
        struct spu *spu = container_of(dev, struct spu, dev);

        return sprintf(buf, "%s %llu %llu %llu %llu "
                "%llu %llu %llu %llu %llu %llu %llu %llu\n",
                spu_state_names[spu->stats.util_state],
                spu_acct_time(spu, SPU_UTIL_USER),
                spu_acct_time(spu, SPU_UTIL_SYSTEM),
                spu_acct_time(spu, SPU_UTIL_IOWAIT),
                spu_acct_time(spu, SPU_UTIL_IDLE_LOADED),
                spu->stats.vol_ctx_switch,
                spu->stats.invol_ctx_switch,
                spu->stats.slb_flt,
                spu->stats.hash_flt,
                spu->stats.min_flt,
                spu->stats.maj_flt,
                spu->stats.class2_intr,
                spu->stats.libassist);
}

static DEVICE_ATTR(stat, 0444, spu_stat_show, NULL);

#ifdef CONFIG_KEXEC_CORE

struct crash_spu_info {
        struct spu *spu;
        u32 saved_spu_runcntl_RW;
        u32 saved_spu_status_R;
        u32 saved_spu_npc_RW;
        u64 saved_mfc_sr1_RW;
        u64 saved_mfc_dar;
        u64 saved_mfc_dsisr;
};

#define CRASH_NUM_SPUS  16      /* Enough for current hardware */
static struct crash_spu_info crash_spu_info[CRASH_NUM_SPUS];

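/*
 * On a crash, snapshot each SPU's run control, status, NPC and MFC
 * fault state for the dump, then clear the SR1 master-run-control bit
 * to stop the SPU.  The __delay(200) presumably gives in-flight MFC
 * operations a moment to quiesce before the next SPU is stopped.
 */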
static void crash_kexec_stop_spus(void)
{
        struct spu *spu;
        int i;
        u64 tmp;

        for (i = 0; i < CRASH_NUM_SPUS; i++) {
                if (!crash_spu_info[i].spu)
                        continue;

                spu = crash_spu_info[i].spu;

                crash_spu_info[i].saved_spu_runcntl_RW =
                        in_be32(&spu->problem->spu_runcntl_RW);
                crash_spu_info[i].saved_spu_status_R =
                        in_be32(&spu->problem->spu_status_R);
                crash_spu_info[i].saved_spu_npc_RW =
                        in_be32(&spu->problem->spu_npc_RW);

                crash_spu_info[i].saved_mfc_dar = spu_mfc_dar_get(spu);
                crash_spu_info[i].saved_mfc_dsisr = spu_mfc_dsisr_get(spu);
                tmp = spu_mfc_sr1_get(spu);
                crash_spu_info[i].saved_mfc_sr1_RW = tmp;

                tmp &= ~MFC_STATE1_MASTER_RUN_CONTROL_MASK;
                spu_mfc_sr1_set(spu, tmp);

                __delay(200);
        }
}

static void crash_register_spus(struct list_head *list)
{
        struct spu *spu;
        int ret;

        list_for_each_entry(spu, list, full_list) {
                if (WARN_ON(spu->number >= CRASH_NUM_SPUS))
                        continue;

                crash_spu_info[spu->number].spu = spu;
        }

        ret = crash_shutdown_register(&crash_kexec_stop_spus);
        if (ret)
                printk(KERN_ERR "Could not register SPU crash handler\n");
}

#else
static inline void crash_register_spus(struct list_head *list)
{
}
#endif

static void spu_shutdown(void)
{
        struct spu *spu;

        mutex_lock(&spu_full_list_mutex);
        list_for_each_entry(spu, &spu_full_list, full_list) {
                spu_free_irqs(spu);
                spu_destroy_spu(spu);
        }
        mutex_unlock(&spu_full_list_mutex);
}

static struct syscore_ops spu_syscore_ops = {
        .shutdown = spu_shutdown,
};

static int __init init_spu_base(void)
{
        int i, ret = 0;

        for (i = 0; i < MAX_NUMNODES; i++) {
                mutex_init(&cbe_spu_info[i].list_mutex);
                INIT_LIST_HEAD(&cbe_spu_info[i].spus);
        }

        if (!spu_management_ops)
                goto out;

        /* create system subsystem for spus */
        ret = subsys_system_register(&spu_subsys, NULL);
        if (ret)
                goto out;

        ret = spu_enumerate_spus(create_spu);

        if (ret < 0) {
                printk(KERN_WARNING "%s: Error initializing spus\n",
                        __func__);
                goto out_unregister_subsys;
        }

        if (ret > 0)
                fb_append_extra_logo(&logo_spe_clut224, ret);

        mutex_lock(&spu_full_list_mutex);
        xmon_register_spus(&spu_full_list);
        crash_register_spus(&spu_full_list);
        mutex_unlock(&spu_full_list_mutex);
        spu_add_dev_attr(&dev_attr_stat);
        register_syscore_ops(&spu_syscore_ops);

        spu_init_affinity();

        return 0;

out_unregister_subsys:
        bus_unregister(&spu_subsys);
out:
        return ret;
}
device_initcall(init_spu_base);