/*
 * Low-level SPU handling
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
 *
 * Author: Arnd Bergmann <arndb@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/module.h>

#include <asm/spu.h>
#include <asm/spu_csa.h>

#include "spufs.h"

/**
 * Handle an SPE event, depending on the context's SPU_CREATE_EVENTS_ENABLED
 * flag.
 *
 * If the context was created with events, we just set the return event.
 * Otherwise, send an appropriate signal to the process.
 */
static void spufs_handle_event(struct spu_context *ctx,
		unsigned long ea, int type)
{
	siginfo_t info;

	if (ctx->flags & SPU_CREATE_EVENTS_ENABLED) {
		ctx->event_return |= type;
		wake_up_all(&ctx->stop_wq);
		return;
	}

	memset(&info, 0, sizeof(info));

	switch (type) {
	case SPE_EVENT_INVALID_DMA:
		info.si_signo = SIGBUS;
		info.si_code = BUS_OBJERR;
		break;
	case SPE_EVENT_SPE_DATA_STORAGE:
		info.si_signo = SIGSEGV;
		info.si_addr = (void __user *)ea;
		info.si_code = SEGV_ACCERR;
		ctx->ops->restart_dma(ctx);
		break;
	case SPE_EVENT_DMA_ALIGNMENT:
		info.si_signo = SIGBUS;
		/* DAR isn't set for an alignment fault :( */
		info.si_code = BUS_ADRALN;
		break;
	case SPE_EVENT_SPE_ERROR:
		info.si_signo = SIGILL;
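		/*
		 * NPC holds the next program counter; back up one 4-byte
		 * SPU instruction so si_addr points at the faulting opcode.
		 */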
		info.si_addr = (void __user *)(unsigned long)
			ctx->ops->npc_read(ctx) - 4;
		info.si_code = ILL_ILLOPC;
		break;
	}

	if (info.si_signo)
		force_sig_info(info.si_signo, &info, current);
}

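/*
 * Class 0 interrupts report SPU error conditions (DMA alignment error,
 * invalid DMA command, SPU error).  Turn each pending bit into an event
 * or signal for the owning process and report the run as failed.
 */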
int spufs_handle_class0(struct spu_context *ctx)
{
	unsigned long stat = ctx->csa.class_0_pending & CLASS0_INTR_MASK;

	if (likely(!stat))
		return 0;

	if (stat & CLASS0_DMA_ALIGNMENT_INTR)
		spufs_handle_event(ctx, ctx->csa.class_0_dar,
			SPE_EVENT_DMA_ALIGNMENT);

	if (stat & CLASS0_INVALID_DMA_COMMAND_INTR)
		spufs_handle_event(ctx, ctx->csa.class_0_dar,
			SPE_EVENT_INVALID_DMA);

	if (stat & CLASS0_SPU_ERROR_INTR)
		spufs_handle_event(ctx, ctx->csa.class_0_dar,
			SPE_EVENT_SPE_ERROR);

	ctx->csa.class_0_pending = 0;

	return -EIO;
}

/*
 * Bottom half handler for page faults; we can't do this from
 * interrupt context, since we might need to sleep.
 * We also need to give up the mutex so we can get scheduled
 * out while waiting for the backing store.
 *
 * TODO: try calling hash_page from the interrupt handler first
 *       in order to speed up the easy case.
 */
int spufs_handle_class1(struct spu_context *ctx)
{
	u64 ea, dsisr, access;
	unsigned long flags;
	unsigned flt = 0;
	int ret;

	/*
	 * dar and dsisr get passed from the registers
	 * to the spu_context, to this function, but not
	 * back to the spu if it gets scheduled again.
	 *
	 * if we don't handle the fault for a saved context
	 * in time, we can still expect to get the same fault
	 * again immediately after the context restore.
	 */
	ea = ctx->csa.class_1_dar;
	dsisr = ctx->csa.class_1_dsisr;

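	/*
	 * Nothing to do unless the MFC reported a missing translation
	 * or a protection violation.
	 */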
	if (!(dsisr & (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED)))
		return 0;

	spuctx_switch_state(ctx, SPU_UTIL_IOWAIT);

	pr_debug("ctx %p: ea %016llx, dsisr %016llx state %d\n", ctx, ea,
		dsisr, ctx->state);

	ctx->stats.hash_flt++;
	if (ctx->state == SPU_STATE_RUNNABLE)
		ctx->spu->stats.hash_flt++;

	/* we must not hold the lock when entering spu_handle_mm_fault */
	spu_release(ctx);

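	/*
	 * A "put" DMA stores to the effective address, so it needs write
	 * access.  0x300 is the data storage exception vector, i.e. treat
	 * this like an ordinary powerpc page fault.
	 */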
	access = (_PAGE_PRESENT | _PAGE_USER);
	access |= (dsisr & MFC_DSISR_ACCESS_PUT) ? _PAGE_RW : 0UL;
	local_irq_save(flags);
	ret = hash_page(ea, access, 0x300);
	local_irq_restore(flags);

	/* hashing failed, so try the actual fault handler */
	if (ret)
		ret = spu_handle_mm_fault(current->mm, ea, dsisr, &flt);

	/*
	 * This is nasty: we need the state_mutex for all the bookkeeping even
	 * if the syscall was interrupted by a signal. ewww.
	 */
	mutex_lock(&ctx->state_mutex);

	/*
	 * Clear dsisr under ctxt lock after handling the fault, so that
	 * time slicing will not preempt the context while the page fault
	 * handler is running. Context switch code removes mappings.
	 */
	ctx->csa.class_1_dar = ctx->csa.class_1_dsisr = 0;

	/*
	 * If we handled the fault successfully and are in runnable
	 * state, restart the DMA.
	 * In case of unhandled error report the problem to user space.
	 */
	if (!ret) {
		if (flt & VM_FAULT_MAJOR)
			ctx->stats.maj_flt++;
		else
			ctx->stats.min_flt++;
		if (ctx->state == SPU_STATE_RUNNABLE) {
			if (flt & VM_FAULT_MAJOR)
				ctx->spu->stats.maj_flt++;
			else
				ctx->spu->stats.min_flt++;
		}

		if (ctx->spu)
			ctx->ops->restart_dma(ctx);
	} else
		spufs_handle_event(ctx, ea, SPE_EVENT_SPE_DATA_STORAGE);

	spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);
	return ret;
}