#define DEBUG

#include <linux/wait.h>
#include <linux/ptrace.h>

#include <asm/spu.h>
#include <asm/spu_priv1.h>
#include <asm/io.h>
#include <asm/unistd.h>

#include "spufs.h"

/* interrupt-level stop callback function. */
void spufs_stop_callback(struct spu *spu)
{
	struct spu_context *ctx = spu->ctx;

	/*
	 * It should be impossible to preempt a context while an exception
	 * is being processed, since the context switch code is specially
	 * coded to deal with interrupts ... But, just in case, sanity check
	 * the context pointer.  It is OK to return doing nothing since
	 * the exception will be regenerated when the context is resumed.
	 */
	if (ctx) {
		/* Copy exception arguments into module specific structure */
		ctx->csa.class_0_pending = spu->class_0_pending;
		ctx->csa.dsisr = spu->dsisr;
		ctx->csa.dar = spu->dar;

		/* ensure that the exception status has hit memory before a
		 * thread waiting on the context's stop queue is woken */
		smp_wmb();

		wake_up_all(&ctx->stop_wq);
	}

	/* Clear callback arguments from spu structure */
	spu->class_0_pending = 0;
	spu->dsisr = 0;
	spu->dar = 0;
}

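/*
 * Check whether the SPU has stopped: either a scheduler notification is
 * pending, the status register shows a stop/halt/single-step/invalid-
 * instruction condition, or the saved context holds a class 0 or class 1
 * exception that still needs to be handled.
 */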
int spu_stopped(struct spu_context *ctx, u32 *stat)
{
	u64 dsisr;
	u32 stopped;

	*stat = ctx->ops->status_read(ctx);

	if (test_bit(SPU_SCHED_NOTIFY_ACTIVE, &ctx->sched_flags))
		return 1;

	stopped = SPU_STATUS_INVALID_INSTR | SPU_STATUS_SINGLE_STEP |
		SPU_STATUS_STOPPED_BY_HALT | SPU_STATUS_STOPPED_BY_STOP;
	if (*stat & stopped)
		return 1;

	dsisr = ctx->csa.dsisr;
	if (dsisr & (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED))
		return 1;

	if (ctx->csa.class_0_pending)
		return 1;

	return 0;
}

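/*
 * Run the isolated-mode loader: purge the MFC DMA queue, switch the SPE
 * into privileged (kernel) mode so the loader is accessible, pass the
 * loader's address through the signal notification registers, start the
 * SPU in isolated mode, and wait for the load to finish before dropping
 * back to problem state.
 */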
static int spu_setup_isolated(struct spu_context *ctx)
{
	int ret;
	u64 __iomem *mfc_cntl;
	u64 sr1;
	u32 status;
	unsigned long timeout;
	const u32 status_loading = SPU_STATUS_RUNNING
		| SPU_STATUS_ISOLATED_STATE | SPU_STATUS_ISOLATED_LOAD_STATUS;

	ret = -ENODEV;
	if (!isolated_loader)
		goto out;

	/*
	 * We need to exclude userspace access to the context.
	 *
	 * To protect against memory access we invalidate all ptes
	 * and make sure the pagefault handlers block on the mutex.
	 */
	spu_unmap_mappings(ctx);

	mfc_cntl = &ctx->spu->priv2->mfc_control_RW;

	/* purge the MFC DMA queue to ensure no spurious accesses before we
	 * enter kernel mode */
	timeout = jiffies + HZ;
	out_be64(mfc_cntl, MFC_CNTL_PURGE_DMA_REQUEST);
	while ((in_be64(mfc_cntl) & MFC_CNTL_PURGE_DMA_STATUS_MASK)
			!= MFC_CNTL_PURGE_DMA_COMPLETE) {
		if (time_after(jiffies, timeout)) {
			printk(KERN_ERR "%s: timeout flushing MFC DMA queue\n",
					__func__);
			ret = -EIO;
			goto out;
		}
		cond_resched();
	}

	/* put the SPE in kernel mode to allow access to the loader */
	sr1 = spu_mfc_sr1_get(ctx->spu);
	sr1 &= ~MFC_STATE1_PROBLEM_STATE_MASK;
	spu_mfc_sr1_set(ctx->spu, sr1);

	/* start the loader */
	ctx->ops->signal1_write(ctx, (unsigned long)isolated_loader >> 32);
	ctx->ops->signal2_write(ctx,
			(unsigned long)isolated_loader & 0xffffffff);

	ctx->ops->runcntl_write(ctx,
			SPU_RUNCNTL_RUNNABLE | SPU_RUNCNTL_ISOLATE);

	ret = 0;
	timeout = jiffies + HZ;
	while (((status = ctx->ops->status_read(ctx)) & status_loading) ==
			status_loading) {
		if (time_after(jiffies, timeout)) {
			printk(KERN_ERR "%s: timeout waiting for loader\n",
					__func__);
			ret = -EIO;
			goto out_drop_priv;
		}
		cond_resched();
	}

	if (!(status & SPU_STATUS_RUNNING)) {
		/* If isolated LOAD has failed: run SPU, we will get a
		 * stop-and-signal event later. */
		pr_debug("%s: isolated LOAD failed\n", __func__);
		ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_RUNNABLE);
		ret = -EACCES;
		goto out_drop_priv;
	}

	if (!(status & SPU_STATUS_ISOLATED_STATE)) {
		/* This isn't allowed by the CBEA, but check anyway */
		pr_debug("%s: SPU fell out of isolated mode?\n", __func__);
		ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_STOP);
		ret = -EINVAL;
		goto out_drop_priv;
	}

out_drop_priv:
	/* Finished accessing the loader. Drop kernel mode */
	sr1 |= MFC_STATE1_PROBLEM_STATE_MASK;
	spu_mfc_sr1_set(ctx->spu, sr1);

out:
	return ret;
}

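/*
 * Prepare a context for execution: activate it if it is saved, apply
 * isolated-mode setup when requested, and make the SPU runnable.  For
 * normally scheduled contexts this also programs the next program
 * counter and selects normal or single-step execution mode.
 */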
static int spu_run_init(struct spu_context *ctx, u32 *npc)
{
	unsigned long runcntl = SPU_RUNCNTL_RUNNABLE;
	int ret;

	spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);

	/*
	 * NOSCHED is synchronous scheduling with respect to the caller.
	 * The caller waits for the context to be loaded.
	 */
	if (ctx->flags & SPU_CREATE_NOSCHED) {
		if (ctx->state == SPU_STATE_SAVED) {
			ret = spu_activate(ctx, 0);
			if (ret)
				return ret;
		}
	}

	/*
	 * Apply special setup as required.
	 */
	if (ctx->flags & SPU_CREATE_ISOLATE) {
		if (!(ctx->ops->status_read(ctx) & SPU_STATUS_ISOLATED_STATE)) {
			ret = spu_setup_isolated(ctx);
			if (ret)
				return ret;
		}

		/*
		 * If userspace has set the runcntl register (e.g. to
		 * issue an isolated exit), we need to re-set it here.
		 */
		runcntl = ctx->ops->runcntl_read(ctx) &
			(SPU_RUNCNTL_RUNNABLE | SPU_RUNCNTL_ISOLATE);
		if (runcntl == 0)
			runcntl = SPU_RUNCNTL_RUNNABLE;
	}

	if (ctx->flags & SPU_CREATE_NOSCHED) {
		spuctx_switch_state(ctx, SPU_UTIL_USER);
		ctx->ops->runcntl_write(ctx, runcntl);
	} else {
		unsigned long privcntl;

		if (test_thread_flag(TIF_SINGLESTEP))
			privcntl = SPU_PRIVCNTL_MODE_SINGLE_STEP;
		else
			privcntl = SPU_PRIVCNTL_MODE_NORMAL;

		ctx->ops->npc_write(ctx, *npc);
		ctx->ops->privcntl_write(ctx, privcntl);
		ctx->ops->runcntl_write(ctx, runcntl);

		if (ctx->state == SPU_STATE_SAVED) {
			ret = spu_activate(ctx, 0);
			if (ret)
				return ret;
		} else {
			spuctx_switch_state(ctx, SPU_UTIL_USER);
		}
	}

	return 0;
}

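/*
 * Tear-down after the SPU has stopped: take the context off the run
 * queue, read back the status and next program counter, mark the
 * context idle, and drop the state mutex.
 */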
static int spu_run_fini(struct spu_context *ctx, u32 *npc,
		u32 *status)
{
	int ret = 0;

	spu_del_from_rq(ctx);

	*status = ctx->ops->status_read(ctx);
	*npc = ctx->ops->npc_read(ctx);

	spuctx_switch_state(ctx, SPU_UTIL_IDLE_LOADED);
	spu_release(ctx);

	if (signal_pending(current))
		ret = -ERESTARTSYS;

	return ret;
}

/*
 * SPU syscall restarting is tricky because we violate the basic
 * assumption that the signal handler is running on the interrupted
 * thread.  Here, the handler instead runs in PowerPC user space code,
 * while the syscall was issued from the SPU.
 * This means we can only do a very rough approximation of POSIX
 * signal semantics.
 */
static int spu_handle_restartsys(struct spu_context *ctx, long *spu_ret,
		unsigned int *npc)
{
	int ret;

	switch (*spu_ret) {
	case -ERESTARTSYS:
	case -ERESTARTNOINTR:
		/*
		 * Enter the regular syscall restarting for
		 * sys_spu_run, then restart the SPU syscall
		 * callback.
		 */
		*npc -= 8;
		ret = -ERESTARTSYS;
		break;
	case -ERESTARTNOHAND:
	case -ERESTART_RESTARTBLOCK:
		/*
		 * Restart block is too hard for now, just return -EINTR
		 * to the SPU.
		 * ERESTARTNOHAND comes from sys_pause, we also return
		 * -EINTR from there.
		 * Assume that we need to be restarted ourselves though.
		 */
		*spu_ret = -EINTR;
		ret = -ERESTARTSYS;
		break;
	default:
		printk(KERN_WARNING "%s: unexpected return code %ld\n",
				__func__, *spu_ret);
		ret = 0;
	}
	return ret;
}

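/*
 * Handle a syscall request from the SPU program (signalled by a
 * stop-and-signal with code 0x2104): fetch the syscall block from local
 * store, run the system call on the PPU side without holding the
 * context, and write the result back before resuming the SPU.
 */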
static int spu_process_callback(struct spu_context *ctx)
{
	struct spu_syscall_block s;
	u32 ls_pointer, npc;
	void __iomem *ls;
	long spu_ret;
	int ret, ret2;

	/* get syscall block from local store */
	npc = ctx->ops->npc_read(ctx) & ~3;
	ls = (void __iomem *)ctx->ops->get_ls(ctx);
	ls_pointer = in_be32(ls + npc);
	if (ls_pointer > (LS_SIZE - sizeof(s)))
		return -EFAULT;
	memcpy_fromio(&s, ls + ls_pointer, sizeof(s));

	/* do actual syscall without pinning the spu */
	ret = 0;
	spu_ret = -ENOSYS;
	npc += 4;

	if (s.nr_ret < __NR_syscalls) {
		spu_release(ctx);
		/* do actual system call from here */
		spu_ret = spu_sys_callback(&s);
		if (spu_ret <= -ERESTARTSYS) {
			ret = spu_handle_restartsys(ctx, &spu_ret, &npc);
		}
		ret2 = spu_acquire(ctx);
		if (ret == -ERESTARTSYS)
			return ret;
		if (ret2)
			return -EINTR;
	}

	/* write result, jump over indirect pointer */
	memcpy_toio(ls + ls_pointer, &spu_ret, sizeof(spu_ret));
	ctx->ops->npc_write(ctx, npc);
	ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_RUNNABLE);
	return ret;
}

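/*
 * Main entry point for the spu_run(2) syscall: set the context running
 * and keep handling its stop conditions (syscalls, faults, events) until
 * the SPU program stops for a reason the caller has to deal with.
 */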
long spufs_run_spu(struct spu_context *ctx, u32 *npc, u32 *event)
{
	int ret;
	struct spu *spu;
	u32 status;

	if (mutex_lock_interruptible(&ctx->run_mutex))
		return -ERESTARTSYS;

	spu_enable_spu(ctx);
	ctx->event_return = 0;

	ret = spu_acquire(ctx);
	if (ret)
		goto out_unlock;

	spu_update_sched_info(ctx);

	ret = spu_run_init(ctx, npc);
	if (ret) {
		spu_release(ctx);
		goto out;
	}

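	/*
	 * Wait for the SPU to stop, then handle whatever caused the stop:
	 * scheduler notifications, SPU-issued syscalls, and class 0/1
	 * exceptions.  Loop until the SPU stops for a reason that must be
	 * reported back to the caller.
	 */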
	do {
		ret = spufs_wait(ctx->stop_wq, spu_stopped(ctx, &status));
		if (unlikely(ret)) {
			/*
			 * This is nasty: we need the state_mutex for all the
			 * bookkeeping even if the syscall was interrupted by
			 * a signal. ewww.
			 */
			mutex_lock(&ctx->state_mutex);
			break;
		}
		spu = ctx->spu;
		if (unlikely(test_and_clear_bit(SPU_SCHED_NOTIFY_ACTIVE,
				&ctx->sched_flags))) {
			if (!(status & SPU_STATUS_STOPPED_BY_STOP)) {
				spu_switch_notify(spu, ctx);
				continue;
			}
		}

		spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);

		if ((status & SPU_STATUS_STOPPED_BY_STOP) &&
		    (status >> SPU_STOP_STATUS_SHIFT == 0x2104)) {
			ret = spu_process_callback(ctx);
			if (ret)
				break;
			status &= ~SPU_STATUS_STOPPED_BY_STOP;
		}
		ret = spufs_handle_class1(ctx);
		if (ret)
			break;

		ret = spufs_handle_class0(ctx);
		if (ret)
			break;

		if (signal_pending(current))
			ret = -ERESTARTSYS;
	} while (!ret && !(status & (SPU_STATUS_STOPPED_BY_STOP |
				     SPU_STATUS_STOPPED_BY_HALT |
				     SPU_STATUS_SINGLE_STEP)));

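	/*
	 * Stop-and-signal codes in the 0x21xx range are used for
	 * library-assisted calls; count them for the libassist statistic.
	 */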
	if ((status & SPU_STATUS_STOPPED_BY_STOP) &&
	    (((status >> SPU_STOP_STATUS_SHIFT) & 0x3f00) == 0x2100) &&
	    (ctx->state == SPU_STATE_RUNNABLE))
		ctx->stats.libassist++;

	spu_disable_spu(ctx);
	ret = spu_run_fini(ctx, npc, &status);
	spu_yield(ctx);

	if ((ret == 0) ||
	    ((ret == -ERESTARTSYS) &&
	     ((status & SPU_STATUS_STOPPED_BY_HALT) ||
	      (status & SPU_STATUS_SINGLE_STEP) ||
	      ((status & SPU_STATUS_STOPPED_BY_STOP) &&
	       (status >> SPU_STOP_STATUS_SHIFT != 0x2104)))))
		ret = status;

	/* Note: we don't need to force_sig SIGTRAP on single-step
	 * since we have TIF_SINGLESTEP set, thus the kernel will do
	 * it upon return from the syscall anyway.
	 */
	if (unlikely(status & SPU_STATUS_SINGLE_STEP))
		ret = -ERESTARTSYS;
	else if (unlikely((status & SPU_STATUS_STOPPED_BY_STOP)
	    && (status >> SPU_STOP_STATUS_SHIFT) == 0x3fff)) {
		force_sig(SIGTRAP, current);
		ret = -ERESTARTSYS;
	}

out:
	*event = ctx->event_return;
out_unlock:
	mutex_unlock(&ctx->run_mutex);
	return ret;
}