#define DEBUG

#include <linux/wait.h>
#include <linux/ptrace.h>

#include <asm/spu.h>
#include <asm/spu_priv1.h>
#include <asm/io.h>
#include <asm/unistd.h>

#include "spufs.h"

/* interrupt-level stop callback function. */
void spufs_stop_callback(struct spu *spu, int irq)
{
	struct spu_context *ctx = spu->ctx;

	/*
	 * It should be impossible to preempt a context while an exception
	 * is being processed, since the context switch code is specially
	 * coded to deal with interrupts ... But, just in case, sanity check
	 * the context pointer.  It is OK to return doing nothing since
	 * the exception will be regenerated when the context is resumed.
	 */
	if (ctx) {
		/* Copy exception arguments into module specific structure */
		switch (irq) {
		case 0:
			ctx->csa.class_0_pending = spu->class_0_pending;
			ctx->csa.class_0_dsisr = spu->class_0_dsisr;
			ctx->csa.class_0_dar = spu->class_0_dar;
			break;
		case 1:
			ctx->csa.class_1_dsisr = spu->class_1_dsisr;
			ctx->csa.class_1_dar = spu->class_1_dar;
			break;
		case 2:
			break;
		}

		/* ensure that the exception status has hit memory before a
		 * thread waiting on the context's stop queue is woken */
		smp_wmb();

		wake_up_all(&ctx->stop_wq);
	}
}

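/*
 * Wait-queue predicate for spufs_wait(): returns non-zero once the context
 * has stopped for a reason that needs handling, either a genuine stop/halt
 * condition or a pending class 0/1 exception recorded by the callback above.
 */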
int spu_stopped(struct spu_context *ctx, u32 *stat)
{
	u64 dsisr;
	u32 stopped;

	*stat = ctx->ops->status_read(ctx);

	if (test_bit(SPU_SCHED_NOTIFY_ACTIVE, &ctx->sched_flags))
		return 1;

	stopped = SPU_STATUS_INVALID_INSTR | SPU_STATUS_SINGLE_STEP |
		SPU_STATUS_STOPPED_BY_HALT | SPU_STATUS_STOPPED_BY_STOP;
	if (!(*stat & SPU_STATUS_RUNNING) && (*stat & stopped))
		return 1;

	dsisr = ctx->csa.class_0_dsisr;
	if (dsisr & (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED))
		return 1;

	dsisr = ctx->csa.class_1_dsisr;
	if (dsisr & (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED))
		return 1;

	if (ctx->csa.class_0_pending)
		return 1;

	return 0;
}

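/*
 * Hand the SPE over to the isolation-mode loader: purge the MFC DMA queue,
 * drop the SPE into privileged (kernel) mode, point the signal notification
 * registers at the loader image and start it with the ISOLATE bit set.
 */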
static int spu_setup_isolated(struct spu_context *ctx)
{
	int ret;
	u64 __iomem *mfc_cntl;
	u64 sr1;
	u32 status;
	unsigned long timeout;
	const u32 status_loading = SPU_STATUS_RUNNING
		| SPU_STATUS_ISOLATED_STATE | SPU_STATUS_ISOLATED_LOAD_STATUS;

	ret = -ENODEV;
	if (!isolated_loader)
		goto out;

	/*
	 * We need to exclude userspace access to the context.
	 *
	 * To protect against memory access we invalidate all ptes
	 * and make sure the pagefault handlers block on the mutex.
	 */
	spu_unmap_mappings(ctx);

	mfc_cntl = &ctx->spu->priv2->mfc_control_RW;

	/* purge the MFC DMA queue to ensure no spurious accesses before we
	 * enter kernel mode */
	timeout = jiffies + HZ;
	out_be64(mfc_cntl, MFC_CNTL_PURGE_DMA_REQUEST);
	while ((in_be64(mfc_cntl) & MFC_CNTL_PURGE_DMA_STATUS_MASK)
			!= MFC_CNTL_PURGE_DMA_COMPLETE) {
		if (time_after(jiffies, timeout)) {
			printk(KERN_ERR "%s: timeout flushing MFC DMA queue\n",
					__func__);
			ret = -EIO;
			goto out;
		}
		cond_resched();
	}

	/* put the SPE in kernel mode to allow access to the loader */
	sr1 = spu_mfc_sr1_get(ctx->spu);
	sr1 &= ~MFC_STATE1_PROBLEM_STATE_MASK;
	spu_mfc_sr1_set(ctx->spu, sr1);

	/* start the loader */
	ctx->ops->signal1_write(ctx, (unsigned long)isolated_loader >> 32);
	ctx->ops->signal2_write(ctx,
			(unsigned long)isolated_loader & 0xffffffff);

	ctx->ops->runcntl_write(ctx,
			SPU_RUNCNTL_RUNNABLE | SPU_RUNCNTL_ISOLATE);

	ret = 0;
	timeout = jiffies + HZ;
	while (((status = ctx->ops->status_read(ctx)) & status_loading) ==
				status_loading) {
		if (time_after(jiffies, timeout)) {
			printk(KERN_ERR "%s: timeout waiting for loader\n",
					__func__);
			ret = -EIO;
			goto out_drop_priv;
		}
		cond_resched();
	}

	if (!(status & SPU_STATUS_RUNNING)) {
		/* If isolated LOAD has failed: run SPU, we will get a
		 * stop-and-signal later. */
		pr_debug("%s: isolated LOAD failed\n", __func__);
		ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_RUNNABLE);
		ret = -EACCES;
		goto out_drop_priv;
	}

	if (!(status & SPU_STATUS_ISOLATED_STATE)) {
		/* This isn't allowed by the CBEA, but check anyway */
		pr_debug("%s: SPU fell out of isolated mode?\n", __func__);
		ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_STOP);
		ret = -EINVAL;
		goto out_drop_priv;
	}

out_drop_priv:
	/* Finished accessing the loader. Drop kernel mode */
	sr1 |= MFC_STATE1_PROBLEM_STATE_MASK;
	spu_mfc_sr1_set(ctx->spu, sr1);

out:
	return ret;
}

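/*
 * Prepare a context for execution: load it onto an SPU where required,
 * apply isolated-mode setup, and write the NPC, privcntl and runcntl
 * registers so the SPU starts running (or single-stepping).
 */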
static int spu_run_init(struct spu_context *ctx, u32 *npc)
{
	unsigned long runcntl = SPU_RUNCNTL_RUNNABLE;
	int ret;

	spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);

	/*
	 * NOSCHED is synchronous scheduling with respect to the caller.
	 * The caller waits for the context to be loaded.
	 */
	if (ctx->flags & SPU_CREATE_NOSCHED) {
		if (ctx->state == SPU_STATE_SAVED) {
			ret = spu_activate(ctx, 0);
			if (ret)
				return ret;
		}
	}

	/*
	 * Apply special setup as required.
	 */
	if (ctx->flags & SPU_CREATE_ISOLATE) {
		if (!(ctx->ops->status_read(ctx) & SPU_STATUS_ISOLATED_STATE)) {
			ret = spu_setup_isolated(ctx);
			if (ret)
				return ret;
		}

		/*
		 * If userspace has set the runcntl register (e.g., to
		 * issue an isolated exit), we need to re-set it here.
		 */
		runcntl = ctx->ops->runcntl_read(ctx) &
			(SPU_RUNCNTL_RUNNABLE | SPU_RUNCNTL_ISOLATE);
		if (runcntl == 0)
			runcntl = SPU_RUNCNTL_RUNNABLE;
	}

	if (ctx->flags & SPU_CREATE_NOSCHED) {
		spuctx_switch_state(ctx, SPU_UTIL_USER);
		ctx->ops->runcntl_write(ctx, runcntl);
	} else {
		unsigned long privcntl;

		if (test_thread_flag(TIF_SINGLESTEP))
			privcntl = SPU_PRIVCNTL_MODE_SINGLE_STEP;
		else
			privcntl = SPU_PRIVCNTL_MODE_NORMAL;

		ctx->ops->npc_write(ctx, *npc);
		ctx->ops->privcntl_write(ctx, privcntl);
		ctx->ops->runcntl_write(ctx, runcntl);

		if (ctx->state == SPU_STATE_SAVED) {
			ret = spu_activate(ctx, 0);
			if (ret)
				return ret;
		} else {
			spuctx_switch_state(ctx, SPU_UTIL_USER);
		}
	}

	set_bit(SPU_SCHED_SPU_RUN, &ctx->sched_flags);
	return 0;
}

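/*
 * Tear-down after the run loop: take the context off the run queue,
 * read back the final status and NPC for user space, and release the
 * context. Returns -ERESTARTSYS if a signal is pending.
 */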
static int spu_run_fini(struct spu_context *ctx, u32 *npc,
			u32 *status)
{
	int ret = 0;

	spu_del_from_rq(ctx);

	*status = ctx->ops->status_read(ctx);
	*npc = ctx->ops->npc_read(ctx);

	spuctx_switch_state(ctx, SPU_UTIL_IDLE_LOADED);
	clear_bit(SPU_SCHED_SPU_RUN, &ctx->sched_flags);
	spu_release(ctx);

	if (signal_pending(current))
		ret = -ERESTARTSYS;

	return ret;
}

/*
 * SPU syscall restarting is tricky because we violate the basic
 * assumption that the signal handler is running on the interrupted
 * thread. Here instead, the handler runs on PowerPC user space code,
 * while the syscall was called from the SPU.
 * This means we can only do a very rough approximation of POSIX
 * signal semantics.
 */
static int spu_handle_restartsys(struct spu_context *ctx, long *spu_ret,
		unsigned int *npc)
{
	int ret;

	switch (*spu_ret) {
	case -ERESTARTSYS:
	case -ERESTARTNOINTR:
		/*
		 * Enter the regular syscall restarting for
		 * sys_spu_run, then restart the SPU syscall
		 * callback.
		 */
		*npc -= 8;
		ret = -ERESTARTSYS;
		break;
	case -ERESTARTNOHAND:
	case -ERESTART_RESTARTBLOCK:
		/*
		 * Restart block is too hard for now, just return -EINTR
		 * to the SPU.
		 * ERESTARTNOHAND comes from sys_pause, we also return
		 * -EINTR from there.
		 * Assume that we need to be restarted ourselves though.
		 */
		*spu_ret = -EINTR;
		ret = -ERESTARTSYS;
		break;
	default:
		printk(KERN_WARNING "%s: unexpected return code %ld\n",
			__func__, *spu_ret);
		ret = 0;
	}
	return ret;
}

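/*
 * Handle a PPE-assisted system call from the SPU. The stopped NPC points
 * at a word in local store holding the address of a struct spu_syscall_block;
 * copy it out, run the syscall on the PPE while the context is released,
 * then write the result back and restart the SPU past the pointer word.
 */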
static int spu_process_callback(struct spu_context *ctx)
{
	struct spu_syscall_block s;
	u32 ls_pointer, npc;
	void __iomem *ls;
	long spu_ret;
	int ret;

	/* get syscall block from local store */
	npc = ctx->ops->npc_read(ctx) & ~3;
	ls = (void __iomem *)ctx->ops->get_ls(ctx);
	ls_pointer = in_be32(ls + npc);
	if (ls_pointer > (LS_SIZE - sizeof(s)))
		return -EFAULT;
	memcpy_fromio(&s, ls + ls_pointer, sizeof(s));

	/* do actual syscall without pinning the spu */
	ret = 0;
	spu_ret = -ENOSYS;
	npc += 4;

	if (s.nr_ret < __NR_syscalls) {
		spu_release(ctx);
		/* do actual system call from here */
		spu_ret = spu_sys_callback(&s);
		if (spu_ret <= -ERESTARTSYS) {
			ret = spu_handle_restartsys(ctx, &spu_ret, &npc);
		}
		mutex_lock(&ctx->state_mutex);
		if (ret == -ERESTARTSYS)
			return ret;
	}

	/* need to re-get the ls, as it may have changed when we released the
	 * spu */
	ls = (void __iomem *)ctx->ops->get_ls(ctx);

	/* write result, jump over indirect pointer */
	memcpy_toio(ls + ls_pointer, &spu_ret, sizeof(spu_ret));
	ctx->ops->npc_write(ctx, npc);
	ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_RUNNABLE);
	return ret;
}

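/*
 * Main entry point for the spu_run system call: start the context running
 * and sleep until it stops, handling syscall callbacks, faults and signals
 * along the way. Returns the final SPU status word (or a negative errno)
 * and reports the stop reason through *event.
 */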
long spufs_run_spu(struct spu_context *ctx, u32 *npc, u32 *event)
{
	int ret;
	struct spu *spu;
	u32 status;

	if (mutex_lock_interruptible(&ctx->run_mutex))
		return -ERESTARTSYS;

	ctx->event_return = 0;

	ret = spu_acquire(ctx);
	if (ret)
		goto out_unlock;

	spu_enable_spu(ctx);

	spu_update_sched_info(ctx);

	ret = spu_run_init(ctx, npc);
	if (ret) {
		spu_release(ctx);
		goto out;
	}

	do {
		ret = spufs_wait(ctx->stop_wq, spu_stopped(ctx, &status));
		if (unlikely(ret)) {
			/*
			 * This is nasty: we need the state_mutex for all the
			 * bookkeeping even if the syscall was interrupted by
			 * a signal. ewww.
			 */
			mutex_lock(&ctx->state_mutex);
			break;
		}
		spu = ctx->spu;
		if (unlikely(test_and_clear_bit(SPU_SCHED_NOTIFY_ACTIVE,
						&ctx->sched_flags))) {
			if (!(status & SPU_STATUS_STOPPED_BY_STOP)) {
				spu_switch_notify(spu, ctx);
				continue;
			}
		}

		spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);

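		/*
		 * Stop-and-signal code 0x2104 is the spufs convention for
		 * an SPU-initiated system call; handle it and clear the
		 * stop condition so the loop resumes the context.
		 */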
		if ((status & SPU_STATUS_STOPPED_BY_STOP) &&
		    (status >> SPU_STOP_STATUS_SHIFT == 0x2104)) {
			ret = spu_process_callback(ctx);
			if (ret)
				break;
			status &= ~SPU_STATUS_STOPPED_BY_STOP;
		}
		ret = spufs_handle_class1(ctx);
		if (ret)
			break;

		ret = spufs_handle_class0(ctx);
		if (ret)
			break;

		if (signal_pending(current))
			ret = -ERESTARTSYS;
	} while (!ret && !(status & (SPU_STATUS_STOPPED_BY_STOP |
				      SPU_STATUS_STOPPED_BY_HALT |
				      SPU_STATUS_SINGLE_STEP)));

	spu_disable_spu(ctx);
	ret = spu_run_fini(ctx, npc, &status);
	spu_yield(ctx);

	spu_switch_log_notify(NULL, ctx, SWITCH_LOG_EXIT, status);

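	/*
	 * Stop-and-signal codes in the 0x21xx range are library-assisted
	 * calls handled on the PPE side; account for them in the context's
	 * libassist statistic.
	 */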
	if ((status & SPU_STATUS_STOPPED_BY_STOP) &&
	    (((status >> SPU_STOP_STATUS_SHIFT) & 0x3f00) == 0x2100))
		ctx->stats.libassist++;

	if ((ret == 0) ||
	    ((ret == -ERESTARTSYS) &&
	     ((status & SPU_STATUS_STOPPED_BY_HALT) ||
	      (status & SPU_STATUS_SINGLE_STEP) ||
	      ((status & SPU_STATUS_STOPPED_BY_STOP) &&
	       (status >> SPU_STOP_STATUS_SHIFT != 0x2104)))))
		ret = status;

	/* Note: we don't need to force_sig SIGTRAP on single-step
	 * since we have TIF_SINGLESTEP set, thus the kernel will do
	 * it upon return from the syscall anyway.
	 */
	if (unlikely(status & SPU_STATUS_SINGLE_STEP))
		ret = -ERESTARTSYS;

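	/*
	 * Stop-and-signal code 0x3fff is treated as a breakpoint-style
	 * stop: deliver SIGTRAP to the calling thread.
	 */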
	else if (unlikely((status & SPU_STATUS_STOPPED_BY_STOP)
	    && (status >> SPU_STOP_STATUS_SHIFT) == 0x3fff)) {
		force_sig(SIGTRAP, current);
		ret = -ERESTARTSYS;
	}

out:
	*event = ctx->event_return;
out_unlock:
	mutex_unlock(&ctx->run_mutex);
	return ret;
}