[PATCH] spufs: Improved SPU preemptability.

This patch makes it easier to preempt an SPU context by
having the scheduler hold ctx->state_sema for much shorter
periods of time.
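
For illustration, the scheduler-side pattern becomes roughly the
sketch below.  This is only a sketch: apart from ctx->state_sema
itself, the names used here (spu_preempt_sketch(), spu_save_context(),
SPU_STATE_SAVED, ctx->stop_wq) are assumptions and need not match the
actual spufs code.

  /*
   * Illustrative only: preemption now takes ctx->state_sema just
   * around the context save instead of holding it for as long as
   * the SPU program runs.
   */
  static void spu_preempt_sketch(struct spu_context *ctx)
  {
  	down(&ctx->state_sema);		/* short critical section */
  	spu_save_context(ctx);		/* assumed save helper */
  	ctx->state = SPU_STATE_SAVED;
  	up(&ctx->state_sema);
  	wake_up_all(&ctx->stop_wq);	/* run loop notices and re-acquires */
  }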

As part of this restructuring, the control logic for the "run"
operation is moved from arch/ppc64/kernel/spu_base.c to
fs/spufs/file.c.  The base still retains "bottom half"
handlers for class 0 and class 1 irqs.  The new run loop
re-acquires an SPU if the context is preempted.
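
For reference, the shape of the new run loop is roughly as follows.
Again a sketch only: spu_acquire_runnable(), spu_reacquire_runnable(),
spu_stopped(), spufs_wait(), ctx->stop_wq, SPU_STATE_RUNNABLE and the
status bit test are assumed names and need not match the actual
fs/spufs/file.c code.

  /*
   * Illustrative sketch of the new run loop: start the context, wait
   * until the SPU stops or the scheduler takes the SPU away, and in
   * the latter case re-acquire an SPU and keep going.
   */
  static long spufs_run_spu_sketch(struct spu_context *ctx,
  				 u32 *npc, u32 *status)
  {
  	int ret;

  	ret = spu_acquire_runnable(ctx);	/* bind to a physical SPU */
  	if (ret)
  		return ret;

  	do {
  		/* woken by the stop_callback or when preempted */
  		ret = spufs_wait(ctx->stop_wq, spu_stopped(ctx, status));
  		if (ret)
  			break;
  		if (ctx->state != SPU_STATE_RUNNABLE) {
  			/* lost the SPU to preemption: get one back */
  			ret = spu_reacquire_runnable(ctx, npc, status);
  			if (ret)
  				break;
  		}
  	} while (!(*status & SPU_STATUS_STOPPED_BY_STOP));

  	spu_release(ctx);
  	return ret;
  }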

From: Mark Nutter <mnutter@us.ibm.com>
Signed-off-by: Arnd Bergmann <arndb@de.ibm.com>
Signed-off-by: Paul Mackerras <paulus@samba.org>
diff --git a/include/asm-powerpc/spu.h b/include/asm-powerpc/spu.h
index 092ec97..dd91ed8 100644
--- a/include/asm-powerpc/spu.h
+++ b/include/asm-powerpc/spu.h
@@ -135,9 +135,9 @@
 	spinlock_t register_lock;
 
 	u32 stop_code;
-	wait_queue_head_t stop_wq;
 	void (* wbox_callback)(struct spu *spu);
 	void (* ibox_callback)(struct spu *spu);
+	void (* stop_callback)(struct spu *spu);
 
 	char irq_c0[8];
 	char irq_c1[8];
@@ -146,7 +146,8 @@
 
 struct spu *spu_alloc(void);
 void spu_free(struct spu *spu);
-int spu_run(struct spu *spu);
+int spu_irq_class_0_bottom(struct spu *spu);
+int spu_irq_class_1_bottom(struct spu *spu);
 
 extern struct spufs_calls {
 	asmlinkage long (*create_thread)(const char __user *name,