sh64: Use the shared FPU state restorer.

This kills off the sh64-specific do_fpu_state_restore() and switches
over to the generic fpu_state_restore_trap_handler(). The old fpload()
becomes the common restore_fpu(), and fpinit() gives way to the shared
init_fpu().
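
For reference, the shared path lives in arch/sh/kernel/cpu/fpu.c:
fpu_state_restore_trap_handler() is a thin trap wrapper around
fpu_state_restore(), which in rough outline (simplified; error
handling and the lazy-FPU bookkeeping are omitted, see the shared
code for the real thing) does:

	void fpu_state_restore(struct pt_regs *regs)
	{
		struct task_struct *tsk = current;

		/* The FPU must never be touched from kernel mode. */
		if (unlikely(!user_mode(regs)))
			BUG();

		/* First FPU use: allocate and initialize the state. */
		if (!tsk_used_math(tsk))
			init_fpu(tsk);

		grab_fpu(regs);		/* clear SR.FD for this task */
		restore_fpu(tsk);	/* reload the saved FP registers */
	}

restore_fpu() here is the sh5-specific register reload that this
patch derives from the old fpload().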

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
diff --git a/arch/sh/kernel/cpu/sh5/entry.S b/arch/sh/kernel/cpu/sh5/entry.S
index 8f13f73..36d031c 100644
--- a/arch/sh/kernel/cpu/sh5/entry.S
+++ b/arch/sh/kernel/cpu/sh5/entry.S
@@ -1124,7 +1124,7 @@
 	pta	its_IRQ, tr0
 	beqi/l	r4, EVENT_INTERRUPT, tr0
 #ifdef CONFIG_SH_FPU
-	movi	do_fpu_state_restore, r6
+	movi	fpu_state_restore_trap_handler, r6
 #else
 	movi	do_exception_error, r6
 #endif
@@ -1135,7 +1135,7 @@
 	pta	its_IRQ, tr0
 	beqi/l	r4, EVENT_INTERRUPT, tr0
 #ifdef CONFIG_SH_FPU
-	movi	do_fpu_state_restore, r6
+	movi	fpu_state_restore_trap_handler, r6
 #else
 	movi	do_exception_error, r6
 #endif
diff --git a/arch/sh/kernel/cpu/sh5/fpu.c b/arch/sh/kernel/cpu/sh5/fpu.c
index 92df285..4b3bb35 100644
--- a/arch/sh/kernel/cpu/sh5/fpu.c
+++ b/arch/sh/kernel/cpu/sh5/fpu.c
@@ -15,24 +15,6 @@
 #include <linux/sched.h>
 #include <linux/signal.h>
 #include <asm/processor.h>
-#include <asm/user.h>
-#include <asm/io.h>
-#include <asm/fpu.h>
-
-/*
- * Initially load the FPU with signalling NANS.  This bit pattern
- * has the property that no matter whether considered as single or as
- * double precision, it still represents a signalling NAN.
- */
-#define sNAN64		0xFFFFFFFFFFFFFFFFULL
-#define sNAN32		0xFFFFFFFFUL
-
-static union thread_xstate init_fpuregs = {
-	.hardfpu = {
-		.fp_regs = { [0 ... 63] = sNAN32 },
-		.fpscr = FPSCR_INIT
-	}
-};
 
 void save_fpu(struct task_struct *tsk)
 {
@@ -76,8 +58,7 @@
 		: "memory");
 }
 
-static inline void
-fpload(struct sh_fpu_hard_struct *fpregs)
+void restore_fpu(struct task_struct *tsk)
 {
 	asm volatile("fld.p     %0, (0*8), fp0\n\t"
 		     "fld.p     %0, (1*8), fp2\n\t"
@@ -116,16 +97,11 @@
 
 		     "fld.p     %0, (31*8), fp62\n\t"
 		: /* no output */
-		: "r" (fpregs) );
+		: "r" (&tsk->thread.xstate->hardfpu)
+		: "memory");
 }
 
-void fpinit(struct sh_fpu_hard_struct *fpregs)
-{
-	*fpregs = init_fpuregs.hardfpu;
-}
-
-asmlinkage void
-do_fpu_error(unsigned long ex, struct pt_regs *regs)
+asmlinkage void do_fpu_error(unsigned long ex, struct pt_regs *regs)
 {
 	struct task_struct *tsk = current;
 
@@ -133,35 +109,6 @@
 
 	tsk->thread.trap_no = 11;
 	tsk->thread.error_code = 0;
+
 	force_sig(SIGFPE, tsk);
 }
-
-
-asmlinkage void
-do_fpu_state_restore(unsigned long ex, struct pt_regs *regs)
-{
-	void die(const char *str, struct pt_regs *regs, long err);
-
-	if (! user_mode(regs))
-		die("FPU used in kernel", regs, ex);
-
-	regs->sr &= ~SR_FD;
-
-	if (last_task_used_math == current)
-		return;
-
-	enable_fpu();
-	if (last_task_used_math != NULL)
-		/* Other processes fpu state, save away */
-		save_fpu(last_task_used_math);
-
-        last_task_used_math = current;
-        if (used_math()) {
-                fpload(&current->thread.xstate->hardfpu);
-        } else {
-		/* First time FPU user.  */
-		fpload(&init_fpuregs.hardfpu);
-                set_used_math();
-        }
-	disable_fpu();
-}
diff --git a/arch/sh/kernel/ptrace_64.c b/arch/sh/kernel/ptrace_64.c
index 67fbcee..23fbd92 100644
--- a/arch/sh/kernel/ptrace_64.c
+++ b/arch/sh/kernel/ptrace_64.c
@@ -114,8 +114,7 @@
 	regs = (struct pt_regs*)((unsigned char *)task + THREAD_SIZE) - 1;
 
 	if (!tsk_used_math(task)) {
-		fpinit(&task->thread.xstate->hardfpu);
-		set_stopped_child_used_math(task);
+		init_fpu(task);
 	} else if (last_task_used_math == task) {
 		enable_fpu();
 		save_fpu(task);