x86/fpu: Pass 'struct fpu' to fpu__restore()

This cleans up the call sites and the function a bit,
and also makes it more symmetric with the other
high-level FPU state handling functions.

It's still only valid for the current task, as we copy
the in-memory FPU state to the FPU registers of the
current CPU.
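
For illustration only, the calling convention changes as
follows (mirroring the traps.c call site updated below; the
fpu passed in must still belong to the current task):

	/* before: implicitly operated on current's FPU state */
	fpu__restore();

	/* after: the FPU context is passed in explicitly */
	fpu__restore(&current->thread.fpu);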

No change in functionality.

Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h
index 7fdc90b..a4c1b7d 100644
--- a/arch/x86/include/asm/fpu/internal.h
+++ b/arch/x86/include/asm/fpu/internal.h
@@ -50,7 +50,7 @@
 extern void fpu__activate_curr(struct fpu *fpu);
 extern void fpu__activate_stopped(struct fpu *fpu);
 extern void fpu__save(struct fpu *fpu);
-extern void fpu__restore(void);
+extern void fpu__restore(struct fpu *fpu);
 extern int  fpu__restore_sig(void __user *buf, int ia32_frame);
 extern void fpu__drop(struct fpu *fpu);
 extern int  fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu);
diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
index d67558c..c066160 100644
--- a/arch/x86/kernel/fpu/core.c
+++ b/arch/x86/kernel/fpu/core.c
@@ -343,11 +343,8 @@
  * with local interrupts disabled, as it is in the case of
  * do_device_not_available()).
  */
-void fpu__restore(void)
+void fpu__restore(struct fpu *fpu)
 {
-	struct task_struct *tsk = current;
-	struct fpu *fpu = &tsk->thread.fpu;
-
 	fpu__activate_curr(fpu);
 
 	/* Avoid __kernel_fpu_begin() right after fpregs_activate() */
@@ -355,9 +352,9 @@
 	fpregs_activate(fpu);
 	if (unlikely(copy_fpstate_to_fpregs(fpu))) {
 		fpu__clear(fpu);
-		force_sig_info(SIGSEGV, SEND_SIG_PRIV, tsk);
+		force_sig_info(SIGSEGV, SEND_SIG_PRIV, current);
 	} else {
-		tsk->thread.fpu.counter++;
+		fpu->counter++;
 	}
 	kernel_fpu_enable();
 }
diff --git a/arch/x86/kernel/fpu/signal.c b/arch/x86/kernel/fpu/signal.c
index 99f7309..50ec9af 100644
--- a/arch/x86/kernel/fpu/signal.c
+++ b/arch/x86/kernel/fpu/signal.c
@@ -319,7 +319,7 @@
 		fpu->fpstate_active = 1;
 		if (use_eager_fpu()) {
 			preempt_disable();
-			fpu__restore();
+			fpu__restore(fpu);
 			preempt_enable();
 		}
 
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 6f581c6..a2510f2 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -803,7 +803,7 @@
 		return;
 	}
 #endif
-	fpu__restore(); /* interrupts still off */
+	fpu__restore(&current->thread.fpu); /* interrupts still off */
 #ifdef CONFIG_X86_32
 	conditional_sti(regs);
 #endif
diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
index 99bb300..6a4cd77 100644
--- a/drivers/lguest/x86/core.c
+++ b/drivers/lguest/x86/core.c
@@ -302,7 +302,7 @@
 	 * before this.
 	 */
 	else if (cpu->regs->trapnum == 7 && !fpregs_active())
-		fpu__restore();
+		fpu__restore(&current->thread.fpu);
 }
 
 /*H:130