x86/fpu: Rename fpstate_alloc_init() to fpstate_init_curr()
Now that there are no FPU context allocations, rename fpstate_alloc_init()
to fpstate_init_curr(), to signal that it initializes the fpstate and
marks it active for the current task.
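All call sites already follow the same check-and-initialize idiom, which
this rename leaves unchanged (a simplified sketch of the pattern, not taken
verbatim from any single file):

	struct fpu *fpu = &current->thread.fpu;

	/* Initialize the fpstate and mark it active, once per task: */
	if (!fpu->fpstate_active)
		fpstate_init_curr(fpu);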
Reviewed-by: Borislav Petkov <bp@alien8.de>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h
index 1d0c5ce..1345ab3 100644
--- a/arch/x86/include/asm/fpu/internal.h
+++ b/arch/x86/include/asm/fpu/internal.h
@@ -44,7 +44,7 @@
extern void fpu__init_cpu_xstate(void);
extern void fpu__init_system(struct cpuinfo_x86 *c);
-extern void fpstate_alloc_init(struct fpu *fpu);
+extern void fpstate_init_curr(struct fpu *fpu);
extern void fpstate_init(struct fpu *fpu);
extern void fpu__clear(struct task_struct *tsk);
diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
index b44ac50..45f014e 100644
--- a/arch/x86/kernel/fpu/core.c
+++ b/arch/x86/kernel/fpu/core.c
@@ -261,7 +261,7 @@
/*
* Initialize the current task's in-memory FPU context:
*/
-void fpstate_alloc_init(struct fpu *fpu)
+void fpstate_init_curr(struct fpu *fpu)
{
WARN_ON_ONCE(fpu != &current->thread.fpu);
WARN_ON_ONCE(fpu->fpstate_active);
@@ -271,7 +271,7 @@
/* Safe to do for the current task: */
fpu->fpstate_active = 1;
}
-EXPORT_SYMBOL_GPL(fpstate_alloc_init);
+EXPORT_SYMBOL_GPL(fpstate_init_curr);
/*
* This function is called before we modify a stopped child's
@@ -332,7 +332,7 @@
struct fpu *fpu = &tsk->thread.fpu;
if (!fpu->fpstate_active)
- fpstate_alloc_init(fpu);
+ fpstate_init_curr(fpu);
/* Avoid __kernel_fpu_begin() right after fpregs_activate() */
kernel_fpu_disable();
@@ -358,7 +358,7 @@
drop_fpu(fpu);
} else {
if (!fpu->fpstate_active) {
- fpstate_alloc_init(fpu);
+ fpstate_init_curr(fpu);
user_fpu_begin();
}
restore_init_xstate();
diff --git a/arch/x86/kernel/fpu/xsave.c b/arch/x86/kernel/fpu/xsave.c
index dd2cef0..49d9f3d 100644
--- a/arch/x86/kernel/fpu/xsave.c
+++ b/arch/x86/kernel/fpu/xsave.c
@@ -359,7 +359,7 @@
return -EACCES;
if (!fpu->fpstate_active)
- fpstate_alloc_init(fpu);
+ fpstate_init_curr(fpu);
if (!static_cpu_has(X86_FEATURE_FPU))
return fpregs_soft_set(current, NULL,
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 707f4e2..74b53c3 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -6602,7 +6602,7 @@
sigset_t sigsaved;
if (!fpu->fpstate_active)
- fpstate_alloc_init(fpu);
+ fpstate_init_curr(fpu);
if (vcpu->sigset_active)
sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
diff --git a/arch/x86/math-emu/fpu_entry.c b/arch/x86/math-emu/fpu_entry.c
index 99ddfc2..4c6ab79 100644
--- a/arch/x86/math-emu/fpu_entry.c
+++ b/arch/x86/math-emu/fpu_entry.c
@@ -150,7 +150,7 @@
struct fpu *fpu = &current->thread.fpu;
if (!fpu->fpstate_active)
- fpstate_alloc_init(fpu);
+ fpstate_init_curr(fpu);
#ifdef RE_ENTRANT_CHECKING
if (emulating) {