x86/fpu: Move thread_struct::fpu_counter into thread_struct::fpu.counter
This field is kept separate from the main FPU state structure for
no good reason, so move it into 'struct fpu', next to the rest of
the per-task FPU state it belongs with.
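
For reference, a minimal sketch of the resulting layout (member names
taken from the processor.h hunk below; other members and exact
indentation elided):

  struct fpu {
          unsigned int            last_cpu;
          unsigned int            has_fpu;
          union thread_xstate     *state;
          /* consecutive context switches that used the FPU: */
          unsigned char           counter;
  };

  /* old: tsk->thread.fpu_counter    new: tsk->thread.fpu.counter */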
Reviewed-by: Borislav Petkov <bp@alien8.de>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h
index 02e0e97..f85d21b 100644
--- a/arch/x86/include/asm/fpu-internal.h
+++ b/arch/x86/include/asm/fpu-internal.h
@@ -384,7 +384,7 @@
* Forget coprocessor state..
*/
preempt_disable();
- tsk->thread.fpu_counter = 0;
+ tsk->thread.fpu.counter = 0;
if (__thread_has_fpu(tsk)) {
/* Ignore delayed exceptions from user space */
@@ -441,7 +441,7 @@
* or if the past 5 consecutive context-switches used math.
*/
fpu.preload = tsk_used_math(new) &&
- (use_eager_fpu() || new->thread.fpu_counter > 5);
+ (use_eager_fpu() || new->thread.fpu.counter > 5);
if (__thread_has_fpu(old)) {
if (!__save_init_fpu(old))
@@ -454,16 +454,16 @@
/* Don't change CR0.TS if we just switch! */
if (fpu.preload) {
- new->thread.fpu_counter++;
+ new->thread.fpu.counter++;
__thread_set_has_fpu(new);
prefetch(new->thread.fpu.state);
} else if (!use_eager_fpu())
stts();
} else {
- old->thread.fpu_counter = 0;
+ old->thread.fpu.counter = 0;
task_disable_lazy_fpu_restore(old);
if (fpu.preload) {
- new->thread.fpu_counter++;
+ new->thread.fpu.counter++;
if (fpu_lazy_restore(new, cpu))
fpu.preload = 0;
else
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 2dc08c2..64d6b5d 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -433,6 +433,15 @@
unsigned int last_cpu;
unsigned int has_fpu;
union thread_xstate *state;
+ /*
+ * This counter contains the number of consecutive context switches
+ * during which the FPU is used. If this exceeds a threshold, the lazy
+ * FPU saving becomes unlazy to save the trap. This is an unsigned char
+ * so that after 256 iterations the counter wraps and the behavior turns
+ * lazy again; this is to deal with bursty apps that only use the FPU
+ * for a short time.
+ */
+ unsigned char counter;
};
#ifdef CONFIG_X86_64
@@ -535,15 +544,6 @@
unsigned long iopl;
/* Max allowed port in the bitmap, in bytes: */
unsigned io_bitmap_max;
- /*
- * fpu_counter contains the number of consecutive context switches
- * that the FPU is used. If this is over a threshold, the lazy fpu
- * saving becomes unlazy to save the trap. This is an unsigned char
- * so that after 256 times the counter wraps and the behavior turns
- * lazy again; this to deal with bursty apps that only use FPU for
- * a short time
- */
- unsigned char fpu_counter;
};
/*
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index fd4aa56..c7793ad 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -87,7 +87,7 @@
{
*dst = *src;
- dst->thread.fpu_counter = 0;
+ dst->thread.fpu.counter = 0;
dst->thread.fpu.has_fpu = 0;
dst->thread.fpu.state = NULL;
task_disable_lazy_fpu_restore(dst);
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index cf9c962..231aa57 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -863,7 +863,7 @@
fpu_reset_state(tsk);
force_sig_info(SIGSEGV, SEND_SIG_PRIV, tsk);
} else {
- tsk->thread.fpu_counter++;
+ tsk->thread.fpu.counter++;
}
kernel_fpu_enable();
}