Merge branch 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 fixes from Ingo Molnar:
 "Two FPU rewrite related fixes.  This addresses all known x86
  regressions at this stage.  Also some other misc fixes"

* 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/fpu: Fix boot crash in the early FPU code
  x86/asm/entry/64: Update path names
  x86/fpu: Fix FPU related boot regression when CPUID masking BIOS feature is enabled
  x86/boot/setup: Clean up the e820_reserve_setup_data() code
  x86/kaslr: Fix typo in the KASLR_FLAG documentation
diff --git a/Documentation/x86/boot.txt b/Documentation/x86/boot.txt
index 7c1f9fa..9da6f35 100644
--- a/Documentation/x86/boot.txt
+++ b/Documentation/x86/boot.txt
@@ -406,7 +406,7 @@
 	- If 0, the protected-mode code is loaded at 0x10000.
 	- If 1, the protected-mode code is loaded at 0x100000.
 
-  Bit 1 (kernel internal): ALSR_FLAG
+  Bit 1 (kernel internal): KASLR_FLAG
 	- Used internally by the compressed kernel to communicate
 	  KASLR status to kernel proper.
 	  If 1, KASLR enabled.
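
For context: kernel proper reads this bit out of the setup header's
loadflags field. A minimal sketch of the check, with pared-down
stand-ins for struct boot_params/struct setup_header (the real
KASLR_FLAG mask is (1 << 1) in the x86 boot-protocol headers):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define KASLR_FLAG	(1 << 1)	/* bit 1 of loadflags */

/* Illustrative stand-ins for the real boot-parameter structures. */
struct setup_header { uint8_t loadflags; };
struct boot_params  { struct setup_header hdr; };

static bool kaslr_enabled(const struct boot_params *bp)
{
	return !!(bp->hdr.loadflags & KASLR_FLAG);
}

int main(void)
{
	struct boot_params bp = { .hdr = { .loadflags = KASLR_FLAG } };

	printf("KASLR %s\n", kaslr_enabled(&bp) ? "enabled" : "disabled");
	return 0;
}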
diff --git a/Documentation/x86/entry_64.txt b/Documentation/x86/entry_64.txt
index 33884d1..c1df8eb 100644
--- a/Documentation/x86/entry_64.txt
+++ b/Documentation/x86/entry_64.txt
@@ -1,14 +1,14 @@
 This file documents some of the kernel entries in
-arch/x86/kernel/entry_64.S.  A lot of this explanation is adapted from
+arch/x86/entry/entry_64.S.  A lot of this explanation is adapted from
 an email from Ingo Molnar:
 
 http://lkml.kernel.org/r/<20110529191055.GC9835%40elte.hu>
 
 The x86 architecture has quite a few different ways to jump into
 kernel code.  Most of these entry points are registered in
-arch/x86/kernel/traps.c and implemented in arch/x86/kernel/entry_64.S
-for 64-bit, arch/x86/kernel/entry_32.S for 32-bit and finally
-arch/x86/ia32/ia32entry.S which implements the 32-bit compatibility
+arch/x86/kernel/traps.c and implemented in arch/x86/entry/entry_64.S
+for 64-bit, arch/x86/entry/entry_32.S for 32-bit and finally
+arch/x86/entry/entry_64_compat.S which implements the 32-bit compatibility
 syscall entry points and thus provides for 32-bit processes the
 ability to execute syscalls when running on 64-bit kernels.
 
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 9fc5e3d..922c5e0 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -742,7 +742,6 @@
 	cpu_detect(c);
 	get_cpu_vendor(c);
 	get_cpu_cap(c);
-	fpu__init_system(c);
 
 	if (this_cpu->c_early_init)
 		this_cpu->c_early_init(c);
@@ -754,6 +753,7 @@
 		this_cpu->c_bsp_init(c);
 
 	setup_force_cpu_cap(X86_FEATURE_ALWAYS);
+	fpu__init_system(c);
 }
 
 void __init early_cpu_init(void)
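
The hunk above is the ordering fix: fpu__init_system() reads CPUID
feature bits, so it is moved to run after the vendor init hooks
(this_cpu->c_early_init()/c_bsp_init()), which matters when a BIOS
CPUID-masking feature would otherwise hide FPU-related bits from the
early probe. As a hedged userspace illustration (not kernel code), the
relevant bits live in CPUID leaf 1, e.g. FXSR in EDX bit 24 and XSAVE
in ECX bit 26:

#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* CPUID leaf 1: basic feature flags. */
	if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
		return 1;

	printf("FXSR  (EDX bit 24): %u\n", !!(edx & (1u << 24)));
	printf("XSAVE (ECX bit 26): %u\n", !!(ecx & (1u << 26)));
	return 0;
}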
diff --git a/arch/x86/kernel/fpu/init.c b/arch/x86/kernel/fpu/init.c
index fc878fe..3282679 100644
--- a/arch/x86/kernel/fpu/init.c
+++ b/arch/x86/kernel/fpu/init.c
@@ -95,11 +95,12 @@
 	unsigned int mask = 0;
 
 	if (cpu_has_fxsr) {
-		struct fxregs_state fx_tmp __aligned(32) = { };
+		/* Static because GCC does not get 16-byte stack alignment right: */
+		static struct fxregs_state fxregs __initdata;
 
-		asm volatile("fxsave %0" : "+m" (fx_tmp));
+		asm volatile("fxsave %0" : "+m" (fxregs));
 
-		mask = fx_tmp.mxcsr_mask;
+		mask = fxregs.mxcsr_mask;
 
 		/*
 		 * If zero then use the default features mask,
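
The fxregs change above works around an alignment hazard: FXSAVE
faults unless its 512-byte save area is 16-byte aligned, and since GCC
could not be relied on to keep a stack slot that aligned, the buffer
becomes static __initdata. Below is a standalone sketch of the same
MXCSR-mask probe; the field offsets follow the hardware FXSAVE layout
(mxcsr_mask at byte offset 28), and 0x0000ffbf mirrors the kernel's
default mask for the "reported as zero" case:

#include <stdint.h>
#include <stdio.h>

/* 512-byte FXSAVE area; offsets follow the 64-bit hardware layout. */
struct fxregs {
	uint16_t cwd, swd, twd, fop;
	uint64_t rip, rdp;
	uint32_t mxcsr;
	uint32_t mxcsr_mask;		/* byte offset 28 */
	uint32_t st_space[32];
	uint32_t xmm_space[64];
	uint32_t padding[24];
} __attribute__((aligned(16)));		/* FXSAVE demands 16-byte alignment */

int main(void)
{
	/* Static, like the fix: no dependence on stack alignment. */
	static struct fxregs fx;
	uint32_t mask;

	asm volatile ("fxsave %0" : "+m" (fx));

	mask = fx.mxcsr_mask;
	if (!mask)			/* zero means "use the default mask" */
		mask = 0x0000ffbf;

	printf("MXCSR feature mask: %#x\n", mask);
	return 0;
}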
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index d3b95b8..80f874b 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -461,19 +461,18 @@
 {
 	struct setup_data *data;
 	u64 pa_data;
-	int found = 0;
 
 	pa_data = boot_params.hdr.setup_data;
+	if (!pa_data)
+		return;
+
 	while (pa_data) {
 		data = early_memremap(pa_data, sizeof(*data));
 		e820_update_range(pa_data, sizeof(*data)+data->len,
 			 E820_RAM, E820_RESERVED_KERN);
-		found = 1;
 		pa_data = data->next;
 		early_memunmap(data, sizeof(*data));
 	}
-	if (!found)
-		return;
 
 	sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
 	memcpy(&e820_saved, &e820, sizeof(struct e820map));
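
The setup.c hunk replaces a post-loop 'found' flag with an early
return: when boot_params.hdr.setup_data is empty there is nothing to
reserve, so the sanitize/copy step can be skipped up front instead of
being guarded after the walk. A minimal sketch of that pattern, with
hypothetical stand-ins for the e820 helpers:

#include <stdio.h>

struct node {
	struct node *next;
	unsigned long len;
};

/* Hypothetical stand-ins for e820_update_range()/sanitize_e820_map(). */
static void reserve(struct node *n) { printf("reserve %lu bytes\n", n->len); }
static void finalize(void)          { puts("sanitize + save map"); }

static void reserve_chain(struct node *head)
{
	if (!head)
		return;		/* empty chain: skip the walk and finalize() */

	for (struct node *n = head; n; n = n->next)
		reserve(n);

	finalize();		/* reached only when the chain was non-empty */
}

int main(void)
{
	struct node b = { NULL, 8 }, a = { &b, 16 };

	reserve_chain(&a);	/* two reserves, then finalize */
	reserve_chain(NULL);	/* prints nothing */
	return 0;
}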