x86, boot: straighten out ranges to copy/zero in compressed/head*.S

On both 32 and 64 bits, we copy all the way up to the end of bss,
except that on 64 bits there is a hack to avoid copying on top of the
page tables.  There is no point in copying bss at all, especially
since we are just about to zero it all anyway.

To clean up and unify the handling, we now:

  - copy from startup_32 to _bss.
  - zero from _bss to _ebss.
  - align the _ebss symbol to an 8-byte boundary.
  - move the page tables to a separate section.

Use _bss rather than _edata as the copy endpoint: the copy loop moves
eight bytes at a time, and _edata may be misaligned.
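
The companion linker-script change is not part of this excerpt; a
minimal sketch of the layout the new symbols imply (the _pgtable and
_epgtable markers are illustrative and may not match the names used in
the actual linker script) would be:

	.bss : {
		_bss = . ;		/* start of the range to zero */
		*(.bss)
		*(.bss.*)
		*(COMMON)
		. = ALIGN(8);		/* keep _ebss 8-byte aligned */
		_ebss = . ;		/* end of the range to zero */
	}
	. = ALIGN(4096);
	.pgtable : {			/* page tables sit outside .bss, never zeroed */
		_pgtable = . ;
		*(.pgtable)
		_epgtable = . ;
	}

With such a layout, everything from startup_32 up to _bss is copied,
_bss.._ebss is zeroed, and the .pgtable area is left untouched until
the page-table setup code fills it in.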

[ Impact: cleanup, trivial performance improvement ]

Signed-off-by: H. Peter Anvin <hpa@zytor.com>
diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
index 26c3def..5bc9052 100644
--- a/arch/x86/boot/compressed/head_64.S
+++ b/arch/x86/boot/compressed/head_64.S
@@ -253,9 +253,9 @@
  * Copy the compressed kernel to the end of our buffer
  * where decompression in place becomes safe.
  */
-	leaq	_end_before_pgt(%rip), %r8
-	leaq	_end_before_pgt(%rbx), %r9
-	movq	$_end_before_pgt /* - $startup_32 */, %rcx
+	leaq	_bss(%rip), %r8
+	leaq	_bss(%rbx), %r9
+	movq	$_bss /* - $startup_32 */, %rcx
 1:	subq	$8, %r8
 	subq	$8, %r9
 	movq	0(%r8), %rax
@@ -276,8 +276,8 @@
  * Clear BSS
  */
 	xorq	%rax, %rax
-	leaq    _edata(%rbx), %rdi
-	leaq    _end_before_pgt(%rbx), %rcx
+	leaq    _bss(%rbx), %rdi
+	leaq    _ebss(%rbx), %rcx
 	subq	%rdi, %rcx
 	cld
 	rep	stosb
@@ -329,3 +329,11 @@
 boot_stack:
 	.fill BOOT_STACK_SIZE, 1, 0
 boot_stack_end:
+
+/*
+ * Space for page tables (not in .bss so not zeroed)
+ */
+	.section ".pgtable","a",@nobits
+	.balign 4096
+pgtable:
+	.fill 6*4096, 1, 0
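
The new .pgtable section is allocatable but @nobits, so like .bss it
takes no space in the image; unlike .bss it lies outside the
_bss.._ebss range and is therefore skipped by the "rep stosb" above.
The page-table setup code is expected to clear the area itself before
building the early identity-mapped tables; an illustrative clear of
the 6*4096-byte region (not the literal code from head_64.S) looks
like:

	leal	pgtable(%ebx), %edi	/* runtime address of the 6-page area */
	xorl	%eax, %eax
	movl	$((6*4096)/4), %ecx	/* 6 pages, stored 4 bytes at a time */
	rep	stosl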