[PATCH] x86_64: Implement early DMI scanning
There are more and more cases where DMI information is needed early in
boot to work around BIOS bugs. i386 already had early DMI scanning, but
x86-64 did not; implement it there as well.
This required some cleanup in the i386 code.
Signed-off-by: Andi Kleen <ak@suse.de>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
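
Note on the early DMI buffer (commentary, not part of the patch): the
dmi_alloc_data[]/dmi_alloc_index pair added to setup.c below acts as a
static pool, because dmi_scan_machine() now runs before the slab
allocator is available. The allocator itself presumably lives in the
new <asm/dmi.h> include, which is not shown in this excerpt; a minimal
sketch of what such a bump allocator could look like, reusing only the
names visible in the diff (the dmi_alloc() name itself is assumed):

	/* Sketch, not the literal <asm/dmi.h> contents: hand out chunks of
	 * the static dmi_alloc_data[] pool. There is no free(); the data
	 * lives for the lifetime of the kernel. */
	static inline void *dmi_alloc(unsigned int len)
	{
		int idx = dmi_alloc_index;

		if (dmi_alloc_index + len > DMI_MAX_DATA)
			return NULL;	/* static pool exhausted */
		dmi_alloc_index += len;
		return dmi_alloc_data + idx;
	}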
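
Similarly, the early_ioremap()/early_iounmap() pair added to mm/init.c
exists so the SMBIOS/DMI tables in the legacy BIOS area can be reached
before the normal ioremap() machinery is up. A rough usage sketch (the
function name and exact call sequence are illustrative, not taken from
this diff; searching 0xF0000-0xFFFFF for the "_DMI_" anchor on 16-byte
boundaries is the standard DMI entry point convention):

	/* Illustrative only: map the legacy BIOS area with the temporary
	 * mapping, look for the DMI entry point, then drop the mapping so
	 * no virtual alias survives zap_low_mappings(). */
	static void __init example_dmi_probe(void)
	{
		char *p, *q;

		p = early_ioremap(0xF0000, 0x10000);
		if (p == NULL)
			return;
		for (q = p; q < p + 0x10000; q += 16) {
			if (memcmp(q, "_DMI_", 5) == 0) {
				/* parse the DMI entry point / table here */
				break;
			}
		}
		early_iounmap(p, 0x10000);
	}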
diff --git a/arch/i386/kernel/dmi_scan.c b/arch/i386/kernel/dmi_scan.c
index d2dfd9c..ebc8dc1 100644
--- a/arch/i386/kernel/dmi_scan.c
+++ b/arch/i386/kernel/dmi_scan.c
@@ -5,6 +5,7 @@
#include <linux/dmi.h>
#include <linux/bootmem.h>
#include <linux/slab.h>
+#include <asm/dmi.h>
static char * __init dmi_string(struct dmi_header *dm, u8 s)
{
diff --git a/arch/x86_64/kernel/setup.c b/arch/x86_64/kernel/setup.c
index 49f2858..8acf531 100644
--- a/arch/x86_64/kernel/setup.c
+++ b/arch/x86_64/kernel/setup.c
@@ -68,6 +68,7 @@
#include <asm/swiotlb.h>
#include <asm/sections.h>
#include <asm/gart-mapping.h>
+#include <asm/dmi.h>
/*
* Machine setup..
@@ -92,6 +93,12 @@
unsigned long saved_video_mode;
+/*
+ * Early DMI memory
+ */
+int dmi_alloc_index;
+char dmi_alloc_data[DMI_MAX_DATA];
+
/*
* Setup options
*/
@@ -620,6 +627,8 @@
init_memory_mapping(0, (end_pfn_map << PAGE_SHIFT));
+ dmi_scan_machine();
+
zap_low_mappings(0);
#ifdef CONFIG_ACPI
@@ -1412,10 +1421,3 @@
.show = show_cpuinfo,
};
-static int __init run_dmi_scan(void)
-{
- dmi_scan_machine();
- return 0;
-}
-core_initcall(run_dmi_scan);
-
diff --git a/arch/x86_64/mm/init.c b/arch/x86_64/mm/init.c
index 675a456..54c7f59 100644
--- a/arch/x86_64/mm/init.c
+++ b/arch/x86_64/mm/init.c
@@ -225,6 +225,33 @@
ti->allocated = 0;
}
+/* Must run before zap_low_mappings */
+__init void *early_ioremap(unsigned long addr, unsigned long size)
+{
+ unsigned long map = round_down(addr, LARGE_PAGE_SIZE);
+
+ /* the two large-page mappings below usually cover somewhat more than 'size' */
+ if (size >= LARGE_PAGE_SIZE) {
+ printk("SMBIOS area too long %lu\n", size);
+ return NULL;
+ }
+ set_pmd(temp_mappings[0].pmd, __pmd(map | _KERNPG_TABLE | _PAGE_PSE));
+ map += LARGE_PAGE_SIZE;
+ set_pmd(temp_mappings[1].pmd, __pmd(map | _KERNPG_TABLE | _PAGE_PSE));
+ __flush_tlb();
+ return temp_mappings[0].address + (addr & (LARGE_PAGE_SIZE-1));
+}
+
+/* To avoid virtual aliases later */
+__init void early_iounmap(void *addr, unsigned long size)
+{
+ if ((void *)round_down((unsigned long)addr, LARGE_PAGE_SIZE) != temp_mappings[0].address)
+ printk("early_iounmap: bad address %p\n", addr);
+ set_pmd(temp_mappings[0].pmd, __pmd(0));
+ set_pmd(temp_mappings[1].pmd, __pmd(0));
+ __flush_tlb();
+}
+
static void __meminit
phys_pmd_init(pmd_t *pmd, unsigned long address, unsigned long end)
{