x86: clean up arch/x86/mm/pageattr.c
Do some leftover cleanups in the now-unified arch/x86/mm/pageattr.c
file.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 510ff40..a270f9c 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -2,7 +2,6 @@
* Copyright 2002 Andi Kleen, SuSE Labs.
* Thanks to Ben LaHaise for precious feedback.
*/
-
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/sched.h>
@@ -50,9 +49,7 @@
/* change init_mm */
set_pte_atomic(kpte, pte);
#ifdef CONFIG_X86_32
- if (SHARED_KERNEL_PMD)
- return;
- {
+ if (!SHARED_KERNEL_PMD) {
struct page *page;
for (page = pgd_list; page; page = (struct page *)page->index) {
@@ -277,14 +274,14 @@
return;
/*
- * the return value is ignored - the calls cannot fail,
- * large pages are disabled at boot time.
+ * The return value is ignored - the calls cannot fail,
+ * large pages are disabled at boot time:
*/
change_page_attr(page, numpages, enable ? PAGE_KERNEL : __pgprot(0));
/*
- * we should perform an IPI and flush all tlbs,
- * but that can deadlock->flush only current cpu.
+ * We should perform an IPI and flush all tlbs,
+ * but that can deadlock->flush only current cpu:
*/
__flush_tlb_all();
}
diff --git a/include/asm-x86/pgtable_64.h b/include/asm-x86/pgtable_64.h
index 223b003..faa6a96 100644
--- a/include/asm-x86/pgtable_64.h
+++ b/include/asm-x86/pgtable_64.h
@@ -25,6 +25,8 @@
#endif /* !__ASSEMBLY__ */
+#define SHARED_KERNEL_PMD 1
+
/*
* PGDIR_SHIFT determines what a top-level page table entry can map
*/