x86: cpa: introduce set_memory_np()

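Introduce set_memory_np() to mark a range of kernel pages not present,
mirroring the existing set_memory_nx()/set_memory_ro() interfaces:

	int set_memory_np(unsigned long addr, int numpages);

Use it, together with set_memory_nx(), in free_init_pages() when
CONFIG_DEBUG_RODATA is enabled (replacing the global_flush_tlb() call
there), so that freed __init pages end up not present and not
executable: any stale call into an __init function then faults
immediately instead of failing spuriously later, once the memory has
been reused.

Also convert the CONFIG_DEBUG_PAGEALLOC path of kernel_map_pages() to
dedicated __set_pages_p()/__set_pages_np() helpers, and drop the
EXPORT_SYMBOLs of set_memory_ro/set_memory_rw and
set_pages_ro/set_pages_rw.
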
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 05bb12d..4757be7 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -559,8 +559,21 @@
 		free_page(addr);
 		totalram_pages++;
 	}
-	if (addr > __START_KERNEL_map)
-		global_flush_tlb();
+#ifdef CONFIG_DEBUG_RODATA
+	/*
+	 * This will make the __init pages not present and
+	 * not executable, so that any attempt to use a
+	 * __init function from now on will fault immediately
+	 * rather than failing spuriously later when the memory gets reused.
+	 *
+	 * We only do this for DEBUG_RODATA so as not to break up
+	 * the 2MB kernel mapping just for this debug feature.
+	 */
+	if (begin >= __START_KERNEL_map) {
+		set_memory_np(begin, (end - begin)/PAGE_SIZE);
+		set_memory_nx(begin, (end - begin)/PAGE_SIZE);
+	}
+#endif
 }
 
 void free_initmem(void)
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index fcd9612..e5910ac 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -357,8 +357,6 @@
 	return change_page_attr_addr(addr, numpages, prot);
 }
 
-
-
 int set_memory_uc(unsigned long addr, int numpages)
 {
 	pgprot_t uncached;
@@ -402,7 +400,6 @@
 	pgprot_val(rw) = _PAGE_RW;
 	return change_page_attr_clear(addr, numpages, rw);
 }
-EXPORT_SYMBOL(set_memory_ro);
 
 int set_memory_rw(unsigned long addr, int numpages)
 {
@@ -411,7 +408,15 @@
 	pgprot_val(rw) = _PAGE_RW;
 	return change_page_attr_set(addr, numpages, rw);
 }
-EXPORT_SYMBOL(set_memory_rw);
+
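+/* Clear _PAGE_PRESENT on the pages: any subsequent access will fault. */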
+int set_memory_np(unsigned long addr, int numpages)
+{
+	pgprot_t present;
+
+	pgprot_val(present) = _PAGE_PRESENT;
+	return change_page_attr_clear(addr, numpages, present);
+}
 
 int set_pages_uc(struct page *page, int numpages)
 {
@@ -461,7 +465,6 @@
 	pgprot_val(rw) = _PAGE_RW;
 	return change_page_attr_clear(addr, numpages, rw);
 }
-EXPORT_SYMBOL(set_pages_ro);
 
 int set_pages_rw(struct page *page, int numpages)
 {
@@ -471,8 +474,6 @@
 	pgprot_val(rw) = _PAGE_RW;
 	return change_page_attr_set(addr, numpages, rw);
 }
-EXPORT_SYMBOL(set_pages_rw);
-
 
 void clflush_cache_range(void *addr, int size)
 {
@@ -503,6 +504,22 @@
 EXPORT_SYMBOL(global_flush_tlb);
 
 #ifdef CONFIG_DEBUG_PAGEALLOC
+
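+/* Make the pages present and read-write in the kernel direct mapping: */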
+static int __set_pages_p(struct page *page, int numpages)
+{
+	unsigned long addr = (unsigned long)page_address(page);
+	return change_page_attr_set(addr, numpages,
+				__pgprot(_PAGE_PRESENT | _PAGE_RW));
+}
+
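+/* Make the pages not present in the kernel direct mapping: */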
+static int __set_pages_np(struct page *page, int numpages)
+{
+	unsigned long addr = (unsigned long)page_address(page);
+	return change_page_attr_clear(addr, numpages, __pgprot(_PAGE_PRESENT));
+}
+
 void kernel_map_pages(struct page *page, int numpages, int enable)
 {
 	if (PageHighMem(page))
@@ -522,7 +537,10 @@
 	 * The return value is ignored - the calls cannot fail,
 	 * large pages are disabled at boot time:
 	 */
-	change_page_attr(page, numpages, enable ? PAGE_KERNEL : __pgprot(0));
+	if (enable)
+		__set_pages_p(page, numpages);
+	else
+		__set_pages_np(page, numpages);
 
 	/*
 	 * We should perform an IPI and flush all tlbs,
diff --git a/include/asm-x86/cacheflush.h b/include/asm-x86/cacheflush.h
index e79159b..a95afaf 100644
--- a/include/asm-x86/cacheflush.h
+++ b/include/asm-x86/cacheflush.h
@@ -42,6 +42,7 @@
 int set_memory_nx(unsigned long addr, int numpages);
 int set_memory_ro(unsigned long addr, int numpages);
 int set_memory_rw(unsigned long addr, int numpages);
+int set_memory_np(unsigned long addr, int numpages);
 
 void clflush_cache_range(void *addr, int size);