x86: change cpa to pfn based

Change the change_page_attr (CPA) machinery to operate on page frame
numbers (pfn) instead of struct page pointers, removing the
PageHighMem restriction and simplifying the call sites.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index effcd78..d18c41d 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -3,6 +3,7 @@
  * Thanks to Ben LaHaise for precious feedback.
  */
 #include <linux/highmem.h>
+#include <linux/bootmem.h>
 #include <linux/module.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
@@ -144,13 +145,15 @@
 }
 
 static int
-__change_page_attr(unsigned long address, struct page *page, pgprot_t prot)
+__change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot)
 {
 	struct page *kpte_page;
 	int level, err = 0;
 	pte_t *kpte;
 
-	BUG_ON(PageHighMem(page));
+#ifdef CONFIG_X86_32
+	BUG_ON(pfn > max_low_pfn);
+#endif
 
 repeat:
 	kpte = lookup_address(address, &level);
@@ -164,7 +167,7 @@
 	prot = check_exec(prot, address);
 
 	if (level == PG_LEVEL_4K) {
-		set_pte_atomic(kpte, mk_pte(page, canon_pgprot(prot)));
+		set_pte_atomic(kpte, pfn_pte(pfn, canon_pgprot(prot)));
 	} else {
 		err = split_large_page(kpte, address);
 		if (!err)
@@ -203,7 +206,7 @@
 		unsigned long pfn = __pa(address) >> PAGE_SHIFT;
 
 		if (!kernel_map || pte_present(pfn_pte(0, prot))) {
-			err = __change_page_attr(address, pfn_to_page(pfn), prot);
+			err = __change_page_attr(address, pfn, prot);
 			if (err)
 				break;
 		}
@@ -219,7 +222,7 @@
 			addr2 = __START_KERNEL_map + __pa(address);
 			/* Make sure the kernel mappings stay executable */
 			prot2 = pte_pgprot(pte_mkexec(pfn_pte(0, prot)));
-			err = __change_page_attr(addr2, pfn_to_page(pfn), prot2);
+			err = __change_page_attr(addr2, pfn, prot2);
 		}
 #endif
 	}