[PATCH] powerpc: Separate usage of KERNELBASE and PAGE_OFFSET

This patch separates usage of KERNELBASE and PAGE_OFFSET. I haven't
looked at any of the PPC32 code; if we ever want to support Kdump on
PPC we'll have to do another audit, ditto for iSeries.

This patch makes PAGE_OFFSET the constant; for 64-bit it'll always be
0xC000000000000000 (0xC * 1 gazillion).

To get a physical address from a virtual one you subtract PAGE_OFFSET,
_not_ KERNELBASE.
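
For illustration, here is a minimal sketch of what that means, in terms
of the ppc64 __pa()/__va() helpers (the real definitions live in
asm/page.h and may differ in detail):

	/* Linear-mapping conversions use PAGE_OFFSET, never KERNELBASE. */
	#define __pa(x) ((unsigned long)(x) - PAGE_OFFSET)
	#define __va(x) ((void *)((unsigned long)(x) + PAGE_OFFSET))

This is why the stab allocation below becomes __va(newstab) instead of
newstab += KERNELBASE.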

KERNELBASE is the virtual address of the start of the kernel; it's
often the same as PAGE_OFFSET, but _might not be_.

If you want to know something's offset from the start of the kernel,
you should subtract KERNELBASE.
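
As a sketch of why the two can differ (PHYSICAL_START and the 32MB
value here are illustrative, not part of this patch): a relocated
kernel, e.g. a kdump capture kernel, moves KERNELBASE but not
PAGE_OFFSET:

	/* Illustrative layout for a relocated (e.g. kdump) kernel. */
	#define PAGE_OFFSET	0xC000000000000000UL
	#define PHYSICAL_START	0x2000000UL	/* hypothetical 32MB load addr */
	#define KERNELBASE	(PAGE_OFFSET + PHYSICAL_START)

	extern char _stext[];
	/* Physical address of _stext: subtract PAGE_OFFSET. */
	unsigned long pa  = (unsigned long)_stext - PAGE_OFFSET;
	/* Offset of _stext within the kernel image: subtract KERNELBASE. */
	unsigned long off = (unsigned long)_stext - KERNELBASE;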

Signed-off-by: Michael Ellerman <michael@ellerman.id.au>
Signed-off-by: Paul Mackerras <paulus@samba.org>
diff --git a/arch/powerpc/mm/stab.c b/arch/powerpc/mm/stab.c
index a18dab0..82e4951 100644
--- a/arch/powerpc/mm/stab.c
+++ b/arch/powerpc/mm/stab.c
@@ -40,7 +40,7 @@
 	unsigned long entry, group, old_esid, castout_entry, i;
 	unsigned int global_entry;
 	struct stab_entry *ste, *castout_ste;
-	unsigned long kernel_segment = (esid << SID_SHIFT) >= KERNELBASE;
+	unsigned long kernel_segment = (esid << SID_SHIFT) >= PAGE_OFFSET;
 
 	vsid_data = vsid << STE_VSID_SHIFT;
 	esid_data = esid << SID_SHIFT | STE_ESID_KP | STE_ESID_V;
@@ -83,7 +83,7 @@
 		}
 
 		/* Dont cast out the first kernel segment */
-		if ((castout_ste->esid_data & ESID_MASK) != KERNELBASE)
+		if ((castout_ste->esid_data & ESID_MASK) != PAGE_OFFSET)
 			break;
 
 		castout_entry = (castout_entry + 1) & 0xf;
@@ -251,7 +251,7 @@
 			panic("Unable to allocate segment table for CPU %d.\n",
 			      cpu);
 
-		newstab += KERNELBASE;
+		newstab = (unsigned long)__va(newstab);
 
 		memset((void *)newstab, 0, HW_PAGE_SIZE);
 
@@ -270,11 +270,11 @@
  */
 void stab_initialize(unsigned long stab)
 {
-	unsigned long vsid = get_kernel_vsid(KERNELBASE);
+	unsigned long vsid = get_kernel_vsid(PAGE_OFFSET);
 	unsigned long stabreal;
 
 	asm volatile("isync; slbia; isync":::"memory");
-	make_ste(stab, GET_ESID(KERNELBASE), vsid);
+	make_ste(stab, GET_ESID(PAGE_OFFSET), vsid);
 
 	/* Order update */
 	asm volatile("sync":::"memory");