[POWERPC] Use 1TB segments

This makes the kernel use 1TB segments for all kernel mappings and for
user addresses of 1TB and above, on machines which support them
(currently POWER5+, POWER6 and PA6T).
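
In practice the user-side policy reduces to a small helper,
user_segment_size(), which switch_slb() below uses when building slbie
operands.  The hunk that adds it is not part of this excerpt, so the
following is only a sketch of its shape; SID_SHIFT_1T (40, i.e.
log2(1TB)) and mmu_highuser_ssize are assumed to match the rest of the
patch:

	/* Sketch of user_segment_size(); the real definition lives in
	 * a header hunk not shown here.  mmu_highuser_ssize is assumed
	 * to hold MMU_SEGSIZE_1T on CPUs with 1TB segment support and
	 * MMU_SEGSIZE_256M otherwise. */
	static inline int user_segment_size(unsigned long addr)
	{
		/* Use 1TB segments if possible for addresses >= 1TB */
		if (addr >= (1UL << SID_SHIFT_1T))
			return mmu_highuser_ssize;
		return MMU_SEGSIZE_256M;
	}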

We detect that the machine supports 1TB segments by looking at the
ibm,processor-segment-sizes property in the device tree.
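
The property is a list of supported SID shifts, one per segment size;
seeing the value 40 (log2 of 1TB) in a cpu node means the processor
supports 1TB segments.  A minimal sketch of such a flat-device-tree
scan (the function name and the bare return value are made up for this
sketch; the real hunk sets a CPU feature bit and is not shown here):

	/* Sketch only: walk "cpu" nodes and look for a segment-size
	 * entry of 40 (log2 of 1TB) in ibm,processor-segment-sizes. */
	static int __init scan_seg_sizes(unsigned long node, const char *uname,
					 int depth, void *data)
	{
		char *type = of_get_flat_dt_prop(node, "device_type", NULL);
		u32 *prop;
		unsigned long size = 0;

		if (type == NULL || strcmp(type, "cpu") != 0)
			return 0;	/* only cpu nodes carry the property */
		prop = of_get_flat_dt_prop(node, "ibm,processor-segment-sizes",
					   &size);
		for (; prop != NULL && size >= 4; size -= 4, ++prop)
			if (*prop == 40)
				return 1;	/* 1TB segments supported */
		return 0;
	}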

We don't currently use 1TB segments for user addresses below 1TB, since
that would effectively prevent 32-bit processes from using huge pages
unless we also had a way to revert to using 256MB segments.  That
would be possible but would involve extra complications (such as
keeping track of which segment size was used when HPTEs were inserted)
and is not addressed here.
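
The complication is that the hashed page table is indexed by a hash of
the VSID, and that hash is computed differently for the two segment
sizes, so an HPTE can only be found (and thus invalidated) with the
same ssize that was used to insert it.  Schematically (the constants
here are simplified, not the architected hash function):

	/* Illustrative sketch, not the architected function: the two
	 * segment sizes mix different VSID bits into the PTE-group
	 * hash, so an invalidate must recompute the hash with the
	 * ssize that was in force when the HPTE was inserted. */
	static unsigned long example_hpte_hash(unsigned long va, int ssize)
	{
		if (ssize == MMU_SEGSIZE_256M)
			return (va >> 28) ^ ((va & 0x0fffffffUL) >> 12);
		return (va >> 40) ^ ((va & 0xffffffffffUL) >> 12);
	}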

Parts of this patch were originally written by Ben Herrenschmidt.

Signed-off-by: Paul Mackerras <paulus@samba.org>
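
For reference, the shape of the SLB entries the reworked
mk_esid_data()/mk_vsid_data() helpers below produce in the 1TB case
(ESID_MASK_1T, SLB_VSID_SHIFT_1T and SLB_VSID_SSIZE_SHIFT are defined
in header hunks outside this excerpt):

	/* Worked sketch of the 1TB case of the helpers below: the ESID
	 * keeps only the address bits at or above the 1TB boundary,
	 * and the segment size is encoded into the VSID word via
	 * SLB_VSID_SSIZE_SHIFT. */
	esid_data = (ea & ESID_MASK_1T) | SLB_ESID_V | slot;
	vsid_data = (get_kernel_vsid(ea, MMU_SEGSIZE_1T) << SLB_VSID_SHIFT_1T)
		| flags | ((unsigned long)MMU_SEGSIZE_1T << SLB_VSID_SSIZE_SHIFT);
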
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index 4bee1cf..6c164ce 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -43,17 +43,26 @@
 	slb_allocate_realmode(ea);
 }
 
-static inline unsigned long mk_esid_data(unsigned long ea, unsigned long slot)
+static inline unsigned long mk_esid_data(unsigned long ea, int ssize,
+					 unsigned long slot)
 {
-	return (ea & ESID_MASK) | SLB_ESID_V | slot;
+	unsigned long mask;
+
+	mask = (ssize == MMU_SEGSIZE_256M)? ESID_MASK: ESID_MASK_1T;
+	return (ea & mask) | SLB_ESID_V | slot;
 }
 
-static inline unsigned long mk_vsid_data(unsigned long ea, unsigned long flags)
+#define slb_vsid_shift(ssize)	\
+	((ssize) == MMU_SEGSIZE_256M? SLB_VSID_SHIFT: SLB_VSID_SHIFT_1T)
+
+static inline unsigned long mk_vsid_data(unsigned long ea, int ssize,
+					 unsigned long flags)
 {
-	return (get_kernel_vsid(ea) << SLB_VSID_SHIFT) | flags;
+	return (get_kernel_vsid(ea, ssize) << slb_vsid_shift(ssize)) | flags |
+		((unsigned long) ssize << SLB_VSID_SSIZE_SHIFT);
 }
 
-static inline void slb_shadow_update(unsigned long ea,
+static inline void slb_shadow_update(unsigned long ea, int ssize,
 				     unsigned long flags,
 				     unsigned long entry)
 {
@@ -63,8 +72,8 @@
 	 * we only update the current CPU's SLB shadow buffer.
 	 */
 	get_slb_shadow()->save_area[entry].esid = 0;
-	get_slb_shadow()->save_area[entry].vsid = mk_vsid_data(ea, flags);
-	get_slb_shadow()->save_area[entry].esid = mk_esid_data(ea, entry);
+	get_slb_shadow()->save_area[entry].vsid = mk_vsid_data(ea, ssize, flags);
+	get_slb_shadow()->save_area[entry].esid = mk_esid_data(ea, ssize, entry);
 }
 
 static inline void slb_shadow_clear(unsigned long entry)
@@ -72,7 +81,8 @@
 	get_slb_shadow()->save_area[entry].esid = 0;
 }
 
-static inline void create_shadowed_slbe(unsigned long ea, unsigned long flags,
+static inline void create_shadowed_slbe(unsigned long ea, int ssize,
+					unsigned long flags,
 					unsigned long entry)
 {
 	/*
@@ -80,11 +90,11 @@
 	 * we don't get a stale entry here if we get preempted by PHYP
 	 * between these two statements.
 	 */
-	slb_shadow_update(ea, flags, entry);
+	slb_shadow_update(ea, ssize, flags, entry);
 
 	asm volatile("slbmte  %0,%1" :
-		     : "r" (mk_vsid_data(ea, flags)),
-		       "r" (mk_esid_data(ea, entry))
+		     : "r" (mk_vsid_data(ea, ssize, flags)),
+		       "r" (mk_esid_data(ea, ssize, entry))
 		     : "memory" );
 }
 
@@ -93,7 +103,7 @@
 	/* If you change this make sure you change SLB_NUM_BOLTED
 	 * appropriately too. */
 	unsigned long linear_llp, vmalloc_llp, lflags, vflags;
-	unsigned long ksp_esid_data;
+	unsigned long ksp_esid_data, ksp_vsid_data;
 
 	WARN_ON(!irqs_disabled());
 
@@ -102,13 +112,15 @@
 	lflags = SLB_VSID_KERNEL | linear_llp;
 	vflags = SLB_VSID_KERNEL | vmalloc_llp;
 
-	ksp_esid_data = mk_esid_data(get_paca()->kstack, 2);
-	if ((ksp_esid_data & ESID_MASK) == PAGE_OFFSET) {
+	ksp_esid_data = mk_esid_data(get_paca()->kstack, mmu_kernel_ssize, 2);
+	if ((ksp_esid_data & ~0xfffffffUL) <= PAGE_OFFSET) {
 		ksp_esid_data &= ~SLB_ESID_V;
+		ksp_vsid_data = 0;
 		slb_shadow_clear(2);
 	} else {
 		/* Update stack entry; others don't change */
-		slb_shadow_update(get_paca()->kstack, lflags, 2);
+		slb_shadow_update(get_paca()->kstack, mmu_kernel_ssize, lflags, 2);
+		ksp_vsid_data = get_slb_shadow()->save_area[2].vsid;
 	}
 
 	/* We need to do this all in asm, so we're sure we don't touch
@@ -120,9 +132,9 @@
 		     /* Slot 2 - kernel stack */
 		     "slbmte	%2,%3\n"
 		     "isync"
-		     :: "r"(mk_vsid_data(VMALLOC_START, vflags)),
-		        "r"(mk_esid_data(VMALLOC_START, 1)),
-		        "r"(mk_vsid_data(ksp_esid_data, lflags)),
+		     :: "r"(mk_vsid_data(VMALLOC_START, mmu_kernel_ssize, vflags)),
+		        "r"(mk_esid_data(VMALLOC_START, mmu_kernel_ssize, 1)),
+		        "r"(ksp_vsid_data),
 		        "r"(ksp_esid_data)
 		     : "memory");
 }
@@ -132,7 +144,7 @@
 	unsigned long vflags;
 
 	vflags = SLB_VSID_KERNEL | mmu_psize_defs[mmu_vmalloc_psize].sllp;
-	slb_shadow_update(VMALLOC_START, vflags, 1);
+	slb_shadow_update(VMALLOC_START, mmu_kernel_ssize, vflags, 1);
 	slb_flush_and_rebolt();
 }
 
@@ -140,7 +152,7 @@
 void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
 {
 	unsigned long offset = get_paca()->slb_cache_ptr;
-	unsigned long esid_data = 0;
+	unsigned long slbie_data = 0;
 	unsigned long pc = KSTK_EIP(tsk);
 	unsigned long stack = KSTK_ESP(tsk);
 	unsigned long unmapped_base;
@@ -149,9 +161,12 @@
 		int i;
 		asm volatile("isync" : : : "memory");
 		for (i = 0; i < offset; i++) {
-			esid_data = ((unsigned long)get_paca()->slb_cache[i]
-				<< SID_SHIFT) | SLBIE_C;
-			asm volatile("slbie %0" : : "r" (esid_data));
+			slbie_data = (unsigned long)get_paca()->slb_cache[i]
+				<< SID_SHIFT; /* EA */
+			slbie_data |= user_segment_size(slbie_data)
+				<< SLBIE_SSIZE_SHIFT;
+			slbie_data |= SLBIE_C; /* C set for user addresses */
+			asm volatile("slbie %0" : : "r" (slbie_data));
 		}
 		asm volatile("isync" : : : "memory");
 	} else {
@@ -160,7 +175,7 @@
 
 	/* Workaround POWER5 < DD2.1 issue */
 	if (offset == 1 || offset > SLB_CACHE_ENTRIES)
-		asm volatile("slbie %0" : : "r" (esid_data));
+		asm volatile("slbie %0" : : "r" (slbie_data));
 
 	get_paca()->slb_cache_ptr = 0;
 	get_paca()->context = mm->context;
@@ -243,9 +258,9 @@
 	asm volatile("isync":::"memory");
 	asm volatile("slbmte  %0,%0"::"r" (0) : "memory");
 	asm volatile("isync; slbia; isync":::"memory");
-	create_shadowed_slbe(PAGE_OFFSET, lflags, 0);
+	create_shadowed_slbe(PAGE_OFFSET, mmu_kernel_ssize, lflags, 0);
 
-	create_shadowed_slbe(VMALLOC_START, vflags, 1);
+	create_shadowed_slbe(VMALLOC_START, mmu_kernel_ssize, vflags, 1);
 
 	/* We don't bolt the stack for the time being - we're in boot,
 	 * so the stack is in the bolted segment.  By the time it goes