[SPARC64]: Deal with PTE layout differences in SUN4V.

Yes, you heard it right, they changed the PTE layout for
SUN4V.  Ho hum...

This is the simple and inefficient way to support this.
It'll get optimized, don't worry.

Signed-off-by: David S. Miller <davem@davemloft.net>
diff --git a/arch/sparc64/kernel/itlb_miss.S b/arch/sparc64/kernel/itlb_miss.S
index 97facce..730caa4 100644
--- a/arch/sparc64/kernel/itlb_miss.S
+++ b/arch/sparc64/kernel/itlb_miss.S
@@ -6,9 +6,10 @@
 	 nop					! Delay slot (fill me)
 	TSB_LOAD_QUAD(%g1, %g4)			! Load TSB entry
 	cmp	%g4, %g6			! Compare TAG
-	sethi	%hi(_PAGE_EXEC), %g4		! Setup exec check
+	sethi	%hi(PAGE_EXEC), %g4		! Setup exec check
 
 /* ITLB ** ICACHE line 2: TSB compare and TLB load	*/
+	ldx	[%g4 + %lo(PAGE_EXEC)], %g4
 	bne,pn	%xcc, tsb_miss_itlb		! Miss
 	 mov	FAULT_CODE_ITLB, %g3
 	andcc	%g5, %g4, %g0			! Executable?
@@ -16,7 +17,6 @@
 	 nop					! Delay slot, fill me
 	stxa	%g5, [%g0] ASI_ITLB_DATA_IN	! Load TLB
 	retry					! Trap done
-	nop
 
 /* ITLB ** ICACHE line 3: 				*/
 	nop
diff --git a/arch/sparc64/kernel/ktlb.S b/arch/sparc64/kernel/ktlb.S
index 2d333ab..47dfd45 100644
--- a/arch/sparc64/kernel/ktlb.S
+++ b/arch/sparc64/kernel/ktlb.S
@@ -131,16 +131,8 @@
 	brgez,pn	%g4, kvmap_dtlb_nonlinear
 	 nop
 
-#define KERN_HIGHBITS	((_PAGE_VALID|_PAGE_SZ4MB)^0xfffff80000000000)
-#define KERN_LOWBITS	(_PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_W)
-
-	sethi		%uhi(KERN_HIGHBITS), %g2
-	or		%g2, %ulo(KERN_HIGHBITS), %g2
-	sllx		%g2, 32, %g2
-	or		%g2, KERN_LOWBITS, %g2
-
-#undef KERN_HIGHBITS
-#undef KERN_LOWBITS
+	sethi		%hi(kern_linear_pte_xor), %g2
+	ldx		[%g2 + %lo(kern_linear_pte_xor)], %g2
 
 	.globl		kvmap_linear_patch
 kvmap_linear_patch:
diff --git a/arch/sparc64/kernel/setup.c b/arch/sparc64/kernel/setup.c
index f36b257..ca75f3b 100644
--- a/arch/sparc64/kernel/setup.c
+++ b/arch/sparc64/kernel/setup.c
@@ -64,12 +64,6 @@
 	16                      /* orig-video-points */
 };
 
-/* Typing sync at the prom prompt calls the function pointed to by
- * the sync callback which I set to the following function.
- * This should sync all filesystems and return, for now it just
- * prints out pretty messages and returns.
- */
-
 void (*prom_palette)(int);
 void (*prom_keyboard)(void);
 
@@ -79,263 +73,6 @@
 	prom_write(s, n);
 }
 
-static struct console prom_console = {
-	.name =		"prom",
-	.write =	prom_console_write,
-	.flags =	CON_CONSDEV | CON_ENABLED,
-	.index =	-1,
-};
-
-#define PROM_TRUE	-1
-#define PROM_FALSE	0
-
-/* Pretty sick eh? */
-int prom_callback(long *args)
-{
-	struct console *cons, *saved_console = NULL;
-	unsigned long flags;
-	char *cmd;
-	extern spinlock_t prom_entry_lock;
-
-	if (!args)
-		return -1;
-	if (!(cmd = (char *)args[0]))
-		return -1;
-
-	/*
-	 * The callback can be invoked on the cpu that first dropped 
-	 * into prom_cmdline after taking the serial interrupt, or on 
-	 * a slave processor that was smp_captured() if the 
-	 * administrator has done a switch-cpu inside obp. In either 
-	 * case, the cpu is marked as in-interrupt. Drop IRQ locks.
-	 */
-	irq_exit();
-
-	/* XXX Revisit the locking here someday.  This is a debugging
-	 * XXX feature so it isnt all that critical.  -DaveM
-	 */
-	local_irq_save(flags);
-
-	spin_unlock(&prom_entry_lock);
-	cons = console_drivers;
-	while (cons) {
-		unregister_console(cons);
-		cons->flags &= ~(CON_PRINTBUFFER);
-		cons->next = saved_console;
-		saved_console = cons;
-		cons = console_drivers;
-	}
-	register_console(&prom_console);
-	if (!strcmp(cmd, "sync")) {
-		prom_printf("PROM `%s' command...\n", cmd);
-		show_free_areas();
-		if (current->pid != 0) {
-			local_irq_enable();
-			sys_sync();
-			local_irq_disable();
-		}
-		args[2] = 0;
-		args[args[1] + 3] = -1;
-		prom_printf("Returning to PROM\n");
-	} else if (!strcmp(cmd, "va>tte-data")) {
-		unsigned long ctx, va;
-		unsigned long tte = 0;
-		long res = PROM_FALSE;
-
-		ctx = args[3];
-		va = args[4];
-		if (ctx) {
-			/*
-			 * Find process owning ctx, lookup mapping.
-			 */
-			struct task_struct *p;
-			struct mm_struct *mm = NULL;
-			pgd_t *pgdp;
-			pud_t *pudp;
-			pmd_t *pmdp;
-			pte_t *ptep;
-			pte_t pte;
-
-			for_each_process(p) {
-				mm = p->mm;
-				if (CTX_NRBITS(mm->context) == ctx)
-					break;
-			}
-			if (!mm ||
-			    CTX_NRBITS(mm->context) != ctx)
-				goto done;
-
-			pgdp = pgd_offset(mm, va);
-			if (pgd_none(*pgdp))
-				goto done;
-			pudp = pud_offset(pgdp, va);
-			if (pud_none(*pudp))
-				goto done;
-			pmdp = pmd_offset(pudp, va);
-			if (pmd_none(*pmdp))
-				goto done;
-
-			/* Preemption implicitly disabled by virtue of
-			 * being called from inside OBP.
-			 */
-			ptep = pte_offset_map(pmdp, va);
-			pte = *ptep;
-			if (pte_present(pte)) {
-				tte = pte_val(pte);
-				res = PROM_TRUE;
-			}
-			pte_unmap(ptep);
-			goto done;
-		}
-
-		if ((va >= KERNBASE) && (va < (KERNBASE + (4 * 1024 * 1024)))) {
-			if (tlb_type == spitfire) {
-				extern unsigned long sparc64_kern_pri_context;
-
-				/* Spitfire Errata #32 workaround */
-				__asm__ __volatile__(
-					"stxa 	%0, [%1] %2\n\t"
-					"flush	%%g6"
-					: /* No outputs */
-					: "r" (sparc64_kern_pri_context),
-					  "r" (PRIMARY_CONTEXT),
-					  "i" (ASI_DMMU));
-			}
-
-			/*
-			 * Locked down tlb entry.
-			 */
-
-			if (tlb_type == spitfire) {
-				tte = spitfire_get_dtlb_data(SPITFIRE_HIGHEST_LOCKED_TLBENT);
-				res = PROM_TRUE;
-			} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
-				tte = cheetah_get_ldtlb_data(CHEETAH_HIGHEST_LOCKED_TLBENT);
-				res = PROM_TRUE;
-			}
-			goto done;
-		}
-
-		if (va < PGDIR_SIZE) {
-			/*
-			 * vmalloc or prom_inherited mapping.
-			 */
-			pgd_t *pgdp;
-			pud_t *pudp;
-			pmd_t *pmdp;
-			pte_t *ptep;
-			pte_t pte;
-			int error;
-
-			if ((va >= LOW_OBP_ADDRESS) && (va < HI_OBP_ADDRESS)) {
-				tte = prom_virt_to_phys(va, &error);
-				if (!error)
-					res = PROM_TRUE;
-				goto done;
-			}
-			pgdp = pgd_offset_k(va);
-			if (pgd_none(*pgdp))
-				goto done;
-			pudp = pud_offset(pgdp, va);
-			if (pud_none(*pudp))
-				goto done;
-			pmdp = pmd_offset(pudp, va);
-			if (pmd_none(*pmdp))
-				goto done;
-
-			/* Preemption implicitly disabled by virtue of
-			 * being called from inside OBP.
-			 */
-			ptep = pte_offset_kernel(pmdp, va);
-			pte = *ptep;
-			if (pte_present(pte)) {
-				tte = pte_val(pte);
-				res = PROM_TRUE;
-			}
-			goto done;
-		}
-
-		if (va < PAGE_OFFSET) {
-			/*
-			 * No mappings here.
-			 */
-			goto done;
-		}
-
-		if (va & (1UL << 40)) {
-			/*
-			 * I/O page.
-			 */
-
-			tte = (__pa(va) & _PAGE_PADDR) |
-			      _PAGE_VALID | _PAGE_SZ4MB |
-			      _PAGE_E | _PAGE_P | _PAGE_W;
-			res = PROM_TRUE;
-			goto done;
-		}
-
-		/*
-		 * Normal page.
-		 */
-		tte = (__pa(va) & _PAGE_PADDR) |
-		      _PAGE_VALID | _PAGE_SZ4MB |
-		      _PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_W;
-		res = PROM_TRUE;
-
-	done:
-		if (res == PROM_TRUE) {
-			args[2] = 3;
-			args[args[1] + 3] = 0;
-			args[args[1] + 4] = res;
-			args[args[1] + 5] = tte;
-		} else {
-			args[2] = 2;
-			args[args[1] + 3] = 0;
-			args[args[1] + 4] = res;
-		}
-	} else if (!strcmp(cmd, ".soft1")) {
-		unsigned long tte;
-
-		tte = args[3];
-		prom_printf("%lx:\"%s%s%s%s%s\" ",
-			    (tte & _PAGE_SOFT) >> 7,
-			    tte & _PAGE_MODIFIED ? "M" : "-",
-			    tte & _PAGE_ACCESSED ? "A" : "-",
-			    tte & _PAGE_READ     ? "W" : "-",
-			    tte & _PAGE_WRITE    ? "R" : "-",
-			    tte & _PAGE_PRESENT  ? "P" : "-");
-
-		args[2] = 2;
-		args[args[1] + 3] = 0;
-		args[args[1] + 4] = PROM_TRUE;
-	} else if (!strcmp(cmd, ".soft2")) {
-		unsigned long tte;
-
-		tte = args[3];
-		prom_printf("%lx ", (tte & 0x07FC000000000000UL) >> 50);
-
-		args[2] = 2;
-		args[args[1] + 3] = 0;
-		args[args[1] + 4] = PROM_TRUE;
-	} else {
-		prom_printf("unknown PROM `%s' command...\n", cmd);
-	}
-	unregister_console(&prom_console);
-	while (saved_console) {
-		cons = saved_console;
-		saved_console = cons->next;
-		register_console(cons);
-	}
-	spin_lock(&prom_entry_lock);
-	local_irq_restore(flags);
-
-	/*
-	 * Restore in-interrupt status for a resume from obp.
-	 */
-	irq_enter();
-	return 0;
-}
-
 unsigned int boot_flags = 0;
 #define BOOTME_DEBUG  0x1
 #define BOOTME_SINGLE 0x2
@@ -483,17 +220,6 @@
 
 static struct pt_regs fake_swapper_regs = { { 0, }, 0, 0, 0, 0 };
 
-void register_prom_callbacks(void)
-{
-	prom_setcallback(prom_callback);
-	prom_feval(": linux-va>tte-data 2 \" va>tte-data\" $callback drop ; "
-		   "' linux-va>tte-data to va>tte-data");
-	prom_feval(": linux-.soft1 1 \" .soft1\" $callback 2drop ; "
-		   "' linux-.soft1 to .soft1");
-	prom_feval(": linux-.soft2 1 \" .soft2\" $callback 2drop ; "
-		   "' linux-.soft2 to .soft2");
-}
-
 static void __init per_cpu_patch(void)
 {
 #ifdef CONFIG_SMP
diff --git a/arch/sparc64/kernel/sun4v_tlb_miss.S b/arch/sparc64/kernel/sun4v_tlb_miss.S
index 597359c..950ca74 100644
--- a/arch/sparc64/kernel/sun4v_tlb_miss.S
+++ b/arch/sparc64/kernel/sun4v_tlb_miss.S
@@ -59,7 +59,8 @@
 	/* Load TSB tag/pte into %g2/%g3 and compare the tag.  */
 	ldda	[%g1] ASI_QUAD_LDD_PHYS, %g2
 	cmp	%g2, %g6
-	sethi	%hi(_PAGE_EXEC), %g7
+	sethi	%hi(PAGE_EXEC), %g7
+	ldx	[%g7 + %lo(PAGE_EXEC)], %g7
 	bne,a,pn %xcc, tsb_miss_page_table_walk
 	 mov	FAULT_CODE_ITLB, %g3
 	andcc	%g3, %g7, %g0
diff --git a/arch/sparc64/kernel/tsb.S b/arch/sparc64/kernel/tsb.S
index 667dcb0..be8f089 100644
--- a/arch/sparc64/kernel/tsb.S
+++ b/arch/sparc64/kernel/tsb.S
@@ -56,10 +56,11 @@
 	/* If it is larger than the base page size, don't
 	 * bother putting it into the TSB.
 	 */
-	srlx		%g5, 32, %g2
-	sethi		%hi(_PAGE_ALL_SZ_BITS >> 32), %g7
-	and		%g2, %g7, %g2
-	sethi		%hi(_PAGE_SZBITS >> 32), %g7
+	sethi		%hi(_PAGE_ALL_SZ_BITS), %g7
+	ldx		[%g7 + %lo(_PAGE_ALL_SZ_BITS)], %g7
+	and		%g5, %g7, %g2
+	sethi		%hi(_PAGE_SZBITS), %g7
+	ldx		[%g7 + %lo(_PAGE_SZBITS)], %g7
 	cmp		%g2, %g7
 	bne,a,pn	%xcc, tsb_tlb_reload
 	 TSB_STORE(%g1, %g0)
diff --git a/arch/sparc64/lib/clear_page.S b/arch/sparc64/lib/clear_page.S
index cdc634b..77e531f 100644
--- a/arch/sparc64/lib/clear_page.S
+++ b/arch/sparc64/lib/clear_page.S
@@ -23,9 +23,6 @@
 	 * disable preemption during the clear.
 	 */
 
-#define TTE_BITS_TOP	(_PAGE_VALID | _PAGE_SZBITS)
-#define TTE_BITS_BOTTOM	(_PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_L | _PAGE_W)
-
 	.text
 
 	.globl		_clear_page
@@ -44,12 +41,11 @@
 	sethi		%hi(PAGE_SIZE), %o4
 
 	sllx		%g2, 32, %g2
-	sethi		%uhi(TTE_BITS_TOP), %g3
+	sethi		%hi(PAGE_KERNEL_LOCKED), %g3
 
-	sllx		%g3, 32, %g3
+	ldx		[%g3 + %lo(PAGE_KERNEL_LOCKED)], %g3
 	sub		%o0, %g2, %g1		! paddr
 
-	or		%g3, TTE_BITS_BOTTOM, %g3
 	and		%o1, %o4, %o0		! vaddr D-cache alias bit
 
 	or		%g1, %g3, %g1		! TTE data
diff --git a/arch/sparc64/lib/copy_page.S b/arch/sparc64/lib/copy_page.S
index feebb14..3746066 100644
--- a/arch/sparc64/lib/copy_page.S
+++ b/arch/sparc64/lib/copy_page.S
@@ -23,8 +23,6 @@
 	 * disable preemption during the clear.
 	 */
 
-#define TTE_BITS_TOP	(_PAGE_VALID | _PAGE_SZBITS)
-#define TTE_BITS_BOTTOM	(_PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_L | _PAGE_W)
 #define	DCACHE_SIZE	(PAGE_SIZE * 2)
 
 #if (PAGE_SHIFT == 13) || (PAGE_SHIFT == 19)
@@ -52,13 +50,12 @@
 	sethi		%hi(PAGE_SIZE), %o3
 
 	sllx		%g2, 32, %g2
-	sethi		%uhi(TTE_BITS_TOP), %g3
+	sethi		%hi(PAGE_KERNEL_LOCKED), %g3
 
-	sllx		%g3, 32, %g3
+	ldx		[%g3 + %lo(PAGE_KERNEL_LOCKED)], %g3
 	sub		%o0, %g2, %g1		! dest paddr
 
 	sub		%o1, %g2, %g2		! src paddr
-	or		%g3, TTE_BITS_BOTTOM, %g3
 
 	and		%o2, %o3, %o0		! vaddr D-cache alias bit
 	or		%g1, %g3, %g1		! dest TTE data
diff --git a/arch/sparc64/mm/fault.c b/arch/sparc64/mm/fault.c
index 6f0539a..439a53c 100644
--- a/arch/sparc64/mm/fault.c
+++ b/arch/sparc64/mm/fault.c
@@ -137,7 +137,7 @@
 	if (!pte_present(pte))
 		goto out;
 
-	pa  = (pte_val(pte) & _PAGE_PADDR);
+	pa  = (pte_pfn(pte) << PAGE_SHIFT);
 	pa += (tpc & ~PAGE_MASK);
 
 	/* Use phys bypass so we don't pollute dtlb/dcache. */
diff --git a/arch/sparc64/mm/generic.c b/arch/sparc64/mm/generic.c
index 580b63d..5fc5c57 100644
--- a/arch/sparc64/mm/generic.c
+++ b/arch/sparc64/mm/generic.c
@@ -15,15 +15,6 @@
 #include <asm/page.h>
 #include <asm/tlbflush.h>
 
-static inline pte_t mk_pte_io(unsigned long page, pgprot_t prot, int space)
-{
-	pte_t pte;
-	pte_val(pte) = (((page) | pgprot_val(prot) | _PAGE_E) &
-			~(unsigned long)_PAGE_CACHE);
-	pte_val(pte) |= (((unsigned long)space) << 32);
-	return pte;
-}
-
 /* Remap IO memory, the same way as remap_pfn_range(), but use
  * the obio memory space.
  *
@@ -48,24 +39,29 @@
 		pte_t entry;
 		unsigned long curend = address + PAGE_SIZE;
 		
-		entry = mk_pte_io(offset, prot, space);
+		entry = mk_pte_io(offset, prot, space, PAGE_SIZE);
 		if (!(address & 0xffff)) {
-			if (!(address & 0x3fffff) && !(offset & 0x3ffffe) && end >= address + 0x400000) {
-				entry = mk_pte_io(offset,
-						  __pgprot(pgprot_val (prot) | _PAGE_SZ4MB),
-						  space);
+			if (PAGE_SIZE < (4 * 1024 * 1024) &&
+			    !(address & 0x3fffff) &&
+			    !(offset & 0x3ffffe) &&
+			    end >= address + 0x400000) {
+				entry = mk_pte_io(offset, prot, space,
+						  4 * 1024 * 1024);
 				curend = address + 0x400000;
 				offset += 0x400000;
-			} else if (!(address & 0x7ffff) && !(offset & 0x7fffe) && end >= address + 0x80000) {
-				entry = mk_pte_io(offset,
-						  __pgprot(pgprot_val (prot) | _PAGE_SZ512K),
-						  space);
+			} else if (PAGE_SIZE < (512 * 1024) &&
+				   !(address & 0x7ffff) &&
+				   !(offset & 0x7fffe) &&
+				   end >= address + 0x80000) {
+				entry = mk_pte_io(offset, prot, space,
+						  512 * 1024);	/* 0x80000 == 512K, not 512M */
 				curend = address + 0x80000;
 				offset += 0x80000;
-			} else if (!(offset & 0xfffe) && end >= address + 0x10000) {
-				entry = mk_pte_io(offset,
-						  __pgprot(pgprot_val (prot) | _PAGE_SZ64K),
-						  space);
+			} else if (PAGE_SIZE < (64 * 1024) &&
+				   !(offset & 0xfffe) &&
+				   end >= address + 0x10000) {
+				entry = mk_pte_io(offset, prot, space,
+						  64 * 1024);
 				curend = address + 0x10000;
 				offset += 0x10000;
 			} else
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
index 92756da..9c2fc23 100644
--- a/arch/sparc64/mm/init.c
+++ b/arch/sparc64/mm/init.c
@@ -6,6 +6,7 @@
  */
  
 #include <linux/config.h>
+#include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/sched.h>
 #include <linux/string.h>
@@ -118,6 +119,7 @@
 unsigned long kern_base __read_mostly;
 unsigned long kern_size __read_mostly;
 unsigned long pfn_base __read_mostly;
+unsigned long kern_linear_pte_xor __read_mostly;
 
 /* get_new_mmu_context() uses "cache + 1".  */
 DEFINE_SPINLOCK(ctx_alloc_lock);
@@ -256,6 +258,9 @@
 	__tsb_insert(tsb_addr, tag, pte);
 }
 
+unsigned long _PAGE_ALL_SZ_BITS __read_mostly;
+unsigned long _PAGE_SZBITS __read_mostly;
+
 void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
 {
 	struct mm_struct *mm;
@@ -398,39 +403,9 @@
 struct linux_prom_translation prom_trans[512] __read_mostly;
 unsigned int prom_trans_ents __read_mostly;
 
-extern unsigned long prom_boot_page;
-extern void prom_remap(unsigned long physpage, unsigned long virtpage, int mmu_ihandle);
-extern int prom_get_mmu_ihandle(void);
-extern void register_prom_callbacks(void);
-
 /* Exported for SMP bootup purposes. */
 unsigned long kern_locked_tte_data;
 
-/*
- * Translate PROM's mapping we capture at boot time into physical address.
- * The second parameter is only set from prom_callback() invocations.
- */
-unsigned long prom_virt_to_phys(unsigned long promva, int *error)
-{
-	int i;
-
-	for (i = 0; i < prom_trans_ents; i++) {
-		struct linux_prom_translation *p = &prom_trans[i];
-
-		if (promva >= p->virt &&
-		    promva < (p->virt + p->size)) {
-			unsigned long base = p->data & _PAGE_PADDR;
-
-			if (error)
-				*error = 0;
-			return base + (promva & (8192 - 1));
-		}
-	}
-	if (error)
-		*error = 1;
-	return 0UL;
-}
-
 /* The obp translations are saved based on 8k pagesize, since obp can
  * use a mixture of pagesizes. Misses to the LOW_OBP_ADDRESS ->
  * HI_OBP_ADDRESS range are handled in ktlb.S.
@@ -537,6 +512,8 @@
 			       "3" (arg2), "4" (arg3));
 }
 
+static unsigned long kern_large_tte(unsigned long paddr);
+
 static void __init remap_kernel(void)
 {
 	unsigned long phys_page, tte_vaddr, tte_data;
@@ -544,9 +521,7 @@
 
 	tte_vaddr = (unsigned long) KERNBASE;
 	phys_page = (prom_boot_mapping_phys_low >> 22UL) << 22UL;
-	tte_data = (phys_page | (_PAGE_VALID | _PAGE_SZ4MB |
-				 _PAGE_CP | _PAGE_CV | _PAGE_P |
-				 _PAGE_L | _PAGE_W));
+	tte_data = kern_large_tte(phys_page);
 
 	kern_locked_tte_data = tte_data;
 
@@ -591,10 +566,6 @@
 	prom_printf("Remapping the kernel... ");
 	remap_kernel();
 	prom_printf("done.\n");
-
-	prom_printf("Registering callbacks... ");
-	register_prom_callbacks();
-	prom_printf("done.\n");
 }
 
 void prom_world(int enter)
@@ -631,63 +602,6 @@
 }
 #endif /* DCACHE_ALIASING_POSSIBLE */
 
-/* If not locked, zap it. */
-void __flush_tlb_all(void)
-{
-	unsigned long pstate;
-	int i;
-
-	__asm__ __volatile__("flushw\n\t"
-			     "rdpr	%%pstate, %0\n\t"
-			     "wrpr	%0, %1, %%pstate"
-			     : "=r" (pstate)
-			     : "i" (PSTATE_IE));
-	if (tlb_type == spitfire) {
-		for (i = 0; i < 64; i++) {
-			/* Spitfire Errata #32 workaround */
-			/* NOTE: Always runs on spitfire, so no
-			 *       cheetah+ page size encodings.
-			 */
-			__asm__ __volatile__("stxa	%0, [%1] %2\n\t"
-					     "flush	%%g6"
-					     : /* No outputs */
-					     : "r" (0),
-					     "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
-
-			if (!(spitfire_get_dtlb_data(i) & _PAGE_L)) {
-				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
-						     "membar #Sync"
-						     : /* no outputs */
-						     : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
-				spitfire_put_dtlb_data(i, 0x0UL);
-			}
-
-			/* Spitfire Errata #32 workaround */
-			/* NOTE: Always runs on spitfire, so no
-			 *       cheetah+ page size encodings.
-			 */
-			__asm__ __volatile__("stxa	%0, [%1] %2\n\t"
-					     "flush	%%g6"
-					     : /* No outputs */
-					     : "r" (0),
-					     "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
-
-			if (!(spitfire_get_itlb_data(i) & _PAGE_L)) {
-				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
-						     "membar #Sync"
-						     : /* no outputs */
-						     : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU));
-				spitfire_put_itlb_data(i, 0x0UL);
-			}
-		}
-	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
-		cheetah_flush_dtlb_all();
-		cheetah_flush_itlb_all();
-	}
-	__asm__ __volatile__("wrpr	%0, 0, %%pstate"
-			     : : "r" (pstate));
-}
-
 /* Caller does TLB context flushing on local CPU if necessary.
  * The caller also ensures that CTX_VALID(mm->context) is false.
  *
@@ -1180,6 +1094,9 @@
 static unsigned long last_valid_pfn;
 pgd_t swapper_pg_dir[2048];
 
+static void sun4u_pgprot_init(void);
+static void sun4v_pgprot_init(void);
+
 void __init paging_init(void)
 {
 	unsigned long end_pfn, pages_avail, shift;
@@ -1188,6 +1105,11 @@
 	kern_base = (prom_boot_mapping_phys_low >> 22UL) << 22UL;
 	kern_size = (unsigned long)&_end - (unsigned long)KERNBASE;
 
+	if (tlb_type == hypervisor)
+		sun4v_pgprot_init();
+	else
+		sun4u_pgprot_init();
+
 	if (tlb_type == cheetah_plus ||
 	    tlb_type == hypervisor)
 		tsb_phys_patch();
@@ -1411,3 +1333,596 @@
 	}
 }
 #endif
+
+/* SUN4U pte bits... */
+#define _PAGE_SZ4MB_4U	  0x6000000000000000 /* 4MB Page               */
+#define _PAGE_SZ512K_4U	  0x4000000000000000 /* 512K Page              */
+#define _PAGE_SZ64K_4U	  0x2000000000000000 /* 64K Page               */
+#define _PAGE_SZ8K_4U	  0x0000000000000000 /* 8K Page                */
+#define _PAGE_NFO_4U	  0x1000000000000000 /* No Fault Only          */
+#define _PAGE_IE_4U	  0x0800000000000000 /* Invert Endianness      */
+#define _PAGE_SOFT2_4U	  0x07FC000000000000 /* Software bits, set 2   */
+#define _PAGE_RES1_4U	  0x0002000000000000 /* Reserved               */
+#define _PAGE_SZ32MB_4U	  0x0001000000000000 /* (Panther) 32MB page    */
+#define _PAGE_SZ256MB_4U  0x2001000000000000 /* (Panther) 256MB page   */
+#define _PAGE_SN_4U	  0x0000800000000000 /* (Cheetah) Snoop        */
+#define _PAGE_RES2_4U	  0x0000780000000000 /* Reserved               */
+#define _PAGE_PADDR_4U	  0x000007FFFFFFE000 /* (Cheetah) paddr[42:13] */
+#define _PAGE_SOFT_4U	  0x0000000000001F80 /* Software bits:         */
+#define _PAGE_EXEC_4U	  0x0000000000001000 /* Executable SW bit      */
+#define _PAGE_MODIFIED_4U 0x0000000000000800 /* Modified (dirty)       */
+#define _PAGE_FILE_4U	  0x0000000000000800 /* Pagecache page         */
+#define _PAGE_ACCESSED_4U 0x0000000000000400 /* Accessed (ref'd)       */
+#define _PAGE_READ_4U	  0x0000000000000200 /* Readable SW Bit        */
+#define _PAGE_WRITE_4U	  0x0000000000000100 /* Writable SW Bit        */
+#define _PAGE_PRESENT_4U  0x0000000000000080 /* Present                */
+#define _PAGE_L_4U	  0x0000000000000040 /* Locked TTE             */
+#define _PAGE_CP_4U	  0x0000000000000020 /* Cacheable in P-Cache   */
+#define _PAGE_CV_4U	  0x0000000000000010 /* Cacheable in V-Cache   */
+#define _PAGE_E_4U	  0x0000000000000008 /* side-Effect            */
+#define _PAGE_P_4U	  0x0000000000000004 /* Privileged Page        */
+#define _PAGE_W_4U	  0x0000000000000002 /* Writable               */
+
+/* SUN4V pte bits... */
+#define _PAGE_NFO_4V	  0x4000000000000000 /* No Fault Only          */
+#define _PAGE_SOFT2_4V	  0x3F00000000000000 /* Software bits, set 2   */
+#define _PAGE_MODIFIED_4V 0x2000000000000000 /* Modified (dirty)       */
+#define _PAGE_ACCESSED_4V 0x1000000000000000 /* Accessed (ref'd)       */
+#define _PAGE_READ_4V	  0x0800000000000000 /* Readable SW Bit        */
+#define _PAGE_WRITE_4V	  0x0400000000000000 /* Writable SW Bit        */
+#define _PAGE_PADDR_4V	  0x00FFFFFFFFFFE000 /* paddr[55:13]           */
+#define _PAGE_IE_4V	  0x0000000000001000 /* Invert Endianness      */
+#define _PAGE_E_4V	  0x0000000000000800 /* side-Effect            */
+#define _PAGE_CP_4V	  0x0000000000000400 /* Cacheable in P-Cache   */
+#define _PAGE_CV_4V	  0x0000000000000200 /* Cacheable in V-Cache   */
+#define _PAGE_P_4V	  0x0000000000000100 /* Privileged Page        */
+#define _PAGE_EXEC_4V	  0x0000000000000080 /* Executable Page        */
+#define _PAGE_W_4V	  0x0000000000000040 /* Writable               */
+#define _PAGE_SOFT_4V	  0x0000000000000030 /* Software bits          */
+#define _PAGE_FILE_4V	  0x0000000000000020 /* Pagecache page         */
+#define _PAGE_PRESENT_4V  0x0000000000000010 /* Present                */
+#define _PAGE_RESV_4V	  0x0000000000000008 /* Reserved               */
+#define _PAGE_SZ16GB_4V	  0x0000000000000007 /* 16GB Page              */
+#define _PAGE_SZ2GB_4V	  0x0000000000000006 /* 2GB Page               */
+#define _PAGE_SZ256MB_4V  0x0000000000000005 /* 256MB Page             */
+#define _PAGE_SZ32MB_4V	  0x0000000000000004 /* 32MB Page              */
+#define _PAGE_SZ4MB_4V	  0x0000000000000003 /* 4MB Page               */
+#define _PAGE_SZ512K_4V	  0x0000000000000002 /* 512K Page              */
+#define _PAGE_SZ64K_4V	  0x0000000000000001 /* 64K Page               */
+#define _PAGE_SZ8K_4V	  0x0000000000000000 /* 8K Page                */
+
+#if PAGE_SHIFT == 13
+#define _PAGE_SZBITS_4U	_PAGE_SZ8K_4U
+#define _PAGE_SZBITS_4V	_PAGE_SZ8K_4V
+#elif PAGE_SHIFT == 16
+#define _PAGE_SZBITS_4U	_PAGE_SZ64K_4U
+#define _PAGE_SZBITS_4V	_PAGE_SZ64K_4V
+#elif PAGE_SHIFT == 19
+#define _PAGE_SZBITS_4U	_PAGE_SZ512K_4U
+#define _PAGE_SZBITS_4V	_PAGE_SZ512K_4V
+#elif PAGE_SHIFT == 22
+#define _PAGE_SZBITS_4U	_PAGE_SZ4MB_4U
+#define _PAGE_SZBITS_4V	_PAGE_SZ4MB_4V
+#else
+#error Wrong PAGE_SHIFT specified
+#endif
+
+#if defined(CONFIG_HUGETLB_PAGE_SIZE_4MB)
+#define _PAGE_SZHUGE_4U	_PAGE_SZ4MB_4U
+#define _PAGE_SZHUGE_4V	_PAGE_SZ4MB_4V
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512K)
+#define _PAGE_SZHUGE_4U	_PAGE_SZ512K_4U
+#define _PAGE_SZHUGE_4V	_PAGE_SZ512K_4V
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
+#define _PAGE_SZHUGE_4U	_PAGE_SZ64K_4U
+#define _PAGE_SZHUGE_4V	_PAGE_SZ64K_4V
+#endif
+
+#define _PAGE_CACHE_4U	(_PAGE_CP_4U | _PAGE_CV_4U)
+#define _PAGE_CACHE_4V	(_PAGE_CP_4V | _PAGE_CV_4V)
+#define __DIRTY_BITS_4U	 (_PAGE_MODIFIED_4U | _PAGE_WRITE_4U | _PAGE_W_4U)
+#define __DIRTY_BITS_4V	 (_PAGE_MODIFIED_4V | _PAGE_WRITE_4V | _PAGE_W_4V)
+#define __ACCESS_BITS_4U (_PAGE_ACCESSED_4U | _PAGE_READ_4U | _PAGE_R)
+#define __ACCESS_BITS_4V (_PAGE_ACCESSED_4V | _PAGE_READ_4V | _PAGE_R)
+
+pgprot_t PAGE_KERNEL __read_mostly;
+EXPORT_SYMBOL(PAGE_KERNEL);
+
+pgprot_t PAGE_KERNEL_LOCKED __read_mostly;
+pgprot_t PAGE_COPY __read_mostly;
+pgprot_t PAGE_EXEC __read_mostly;
+unsigned long pg_iobits __read_mostly;
+
+unsigned long _PAGE_IE __read_mostly;
+unsigned long _PAGE_E __read_mostly;
+unsigned long _PAGE_CACHE __read_mostly;
+
+static void prot_init_common(unsigned long page_none,
+			     unsigned long page_shared,
+			     unsigned long page_copy,
+			     unsigned long page_readonly,
+			     unsigned long page_exec_bit)
+{
+	PAGE_COPY = __pgprot(page_copy);
+
+	protection_map[0x0] = __pgprot(page_none);
+	protection_map[0x1] = __pgprot(page_readonly & ~page_exec_bit);
+	protection_map[0x2] = __pgprot(page_copy & ~page_exec_bit);
+	protection_map[0x3] = __pgprot(page_copy & ~page_exec_bit);
+	protection_map[0x4] = __pgprot(page_readonly);
+	protection_map[0x5] = __pgprot(page_readonly);
+	protection_map[0x6] = __pgprot(page_copy);
+	protection_map[0x7] = __pgprot(page_copy);
+	protection_map[0x8] = __pgprot(page_none);
+	protection_map[0x9] = __pgprot(page_readonly & ~page_exec_bit);
+	protection_map[0xa] = __pgprot(page_shared & ~page_exec_bit);
+	protection_map[0xb] = __pgprot(page_shared & ~page_exec_bit);
+	protection_map[0xc] = __pgprot(page_readonly);
+	protection_map[0xd] = __pgprot(page_readonly);
+	protection_map[0xe] = __pgprot(page_shared);
+	protection_map[0xf] = __pgprot(page_shared);
+}
+
+static void __init sun4u_pgprot_init(void)
+{
+	unsigned long page_none, page_shared, page_copy, page_readonly;
+	unsigned long page_exec_bit;
+
+	PAGE_KERNEL = __pgprot (_PAGE_PRESENT_4U | _PAGE_VALID |
+				_PAGE_CACHE_4U | _PAGE_P_4U |
+				__ACCESS_BITS_4U | __DIRTY_BITS_4U |
+				_PAGE_EXEC_4U);
+	PAGE_KERNEL_LOCKED = __pgprot (_PAGE_PRESENT_4U | _PAGE_VALID |
+				       _PAGE_CACHE_4U | _PAGE_P_4U |
+				       __ACCESS_BITS_4U | __DIRTY_BITS_4U |
+				       _PAGE_EXEC_4U | _PAGE_L_4U);
+	PAGE_EXEC = __pgprot(_PAGE_EXEC_4U);
+
+	_PAGE_IE = _PAGE_IE_4U;
+	_PAGE_E = _PAGE_E_4U;
+	_PAGE_CACHE = _PAGE_CACHE_4U;
+
+	pg_iobits = (_PAGE_VALID | _PAGE_PRESENT_4U | __DIRTY_BITS_4U |
+		     __ACCESS_BITS_4U | _PAGE_E_4U);
+
+	kern_linear_pte_xor = (_PAGE_VALID | _PAGE_SZ4MB_4U) ^
+		0xfffff80000000000;
+	kern_linear_pte_xor |= (_PAGE_CP_4U | _PAGE_CV_4U |
+				_PAGE_P_4U | _PAGE_W_4U);
+
+	_PAGE_SZBITS = _PAGE_SZBITS_4U;
+	_PAGE_ALL_SZ_BITS =  (_PAGE_SZ4MB_4U | _PAGE_SZ512K_4U |
+			      _PAGE_SZ64K_4U | _PAGE_SZ8K_4U |
+			      _PAGE_SZ32MB_4U | _PAGE_SZ256MB_4U);
+
+
+	page_none = _PAGE_PRESENT_4U | _PAGE_ACCESSED_4U | _PAGE_CACHE_4U;
+	page_shared = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
+		       __ACCESS_BITS_4U | _PAGE_WRITE_4U | _PAGE_EXEC_4U);
+	page_copy   = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
+		       __ACCESS_BITS_4U | _PAGE_EXEC_4U);
+	page_readonly   = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
+			   __ACCESS_BITS_4U | _PAGE_EXEC_4U);
+
+	page_exec_bit = _PAGE_EXEC_4U;
+
+	prot_init_common(page_none, page_shared, page_copy, page_readonly,
+			 page_exec_bit);
+}
+
+static void __init sun4v_pgprot_init(void)
+{
+	unsigned long page_none, page_shared, page_copy, page_readonly;
+	unsigned long page_exec_bit;
+
+	PAGE_KERNEL = __pgprot (_PAGE_PRESENT_4V | _PAGE_VALID |
+				_PAGE_CACHE_4V | _PAGE_P_4V |
+				__ACCESS_BITS_4V | __DIRTY_BITS_4V |
+				_PAGE_EXEC_4V);
+	PAGE_KERNEL_LOCKED = PAGE_KERNEL;
+	PAGE_EXEC = __pgprot(_PAGE_EXEC_4V);
+
+	_PAGE_IE = _PAGE_IE_4V;
+	_PAGE_E = _PAGE_E_4V;
+	_PAGE_CACHE = _PAGE_CACHE_4V;
+
+	kern_linear_pte_xor = (_PAGE_VALID | _PAGE_SZ4MB_4V) ^
+		0xfffff80000000000;
+	kern_linear_pte_xor |= (_PAGE_CP_4V | _PAGE_CV_4V |
+				_PAGE_P_4V | _PAGE_W_4V);
+
+	pg_iobits = (_PAGE_VALID | _PAGE_PRESENT_4V | __DIRTY_BITS_4V |
+		     __ACCESS_BITS_4V | _PAGE_E_4V);
+
+	_PAGE_SZBITS = _PAGE_SZBITS_4V;
+	_PAGE_ALL_SZ_BITS = (_PAGE_SZ16GB_4V | _PAGE_SZ2GB_4V |
+			     _PAGE_SZ256MB_4V | _PAGE_SZ32MB_4V |
+			     _PAGE_SZ4MB_4V | _PAGE_SZ512K_4V |
+			     _PAGE_SZ64K_4V | _PAGE_SZ8K_4V);
+
+	page_none = _PAGE_PRESENT_4V | _PAGE_ACCESSED_4V | _PAGE_CACHE_4V;
+	page_shared = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V |
+		       __ACCESS_BITS_4V | _PAGE_WRITE_4V | _PAGE_EXEC_4V);
+	page_copy   = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V |
+		       __ACCESS_BITS_4V | _PAGE_EXEC_4V);
+	page_readonly = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V |
+			 __ACCESS_BITS_4V | _PAGE_EXEC_4V);
+
+	page_exec_bit = _PAGE_EXEC_4V;
+
+	prot_init_common(page_none, page_shared, page_copy, page_readonly,
+			 page_exec_bit);
+}
+
+unsigned long pte_sz_bits(unsigned long sz)	/* map a page size in bytes to the TTE size-field encoding */
+{
+	if (tlb_type == hypervisor) {
+		switch (sz) {
+		case 8 * 1024:
+		default:	/* unlisted sizes fall back to the base 8K encoding */
+			return _PAGE_SZ8K_4V;
+		case 64 * 1024:
+			return _PAGE_SZ64K_4V;
+		case 512 * 1024:
+			return _PAGE_SZ512K_4V;
+		case 4 * 1024 * 1024:
+			return _PAGE_SZ4MB_4V;
+		};
+	} else {
+		switch (sz) {
+		case 8 * 1024:
+		default:	/* unlisted sizes fall back to the base 8K encoding */
+			return _PAGE_SZ8K_4U;
+		case 64 * 1024:
+			return _PAGE_SZ64K_4U;
+		case 512 * 1024:
+			return _PAGE_SZ512K_4U;
+		case 4 * 1024 * 1024:
+			return _PAGE_SZ4MB_4U;
+		};
+	}
+}
+
+pte_t mk_pte_io(unsigned long page, pgprot_t prot, int space, unsigned long page_size)	/* build a side-effect (I/O), non-cacheable PTE */
+{
+	pte_t pte;
+	if (tlb_type == hypervisor) {
+		pte_val(pte) = (((page) | pgprot_val(prot) | _PAGE_E_4V) &
+				~(unsigned long)_PAGE_CACHE_4V);	/* sun4v layout: set E, strip CP/CV */
+	} else {
+		pte_val(pte) = (((page) | pgprot_val(prot) | _PAGE_E_4U) &
+				~(unsigned long)_PAGE_CACHE_4U);	/* sun4u layout: set E, strip CP/CV */
+	}
+	pte_val(pte) |= (((unsigned long)space) << 32);	/* fold the I/O space number into bits 32+ */
+	pte_val(pte) |= pte_sz_bits(page_size);	/* encode the mapping size into the TTE */
+	return pte;
+}
+
+unsigned long pte_present(pte_t pte)
+{
+	return (pte_val(pte) &
+		((tlb_type == hypervisor) ?
+		 _PAGE_PRESENT_4V : _PAGE_PRESENT_4U));
+}
+
+unsigned long pte_file(pte_t pte)
+{
+	return (pte_val(pte) &
+		((tlb_type == hypervisor) ?
+		 _PAGE_FILE_4V : _PAGE_FILE_4U));
+}
+
+unsigned long pte_read(pte_t pte)
+{
+	return (pte_val(pte) &
+		((tlb_type == hypervisor) ?
+		 _PAGE_READ_4V : _PAGE_READ_4U));
+}
+
+unsigned long pte_exec(pte_t pte)
+{
+	return (pte_val(pte) &
+		((tlb_type == hypervisor) ?
+		 _PAGE_EXEC_4V : _PAGE_EXEC_4U));
+}
+
+unsigned long pte_write(pte_t pte)
+{
+	return (pte_val(pte) &
+		((tlb_type == hypervisor) ?
+		 _PAGE_WRITE_4V : _PAGE_WRITE_4U));
+}
+
+unsigned long pte_dirty(pte_t pte)
+{
+	return (pte_val(pte) &
+		((tlb_type == hypervisor) ?
+		 _PAGE_MODIFIED_4V : _PAGE_MODIFIED_4U));
+}
+
+unsigned long pte_young(pte_t pte)
+{
+	return (pte_val(pte) &
+		((tlb_type == hypervisor) ?
+		 _PAGE_ACCESSED_4V : _PAGE_ACCESSED_4U));
+}
+
+pte_t pte_wrprotect(pte_t pte)
+{
+	unsigned long mask = _PAGE_WRITE_4U | _PAGE_W_4U;
+
+	if (tlb_type == hypervisor)
+		mask = _PAGE_WRITE_4V | _PAGE_W_4V;
+
+	return __pte(pte_val(pte) & ~mask);
+}
+
+pte_t pte_rdprotect(pte_t pte)
+{
+	unsigned long mask = _PAGE_R | _PAGE_READ_4U;
+
+	if (tlb_type == hypervisor)
+		mask = _PAGE_R | _PAGE_READ_4V;
+
+	return __pte(pte_val(pte) & ~mask);
+}
+
+pte_t pte_mkclean(pte_t pte)
+{
+	unsigned long mask = _PAGE_MODIFIED_4U | _PAGE_W_4U;
+
+	if (tlb_type == hypervisor)
+		mask = _PAGE_MODIFIED_4V | _PAGE_W_4V;
+
+	return __pte(pte_val(pte) & ~mask);
+}
+
+pte_t pte_mkold(pte_t pte)
+{
+	unsigned long mask = _PAGE_R | _PAGE_ACCESSED_4U;
+
+	if (tlb_type == hypervisor)
+		mask = _PAGE_R | _PAGE_ACCESSED_4V;
+
+	return __pte(pte_val(pte) & ~mask);
+}
+
+pte_t pte_mkyoung(pte_t pte)
+{
+	unsigned long mask = _PAGE_R | _PAGE_ACCESSED_4U;
+
+	if (tlb_type == hypervisor)
+		mask = _PAGE_R | _PAGE_ACCESSED_4V;
+
+	return __pte(pte_val(pte) | mask);
+}
+
+pte_t pte_mkwrite(pte_t pte)
+{
+	unsigned long mask = _PAGE_WRITE_4U;
+
+	if (tlb_type == hypervisor)
+		mask = _PAGE_WRITE_4V;
+
+	return __pte(pte_val(pte) | mask);
+}
+
+pte_t pte_mkdirty(pte_t pte)
+{
+	unsigned long mask = _PAGE_MODIFIED_4U | _PAGE_W_4U;
+
+	if (tlb_type == hypervisor)
+		mask = _PAGE_MODIFIED_4V | _PAGE_W_4V;
+
+	return __pte(pte_val(pte) | mask);
+}
+
+pte_t pte_mkhuge(pte_t pte)
+{
+	unsigned long mask = _PAGE_SZHUGE_4U;
+
+	if (tlb_type == hypervisor)
+		mask = _PAGE_SZHUGE_4V;
+
+	return __pte(pte_val(pte) | mask);
+}
+
+pte_t pgoff_to_pte(unsigned long off)
+{
+	unsigned long bit = _PAGE_FILE_4U;
+
+	if (tlb_type == hypervisor)
+		bit = _PAGE_FILE_4V;
+
+	return __pte((off << PAGE_SHIFT) | bit);
+}
+
+pgprot_t pgprot_noncached(pgprot_t prot)
+{
+	unsigned long val = pgprot_val(prot);
+	unsigned long off = _PAGE_CP_4U | _PAGE_CV_4U;
+	unsigned long on = _PAGE_E_4U;
+
+	if (tlb_type == hypervisor) {
+		off = _PAGE_CP_4V | _PAGE_CV_4V;
+		on = _PAGE_E_4V;
+	}
+
+	return __pgprot((val & ~off) | on);
+}
+
+pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
+{
+	unsigned long sz_bits = _PAGE_SZBITS_4U;
+
+	if (tlb_type == hypervisor)
+		sz_bits = _PAGE_SZBITS_4V;
+
+	return __pte((pfn << PAGE_SHIFT) | pgprot_val(prot) | sz_bits);
+}
+
+unsigned long pte_pfn(pte_t pte)
+{
+	unsigned long mask = _PAGE_PADDR_4U;
+
+	if (tlb_type == hypervisor)
+		mask = _PAGE_PADDR_4V;
+
+	return (pte_val(pte) & mask) >> PAGE_SHIFT;
+}
+
+pte_t pte_modify(pte_t orig_pte, pgprot_t new_prot)
+{
+	unsigned long preserve_mask;
+	unsigned long val;
+
+	preserve_mask = (_PAGE_PADDR_4U |
+			 _PAGE_MODIFIED_4U |
+			 _PAGE_ACCESSED_4U |
+			 _PAGE_CP_4U |
+			 _PAGE_CV_4U |
+			 _PAGE_E_4U |
+			 _PAGE_PRESENT_4U |
+			 _PAGE_SZBITS_4U);
+	if (tlb_type == hypervisor)
+		preserve_mask = (_PAGE_PADDR_4V |
+				 _PAGE_MODIFIED_4V |
+				 _PAGE_ACCESSED_4V |
+				 _PAGE_CP_4V |
+				 _PAGE_CV_4V |
+				 _PAGE_E_4V |
+				 _PAGE_PRESENT_4V |
+				 _PAGE_SZBITS_4V);
+
+	val = (pte_val(orig_pte) & preserve_mask);
+
+	return __pte(val | (pgprot_val(new_prot) & ~preserve_mask));
+}
+
+static unsigned long kern_large_tte(unsigned long paddr)
+{
+	unsigned long val;
+
+	val = (_PAGE_VALID | _PAGE_SZ4MB_4U |
+	       _PAGE_CP_4U | _PAGE_CV_4U | _PAGE_P_4U |
+	       _PAGE_EXEC_4U | _PAGE_L_4U | _PAGE_W_4U);
+	if (tlb_type == hypervisor)
+		val = (_PAGE_VALID | _PAGE_SZ4MB_4V |
+		       _PAGE_CP_4V | _PAGE_CV_4V | _PAGE_P_4V |
+		       _PAGE_EXEC_4V | _PAGE_W_4V);
+
+	return val | paddr;
+}
+
+/*
+ * Translate PROM's mapping we capture at boot time into physical address.
+ * The second parameter is only set from prom_callback() invocations.
+ */
+unsigned long prom_virt_to_phys(unsigned long promva, int *error)
+{
+	unsigned long mask;
+	int i;
+
+	mask = _PAGE_PADDR_4U;
+	if (tlb_type == hypervisor)
+		mask = _PAGE_PADDR_4V;
+
+	for (i = 0; i < prom_trans_ents; i++) {
+		struct linux_prom_translation *p = &prom_trans[i];
+
+		if (promva >= p->virt &&
+		    promva < (p->virt + p->size)) {
+			unsigned long base = p->data & mask;
+
+			if (error)
+				*error = 0;
+			return base + (promva & (8192 - 1));
+		}
+	}
+	if (error)
+		*error = 1;
+	return 0UL;
+}
+
+/* XXX We should kill off this ugly thing at some point. XXX */
+unsigned long sun4u_get_pte(unsigned long addr)
+{
+	pgd_t *pgdp;
+	pud_t *pudp;
+	pmd_t *pmdp;
+	pte_t *ptep;
+	unsigned long mask = _PAGE_PADDR_4U;
+
+	if (tlb_type == hypervisor)
+		mask = _PAGE_PADDR_4V;
+
+	if (addr >= PAGE_OFFSET)
+		return addr & mask;
+
+	if ((addr >= LOW_OBP_ADDRESS) && (addr < HI_OBP_ADDRESS))
+		return prom_virt_to_phys(addr, NULL);
+
+	pgdp = pgd_offset_k(addr);
+	pudp = pud_offset(pgdp, addr);
+	pmdp = pmd_offset(pudp, addr);
+	ptep = pte_offset_kernel(pmdp, addr);
+
+	return pte_val(*ptep) & mask;
+}
+
+/* If not locked, zap it. */
+void __flush_tlb_all(void)
+{
+	unsigned long pstate;
+	int i;
+
+	__asm__ __volatile__("flushw\n\t"
+			     "rdpr	%%pstate, %0\n\t"
+			     "wrpr	%0, %1, %%pstate"
+			     : "=r" (pstate)
+			     : "i" (PSTATE_IE));
+	if (tlb_type == spitfire) {
+		for (i = 0; i < 64; i++) {
+			/* Spitfire Errata #32 workaround */
+			/* NOTE: Always runs on spitfire, so no
+			 *       cheetah+ page size encodings.
+			 */
+			__asm__ __volatile__("stxa	%0, [%1] %2\n\t"
+					     "flush	%%g6"
+					     : /* No outputs */
+					     : "r" (0),
+					     "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
+
+			if (!(spitfire_get_dtlb_data(i) & _PAGE_L_4U)) {
+				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
+						     "membar #Sync"
+						     : /* no outputs */
+						     : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
+				spitfire_put_dtlb_data(i, 0x0UL);
+			}
+
+			/* Spitfire Errata #32 workaround */
+			/* NOTE: Always runs on spitfire, so no
+			 *       cheetah+ page size encodings.
+			 */
+			__asm__ __volatile__("stxa	%0, [%1] %2\n\t"
+					     "flush	%%g6"
+					     : /* No outputs */
+					     : "r" (0),
+					     "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
+
+			if (!(spitfire_get_itlb_data(i) & _PAGE_L_4U)) {
+				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
+						     "membar #Sync"
+						     : /* no outputs */
+						     : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU));
+				spitfire_put_itlb_data(i, 0x0UL);
+			}
+		}
+	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
+		cheetah_flush_dtlb_all();
+		cheetah_flush_itlb_all();
+	}
+	__asm__ __volatile__("wrpr	%0, 0, %%pstate"
+			     : : "r" (pstate));
+}
diff --git a/arch/sparc64/mm/tsb.c b/arch/sparc64/mm/tsb.c
index c5dc4b0..975242a 100644
--- a/arch/sparc64/mm/tsb.c
+++ b/arch/sparc64/mm/tsb.c
@@ -85,8 +85,7 @@
 	mm->context.tsb_nentries = tsb_bytes / sizeof(struct tsb);
 
 	base = TSBMAP_BASE;
-	tte = (_PAGE_VALID | _PAGE_L | _PAGE_CP |
-	       _PAGE_CV    | _PAGE_P | _PAGE_W);
+	tte = pgprot_val(PAGE_KERNEL_LOCKED);
 	tsb_paddr = __pa(mm->context.tsb);
 	BUG_ON(tsb_paddr & (tsb_bytes - 1UL));
 
@@ -99,55 +98,48 @@
 #ifdef DCACHE_ALIASING_POSSIBLE
 		base += (tsb_paddr & 8192);
 #endif
-		tte |= _PAGE_SZ8K;
 		page_sz = 8192;
 		break;
 
 	case 8192 << 1:
 		tsb_reg = 0x1UL;
-		tte |= _PAGE_SZ64K;
 		page_sz = 64 * 1024;
 		break;
 
 	case 8192 << 2:
 		tsb_reg = 0x2UL;
-		tte |= _PAGE_SZ64K;
 		page_sz = 64 * 1024;
 		break;
 
 	case 8192 << 3:
 		tsb_reg = 0x3UL;
-		tte |= _PAGE_SZ64K;
 		page_sz = 64 * 1024;
 		break;
 
 	case 8192 << 4:
 		tsb_reg = 0x4UL;
-		tte |= _PAGE_SZ512K;
 		page_sz = 512 * 1024;
 		break;
 
 	case 8192 << 5:
 		tsb_reg = 0x5UL;
-		tte |= _PAGE_SZ512K;
 		page_sz = 512 * 1024;
 		break;
 
 	case 8192 << 6:
 		tsb_reg = 0x6UL;
-		tte |= _PAGE_SZ512K;
 		page_sz = 512 * 1024;
 		break;
 
 	case 8192 << 7:
 		tsb_reg = 0x7UL;
-		tte |= _PAGE_SZ4MB;
 		page_sz = 4 * 1024 * 1024;
 		break;
 
 	default:
 		BUG();
 	};
+	tte |= pte_sz_bits(page_sz);
 
 	if (tlb_type == cheetah_plus || tlb_type == hypervisor) {
 		/* Physical mapping, no locked TLB entry for TSB.  */