[SPARC64]: Preload TSB entries from update_mmu_cache().

Export a tsb_insert() helper from tsb.S that locks a TSB entry's
tag and stores the (tag, pte) pair with interrupts disabled, and
call it from update_mmu_cache() to preload the TSB for base-size
pages, so the first access to a new mapping need not take a TSB
miss trap.

Signed-off-by: David S. Miller <davem@davemloft.net>
diff --git a/arch/sparc64/kernel/tsb.S b/arch/sparc64/kernel/tsb.S
index fe266ba..08405ed 100644
--- a/arch/sparc64/kernel/tsb.S
+++ b/arch/sparc64/kernel/tsb.S
@@ -126,6 +126,23 @@
 	wrpr	%g3, %tnpc			! Write it into TNPC
 	done					! Trap return
 
+	/* Insert an entry into the TSB.  Interrupts are disabled
+	 * and the entry tag is locked while the entry is updated.
+	 * %o0: TSB entry pointer
+	 * %o1: tag
+	 * %o2: pte
+	 */
+	.align	32
+	.globl	tsb_insert
+tsb_insert:
+	rdpr	%pstate, %o5			! Save current %pstate
+	wrpr	%o5, PSTATE_IE, %pstate		! Disable interrupts
+	TSB_LOCK_TAG(%o0, %g2, %g3)		! Lock the entry tag
+	TSB_WRITE(%o0, %o2, %o1)		! Store pte and tag
+	wrpr	%o5, %pstate			! Restore saved %pstate
+	retl					! Return to caller
+	 nop					! Delay slot
+
 	/* Reload MMU related context switch state at
 	 * schedule() time.
 	 *
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
index a8119cb..1e8a5a3 100644
--- a/arch/sparc64/mm/init.c
+++ b/arch/sparc64/mm/init.c
@@ -277,6 +277,16 @@
 	mm_rss = get_mm_rss(mm);
 	if (mm_rss >= mm->context.tsb_rss_limit)
 		tsb_grow(mm, mm_rss, GFP_ATOMIC);
+
+	if ((pte_val(pte) & _PAGE_ALL_SZ_BITS) == _PAGE_SZBITS) {
+		struct tsb *tsb;
+		unsigned long tag;
+
+		tsb = &mm->context.tsb[(address >> PAGE_SHIFT) &
+				       (mm->context.tsb_nentries - 1UL)];
+		tag = (address >> 22UL) | CTX_HWBITS(mm->context) << 48UL;
+		tsb_insert(tsb, tag, pte_val(pte));
+	}
 }
 
 void flush_dcache_page(struct page *page)
diff --git a/include/asm-sparc64/mmu.h b/include/asm-sparc64/mmu.h
index 76008ff..18f98ed 100644
--- a/include/asm-sparc64/mmu.h
+++ b/include/asm-sparc64/mmu.h
@@ -97,6 +97,8 @@
 	unsigned long pte;
 } __attribute__((aligned(TSB_ENTRY_ALIGNMENT)));
 
+extern void tsb_insert(struct tsb *ent, unsigned long tag, unsigned long pte);
+
 typedef struct {
 	unsigned long	sparc64_ctx_val;
 	struct tsb	*tsb;