[MIPS] MT: Improved multithreading support.
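
Convert the TLB maintenance routines in arch/mips/mm/tlb-r4k.c from bare
local_irq_save()/local_irq_restore() pairs to ENTER_CRITICAL()/EXIT_CRITICAL()
macros.  On CONFIG_MIPS_MT_SMTC, where the TLB may be shared between VPEs, the
macros additionally disable multi-VPE execution with dvpe() for the duration
of the update and re-enable it with evpe(); on all other configurations they
reduce to plain local_irq_save()/local_irq_restore().

probe_tlb() now skips the size probe when the SMTC code has already computed
the size of a shared TLB into cpu_data, and a new "ntlb=" command-line
parameter restricts the number of TLB entries available for random
replacement by wiring down the remainder.

Illustrative sketch of the expected usage (not taken verbatim from the patch;
note that the SMTC variant of the pair opens and closes a compound block, so
both macros must appear in the same scope):

	unsigned long flags;

	ENTER_CRITICAL(flags);	/* local_irq_save(); on SMTC also dvpe() */
	/* ... EntryHi/EntryLo/Index update, tlb_write_indexed/random ... */
	EXIT_CRITICAL(flags);	/* on SMTC evpe(); then local_irq_restore() */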
    
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
index a865f239..9dca099 100644
--- a/arch/mips/mm/tlb-r4k.c
+++ b/arch/mips/mm/tlb-r4k.c
@@ -32,13 +32,35 @@
 				     "nop; nop; nop; nop; nop; nop;\n\t" \
 				     ".set reorder\n\t")
 
+/* Atomicity and interruptibility */
+#ifdef CONFIG_MIPS_MT_SMTC
+
+#include <asm/smtc.h>
+#include <asm/mipsmtregs.h>
+
+#define ENTER_CRITICAL(flags) \
+	{ \
+	unsigned int mvpflags; \
+	local_irq_save(flags);\
+	mvpflags = dvpe()
+#define EXIT_CRITICAL(flags) \
+	evpe(mvpflags); \
+	local_irq_restore(flags); \
+	}
+#else
+
+#define ENTER_CRITICAL(flags) local_irq_save(flags)
+#define EXIT_CRITICAL(flags) local_irq_restore(flags)
+
+#endif /* CONFIG_MIPS_MT_SMTC */
+
 void local_flush_tlb_all(void)
 {
 	unsigned long flags;
 	unsigned long old_ctx;
 	int entry;
 
-	local_irq_save(flags);
+	ENTER_CRITICAL(flags);
 	/* Save old context and create impossible VPN2 value */
 	old_ctx = read_c0_entryhi();
 	write_c0_entrylo0(0);
@@ -57,7 +79,7 @@
 	}
 	tlbw_use_hazard();
 	write_c0_entryhi(old_ctx);
-	local_irq_restore(flags);
+	EXIT_CRITICAL(flags);
 }
 
 /* All entries common to a mm share an asid.  To effectively flush
@@ -87,6 +109,6 @@
 		unsigned long flags;
 		int size;
 
+		ENTER_CRITICAL(flags);
 		size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
 		size = (size + 1) >> 1;
-		local_irq_save(flags);
@@ -120,7 +143,7 @@
 		} else {
 			drop_mmu_context(mm, cpu);
 		}
-		local_irq_restore(flags);
+		EXIT_CRITICAL(flags);
 	}
 }
 
@@ -129,9 +152,9 @@
 	unsigned long flags;
 	int size;
 
+	ENTER_CRITICAL(flags);
 	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
 	size = (size + 1) >> 1;
-	local_irq_save(flags);
 	if (size <= current_cpu_data.tlbsize / 2) {
 		int pid = read_c0_entryhi();
 
@@ -162,7 +185,7 @@
 	} else {
 		local_flush_tlb_all();
 	}
-	local_irq_restore(flags);
+	EXIT_CRITICAL(flags);
 }
 
 void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
@@ -175,7 +198,7 @@
 
 		newpid = cpu_asid(cpu, vma->vm_mm);
 		page &= (PAGE_MASK << 1);
-		local_irq_save(flags);
+		ENTER_CRITICAL(flags);
 		oldpid = read_c0_entryhi();
 		write_c0_entryhi(page | newpid);
 		mtc0_tlbw_hazard();
@@ -194,7 +217,7 @@
 
 	finish:
 		write_c0_entryhi(oldpid);
-		local_irq_restore(flags);
+		EXIT_CRITICAL(flags);
 	}
 }
 
@@ -207,7 +230,7 @@
 	unsigned long flags;
 	int oldpid, idx;
 
-	local_irq_save(flags);
+	ENTER_CRITICAL(flags);
 	oldpid = read_c0_entryhi();
 	page &= (PAGE_MASK << 1);
 	write_c0_entryhi(page);
@@ -226,7 +249,7 @@
 	}
 	write_c0_entryhi(oldpid);
 
-	local_irq_restore(flags);
+	EXIT_CRITICAL(flags);
 }
 
 /*
@@ -249,7 +272,7 @@
 	if (current->active_mm != vma->vm_mm)
 		return;
 
-	local_irq_save(flags);
+	ENTER_CRITICAL(flags);
 
 	pid = read_c0_entryhi() & ASID_MASK;
 	address &= (PAGE_MASK << 1);
@@ -277,7 +300,7 @@
 	else
 		tlb_write_indexed();
 	tlbw_use_hazard();
-	local_irq_restore(flags);
+	EXIT_CRITICAL(flags);
 }
 
 #if 0
@@ -291,7 +314,7 @@
 	pte_t *ptep;
 	int idx;
 
-	local_irq_save(flags);
+	ENTER_CRITICAL(flags);
 	address &= (PAGE_MASK << 1);
 	asid = read_c0_entryhi() & ASID_MASK;
 	write_c0_entryhi(address | asid);
@@ -310,7 +333,7 @@
 	else
 		tlb_write_indexed();
 	tlbw_use_hazard();
-	local_irq_restore(flags);
+	EXIT_CRITICAL(flags);
 }
 #endif
 
@@ -322,7 +345,7 @@
 	unsigned long old_pagemask;
 	unsigned long old_ctx;
 
-	local_irq_save(flags);
+	ENTER_CRITICAL(flags);
 	/* Save old context and create impossible VPN2 value */
 	old_ctx = read_c0_entryhi();
 	old_pagemask = read_c0_pagemask();
@@ -342,7 +365,7 @@
 	BARRIER;
 	write_c0_pagemask(old_pagemask);
 	local_flush_tlb_all();
-	local_irq_restore(flags);
+	EXIT_CRITICAL(flags);
 }
 
 /*
@@ -362,7 +385,7 @@
 	unsigned long old_pagemask;
 	unsigned long old_ctx;
 
-	local_irq_save(flags);
+	ENTER_CRITICAL(flags);
 	/* Save old context and create impossible VPN2 value */
 	old_ctx = read_c0_entryhi();
 	old_pagemask = read_c0_pagemask();
@@ -386,10 +409,11 @@
 	write_c0_entryhi(old_ctx);
 	write_c0_pagemask(old_pagemask);
 out:
-	local_irq_restore(flags);
+	EXIT_CRITICAL(flags);
 	return ret;
 }
 
+extern void __init sanitize_tlb_entries(void);
 static void __init probe_tlb(unsigned long config)
 {
 	struct cpuinfo_mips *c = &current_cpu_data;
@@ -402,6 +426,14 @@
 	 */
 	if ((c->processor_id & 0xff0000) == PRID_COMP_LEGACY)
 		return;
+#ifdef CONFIG_MIPS_MT_SMTC
+	/*
+	 * If the TLB is shared in an SMTC system, the total size has
+	 * already been calculated and written into cpu_data's tlbsize.
+	 */
+	if ((smtc_status & SMTC_TLB_SHARED) == SMTC_TLB_SHARED)
+		return;
+#endif /* CONFIG_MIPS_MT_SMTC */
 
 	reg = read_c0_config1();
 	if (!((config >> 7) & 3))
@@ -410,6 +442,15 @@
 	c->tlbsize = ((reg >> 25) & 0x3f) + 1;
 }
 
+static int __initdata ntlb = 0;
+static int __init set_ntlb(char *str)
+{
+	get_option(&str, &ntlb);
+	return 1;
+}
+
+__setup("ntlb=", set_ntlb);
+
 void __init tlb_init(void)
 {
 	unsigned int config = read_c0_config();
@@ -432,5 +473,15 @@
 
 	/* Did I tell you that ARC SUCKS?  */
 
+	if (ntlb) {
+		if (ntlb > 1 && ntlb <= current_cpu_data.tlbsize) {
+			int wired = current_cpu_data.tlbsize - ntlb;
+			write_c0_wired(wired);
+			write_c0_index(wired - 1);
+			printk("Restricting TLB to %d entries\n", ntlb);
+		} else
+			printk("Ignoring invalid argument ntlb=%d\n", ntlb);
+	}
+
 	build_tlb_refill_handler();
 }