MN10300: Rename __flush_tlb*() to local_flush_tlb*()

Rename __flush_tlb*() to local_flush_tlb*() as these names are more
appropriate, and to prepare for differentiating local from global TLB
flushes when SMP support is introduced.
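
To illustrate where this is heading (nothing like this is in the present
patch), an SMP implementation could then wrap the local ops and IPI the
other CPUs, along these lines; flush_tlb_all_ipi() is a made-up helper
and on_each_cpu() the generic kernel primitive:

	static void flush_tlb_all_ipi(void *unused)
	{
		local_flush_tlb_all();	/* each CPU flushes its own TLB */
	}

	void flush_tlb_all(void)
	{
		/* run the local flush on every CPU, including this one */
		on_each_cpu(flush_tlb_all_ipi, NULL, 1);
	}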

Whilst we're at it, get rid of __flush_tlb_global() and make
local_flush_tlb_page() take an mm_struct pointer rather than a VMA pointer.
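
The flush_tlb_page() wrapper keeps its VMA-taking signature, so its
callers need no change; for example:

	flush_tlb_page(vma, addr);
	/* ... now expands to ... */
	local_flush_tlb_page((vma)->vm_mm, addr);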

Signed-off-by: David Howells <dhowells@redhat.com>
diff --git a/arch/mn10300/include/asm/highmem.h b/arch/mn10300/include/asm/highmem.h
index f577ba2..3817d9f 100644
--- a/arch/mn10300/include/asm/highmem.h
+++ b/arch/mn10300/include/asm/highmem.h
@@ -87,7 +87,7 @@
 		BUG();
 #endif
 	set_pte(kmap_pte - idx, mk_pte(page, kmap_prot));
-	__flush_tlb_one(vaddr);
+	local_flush_tlb_one(vaddr);
 
 	return vaddr;
 }
@@ -116,7 +116,7 @@
 		 * this pte without first remapping it
 		 */
 		pte_clear(kmap_pte - idx);
-		__flush_tlb_one(vaddr);
+		local_flush_tlb_one(vaddr);
 	}
 #endif
 	pagefault_enable();
diff --git a/arch/mn10300/include/asm/mmu_context.h b/arch/mn10300/include/asm/mmu_context.h
index cb294c2..24d63f0 100644
--- a/arch/mn10300/include/asm/mmu_context.h
+++ b/arch/mn10300/include/asm/mmu_context.h
@@ -58,7 +58,7 @@
 	if (!(mc & MMU_CONTEXT_TLBPID_MASK)) {
 		/* we exhausted the TLB PIDs of this version on this CPU, so we
 		 * flush this CPU's TLB in its entirety and start a new cycle */
-		flush_tlb_all();
+		local_flush_tlb_all();
 
 		/* fix the TLB version if needed (we avoid version #0 so as to
 		 * distinguish MMU_NO_CONTEXT) */
diff --git a/arch/mn10300/include/asm/tlbflush.h b/arch/mn10300/include/asm/tlbflush.h
index 1a7e292..5d54bf5 100644
--- a/arch/mn10300/include/asm/tlbflush.h
+++ b/arch/mn10300/include/asm/tlbflush.h
@@ -13,21 +13,38 @@
 
 #include <asm/processor.h>
 
-#define __flush_tlb()						\
-do {								\
-	int w;							\
-	__asm__ __volatile__					\
-		("	mov %1,%0		\n"		\
-		 "	or %2,%0		\n"		\
-		 "	mov %0,%1		\n"		\
-		 : "=d"(w)					\
-		 : "m"(MMUCTR), "i"(MMUCTR_IIV|MMUCTR_DIV)	\
-		 : "cc", "memory"				\
-		 );						\
-} while (0)
+/**
+ * local_flush_tlb - Flush the current MM's entries from the local CPU's TLBs
+ */
+static inline void local_flush_tlb(void)
+{
+	int w;
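+	/* read MMUCTR, set the I & D TLB invalidate bits and write it back */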
+	asm volatile(
+		"	mov	%1,%0		\n"
+		"	or	%2,%0		\n"
+		"	mov	%0,%1		\n"
+		: "=d"(w)
+		: "m"(MMUCTR), "i"(MMUCTR_IIV|MMUCTR_DIV)
+		: "cc", "memory");
+}
 
-#define __flush_tlb_all() __flush_tlb()
-#define __flush_tlb_one(addr) __flush_tlb()
+/**
+ * local_flush_tlb_all - Flush all entries from the local CPU's TLBs
+ */
+#define local_flush_tlb_all()		local_flush_tlb()
+
+/**
+ * local_flush_tlb_one - Flush one entry from the local CPU's TLBs
+ */
+#define local_flush_tlb_one(addr)	local_flush_tlb()
+
+/**
+ * local_flush_tlb_page - Flush a page's entry from the local CPU's TLBs
+ * @mm: The MM to flush for
+ * @addr: The address of the target page in RAM (not its page struct)
+ */
+extern void local_flush_tlb_page(struct mm_struct *mm, unsigned long addr);
 
 
 /*
@@ -43,14 +59,14 @@
 #define flush_tlb_all()				\
 do {						\
 	preempt_disable();			\
-	__flush_tlb_all();			\
+	local_flush_tlb_all();			\
 	preempt_enable();			\
 } while (0)
 
 #define flush_tlb_mm(mm)			\
 do {						\
 	preempt_disable();			\
-	__flush_tlb_all();			\
+	local_flush_tlb_all();			\
 	preempt_enable();			\
 } while (0)
 
@@ -59,13 +75,13 @@
 	unsigned long __s __attribute__((unused)) = (start);	\
 	unsigned long __e __attribute__((unused)) = (end);	\
 	preempt_disable();					\
-	__flush_tlb_all();					\
+	local_flush_tlb_all();					\
 	preempt_enable();					\
 } while (0)
 
+#define flush_tlb_page(vma, addr)	local_flush_tlb_page((vma)->vm_mm, addr)
+#define flush_tlb()			flush_tlb_all()
 
-#define __flush_tlb_global()			flush_tlb_all()
-#define flush_tlb()				flush_tlb_all()
 #define flush_tlb_kernel_range(start, end)			\
 do {								\
 	unsigned long __s __attribute__((unused)) = (start);	\
@@ -73,8 +89,6 @@
 	flush_tlb_all();					\
 } while (0)
 
-extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr);
-
 #define flush_tlb_pgtables(mm, start, end)	do {} while (0)
 
 #endif /* _ASM_TLBFLUSH_H */
diff --git a/arch/mn10300/mm/init.c b/arch/mn10300/mm/init.c
index f86c283..1daf97f 100644
--- a/arch/mn10300/mm/init.c
+++ b/arch/mn10300/mm/init.c
@@ -73,7 +73,7 @@
 	/* pass the memory from the bootmem allocator to the main allocator */
 	free_area_init(zones_size);
 
-	__flush_tlb_all();
+	local_flush_tlb_all();
 }
 
 /*
diff --git a/arch/mn10300/mm/mmu-context.c b/arch/mn10300/mm/mmu-context.c
index 36ba021..3d83966 100644
--- a/arch/mn10300/mm/mmu-context.c
+++ b/arch/mn10300/mm/mmu-context.c
@@ -23,7 +23,7 @@
 /*
  * flush the specified TLB entry
  */
-void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
+void local_flush_tlb_page(struct mm_struct *mm, unsigned long addr)
 {
 	unsigned long pteu, cnx, flags;
 
@@ -33,7 +33,7 @@
 	 * interference from vmalloc'd regions */
 	local_irq_save(flags);
 
-	cnx = mm_context(vma->vm_mm);
+	cnx = mm_context(mm);
 
 	if (cnx != MMU_NO_CONTEXT) {
 		pteu = addr | (cnx & 0x000000ffUL);
diff --git a/arch/mn10300/mm/pgtable.c b/arch/mn10300/mm/pgtable.c
index 9c1624c..450f7ba 100644
--- a/arch/mn10300/mm/pgtable.c
+++ b/arch/mn10300/mm/pgtable.c
@@ -59,7 +59,7 @@
 	 * It's enough to flush this one mapping.
 	 * (PGE mappings get flushed as well)
 	 */
-	__flush_tlb_one(vaddr);
+	local_flush_tlb_one(vaddr);
 }
 
 pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)