MIPS: Whitespace cleanup.

Having received another series of whitespace patches I decided to do this
once and for all rather than dealing with this kind of patch trickling
in forever.

Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 1c8ac49..36b9bd8 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -5,8 +5,8 @@
  *
  * Synthesize TLB refill handlers at runtime.
  *
- * Copyright (C) 2004, 2005, 2006, 2008	 Thiemo Seufer
- * Copyright (C) 2005, 2007, 2008, 2009	 Maciej W. Rozycki
+ * Copyright (C) 2004, 2005, 2006, 2008  Thiemo Seufer
+ * Copyright (C) 2005, 2007, 2008, 2009  Maciej W. Rozycki
  * Copyright (C) 2006  Ralf Baechle (ralf@linux-mips.org)
  * Copyright (C) 2008, 2009 Cavium Networks, Inc.
  * Copyright (C) 2011  MIPS Technologies, Inc.
@@ -212,7 +212,7 @@
 /*
  * pgtable bits are assigned dynamically depending on processor feature
  * and statically based on kernel configuration.  This spits out the actual
- * values the kernel is using.	Required to make sense from disassembled
+ * values the kernel is using.  Required to make sense from disassembled
  * TLB exception handlers.
  */
 static void output_pgtable_bits_defines(void)
@@ -464,8 +464,8 @@
  * From the IDT errata for the QED RM5230 (Nevada), processor revision 1.0:
  * 2. A timing hazard exists for the TLBP instruction.
  *
- *	stalling_instruction
- *	TLBP
+ *      stalling_instruction
+ *      TLBP
  *
  * The JTLB is being read for the TLBP throughout the stall generated by the
  * previous instruction. This is not really correct as the stalling instruction
@@ -476,7 +476,7 @@
  * The software work-around is to not allow the instruction preceding the TLBP
  * to stall - make it an NOP or some other instruction guaranteed not to stall.
  *
- * Errata 2 will not be fixed.	This errata is also on the R5000.
+ * Errata 2 will not be fixed.  This errata is also on the R5000.
  *
  * As if we MIPS hackers wouldn't know how to nop pipelines happy ...
  */
@@ -748,7 +748,7 @@
 	 */
 	small_sequence = (HPAGE_SIZE >> 7) < 0x10000;
 
-	/* We can clobber tmp.	It isn't used after this.*/
+	/* We can clobber tmp.  It isn't used after this.*/
 	if (!small_sequence)
 		uasm_i_lui(p, tmp, HPAGE_SIZE >> (7 + 16));
 
@@ -830,12 +830,12 @@
 		/* Clear lower 23 bits of context. */
 		uasm_i_dins(p, ptr, 0, 0, 23);
 
-		/* 1 0	1 0 1  << 6  xkphys cached */
+		/* 1 0  1 0 1  << 6  xkphys cached */
 		uasm_i_ori(p, ptr, ptr, 0x540);
 		uasm_i_drotr(p, ptr, ptr, 11);
 	}
 #elif defined(CONFIG_SMP)
-# ifdef	 CONFIG_MIPS_MT_SMTC
+# ifdef  CONFIG_MIPS_MT_SMTC
 	/*
 	 * SMTC uses TCBind value as "CPU" index
 	 */
@@ -955,7 +955,7 @@
 
 	/* 32 bit SMP has smp_processor_id() stored in CONTEXT. */
 #ifdef CONFIG_SMP
-#ifdef	CONFIG_MIPS_MT_SMTC
+#ifdef  CONFIG_MIPS_MT_SMTC
 	/*
 	 * SMTC uses TCBind value as "CPU" index
 	 */
@@ -965,7 +965,7 @@
 #else
 	/*
 	 * smp_processor_id() << 3 is stored in CONTEXT.
-         */
+	 */
 	uasm_i_mfc0(p, ptr, C0_CONTEXT);
 	UASM_i_LA_mostly(p, tmp, pgdc);
 	uasm_i_srl(p, ptr, ptr, 23);
@@ -1153,7 +1153,7 @@
 
 	if (pgd_reg == -1) {
 		vmalloc_branch_delay_filled = 1;
-		/* 1 0	1 0 1  << 6  xkphys cached */
+		/* 1 0  1 0 1  << 6  xkphys cached */
 		uasm_i_ori(p, ptr, ptr, 0x540);
 		uasm_i_drotr(p, ptr, ptr, 11);
 	}
@@ -1171,9 +1171,9 @@
 	uasm_l_vmalloc_done(l, *p);
 
 	/*
-	 *			   tmp		ptr
-	 * fall-through case =	 badvaddr  *pgd_current
-	 * vmalloc case	     =	 badvaddr  swapper_pg_dir
+	 *                         tmp          ptr
+	 * fall-through case =   badvaddr  *pgd_current
+	 * vmalloc case      =   badvaddr  swapper_pg_dir
 	 */
 
 	if (vmalloc_branch_delay_filled)
@@ -1212,7 +1212,7 @@
 	uasm_il_bbit1(p, r, scratch, ilog2(_PAGE_HUGE), label_tlb_huge_update);
 	/*
 	 * The in the LWX case we don't want to do the load in the
-	 * delay slot.	It cannot issue in the same cycle and may be
+	 * delay slot.  It cannot issue in the same cycle and may be
 	 * speculative and unneeded.
 	 */
 	if (use_lwx_insns())