MIPS: Whitespace cleanup.

Having received another series of whitespace patches, I decided to do this
once and for all rather than dealing with this kind of patch trickling
in forever.

Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
diff --git a/arch/mips/include/asm/barrier.h b/arch/mips/include/asm/barrier.h
index f7fdc24..314ab55 100644
--- a/arch/mips/include/asm/barrier.h
+++ b/arch/mips/include/asm/barrier.h
@@ -18,7 +18,7 @@
  * over this barrier.  All reads preceding this primitive are guaranteed
  * to access memory (but not necessarily other CPUs' caches) before any
  * reads following this primitive that depend on the data return by
- * any of the preceding reads.  This primitive is much lighter weight than
+ * any of the preceding reads.	This primitive is much lighter weight than
  * rmb() on most CPUs, and is never heavier weight than is
  * rmb().
  *
@@ -43,7 +43,7 @@
  * </programlisting>
  *
  * because the read of "*q" depends on the read of "p" and these
- * two reads are separated by a read_barrier_depends().  However,
+ * two reads are separated by a read_barrier_depends().	 However,
  * the following code, with the same initial values for "a" and "b":
  *
  * <programlisting>
@@ -57,7 +57,7 @@
  * </programlisting>
  *
  * does not enforce ordering, since there is no data dependency between
- * the read of "a" and the read of "b".  Therefore, on some CPUs, such
+ * the read of "a" and the read of "b".	 Therefore, on some CPUs, such
  * as Alpha, "y" could be set to 3 and "x" to 0.  Use rmb()
  * in cases like this where there are no data dependencies.
  */
@@ -92,7 +92,7 @@
 		: "memory")
 #ifdef CONFIG_CPU_CAVIUM_OCTEON
 # define OCTEON_SYNCW_STR	".set push\n.set arch=octeon\nsyncw\nsyncw\n.set pop\n"
-# define __syncw() 	__asm__ __volatile__(OCTEON_SYNCW_STR : : : "memory")
+# define __syncw()	__asm__ __volatile__(OCTEON_SYNCW_STR : : : "memory")
 
 # define fast_wmb()	__syncw()
 # define fast_rmb()	barrier()
@@ -158,7 +158,7 @@
 #endif
 
 #if defined(CONFIG_WEAK_REORDERING_BEYOND_LLSC) && defined(CONFIG_SMP)
-#define __WEAK_LLSC_MB		"       sync	\n"
+#define __WEAK_LLSC_MB		"	sync	\n"
 #else
 #define __WEAK_LLSC_MB		"		\n"
 #endif
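
For context, the comment being re-tabbed above describes the dependent-read
case that read_barrier_depends() is meant to order.  Below is a minimal
sketch of that pattern, not part of this patch, assuming the kernel barrier
API of this era (wmb() and read_barrier_depends()); the variable and
function names are illustrative only:

	static int a = 1;
	static int b;
	static int *p = &a;

	/* writer: publish b through the pointer p */
	static void writer(void)
	{
		b = 2;
		wmb();			/* order the store to b before the store to p */
		p = &b;
	}

	/* reader: the second load depends on the value returned by the first */
	static int reader(void)
	{
		int *q, d;

		q = p;
		read_barrier_depends();	/* sufficient here because *q depends on q */
		d = *q;
		return d;		/* q == &a gives 1; q == &b is guaranteed to give 2 */
	}

In the second case the comment describes (reads of "a" and "b" with no data
dependency between them), read_barrier_depends() is not enough and rmb()
must be used instead, as the comment itself notes.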