[MIPS] Eliminate local symbols from the symbol table.

These symbols show up in oprofile output, stack traces and the like, but
only make the output harder to read.  Many identical symbol names such as
"both_aligned" were also used in multiple source files, making it
impossible to tell which file was actually meant.  So let's get rid of them.
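
The mechanism is simply gas's local-symbol convention: on ELF targets any
symbol whose name starts with ".L" is treated as assembler-local and is not
written to the object file's symbol table (unless -L/--keep-locals is
passed), so such labels can no longer appear in profiles or backtraces.
A minimal before/after sketch, using the "done" label from this very file
purely for illustration:

	/* before: "done" lands in the symbol table, is listed by
	 * "nm vmlinux", and profilers attribute samples to it */
	done:
		jr	ra
		 nop

	/* after: the ".L" prefix marks the label as assembler-local,
	 * so it never reaches the symbol table */
	.Ldone:
		jr	ra
		 nop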

Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
diff --git a/arch/mips/lib/memcpy-inatomic.S b/arch/mips/lib/memcpy-inatomic.S
index d1b08f5..736d0fb 100644
--- a/arch/mips/lib/memcpy-inatomic.S
+++ b/arch/mips/lib/memcpy-inatomic.S
@@ -209,36 +209,36 @@
 	and	t1, dst, ADDRMASK
 	PREF(	0, 1*32(src) )
 	PREF(	1, 1*32(dst) )
-	bnez	t2, copy_bytes_checklen
+	bnez	t2, .Lcopy_bytes_checklen
 	 and	t0, src, ADDRMASK
 	PREF(	0, 2*32(src) )
 	PREF(	1, 2*32(dst) )
-	bnez	t1, dst_unaligned
+	bnez	t1, .Ldst_unaligned
 	 nop
-	bnez	t0, src_unaligned_dst_aligned
+	bnez	t0, .Lsrc_unaligned_dst_aligned
 	/*
 	 * use delay slot for fall-through
 	 * src and dst are aligned; need to compute rem
 	 */
-both_aligned:
-	 SRL	t0, len, LOG_NBYTES+3    # +3 for 8 units/iter
-	beqz	t0, cleanup_both_aligned # len < 8*NBYTES
-	 and	rem, len, (8*NBYTES-1)	 # rem = len % (8*NBYTES)
+.Lboth_aligned:
+	 SRL	t0, len, LOG_NBYTES+3    	# +3 for 8 units/iter
+	beqz	t0, .Lcleanup_both_aligned	# len < 8*NBYTES
+	 and	rem, len, (8*NBYTES-1)	 	# rem = len % (8*NBYTES)
 	PREF(	0, 3*32(src) )
 	PREF(	1, 3*32(dst) )
 	.align	4
 1:
-EXC(	LOAD	t0, UNIT(0)(src),	l_exc)
-EXC(	LOAD	t1, UNIT(1)(src),	l_exc_copy)
-EXC(	LOAD	t2, UNIT(2)(src),	l_exc_copy)
-EXC(	LOAD	t3, UNIT(3)(src),	l_exc_copy)
+EXC(	LOAD	t0, UNIT(0)(src),	.Ll_exc)
+EXC(	LOAD	t1, UNIT(1)(src),	.Ll_exc_copy)
+EXC(	LOAD	t2, UNIT(2)(src),	.Ll_exc_copy)
+EXC(	LOAD	t3, UNIT(3)(src),	.Ll_exc_copy)
 	SUB	len, len, 8*NBYTES
-EXC(	LOAD	t4, UNIT(4)(src),	l_exc_copy)
-EXC(	LOAD	t7, UNIT(5)(src),	l_exc_copy)
+EXC(	LOAD	t4, UNIT(4)(src),	.Ll_exc_copy)
+EXC(	LOAD	t7, UNIT(5)(src),	.Ll_exc_copy)
 	STORE	t0, UNIT(0)(dst)
 	STORE	t1, UNIT(1)(dst)
-EXC(	LOAD	t0, UNIT(6)(src),	l_exc_copy)
-EXC(	LOAD	t1, UNIT(7)(src),	l_exc_copy)
+EXC(	LOAD	t0, UNIT(6)(src),	.Ll_exc_copy)
+EXC(	LOAD	t1, UNIT(7)(src),	.Ll_exc_copy)
 	ADD	src, src, 8*NBYTES
 	ADD	dst, dst, 8*NBYTES
 	STORE	t2, UNIT(-6)(dst)
@@ -255,18 +255,18 @@
 	/*
 	 * len == rem == the number of bytes left to copy < 8*NBYTES
 	 */
-cleanup_both_aligned:
-	beqz	len, done
+.Lcleanup_both_aligned:
+	beqz	len, .Ldone
 	 sltu	t0, len, 4*NBYTES
-	bnez	t0, less_than_4units
+	bnez	t0, .Lless_than_4units
 	 and	rem, len, (NBYTES-1)	# rem = len % NBYTES
 	/*
 	 * len >= 4*NBYTES
 	 */
-EXC(	LOAD	t0, UNIT(0)(src),	l_exc)
-EXC(	LOAD	t1, UNIT(1)(src),	l_exc_copy)
-EXC(	LOAD	t2, UNIT(2)(src),	l_exc_copy)
-EXC(	LOAD	t3, UNIT(3)(src),	l_exc_copy)
+EXC(	LOAD	t0, UNIT(0)(src),	.Ll_exc)
+EXC(	LOAD	t1, UNIT(1)(src),	.Ll_exc_copy)
+EXC(	LOAD	t2, UNIT(2)(src),	.Ll_exc_copy)
+EXC(	LOAD	t3, UNIT(3)(src),	.Ll_exc_copy)
 	SUB	len, len, 4*NBYTES
 	ADD	src, src, 4*NBYTES
 	STORE	t0, UNIT(0)(dst)
@@ -275,16 +275,16 @@
 	STORE	t3, UNIT(3)(dst)
 	.set	reorder				/* DADDI_WAR */
 	ADD	dst, dst, 4*NBYTES
-	beqz	len, done
+	beqz	len, .Ldone
 	.set	noreorder
-less_than_4units:
+.Lless_than_4units:
 	/*
 	 * rem = len % NBYTES
 	 */
-	beq	rem, len, copy_bytes
+	beq	rem, len, .Lcopy_bytes
 	 nop
 1:
-EXC(	LOAD	t0, 0(src),		l_exc)
+EXC(	LOAD	t0, 0(src),		.Ll_exc)
 	ADD	src, src, NBYTES
 	SUB	len, len, NBYTES
 	STORE	t0, 0(dst)
@@ -305,17 +305,17 @@
 	 * more instruction-level parallelism.
 	 */
 #define bits t2
-	beqz	len, done
+	beqz	len, .Ldone
 	 ADD	t1, dst, len	# t1 is just past last byte of dst
 	li	bits, 8*NBYTES
 	SLL	rem, len, 3	# rem = number of bits to keep
-EXC(	LOAD	t0, 0(src),		l_exc)
+EXC(	LOAD	t0, 0(src),		.Ll_exc)
 	SUB	bits, bits, rem	# bits = number of bits to discard
 	SHIFT_DISCARD t0, t0, bits
 	STREST	t0, -1(t1)
 	jr	ra
 	 move	len, zero
-dst_unaligned:
+.Ldst_unaligned:
 	/*
 	 * dst is unaligned
 	 * t0 = src & ADDRMASK
@@ -326,22 +326,22 @@
 	 * Set match = (src and dst have same alignment)
 	 */
 #define match rem
-EXC(	LDFIRST	t3, FIRST(0)(src),	l_exc)
+EXC(	LDFIRST	t3, FIRST(0)(src),	.Ll_exc)
 	ADD	t2, zero, NBYTES
-EXC(	LDREST	t3, REST(0)(src),	l_exc_copy)
+EXC(	LDREST	t3, REST(0)(src),	.Ll_exc_copy)
 	SUB	t2, t2, t1	# t2 = number of bytes copied
 	xor	match, t0, t1
 	STFIRST t3, FIRST(0)(dst)
-	beq	len, t2, done
+	beq	len, t2, .Ldone
 	 SUB	len, len, t2
 	ADD	dst, dst, t2
-	beqz	match, both_aligned
+	beqz	match, .Lboth_aligned
 	 ADD	src, src, t2
 
-src_unaligned_dst_aligned:
+.Lsrc_unaligned_dst_aligned:
 	SRL	t0, len, LOG_NBYTES+2    # +2 for 4 units/iter
 	PREF(	0, 3*32(src) )
-	beqz	t0, cleanup_src_unaligned
+	beqz	t0, .Lcleanup_src_unaligned
 	 and	rem, len, (4*NBYTES-1)   # rem = len % 4*NBYTES
 	PREF(	1, 3*32(dst) )
 1:
@@ -351,15 +351,15 @@
  * It's OK to load FIRST(N+1) before REST(N) because the two addresses
  * are to the same unit (unless src is aligned, but it's not).
  */
-EXC(	LDFIRST	t0, FIRST(0)(src),	l_exc)
-EXC(	LDFIRST	t1, FIRST(1)(src),	l_exc_copy)
+EXC(	LDFIRST	t0, FIRST(0)(src),	.Ll_exc)
+EXC(	LDFIRST	t1, FIRST(1)(src),	.Ll_exc_copy)
 	SUB     len, len, 4*NBYTES
-EXC(	LDREST	t0, REST(0)(src),	l_exc_copy)
-EXC(	LDREST	t1, REST(1)(src),	l_exc_copy)
-EXC(	LDFIRST	t2, FIRST(2)(src),	l_exc_copy)
-EXC(	LDFIRST	t3, FIRST(3)(src),	l_exc_copy)
-EXC(	LDREST	t2, REST(2)(src),	l_exc_copy)
-EXC(	LDREST	t3, REST(3)(src),	l_exc_copy)
+EXC(	LDREST	t0, REST(0)(src),	.Ll_exc_copy)
+EXC(	LDREST	t1, REST(1)(src),	.Ll_exc_copy)
+EXC(	LDFIRST	t2, FIRST(2)(src),	.Ll_exc_copy)
+EXC(	LDFIRST	t3, FIRST(3)(src),	.Ll_exc_copy)
+EXC(	LDREST	t2, REST(2)(src),	.Ll_exc_copy)
+EXC(	LDREST	t3, REST(3)(src),	.Ll_exc_copy)
 	PREF(	0, 9*32(src) )		# 0 is PREF_LOAD  (not streamed)
 	ADD	src, src, 4*NBYTES
 #ifdef CONFIG_CPU_SB1
@@ -375,14 +375,14 @@
 	bne	len, rem, 1b
 	.set	noreorder
 
-cleanup_src_unaligned:
-	beqz	len, done
+.Lcleanup_src_unaligned:
+	beqz	len, .Ldone
 	 and	rem, len, NBYTES-1  # rem = len % NBYTES
-	beq	rem, len, copy_bytes
+	beq	rem, len, .Lcopy_bytes
 	 nop
 1:
-EXC(	LDFIRST t0, FIRST(0)(src),	l_exc)
-EXC(	LDREST	t0, REST(0)(src),	l_exc_copy)
+EXC(	LDFIRST t0, FIRST(0)(src),	.Ll_exc)
+EXC(	LDREST	t0, REST(0)(src),	.Ll_exc_copy)
 	ADD	src, src, NBYTES
 	SUB	len, len, NBYTES
 	STORE	t0, 0(dst)
@@ -391,15 +391,15 @@
 	bne	len, rem, 1b
 	.set	noreorder
 
-copy_bytes_checklen:
-	beqz	len, done
+.Lcopy_bytes_checklen:
+	beqz	len, .Ldone
 	 nop
-copy_bytes:
+.Lcopy_bytes:
 	/* 0 < len < NBYTES  */
 #define COPY_BYTE(N)			\
-EXC(	lb	t0, N(src), l_exc);	\
+EXC(	lb	t0, N(src), .Ll_exc);	\
 	SUB	len, len, 1;		\
-	beqz	len, done;		\
+	beqz	len, .Ldone;		\
 	 sb	t0, N(dst)
 
 	COPY_BYTE(0)
@@ -410,16 +410,16 @@
 	COPY_BYTE(4)
 	COPY_BYTE(5)
 #endif
-EXC(	lb	t0, NBYTES-2(src), l_exc)
+EXC(	lb	t0, NBYTES-2(src), .Ll_exc)
 	SUB	len, len, 1
 	jr	ra
 	 sb	t0, NBYTES-2(dst)
-done:
+.Ldone:
 	jr	ra
 	 nop
 	END(__copy_user_inatomic)
 
-l_exc_copy:
+.Ll_exc_copy:
 	/*
 	 * Copy bytes from src until faulting load address (or until a
 	 * lb faults)
@@ -434,14 +434,14 @@
 	 nop
 	LOAD	t0, THREAD_BUADDR(t0)
 1:
-EXC(	lb	t1, 0(src),	l_exc)
+EXC(	lb	t1, 0(src),	.Ll_exc)
 	ADD	src, src, 1
 	sb	t1, 0(dst)	# can't fault -- we're copy_from_user
 	.set	reorder				/* DADDI_WAR */
 	ADD	dst, dst, 1
 	bne	src, t0, 1b
 	.set	noreorder
-l_exc:
+.Ll_exc:
 	LOAD	t0, TI_TASK($28)
 	 nop
 	LOAD	t0, THREAD_BUADDR(t0)	# t0 is just past last good address