ARC: fold __builtin_constant_p() into test_bit()

This makes test_bit() consistent with its sibling *_bit() routines.
Also add a comment explaining the constant @nr micro-optimization.

Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
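
For illustration only (not part of the patch): a minimal standalone sketch of
what the folded test_bit() boils down to. It assumes a 32-bit unsigned long and
the ARC ASL shift behaviour described in the in-code comment below; the
test_bit_sketch() name is made up here.

/* Sketch of the folded helper: constant @nr is clamped, variable @nr is not */
static inline int
test_bit_sketch(unsigned int nr, const volatile unsigned long *addr)
{
	unsigned long mask;

	addr += nr >> 5;		/* step to the 32-bit word holding bit @nr */

	if (__builtin_constant_p(nr))	/* e.g. nr = 33 folds to nr = 1 at compile */
		nr &= 0x1f;		/* time, so no masking code is emitted      */

	mask = 1 << nr;			/* variable @nr relies on ASL using nr & 31 */

	return ((mask & *addr) != 0);
}

With a constant @nr the clamp happens entirely at compile time, so the result
matches what __constant_test_bit() used to compute, without needing a separate
routine behind a ternary macro.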
diff --git a/arch/arc/include/asm/bitops.h b/arch/arc/include/asm/bitops.h
index 1a5bf07..4051e95 100644
--- a/arch/arc/include/asm/bitops.h
+++ b/arch/arc/include/asm/bitops.h
@@ -32,6 +32,20 @@
 
 	m += nr >> 5;
 
+	/*
+	 * ARC ISA micro-optimization:
+	 *
+	 * Instructions dealing with a bit position only consider the lower
+	 * 5 bits (0-31), e.g. (x << 33) is handled like (x << 1) by the ASL
+	 * instruction (the mem pointer still needs adjusting to the next word).
+	 *
+	 * Hence the masking to clamp the @nr arg can be elided in general.
+	 *
+	 * However if @nr is a constant greater than 31 (the above assumes it
+	 * is in a register), gcc treats (x << 33) as overflowing the 32-bit
+	 * type and can optimize it away to 0. Thus masking still needs to be
+	 * done for a constant @nr, but it generates no code due to const prop.
+	 */
 	if (__builtin_constant_p(nr))
 		nr &= 0x1f;
 
@@ -374,29 +388,20 @@
  * This routine doesn't need to be atomic.
  */
 static inline int
-__constant_test_bit(unsigned int nr, const volatile unsigned long *addr)
-{
-	return ((1UL << (nr & 31)) &
-		(((const volatile unsigned int *)addr)[nr >> 5])) != 0;
-}
-
-static inline int
-__test_bit(unsigned int nr, const volatile unsigned long *addr)
+test_bit(unsigned int nr, const volatile unsigned long *addr)
 {
 	unsigned long mask;
 
 	addr += nr >> 5;
 
-	/* ARC700 only considers 5 bits in bit-fiddling insn */
+	if (__builtin_constant_p(nr))
+		nr &= 0x1f;
+
 	mask = 1 << nr;
 
 	return ((mask & *addr) != 0);
 }
 
-#define test_bit(nr, addr)	(__builtin_constant_p(nr) ? \
-					__constant_test_bit((nr), (addr)) : \
-					__test_bit((nr), (addr)))
-
 /*
  * Count the number of zeros, starting from MSB
  * Helper for fls( ) friends