asm-generic/atomic.h: allow SMP architectures to leverage this

Only a few core functions need to be implemented for SMP systems, so allow
architectures to override just those while getting the rest for free.

At least, this is enough to allow the Blackfin SMP port to use this header.
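
As a rough illustration (not part of this patch: the arch/xxx path is made
up, and the __sync compiler builtins merely stand in for whatever atomic
primitive a port's hardware actually provides), an SMP arch header could
look like:

  /* hypothetical arch/xxx/include/asm/atomic.h -- a sketch only */
  #include <linux/types.h>	/* atomic_t */

  static inline int atomic_add_return(int i, atomic_t *v)
  {
  	/* atomically add i to v->counter and return the result */
  	return __sync_add_and_fetch(&v->counter, i);
  }
  #define atomic_add_return atomic_add_return

  static inline int atomic_sub_return(int i, atomic_t *v)
  {
  	/* atomically subtract i from v->counter and return the result */
  	return __sync_sub_and_fetch(&v->counter, i);
  }
  #define atomic_sub_return atomic_sub_return

  static inline void atomic_clear_mask(unsigned long mask, atomic_t *v)
  {
  	/* atomically clear the bits set in mask */
  	__sync_and_and_fetch(&v->counter, (int)~mask);
  }
  #define atomic_clear_mask atomic_clear_mask

  static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
  {
  	/* atomically set the bits set in mask */
  	__sync_or_and_fetch(&v->counter, (int)mask);
  }
  #define atomic_set_mask atomic_set_mask

  /* everything else falls through to the generic versions */
  #include <asm-generic/atomic.h>

Defining each function to its own name is what satisfies the matching
#ifndef guard in the generic header, so only these four ops need arch
help; atomic_add, atomic_inc, and the rest keep coming from the generic
fallbacks.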

Signed-off-by: Mike Frysinger <vapier@gentoo.org>
Cc: Arun Sharma <asharma@fb.com>
Cc: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
index 4c56e0d..e37963c1 100644
--- a/include/asm-generic/atomic.h
+++ b/include/asm-generic/atomic.h
@@ -16,7 +16,11 @@
 #define __ASM_GENERIC_ATOMIC_H
 
 #ifdef CONFIG_SMP
-#error not SMP safe
+/* Force people to define core atomics */
+# if !defined(atomic_add_return) || !defined(atomic_sub_return) || \
+     !defined(atomic_clear_mask) || !defined(atomic_set_mask)
+#  error "SMP requires a little arch-specific magic"
+# endif
 #endif
 
 /*
@@ -34,7 +38,9 @@
  *
  * Atomically reads the value of @v.
  */
+#ifndef atomic_read
 #define atomic_read(v)	(*(volatile int *)&(v)->counter)
+#endif
 
 /**
  * atomic_set - set atomic variable
@@ -55,6 +61,7 @@
  *
  * Atomically adds @i to @v and returns the result
  */
+#ifndef atomic_add_return
 static inline int atomic_add_return(int i, atomic_t *v)
 {
 	unsigned long flags;
@@ -68,6 +75,7 @@
 
 	return temp;
 }
+#endif
 
 /**
  * atomic_sub_return - subtract integer from atomic variable
@@ -76,6 +84,7 @@
  *
  * Atomically subtracts @i from @v and returns the result
  */
+#ifndef atomic_sub_return
 static inline int atomic_sub_return(int i, atomic_t *v)
 {
 	unsigned long flags;
@@ -89,6 +98,7 @@
 
 	return temp;
 }
+#endif
 
 static inline int atomic_add_negative(int i, atomic_t *v)
 {
@@ -147,6 +157,7 @@
  *
  * Atomically clears the bits set in @mask from @v
  */
+#ifndef atomic_clear_mask
 static inline void atomic_clear_mask(unsigned long mask, atomic_t *v)
 {
 	unsigned long flags;
@@ -156,6 +167,7 @@
 	v->counter &= mask;
 	raw_local_irq_restore(flags);
 }
+#endif
 
 /**
  * atomic_set_mask - Atomically set bits in atomic variable
@@ -164,6 +176,7 @@
  *
  * Atomically sets the bits set in @mask in @v
  */
+#ifndef atomic_set_mask
 static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
 {
 	unsigned long flags;
@@ -172,6 +185,7 @@
 	v->counter |= mask;
 	raw_local_irq_restore(flags);
 }
+#endif
 
 /* Assume that atomic operations are already serializing */
 #define smp_mb__before_atomic_dec()	barrier()