Blackfin arch: SMP supporting patchset: Blackfin header files and machine common code

The Blackfin dual-core BF561 processor can support SMP-like features; for details, see:
https://docs.blackfin.uclinux.org/doku.php?id=linux-kernel:smp-like

This patch extends the Blackfin header files and the machine common
code with SMP support.
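
With these changes, a counter shared between the two cores can be
manipulated through the regular atomic_t API; under CONFIG_SMP the
operations are routed to the new assembly helpers such as
__raw_atomic_update_asm() and __raw_uncached_fetch_asm(). A minimal,
hypothetical usage sketch (bump_shared_count() is illustrative only
and is not part of this patch):

	#include <asm/atomic.h>

	static atomic_t shared_count = ATOMIC_INIT(0);

	void bump_shared_count(void)
	{
		/* under CONFIG_SMP this expands to __raw_atomic_update_asm() */
		atomic_add(1, &shared_count);

		/* under CONFIG_SMP this expands to __raw_uncached_fetch_asm() */
		if (atomic_read(&shared_count) < 0)
			atomic_set(&shared_count, 0);
	}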

Signed-off-by: Graf Yang <graf.yang@analog.com>
Signed-off-by: Bryan Wu <cooloney@kernel.org>

diff --git a/arch/blackfin/include/asm/atomic.h b/arch/blackfin/include/asm/atomic.h
index 25776c1..d76275e 100644
--- a/arch/blackfin/include/asm/atomic.h
+++ b/arch/blackfin/include/asm/atomic.h
@@ -15,11 +15,80 @@
  */
 
 #define ATOMIC_INIT(i)	{ (i) }
-
-#define atomic_read(v)		((v)->counter)
 #define atomic_set(v, i)	(((v)->counter) = i)
 
-static __inline__ void atomic_add(int i, atomic_t * v)
+#ifdef CONFIG_SMP
+
+#define atomic_read(v)	__raw_uncached_fetch_asm(&(v)->counter)
+
+asmlinkage int __raw_uncached_fetch_asm(const volatile int *ptr);
+
+asmlinkage int __raw_atomic_update_asm(volatile int *ptr, int value);
+
+asmlinkage int __raw_atomic_clear_asm(volatile int *ptr, int value);
+
+asmlinkage int __raw_atomic_set_asm(volatile int *ptr, int value);
+
+asmlinkage int __raw_atomic_xor_asm(volatile int *ptr, int value);
+
+asmlinkage int __raw_atomic_test_asm(const volatile int *ptr, int value);
+
+static inline void atomic_add(int i, atomic_t *v)
+{
+	__raw_atomic_update_asm(&v->counter, i);
+}
+
+static inline void atomic_sub(int i, atomic_t *v)
+{
+	__raw_atomic_update_asm(&v->counter, -i);
+}
+
+static inline int atomic_add_return(int i, atomic_t *v)
+{
+	return __raw_atomic_update_asm(&v->counter, i);
+}
+
+static inline int atomic_sub_return(int i, atomic_t *v)
+{
+	return __raw_atomic_update_asm(&v->counter, -i);
+}
+
+static inline void atomic_inc(volatile atomic_t *v)
+{
+	__raw_atomic_update_asm(&v->counter, 1);
+}
+
+static inline void atomic_dec(volatile atomic_t *v)
+{
+	__raw_atomic_update_asm(&v->counter, -1);
+}
+
+static inline void atomic_clear_mask(int mask, atomic_t *v)
+{
+	__raw_atomic_clear_asm(&v->counter, mask);
+}
+
+static inline void atomic_set_mask(int mask, atomic_t *v)
+{
+	__raw_atomic_set_asm(&v->counter, mask);
+}
+
+static inline int atomic_test_mask(int mask, atomic_t *v)
+{
+	return __raw_atomic_test_asm(&v->counter, mask);
+}
+
+/* Atomic operations are already serializing */
+#define smp_mb__before_atomic_dec()	barrier()
+#define smp_mb__after_atomic_dec()	barrier()
+#define smp_mb__before_atomic_inc()	barrier()
+#define smp_mb__after_atomic_inc()	barrier()
+
+#else /* !CONFIG_SMP */
+
+#define atomic_read(v)	((v)->counter)
+
+static inline void atomic_add(int i, atomic_t *v)
 {
 	long flags;
 
@@ -28,7 +97,7 @@
 	local_irq_restore(flags);
 }
 
-static __inline__ void atomic_sub(int i, atomic_t * v)
+static inline void atomic_sub(int i, atomic_t *v)
 {
 	long flags;
 
@@ -38,7 +107,7 @@
 
 }
 
-static inline int atomic_add_return(int i, atomic_t * v)
+static inline int atomic_add_return(int i, atomic_t *v)
 {
 	int __temp = 0;
 	long flags;
@@ -52,8 +121,7 @@
 	return __temp;
 }
 
-#define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)
-static inline int atomic_sub_return(int i, atomic_t * v)
+static inline int atomic_sub_return(int i, atomic_t *v)
 {
 	int __temp = 0;
 	long flags;
@@ -66,7 +134,7 @@
 	return __temp;
 }
 
-static __inline__ void atomic_inc(volatile atomic_t * v)
+static inline void atomic_inc(volatile atomic_t *v)
 {
 	long flags;
 
@@ -75,20 +143,7 @@
 	local_irq_restore(flags);
 }
 
-#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
-
-#define atomic_add_unless(v, a, u)				\
-({								\
-	int c, old;						\
-	c = atomic_read(v);					\
-	while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
-		c = old;					\
-	c != (u);						\
-})
-#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
-
-static __inline__ void atomic_dec(volatile atomic_t * v)
+static inline void atomic_dec(volatile atomic_t *v)
 {
 	long flags;
 
@@ -97,7 +152,7 @@
 	local_irq_restore(flags);
 }
 
-static __inline__ void atomic_clear_mask(unsigned int mask, atomic_t * v)
+static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
 {
 	long flags;
 
@@ -106,7 +161,7 @@
 	local_irq_restore(flags);
 }
 
-static __inline__ void atomic_set_mask(unsigned int mask, atomic_t * v)
+static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
 {
 	long flags;
 
@@ -121,9 +176,25 @@
 #define smp_mb__before_atomic_inc()    barrier()
 #define smp_mb__after_atomic_inc() barrier()
 
+#endif /* !CONFIG_SMP */
+
+#define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)
 #define atomic_dec_return(v) atomic_sub_return(1,(v))
 #define atomic_inc_return(v) atomic_add_return(1,(v))
 
+#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+
+#define atomic_add_unless(v, a, u)				\
+({								\
+	int c, old;						\
+	c = atomic_read(v);					\
+	while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
+		c = old;					\
+	c != (u);						\
+})
+#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
+
 /*
  * atomic_inc_and_test - increment and test
  * @v: pointer of type atomic_t