/* atomic.h: These still suck, but the I-cache hit rate is higher.
 *
 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
 * Copyright (C) 2000 Anton Blanchard (anton@linuxcare.com.au)
 * Copyright (C) 2007 Kyle McMartin (kyle@parisc-linux.org)
 *
 * Additions by Keith M Wesolowski (wesolows@foobazco.org) based
 * on asm-parisc/atomic.h Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>.
 */

#ifndef __ARCH_SPARC_ATOMIC__
#define __ARCH_SPARC_ATOMIC__

#include <linux/types.h>

#ifdef __KERNEL__

#include <asm/system.h>

#define ATOMIC_INIT(i)	{ (i) }

extern int __atomic_add_return(int, atomic_t *);
extern int atomic_cmpxchg(atomic_t *, int, int);
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
extern int __atomic_add_unless(atomic_t *, int, int);
extern void atomic_set(atomic_t *, int);

/* The volatile cast forces the compiler to emit a real load each
 * time, rather than reusing a value cached in a register.
 */
#define atomic_read(v)		(*(volatile int *)&(v)->counter)

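/* Illustrative only: the classic way an "add unless" primitive is
 * built from a cmpxchg retry loop, in the style of the generic
 * kernel helpers.  This sketch is NOT the sparc32 implementation of
 * __atomic_add_unless(), which lives out of line; the function name
 * here is hypothetical.  It adds @a only if the counter was not @u,
 * and returns the value observed before any add.
 */
static inline int example_add_unless(atomic_t *v, int a, int u)
{
	int c, old;

	c = atomic_read(v);
	while (c != u && (old = atomic_cmpxchg(v, c, c + a)) != c)
		c = old;	/* lost a race; retry with the fresh value */
	return c;
}
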
#define atomic_add(i, v)	((void)__atomic_add_return( (int)(i), (v)))
#define atomic_sub(i, v)	((void)__atomic_add_return(-(int)(i), (v)))
#define atomic_inc(v)		((void)__atomic_add_return(        1, (v)))
#define atomic_dec(v)		((void)__atomic_add_return(       -1, (v)))

#define atomic_add_return(i, v)	(__atomic_add_return( (int)(i), (v)))
#define atomic_sub_return(i, v)	(__atomic_add_return(-(int)(i), (v)))
#define atomic_inc_return(v)	(__atomic_add_return(        1, (v)))
#define atomic_dec_return(v)	(__atomic_add_return(       -1, (v)))

#define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)

/*
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)

#define atomic_dec_and_test(v) (atomic_dec_return(v) == 0)
#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
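
/* Illustrative only: the usual reference-counting idiom the
 * *_and_test forms exist for.  atomic_dec_and_test() returns true
 * only for the caller whose decrement drove the count to zero, so
 * exactly one thread performs the release.  The structure and
 * function names below are hypothetical.
 */
struct example_obj {
	atomic_t refcnt;
};

static inline void example_put(struct example_obj *obj,
			       void (*release)(struct example_obj *))
{
	if (atomic_dec_and_test(&obj->refcnt))
		release(obj);	/* last reference dropped */
}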

/* This is the old 24-bit implementation.  It's still used internally
 * by some sparc-specific code, notably the semaphore implementation.
 */
typedef struct { volatile int counter; } atomic24_t;

#ifndef CONFIG_SMP

#define ATOMIC24_INIT(i)  { (i) }
#define atomic24_read(v)          ((v)->counter)
#define atomic24_set(v, i)        (((v)->counter) = i)

#else
/* We do the bulk of the actual work out of line in two common
 * routines in assembler, see arch/sparc/lib/atomic.S for the
 * "fun" details.
 *
 * For SMP the trick is you embed the spin lock byte within
 * the word, using the low byte so signedness is easily retained
 * via a quick arithmetic shift.  It looks like this:
 *
 *	----------------------------------------
 *	| signed 24-bit counter value |  lock  |  atomic24_t
 *	----------------------------------------
 *	 31                          8 7      0
 */

#define ATOMIC24_INIT(i)	{ ((i) << 8) }
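
/* A worked example of the encoding, assuming a 32-bit int: -1 is
 * stored as ATOMIC24_INIT(-1) == 0xffffff00, the 24-bit two's-
 * complement value in bits 31..8 with the lock byte clear.  Reading
 * it back with an arithmetic shift (ret >> 8 on a signed int)
 * propagates the sign bit, yielding -1 again; a logical shift would
 * instead produce 0x00ffffff.
 */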

static inline int atomic24_read(const atomic24_t *v)
{
	int ret = v->counter;

	/* A nonzero low byte means an updater currently holds the
	 * embedded lock and the word is mid-update; reload until it
	 * clears.
	 */
	while (ret & 0xff)
		ret = v->counter;

	/* Arithmetic shift drops the lock byte and sign-extends the
	 * 24-bit counter back to a full int.
	 */
	return ret >> 8;
}

#define atomic24_set(v, i)	(((v)->counter) = ((i) << 8))
#endif

static inline int __atomic24_add(int i, atomic24_t *v)
{
	/* The out-of-line assembler routine takes its arguments in
	 * fixed global registers: the counter address in %g1 and the
	 * increment in %g2, which also carries back the new value.
	 * %g3, %g4 and %g7 are scratch registers it may clobber.
	 */
	register volatile int *ptr asm("g1");
	register int increment asm("g2");
	register int tmp1 asm("g3");
	register int tmp2 asm("g4");
	register int tmp3 asm("g7");

	ptr = &v->counter;
	increment = i;

	/* The return address in %o7 is saved and pre-biased by 8 to
	 * match the return convention of the assembler routine; see
	 * arch/sparc/lib/atomic.S.
	 */
	__asm__ __volatile__(
	"mov	%%o7, %%g4\n\t"
	"call	___atomic24_add\n\t"
	" add	%%o7, 8, %%o7\n"
	: "=&r" (increment), "=r" (tmp1), "=r" (tmp2), "=r" (tmp3)
	: "0" (increment), "r" (ptr)
	: "memory", "cc");

	return increment;
}

static inline int __atomic24_sub(int i, atomic24_t *v)
{
	/* Same calling convention as __atomic24_add() above: address
	 * in %g1, amount in %g2 (doubling as the return value), with
	 * %g3, %g4 and %g7 as scratch.
	 */
	register volatile int *ptr asm("g1");
	register int increment asm("g2");
	register int tmp1 asm("g3");
	register int tmp2 asm("g4");
	register int tmp3 asm("g7");

	ptr = &v->counter;
	increment = i;

	__asm__ __volatile__(
	"mov	%%o7, %%g4\n\t"
	"call	___atomic24_sub\n\t"
	" add	%%o7, 8, %%o7\n"
	: "=&r" (increment), "=r" (tmp1), "=r" (tmp2), "=r" (tmp3)
	: "0" (increment), "r" (ptr)
	: "memory", "cc");

	return increment;
}

#define atomic24_add(i, v)	((void)__atomic24_add((i), (v)))
#define atomic24_sub(i, v)	((void)__atomic24_sub((i), (v)))

#define atomic24_dec_return(v)	__atomic24_sub(1, (v))
#define atomic24_inc_return(v)	__atomic24_add(1, (v))

#define atomic24_sub_and_test(i, v)	(__atomic24_sub((i), (v)) == 0)
#define atomic24_dec_and_test(v)	(__atomic24_sub(1, (v)) == 0)

#define atomic24_inc(v)	((void)__atomic24_add(1, (v)))
#define atomic24_dec(v)	((void)__atomic24_sub(1, (v)))

#define atomic24_add_negative(i, v)	(__atomic24_add((i), (v)) < 0)
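
/* Illustrative only: the atomic24 API mirrors the regular atomic_t
 * one, so a 24-bit counter is used the same way.  A minimal sketch,
 * with a hypothetical counter:
 *
 *	static atomic24_t example_count = ATOMIC24_INIT(0);
 *
 *	atomic24_inc(&example_count);
 *	atomic24_add(5, &example_count);
 *	if (atomic24_sub_and_test(6, &example_count))
 *		;  (true exactly when this call returns it to zero)
 */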

/* Atomic operations are already serializing */
#define smp_mb__before_atomic_dec()	barrier()
#define smp_mb__after_atomic_dec()	barrier()
#define smp_mb__before_atomic_inc()	barrier()
#define smp_mb__after_atomic_inc()	barrier()

#endif /* !(__KERNEL__) */

#include <asm-generic/atomic-long.h>
#endif /* !(__ARCH_SPARC_ATOMIC__) */