#ifndef _ARCH_POWERPC_LOCAL_H
#define _ARCH_POWERPC_LOCAL_H

#include <linux/percpu.h>
#include <asm/atomic.h>

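/*
 * local_t wraps a signed long that is updated atomically only with
 * respect to the CPU that owns it (interrupts included), which makes
 * it cheaper than atomic_long_t for per-cpu counters.  See
 * Documentation/local_ops.txt for the usage rules.
 */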
typedef struct
{
	atomic_long_t a;
} local_t;

#define LOCAL_INIT(i)	{ ATOMIC_LONG_INIT(i) }

#define local_read(l)	atomic_long_read(&(l)->a)
#define local_set(l,i)	atomic_long_set(&(l)->a, (i))

#define local_add(i,l)	atomic_long_add((i),(&(l)->a))
#define local_sub(i,l)	atomic_long_sub((i),(&(l)->a))
#define local_inc(l)	atomic_long_inc(&(l)->a)
#define local_dec(l)	atomic_long_dec(&(l)->a)

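/**
 * local_add_return - add and return
 * @a: the amount to add
 * @l: pointer of type local_t
 *
 * Atomically adds @a to @l and returns the new value of @l.
 */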
static __inline__ long local_add_return(long a, local_t *l)
{
	long t;

	__asm__ __volatile__(
"1:"	PPC_LLARX "%0,0,%2		# local_add_return\n\
	add	%0,%1,%0\n"
	PPC405_ERR77(0,%2)
	PPC_STLCX "%0,0,%2 \n\
	bne-	1b"
	: "=&r" (t)
	: "r" (a), "r" (&(l->a.counter))
	: "cc", "memory");

	return t;
}

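/**
 * local_add_negative - add and test if negative
 * @a: the amount to add
 * @l: pointer of type local_t
 *
 * Atomically adds @a to @l and returns true if the result is
 * negative, or false when the result is greater than or equal to zero.
 */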
#define local_add_negative(a, l)	(local_add_return((a), (l)) < 0)

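/**
 * local_sub_return - subtract and return
 * @a: the amount to subtract
 * @l: pointer of type local_t
 *
 * Atomically subtracts @a from @l and returns the new value of @l.
 */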
static __inline__ long local_sub_return(long a, local_t *l)
{
	long t;

	__asm__ __volatile__(
"1:"	PPC_LLARX "%0,0,%2		# local_sub_return\n\
	subf	%0,%1,%0\n"
	PPC405_ERR77(0,%2)
	PPC_STLCX "%0,0,%2 \n\
	bne-	1b"
	: "=&r" (t)
	: "r" (a), "r" (&(l->a.counter))
	: "cc", "memory");

	return t;
}

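/**
 * local_inc_return - increment and return
 * @l: pointer of type local_t
 *
 * Atomically increments @l by 1 and returns the new value of @l.
 */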
static __inline__ long local_inc_return(local_t *l)
{
	long t;

	__asm__ __volatile__(
"1:"	PPC_LLARX "%0,0,%1		# local_inc_return\n\
	addic	%0,%0,1\n"
	PPC405_ERR77(0,%1)
	PPC_STLCX "%0,0,%1 \n\
	bne-	1b"
	: "=&r" (t)
	: "r" (&(l->a.counter))
	: "cc", "memory");

	return t;
}

/*
 * local_inc_and_test - increment and test
 * @l: pointer of type local_t
 *
 * Atomically increments @l by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define local_inc_and_test(l) (local_inc_return(l) == 0)

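/**
 * local_dec_return - decrement and return
 * @l: pointer of type local_t
 *
 * Atomically decrements @l by 1 and returns the new value of @l.
 */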
static __inline__ long local_dec_return(local_t *l)
{
	long t;

	__asm__ __volatile__(
"1:"	PPC_LLARX "%0,0,%1		# local_dec_return\n\
	addic	%0,%0,-1\n"
	PPC405_ERR77(0,%1)
	PPC_STLCX "%0,0,%1\n\
	bne-	1b"
	: "=&r" (t)
	: "r" (&(l->a.counter))
	: "cc", "memory");

	return t;
}

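/*
 * local_cmpxchg() atomically sets @l to @n if its current value is @o,
 * returning the value seen before the (attempted) exchange.
 * local_xchg() unconditionally stores @n and returns the old value.
 */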
#define local_cmpxchg(l, o, n) \
	(cmpxchg_local(&((l)->a.counter), (o), (n)))
#define local_xchg(l, n) (xchg_local(&((l)->a.counter), (n)))

/**
 * local_add_unless - add unless the number is a given value
 * @l: pointer of type local_t
 * @a: the amount to add to @l...
 * @u: ...unless @l is equal to @u.
 *
 * Atomically adds @a to @l, so long as it was not @u.
 * Returns non-zero if @l was not @u, and zero otherwise.
 */
static __inline__ int local_add_unless(local_t *l, long a, long u)
{
	long t;

	__asm__ __volatile__ (
"1:"	PPC_LLARX "%0,0,%1		# local_add_unless\n\
	cmpw	0,%0,%3 \n\
	beq-	2f \n\
	add	%0,%2,%0 \n"
	PPC405_ERR77(0,%2)
	PPC_STLCX "%0,0,%1 \n\
	bne-	1b \n"
"	subf	%0,%2,%0 \n\
2:"
	: "=&r" (t)
	: "r" (&(l->a.counter)), "r" (a), "r" (u)
	: "cc", "memory");

	return t != u;
}

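/**
 * local_inc_not_zero - increment unless the number is zero
 * @l: pointer of type local_t
 *
 * Atomically increments @l by 1, so long as @l is non-zero.
 * Returns non-zero if @l was non-zero, and zero otherwise.
 */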
#define local_inc_not_zero(l) local_add_unless((l), 1, 0)

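/**
 * local_sub_and_test - subtract value from variable and test result
 * @a: integer value to subtract
 * @l: pointer of type local_t
 *
 * Atomically subtracts @a from @l and returns true if the result is
 * zero, or false for all other cases.
 */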
#define local_sub_and_test(a, l)	(local_sub_return((a), (l)) == 0)
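/**
 * local_dec_and_test - decrement and test
 * @l: pointer of type local_t
 *
 * Atomically decrements @l by 1 and returns true if the result is
 * zero, or false for all other cases.
 */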
#define local_dec_and_test(l)		(local_dec_return((l)) == 0)

/*
 * Atomically test *l and decrement if it is greater than 0.
 * The function returns the old value of *l minus 1.
 */
static __inline__ long local_dec_if_positive(local_t *l)
{
	long t;

	__asm__ __volatile__(
"1:"	PPC_LLARX "%0,0,%1		# local_dec_if_positive\n\
	cmpwi	%0,1\n\
	addi	%0,%0,-1\n\
	blt-	2f\n"
	PPC405_ERR77(0,%1)
	PPC_STLCX "%0,0,%1\n\
	bne-	1b"
	"\n\
2:"	: "=&b" (t)
	: "r" (&(l->a.counter))
	: "cc", "memory");

	return t;
}

/* Use these for per-cpu local_t variables: on some archs they are
 * much more efficient than these naive implementations.  Note that the
 * cpu_local_* accessors below take a per-cpu variable, not an address.
 */

#define __local_inc(l)		((l)->a.counter++)
#define __local_dec(l)		((l)->a.counter--)
#define __local_add(i,l)	((l)->a.counter+=(i))
#define __local_sub(i,l)	((l)->a.counter-=(i))

/* We need to disable preemption around the cpu-local counters;
 * otherwise we could still end up accessing a variable of the previous
 * CPU in a non-atomic way. */
#define cpu_local_wrap_v(l)		\
	({ local_t res__;		\
	   preempt_disable();		\
	   res__ = (l);			\
	   preempt_enable();		\
	   res__; })
#define cpu_local_wrap(l)		\
	({ preempt_disable();		\
	   l;				\
	   preempt_enable(); })

#define cpu_local_read(l)    cpu_local_wrap_v(local_read(&__get_cpu_var(l)))
#define cpu_local_set(l, i)  cpu_local_wrap(local_set(&__get_cpu_var(l), (i)))
#define cpu_local_inc(l)     cpu_local_wrap(local_inc(&__get_cpu_var(l)))
#define cpu_local_dec(l)     cpu_local_wrap(local_dec(&__get_cpu_var(l)))
#define cpu_local_add(i, l)  cpu_local_wrap(local_add((i), &__get_cpu_var(l)))
#define cpu_local_sub(i, l)  cpu_local_wrap(local_sub((i), &__get_cpu_var(l)))

#define __cpu_local_inc(l)	cpu_local_inc(l)
#define __cpu_local_dec(l)	cpu_local_dec(l)
#define __cpu_local_add(i, l)	cpu_local_add((i), (l))
#define __cpu_local_sub(i, l)	cpu_local_sub((i), (l))

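/*
 * Usage sketch (illustrative only; the variable name is made up):
 *
 *	static DEFINE_PER_CPU(local_t, hits) = LOCAL_INIT(0);
 *
 *	cpu_local_inc(hits);		increment this CPU's counter
 *	n = cpu_local_read(hits);	read this CPU's counter back
 *
 * A cross-CPU total can be built with for_each_possible_cpu() and
 * local_read(&per_cpu(hits, cpu)), typically from a slow path.
 */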
#endif /* _ARCH_POWERPC_LOCAL_H */