/* Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>
 * Copyright (C) 2006 Kyle McMartin <kyle@parisc-linux.org>
 */

#ifndef _ASM_PARISC_ATOMIC_H_
#define _ASM_PARISC_ATOMIC_H_

#include <linux/types.h>
#include <asm/system.h>

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc..
 *
 * And probably incredibly slow on parisc.  OTOH, we don't
 * have to write any serious assembly.   prumpf
 */

#ifdef CONFIG_SMP
#include <asm/spinlock.h>
#include <asm/cache.h>		/* we use L1_CACHE_BYTES */

/* Use an array of spinlocks for our atomic_ts.
 * Hash function to index into a different SPINLOCK.
 * Since "a" is usually an address, use one spinlock per cacheline.
 */
# define ATOMIC_HASH_SIZE 4
# define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) a)/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ]))

extern raw_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
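/* Editorial illustration, not part of the original header: assuming
 * L1_CACHE_BYTES == 64, ATOMIC_HASH(0x1000) and ATOMIC_HASH(0x1040)
 * pick different locks (cachelines 64 and 65 -> buckets 0 and 1),
 * while ATOMIC_HASH(0x1100) collides with ATOMIC_HASH(0x1000) in
 * bucket 0.  A collision only costs extra contention on that lock,
 * never correctness.
 */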

/* Can't use raw_spin_lock_irq because of #include problems, so
 * this is the substitute */
#define _atomic_spin_lock_irqsave(l,f) do {	\
	raw_spinlock_t *s = ATOMIC_HASH(l);	\
	local_irq_save(f);			\
	__raw_spin_lock(s);			\
} while(0)

#define _atomic_spin_unlock_irqrestore(l,f) do {	\
	raw_spinlock_t *s = ATOMIC_HASH(l);		\
	__raw_spin_unlock(s);				\
	local_irq_restore(f);				\
} while(0)


#else
# define _atomic_spin_lock_irqsave(l,f) do { local_irq_save(f); } while (0)
# define _atomic_spin_unlock_irqrestore(l,f) do { local_irq_restore(f); } while (0)
#endif

/* This should get optimized out since it's never called.
** Or get a link error if xchg is used "wrong".
*/
extern void __xchg_called_with_bad_pointer(void);


/* __xchg32/64 defined in arch/parisc/lib/bitops.c */
extern unsigned long __xchg8(char, char *);
extern unsigned long __xchg32(int, int *);
#ifdef __LP64__
extern unsigned long __xchg64(unsigned long, unsigned long *);
#endif

/* optimizer better get rid of switch since size is a constant */
static __inline__ unsigned long
__xchg(unsigned long x, __volatile__ void * ptr, int size)
{
	switch(size) {
#ifdef __LP64__
	case 8: return __xchg64(x,(unsigned long *) ptr);
#endif
	case 4: return __xchg32((int) x, (int *) ptr);
	case 1: return __xchg8((char) x, (char *) ptr);
	}
	__xchg_called_with_bad_pointer();
	return x;
}


/*
** REVISIT - Abandoned use of LDCW in xchg() for now:
** o need to test sizeof(*ptr) to avoid clearing adjacent bytes
** o and while we are at it, could __LP64__ code use LDCD too?
**
** if (__builtin_constant_p(x) && (x == NULL))
**	if (((unsigned long)p & 0xf) == 0)
**		return __ldcw(p);
*/
#define xchg(ptr,x) \
	((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
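
/* Editorial sketch, not part of the original header: a hypothetical use
 * of xchg() as a one-word test-and-set lock.  All names below are made
 * up for illustration and the block is never compiled.
 */
#if 0
static int example_lock_word;

static __inline__ int example_trylock(void)
{
	/* Atomically store 1 and fetch the previous value; 0 means we
	 * are the ones who took the lock. */
	return xchg(&example_lock_word, 1) == 0;
}

static __inline__ void example_unlock(void)
{
	xchg(&example_lock_word, 0);
}
#endif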


#define __HAVE_ARCH_CMPXCHG	1

/* bug catcher for when unsupported size is used - won't link */
extern void __cmpxchg_called_with_bad_pointer(void);

/* __cmpxchg_u32/u64 defined in arch/parisc/lib/bitops.c */
extern unsigned long __cmpxchg_u32(volatile unsigned int *m, unsigned int old, unsigned int new_);
extern unsigned long __cmpxchg_u64(volatile unsigned long *ptr, unsigned long old, unsigned long new_);

/* don't worry...optimizer will get rid of most of this */
static __inline__ unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size)
{
	switch(size) {
#ifdef __LP64__
	case 8: return __cmpxchg_u64((unsigned long *)ptr, old, new_);
#endif
	case 4: return __cmpxchg_u32((unsigned int *)ptr, (unsigned int) old, (unsigned int) new_);
	}
	__cmpxchg_called_with_bad_pointer();
	return old;
}

#define cmpxchg(ptr,o,n)						 \
	({								 \
		__typeof__(*(ptr)) _o_ = (o);				 \
		__typeof__(*(ptr)) _n_ = (n);				 \
		(__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \
				(unsigned long)_n_, sizeof(*(ptr)));	 \
	})
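
/* Editorial sketch, not part of the original header: a hypothetical
 * compare-and-swap retry loop built on cmpxchg(), adding to a counter
 * only while it stays below a cap.  Names are invented and the block
 * is never compiled.
 */
#if 0
static __inline__ int example_add_capped(volatile int *p, int delta, int cap)
{
	int old, new_;

	do {
		old = *p;
		new_ = old + delta;
		if (new_ > cap)
			return 0;		/* would exceed the cap */
	} while (cmpxchg(p, old, new_) != old);	/* lost a race, retry */

	return 1;
}
#endif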

/* Note that we need not lock read accesses - aligned word writes/reads
 * are atomic, so a reader never sees inconsistent values.
 *
 * Cache-line alignment would conflict with, for example, linux/module.h
 */

typedef struct { volatile int counter; } atomic_t;

/* It's possible to reduce all atomic operations to __atomic_add_return,
 * atomic_set and atomic_read (the latter is there only for consistency).
 */

static __inline__ int __atomic_add_return(int i, atomic_t *v)
{
	int ret;
	unsigned long flags;
	_atomic_spin_lock_irqsave(v, flags);

	ret = (v->counter += i);

	_atomic_spin_unlock_irqrestore(v, flags);
	return ret;
}

static __inline__ void atomic_set(atomic_t *v, int i)
{
	unsigned long flags;
	_atomic_spin_lock_irqsave(v, flags);

	v->counter = i;

	_atomic_spin_unlock_irqrestore(v, flags);
}

static __inline__ int atomic_read(const atomic_t *v)
{
	return v->counter;
}

/* exported interface */
#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

/**
 * atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */
#define atomic_add_unless(v, a, u)					\
({									\
	int c, old;							\
	c = atomic_read(v);						\
	while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
		c = old;						\
	c != (u);							\
})
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
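
/* Editorial sketch, not part of the original header: a hypothetical
 * "take a reference only while the object is still live" helper using
 * atomic_inc_not_zero().  The struct and function are made up and the
 * block is never compiled.
 */
#if 0
struct example_obj {
	atomic_t refcount;
};

static __inline__ int example_obj_tryget(struct example_obj *obj)
{
	/* Succeeds, and takes a reference, only while refcount > 0;
	 * once it has hit zero the object is already being torn down. */
	return atomic_inc_not_zero(&obj->refcount);
}
#endif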

#define atomic_add(i,v)	((void)(__atomic_add_return( ((int)i),(v))))
#define atomic_sub(i,v)	((void)(__atomic_add_return(-((int)i),(v))))
#define atomic_inc(v)	((void)(__atomic_add_return(   1,(v))))
#define atomic_dec(v)	((void)(__atomic_add_return(  -1,(v))))

#define atomic_add_return(i,v)	(__atomic_add_return( ((int)i),(v)))
#define atomic_sub_return(i,v)	(__atomic_add_return(-((int)i),(v)))
#define atomic_inc_return(v)	(__atomic_add_return(   1,(v)))
#define atomic_dec_return(v)	(__atomic_add_return(  -1,(v)))

#define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)

/*
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic_inc_and_test(v)	(atomic_inc_return(v) == 0)

#define atomic_dec_and_test(v)	(atomic_dec_return(v) == 0)

#define atomic_sub_and_test(i,v)	(atomic_sub_return((i),(v)) == 0)
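
/* Editorial sketch, not part of the original header: the classic
 * reference-count "put" built on atomic_dec_and_test().  Exactly one
 * caller observes the drop to zero, so the release callback runs once.
 * Names are invented and the block is never compiled.
 */
#if 0
static __inline__ void example_put(atomic_t *refcount, void (*release)(void))
{
	if (atomic_dec_and_test(refcount))
		release();
}
#endif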

#define ATOMIC_INIT(i)	((atomic_t) { (i) })

#define smp_mb__before_atomic_dec()	smp_mb()
#define smp_mb__after_atomic_dec()	smp_mb()
#define smp_mb__before_atomic_inc()	smp_mb()
#define smp_mb__after_atomic_inc()	smp_mb()
#ifdef __LP64__

typedef struct { volatile s64 counter; } atomic64_t;

#define ATOMIC64_INIT(i) ((atomic64_t) { (i) })

static __inline__ s64
__atomic64_add_return(s64 i, atomic64_t *v)
{
	s64 ret;
	unsigned long flags;
	_atomic_spin_lock_irqsave(v, flags);

	ret = (v->counter += i);

	_atomic_spin_unlock_irqrestore(v, flags);
	return ret;
}

static __inline__ void
atomic64_set(atomic64_t *v, s64 i)
{
	unsigned long flags;
	_atomic_spin_lock_irqsave(v, flags);

	v->counter = i;

	_atomic_spin_unlock_irqrestore(v, flags);
}

static __inline__ s64
atomic64_read(const atomic64_t *v)
{
	return v->counter;
}

#define atomic64_add(i,v)	((void)(__atomic64_add_return( ((s64)i),(v))))
#define atomic64_sub(i,v)	((void)(__atomic64_add_return(-((s64)i),(v))))
#define atomic64_inc(v)		((void)(__atomic64_add_return(   1,(v))))
#define atomic64_dec(v)		((void)(__atomic64_add_return(  -1,(v))))

#define atomic64_add_return(i,v)	(__atomic64_add_return( ((s64)i),(v)))
#define atomic64_sub_return(i,v)	(__atomic64_add_return(-((s64)i),(v)))
#define atomic64_inc_return(v)		(__atomic64_add_return(   1,(v)))
#define atomic64_dec_return(v)		(__atomic64_add_return(  -1,(v)))

#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)

#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
#define atomic64_dec_and_test(v)	(atomic64_dec_return(v) == 0)
#define atomic64_sub_and_test(i,v)	(atomic64_sub_return((i),(v)) == 0)

#endif /* __LP64__ */

#include <asm-generic/atomic.h>

#endif /* _ASM_PARISC_ATOMIC_H_ */