#ifndef _ASM_IA64_ATOMIC_H
#define _ASM_IA64_ATOMIC_H

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc.
 *
 * NOTE: don't mess with the types below!  The "unsigned long" and
 * "int" types were carefully placed so as to ensure proper operation
 * of the macros.
 *
 * Copyright (C) 1998, 1999, 2002-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <linux/types.h>

#include <asm/intrinsics.h>

/*
 * On IA-64, counter must always be volatile to ensure that the
 * memory accesses are ordered.
 */
typedef struct { volatile __s32 counter; } atomic_t;
typedef struct { volatile __s64 counter; } atomic64_t;

#define ATOMIC_INIT(i)		((atomic_t) { (i) })
#define ATOMIC64_INIT(i)	((atomic64_t) { (i) })

#define atomic_read(v)		((v)->counter)
#define atomic64_read(v)	((v)->counter)

#define atomic_set(v,i)		(((v)->counter) = (i))
#define atomic64_set(v,i)	(((v)->counter) = (i))

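/*
 * Typical usage (an illustrative sketch only; "nr_users" and "cleanup()"
 * are made-up names, not part of this header):
 *
 *	static atomic_t nr_users = ATOMIC_INIT(0);
 *
 *	atomic_inc(&nr_users);
 *	...
 *	if (atomic_dec_and_test(&nr_users))
 *		cleanup();
 *
 * The ia64_atomic_add/sub() helpers below are the generic slow path:
 * they loop on cmpxchg.acq until the counter was updated without
 * interference.  The atomic_add_return() wrappers further down prefer a
 * single fetchadd when the addend allows it.
 */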
static __inline__ int
ia64_atomic_add (int i, atomic_t *v)
{
	__s32 old, new;
	CMPXCHG_BUGCHECK_DECL

	do {
		CMPXCHG_BUGCHECK(v);
		old = atomic_read(v);
		new = old + i;
	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old);
	return new;
}

static __inline__ __s64
ia64_atomic64_add (__s64 i, atomic64_t *v)
{
	__s64 old, new;
	CMPXCHG_BUGCHECK_DECL

	do {
		CMPXCHG_BUGCHECK(v);
		old = atomic64_read(v);
		new = old + i;
	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old);
	return new;
}

static __inline__ int
ia64_atomic_sub (int i, atomic_t *v)
{
	__s32 old, new;
	CMPXCHG_BUGCHECK_DECL

	do {
		CMPXCHG_BUGCHECK(v);
		old = atomic_read(v);
		new = old - i;
	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old);
	return new;
}

static __inline__ __s64
ia64_atomic64_sub (__s64 i, atomic64_t *v)
{
	__s64 old, new;
	CMPXCHG_BUGCHECK_DECL

	do {
		CMPXCHG_BUGCHECK(v);
		old = atomic64_read(v);
		new = old - i;
	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old);
	return new;
}

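/*
 * atomic_cmpxchg()/atomic_xchg() expose the raw compare-and-exchange and
 * exchange primitives on the counter.  A hedged example (the "claimed"
 * flag below is hypothetical, not part of this header):
 *
 *	static atomic_t claimed = ATOMIC_INIT(0);
 *
 *	if (atomic_cmpxchg(&claimed, 0, 1) == 0) {
 *		... we won the race and own the resource ...
 *	}
 *
 * atomic_cmpxchg() returns the value the counter held before the
 * operation, so comparing it with the expected old value tells the
 * caller whether the exchange actually happened.
 */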
#define atomic_cmpxchg(v, old, new) ((int)cmpxchg(&((v)->counter), old, new))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

#define atomic_add_unless(v, a, u)				\
({								\
	int c, old;						\
	c = atomic_read(v);					\
	for (;;) {						\
		if (unlikely(c == (u)))				\
			break;					\
		old = atomic_cmpxchg((v), c, c + (a));		\
		if (likely(old == c))				\
			break;					\
		c = old;					\
	}							\
	c != (u);						\
})
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
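/*
 * atomic_add_unless(v, a, u) adds "a" to the counter only while its value
 * is not "u", retrying the cmpxchg if another CPU changed the counter in
 * the meantime; it evaluates to non-zero when the addition was performed.
 * atomic_inc_not_zero() is the usual special case for reference counts
 * that may already have dropped to zero.  Illustrative sketch only, the
 * "obj->refcnt" field is hypothetical:
 *
 *	if (!atomic_inc_not_zero(&obj->refcnt))
 *		return NULL;
 */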
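/*
 * The constant checks below mirror the immediates accepted by the ia64
 * fetchadd4/fetchadd8 instructions, which can only add -16, -8, -4, -1,
 * 1, 4, 8 or 16.  For those values a single ia64_fetch_and_add() is
 * emitted; anything else falls back to the cmpxchg loop in
 * ia64_atomic_add()/ia64_atomic64_add().
 */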
#define atomic_add_return(i,v)						\
({									\
	int __ia64_aar_i = (i);						\
	(__builtin_constant_p(i)					\
	 && (   (__ia64_aar_i ==  1) || (__ia64_aar_i ==   4)		\
	     || (__ia64_aar_i ==  8) || (__ia64_aar_i ==  16)		\
	     || (__ia64_aar_i == -1) || (__ia64_aar_i ==  -4)		\
	     || (__ia64_aar_i == -8) || (__ia64_aar_i == -16)))	\
		? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter)	\
		: ia64_atomic_add(__ia64_aar_i, v);			\
})

#define atomic64_add_return(i,v)					\
({									\
	long __ia64_aar_i = (i);					\
	(__builtin_constant_p(i)					\
	 && (   (__ia64_aar_i ==  1) || (__ia64_aar_i ==   4)		\
	     || (__ia64_aar_i ==  8) || (__ia64_aar_i ==  16)		\
	     || (__ia64_aar_i == -1) || (__ia64_aar_i ==  -4)		\
	     || (__ia64_aar_i == -8) || (__ia64_aar_i == -16)))	\
		? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter)	\
		: ia64_atomic64_add(__ia64_aar_i, v);			\
})

/*
 * Atomically add I to V and return TRUE if the resulting value is
 * negative.
 */
static __inline__ int
atomic_add_negative (int i, atomic_t *v)
{
	return atomic_add_return(i, v) < 0;
}

static __inline__ int
atomic64_add_negative (__s64 i, atomic64_t *v)
{
	return atomic64_add_return(i, v) < 0;
}

#define atomic_sub_return(i,v)						\
({									\
	int __ia64_asr_i = (i);						\
	(__builtin_constant_p(i)					\
	 && (   (__ia64_asr_i ==  1) || (__ia64_asr_i ==   4)		\
	     || (__ia64_asr_i ==  8) || (__ia64_asr_i ==  16)		\
	     || (__ia64_asr_i == -1) || (__ia64_asr_i ==  -4)		\
	     || (__ia64_asr_i == -8) || (__ia64_asr_i == -16)))	\
		? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter)	\
		: ia64_atomic_sub(__ia64_asr_i, v);			\
})

#define atomic64_sub_return(i,v)					\
({									\
	long __ia64_asr_i = (i);					\
	(__builtin_constant_p(i)					\
	 && (   (__ia64_asr_i ==  1) || (__ia64_asr_i ==   4)		\
	     || (__ia64_asr_i ==  8) || (__ia64_asr_i ==  16)		\
	     || (__ia64_asr_i == -1) || (__ia64_asr_i ==  -4)		\
	     || (__ia64_asr_i == -8) || (__ia64_asr_i == -16)))	\
		? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter)	\
		: ia64_atomic64_sub(__ia64_asr_i, v);			\
})

#define atomic_dec_return(v)		atomic_sub_return(1, (v))
#define atomic_inc_return(v)		atomic_add_return(1, (v))
#define atomic64_dec_return(v)		atomic64_sub_return(1, (v))
#define atomic64_inc_return(v)		atomic64_add_return(1, (v))
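/*
 * atomic_inc_return() and friends return the value of the counter after
 * the update, which makes them handy for handing out unique sequence
 * numbers.  Sketch only; "seq" is a made-up counter:
 *
 *	static atomic_t seq = ATOMIC_INIT(0);
 *	int id = atomic_inc_return(&seq);
 */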

#define atomic_sub_and_test(i,v)	(atomic_sub_return((i), (v)) == 0)
#define atomic_dec_and_test(v)		(atomic_sub_return(1, (v)) == 0)
#define atomic_inc_and_test(v)		(atomic_add_return(1, (v)) == 0)
#define atomic64_sub_and_test(i,v)	(atomic64_sub_return((i), (v)) == 0)
#define atomic64_dec_and_test(v)	(atomic64_sub_return(1, (v)) == 0)
#define atomic64_inc_and_test(v)	(atomic64_add_return(1, (v)) == 0)
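/*
 * The *_and_test() forms report whether the counter reached zero after
 * the update.  atomic_dec_and_test() is the classic "drop a reference
 * and free on the last put" pattern (sketch only; "obj" and the kfree()
 * call are illustrative, not mandated by this header):
 *
 *	if (atomic_dec_and_test(&obj->refcnt))
 *		kfree(obj);
 */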

#define atomic_add(i,v)			atomic_add_return((i), (v))
#define atomic_sub(i,v)			atomic_sub_return((i), (v))
#define atomic_inc(v)			atomic_add(1, (v))
#define atomic_dec(v)			atomic_sub(1, (v))

#define atomic64_add(i,v)		atomic64_add_return((i), (v))
#define atomic64_sub(i,v)		atomic64_sub_return((i), (v))
#define atomic64_inc(v)			atomic64_add(1, (v))
#define atomic64_dec(v)			atomic64_sub(1, (v))

/* Atomic operations are already serializing */
#define smp_mb__before_atomic_dec()	barrier()
#define smp_mb__after_atomic_dec()	barrier()
#define smp_mb__before_atomic_inc()	barrier()
#define smp_mb__after_atomic_inc()	barrier()

#include <asm-generic/atomic.h>
#endif /* _ASM_IA64_ATOMIC_H */