/* Atomic operations usable in machine independent code */
#ifndef _LINUX_ATOMIC_H
#define _LINUX_ATOMIC_H
#include <asm/atomic.h>
#include <asm/barrier.h>

/*
 * Relaxed variants of xchg, cmpxchg and some atomic operations.
 *
 * We support four variants:
 *
 * - Fully ordered: The default implementation, no suffix required.
 * - Acquire: Provides ACQUIRE semantics, _acquire suffix.
 * - Release: Provides RELEASE semantics, _release suffix.
 * - Relaxed: No ordering guarantees, _relaxed suffix.
 *
 * For compound atomics performing both a load and a store, ACQUIRE
 * semantics apply only to the load and RELEASE semantics only to the
 * store portion of the operation. Note that a failed cmpxchg_acquire
 * does -not- imply any memory ordering constraints.
 *
 * See Documentation/memory-barriers.txt for ACQUIRE/RELEASE definitions.
 */

#ifndef atomic_read_acquire
#define atomic_read_acquire(v)		smp_load_acquire(&(v)->counter)
#endif

#ifndef atomic_set_release
#define atomic_set_release(v, i)	smp_store_release(&(v)->counter, (i))
#endif

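/*
 * Illustrative sketch, not part of the kernel API: a message-passing
 * pattern using the ACQUIRE/RELEASE accessors above. The names
 * example_publish()/example_consume() are hypothetical. The release
 * store orders the payload write before the flag update; the matching
 * acquire load orders the flag read before the payload read.
 */
static inline void example_publish(int *payload, atomic_t *ready)
{
	*payload = 42;			/* prepare the data... */
	atomic_set_release(ready, 1);	/* ...then publish it */
}

static inline int example_consume(int *payload, atomic_t *ready)
{
	if (atomic_read_acquire(ready))	/* pairs with the release above */
		return *payload;	/* guaranteed to observe 42 */
	return -1;
}
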
/*
 * The idea here is to build acquire/release variants by adding explicit
 * barriers on top of the relaxed variant. In the case where the relaxed
 * variant is already fully ordered, no additional barriers are needed.
 *
 * Additionally, if an arch has a special barrier for acquire/release, it
 * can implement its own __atomic_op_* helpers and reuse this framework to
 * build the variants.
 */
#ifndef __atomic_op_acquire
#define __atomic_op_acquire(op, args...)				\
({									\
	typeof(op##_relaxed(args)) __ret = op##_relaxed(args);		\
	smp_mb__after_atomic();						\
	__ret;								\
})
#endif

#ifndef __atomic_op_release
#define __atomic_op_release(op, args...)				\
({									\
	smp_mb__before_atomic();					\
	op##_relaxed(args);						\
})
#endif

#ifndef __atomic_op_fence
#define __atomic_op_fence(op, args...)					\
({									\
	typeof(op##_relaxed(args)) __ret;				\
	smp_mb__before_atomic();					\
	__ret = op##_relaxed(args);					\
	smp_mb__after_atomic();						\
	__ret;								\
})
#endif

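/*
 * Illustrative expansion, assuming an arch that provides only
 * atomic_add_return_relaxed(): with the helpers above,
 *
 *	atomic_add_return_acquire(i, v)
 *
 * expands to roughly
 *
 *	({
 *		int __ret = atomic_add_return_relaxed(i, v);
 *		smp_mb__after_atomic();
 *		__ret;
 *	})
 *
 * An arch with a cheaper acquire barrier could instead define its own
 * __atomic_op_acquire before this point, built on some hypothetical
 * arch_acquire_barrier() of its own:
 *
 *	#define __atomic_op_acquire(op, args...)			  \
 *	({								  \
 *		typeof(op##_relaxed(args)) __ret = op##_relaxed(args);	  \
 *		arch_acquire_barrier();					  \
 *		__ret;							  \
 *	})
 */
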
/* atomic_add_return_relaxed */
#ifndef atomic_add_return_relaxed
#define atomic_add_return_relaxed	atomic_add_return
#define atomic_add_return_acquire	atomic_add_return
#define atomic_add_return_release	atomic_add_return

#else /* atomic_add_return_relaxed */

#ifndef atomic_add_return_acquire
#define atomic_add_return_acquire(...)					\
	__atomic_op_acquire(atomic_add_return, __VA_ARGS__)
#endif

#ifndef atomic_add_return_release
#define atomic_add_return_release(...)					\
	__atomic_op_release(atomic_add_return, __VA_ARGS__)
#endif

#ifndef atomic_add_return
#define atomic_add_return(...)						\
	__atomic_op_fence(atomic_add_return, __VA_ARGS__)
#endif
#endif /* atomic_add_return_relaxed */

/* atomic_inc_return_relaxed */
#ifndef atomic_inc_return_relaxed
#define atomic_inc_return_relaxed	atomic_inc_return
#define atomic_inc_return_acquire	atomic_inc_return
#define atomic_inc_return_release	atomic_inc_return

#else /* atomic_inc_return_relaxed */

#ifndef atomic_inc_return_acquire
#define atomic_inc_return_acquire(...)					\
	__atomic_op_acquire(atomic_inc_return, __VA_ARGS__)
#endif

#ifndef atomic_inc_return_release
#define atomic_inc_return_release(...)					\
	__atomic_op_release(atomic_inc_return, __VA_ARGS__)
#endif

#ifndef atomic_inc_return
#define atomic_inc_return(...)						\
	__atomic_op_fence(atomic_inc_return, __VA_ARGS__)
#endif
#endif /* atomic_inc_return_relaxed */

/* atomic_sub_return_relaxed */
#ifndef atomic_sub_return_relaxed
#define atomic_sub_return_relaxed	atomic_sub_return
#define atomic_sub_return_acquire	atomic_sub_return
#define atomic_sub_return_release	atomic_sub_return

#else /* atomic_sub_return_relaxed */

#ifndef atomic_sub_return_acquire
#define atomic_sub_return_acquire(...)					\
	__atomic_op_acquire(atomic_sub_return, __VA_ARGS__)
#endif

#ifndef atomic_sub_return_release
#define atomic_sub_return_release(...)					\
	__atomic_op_release(atomic_sub_return, __VA_ARGS__)
#endif

#ifndef atomic_sub_return
#define atomic_sub_return(...)						\
	__atomic_op_fence(atomic_sub_return, __VA_ARGS__)
#endif
#endif /* atomic_sub_return_relaxed */

/* atomic_dec_return_relaxed */
#ifndef atomic_dec_return_relaxed
#define atomic_dec_return_relaxed	atomic_dec_return
#define atomic_dec_return_acquire	atomic_dec_return
#define atomic_dec_return_release	atomic_dec_return

#else /* atomic_dec_return_relaxed */

#ifndef atomic_dec_return_acquire
#define atomic_dec_return_acquire(...)					\
	__atomic_op_acquire(atomic_dec_return, __VA_ARGS__)
#endif

#ifndef atomic_dec_return_release
#define atomic_dec_return_release(...)					\
	__atomic_op_release(atomic_dec_return, __VA_ARGS__)
#endif

#ifndef atomic_dec_return
#define atomic_dec_return(...)						\
	__atomic_op_fence(atomic_dec_return, __VA_ARGS__)
#endif
#endif /* atomic_dec_return_relaxed */

/* atomic_xchg_relaxed */
#ifndef atomic_xchg_relaxed
#define atomic_xchg_relaxed		atomic_xchg
#define atomic_xchg_acquire		atomic_xchg
#define atomic_xchg_release		atomic_xchg

#else /* atomic_xchg_relaxed */

#ifndef atomic_xchg_acquire
#define atomic_xchg_acquire(...)					\
	__atomic_op_acquire(atomic_xchg, __VA_ARGS__)
#endif

#ifndef atomic_xchg_release
#define atomic_xchg_release(...)					\
	__atomic_op_release(atomic_xchg, __VA_ARGS__)
#endif

#ifndef atomic_xchg
#define atomic_xchg(...)						\
	__atomic_op_fence(atomic_xchg, __VA_ARGS__)
#endif
#endif /* atomic_xchg_relaxed */

/* atomic_cmpxchg_relaxed */
#ifndef atomic_cmpxchg_relaxed
#define atomic_cmpxchg_relaxed		atomic_cmpxchg
#define atomic_cmpxchg_acquire		atomic_cmpxchg
#define atomic_cmpxchg_release		atomic_cmpxchg

#else /* atomic_cmpxchg_relaxed */

#ifndef atomic_cmpxchg_acquire
#define atomic_cmpxchg_acquire(...)					\
	__atomic_op_acquire(atomic_cmpxchg, __VA_ARGS__)
#endif

#ifndef atomic_cmpxchg_release
#define atomic_cmpxchg_release(...)					\
	__atomic_op_release(atomic_cmpxchg, __VA_ARGS__)
#endif

#ifndef atomic_cmpxchg
#define atomic_cmpxchg(...)						\
	__atomic_op_fence(atomic_cmpxchg, __VA_ARGS__)
#endif
#endif /* atomic_cmpxchg_relaxed */

#ifndef atomic64_read_acquire
#define atomic64_read_acquire(v)	smp_load_acquire(&(v)->counter)
#endif

#ifndef atomic64_set_release
#define atomic64_set_release(v, i)	smp_store_release(&(v)->counter, (i))
#endif

/* atomic64_add_return_relaxed */
#ifndef atomic64_add_return_relaxed
#define atomic64_add_return_relaxed	atomic64_add_return
#define atomic64_add_return_acquire	atomic64_add_return
#define atomic64_add_return_release	atomic64_add_return

#else /* atomic64_add_return_relaxed */

#ifndef atomic64_add_return_acquire
#define atomic64_add_return_acquire(...)				\
	__atomic_op_acquire(atomic64_add_return, __VA_ARGS__)
#endif

#ifndef atomic64_add_return_release
#define atomic64_add_return_release(...)				\
	__atomic_op_release(atomic64_add_return, __VA_ARGS__)
#endif

#ifndef atomic64_add_return
#define atomic64_add_return(...)					\
	__atomic_op_fence(atomic64_add_return, __VA_ARGS__)
#endif
#endif /* atomic64_add_return_relaxed */

/* atomic64_inc_return_relaxed */
#ifndef atomic64_inc_return_relaxed
#define atomic64_inc_return_relaxed	atomic64_inc_return
#define atomic64_inc_return_acquire	atomic64_inc_return
#define atomic64_inc_return_release	atomic64_inc_return

#else /* atomic64_inc_return_relaxed */

#ifndef atomic64_inc_return_acquire
#define atomic64_inc_return_acquire(...)				\
	__atomic_op_acquire(atomic64_inc_return, __VA_ARGS__)
#endif

#ifndef atomic64_inc_return_release
#define atomic64_inc_return_release(...)				\
	__atomic_op_release(atomic64_inc_return, __VA_ARGS__)
#endif

#ifndef atomic64_inc_return
#define atomic64_inc_return(...)					\
	__atomic_op_fence(atomic64_inc_return, __VA_ARGS__)
#endif
#endif /* atomic64_inc_return_relaxed */

/* atomic64_sub_return_relaxed */
#ifndef atomic64_sub_return_relaxed
#define atomic64_sub_return_relaxed	atomic64_sub_return
#define atomic64_sub_return_acquire	atomic64_sub_return
#define atomic64_sub_return_release	atomic64_sub_return

#else /* atomic64_sub_return_relaxed */

#ifndef atomic64_sub_return_acquire
#define atomic64_sub_return_acquire(...)				\
	__atomic_op_acquire(atomic64_sub_return, __VA_ARGS__)
#endif

#ifndef atomic64_sub_return_release
#define atomic64_sub_return_release(...)				\
	__atomic_op_release(atomic64_sub_return, __VA_ARGS__)
#endif

#ifndef atomic64_sub_return
#define atomic64_sub_return(...)					\
	__atomic_op_fence(atomic64_sub_return, __VA_ARGS__)
#endif
#endif /* atomic64_sub_return_relaxed */

/* atomic64_dec_return_relaxed */
#ifndef atomic64_dec_return_relaxed
#define atomic64_dec_return_relaxed	atomic64_dec_return
#define atomic64_dec_return_acquire	atomic64_dec_return
#define atomic64_dec_return_release	atomic64_dec_return

#else /* atomic64_dec_return_relaxed */

#ifndef atomic64_dec_return_acquire
#define atomic64_dec_return_acquire(...)				\
	__atomic_op_acquire(atomic64_dec_return, __VA_ARGS__)
#endif

#ifndef atomic64_dec_return_release
#define atomic64_dec_return_release(...)				\
	__atomic_op_release(atomic64_dec_return, __VA_ARGS__)
#endif

#ifndef atomic64_dec_return
#define atomic64_dec_return(...)					\
	__atomic_op_fence(atomic64_dec_return, __VA_ARGS__)
#endif
#endif /* atomic64_dec_return_relaxed */

/* atomic64_xchg_relaxed */
#ifndef atomic64_xchg_relaxed
#define atomic64_xchg_relaxed		atomic64_xchg
#define atomic64_xchg_acquire		atomic64_xchg
#define atomic64_xchg_release		atomic64_xchg

#else /* atomic64_xchg_relaxed */

#ifndef atomic64_xchg_acquire
#define atomic64_xchg_acquire(...)					\
	__atomic_op_acquire(atomic64_xchg, __VA_ARGS__)
#endif

#ifndef atomic64_xchg_release
#define atomic64_xchg_release(...)					\
	__atomic_op_release(atomic64_xchg, __VA_ARGS__)
#endif

#ifndef atomic64_xchg
#define atomic64_xchg(...)						\
	__atomic_op_fence(atomic64_xchg, __VA_ARGS__)
#endif
#endif /* atomic64_xchg_relaxed */

/* atomic64_cmpxchg_relaxed */
#ifndef atomic64_cmpxchg_relaxed
#define atomic64_cmpxchg_relaxed	atomic64_cmpxchg
#define atomic64_cmpxchg_acquire	atomic64_cmpxchg
#define atomic64_cmpxchg_release	atomic64_cmpxchg

#else /* atomic64_cmpxchg_relaxed */

#ifndef atomic64_cmpxchg_acquire
#define atomic64_cmpxchg_acquire(...)					\
	__atomic_op_acquire(atomic64_cmpxchg, __VA_ARGS__)
#endif

#ifndef atomic64_cmpxchg_release
#define atomic64_cmpxchg_release(...)					\
	__atomic_op_release(atomic64_cmpxchg, __VA_ARGS__)
#endif

#ifndef atomic64_cmpxchg
#define atomic64_cmpxchg(...)						\
	__atomic_op_fence(atomic64_cmpxchg, __VA_ARGS__)
#endif
#endif /* atomic64_cmpxchg_relaxed */

/* cmpxchg_relaxed */
#ifndef cmpxchg_relaxed
#define cmpxchg_relaxed			cmpxchg
#define cmpxchg_acquire			cmpxchg
#define cmpxchg_release			cmpxchg

#else /* cmpxchg_relaxed */

#ifndef cmpxchg_acquire
#define cmpxchg_acquire(...)						\
	__atomic_op_acquire(cmpxchg, __VA_ARGS__)
#endif

#ifndef cmpxchg_release
#define cmpxchg_release(...)						\
	__atomic_op_release(cmpxchg, __VA_ARGS__)
#endif

#ifndef cmpxchg
#define cmpxchg(...)							\
	__atomic_op_fence(cmpxchg, __VA_ARGS__)
#endif
#endif /* cmpxchg_relaxed */

/* cmpxchg64_relaxed */
#ifndef cmpxchg64_relaxed
#define cmpxchg64_relaxed		cmpxchg64
#define cmpxchg64_acquire		cmpxchg64
#define cmpxchg64_release		cmpxchg64

#else /* cmpxchg64_relaxed */

#ifndef cmpxchg64_acquire
#define cmpxchg64_acquire(...)						\
	__atomic_op_acquire(cmpxchg64, __VA_ARGS__)
#endif

#ifndef cmpxchg64_release
#define cmpxchg64_release(...)						\
	__atomic_op_release(cmpxchg64, __VA_ARGS__)
#endif

#ifndef cmpxchg64
#define cmpxchg64(...)							\
	__atomic_op_fence(cmpxchg64, __VA_ARGS__)
#endif
#endif /* cmpxchg64_relaxed */

/* xchg_relaxed */
#ifndef xchg_relaxed
#define xchg_relaxed			xchg
#define xchg_acquire			xchg
#define xchg_release			xchg

#else /* xchg_relaxed */

#ifndef xchg_acquire
#define xchg_acquire(...)		__atomic_op_acquire(xchg, __VA_ARGS__)
#endif

#ifndef xchg_release
#define xchg_release(...)		__atomic_op_release(xchg, __VA_ARGS__)
#endif

#ifndef xchg
#define xchg(...)			__atomic_op_fence(xchg, __VA_ARGS__)
#endif
#endif /* xchg_relaxed */

/**
 * atomic_add_unless - add unless the number is already a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as @v was not already @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */
static inline int atomic_add_unless(atomic_t *v, int a, int u)
{
	return __atomic_add_unless(v, a, u) != u;
}

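/*
 * Illustrative sketch, not part of the kernel API: atomic_add_unless()
 * as a capped counter. The name example_try_get_slot() and the cap of
 * 64 are hypothetical.
 */
static inline int example_try_get_slot(atomic_t *nr_users)
{
	/* Adds 1 unless the counter already sits at the cap; 0 on failure. */
	return atomic_add_unless(nr_users, 1, 64);
}
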
/**
 * atomic_inc_not_zero - increment unless the number is zero
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1, so long as @v is non-zero.
 * Returns non-zero if @v was non-zero, and zero otherwise.
 */
#ifndef atomic_inc_not_zero
#define atomic_inc_not_zero(v)		atomic_add_unless((v), 1, 0)
#endif

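/*
 * Illustrative use, with a hypothetical example_lookup() and obj->refs
 * field: the classic lookup-side refcounting pattern, typically under
 * RCU. Once the count has dropped to zero the object may already be on
 * its way to being freed, so taking a new reference must fail rather
 * than resurrect it:
 *
 *	rcu_read_lock();
 *	obj = example_lookup(key);
 *	if (obj && !atomic_inc_not_zero(&obj->refs))
 *		obj = NULL;
 *	rcu_read_unlock();
 */
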
#ifndef atomic_andnot
static inline void atomic_andnot(int i, atomic_t *v)
{
	atomic_and(~i, v);
}
#endif

/* Deprecated mask helpers: use atomic_andnot() and atomic_or() directly. */
static inline __deprecated void atomic_clear_mask(unsigned int mask, atomic_t *v)
{
	atomic_andnot(mask, v);
}

static inline __deprecated void atomic_set_mask(unsigned int mask, atomic_t *v)
{
	atomic_or(mask, v);
}

/**
 * atomic_inc_not_zero_hint - increment if not null
 * @v: pointer of type atomic_t
 * @hint: probable value of the atomic before the increment
 *
 * This version of atomic_inc_not_zero() gives a hint of the probable
 * value of the atomic. This helps the processor avoid reading the memory
 * location before starting the atomic read/modify/write cycle, lowering
 * the number of bus transactions on some arches.
 *
 * Returns: 0 if the increment was not done, 1 otherwise.
 */
#ifndef atomic_inc_not_zero_hint
static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
{
	int val, c = hint;

	/* sanity test, should be removed by compiler if hint is a constant */
	if (!hint)
		return atomic_inc_not_zero(v);

	do {
		val = atomic_cmpxchg(v, c, c + 1);
		if (val == c)
			return 1;
		c = val;
	} while (c);

	return 0;
}
#endif

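/*
 * Illustrative use, with a hypothetical obj->refs field: when objects in
 * some cache are usually held by exactly one owner, the refcount is most
 * likely 1 when another user tries to grab it, so pass that as the hint:
 *
 *	if (!atomic_inc_not_zero_hint(&obj->refs, 1))
 *		return NULL;
 */
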
/*
 * atomic_inc_unless_negative - increment unless the number is negative
 * @p: pointer of type atomic_t
 *
 * Atomically increments @p by 1, so long as @p is not negative.
 * Returns 1 if the increment happened, and 0 if @p was negative.
 */
#ifndef atomic_inc_unless_negative
static inline int atomic_inc_unless_negative(atomic_t *p)
{
	int v, v1;

	for (v = 0; v >= 0; v = v1) {
		v1 = atomic_cmpxchg(p, v, v + 1);
		if (likely(v1 == v))
			return 1;
	}
	return 0;
}
#endif

/*
 * atomic_dec_unless_positive - decrement unless the number is positive
 * @p: pointer of type atomic_t
 *
 * Atomically decrements @p by 1, so long as @p is not positive.
 * Returns 1 if the decrement happened, and 0 if @p was positive.
 */
#ifndef atomic_dec_unless_positive
static inline int atomic_dec_unless_positive(atomic_t *p)
{
	int v, v1;

	for (v = 0; v <= 0; v = v1) {
		v1 = atomic_cmpxchg(p, v, v - 1);
		if (likely(v1 == v))
			return 1;
	}
	return 0;
}
#endif

/*
 * atomic_dec_if_positive - decrement by 1 if old value positive
 * @v: pointer of type atomic_t
 *
 * The function returns the old value of *v minus 1, even if
 * the atomic variable, v, was not decremented.
 */
#ifndef atomic_dec_if_positive
static inline int atomic_dec_if_positive(atomic_t *v)
{
	int c, old, dec;

	c = atomic_read(v);
	for (;;) {
		dec = c - 1;
		if (unlikely(dec < 0))
			break;
		old = atomic_cmpxchg((v), c, dec);
		if (likely(old == c))
			break;
		c = old;
	}
	return dec;
}
#endif

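/*
 * Illustrative sketch, not part of the kernel API: atomic_dec_if_positive()
 * as a token counter that can never go negative. The name
 * example_take_token() is hypothetical.
 */
static inline int example_take_token(atomic_t *tokens)
{
	/* A negative result means no token was available. */
	return atomic_dec_if_positive(tokens) >= 0;
}
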
/**
 * atomic_fetch_or - perform *p |= mask and return old value of *p
 * @mask: mask to OR on the atomic_t
 * @p: pointer to atomic_t
 */
#ifndef atomic_fetch_or
static inline int atomic_fetch_or(int mask, atomic_t *p)
{
	int old, val = atomic_read(p);

	for (;;) {
		old = atomic_cmpxchg(p, val, val | mask);
		if (old == val)
			break;
		val = old;
	}

	return old;
}
#endif

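/*
 * Illustrative sketch, not part of the kernel API: atomic_fetch_or() as a
 * one-bit test-and-set, claiming a hypothetical PENDING bit (bit 0). The
 * returned old value tells the caller whether someone else set it first.
 */
static inline int example_claim_pending(atomic_t *flags)
{
	return !(atomic_fetch_or(1, flags) & 1);	/* 1: we claimed the bit */
}
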
#ifdef CONFIG_GENERIC_ATOMIC64
#include <asm-generic/atomic64.h>
#endif

#ifndef atomic64_andnot
static inline void atomic64_andnot(long long i, atomic64_t *v)
{
	atomic64_and(~i, v);
}
#endif

#include <asm-generic/atomic-long.h>

#endif /* _LINUX_ATOMIC_H */