/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_ATOMIC64_64_H
#define _ASM_X86_ATOMIC64_64_H

#include <linux/types.h>
#include <asm/alternative.h>
#include <asm/cmpxchg.h>

/* The 64-bit atomic type */

#define ATOMIC64_INIT(i)	{ (i) }

/**
 * arch_atomic64_read - read atomic64 variable
 * @v: pointer of type atomic64_t
 *
 * Atomically reads the value of @v.
 * Doesn't imply a read memory barrier.
 */
static inline long arch_atomic64_read(const atomic64_t *v)
{
	return READ_ONCE((v)->counter);
}

/**
 * arch_atomic64_set - set atomic64 variable
 * @v: pointer to type atomic64_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.
 */
static inline void arch_atomic64_set(atomic64_t *v, long i)
{
	WRITE_ONCE(v->counter, i);
}

/**
 * arch_atomic64_add - add integer to atomic64 variable
 * @i: integer value to add
 * @v: pointer to type atomic64_t
 *
 * Atomically adds @i to @v.
 */
static __always_inline void arch_atomic64_add(long i, atomic64_t *v)
{
	asm volatile(LOCK_PREFIX "addq %1,%0"
		     : "=m" (v->counter)
		     : "er" (i), "m" (v->counter));
}

/**
 * arch_atomic64_sub - subtract integer from atomic64 variable
 * @i: integer value to subtract
 * @v: pointer to type atomic64_t
 *
 * Atomically subtracts @i from @v.
 */
static inline void arch_atomic64_sub(long i, atomic64_t *v)
{
	asm volatile(LOCK_PREFIX "subq %1,%0"
		     : "=m" (v->counter)
		     : "er" (i), "m" (v->counter));
}

/**
 * arch_atomic64_sub_and_test - subtract value from variable and test result
 * @i: integer value to subtract
 * @v: pointer to type atomic64_t
 *
 * Atomically subtracts @i from @v and returns
 * true if the result is zero, or false for all
 * other cases.
 */
static inline bool arch_atomic64_sub_and_test(long i, atomic64_t *v)
{
	GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, "er", i, "%0", e);
}

/**
 * arch_atomic64_inc - increment atomic64 variable
 * @v: pointer to type atomic64_t
 *
 * Atomically increments @v by 1.
 */
static __always_inline void arch_atomic64_inc(atomic64_t *v)
{
	asm volatile(LOCK_PREFIX "incq %0"
		     : "=m" (v->counter)
		     : "m" (v->counter));
}

/**
 * arch_atomic64_dec - decrement atomic64 variable
 * @v: pointer to type atomic64_t
 *
 * Atomically decrements @v by 1.
 */
static __always_inline void arch_atomic64_dec(atomic64_t *v)
{
	asm volatile(LOCK_PREFIX "decq %0"
		     : "=m" (v->counter)
		     : "m" (v->counter));
}

/**
 * arch_atomic64_dec_and_test - decrement and test
 * @v: pointer to type atomic64_t
 *
 * Atomically decrements @v by 1 and
 * returns true if the result is 0, or false for all other
 * cases.
 */
static inline bool arch_atomic64_dec_and_test(atomic64_t *v)
{
	GEN_UNARY_RMWcc(LOCK_PREFIX "decq", v->counter, "%0", e);
}

/**
 * arch_atomic64_inc_and_test - increment and test
 * @v: pointer to type atomic64_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
static inline bool arch_atomic64_inc_and_test(atomic64_t *v)
{
	GEN_UNARY_RMWcc(LOCK_PREFIX "incq", v->counter, "%0", e);
}

/**
 * arch_atomic64_add_negative - add and test if negative
 * @i: integer value to add
 * @v: pointer to type atomic64_t
 *
 * Atomically adds @i to @v and returns true
 * if the result is negative, or false when
 * result is greater than or equal to zero.
 */
static inline bool arch_atomic64_add_negative(long i, atomic64_t *v)
{
	GEN_BINARY_RMWcc(LOCK_PREFIX "addq", v->counter, "er", i, "%0", s);
}

/**
 * arch_atomic64_add_return - add and return
 * @i: integer value to add
 * @v: pointer to type atomic64_t
 *
 * Atomically adds @i to @v and returns @i + @v
 */
static __always_inline long arch_atomic64_add_return(long i, atomic64_t *v)
{
	return i + xadd(&v->counter, i);
}

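/**
 * arch_atomic64_sub_return - subtract and return
 * @i: integer value to subtract
 * @v: pointer to type atomic64_t
 *
 * Atomically subtracts @i from @v and returns @v - @i.
 */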
static inline long arch_atomic64_sub_return(long i, atomic64_t *v)
{
	return arch_atomic64_add_return(-i, v);
}

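/**
 * arch_atomic64_fetch_add - add and return old value
 * @i: integer value to add
 * @v: pointer to type atomic64_t
 *
 * Atomically adds @i to @v and returns the value of @v before the addition.
 */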
static inline long arch_atomic64_fetch_add(long i, atomic64_t *v)
{
	return xadd(&v->counter, i);
}

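/**
 * arch_atomic64_fetch_sub - subtract and return old value
 * @i: integer value to subtract
 * @v: pointer to type atomic64_t
 *
 * Atomically subtracts @i from @v and returns the value of @v before the
 * subtraction.
 */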
static inline long arch_atomic64_fetch_sub(long i, atomic64_t *v)
{
	return xadd(&v->counter, -i);
}

#define arch_atomic64_inc_return(v)  (arch_atomic64_add_return(1, (v)))
#define arch_atomic64_dec_return(v)  (arch_atomic64_sub_return(1, (v)))

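/**
 * arch_atomic64_cmpxchg - compare and exchange
 * @v: pointer to type atomic64_t
 * @old: expected value
 * @new: new value
 *
 * Atomically sets @v to @new if @v was equal to @old.
 * Returns the value of @v before the operation.
 */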
static inline long arch_atomic64_cmpxchg(atomic64_t *v, long old, long new)
{
	return arch_cmpxchg(&v->counter, old, new);
}

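/**
 * arch_atomic64_try_cmpxchg - compare and exchange, updating the expected value
 * @v: pointer to type atomic64_t
 * @old: pointer to the expected value
 * @new: new value
 *
 * Atomically sets @v to @new and returns true if @v was equal to *@old.
 * Otherwise *@old is updated to the current value of @v and false is
 * returned, which is what the cmpxchg loops below rely on.
 */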
#define arch_atomic64_try_cmpxchg arch_atomic64_try_cmpxchg
static __always_inline bool arch_atomic64_try_cmpxchg(atomic64_t *v, s64 *old, long new)
{
	return try_cmpxchg(&v->counter, old, new);
}

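/**
 * arch_atomic64_xchg - exchange value
 * @v: pointer to type atomic64_t
 * @new: new value
 *
 * Atomically exchanges the value of @v with @new and returns the old value.
 */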
static inline long arch_atomic64_xchg(atomic64_t *v, long new)
{
	return xchg(&v->counter, new);
}

/**
 * arch_atomic64_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns true if the addition was performed, or false if @v was already @u.
 */
static inline bool arch_atomic64_add_unless(atomic64_t *v, long a, long u)
{
	s64 c = arch_atomic64_read(v);
	do {
		if (unlikely(c == u))
			return false;
	} while (!arch_atomic64_try_cmpxchg(v, &c, c + a));
	return true;
}

#define arch_atomic64_inc_not_zero(v) arch_atomic64_add_unless((v), 1, 0)

/**
 * arch_atomic64_dec_if_positive - decrement by 1 if old value positive
 * @v: pointer of type atomic64_t
 *
 * The function returns the old value of *v minus 1, even if
 * the atomic variable, v, was not decremented.
 */
static inline long arch_atomic64_dec_if_positive(atomic64_t *v)
{
	s64 dec, c = arch_atomic64_read(v);
	do {
		dec = c - 1;
		if (unlikely(dec < 0))
			break;
	} while (!arch_atomic64_try_cmpxchg(v, &c, dec));
	return dec;
}

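/**
 * arch_atomic64_and - bitwise AND of value into atomic64 variable
 * @i: integer value to AND
 * @v: pointer to type atomic64_t
 *
 * Atomically updates @v to @v & @i.
 */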
static inline void arch_atomic64_and(long i, atomic64_t *v)
{
	asm volatile(LOCK_PREFIX "andq %1,%0"
		     : "+m" (v->counter)
		     : "er" (i)
		     : "memory");
}

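/**
 * arch_atomic64_fetch_and - bitwise AND and return old value
 * @i: integer value to AND
 * @v: pointer to type atomic64_t
 *
 * Atomically updates @v to @v & @i and returns the value of @v before the
 * operation.
 */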
static inline long arch_atomic64_fetch_and(long i, atomic64_t *v)
{
	s64 val = arch_atomic64_read(v);

	do {
	} while (!arch_atomic64_try_cmpxchg(v, &val, val & i));
	return val;
}

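/**
 * arch_atomic64_or - bitwise OR of value into atomic64 variable
 * @i: integer value to OR
 * @v: pointer to type atomic64_t
 *
 * Atomically updates @v to @v | @i.
 */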
static inline void arch_atomic64_or(long i, atomic64_t *v)
{
	asm volatile(LOCK_PREFIX "orq %1,%0"
		     : "+m" (v->counter)
		     : "er" (i)
		     : "memory");
}

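/**
 * arch_atomic64_fetch_or - bitwise OR and return old value
 * @i: integer value to OR
 * @v: pointer to type atomic64_t
 *
 * Atomically updates @v to @v | @i and returns the value of @v before the
 * operation.
 */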
static inline long arch_atomic64_fetch_or(long i, atomic64_t *v)
{
	s64 val = arch_atomic64_read(v);

	do {
	} while (!arch_atomic64_try_cmpxchg(v, &val, val | i));
	return val;
}

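/**
 * arch_atomic64_xor - bitwise XOR of value into atomic64 variable
 * @i: integer value to XOR
 * @v: pointer to type atomic64_t
 *
 * Atomically updates @v to @v ^ @i.
 */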
static inline void arch_atomic64_xor(long i, atomic64_t *v)
{
	asm volatile(LOCK_PREFIX "xorq %1,%0"
		     : "+m" (v->counter)
		     : "er" (i)
		     : "memory");
}

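/**
 * arch_atomic64_fetch_xor - bitwise XOR and return old value
 * @i: integer value to XOR
 * @v: pointer to type atomic64_t
 *
 * Atomically updates @v to @v ^ @i and returns the value of @v before the
 * operation.
 */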
static inline long arch_atomic64_fetch_xor(long i, atomic64_t *v)
{
	s64 val = arch_atomic64_read(v);

	do {
	} while (!arch_atomic64_try_cmpxchg(v, &val, val ^ i));
	return val;
}

#endif /* _ASM_X86_ATOMIC64_64_H */