// SPDX-License-Identifier: GPL-2.0+
//
// An earlier version of this file appears in the companion webpage for
// "Frightening small children and disconcerting grown-ups: Concurrency
// in the Linux kernel" by Alglave, Maranget, McKenney, Parri, and Stern,
// which is to appear in ASPLOS 2018.
| 7 | |
// ONCE
//
// Marked one-time accesses.  Definitions without a { } body are
// expressions that yield a value; definitions with a { ... ; } body are
// statements with no value.  The {once} tag marks the access as a plain
// marked (relaxed) load/store in the model — TODO confirm tag semantics
// against the accompanying .cat file.
READ_ONCE(X) __load{once}(X)
WRITE_ONCE(X,V) { __store{once}(X,V); }
| 11 | |
// Release Acquire and friends
//
// Note the argument shapes: smp_store_release()/smp_load_acquire() take a
// pointer and access *X, while rcu_assign_pointer()/rcu_dereference()
// access X directly.  rcu_assign_pointer() is modeled as a release store
// and rcu_dereference() as a once (plain marked) load.
smp_store_release(X,V) { __store{release}(*X,V); }
smp_load_acquire(X) __load{acquire}(*X)
rcu_assign_pointer(X,V) { __store{release}(X,V); }
rcu_dereference(X) __load{once}(X)
Paul E. McKenney | 1c27b64 | 2018-01-18 19:58:55 -0800 | [diff] [blame] | 17 | |
// Fences
//
// Each barrier primitive maps to a distinct named fence event; the
// corresponding ordering rules live in the model's .cat file, keyed by
// these tag names.
smp_mb() { __fence{mb} ; }
smp_rmb() { __fence{rmb} ; }
smp_wmb() { __fence{wmb} ; }
smp_mb__before_atomic() { __fence{before-atomic} ; }
smp_mb__after_atomic() { __fence{after-atomic} ; }
smp_mb__after_spinlock() { __fence{after-spinlock} ; }
Paul E. McKenney | 1c27b64 | 2018-01-18 19:58:55 -0800 | [diff] [blame] | 25 | |
// Exchange
//
// Suffix convention used throughout this file: no suffix -> {mb}
// (fully ordered), _relaxed -> {once}, _acquire -> {acquire},
// _release -> {release}.
xchg(X,V) __xchg{mb}(X,V)
xchg_relaxed(X,V) __xchg{once}(X,V)
xchg_release(X,V) __xchg{release}(X,V)
xchg_acquire(X,V) __xchg{acquire}(X,V)
cmpxchg(X,V,W) __cmpxchg{mb}(X,V,W)
cmpxchg_relaxed(X,V,W) __cmpxchg{once}(X,V,W)
cmpxchg_acquire(X,V,W) __cmpxchg{acquire}(X,V,W)
cmpxchg_release(X,V,W) __cmpxchg{release}(X,V,W)
| 35 | |
// Spinlocks
//
// spin_lock()/spin_unlock() are statements; spin_trylock() is an
// expression whose value is the result of __trylock() (nonzero on
// success, presumably — confirm against the lock.cat definitions).
spin_lock(X) { __lock(X) ; }
spin_unlock(X) { __unlock(X) ; }
spin_trylock(X) __trylock(X)
| 40 | |
// RCU
//
// Read-side critical sections are delimited by rcu-lock/rcu-unlock
// fence events.  Note that synchronize_rcu_expedited() is modeled
// identically to synchronize_rcu() — both emit the same sync-rcu
// fence, i.e. the model does not distinguish expedited grace periods.
rcu_read_lock() { __fence{rcu-lock}; }
rcu_read_unlock() { __fence{rcu-unlock};}
synchronize_rcu() { __fence{sync-rcu}; }
synchronize_rcu_expedited() { __fence{sync-rcu}; }
| 46 | |
// Atomic
//
// Plain atomic_t accessors are defined in terms of the ONCE and
// release/acquire primitives above, dereferencing the atomic_t pointer.
atomic_read(X) READ_ONCE(*X)
atomic_set(X,V) { WRITE_ONCE(*X,V) ; }
atomic_read_acquire(X) smp_load_acquire(X)
atomic_set_release(X,V) { smp_store_release(X,V); }
| 52 | |
// Non-value-returning atomic RMW operations.  These use the bare
// __atomic_op form (statement, no ordering tag); inc/dec are the
// add/sub forms specialized to the constant 1.
atomic_add(V,X) { __atomic_op(X,+,V) ; }
atomic_sub(V,X) { __atomic_op(X,-,V) ; }
atomic_inc(X) { __atomic_op(X,+,1) ; }
atomic_dec(X) { __atomic_op(X,-,1) ; }
| 57 | |
// Value-returning atomic RMW operations, in four ordering variants each
// (see the suffix convention noted in the Exchange section: unsuffixed
// -> {mb}, _relaxed -> {once}, _acquire, _release).
// __atomic_op_return yields the NEW value; __atomic_fetch_op yields the
// OLD value.  inc/dec are add/sub specialized to the constant 1.
atomic_add_return(V,X) __atomic_op_return{mb}(X,+,V)
atomic_add_return_relaxed(V,X) __atomic_op_return{once}(X,+,V)
atomic_add_return_acquire(V,X) __atomic_op_return{acquire}(X,+,V)
atomic_add_return_release(V,X) __atomic_op_return{release}(X,+,V)
atomic_fetch_add(V,X) __atomic_fetch_op{mb}(X,+,V)
atomic_fetch_add_relaxed(V,X) __atomic_fetch_op{once}(X,+,V)
atomic_fetch_add_acquire(V,X) __atomic_fetch_op{acquire}(X,+,V)
atomic_fetch_add_release(V,X) __atomic_fetch_op{release}(X,+,V)

atomic_inc_return(X) __atomic_op_return{mb}(X,+,1)
atomic_inc_return_relaxed(X) __atomic_op_return{once}(X,+,1)
atomic_inc_return_acquire(X) __atomic_op_return{acquire}(X,+,1)
atomic_inc_return_release(X) __atomic_op_return{release}(X,+,1)
atomic_fetch_inc(X) __atomic_fetch_op{mb}(X,+,1)
atomic_fetch_inc_relaxed(X) __atomic_fetch_op{once}(X,+,1)
atomic_fetch_inc_acquire(X) __atomic_fetch_op{acquire}(X,+,1)
atomic_fetch_inc_release(X) __atomic_fetch_op{release}(X,+,1)

atomic_sub_return(V,X) __atomic_op_return{mb}(X,-,V)
atomic_sub_return_relaxed(V,X) __atomic_op_return{once}(X,-,V)
atomic_sub_return_acquire(V,X) __atomic_op_return{acquire}(X,-,V)
atomic_sub_return_release(V,X) __atomic_op_return{release}(X,-,V)
atomic_fetch_sub(V,X) __atomic_fetch_op{mb}(X,-,V)
atomic_fetch_sub_relaxed(V,X) __atomic_fetch_op{once}(X,-,V)
atomic_fetch_sub_acquire(V,X) __atomic_fetch_op{acquire}(X,-,V)
atomic_fetch_sub_release(V,X) __atomic_fetch_op{release}(X,-,V)

atomic_dec_return(X) __atomic_op_return{mb}(X,-,1)
atomic_dec_return_relaxed(X) __atomic_op_return{once}(X,-,1)
atomic_dec_return_acquire(X) __atomic_op_return{acquire}(X,-,1)
atomic_dec_return_release(X) __atomic_op_return{release}(X,-,1)
atomic_fetch_dec(X) __atomic_fetch_op{mb}(X,-,1)
atomic_fetch_dec_relaxed(X) __atomic_fetch_op{once}(X,-,1)
atomic_fetch_dec_acquire(X) __atomic_fetch_op{acquire}(X,-,1)
atomic_fetch_dec_release(X) __atomic_fetch_op{release}(X,-,1)
| 93 | |
// atomic_t exchange operations.  These expand to exactly the same
// __xchg/__cmpxchg primitives as the non-atomic_t xchg()/cmpxchg()
// family above, with the same ordering-suffix convention.
atomic_xchg(X,V) __xchg{mb}(X,V)
atomic_xchg_relaxed(X,V) __xchg{once}(X,V)
atomic_xchg_release(X,V) __xchg{release}(X,V)
atomic_xchg_acquire(X,V) __xchg{acquire}(X,V)
atomic_cmpxchg(X,V,W) __cmpxchg{mb}(X,V,W)
atomic_cmpxchg_relaxed(X,V,W) __cmpxchg{once}(X,V,W)
atomic_cmpxchg_acquire(X,V,W) __cmpxchg{acquire}(X,V,W)
atomic_cmpxchg_release(X,V,W) __cmpxchg{release}(X,V,W)
| 102 | |
// Conditional atomic operations: a fully ordered ({mb}) value-returning
// RMW whose new value is then compared — == 0 for the *_and_test forms,
// < 0 for atomic_add_negative() — yielding the boolean result.
atomic_sub_and_test(V,X) __atomic_op_return{mb}(X,-,V) == 0
atomic_dec_and_test(X) __atomic_op_return{mb}(X,-,1) == 0
atomic_inc_and_test(X) __atomic_op_return{mb}(X,+,1) == 0
atomic_add_negative(V,X) __atomic_op_return{mb}(X,+,V) < 0