Cyril Hrubis | bbdb9f7 | 2016-03-16 15:53:57 +0100 | [diff] [blame] | 1 | /* |
| 2 | * Copyright (c) 2016 Cyril Hrubis <chrubis@suse.cz> |
| 3 | * |
| 4 | * This program is free software: you can redistribute it and/or modify |
| 5 | * it under the terms of the GNU General Public License as published by |
| 6 | * the Free Software Foundation, either version 2 of the License, or |
| 7 | * (at your option) any later version. |
| 8 | * |
| 9 | * This program is distributed in the hope that it will be useful, |
| 10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
| 11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| 12 | * GNU General Public License for more details. |
| 13 | * |
| 14 | * You should have received a copy of the GNU General Public License |
| 15 | * along with this program. If not, see <http://www.gnu.org/licenses/>. |
| 16 | */ |
| 17 | |
| 18 | #ifndef TST_ATOMIC_H__ |
| 19 | #define TST_ATOMIC_H__ |
| 20 | |
Jan Stancek | f61391e | 2016-04-13 13:09:23 +0200 | [diff] [blame] | 21 | #include "config.h" |
| 22 | |
| 23 | #if HAVE_SYNC_ADD_AND_FETCH == 1 |
/*
 * Atomically adds i to *v and returns the resulting (post-add) value.
 *
 * __sync_fetch_and_add() yields the value *v held before the add, so
 * adding the increment back gives the new value, matching the
 * arch-specific implementations below.
 */
static inline int tst_atomic_add_return(int i, int *v)
{
	int old_val = __sync_fetch_and_add(v, i);

	return old_val + i;
}
Jan Stancek | eb0ad60 | 2016-04-13 13:14:25 +0200 | [diff] [blame] | 28 | |
| 29 | #elif defined(__i386__) || defined(__x86_64__) |
| 30 | static inline int tst_atomic_add_return(int i, int *v) |
| 31 | { |
| 32 | int __ret = i; |
| 33 | |
| 34 | /* |
| 35 | * taken from arch/x86/include/asm/cmpxchg.h |
| 36 | * Since we always pass int sized parameter, we can simplify it |
| 37 | * and cherry-pick only that specific case. |
| 38 | * |
| 39 | switch (sizeof(*v)) { |
| 40 | case 1: |
| 41 | asm volatile ("lock; xaddb %b0, %1\n" |
| 42 | : "+q" (__ret), "+m" (*v) : : "memory", "cc"); |
| 43 | break; |
| 44 | case 2: |
| 45 | asm volatile ("lock; xaddw %w0, %1\n" |
| 46 | : "+r" (__ret), "+m" (*v) : : "memory", "cc"); |
| 47 | break; |
| 48 | case 4: |
| 49 | asm volatile ("lock; xaddl %0, %1\n" |
| 50 | : "+r" (__ret), "+m" (*v) : : "memory", "cc"); |
| 51 | break; |
| 52 | case 8: |
| 53 | asm volatile ("lock; xaddq %q0, %1\n" |
| 54 | : "+r" (__ret), "+m" (*v) : : "memory", "cc"); |
| 55 | break; |
| 56 | default: |
| 57 | __xadd_wrong_size(); |
| 58 | } |
| 59 | */ |
| 60 | asm volatile ("lock; xaddl %0, %1\n" |
| 61 | : "+r" (__ret), "+m" (*v) : : "memory", "cc"); |
| 62 | |
| 63 | return i + __ret; |
| 64 | } |
| 65 | |
| 66 | #elif defined(__powerpc__) || defined(__powerpc64__) |
/*
 * Atomically adds i to *v and returns the resulting (post-add) value.
 *
 * lwarx/stwcx. form a load-reserve/store-conditional retry loop: the
 * conditional store fails (and bne- loops back) if *v was modified
 * between the load and the store.  The sync instructions before and
 * after act as memory barriers around the update.
 */
static inline int tst_atomic_add_return(int i, int *v)
{
	/* receives the post-add value computed inside the loop */
	int t;

	/* taken from arch/powerpc/include/asm/atomic.h */
	asm volatile(
		" sync\n"
		"1: lwarx %0,0,%2 # atomic_add_return\n"
		" add %0,%1,%0\n"
		" stwcx. %0,0,%2 \n"
		" bne- 1b\n"
		" sync\n"
		: "=&r" (t)
		: "r" (i), "r" (v)
		: "cc", "memory");

	return t;
}
| 85 | |
| 86 | #elif defined(__s390__) || defined(__s390x__) |
/*
 * Atomically adds i to *v and returns the resulting (post-add) value.
 *
 * Implemented as a compare-and-swap loop: load *v, compute
 * old_val + i, then cs stores the sum only if *v still equals
 * old_val; on mismatch (jl taken) the loop retries with the
 * refreshed value.
 */
static inline int tst_atomic_add_return(int i, int *v)
{
	int old_val, new_val;

	/* taken from arch/s390/include/asm/atomic.h */
	asm volatile(
		" l %0,%2\n"
		"0: lr %1,%0\n"
		" ar %1,%3\n"
		" cs %0,%1,%2\n"
		" jl 0b"
		: "=&d" (old_val), "=&d" (new_val), "+Q" (*v)
		: "d" (i)
		: "cc", "memory");

	/* value observed by the successful cs, plus the increment */
	return old_val + i;
}
Vineet Gupta | 7452a9f | 2016-08-31 16:39:48 -0700 | [diff] [blame] | 104 | |
| 105 | #elif defined(__arc__) |
| 106 | |
/*
 * ARCv2 provides the dmb instruction for SMP memory barriers;
 * ARC700 does not, so smp_mb() degrades to a no-op there.
 */
#ifdef __ARC700__
#define smp_mb()
#else
#define smp_mb() asm volatile("dmb 3\n" : : : "memory")
#endif

/*
 * Atomically adds i to *v and returns the resulting (post-add) value.
 *
 * llock/scond form a load-locked/store-conditional retry loop: scond
 * fails (and bnz loops back) if *v was modified between the load and
 * the store.  smp_mb() before and after orders the update relative to
 * surrounding memory accesses (no-op on ARC700, see above).
 */
static inline int tst_atomic_add_return(int i, int *v)
{
	/* receives the post-add value computed inside the loop */
	unsigned int val;

	smp_mb();

	asm volatile(
		"1: llock %[val], [%[ctr]] \n"
		" add %[val], %[val], %[i] \n"
		" scond %[val], [%[ctr]] \n"
		" bnz 1b \n"
		: [val] "=&r" (val)
		: [ctr] "r" (v),
		[i] "ir" (i)
		: "cc", "memory");

	smp_mb();

	return val;
}
Jan Stancek | eb0ad60 | 2016-04-13 13:14:25 +0200 | [diff] [blame] | 134 | #else /* HAVE_SYNC_ADD_AND_FETCH == 1 */ |
Jan Stancek | f61391e | 2016-04-13 13:09:23 +0200 | [diff] [blame] | 135 | # error Your compiler does not provide __sync_add_and_fetch and LTP\ |
| 136 | implementation is missing for your architecture. |
| 137 | #endif |
| 138 | |
/* Atomically increments *v by one and returns the new value. */
static inline int tst_atomic_inc(int *v)
{
	int new_val = tst_atomic_add_return(1, v);

	return new_val;
}
| 143 | |
/* Atomically decrements *v by one and returns the new value. */
static inline int tst_atomic_dec(int *v)
{
	int new_val = tst_atomic_add_return(-1, v);

	return new_val;
}
| 148 | |
Cyril Hrubis | bbdb9f7 | 2016-03-16 15:53:57 +0100 | [diff] [blame] | 149 | #endif /* TST_ATOMIC_H__ */ |