blob: 733c60eb859c4a93e56837bb64824756c93c0d19 [file] [log] [blame]
Stephen Hines176edba2014-12-01 14:53:08 -08001// RUN: %clang_cc1 %s -emit-llvm -o - -ffreestanding -triple=i686-apple-darwin9 | FileCheck %s
Eli Friedman276b0612011-10-11 02:20:01 +00002
Richard Smithe1b2abc2012-04-10 22:49:28 +00003// Also test serialization of atomic operations here, to avoid duplicating the
4// test.
Stephen Hines176edba2014-12-01 14:53:08 -08005// RUN: %clang_cc1 %s -emit-pch -o %t -ffreestanding -triple=i686-apple-darwin9
6// RUN: %clang_cc1 %s -include-pch %t -ffreestanding -triple=i686-apple-darwin9 -emit-llvm -o - | FileCheck %s
Richard Smithe1b2abc2012-04-10 22:49:28 +00007#ifndef ALREADY_INCLUDED
8#define ALREADY_INCLUDED
9
Stephen Hines176edba2014-12-01 14:53:08 -080010#include <stdatomic.h>
Eli Friedman276b0612011-10-11 02:20:01 +000011
Stephen Hines176edba2014-12-01 14:53:08 -080012// Basic IRGen tests for __c11_atomic_* and GNU __atomic_*
Eli Friedman276b0612011-10-11 02:20:01 +000013
// Atomic int loads, one per API surface: the C11 __c11_* builtin, the GNU
// two-argument library form, the GNU _n value form, and the <stdatomic.h>
// generic macro. All four must lower to a native 'load atomic ... seq_cst'.
int fi1(_Atomic(int) *i) {
  // CHECK-LABEL: @fi1
  // CHECK: load atomic i32, i32* {{.*}} seq_cst
  return __c11_atomic_load(i, memory_order_seq_cst);
}

int fi1a(int *i) {
  // CHECK-LABEL: @fi1a
  // CHECK: load atomic i32, i32* {{.*}} seq_cst
  int v;
  __atomic_load(i, &v, memory_order_seq_cst);
  return v;
}

int fi1b(int *i) {
  // CHECK-LABEL: @fi1b
  // CHECK: load atomic i32, i32* {{.*}} seq_cst
  return __atomic_load_n(i, memory_order_seq_cst);
}

int fi1c(atomic_int *i) {
  // CHECK-LABEL: @fi1c
  // CHECK: load atomic i32, i32* {{.*}} seq_cst
  return atomic_load(i);
}
39
// Atomic int stores across the same four API surfaces; each must lower to a
// native 'store atomic ... seq_cst' instruction.
void fi2(_Atomic(int) *i) {
  // CHECK-LABEL: @fi2
  // CHECK: store atomic i32 {{.*}} seq_cst
  __c11_atomic_store(i, 1, memory_order_seq_cst);
}

void fi2a(int *i) {
  // CHECK-LABEL: @fi2a
  // CHECK: store atomic i32 {{.*}} seq_cst
  int v = 1;
  __atomic_store(i, &v, memory_order_seq_cst);
}

void fi2b(int *i) {
  // CHECK-LABEL: @fi2b
  // CHECK: store atomic i32 {{.*}} seq_cst
  __atomic_store_n(i, 1, memory_order_seq_cst);
}

void fi2c(atomic_int *i) {
  // CHECK-LABEL: @fi2c
  // CHECK: store atomic i32 {{.*}} seq_cst
  atomic_store(i, 1);
}
64
// Read-modify-write builtins. fetch_OP forms return the OLD value, so only the
// atomicrmw instruction appears (CHECK-NOT guards against a redundant re-apply
// of the operation); OP_fetch forms return the NEW value, so the operation is
// re-applied to the loaded result after the atomicrmw. nand_fetch needs an
// extra and+xor because LLVM's 'atomicrmw nand' returns the pre-nand value.
int fi3(_Atomic(int) *i) {
  // CHECK-LABEL: @fi3
  // CHECK: atomicrmw and
  // CHECK-NOT: and
  return __c11_atomic_fetch_and(i, 1, memory_order_seq_cst);
}

int fi3a(int *i) {
  // CHECK-LABEL: @fi3a
  // CHECK: atomicrmw xor
  // CHECK-NOT: xor
  return __atomic_fetch_xor(i, 1, memory_order_seq_cst);
}

int fi3b(int *i) {
  // CHECK-LABEL: @fi3b
  // CHECK: atomicrmw add
  // CHECK: add
  return __atomic_add_fetch(i, 1, memory_order_seq_cst);
}

int fi3c(int *i) {
  // CHECK-LABEL: @fi3c
  // CHECK: atomicrmw nand
  // CHECK-NOT: and
  return __atomic_fetch_nand(i, 1, memory_order_seq_cst);
}

int fi3d(int *i) {
  // CHECK-LABEL: @fi3d
  // CHECK: atomicrmw nand
  // CHECK: and
  // CHECK: xor
  return __atomic_nand_fetch(i, 1, memory_order_seq_cst);
}

int fi3e(atomic_int *i) {
  // CHECK-LABEL: @fi3e
  // CHECK: atomicrmw or
  // CHECK-NOT: {{ or }}
  return atomic_fetch_or(i, 1);
}
107
// Compare-exchange lowering: a cmpxchg instruction whose { i32, i1 } result is
// unpacked, with the old value written back to *expected only on failure
// (the br on the success flag). fi4b uses the weak variant ('cmpxchg weak').
_Bool fi4(_Atomic(int) *i) {
  // CHECK-LABEL: @fi4(
  // CHECK: [[PAIR:%[.0-9A-Z_a-z]+]] = cmpxchg i32* [[PTR:%[.0-9A-Z_a-z]+]], i32 [[EXPECTED:%[.0-9A-Z_a-z]+]], i32 [[DESIRED:%[.0-9A-Z_a-z]+]]
  // CHECK: [[OLD:%[.0-9A-Z_a-z]+]] = extractvalue { i32, i1 } [[PAIR]], 0
  // CHECK: [[CMP:%[.0-9A-Z_a-z]+]] = extractvalue { i32, i1 } [[PAIR]], 1
  // CHECK: br i1 [[CMP]], label %[[STORE_EXPECTED:[.0-9A-Z_a-z]+]], label %[[CONTINUE:[.0-9A-Z_a-z]+]]
  // CHECK: store i32 [[OLD]]
  int cmp = 0;
  return __c11_atomic_compare_exchange_strong(i, &cmp, 1, memory_order_acquire, memory_order_acquire);
}

_Bool fi4a(int *i) {
  // CHECK-LABEL: @fi4a
  // CHECK: [[PAIR:%[.0-9A-Z_a-z]+]] = cmpxchg i32* [[PTR:%[.0-9A-Z_a-z]+]], i32 [[EXPECTED:%[.0-9A-Z_a-z]+]], i32 [[DESIRED:%[.0-9A-Z_a-z]+]]
  // CHECK: [[OLD:%[.0-9A-Z_a-z]+]] = extractvalue { i32, i1 } [[PAIR]], 0
  // CHECK: [[CMP:%[.0-9A-Z_a-z]+]] = extractvalue { i32, i1 } [[PAIR]], 1
  // CHECK: br i1 [[CMP]], label %[[STORE_EXPECTED:[.0-9A-Z_a-z]+]], label %[[CONTINUE:[.0-9A-Z_a-z]+]]
  // CHECK: store i32 [[OLD]]
  int cmp = 0;
  int desired = 1;
  return __atomic_compare_exchange(i, &cmp, &desired, 0, memory_order_acquire, memory_order_acquire);
}

_Bool fi4b(int *i) {
  // CHECK-LABEL: @fi4b(
  // CHECK: [[PAIR:%[.0-9A-Z_a-z]+]] = cmpxchg weak i32* [[PTR:%[.0-9A-Z_a-z]+]], i32 [[EXPECTED:%[.0-9A-Z_a-z]+]], i32 [[DESIRED:%[.0-9A-Z_a-z]+]]
  // CHECK: [[OLD:%[.0-9A-Z_a-z]+]] = extractvalue { i32, i1 } [[PAIR]], 0
  // CHECK: [[CMP:%[.0-9A-Z_a-z]+]] = extractvalue { i32, i1 } [[PAIR]], 1
  // CHECK: br i1 [[CMP]], label %[[STORE_EXPECTED:[.0-9A-Z_a-z]+]], label %[[CONTINUE:[.0-9A-Z_a-z]+]]
  // CHECK: store i32 [[OLD]]
  int cmp = 0;
  return __atomic_compare_exchange_n(i, &cmp, 1, 1, memory_order_acquire, memory_order_acquire);
}

_Bool fi4c(atomic_int *i) {
  // CHECK-LABEL: @fi4c
  // CHECK: cmpxchg i32*
  int cmp = 0;
  return atomic_compare_exchange_strong(i, &cmp, 1);
}
148
// Atomic float operations are bitcast to same-sized integer atomics (i32 on
// this 32-bit target); relaxed ordering maps to LLVM 'monotonic'.
float ff1(_Atomic(float) *d) {
  // CHECK-LABEL: @ff1
  // CHECK: load atomic i32, i32* {{.*}} monotonic
  return __c11_atomic_load(d, memory_order_relaxed);
}

void ff2(_Atomic(float) *d) {
  // CHECK-LABEL: @ff2
  // CHECK: store atomic i32 {{.*}} release
  __c11_atomic_store(d, 1, memory_order_release);
}

float ff3(_Atomic(float) *d) {
  return __c11_atomic_exchange(d, 2, memory_order_seq_cst);
}
164
// An 8-byte struct is too wide for inline atomics on i686, so these lower to
// the optimized size-specific libcalls (__atomic_load_8 / __atomic_store_8 /
// __atomic_exchange_8 / __atomic_compare_exchange_8), with the struct pointers
// coerced to i8*/i64* as appropriate.
struct S {
  double x;
};

struct S fd1(struct S *a) {
  // CHECK-LABEL: @fd1
  // CHECK: [[RETVAL:%.*]] = alloca %struct.S, align 4
  // CHECK: [[RET:%.*]] = alloca %struct.S, align 4
  // CHECK: [[CALL:%.*]] = call i64 @__atomic_load_8(
  // CHECK: [[CAST:%.*]] = bitcast %struct.S* [[RET]] to i64*
  // CHECK: store i64 [[CALL]], i64* [[CAST]], align 4
  struct S ret;
  __atomic_load(a, &ret, memory_order_seq_cst);
  return ret;
}

void fd2(struct S *a, struct S *b) {
  // CHECK-LABEL: @fd2
  // CHECK: [[A_ADDR:%.*]] = alloca %struct.S*, align 4
  // CHECK-NEXT: [[B_ADDR:%.*]] = alloca %struct.S*, align 4
  // CHECK-NEXT: store %struct.S* %a, %struct.S** [[A_ADDR]], align 4
  // CHECK-NEXT: store %struct.S* %b, %struct.S** [[B_ADDR]], align 4
  // CHECK-NEXT: [[LOAD_A_PTR:%.*]] = load %struct.S*, %struct.S** [[A_ADDR]], align 4
  // CHECK-NEXT: [[LOAD_B_PTR:%.*]] = load %struct.S*, %struct.S** [[B_ADDR]], align 4
  // CHECK-NEXT: [[COERCED_A:%.*]] = bitcast %struct.S* [[LOAD_A_PTR]] to i8*
  // CHECK-NEXT: [[COERCED_B:%.*]] = bitcast %struct.S* [[LOAD_B_PTR]] to i64*
  // CHECK-NEXT: [[LOAD_B:%.*]] = load i64, i64* [[COERCED_B]], align 4
  // CHECK-NEXT: call void @__atomic_store_8(i8* [[COERCED_A]], i64 [[LOAD_B]],
  // CHECK-NEXT: ret void
  __atomic_store(a, b, memory_order_seq_cst);
}

void fd3(struct S *a, struct S *b, struct S *c) {
  // CHECK-LABEL: @fd3
  // CHECK: [[A_ADDR:%.*]] = alloca %struct.S*, align 4
  // CHECK-NEXT: [[B_ADDR:%.*]] = alloca %struct.S*, align 4
  // CHECK-NEXT: [[C_ADDR:%.*]] = alloca %struct.S*, align 4
  // CHECK-NEXT: store %struct.S* %a, %struct.S** [[A_ADDR]], align 4
  // CHECK-NEXT: store %struct.S* %b, %struct.S** [[B_ADDR]], align 4
  // CHECK-NEXT: store %struct.S* %c, %struct.S** [[C_ADDR]], align 4
  // CHECK-NEXT: [[LOAD_A_PTR:%.*]] = load %struct.S*, %struct.S** [[A_ADDR]], align 4
  // CHECK-NEXT: [[LOAD_B_PTR:%.*]] = load %struct.S*, %struct.S** [[B_ADDR]], align 4
  // CHECK-NEXT: [[LOAD_C_PTR:%.*]] = load %struct.S*, %struct.S** [[C_ADDR]], align 4
  // CHECK-NEXT: [[COERCED_A:%.*]] = bitcast %struct.S* [[LOAD_A_PTR]] to i8*
  // CHECK-NEXT: [[COERCED_B:%.*]] = bitcast %struct.S* [[LOAD_B_PTR]] to i64*
  // CHECK-NEXT: [[LOAD_B:%.*]] = load i64, i64* [[COERCED_B]], align 4
  // CHECK-NEXT: [[CALL:%.*]] = call i64 @__atomic_exchange_8(i8* [[COERCED_A]], i64 [[LOAD_B]],
  // CHECK-NEXT: [[COERCED_C:%.*]] = bitcast %struct.S* [[LOAD_C_PTR]] to i64*
  // CHECK-NEXT: store i64 [[CALL]], i64* [[COERCED_C]], align 4

  __atomic_exchange(a, b, c, memory_order_seq_cst);
}

_Bool fd4(struct S *a, struct S *b, struct S *c) {
  // CHECK-LABEL: @fd4
  // CHECK: [[A_ADDR:%.*]] = alloca %struct.S*, align 4
  // CHECK-NEXT: [[B_ADDR:%.*]] = alloca %struct.S*, align 4
  // CHECK-NEXT: [[C_ADDR:%.*]] = alloca %struct.S*, align 4
  // CHECK: store %struct.S* %a, %struct.S** [[A_ADDR]], align 4
  // CHECK-NEXT: store %struct.S* %b, %struct.S** [[B_ADDR]], align 4
  // CHECK-NEXT: store %struct.S* %c, %struct.S** [[C_ADDR]], align 4
  // CHECK-NEXT: [[LOAD_A_PTR:%.*]] = load %struct.S*, %struct.S** [[A_ADDR]], align 4
  // CHECK-NEXT: [[LOAD_B_PTR:%.*]] = load %struct.S*, %struct.S** [[B_ADDR]], align 4
  // CHECK-NEXT: [[LOAD_C_PTR:%.*]] = load %struct.S*, %struct.S** [[C_ADDR]], align 4
  // CHECK-NEXT: [[COERCED_A:%.*]] = bitcast %struct.S* [[LOAD_A_PTR]] to i8*
  // CHECK-NEXT: [[COERCED_B:%.*]] = bitcast %struct.S* [[LOAD_B_PTR]] to i8*
  // CHECK-NEXT: [[COERCED_C:%.*]] = bitcast %struct.S* [[LOAD_C_PTR]] to i64*
  // CHECK-NEXT: [[LOAD_C:%.*]] = load i64, i64* [[COERCED_C]], align 4
  // CHECK-NEXT: [[CALL:%.*]] = call zeroext i1 @__atomic_compare_exchange_8(i8* [[COERCED_A]], i8* [[COERCED_B]], i64 [[LOAD_C]]
  // CHECK-NEXT: ret i1 [[CALL]]
  return __atomic_compare_exchange(a, b, c, 1, 5, 5);
}
237
// Atomic pointer arithmetic: the C11 builtin scales the operand by sizeof(T)
// (hence 'store i32 4' for an increment of 1 int*), while the GNU builtin
// takes a raw byte count.
int* fp1(_Atomic(int*) *p) {
  // CHECK-LABEL: @fp1
  // CHECK: load atomic i32, i32* {{.*}} seq_cst
  return __c11_atomic_load(p, memory_order_seq_cst);
}

int* fp2(_Atomic(int*) *p) {
  // CHECK-LABEL: @fp2
  // CHECK: store i32 4
  // CHECK: atomicrmw add {{.*}} monotonic
  return __c11_atomic_fetch_add(p, 1, memory_order_relaxed);
}

int *fp2a(int **p) {
  // CHECK-LABEL: @fp2a
  // CHECK: store i32 4
  // CHECK: atomicrmw sub {{.*}} monotonic
  // Note, the GNU builtins do not multiply by sizeof(T)!
  return __atomic_fetch_sub(p, 4, memory_order_relaxed);
}
258
// Exchange on non-integer payloads: each is coerced to the same-sized integer
// type (i64 for _Complex float, i32 for a one-int struct, i8 for _Bool) so a
// native 'atomicrmw xchg' can be used.
_Complex float fc(_Atomic(_Complex float) *c) {
  // CHECK-LABEL: @fc
  // CHECK: atomicrmw xchg i64*
  return __c11_atomic_exchange(c, 2, memory_order_seq_cst);
}

typedef struct X { int x; } X;
X fs(_Atomic(X) *c) {
  // CHECK-LABEL: @fs
  // CHECK: atomicrmw xchg i32*
  return __c11_atomic_exchange(c, (X){2}, memory_order_seq_cst);
}

X fsa(X *c, X *d) {
  // CHECK-LABEL: @fsa
  // CHECK: atomicrmw xchg i32*
  X ret;
  __atomic_exchange(c, d, &ret, memory_order_seq_cst);
  return ret;
}

_Bool fsb(_Bool *c) {
  // CHECK-LABEL: @fsb
  // CHECK: atomicrmw xchg i8*
  return __atomic_exchange_n(c, 1, memory_order_seq_cst);
}
285
// __atomic_test_and_set lowers to 'atomicrmw xchg' with value 1 and
// __atomic_clear to an atomic store of 0; the volatile qualifier on flag2
// must carry through to the IR instruction.
char flag1;
volatile char flag2;
void test_and_set() {
  // CHECK: atomicrmw xchg i8* @flag1, i8 1 seq_cst
  __atomic_test_and_set(&flag1, memory_order_seq_cst);
  // CHECK: atomicrmw volatile xchg i8* @flag2, i8 1 acquire
  __atomic_test_and_set(&flag2, memory_order_acquire);
  // CHECK: store atomic volatile i8 0, i8* @flag2 release
  __atomic_clear(&flag2, memory_order_release);
  // CHECK: store atomic i8 0, i8* @flag1 seq_cst
  __atomic_clear(&flag1, memory_order_seq_cst);
}
298
// is_lock_free / always_lock_free queries. Non-constant-foldable sizes (or
// unknown alignment, e.g. an incomplete type or cs+1) become runtime calls to
// __atomic_is_lock_free; __atomic_always_lock_free and the final aligned
// 4-byte queries fold to constants at compile time, so no call is emitted.
struct Sixteen {
  char c[16];
} sixteen;
struct Seventeen {
  char c[17];
} seventeen;

int lock_free(struct Incomplete *incomplete) {
  // CHECK-LABEL: @lock_free

  // CHECK: call i32 @__atomic_is_lock_free(i32 3, i8* null)
  __c11_atomic_is_lock_free(3);

  // CHECK: call i32 @__atomic_is_lock_free(i32 16, i8* {{.*}}@sixteen{{.*}})
  __atomic_is_lock_free(16, &sixteen);

  // CHECK: call i32 @__atomic_is_lock_free(i32 17, i8* {{.*}}@seventeen{{.*}})
  __atomic_is_lock_free(17, &seventeen);

  // CHECK: call i32 @__atomic_is_lock_free(i32 4, {{.*}})
  __atomic_is_lock_free(4, incomplete);

  char cs[20];
  // CHECK: call i32 @__atomic_is_lock_free(i32 4, {{.*}})
  __atomic_is_lock_free(4, cs+1);

  // CHECK-NOT: call
  __atomic_always_lock_free(3, 0);
  __atomic_always_lock_free(16, 0);
  __atomic_always_lock_free(17, 0);
  __atomic_always_lock_free(16, &sixteen);
  __atomic_always_lock_free(17, &seventeen);

  int n;
  __atomic_is_lock_free(4, &n);

  // CHECK: ret i32 1
  return __c11_atomic_is_lock_free(sizeof(_Atomic(int)));
}
David Chisnall3a7d69b2012-03-29 18:01:11 +0000338
339// Tests for atomic operations on big values. These should call the functions
340// defined here:
341// http://gcc.gnu.org/wiki/Atomic/GCCMM/LIbrary#The_Library_interface
342
// Oversized (512-byte) and oddly-sized (3-byte) aggregates take the generic
// memory-based libcalls (__atomic_store/load/exchange/compare_exchange with an
// explicit size argument) rather than the size-specific _N variants.
struct foo {
  int big[128];
};
struct bar {
  char c[3];
};

struct bar smallThing, thing1, thing2;
struct foo bigThing;
_Atomic(struct foo) bigAtomic;

void structAtomicStore() {
  // CHECK-LABEL: @structAtomicStore
  struct foo f = {0};
  struct bar b = {0};
  __atomic_store(&smallThing, &b, 5);
  // CHECK: call void @__atomic_store(i32 3, i8* {{.*}} @smallThing

  __atomic_store(&bigThing, &f, 5);
  // CHECK: call void @__atomic_store(i32 512, i8* {{.*}} @bigThing
}
void structAtomicLoad() {
  // CHECK-LABEL: @structAtomicLoad
  struct bar b;
  __atomic_load(&smallThing, &b, 5);
  // CHECK: call void @__atomic_load(i32 3, i8* {{.*}} @smallThing

  struct foo f = {0};
  __atomic_load(&bigThing, &f, 5);
  // CHECK: call void @__atomic_load(i32 512, i8* {{.*}} @bigThing
}
struct foo structAtomicExchange() {
  // CHECK-LABEL: @structAtomicExchange
  struct foo f = {0};
  struct foo old;
  __atomic_exchange(&f, &bigThing, &old, 5);
  // CHECK: call void @__atomic_exchange(i32 512, {{.*}}, i8* bitcast ({{.*}} @bigThing to i8*),

  return __c11_atomic_exchange(&bigAtomic, f, 5);
  // CHECK: call void @__atomic_exchange(i32 512, i8* bitcast ({{.*}} @bigAtomic to i8*),
}
int structAtomicCmpExchange() {
  // CHECK-LABEL: @structAtomicCmpExchange
  // CHECK: %[[x_mem:.*]] = alloca i8
  _Bool x = __atomic_compare_exchange(&smallThing, &thing1, &thing2, 1, 5, 5);
  // CHECK: %[[call1:.*]] = call zeroext i1 @__atomic_compare_exchange(i32 3, {{.*}} @smallThing{{.*}} @thing1{{.*}} @thing2
  // CHECK: %[[zext1:.*]] = zext i1 %[[call1]] to i8
  // CHECK: store i8 %[[zext1]], i8* %[[x_mem]], align 1
  // CHECK: %[[x:.*]] = load i8, i8* %[[x_mem]]
  // CHECK: %[[x_bool:.*]] = trunc i8 %[[x]] to i1
  // CHECK: %[[conv1:.*]] = zext i1 %[[x_bool]] to i32

  struct foo f = {0};
  struct foo g = {0};
  g.big[12] = 12;
  return x & __c11_atomic_compare_exchange_strong(&bigAtomic, &f, g, 5, 5);
  // CHECK: %[[call2:.*]] = call zeroext i1 @__atomic_compare_exchange(i32 512, i8* bitcast ({{.*}} @bigAtomic to i8*),
  // CHECK: %[[conv2:.*]] = zext i1 %[[call2]] to i32
  // CHECK: %[[and:.*]] = and i32 %[[conv1]], %[[conv2]]
  // CHECK: ret i32 %[[and]]
}
404
405// Check that no atomic operations are used in any initialisation of _Atomic
406// types.
407_Atomic(int) atomic_init_i = 42;
408
Stephen Hinesc568f1e2014-07-21 00:47:37 -0700409// CHECK-LABEL: @atomic_init_foo
Richard Smithff34d402012-04-12 05:08:17 +0000410void atomic_init_foo()
411{
412 // CHECK-NOT: }
413 // CHECK-NOT: atomic
414 // CHECK: store
415 _Atomic(int) j = 12;
416
417 // CHECK-NOT: }
418 // CHECK-NOT: atomic
419 // CHECK: store
420 __c11_atomic_init(&j, 42);
421
422 // CHECK-NOT: atomic
423 // CHECK: }
David Chisnall3a7d69b2012-03-29 18:01:11 +0000424}
Richard Smithe1b2abc2012-04-10 22:49:28 +0000425
// Constant failure-order arguments map straight onto the cmpxchg failure
// ordering; a non-constant or out-of-range one is handled conservatively.
// CHECK-LABEL: @failureOrder
void failureOrder(_Atomic(int) *ptr, int *ptr2) {
  __c11_atomic_compare_exchange_strong(ptr, ptr2, 43, memory_order_acquire, memory_order_relaxed);
  // CHECK: cmpxchg i32* {{%[0-9A-Za-z._]+}}, i32 {{%[0-9A-Za-z._]+}}, i32 {{%[0-9A-Za-z_.]+}} acquire monotonic

  __c11_atomic_compare_exchange_weak(ptr, ptr2, 43, memory_order_seq_cst, memory_order_acquire);
  // CHECK: cmpxchg weak i32* {{%[0-9A-Za-z._]+}}, i32 {{%[0-9A-Za-z._]+}}, i32 {{%[0-9A-Za-z_.]+}} seq_cst acquire

  // Unknown ordering: conservatively pick strongest valid option (for now!).
  __atomic_compare_exchange(ptr2, ptr2, ptr2, 0, memory_order_acq_rel, *ptr2);
  // CHECK: cmpxchg i32* {{%[0-9A-Za-z._]+}}, i32 {{%[0-9A-Za-z._]+}}, i32 {{%[0-9A-Za-z_.]+}} acq_rel acquire

  // Undefined behaviour: don't really care what that last ordering is so leave
  // it out:
  __atomic_compare_exchange_n(ptr2, ptr2, 43, 1, memory_order_seq_cst, 42);
  // CHECK: cmpxchg weak i32* {{%[0-9A-Za-z._]+}}, i32 {{%[0-9A-Za-z._]+}}, i32 {{%[0-9A-Za-z_.]+}} seq_cst
}
443
// Runtime success/failure orderings: codegen emits a two-level switch (outer
// on success order, inner on failure order) dispatching to one cmpxchg per
// valid (success, failure) ordering pair.
// CHECK-LABEL: @generalFailureOrder
void generalFailureOrder(_Atomic(int) *ptr, int *ptr2, int success, int fail) {
  __c11_atomic_compare_exchange_strong(ptr, ptr2, 42, success, fail);
  // CHECK: switch i32 {{.*}}, label %[[MONOTONIC:[0-9a-zA-Z._]+]] [
  // CHECK-NEXT: i32 1, label %[[ACQUIRE:[0-9a-zA-Z._]+]]
  // CHECK-NEXT: i32 2, label %[[ACQUIRE]]
  // CHECK-NEXT: i32 3, label %[[RELEASE:[0-9a-zA-Z._]+]]
  // CHECK-NEXT: i32 4, label %[[ACQREL:[0-9a-zA-Z._]+]]
  // CHECK-NEXT: i32 5, label %[[SEQCST:[0-9a-zA-Z._]+]]

  // CHECK: [[MONOTONIC]]
  // CHECK: switch {{.*}}, label %[[MONOTONIC_MONOTONIC:[0-9a-zA-Z._]+]] [
  // CHECK-NEXT: ]

  // CHECK: [[ACQUIRE]]
  // CHECK: switch {{.*}}, label %[[ACQUIRE_MONOTONIC:[0-9a-zA-Z._]+]] [
  // CHECK-NEXT: i32 1, label %[[ACQUIRE_ACQUIRE:[0-9a-zA-Z._]+]]
  // CHECK-NEXT: i32 2, label %[[ACQUIRE_ACQUIRE:[0-9a-zA-Z._]+]]
  // CHECK-NEXT: ]

  // CHECK: [[RELEASE]]
  // CHECK: switch {{.*}}, label %[[RELEASE_MONOTONIC:[0-9a-zA-Z._]+]] [
  // CHECK-NEXT: ]

  // CHECK: [[ACQREL]]
  // CHECK: switch {{.*}}, label %[[ACQREL_MONOTONIC:[0-9a-zA-Z._]+]] [
  // CHECK-NEXT: i32 1, label %[[ACQREL_ACQUIRE:[0-9a-zA-Z._]+]]
  // CHECK-NEXT: i32 2, label %[[ACQREL_ACQUIRE:[0-9a-zA-Z._]+]]
  // CHECK-NEXT: ]

  // CHECK: [[SEQCST]]
  // CHECK: switch {{.*}}, label %[[SEQCST_MONOTONIC:[0-9a-zA-Z._]+]] [
  // CHECK-NEXT: i32 1, label %[[SEQCST_ACQUIRE:[0-9a-zA-Z._]+]]
  // CHECK-NEXT: i32 2, label %[[SEQCST_ACQUIRE:[0-9a-zA-Z._]+]]
  // CHECK-NEXT: i32 5, label %[[SEQCST_SEQCST:[0-9a-zA-Z._]+]]
  // CHECK-NEXT: ]

  // CHECK: [[MONOTONIC_MONOTONIC]]
  // CHECK: cmpxchg {{.*}} monotonic monotonic
  // CHECK: br

  // CHECK: [[ACQUIRE_MONOTONIC]]
  // CHECK: cmpxchg {{.*}} acquire monotonic
  // CHECK: br

  // CHECK: [[ACQUIRE_ACQUIRE]]
  // CHECK: cmpxchg {{.*}} acquire acquire
  // CHECK: br

  // CHECK: [[ACQREL_MONOTONIC]]
  // CHECK: cmpxchg {{.*}} acq_rel monotonic
  // CHECK: br

  // CHECK: [[ACQREL_ACQUIRE]]
  // CHECK: cmpxchg {{.*}} acq_rel acquire
  // CHECK: br

  // CHECK: [[SEQCST_MONOTONIC]]
  // CHECK: cmpxchg {{.*}} seq_cst monotonic
  // CHECK: br

  // CHECK: [[SEQCST_ACQUIRE]]
  // CHECK: cmpxchg {{.*}} seq_cst acquire
  // CHECK: br

  // CHECK: [[SEQCST_SEQCST]]
  // CHECK: cmpxchg {{.*}} seq_cst seq_cst
  // CHECK: br
}
513
// A runtime 'weak' flag produces a branch selecting between a strong and a
// weak cmpxchg, since weakness is an instruction attribute in LLVM IR.
void generalWeakness(int *ptr, int *ptr2, _Bool weak) {
  __atomic_compare_exchange_n(ptr, ptr2, 42, weak, memory_order_seq_cst, memory_order_seq_cst);
  // CHECK: switch i1 {{.*}}, label %[[WEAK:[0-9a-zA-Z._]+]] [
  // CHECK-NEXT: i1 false, label %[[STRONG:[0-9a-zA-Z._]+]]

  // CHECK: [[STRONG]]
  // CHECK-NOT: br
  // CHECK: cmpxchg {{.*}} seq_cst seq_cst
  // CHECK: br

  // CHECK: [[WEAK]]
  // CHECK-NOT: br
  // CHECK: cmpxchg weak {{.*}} seq_cst seq_cst
  // CHECK: br
}
529
// Having checked the flow in the previous two cases, we'll trust clang to
// combine them sanely.
// With success order, failure order, AND weakness all runtime values, every
// valid (ordering-pair x strength) combination gets its own cmpxchg.
void EMIT_ALL_THE_THINGS(int *ptr, int *ptr2, int new, _Bool weak, int success, int fail) {
  __atomic_compare_exchange(ptr, ptr2, &new, weak, success, fail);

  // CHECK: = cmpxchg {{.*}} monotonic monotonic
  // CHECK: = cmpxchg weak {{.*}} monotonic monotonic
  // CHECK: = cmpxchg {{.*}} acquire monotonic
  // CHECK: = cmpxchg {{.*}} acquire acquire
  // CHECK: = cmpxchg weak {{.*}} acquire monotonic
  // CHECK: = cmpxchg weak {{.*}} acquire acquire
  // CHECK: = cmpxchg {{.*}} release monotonic
  // CHECK: = cmpxchg weak {{.*}} release monotonic
  // CHECK: = cmpxchg {{.*}} acq_rel monotonic
  // CHECK: = cmpxchg {{.*}} acq_rel acquire
  // CHECK: = cmpxchg weak {{.*}} acq_rel monotonic
  // CHECK: = cmpxchg weak {{.*}} acq_rel acquire
  // CHECK: = cmpxchg {{.*}} seq_cst monotonic
  // CHECK: = cmpxchg {{.*}} seq_cst acquire
  // CHECK: = cmpxchg {{.*}} seq_cst seq_cst
  // CHECK: = cmpxchg weak {{.*}} seq_cst monotonic
  // CHECK: = cmpxchg weak {{.*}} seq_cst acquire
  // CHECK: = cmpxchg weak {{.*}} seq_cst seq_cst
}
554
// PR21643: an atomic op on a non-default address space (x86 GS segment, 257)
// must keep the addrspace on the atomicrmw pointer operand.
int PR21643() {
  return __atomic_or_fetch((int __attribute__((address_space(257))) *)0x308, 1,
                           __ATOMIC_RELAXED);
  // CHECK: %[[atomictmp:.*]] = alloca i32, align 4
  // CHECK: %[[atomicdst:.*]] = alloca i32, align 4
  // CHECK: store i32 1, i32* %[[atomictmp]]
  // CHECK: %[[one:.*]] = load i32, i32* %[[atomictmp]], align 4
  // CHECK: %[[old:.*]] = atomicrmw or i32 addrspace(257)* inttoptr (i32 776 to i32 addrspace(257)*), i32 %[[one]] monotonic
  // CHECK: %[[new:.*]] = or i32 %[[old]], %[[one]]
  // CHECK: store i32 %[[new]], i32* %[[atomicdst]], align 4
  // CHECK: %[[ret:.*]] = load i32, i32* %[[atomicdst]], align 4
  // CHECK: ret i32 %[[ret]]
}
568
// PR17306: volatile-qualified atomics must emit 'volatile' atomic
// instructions ('load atomic volatile' / 'atomicrmw volatile add').
int PR17306_1(volatile _Atomic(int) *i) {
  // CHECK-LABEL: @PR17306_1
  // CHECK: %[[i_addr:.*]] = alloca i32
  // CHECK-NEXT: %[[atomicdst:.*]] = alloca i32
  // CHECK-NEXT: store i32* %i, i32** %[[i_addr]]
  // CHECK-NEXT: %[[addr:.*]] = load i32*, i32** %[[i_addr]]
  // CHECK-NEXT: %[[res:.*]] = load atomic volatile i32, i32* %[[addr]] seq_cst
  // CHECK-NEXT: store i32 %[[res]], i32* %[[atomicdst]]
  // CHECK-NEXT: %[[retval:.*]] = load i32, i32* %[[atomicdst]]
  // CHECK-NEXT: ret i32 %[[retval]]
  return __c11_atomic_load(i, memory_order_seq_cst);
}

int PR17306_2(volatile int *i, int value) {
  // CHECK-LABEL: @PR17306_2
  // CHECK: %[[i_addr:.*]] = alloca i32*
  // CHECK-NEXT: %[[value_addr:.*]] = alloca i32
  // CHECK-NEXT: %[[atomictmp:.*]] = alloca i32
  // CHECK-NEXT: %[[atomicdst:.*]] = alloca i32
  // CHECK-NEXT: store i32* %i, i32** %[[i_addr]]
  // CHECK-NEXT: store i32 %value, i32* %[[value_addr]]
  // CHECK-NEXT: %[[i_lval:.*]] = load i32*, i32** %[[i_addr]]
  // CHECK-NEXT: %[[value:.*]] = load i32, i32* %[[value_addr]]
  // CHECK-NEXT: store i32 %[[value]], i32* %[[atomictmp]]
  // CHECK-NEXT: %[[value_lval:.*]] = load i32, i32* %[[atomictmp]]
  // CHECK-NEXT: %[[old_val:.*]] = atomicrmw volatile add i32* %[[i_lval]], i32 %[[value_lval]] seq_cst
  // CHECK-NEXT: %[[new_val:.*]] = add i32 %[[old_val]], %[[value_lval]]
  // CHECK-NEXT: store i32 %[[new_val]], i32* %[[atomicdst]]
  // CHECK-NEXT: %[[retval:.*]] = load i32, i32* %[[atomicdst]]
  // CHECK-NEXT: ret i32 %[[retval]]
  return __atomic_add_fetch(i, value, memory_order_seq_cst);
}
601
Richard Smithe1b2abc2012-04-10 22:49:28 +0000602#endif