; RUN: opt < %s -tsan -S | FileCheck %s
; Check that atomic memory operations are converted to calls into ThreadSanitizer runtime.
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
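
; Note: the trailing i32 argument of each __tsan_atomic* callback encodes the
; memory order. Judging by the values checked below, the runtime interface this
; test targets uses bit-flag encodings (1 << order) of the C++11 memory orders:
; relaxed = 1, acquire = 4, release = 8, seq_cst = 32; unordered accesses are
; mapped to relaxed.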

define i8 @atomic8_load_unordered(i8* %a) nounwind uwtable {
entry:
  %0 = load atomic i8* %a unordered, align 1
  ret i8 %0
}
; CHECK: atomic8_load_unordered
; CHECK: call i8 @__tsan_atomic8_load(i8* %a, i32 1)

define i8 @atomic8_load_monotonic(i8* %a) nounwind uwtable {
entry:
  %0 = load atomic i8* %a monotonic, align 1
  ret i8 %0
}
; CHECK: atomic8_load_monotonic
; CHECK: call i8 @__tsan_atomic8_load(i8* %a, i32 1)

define i8 @atomic8_load_acquire(i8* %a) nounwind uwtable {
entry:
  %0 = load atomic i8* %a acquire, align 1
  ret i8 %0
}
; CHECK: atomic8_load_acquire
; CHECK: call i8 @__tsan_atomic8_load(i8* %a, i32 4)

define i8 @atomic8_load_seq_cst(i8* %a) nounwind uwtable {
entry:
  %0 = load atomic i8* %a seq_cst, align 1
  ret i8 %0
}
; CHECK: atomic8_load_seq_cst
; CHECK: call i8 @__tsan_atomic8_load(i8* %a, i32 32)

define void @atomic8_store_unordered(i8* %a) nounwind uwtable {
entry:
  store atomic i8 0, i8* %a unordered, align 1
  ret void
}
; CHECK: atomic8_store_unordered
; CHECK: call void @__tsan_atomic8_store(i8* %a, i8 0, i32 1)

define void @atomic8_store_monotonic(i8* %a) nounwind uwtable {
entry:
  store atomic i8 0, i8* %a monotonic, align 1
  ret void
}
; CHECK: atomic8_store_monotonic
; CHECK: call void @__tsan_atomic8_store(i8* %a, i8 0, i32 1)

define void @atomic8_store_release(i8* %a) nounwind uwtable {
entry:
  store atomic i8 0, i8* %a release, align 1
  ret void
}
; CHECK: atomic8_store_release
; CHECK: call void @__tsan_atomic8_store(i8* %a, i8 0, i32 8)

define void @atomic8_store_seq_cst(i8* %a) nounwind uwtable {
entry:
  store atomic i8 0, i8* %a seq_cst, align 1
  ret void
}
; CHECK: atomic8_store_seq_cst
; CHECK: call void @__tsan_atomic8_store(i8* %a, i8 0, i32 32)

define i16 @atomic16_load_unordered(i16* %a) nounwind uwtable {
entry:
  %0 = load atomic i16* %a unordered, align 2
  ret i16 %0
}
; CHECK: atomic16_load_unordered
; CHECK: call i16 @__tsan_atomic16_load(i16* %a, i32 1)

define i16 @atomic16_load_monotonic(i16* %a) nounwind uwtable {
entry:
  %0 = load atomic i16* %a monotonic, align 2
  ret i16 %0
}
; CHECK: atomic16_load_monotonic
; CHECK: call i16 @__tsan_atomic16_load(i16* %a, i32 1)

define i16 @atomic16_load_acquire(i16* %a) nounwind uwtable {
entry:
  %0 = load atomic i16* %a acquire, align 2
  ret i16 %0
}
; CHECK: atomic16_load_acquire
; CHECK: call i16 @__tsan_atomic16_load(i16* %a, i32 4)

define i16 @atomic16_load_seq_cst(i16* %a) nounwind uwtable {
entry:
  %0 = load atomic i16* %a seq_cst, align 2
  ret i16 %0
}
; CHECK: atomic16_load_seq_cst
; CHECK: call i16 @__tsan_atomic16_load(i16* %a, i32 32)

define void @atomic16_store_unordered(i16* %a) nounwind uwtable {
entry:
  store atomic i16 0, i16* %a unordered, align 2
  ret void
}
; CHECK: atomic16_store_unordered
; CHECK: call void @__tsan_atomic16_store(i16* %a, i16 0, i32 1)

define void @atomic16_store_monotonic(i16* %a) nounwind uwtable {
entry:
  store atomic i16 0, i16* %a monotonic, align 2
  ret void
}
; CHECK: atomic16_store_monotonic
; CHECK: call void @__tsan_atomic16_store(i16* %a, i16 0, i32 1)

define void @atomic16_store_release(i16* %a) nounwind uwtable {
entry:
  store atomic i16 0, i16* %a release, align 2
  ret void
}
; CHECK: atomic16_store_release
; CHECK: call void @__tsan_atomic16_store(i16* %a, i16 0, i32 8)

define void @atomic16_store_seq_cst(i16* %a) nounwind uwtable {
entry:
  store atomic i16 0, i16* %a seq_cst, align 2
  ret void
}
; CHECK: atomic16_store_seq_cst
; CHECK: call void @__tsan_atomic16_store(i16* %a, i16 0, i32 32)

define i32 @atomic32_load_unordered(i32* %a) nounwind uwtable {
entry:
  %0 = load atomic i32* %a unordered, align 4
  ret i32 %0
}
; CHECK: atomic32_load_unordered
; CHECK: call i32 @__tsan_atomic32_load(i32* %a, i32 1)

define i32 @atomic32_load_monotonic(i32* %a) nounwind uwtable {
entry:
  %0 = load atomic i32* %a monotonic, align 4
  ret i32 %0
}
; CHECK: atomic32_load_monotonic
; CHECK: call i32 @__tsan_atomic32_load(i32* %a, i32 1)

define i32 @atomic32_load_acquire(i32* %a) nounwind uwtable {
entry:
  %0 = load atomic i32* %a acquire, align 4
  ret i32 %0
}
; CHECK: atomic32_load_acquire
; CHECK: call i32 @__tsan_atomic32_load(i32* %a, i32 4)

define i32 @atomic32_load_seq_cst(i32* %a) nounwind uwtable {
entry:
  %0 = load atomic i32* %a seq_cst, align 4
  ret i32 %0
}
; CHECK: atomic32_load_seq_cst
; CHECK: call i32 @__tsan_atomic32_load(i32* %a, i32 32)

define void @atomic32_store_unordered(i32* %a) nounwind uwtable {
entry:
  store atomic i32 0, i32* %a unordered, align 4
  ret void
}
; CHECK: atomic32_store_unordered
; CHECK: call void @__tsan_atomic32_store(i32* %a, i32 0, i32 1)

define void @atomic32_store_monotonic(i32* %a) nounwind uwtable {
entry:
  store atomic i32 0, i32* %a monotonic, align 4
  ret void
}
; CHECK: atomic32_store_monotonic
; CHECK: call void @__tsan_atomic32_store(i32* %a, i32 0, i32 1)

define void @atomic32_store_release(i32* %a) nounwind uwtable {
entry:
  store atomic i32 0, i32* %a release, align 4
  ret void
}
; CHECK: atomic32_store_release
; CHECK: call void @__tsan_atomic32_store(i32* %a, i32 0, i32 8)

define void @atomic32_store_seq_cst(i32* %a) nounwind uwtable {
entry:
  store atomic i32 0, i32* %a seq_cst, align 4
  ret void
}
; CHECK: atomic32_store_seq_cst
; CHECK: call void @__tsan_atomic32_store(i32* %a, i32 0, i32 32)

define i64 @atomic64_load_unordered(i64* %a) nounwind uwtable {
entry:
  %0 = load atomic i64* %a unordered, align 8
  ret i64 %0
}
; CHECK: atomic64_load_unordered
; CHECK: call i64 @__tsan_atomic64_load(i64* %a, i32 1)

define i64 @atomic64_load_monotonic(i64* %a) nounwind uwtable {
entry:
  %0 = load atomic i64* %a monotonic, align 8
  ret i64 %0
}
; CHECK: atomic64_load_monotonic
; CHECK: call i64 @__tsan_atomic64_load(i64* %a, i32 1)

define i64 @atomic64_load_acquire(i64* %a) nounwind uwtable {
entry:
  %0 = load atomic i64* %a acquire, align 8
  ret i64 %0
}
; CHECK: atomic64_load_acquire
; CHECK: call i64 @__tsan_atomic64_load(i64* %a, i32 4)

define i64 @atomic64_load_seq_cst(i64* %a) nounwind uwtable {
entry:
  %0 = load atomic i64* %a seq_cst, align 8
  ret i64 %0
}
; CHECK: atomic64_load_seq_cst
; CHECK: call i64 @__tsan_atomic64_load(i64* %a, i32 32)

define void @atomic64_store_unordered(i64* %a) nounwind uwtable {
entry:
  store atomic i64 0, i64* %a unordered, align 8
  ret void
}
; CHECK: atomic64_store_unordered
; CHECK: call void @__tsan_atomic64_store(i64* %a, i64 0, i32 1)

define void @atomic64_store_monotonic(i64* %a) nounwind uwtable {
entry:
  store atomic i64 0, i64* %a monotonic, align 8
  ret void
}
; CHECK: atomic64_store_monotonic
; CHECK: call void @__tsan_atomic64_store(i64* %a, i64 0, i32 1)

define void @atomic64_store_release(i64* %a) nounwind uwtable {
entry:
  store atomic i64 0, i64* %a release, align 8
  ret void
}
; CHECK: atomic64_store_release
; CHECK: call void @__tsan_atomic64_store(i64* %a, i64 0, i32 8)

define void @atomic64_store_seq_cst(i64* %a) nounwind uwtable {
entry:
  store atomic i64 0, i64* %a seq_cst, align 8
  ret void
}
; CHECK: atomic64_store_seq_cst
; CHECK: call void @__tsan_atomic64_store(i64* %a, i64 0, i32 32)

define i128 @atomic128_load_unordered(i128* %a) nounwind uwtable {
entry:
  %0 = load atomic i128* %a unordered, align 16
  ret i128 %0
}
; CHECK: atomic128_load_unordered
; CHECK: call i128 @__tsan_atomic128_load(i128* %a, i32 1)

define i128 @atomic128_load_monotonic(i128* %a) nounwind uwtable {
entry:
  %0 = load atomic i128* %a monotonic, align 16
  ret i128 %0
}
; CHECK: atomic128_load_monotonic
; CHECK: call i128 @__tsan_atomic128_load(i128* %a, i32 1)

define i128 @atomic128_load_acquire(i128* %a) nounwind uwtable {
entry:
  %0 = load atomic i128* %a acquire, align 16
  ret i128 %0
}
; CHECK: atomic128_load_acquire
; CHECK: call i128 @__tsan_atomic128_load(i128* %a, i32 4)

define i128 @atomic128_load_seq_cst(i128* %a) nounwind uwtable {
entry:
  %0 = load atomic i128* %a seq_cst, align 16
  ret i128 %0
}
; CHECK: atomic128_load_seq_cst
; CHECK: call i128 @__tsan_atomic128_load(i128* %a, i32 32)

define void @atomic128_store_unordered(i128* %a) nounwind uwtable {
entry:
  store atomic i128 0, i128* %a unordered, align 16
  ret void
}
; CHECK: atomic128_store_unordered
; CHECK: call void @__tsan_atomic128_store(i128* %a, i128 0, i32 1)

define void @atomic128_store_monotonic(i128* %a) nounwind uwtable {
entry:
  store atomic i128 0, i128* %a monotonic, align 16
  ret void
}
; CHECK: atomic128_store_monotonic
; CHECK: call void @__tsan_atomic128_store(i128* %a, i128 0, i32 1)

define void @atomic128_store_release(i128* %a) nounwind uwtable {
entry:
  store atomic i128 0, i128* %a release, align 16
  ret void
}
; CHECK: atomic128_store_release
; CHECK: call void @__tsan_atomic128_store(i128* %a, i128 0, i32 8)

define void @atomic128_store_seq_cst(i128* %a) nounwind uwtable {
entry:
  store atomic i128 0, i128* %a seq_cst, align 16
  ret void
}
; CHECK: atomic128_store_seq_cst
; CHECK: call void @__tsan_atomic128_store(i128* %a, i128 0, i32 32)