; RUN: llc < %s -O0 -march=x86-64 -mcpu=corei7 -verify-machineinstrs | FileCheck %s --check-prefix X64
; RUN: llc < %s -O0 -march=x86 -mcpu=corei7 -verify-machineinstrs | FileCheck %s --check-prefix X32
; RUN: llc < %s -O0 -march=x86 -mcpu=corei7 -mattr=-cmov -verify-machineinstrs | FileCheck %s --check-prefix NOCMOV

@sc32 = external global i32

define void @atomic_fetch_add32() nounwind {
; X64: atomic_fetch_add32
; X32: atomic_fetch_add32
entry:
; 32-bit
  %t1 = atomicrmw add i32* @sc32, i32 1 acquire
; X64: lock
; X64: incl
; X32: lock
; X32: incl
  %t2 = atomicrmw add i32* @sc32, i32 3 acquire
; X64: lock
; X64: addl $3
; X32: lock
; X32: addl $3
  %t3 = atomicrmw add i32* @sc32, i32 5 acquire
; X64: lock
; X64: xaddl
; X32: lock
; X32: xaddl
  %t4 = atomicrmw add i32* @sc32, i32 %t3 acquire
; X64: lock
; X64: addl
; X32: lock
; X32: addl
  ret void
; X64: ret
; X32: ret
}
36
define void @atomic_fetch_sub32() nounwind {
; X64: atomic_fetch_sub32
; X32: atomic_fetch_sub32
  %t1 = atomicrmw sub i32* @sc32, i32 1 acquire
; X64: lock
; X64: decl
; X32: lock
; X32: decl
  %t2 = atomicrmw sub i32* @sc32, i32 3 acquire
; X64: lock
; X64: subl $3
; X32: lock
; X32: subl $3
  %t3 = atomicrmw sub i32* @sc32, i32 5 acquire
; X64: lock
; X64: xaddl
; X32: lock
; X32: xaddl
  %t4 = atomicrmw sub i32* @sc32, i32 %t3 acquire
; X64: lock
; X64: subl
; X32: lock
; X32: subl
  ret void
; X64: ret
; X32: ret
}
64
define void @atomic_fetch_and32() nounwind {
; X64: atomic_fetch_and32
; X32: atomic_fetch_and32
  %t1 = atomicrmw and i32* @sc32, i32 3 acquire
; X64: lock
; X64: andl $3
; X32: lock
; X32: andl $3
  %t2 = atomicrmw and i32* @sc32, i32 5 acquire
; X64: andl
; X64: lock
; X64: cmpxchgl
; X32: andl
; X32: lock
; X32: cmpxchgl
  %t3 = atomicrmw and i32* @sc32, i32 %t2 acquire
; X64: lock
; X64: andl
; X32: lock
; X32: andl
  ret void
; X64: ret
; X32: ret
}
89
define void @atomic_fetch_or32() nounwind {
; X64: atomic_fetch_or32
; X32: atomic_fetch_or32
  %t1 = atomicrmw or i32* @sc32, i32 3 acquire
; X64: lock
; X64: orl $3
; X32: lock
; X32: orl $3
  %t2 = atomicrmw or i32* @sc32, i32 5 acquire
; X64: orl
; X64: lock
; X64: cmpxchgl
; X32: orl
; X32: lock
; X32: cmpxchgl
  %t3 = atomicrmw or i32* @sc32, i32 %t2 acquire
; X64: lock
; X64: orl
; X32: lock
; X32: orl
  ret void
; X64: ret
; X32: ret
}
114
define void @atomic_fetch_xor32() nounwind {
; X64: atomic_fetch_xor32
; X32: atomic_fetch_xor32
  %t1 = atomicrmw xor i32* @sc32, i32 3 acquire
; X64: lock
; X64: xorl $3
; X32: lock
; X32: xorl $3
  %t2 = atomicrmw xor i32* @sc32, i32 5 acquire
; X64: xorl
; X64: lock
; X64: cmpxchgl
; X32: xorl
; X32: lock
; X32: cmpxchgl
  %t3 = atomicrmw xor i32* @sc32, i32 %t2 acquire
; X64: lock
; X64: xorl
; X32: lock
; X32: xorl
  ret void
; X64: ret
; X32: ret
}
139
define void @atomic_fetch_nand32(i32 %x) nounwind {
; X64: atomic_fetch_nand32
; X32: atomic_fetch_nand32
  %t1 = atomicrmw nand i32* @sc32, i32 %x acquire
; X64: andl
; X64: notl
; X64: lock
; X64: cmpxchgl
; X32: andl
; X32: notl
; X32: lock
; X32: cmpxchgl
  ret void
; X64: ret
; X32: ret
}
156
define void @atomic_fetch_max32(i32 %x) nounwind {
  %t1 = atomicrmw max i32* @sc32, i32 %x acquire
; X64: cmpl
; X64: cmov
; X64: lock
; X64: cmpxchgl

; X32: cmpl
; X32: cmov
; X32: lock
; X32: cmpxchgl

; NOCMOV: cmpl
; NOCMOV: jl
; NOCMOV: lock
; NOCMOV: cmpxchgl
  ret void
; X64: ret
; X32: ret
; NOCMOV: ret
}
178
define void @atomic_fetch_min32(i32 %x) nounwind {
  %t1 = atomicrmw min i32* @sc32, i32 %x acquire
; X64: cmpl
; X64: cmov
; X64: lock
; X64: cmpxchgl

; X32: cmpl
; X32: cmov
; X32: lock
; X32: cmpxchgl

; NOCMOV: cmpl
; NOCMOV: jg
; NOCMOV: lock
; NOCMOV: cmpxchgl
  ret void
; X64: ret
; X32: ret
; NOCMOV: ret
}
200
define void @atomic_fetch_umax32(i32 %x) nounwind {
  %t1 = atomicrmw umax i32* @sc32, i32 %x acquire
; X64: cmpl
; X64: cmov
; X64: lock
; X64: cmpxchgl

; X32: cmpl
; X32: cmov
; X32: lock
; X32: cmpxchgl

; NOCMOV: cmpl
; NOCMOV: jb
; NOCMOV: lock
; NOCMOV: cmpxchgl
  ret void
; X64: ret
; X32: ret
; NOCMOV: ret
}
222
define void @atomic_fetch_umin32(i32 %x) nounwind {
  %t1 = atomicrmw umin i32* @sc32, i32 %x acquire
; X64: cmpl
; X64: cmov
; X64: lock
; X64: cmpxchgl

; X32: cmpl
; X32: cmov
; X32: lock
; X32: cmpxchgl

; NOCMOV: cmpl
; NOCMOV: ja
; NOCMOV: lock
; NOCMOV: cmpxchgl
  ret void
; X64: ret
; X32: ret
; NOCMOV: ret
}
244
define void @atomic_fetch_cmpxchg32() nounwind {
  %t1 = cmpxchg i32* @sc32, i32 0, i32 1 acquire acquire
; X64: lock
; X64: cmpxchgl
; X32: lock
; X32: cmpxchgl
  ret void
; X64: ret
; X32: ret
}
255
define void @atomic_fetch_store32(i32 %x) nounwind {
  store atomic i32 %x, i32* @sc32 release, align 4
; X64-NOT: lock
; X64: movl
; X32-NOT: lock
; X32: movl
  ret void
; X64: ret
; X32: ret
}
266
define void @atomic_fetch_swap32(i32 %x) nounwind {
  %t1 = atomicrmw xchg i32* @sc32, i32 %x acquire
; X64-NOT: lock
; X64: xchgl
; X32-NOT: lock
; X32: xchgl
  ret void
; X64: ret
; X32: ret
}