; RUN: llc < %s -O0 -march=x86-64 -mcpu=corei7 -verify-machineinstrs | FileCheck %s --check-prefix X64
; RUN: llc < %s -O0 -march=x86 -mcpu=corei7 -verify-machineinstrs | FileCheck %s --check-prefix X32

@sc32 = external global i32

; atomicrmw add lowering: add-by-1 with unused result folds to "lock incl",
; other dead-result immediates to "lock addl"; %t3's result is live (fed to
; %t4), so it must use "lock xaddl" to recover the old value.
define void @atomic_fetch_add32() nounwind {
; X64: atomic_fetch_add32
; X32: atomic_fetch_add32
entry:
; 32-bit
  %t1 = atomicrmw add i32* @sc32, i32 1 acquire
; X64: lock
; X64: incl
; X32: lock
; X32: incl
  %t2 = atomicrmw add i32* @sc32, i32 3 acquire
; X64: lock
; X64: addl $3
; X32: lock
; X32: addl $3
  %t3 = atomicrmw add i32* @sc32, i32 5 acquire
; X64: lock
; X64: xaddl
; X32: lock
; X32: xaddl
  %t4 = atomicrmw add i32* @sc32, i32 %t3 acquire
; X64: lock
; X64: addl
; X32: lock
; X32: addl
  ret void
; X64: ret
; X32: ret
}
35
; atomicrmw sub lowering: mirror of the add cases — sub-by-1 with dead
; result folds to "lock decl", dead-result immediates to "lock subl", and
; the live-result %t3 uses "lock xaddl" (with a negated operand).
define void @atomic_fetch_sub32() nounwind {
; X64: atomic_fetch_sub32
; X32: atomic_fetch_sub32
  %t1 = atomicrmw sub i32* @sc32, i32 1 acquire
; X64: lock
; X64: decl
; X32: lock
; X32: decl
  %t2 = atomicrmw sub i32* @sc32, i32 3 acquire
; X64: lock
; X64: subl $3
; X32: lock
; X32: subl $3
  %t3 = atomicrmw sub i32* @sc32, i32 5 acquire
; X64: lock
; X64: xaddl
; X32: lock
; X32: xaddl
  %t4 = atomicrmw sub i32* @sc32, i32 %t3 acquire
; X64: lock
; X64: subl
; X32: lock
; X32: subl
  ret void
; X64: ret
; X32: ret
}
63
; atomicrmw and lowering: dead-result op folds to "lock andl"; %t2's result
; is live (fed to %t3), and there is no xadd-style AND, so it must expand to
; an andl + "lock cmpxchgl" loop.
define void @atomic_fetch_and32() nounwind {
; X64: atomic_fetch_and32
; X32: atomic_fetch_and32
  %t1 = atomicrmw and i32* @sc32, i32 3 acquire
; X64: lock
; X64: andl $3
; X32: lock
; X32: andl $3
  %t2 = atomicrmw and i32* @sc32, i32 5 acquire
; X64: andl
; X64: lock
; X64: cmpxchgl
; X32: andl
; X32: lock
; X32: cmpxchgl
  %t3 = atomicrmw and i32* @sc32, i32 %t2 acquire
; X64: lock
; X64: andl
; X32: lock
; X32: andl
  ret void
; X64: ret
; X32: ret
}
88
; atomicrmw or lowering: dead-result op folds to "lock orl"; the live-result
; %t2 expands to an orl + "lock cmpxchgl" loop (no single-instruction
; fetch-or on x86).
define void @atomic_fetch_or32() nounwind {
; X64: atomic_fetch_or32
; X32: atomic_fetch_or32
  %t1 = atomicrmw or i32* @sc32, i32 3 acquire
; X64: lock
; X64: orl $3
; X32: lock
; X32: orl $3
  %t2 = atomicrmw or i32* @sc32, i32 5 acquire
; X64: orl
; X64: lock
; X64: cmpxchgl
; X32: orl
; X32: lock
; X32: cmpxchgl
  %t3 = atomicrmw or i32* @sc32, i32 %t2 acquire
; X64: lock
; X64: orl
; X32: lock
; X32: orl
  ret void
; X64: ret
; X32: ret
}
113
; atomicrmw xor lowering: dead-result op folds to "lock xorl"; the
; live-result %t2 expands to a xorl + "lock cmpxchgl" loop.
define void @atomic_fetch_xor32() nounwind {
; X64: atomic_fetch_xor32
; X32: atomic_fetch_xor32
  %t1 = atomicrmw xor i32* @sc32, i32 3 acquire
; X64: lock
; X64: xorl $3
; X32: lock
; X32: xorl $3
  %t2 = atomicrmw xor i32* @sc32, i32 5 acquire
; X64: xorl
; X64: lock
; X64: cmpxchgl
; X32: xorl
; X32: lock
; X32: cmpxchgl
  %t3 = atomicrmw xor i32* @sc32, i32 %t2 acquire
; X64: lock
; X64: xorl
; X32: lock
; X32: xorl
  ret void
; X64: ret
; X32: ret
}
138
; atomicrmw nand has no x86 instruction at all, so it always expands to an
; andl + notl computation inside a "lock cmpxchgl" loop.
define void @atomic_fetch_nand32(i32 %x) nounwind {
; X64: atomic_fetch_nand32
; X32: atomic_fetch_nand32
  %t1 = atomicrmw nand i32* @sc32, i32 %x acquire
; X64: andl
; X64: notl
; X64: lock
; X64: cmpxchgl
; X32: andl
; X32: notl
; X32: lock
; X32: cmpxchgl
  ret void
; X64: ret
; X32: ret
}
155
; atomicrmw max expands to a compare/cmov selection of the larger value
; inside a "lock cmpxchgl" loop.
define void @atomic_fetch_max32(i32 %x) nounwind {
  %t1 = atomicrmw max i32* @sc32, i32 %x acquire
; X64: cmpl
; X64: cmov
; X64: lock
; X64: cmpxchgl

; X32: cmpl
; X32: cmov
; X32: lock
; X32: cmpxchgl
  ret void
; X64: ret
; X32: ret
}
171
; atomicrmw min expands to a compare/cmov selection of the smaller value
; inside a "lock cmpxchgl" loop.
define void @atomic_fetch_min32(i32 %x) nounwind {
  %t1 = atomicrmw min i32* @sc32, i32 %x acquire
; X64: cmpl
; X64: cmov
; X64: lock
; X64: cmpxchgl

; X32: cmpl
; X32: cmov
; X32: lock
; X32: cmpxchgl
  ret void
; X64: ret
; X32: ret
}
187
; atomicrmw umax (unsigned max) expands to a compare/cmov inside a
; "lock cmpxchgl" loop, same shape as the signed variant.
define void @atomic_fetch_umax32(i32 %x) nounwind {
  %t1 = atomicrmw umax i32* @sc32, i32 %x acquire
; X64: cmpl
; X64: cmov
; X64: lock
; X64: cmpxchgl

; X32: cmpl
; X32: cmov
; X32: lock
; X32: cmpxchgl
  ret void
; X64: ret
; X32: ret
}
203
; atomicrmw umin (unsigned min) expands to a compare/cmov inside a
; "lock cmpxchgl" loop.
define void @atomic_fetch_umin32(i32 %x) nounwind {
  %t1 = atomicrmw umin i32* @sc32, i32 %x acquire
; X64: cmpl
; X64: cmov
; X64: lock
; X64: cmpxchgl
; X32: cmpl
; X32: cmov
; X32: lock
; X32: cmpxchgl
  ret void
; X64: ret
; X32: ret
}
218
; cmpxchg lowers directly to "lock cmpxchgl".
; (Old pre-3.5 single-ordering cmpxchg syntax — intentional for this test.)
define void @atomic_fetch_cmpxchg32() nounwind {
  %t1 = cmpxchg i32* @sc32, i32 0, i32 1 acquire
; X64: lock
; X64: cmpxchgl
; X32: lock
; X32: cmpxchgl
  ret void
; X64: ret
; X32: ret
}
229
; A release atomic store on x86 is just a plain movl — no lock prefix
; allowed (the -NOT checks enforce that).
define void @atomic_fetch_store32(i32 %x) nounwind {
  store atomic i32 %x, i32* @sc32 release, align 4
; X64-NOT: lock
; X64: movl
; X32-NOT: lock
; X32: movl
  ret void
; X64: ret
; X32: ret
}
240
; atomicrmw xchg lowers to xchgl, which is implicitly locked on x86 —
; an explicit lock prefix must not be emitted (the -NOT checks).
define void @atomic_fetch_swap32(i32 %x) nounwind {
  %t1 = atomicrmw xchg i32* @sc32, i32 %x acquire
; X64-NOT: lock
; X64: xchgl
; X32-NOT: lock
; X32: xchgl
  ret void
; X64: ret
; X32: ret
}