; RUN: llvm-as -o - %s | llc -march=cellspu > %t1.s
; RUN: grep and %t1.s | count 232
; RUN: grep andc %t1.s | count 85
; RUN: grep andi %t1.s | count 36
; RUN: grep andhi %t1.s | count 30
; RUN: grep andbi %t1.s | count 4
target datalayout = "E-p:32:32:128-f64:64:128-f32:32:128-i64:32:128-i32:32:128-i16:16:128-i8:8:128-i1:8:128-a0:0:128-v128:128:128-s0:128:128"
target triple = "spu"

; AND instruction generation:
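; Each type below gets a pair of functions that differ only in operand order,
; so the test also checks that the AND pattern is matched commutatively.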
define <4 x i32> @and_v4i32_1(<4 x i32> %arg1, <4 x i32> %arg2) {
        %A = and <4 x i32> %arg1, %arg2
        ret <4 x i32> %A
}

define <4 x i32> @and_v4i32_2(<4 x i32> %arg1, <4 x i32> %arg2) {
        %A = and <4 x i32> %arg2, %arg1
        ret <4 x i32> %A
}

define <8 x i16> @and_v8i16_1(<8 x i16> %arg1, <8 x i16> %arg2) {
        %A = and <8 x i16> %arg1, %arg2
        ret <8 x i16> %A
}

define <8 x i16> @and_v8i16_2(<8 x i16> %arg1, <8 x i16> %arg2) {
        %A = and <8 x i16> %arg2, %arg1
        ret <8 x i16> %A
}

define <16 x i8> @and_v16i8_1(<16 x i8> %arg1, <16 x i8> %arg2) {
        %A = and <16 x i8> %arg2, %arg1
        ret <16 x i8> %A
}

define <16 x i8> @and_v16i8_2(<16 x i8> %arg1, <16 x i8> %arg2) {
        %A = and <16 x i8> %arg1, %arg2
        ret <16 x i8> %A
}

define i32 @and_i32_1(i32 %arg1, i32 %arg2) {
        %A = and i32 %arg2, %arg1
        ret i32 %A
}

define i32 @and_i32_2(i32 %arg1, i32 %arg2) {
        %A = and i32 %arg1, %arg2
        ret i32 %A
}

define i16 @and_i16_1(i16 %arg1, i16 %arg2) {
        %A = and i16 %arg2, %arg1
        ret i16 %A
}

define i16 @and_i16_2(i16 %arg1, i16 %arg2) {
        %A = and i16 %arg1, %arg2
        ret i16 %A
}

define i8 @and_i8_1(i8 %arg1, i8 %arg2) {
        %A = and i8 %arg2, %arg1
        ret i8 %A
}

define i8 @and_i8_2(i8 %arg1, i8 %arg2) {
        %A = and i8 %arg1, %arg2
        ret i8 %A
}

; ANDC instruction generation:
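; ANDC computes "a AND (NOT b)". The xor with all-ones expresses the NOT, and
; these functions check that the xor feeding the and is folded into a single
; ANDC no matter which operand is complemented or how the and is ordered.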
define <4 x i32> @andc_v4i32_1(<4 x i32> %arg1, <4 x i32> %arg2) {
        %A = xor <4 x i32> %arg2, < i32 -1, i32 -1, i32 -1, i32 -1 >
        %B = and <4 x i32> %arg1, %A
        ret <4 x i32> %B
}

define <4 x i32> @andc_v4i32_2(<4 x i32> %arg1, <4 x i32> %arg2) {
        %A = xor <4 x i32> %arg1, < i32 -1, i32 -1, i32 -1, i32 -1 >
        %B = and <4 x i32> %arg2, %A
        ret <4 x i32> %B
}

define <4 x i32> @andc_v4i32_3(<4 x i32> %arg1, <4 x i32> %arg2) {
        %A = xor <4 x i32> %arg1, < i32 -1, i32 -1, i32 -1, i32 -1 >
        %B = and <4 x i32> %A, %arg2
        ret <4 x i32> %B
}

define <8 x i16> @andc_v8i16_1(<8 x i16> %arg1, <8 x i16> %arg2) {
        %A = xor <8 x i16> %arg2, < i16 -1, i16 -1, i16 -1, i16 -1,
                                    i16 -1, i16 -1, i16 -1, i16 -1 >
        %B = and <8 x i16> %arg1, %A
        ret <8 x i16> %B
}

define <8 x i16> @andc_v8i16_2(<8 x i16> %arg1, <8 x i16> %arg2) {
        %A = xor <8 x i16> %arg1, < i16 -1, i16 -1, i16 -1, i16 -1,
                                    i16 -1, i16 -1, i16 -1, i16 -1 >
        %B = and <8 x i16> %arg2, %A
        ret <8 x i16> %B
}

define <16 x i8> @andc_v16i8_1(<16 x i8> %arg1, <16 x i8> %arg2) {
        %A = xor <16 x i8> %arg1, < i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1,
                                    i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1,
                                    i8 -1, i8 -1, i8 -1, i8 -1 >
        %B = and <16 x i8> %arg2, %A
        ret <16 x i8> %B
}

define <16 x i8> @andc_v16i8_2(<16 x i8> %arg1, <16 x i8> %arg2) {
        %A = xor <16 x i8> %arg2, < i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1,
                                    i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1,
                                    i8 -1, i8 -1, i8 -1, i8 -1 >
        %B = and <16 x i8> %arg1, %A
        ret <16 x i8> %B
}

define <16 x i8> @andc_v16i8_3(<16 x i8> %arg1, <16 x i8> %arg2) {
        %A = xor <16 x i8> %arg2, < i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1,
                                    i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1,
                                    i8 -1, i8 -1, i8 -1, i8 -1 >
        %B = and <16 x i8> %A, %arg1
        ret <16 x i8> %B
}

define i32 @andc_i32_1(i32 %arg1, i32 %arg2) {
        %A = xor i32 %arg2, -1
        %B = and i32 %A, %arg1
        ret i32 %B
}

define i32 @andc_i32_2(i32 %arg1, i32 %arg2) {
        %A = xor i32 %arg1, -1
        %B = and i32 %A, %arg2
        ret i32 %B
}

define i32 @andc_i32_3(i32 %arg1, i32 %arg2) {
        %A = xor i32 %arg2, -1
        %B = and i32 %arg1, %A
        ret i32 %B
}

define i16 @andc_i16_1(i16 %arg1, i16 %arg2) {
        %A = xor i16 %arg2, -1
        %B = and i16 %A, %arg1
        ret i16 %B
}

define i16 @andc_i16_2(i16 %arg1, i16 %arg2) {
        %A = xor i16 %arg1, -1
        %B = and i16 %A, %arg2
        ret i16 %B
}

define i16 @andc_i16_3(i16 %arg1, i16 %arg2) {
        %A = xor i16 %arg2, -1
        %B = and i16 %arg1, %A
        ret i16 %B
}

define i8 @andc_i8_1(i8 %arg1, i8 %arg2) {
        %A = xor i8 %arg2, -1
        %B = and i8 %A, %arg1
        ret i8 %B
}

define i8 @andc_i8_2(i8 %arg1, i8 %arg2) {
        %A = xor i8 %arg1, -1
        %B = and i8 %A, %arg2
        ret i8 %B
}

define i8 @andc_i8_3(i8 %arg1, i8 %arg2) {
        %A = xor i8 %arg2, -1
        %B = and i8 %arg1, %A
        ret i8 %B
}

; ANDI instruction generation (i32 data type):
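; ANDI folds the constant into a 10-bit signed immediate field; the constants
; below sit at and inside its bounds (511 and -512 are the extremes), so none
; of them should require a separate constant load.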
define <4 x i32> @andi_v4i32_1(<4 x i32> %in) {
        %tmp2 = and <4 x i32> %in, < i32 511, i32 511, i32 511, i32 511 >
        ret <4 x i32> %tmp2
}

define <4 x i32> @andi_v4i32_2(<4 x i32> %in) {
        %tmp2 = and <4 x i32> %in, < i32 510, i32 510, i32 510, i32 510 >
        ret <4 x i32> %tmp2
}

define <4 x i32> @andi_v4i32_3(<4 x i32> %in) {
        %tmp2 = and <4 x i32> %in, < i32 -1, i32 -1, i32 -1, i32 -1 >
        ret <4 x i32> %tmp2
}

define <4 x i32> @andi_v4i32_4(<4 x i32> %in) {
        %tmp2 = and <4 x i32> %in, < i32 -512, i32 -512, i32 -512, i32 -512 >
        ret <4 x i32> %tmp2
}

define i32 @andi_u32(i32 zeroext %in) zeroext {
        %tmp37 = and i32 %in, 37
        ret i32 %tmp37
}

define i32 @andi_i32(i32 signext %in) signext {
        %tmp38 = and i32 %in, 37
        ret i32 %tmp38
}

define i32 @andi_i32_1(i32 %in) {
        %tmp37 = and i32 %in, 37
        ret i32 %tmp37
}

; ANDHI instruction generation (i16 data type):
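; ANDHI is the halfword form of the immediate AND, applied per i16 element;
; the same boundary constants (511, -512) are exercised as in the i32 cases.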
define <8 x i16> @andhi_v8i16_1(<8 x i16> %in) {
        %tmp2 = and <8 x i16> %in, < i16 511, i16 511, i16 511, i16 511,
                                     i16 511, i16 511, i16 511, i16 511 >
        ret <8 x i16> %tmp2
}

define <8 x i16> @andhi_v8i16_2(<8 x i16> %in) {
        %tmp2 = and <8 x i16> %in, < i16 510, i16 510, i16 510, i16 510,
                                     i16 510, i16 510, i16 510, i16 510 >
        ret <8 x i16> %tmp2
}

define <8 x i16> @andhi_v8i16_3(<8 x i16> %in) {
        %tmp2 = and <8 x i16> %in, < i16 -1, i16 -1, i16 -1, i16 -1,
                                     i16 -1, i16 -1, i16 -1, i16 -1 >
        ret <8 x i16> %tmp2
}

define <8 x i16> @andhi_v8i16_4(<8 x i16> %in) {
        %tmp2 = and <8 x i16> %in, < i16 -512, i16 -512, i16 -512, i16 -512,
                                     i16 -512, i16 -512, i16 -512, i16 -512 >
        ret <8 x i16> %tmp2
}

define i16 @andhi_u16(i16 zeroext %in) zeroext {
        %tmp37 = and i16 %in, 37        ; <i16> [#uses=1]
        ret i16 %tmp37
}

define i16 @andhi_i16(i16 signext %in) signext {
        %tmp38 = and i16 %in, 37        ; <i16> [#uses=1]
        ret i16 %tmp38
}

; i8 data type (should be ANDBI if 8-bit registers were supported):
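; Four ANDBI instructions are expected in total, matching the RUN-line count:
; one for the v16i8 case and one for each of the three scalar i8 cases below.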
define <16 x i8> @and_v16i8(<16 x i8> %in) {
        ; ANDBI generated for vector types
        %tmp2 = and <16 x i8> %in, < i8 42, i8 42, i8 42, i8 42, i8 42, i8 42,
                                     i8 42, i8 42, i8 42, i8 42, i8 42, i8 42,
                                     i8 42, i8 42, i8 42, i8 42 >
        ret <16 x i8> %tmp2
}

define i8 @and_u8(i8 zeroext %in) zeroext {
        ; ANDBI generated:
        %tmp37 = and i8 %in, 37
        ret i8 %tmp37
}

define i8 @and_sext8(i8 signext %in) signext {
        ; ANDBI generated
        %tmp38 = and i8 %in, 37
        ret i8 %tmp38
}

define i8 @and_i8(i8 %in) {
        ; ANDBI generated
        %tmp38 = and i8 %in, 205
        ret i8 %tmp38
}