blob: 94aa390fe9dc212b2e873bc506f13a19012d54a7 [file] [log] [blame]
//==- SPUInstrInfo.td - Describe the Cell SPU Instructions -*- tablegen -*-==//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
// Cell SPU Instructions:
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// TODO Items (not urgent today, but would be nice, low priority)
//
// ANDBI, ORBI: SPU constructs a 4-byte constant for these instructions by
// concatenating the byte argument b as "bbbb". Could recognize this bit
// pattern in 16-bit and 32-bit constants and reduce instruction count.
//===----------------------------------------------------------------------===//
19
20//===----------------------------------------------------------------------===//
21// Pseudo instructions:
22//===----------------------------------------------------------------------===//
23
// Call-sequence markers: pseudo instructions the target expands to adjust
// the stack frame around a call. Both read and write R1 (the stack
// pointer) and carry a control dependence so the scheduler cannot move
// them relative to the call they bracket. They emit only a comment.
let hasCtrlDep = 1, Defs = [R1], Uses = [R1] in {
  def ADJCALLSTACKDOWN : Pseudo<(outs), (ins u16imm:$amt),
                                "${:comment} ADJCALLSTACKDOWN",
                                [(callseq_start imm:$amt)]>;
  def ADJCALLSTACKUP : Pseudo<(outs), (ins u16imm:$amt),
                              "${:comment} ADJCALLSTACKUP",
                              [(callseq_end imm:$amt)]>;
}
32
33//===----------------------------------------------------------------------===//
34// DWARF debugging Pseudo Instructions
35//===----------------------------------------------------------------------===//
36
// DWARF_LOC: debug-information pseudo. Emits a ".loc file, line, col"
// directive into the assembly stream; generates no machine code.
def DWARF_LOC : Pseudo<(outs), (ins i32imm:$line, i32imm:$col, i32imm:$file),
                       "${:comment} .loc $file, $line, $col",
                       [(dwarf_loc (i32 imm:$line), (i32 imm:$col),
                                   (i32 imm:$file))]>;
41
//===----------------------------------------------------------------------===//
// Loads:
// NB: The ordering is actually important, since the instruction selection
// will try each of the instructions in sequence, i.e., the D-form first with
// the 10-bit displacement, then the A-form with the 16-bit displacement, and
// finally the X-form with the register-register addressing.
//===----------------------------------------------------------------------===//
49
// Quadword loads. Every SPU load transfers a full 16-byte quadword;
// scalar register classes (R8C/R16C/R32C/R64C, R32FP/R64FP, GPRC)
// receive their value in the quadword's preferred slot. One def per
// (addressing form x value type) so type inference selects correctly.
let isSimpleLoad = 1 in {
  // LQD: load quadword, D-form — 10-bit signed displacement + base reg.
  def LQDv16i8:
    RI10Form<0b00101100, (outs VECREG:$rT), (ins memri10:$src),
      "lqd\t$rT, $src", LoadStore,
      [(set (v16i8 VECREG:$rT), (load dform_addr:$src))]>;

  def LQDv8i16:
    RI10Form<0b00101100, (outs VECREG:$rT), (ins memri10:$src),
      "lqd\t$rT, $src", LoadStore,
      [(set (v8i16 VECREG:$rT), (load dform_addr:$src))]>;

  def LQDv4i32:
    RI10Form<0b00101100, (outs VECREG:$rT), (ins memri10:$src),
      "lqd\t$rT, $src", LoadStore,
      [(set (v4i32 VECREG:$rT), (load dform_addr:$src))]>;

  def LQDv2i64:
    RI10Form<0b00101100, (outs VECREG:$rT), (ins memri10:$src),
      "lqd\t$rT, $src", LoadStore,
      [(set (v2i64 VECREG:$rT), (load dform_addr:$src))]>;

  def LQDv4f32:
    RI10Form<0b00101100, (outs VECREG:$rT), (ins memri10:$src),
      "lqd\t$rT, $src", LoadStore,
      [(set (v4f32 VECREG:$rT), (load dform_addr:$src))]>;

  def LQDv2f64:
    RI10Form<0b00101100, (outs VECREG:$rT), (ins memri10:$src),
      "lqd\t$rT, $src", LoadStore,
      [(set (v2f64 VECREG:$rT), (load dform_addr:$src))]>;

  def LQDr128:
    RI10Form<0b00101100, (outs GPRC:$rT), (ins memri10:$src),
      "lqd\t$rT, $src", LoadStore,
      [(set GPRC:$rT, (load dform_addr:$src))]>;

  def LQDr64:
    RI10Form<0b00101100, (outs R64C:$rT), (ins memri10:$src),
      "lqd\t$rT, $src", LoadStore,
      [(set R64C:$rT, (load dform_addr:$src))]>;

  def LQDr32:
    RI10Form<0b00101100, (outs R32C:$rT), (ins memri10:$src),
      "lqd\t$rT, $src", LoadStore,
      [(set R32C:$rT, (load dform_addr:$src))]>;

  // Floating Point
  def LQDf32:
    RI10Form<0b00101100, (outs R32FP:$rT), (ins memri10:$src),
      "lqd\t$rT, $src", LoadStore,
      [(set R32FP:$rT, (load dform_addr:$src))]>;

  def LQDf64:
    RI10Form<0b00101100, (outs R64FP:$rT), (ins memri10:$src),
      "lqd\t$rT, $src", LoadStore,
      [(set R64FP:$rT, (load dform_addr:$src))]>;
  // END Floating Point

  def LQDr16:
    RI10Form<0b00101100, (outs R16C:$rT), (ins memri10:$src),
      "lqd\t$rT, $src", LoadStore,
      [(set R16C:$rT, (load dform_addr:$src))]>;

  def LQDr8:
    RI10Form<0b00101100, (outs R8C:$rT), (ins memri10:$src),
      "lqd\t$rT, $src", LoadStore,
      [(set R8C:$rT, (load dform_addr:$src))]>;

  // LQA: load quadword, A-form — absolute 16-bit local-store address
  // (the SPU local store is 256K, hence addr256k).
  def LQAv16i8:
    RI16Form<0b100001100, (outs VECREG:$rT), (ins addr256k:$src),
      "lqa\t$rT, $src", LoadStore,
      [(set (v16i8 VECREG:$rT), (load aform_addr:$src))]>;

  def LQAv8i16:
    RI16Form<0b100001100, (outs VECREG:$rT), (ins addr256k:$src),
      "lqa\t$rT, $src", LoadStore,
      [(set (v8i16 VECREG:$rT), (load aform_addr:$src))]>;

  def LQAv4i32:
    RI16Form<0b100001100, (outs VECREG:$rT), (ins addr256k:$src),
      "lqa\t$rT, $src", LoadStore,
      [(set (v4i32 VECREG:$rT), (load aform_addr:$src))]>;

  def LQAv2i64:
    RI16Form<0b100001100, (outs VECREG:$rT), (ins addr256k:$src),
      "lqa\t$rT, $src", LoadStore,
      [(set (v2i64 VECREG:$rT), (load aform_addr:$src))]>;

  def LQAv4f32:
    RI16Form<0b100001100, (outs VECREG:$rT), (ins addr256k:$src),
      "lqa\t$rT, $src", LoadStore,
      [(set (v4f32 VECREG:$rT), (load aform_addr:$src))]>;

  def LQAv2f64:
    RI16Form<0b100001100, (outs VECREG:$rT), (ins addr256k:$src),
      "lqa\t$rT, $src", LoadStore,
      [(set (v2f64 VECREG:$rT), (load aform_addr:$src))]>;

  def LQAr128:
    RI16Form<0b100001100, (outs GPRC:$rT), (ins addr256k:$src),
      "lqa\t$rT, $src", LoadStore,
      [(set GPRC:$rT, (load aform_addr:$src))]>;

  def LQAr64:
    RI16Form<0b100001100, (outs R64C:$rT), (ins addr256k:$src),
      "lqa\t$rT, $src", LoadStore,
      [(set R64C:$rT, (load aform_addr:$src))]>;

  def LQAr32:
    RI16Form<0b100001100, (outs R32C:$rT), (ins addr256k:$src),
      "lqa\t$rT, $src", LoadStore,
      [(set R32C:$rT, (load aform_addr:$src))]>;

  def LQAf32:
    RI16Form<0b100001100, (outs R32FP:$rT), (ins addr256k:$src),
      "lqa\t$rT, $src", LoadStore,
      [(set R32FP:$rT, (load aform_addr:$src))]>;

  def LQAf64:
    RI16Form<0b100001100, (outs R64FP:$rT), (ins addr256k:$src),
      "lqa\t$rT, $src", LoadStore,
      [(set R64FP:$rT, (load aform_addr:$src))]>;

  def LQAr16:
    RI16Form<0b100001100, (outs R16C:$rT), (ins addr256k:$src),
      "lqa\t$rT, $src", LoadStore,
      [(set R16C:$rT, (load aform_addr:$src))]>;

  def LQAr8:
    RI16Form<0b100001100, (outs R8C:$rT), (ins addr256k:$src),
      "lqa\t$rT, $src", LoadStore,
      [(set R8C:$rT, (load aform_addr:$src))]>;

  // LQX: load quadword, X-form — register + register effective address.
  def LQXv16i8:
    RRForm<0b00100011100, (outs VECREG:$rT), (ins memrr:$src),
      "lqx\t$rT, $src", LoadStore,
      [(set (v16i8 VECREG:$rT), (load xform_addr:$src))]>;

  def LQXv8i16:
    RRForm<0b00100011100, (outs VECREG:$rT), (ins memrr:$src),
      "lqx\t$rT, $src", LoadStore,
      [(set (v8i16 VECREG:$rT), (load xform_addr:$src))]>;

  def LQXv4i32:
    RRForm<0b00100011100, (outs VECREG:$rT), (ins memrr:$src),
      "lqx\t$rT, $src", LoadStore,
      [(set (v4i32 VECREG:$rT), (load xform_addr:$src))]>;

  def LQXv2i64:
    RRForm<0b00100011100, (outs VECREG:$rT), (ins memrr:$src),
      "lqx\t$rT, $src", LoadStore,
      [(set (v2i64 VECREG:$rT), (load xform_addr:$src))]>;

  def LQXv4f32:
    RRForm<0b00100011100, (outs VECREG:$rT), (ins memrr:$src),
      "lqx\t$rT, $src", LoadStore,
      [(set (v4f32 VECREG:$rT), (load xform_addr:$src))]>;

  def LQXv2f64:
    RRForm<0b00100011100, (outs VECREG:$rT), (ins memrr:$src),
      "lqx\t$rT, $src", LoadStore,
      [(set (v2f64 VECREG:$rT), (load xform_addr:$src))]>;

  def LQXr128:
    RRForm<0b00100011100, (outs GPRC:$rT), (ins memrr:$src),
      "lqx\t$rT, $src", LoadStore,
      [(set GPRC:$rT, (load xform_addr:$src))]>;

  def LQXr64:
    RRForm<0b00100011100, (outs R64C:$rT), (ins memrr:$src),
      "lqx\t$rT, $src", LoadStore,
      [(set R64C:$rT, (load xform_addr:$src))]>;

  def LQXr32:
    RRForm<0b00100011100, (outs R32C:$rT), (ins memrr:$src),
      "lqx\t$rT, $src", LoadStore,
      [(set R32C:$rT, (load xform_addr:$src))]>;

  def LQXf32:
    RRForm<0b00100011100, (outs R32FP:$rT), (ins memrr:$src),
      "lqx\t$rT, $src", LoadStore,
      [(set R32FP:$rT, (load xform_addr:$src))]>;

  def LQXf64:
    RRForm<0b00100011100, (outs R64FP:$rT), (ins memrr:$src),
      "lqx\t$rT, $src", LoadStore,
      [(set R64FP:$rT, (load xform_addr:$src))]>;

  def LQXr16:
    RRForm<0b00100011100, (outs R16C:$rT), (ins memrr:$src),
      "lqx\t$rT, $src", LoadStore,
      [(set R16C:$rT, (load xform_addr:$src))]>;

  def LQXr8:
    RRForm<0b00100011100, (outs R8C:$rT), (ins memrr:$src),
      "lqx\t$rT, $src", LoadStore,
      [(set R8C:$rT, (load xform_addr:$src))]>;

/* Load quadword, PC relative: Not much use at this point in time.
   Might be of use later for relocatable code.
  def LQR : RI16Form<0b111001100, (outs VECREG:$rT), (ins s16imm:$disp),
     "lqr\t$rT, $disp", LoadStore,
     [(set VECREG:$rT, (load iaddr:$disp))]>;
 */
}
255
256//===----------------------------------------------------------------------===//
257// Stores:
258//===----------------------------------------------------------------------===//
259
// STQD: store quadword, D-form — 10-bit signed displacement + base
// register. Scalar sources are stored from the quadword's preferred slot.
def STQDv16i8 : RI10Form<0b00100100, (outs), (ins VECREG:$rT, memri10:$src),
    "stqd\t$rT, $src", LoadStore,
    [(store (v16i8 VECREG:$rT), dform_addr:$src)]>;

def STQDv8i16 : RI10Form<0b00100100, (outs), (ins VECREG:$rT, memri10:$src),
    "stqd\t$rT, $src", LoadStore,
    [(store (v8i16 VECREG:$rT), dform_addr:$src)]>;

def STQDv4i32 : RI10Form<0b00100100, (outs), (ins VECREG:$rT, memri10:$src),
    "stqd\t$rT, $src", LoadStore,
    [(store (v4i32 VECREG:$rT), dform_addr:$src)]>;

def STQDv2i64 : RI10Form<0b00100100, (outs), (ins VECREG:$rT, memri10:$src),
    "stqd\t$rT, $src", LoadStore,
    [(store (v2i64 VECREG:$rT), dform_addr:$src)]>;

def STQDv4f32 : RI10Form<0b00100100, (outs), (ins VECREG:$rT, memri10:$src),
    "stqd\t$rT, $src", LoadStore,
    [(store (v4f32 VECREG:$rT), dform_addr:$src)]>;

def STQDv2f64 : RI10Form<0b00100100, (outs), (ins VECREG:$rT, memri10:$src),
    "stqd\t$rT, $src", LoadStore,
    [(store (v2f64 VECREG:$rT), dform_addr:$src)]>;

def STQDr128 : RI10Form<0b00100100, (outs), (ins GPRC:$rT, memri10:$src),
    "stqd\t$rT, $src", LoadStore,
    [(store GPRC:$rT, dform_addr:$src)]>;

def STQDr64 : RI10Form<0b00100100, (outs), (ins R64C:$rT, memri10:$src),
    "stqd\t$rT, $src", LoadStore,
    [(store R64C:$rT, dform_addr:$src)]>;

def STQDr32 : RI10Form<0b00100100, (outs), (ins R32C:$rT, memri10:$src),
    "stqd\t$rT, $src", LoadStore,
    [(store R32C:$rT, dform_addr:$src)]>;

// Floating Point
def STQDf32 : RI10Form<0b00100100, (outs), (ins R32FP:$rT, memri10:$src),
    "stqd\t$rT, $src", LoadStore,
    [(store R32FP:$rT, dform_addr:$src)]>;

def STQDf64 : RI10Form<0b00100100, (outs), (ins R64FP:$rT, memri10:$src),
    "stqd\t$rT, $src", LoadStore,
    [(store R64FP:$rT, dform_addr:$src)]>;

def STQDr16 : RI10Form<0b00100100, (outs), (ins R16C:$rT, memri10:$src),
    "stqd\t$rT, $src", LoadStore,
    [(store R16C:$rT, dform_addr:$src)]>;

def STQDr8 : RI10Form<0b00100100, (outs), (ins R8C:$rT, memri10:$src),
    "stqd\t$rT, $src", LoadStore,
    [(store R8C:$rT, dform_addr:$src)]>;
Scott Michel438be252007-12-17 22:32:34 +0000312
// STQA: store quadword, A-form — absolute 16-bit local-store address.
// NOTE(review): these are declared with RI10Form and the 8-bit opcode
// 0b00100100 even though the operand is the 16-bit addr256k, while the
// LQA counterparts use RI16Form with a 9-bit opcode. Verify the form
// class and opcode bits against the SPU ISA before relying on encodings.
def STQAv16i8 : RI10Form<0b00100100, (outs), (ins VECREG:$rT, addr256k:$src),
    "stqa\t$rT, $src", LoadStore,
    [(store (v16i8 VECREG:$rT), aform_addr:$src)]>;

def STQAv8i16 : RI10Form<0b00100100, (outs), (ins VECREG:$rT, addr256k:$src),
    "stqa\t$rT, $src", LoadStore,
    [(store (v8i16 VECREG:$rT), aform_addr:$src)]>;

def STQAv4i32 : RI10Form<0b00100100, (outs), (ins VECREG:$rT, addr256k:$src),
    "stqa\t$rT, $src", LoadStore,
    [(store (v4i32 VECREG:$rT), aform_addr:$src)]>;

def STQAv2i64 : RI10Form<0b00100100, (outs), (ins VECREG:$rT, addr256k:$src),
    "stqa\t$rT, $src", LoadStore,
    [(store (v2i64 VECREG:$rT), aform_addr:$src)]>;

def STQAv4f32 : RI10Form<0b00100100, (outs), (ins VECREG:$rT, addr256k:$src),
    "stqa\t$rT, $src", LoadStore,
    [(store (v4f32 VECREG:$rT), aform_addr:$src)]>;

def STQAv2f64 : RI10Form<0b00100100, (outs), (ins VECREG:$rT, addr256k:$src),
    "stqa\t$rT, $src", LoadStore,
    [(store (v2f64 VECREG:$rT), aform_addr:$src)]>;

def STQAr128 : RI10Form<0b00100100, (outs), (ins GPRC:$rT, addr256k:$src),
    "stqa\t$rT, $src", LoadStore,
    [(store GPRC:$rT, aform_addr:$src)]>;

def STQAr64 : RI10Form<0b00100100, (outs), (ins R64C:$rT, addr256k:$src),
    "stqa\t$rT, $src", LoadStore,
    [(store R64C:$rT, aform_addr:$src)]>;

def STQAr32 : RI10Form<0b00100100, (outs), (ins R32C:$rT, addr256k:$src),
    "stqa\t$rT, $src", LoadStore,
    [(store R32C:$rT, aform_addr:$src)]>;

// Floating Point
def STQAf32 : RI10Form<0b00100100, (outs), (ins R32FP:$rT, addr256k:$src),
    "stqa\t$rT, $src", LoadStore,
    [(store R32FP:$rT, aform_addr:$src)]>;

def STQAf64 : RI10Form<0b00100100, (outs), (ins R64FP:$rT, addr256k:$src),
    "stqa\t$rT, $src", LoadStore,
    [(store R64FP:$rT, aform_addr:$src)]>;

def STQAr16 : RI10Form<0b00100100, (outs), (ins R16C:$rT, addr256k:$src),
    "stqa\t$rT, $src", LoadStore,
    [(store R16C:$rT, aform_addr:$src)]>;

def STQAr8 : RI10Form<0b00100100, (outs), (ins R8C:$rT, addr256k:$src),
    "stqa\t$rT, $src", LoadStore,
    [(store R8C:$rT, aform_addr:$src)]>;
Scott Michel8b6b4202007-12-04 22:35:58 +0000365
// STQX: store quadword, X-form — register + register effective address.
// FIX: these were declared with RI10Form (the D-form layout with a 10-bit
// immediate field) even though they take memrr register-pair operands.
// The X-form store is an RRForm with an 11-bit opcode, mirroring the LQX
// loads above (SPU ISA, "stqx").
def STQXv16i8 : RRForm<0b00100100100, (outs), (ins VECREG:$rT, memrr:$src),
    "stqx\t$rT, $src", LoadStore,
    [(store (v16i8 VECREG:$rT), xform_addr:$src)]>;

def STQXv8i16 : RRForm<0b00100100100, (outs), (ins VECREG:$rT, memrr:$src),
    "stqx\t$rT, $src", LoadStore,
    [(store (v8i16 VECREG:$rT), xform_addr:$src)]>;

def STQXv4i32 : RRForm<0b00100100100, (outs), (ins VECREG:$rT, memrr:$src),
    "stqx\t$rT, $src", LoadStore,
    [(store (v4i32 VECREG:$rT), xform_addr:$src)]>;

def STQXv2i64 : RRForm<0b00100100100, (outs), (ins VECREG:$rT, memrr:$src),
    "stqx\t$rT, $src", LoadStore,
    [(store (v2i64 VECREG:$rT), xform_addr:$src)]>;

def STQXv4f32 : RRForm<0b00100100100, (outs), (ins VECREG:$rT, memrr:$src),
    "stqx\t$rT, $src", LoadStore,
    [(store (v4f32 VECREG:$rT), xform_addr:$src)]>;

def STQXv2f64 : RRForm<0b00100100100, (outs), (ins VECREG:$rT, memrr:$src),
    "stqx\t$rT, $src", LoadStore,
    [(store (v2f64 VECREG:$rT), xform_addr:$src)]>;

def STQXr128 : RRForm<0b00100100100, (outs), (ins GPRC:$rT, memrr:$src),
    "stqx\t$rT, $src", LoadStore,
    [(store GPRC:$rT, xform_addr:$src)]>;

def STQXr64:
    RRForm<0b00100100100, (outs), (ins R64C:$rT, memrr:$src),
    "stqx\t$rT, $src", LoadStore,
    [(store R64C:$rT, xform_addr:$src)]>;

def STQXr32:
    RRForm<0b00100100100, (outs), (ins R32C:$rT, memrr:$src),
    "stqx\t$rT, $src", LoadStore,
    [(store R32C:$rT, xform_addr:$src)]>;

// Floating Point
def STQXf32:
    RRForm<0b00100100100, (outs), (ins R32FP:$rT, memrr:$src),
    "stqx\t$rT, $src", LoadStore,
    [(store R32FP:$rT, xform_addr:$src)]>;

def STQXf64:
    RRForm<0b00100100100, (outs), (ins R64FP:$rT, memrr:$src),
    "stqx\t$rT, $src", LoadStore,
    [(store R64FP:$rT, xform_addr:$src)]>;

def STQXr16:
    RRForm<0b00100100100, (outs), (ins R16C:$rT, memrr:$src),
    "stqx\t$rT, $src", LoadStore,
    [(store R16C:$rT, xform_addr:$src)]>;

def STQXr8:
    RRForm<0b00100100100, (outs), (ins R8C:$rT, memrr:$src),
    "stqx\t$rT, $src", LoadStore,
    [(store R8C:$rT, xform_addr:$src)]>;
Scott Michel8b6b4202007-12-04 22:35:58 +0000424
/* Store quadword, PC relative: Not much use at this point in time. Might
   be useful for relocatable code.
def STQR : RI16Form<0b111000100, (outs), (ins VECREG:$rT, s16imm:$disp),
   "stqr\t$rT, $disp", LoadStore,
   [(store VECREG:$rT, iaddr:$disp)]>;
*/
Scott Michel8b6b4202007-12-04 22:35:58 +0000431
432//===----------------------------------------------------------------------===//
433// Generate Controls for Insertion:
434//===----------------------------------------------------------------------===//
435
// Generate controls for insertion: CBD/CHD/CWD/CDD (D-form, 7-bit
// immediate offset) and CBX/CHX/CWX/CDX (X-form, register + register)
// build the shuffle-control mask that, combined with SHUFB, inserts a
// byte / halfword / word / doubleword into a quadword at the slot
// selected by the effective address.
def CBD :
    RI7Form<0b10101111100, (outs VECREG:$rT), (ins memri7:$src),
      "cbd\t$rT, $src", ShuffleOp,
      [(set (v16i8 VECREG:$rT), (SPUvecinsmask dform2_addr:$src))]>;

def CBX : RRForm<0b00101011100, (outs VECREG:$rT), (ins memrr:$src),
    "cbx\t$rT, $src", ShuffleOp,
    [(set (v16i8 VECREG:$rT), (SPUvecinsmask xform_addr:$src))]>;

def CHD : RI7Form<0b10101111100, (outs VECREG:$rT), (ins memri7:$src),
    "chd\t$rT, $src", ShuffleOp,
    [(set (v8i16 VECREG:$rT), (SPUvecinsmask dform2_addr:$src))]>;

def CHX : RRForm<0b10101011100, (outs VECREG:$rT), (ins memrr:$src),
    "chx\t$rT, $src", ShuffleOp,
    [(set (v8i16 VECREG:$rT), (SPUvecinsmask xform_addr:$src))]>;

def CWD : RI7Form<0b01101111100, (outs VECREG:$rT), (ins memri7:$src),
    "cwd\t$rT, $src", ShuffleOp,
    [(set (v4i32 VECREG:$rT), (SPUvecinsmask dform2_addr:$src))]>;

def CWX : RRForm<0b01101011100, (outs VECREG:$rT), (ins memrr:$src),
    "cwx\t$rT, $src", ShuffleOp,
    [(set (v4i32 VECREG:$rT), (SPUvecinsmask xform_addr:$src))]>;

def CDD : RI7Form<0b11101111100, (outs VECREG:$rT), (ins memri7:$src),
    "cdd\t$rT, $src", ShuffleOp,
    [(set (v2i64 VECREG:$rT), (SPUvecinsmask dform2_addr:$src))]>;

def CDX : RRForm<0b11101011100, (outs VECREG:$rT), (ins memrr:$src),
    "cdx\t$rT, $src", ShuffleOp,
    [(set (v2i64 VECREG:$rT), (SPUvecinsmask xform_addr:$src))]>;
468
469//===----------------------------------------------------------------------===//
470// Constant formation:
471//===----------------------------------------------------------------------===//
472
// ILH: immediate load halfword — replicates a 16-bit constant into every
// halfword slot (vector form) or loads it into a scalar register.
def ILHv8i16:
  RI16Form<0b110000010, (outs VECREG:$rT), (ins s16imm:$val),
    "ilh\t$rT, $val", ImmLoad,
    [(set (v8i16 VECREG:$rT), (v8i16 v8i16SExt16Imm:$val))]>;

def ILHr16:
  RI16Form<0b110000010, (outs R16C:$rT), (ins s16imm:$val),
    "ilh\t$rT, $val", ImmLoad,
    [(set R16C:$rT, immSExt16:$val)]>;

// Cell SPU doesn't have a native 8-bit immediate load, but ILH works ("with
// the right constant")
def ILHr8:
  RI16Form<0b110000010, (outs R8C:$rT), (ins s16imm_i8:$val),
    "ilh\t$rT, $val", ImmLoad,
    [(set R8C:$rT, immSExt8:$val)]>;

// IL: immediate load word. Note: IL sign-extends the 16-bit immediate.
def ILr64:
  RI16Form<0b100000010, (outs R64C:$rT), (ins s16imm_i64:$val),
    "il\t$rT, $val", ImmLoad,
    [(set R64C:$rT, immSExt16:$val)]>;

def ILv2i64:
  RI16Form<0b100000010, (outs VECREG:$rT), (ins s16imm_i64:$val),
    "il\t$rT, $val", ImmLoad,
    [(set VECREG:$rT, (v2i64 v2i64SExt16Imm:$val))]>;

def ILv4i32:
  RI16Form<0b100000010, (outs VECREG:$rT), (ins s16imm:$val),
    "il\t$rT, $val", ImmLoad,
    [(set VECREG:$rT, (v4i32 v4i32SExt16Imm:$val))]>;

def ILr32:
  RI16Form<0b100000010, (outs R32C:$rT), (ins s16imm_i32:$val),
    "il\t$rT, $val", ImmLoad,
    [(set R32C:$rT, immSExt16:$val)]>;

def ILf32:
  RI16Form<0b100000010, (outs R32FP:$rT), (ins s16imm_f32:$val),
    "il\t$rT, $val", ImmLoad,
    [(set R32FP:$rT, (SPUFPconstant fpimmSExt16:$val))]>;

def ILf64:
  RI16Form<0b100000010, (outs R64FP:$rT), (ins s16imm_f64:$val),
    "il\t$rT, $val", ImmLoad,
    [(set R64FP:$rT, (SPUFPconstant fpimmSExt16:$val))]>;
520
// ILHU: immediate load halfword upper — places a 16-bit constant in the
// upper halfword of each word (low halfword zeroed). Paired with IOHL to
// materialize arbitrary 32-bit constants.
def ILHUv4i32:
  RI16Form<0b010000010, (outs VECREG:$rT), (ins u16imm:$val),
    "ilhu\t$rT, $val", ImmLoad,
    [(set VECREG:$rT, (v4i32 immILHUvec:$val))]>;

def ILHUr32:
  RI16Form<0b010000010, (outs R32C:$rT), (ins u16imm:$val),
    "ilhu\t$rT, $val", ImmLoad,
    [(set R32C:$rT, hi16:$val)]>;

// ILHUf32: Used to custom lower float constant loads
def ILHUf32:
  RI16Form<0b010000010, (outs R32FP:$rT), (ins f16imm:$val),
    "ilhu\t$rT, $val", ImmLoad,
    [(set R32FP:$rT, (SPUFPconstant hi16_f32:$val))]>;

// ILHUhi: Used for loading the high portion of an address. Note the
// symbolHi printer used for the operand.
def ILHUhi : RI16Form<0b010000010, (outs R32C:$rT), (ins symbolHi:$val),
    "ilhu\t$rT, $val", ImmLoad,
    [(set R32C:$rT, hi16:$val)]>;

// ILA: immediate load address — an 18-bit unsigned immediate (can also be
// used to load 18-bit unsigned constants; see the zext 16->32 pattern).
def ILAr64:
  RI18Form<0b1000010, (outs R64C:$rT), (ins u18imm_i64:$val),
    "ila\t$rT, $val", LoadNOP,
    [(set R64C:$rT, imm18:$val)]>;

def ILAv2i64:
  RI18Form<0b1000010, (outs VECREG:$rT), (ins u18imm:$val),
    "ila\t$rT, $val", LoadNOP,
    [(set (v2i64 VECREG:$rT), v2i64Uns18Imm:$val)]>;

def ILAv4i32:
  RI18Form<0b1000010, (outs VECREG:$rT), (ins u18imm:$val),
    "ila\t$rT, $val", LoadNOP,
    [(set (v4i32 VECREG:$rT), v4i32Uns18Imm:$val)]>;

def ILAr32:
  RI18Form<0b1000010, (outs R32C:$rT), (ins u18imm:$val),
    "ila\t$rT, $val", LoadNOP,
    [(set R32C:$rT, imm18:$val)]>;

def ILAf32:
  RI18Form<0b1000010, (outs R32FP:$rT), (ins f18imm:$val),
    "ila\t$rT, $val", LoadNOP,
    [(set R32FP:$rT, (SPUFPconstant fpimm18:$val))]>;

def ILAf64:
  RI18Form<0b1000010, (outs R64FP:$rT), (ins f18imm_f64:$val),
    "ila\t$rT, $val", LoadNOP,
    [(set R64FP:$rT, (SPUFPconstant fpimm18:$val))]>;

// ILAlo: load the low portion of a symbol address (symbolLo printer).
def ILAlo:
  RI18Form<0b1000010, (outs R32C:$rT), (ins symbolLo:$val),
    "ila\t$rT, $val", ImmLoad,
    [(set R32C:$rT, imm18:$val)]>;

// ILAlsa: load a local-store address symbol; selected manually, no pattern.
def ILAlsa:
  RI18Form<0b1000010, (outs R32C:$rT), (ins symbolLSA:$val),
    "ila\t$rT, $val", ImmLoad,
    [/* no pattern */]>;
586
587// Immediate OR, Halfword Lower: The "other" part of loading large constants
588// into 32-bit registers. See the anonymous pattern Pat<(i32 imm:$imm), ...>
589// Note that these are really two operand instructions, but they're encoded
590// as three operands with the first two arguments tied-to each other.
591
// IOHL: immediate OR halfword lower — ORs a 16-bit constant into the low
// halfword; the second half of an ILHU/IOHL pair for 32-bit constants.
// Encoded as two-operand instructions: $rS is tied to $rT and not encoded.
def IOHLvec:
  RI16Form<0b100000110, (outs VECREG:$rT), (ins VECREG:$rS, u16imm:$val),
    "iohl\t$rT, $val", ImmLoad,
    [/* insert intrinsic here */]>,
    RegConstraint<"$rS = $rT">,
    NoEncode<"$rS">;

def IOHLr32:
  RI16Form<0b100000110, (outs R32C:$rT), (ins R32C:$rS, i32imm:$val),
    "iohl\t$rT, $val", ImmLoad,
    [/* insert intrinsic here */]>,
    RegConstraint<"$rS = $rT">,
    NoEncode<"$rS">;

def IOHLf32:
  RI16Form<0b100000110, (outs R32FP:$rT), (ins R32FP:$rS, f32imm:$val),
    "iohl\t$rT, $val", ImmLoad,
    [/* insert intrinsic here */]>,
    RegConstraint<"$rS = $rT">,
    NoEncode<"$rS">;

// IOHLlo: OR in the low portion of a symbol address (symbolLo printer).
def IOHLlo:
  RI16Form<0b100000110, (outs R32C:$rT), (ins R32C:$rS, symbolLo:$val),
    "iohl\t$rT, $val", ImmLoad,
    [/* no pattern */]>,
    RegConstraint<"$rS = $rT">,
    NoEncode<"$rS">;
619
Scott Michel8b6b4202007-12-04 22:35:58 +0000620// Form select mask for bytes using immediate, used in conjunction with the
621// SELB instruction:
622
// FSMBI: form select mask for bytes, immediate — expands each bit of the
// 16-bit immediate into a full byte of the result mask. Used together
// with SELB; one def per vector type for type inference.
def FSMBIv16i8 : RI16Form<0b101001100, (outs VECREG:$rT), (ins u16imm:$val),
    "fsmbi\t$rT, $val", SelectOp,
    [(set (v16i8 VECREG:$rT), (SPUfsmbi_v16i8 immU16:$val))]>;

def FSMBIv8i16 : RI16Form<0b101001100, (outs VECREG:$rT), (ins u16imm:$val),
    "fsmbi\t$rT, $val", SelectOp,
    [(set (v8i16 VECREG:$rT), (SPUfsmbi_v8i16 immU16:$val))]>;

def FSMBIvecv4i32 : RI16Form<0b101001100, (outs VECREG:$rT), (ins u16imm:$val),
    "fsmbi\t$rT, $val", SelectOp,
    [(set (v4i32 VECREG:$rT), (SPUfsmbi_v4i32 immU16:$val))]>;
634
635//===----------------------------------------------------------------------===//
636// Integer and Logical Operations:
637//===----------------------------------------------------------------------===//
638
// AH: add halfword (8 x 16-bit lanes, and the scalar R16C form).
def AHv8i16:
  RRForm<0b00010011000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
    "ah\t$rT, $rA, $rB", IntegerOp,
    [(set (v8i16 VECREG:$rT), (int_spu_si_ah VECREG:$rA, VECREG:$rB))]>;

// Select generic v8i16 adds to AH (the def's own pattern matches the
// intrinsic form).
def : Pat<(add (v8i16 VECREG:$rA), (v8i16 VECREG:$rB)),
          (AHv8i16 VECREG:$rA, VECREG:$rB)>;

def AHr16:
  RRForm<0b00010011000, (outs R16C:$rT), (ins R16C:$rA, R16C:$rB),
    "ah\t$rT, $rA, $rB", IntegerOp,
    [(set R16C:$rT, (add R16C:$rA, R16C:$rB))]>;

// AHI: add halfword immediate (10-bit sign-extended immediate).
def AHIvec:
  RI10Form<0b10111000, (outs VECREG:$rT), (ins VECREG:$rA, s10imm:$val),
    "ahi\t$rT, $rA, $val", IntegerOp,
    [(set (v8i16 VECREG:$rT), (add (v8i16 VECREG:$rA),
                                   v8i16SExt10Imm:$val))]>;

// FIX: the scalar pattern previously used the vector immediate predicate
// v8i16SExt10Imm; a scalar i16 add must use i16ImmSExt10 (cf. SFHIr16).
def AHIr16 : RI10Form<0b10111000, (outs R16C:$rT), (ins R16C:$rA, s10imm:$val),
    "ahi\t$rT, $rA, $val", IntegerOp,
    [(set R16C:$rT, (add R16C:$rA, i16ImmSExt10:$val))]>;
663
// A/AI: add word (vector and scalar) with register and immediate forms.
def Avec : RRForm<0b00000011000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
    "a\t$rT, $rA, $rB", IntegerOp,
    [(set (v4i32 VECREG:$rT), (add (v4i32 VECREG:$rA), (v4i32 VECREG:$rB)))]>;

// NOTE(review): v16i8 adds are selected to the word add; this is only
// correct when inter-byte carries cannot occur — confirm this is intended.
def : Pat<(add (v16i8 VECREG:$rA), (v16i8 VECREG:$rB)),
          (Avec VECREG:$rA, VECREG:$rB)>;

def Ar32 : RRForm<0b00000011000, (outs R32C:$rT), (ins R32C:$rA, R32C:$rB),
    "a\t$rT, $rA, $rB", IntegerOp,
    [(set R32C:$rT, (add R32C:$rA, R32C:$rB))]>;

def Ar8:
  RRForm<0b00000011000, (outs R8C:$rT), (ins R8C:$rA, R8C:$rB),
    "a\t$rT, $rA, $rB", IntegerOp,
    [(set R8C:$rT, (add R8C:$rA, R8C:$rB))]>;

def AIvec:
  RI10Form<0b00111000, (outs VECREG:$rT), (ins VECREG:$rA, s10imm:$val),
    "ai\t$rT, $rA, $val", IntegerOp,
    [(set (v4i32 VECREG:$rT), (add (v4i32 VECREG:$rA),
                                   v4i32SExt10Imm:$val))]>;

def AIr32:
  RI10Form<0b00111000, (outs R32C:$rT), (ins R32C:$rA, s10imm_i32:$val),
    "ai\t$rT, $rA, $val", IntegerOp,
    [(set R32C:$rT, (add R32C:$rA, i32ImmSExt10:$val))]>;

// SFH/SF: "subtract from" — the hardware computes rT = rB - rA.
// FIX: the register-form patterns previously matched (sub rA, rB), which
// would select a negated result; they now match (sub rB, rA), consistent
// with the immediate forms below, which correctly compute imm - rA.
def SFHvec:
  RRForm<0b00010010000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
    "sfh\t$rT, $rA, $rB", IntegerOp,
    [(set (v8i16 VECREG:$rT), (sub (v8i16 VECREG:$rB),
                                   (v8i16 VECREG:$rA)))]>;

def SFHr16:
  RRForm<0b00010010000, (outs R16C:$rT), (ins R16C:$rA, R16C:$rB),
    "sfh\t$rT, $rA, $rB", IntegerOp,
    [(set R16C:$rT, (sub R16C:$rB, R16C:$rA))]>;

def SFHIvec:
  RI10Form<0b10110000, (outs VECREG:$rT), (ins VECREG:$rA, s10imm:$val),
    "sfhi\t$rT, $rA, $val", IntegerOp,
    [(set (v8i16 VECREG:$rT), (sub v8i16SExt10Imm:$val,
                                   (v8i16 VECREG:$rA)))]>;

def SFHIr16 : RI10Form<0b10110000, (outs R16C:$rT), (ins R16C:$rA, s10imm:$val),
    "sfhi\t$rT, $rA, $val", IntegerOp,
    [(set R16C:$rT, (sub i16ImmSExt10:$val, R16C:$rA))]>;

def SFvec : RRForm<0b00000010000, (outs VECREG:$rT),
    (ins VECREG:$rA, VECREG:$rB),
    "sf\t$rT, $rA, $rB", IntegerOp,
    [(set (v4i32 VECREG:$rT), (sub (v4i32 VECREG:$rB), (v4i32 VECREG:$rA)))]>;

def SFr32 : RRForm<0b00000010000, (outs R32C:$rT), (ins R32C:$rA, R32C:$rB),
    "sf\t$rT, $rA, $rB", IntegerOp,
    [(set R32C:$rT, (sub R32C:$rB, R32C:$rA))]>;

def SFIvec:
  RI10Form<0b00110000, (outs VECREG:$rT), (ins VECREG:$rA, s10imm:$val),
    "sfi\t$rT, $rA, $val", IntegerOp,
    [(set (v4i32 VECREG:$rT), (sub v4i32SExt10Imm:$val,
                                   (v4i32 VECREG:$rA)))]>;

def SFIr32 : RI10Form<0b00110000, (outs R32C:$rT),
    (ins R32C:$rA, s10imm_i32:$val),
    "sfi\t$rT, $rA, $val", IntegerOp,
    [(set R32C:$rT, (sub i32ImmSExt10:$val, R32C:$rA))]>;
731
// Extended-precision add/subtract building blocks. These exist only in
// vector form and match no ISel pattern; each has a third operand tied to
// the result register ($rCarry = $rT, not encoded) carrying the
// carry/borrow bits produced by CG/BG.

// ADDX: add extended (uses carry bits from a prior CG).
def ADDXvec:
  RRForm<0b00000010110, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB,
                                                VECREG:$rCarry),
    "addx\t$rT, $rA, $rB", IntegerOp,
    []>,
    RegConstraint<"$rCarry = $rT">,
    NoEncode<"$rCarry">;

// CG: carry generate.
def CGvec:
  RRForm<0b01000011000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB,
                                                VECREG:$rCarry),
    "cg\t$rT, $rA, $rB", IntegerOp,
    []>,
    RegConstraint<"$rCarry = $rT">,
    NoEncode<"$rCarry">;

// SFX: subtract from extended (uses borrow bits from a prior BG).
def SFXvec:
  RRForm<0b10000010110, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB,
                                                VECREG:$rCarry),
    "sfx\t$rT, $rA, $rB", IntegerOp,
    []>,
    RegConstraint<"$rCarry = $rT">,
    NoEncode<"$rCarry">;

// BG: borrow generate.
def BGvec:
  RRForm<0b01000010000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB,
                                                VECREG:$rCarry),
    "bg\t$rT, $rA, $rB", IntegerOp,
    []>,
    RegConstraint<"$rCarry = $rT">,
    NoEncode<"$rCarry">;

// BGX: borrow generate extended.
def BGXvec:
  RRForm<0b11000010110, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB,
                                                VECREG:$rCarry),
    "bgx\t$rT, $rA, $rB", IntegerOp,
    []>,
    RegConstraint<"$rCarry = $rT">,
    NoEncode<"$rCarry">;
776
//===----------------------------------------------------------------------===//
// Halfword multiply variants.
// N.B.: These can be used to build up larger quantities (16x16 -> 32).
//===----------------------------------------------------------------------===//

def MPYv8i16:
    RRForm<0b00100011110, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
           "mpy\t$rT, $rA, $rB", IntegerMulDiv,
           [(set (v8i16 VECREG:$rT),
                 (SPUmpy_v8i16 (v8i16 VECREG:$rA), (v8i16 VECREG:$rB)))]>;

def MPYr16:
    RRForm<0b00100011110, (outs R16C:$rT), (ins R16C:$rA, R16C:$rB),
           "mpy\t$rT, $rA, $rB", IntegerMulDiv,
           [(set R16C:$rT, (mul R16C:$rA, R16C:$rB))]>;

def MPYUv4i32:
    RRForm<0b00110011110, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
           "mpyu\t$rT, $rA, $rB", IntegerMulDiv,
           [(set (v4i32 VECREG:$rT),
                 (SPUmpyu_v4i32 (v4i32 VECREG:$rA), (v4i32 VECREG:$rB)))]>;

// mpyu on 16-bit scalars: the pattern widens both operands, so the result
// lands in a 32-bit register (16 x 16 -> 32, unsigned).
def MPYUr16:
    RRForm<0b00110011110, (outs R32C:$rT), (ins R16C:$rA, R16C:$rB),
           "mpyu\t$rT, $rA, $rB", IntegerMulDiv,
           [(set R32C:$rT, (mul (zext R16C:$rA),
                                (zext R16C:$rB)))]>;

def MPYUr32:
    RRForm<0b00110011110, (outs R32C:$rT), (ins R32C:$rA, R32C:$rB),
           "mpyu\t$rT, $rA, $rB", IntegerMulDiv,
           [(set R32C:$rT, (SPUmpyu_i32 R32C:$rA, R32C:$rB))]>;

// mpyi: multiply 16 x s10imm -> 32 result (custom lowering for a 32-bit
// result; this only produces the lower 16 bits).
def MPYIvec:
    RI10Form<0b00101110, (outs VECREG:$rT), (ins VECREG:$rA, s10imm:$val),
             "mpyi\t$rT, $rA, $val", IntegerMulDiv,
             [(set (v8i16 VECREG:$rT),
                   (mul (v8i16 VECREG:$rA), v8i16SExt10Imm:$val))]>;

def MPYIr16:
    RI10Form<0b00101110, (outs R16C:$rT), (ins R16C:$rA, s10imm:$val),
             "mpyi\t$rT, $rA, $val", IntegerMulDiv,
             [(set R16C:$rT, (mul R16C:$rA, i16ImmSExt10:$val))]>;

// mpyui: same issues as the other multiplies; does not match a pattern, but
// may be used during target DAG selection or lowering.
def MPYUIvec:
    RI10Form<0b10101110, (outs VECREG:$rT), (ins VECREG:$rA, s10imm:$val),
             "mpyui\t$rT, $rA, $val", IntegerMulDiv,
             []>;

def MPYUIr16:
    RI10Form<0b10101110, (outs R16C:$rT), (ins R16C:$rA, s10imm:$val),
             "mpyui\t$rT, $rA, $val", IntegerMulDiv,
             []>;
831
//===----------------------------------------------------------------------===//
// mpya: multiply and add, 16 x 16 + 32 -> 32-bit result.
//===----------------------------------------------------------------------===//

def MPYAvec:
    RRRForm<0b0011, (outs VECREG:$rT),
            (ins VECREG:$rA, VECREG:$rB, VECREG:$rC),
            "mpya\t$rT, $rA, $rB, $rC", IntegerMulDiv,
            [(set (v4i32 VECREG:$rT),
                  (add (v4i32 (bitconvert (mul (v8i16 VECREG:$rA),
                                               (v8i16 VECREG:$rB)))),
                       (v4i32 VECREG:$rC)))]>;

def MPYAr32:
    RRRForm<0b0011, (outs R32C:$rT), (ins R16C:$rA, R16C:$rB, R32C:$rC),
            "mpya\t$rT, $rA, $rB, $rC", IntegerMulDiv,
            [(set R32C:$rT, (add (sext (mul R16C:$rA, R16C:$rB)),
                                 R32C:$rC))]>;

// Also match the form where both 16-bit operands are individually
// sign-extended before the multiply.
def : Pat<(add (mul (sext R16C:$rA), (sext R16C:$rB)), R32C:$rC),
          (MPYAr32 R16C:$rA, R16C:$rB, R32C:$rC)>;

// Variant matching 32-bit operands whose low 16 bits are sign-extended
// in-register.
def MPYAr32_sextinreg:
    RRRForm<0b0011, (outs R32C:$rT), (ins R32C:$rA, R32C:$rB, R32C:$rC),
            "mpya\t$rT, $rA, $rB, $rC", IntegerMulDiv,
            [(set R32C:$rT, (add (mul (sext_inreg R32C:$rA, i16),
                                      (sext_inreg R32C:$rB, i16)),
                                 R32C:$rC))]>;
861
//===----------------------------------------------------------------------===//
// "High" multiply family. Entries with empty pattern lists are not matched
// by selection patterns; presumably they are referenced directly by the
// custom multiply lowering -- verify against the lowering code.
//===----------------------------------------------------------------------===//

// mpyh: multiply high; used to synthesize full 32-bit multiplies.
def MPYHv4i32:
    RRForm<0b10100011110, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
           "mpyh\t$rT, $rA, $rB", IntegerMulDiv,
           [(set (v4i32 VECREG:$rT),
                 (SPUmpyh_v4i32 (v4i32 VECREG:$rA), (v4i32 VECREG:$rB)))]>;

def MPYHr32:
    RRForm<0b10100011110, (outs R32C:$rT), (ins R32C:$rA, R32C:$rB),
           "mpyh\t$rT, $rA, $rB", IntegerMulDiv,
           [(set R32C:$rT, (SPUmpyh_i32 R32C:$rA, R32C:$rB))]>;

// mpys: multiply high and shift right (returns the top half of a 16-bit
// multiply, sign-extended to 32 bits).
def MPYSvec:
    RRForm<0b11100011110, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
           "mpys\t$rT, $rA, $rB", IntegerMulDiv,
           []>;

def MPYSr16:
    RRForm<0b11100011110, (outs R32C:$rT), (ins R16C:$rA, R16C:$rB),
           "mpys\t$rT, $rA, $rB", IntegerMulDiv,
           []>;

// mpyhh: multiply high-high (the 32-bit product of the upper 16 bits of
// $rA and $rB).
def MPYHHv8i16:
    RRForm<0b01100011110, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
           "mpyhh\t$rT, $rA, $rB", IntegerMulDiv,
           [(set (v8i16 VECREG:$rT),
                 (SPUmpyhh_v8i16 (v8i16 VECREG:$rA), (v8i16 VECREG:$rB)))]>;

def MPYHHr32:
    RRForm<0b01100011110, (outs R32C:$rT), (ins R32C:$rA, R32C:$rB),
           "mpyhh\t$rT, $rA, $rB", IntegerMulDiv,
           []>;

// mpyhha: multiply high-high, add to $rT.
// NOTE(review): the hardware reads and writes $rT, but $rT is not modeled
// as a tied input here -- confirm whether that is intentional.
def MPYHHAvec:
    RRForm<0b01100010110, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
           "mpyhha\t$rT, $rA, $rB", IntegerMulDiv,
           []>;

def MPYHHAr32:
    RRForm<0b01100010110, (outs R32C:$rT), (ins R32C:$rA, R32C:$rB),
           "mpyhha\t$rT, $rA, $rB", IntegerMulDiv,
           []>;

// mpyhhu: multiply high-high, unsigned.
def MPYHHUvec:
    RRForm<0b01110011110, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
           "mpyhhu\t$rT, $rA, $rB", IntegerMulDiv,
           []>;

def MPYHHUr32:
    RRForm<0b01110011110, (outs R32C:$rT), (ins R32C:$rA, R32C:$rB),
           "mpyhhu\t$rT, $rA, $rB", IntegerMulDiv,
           []>;

// mpyhhau: multiply high-high unsigned, add to $rT (see mpyhha note above).
def MPYHHAUvec:
    RRForm<0b01110010110, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
           "mpyhhau\t$rT, $rA, $rB", IntegerMulDiv,
           []>;

def MPYHHAUr32:
    RRForm<0b01110010110, (outs R32C:$rT), (ins R32C:$rA, R32C:$rB),
           "mpyhhau\t$rT, $rA, $rB", IntegerMulDiv,
           []>;
931
// clz: count leading zeroes.
def CLZv4i32:
    RRForm_1<0b10100101010, (outs VECREG:$rT), (ins VECREG:$rA),
             "clz\t$rT, $rA", IntegerOp,
             [/* intrinsic */]>;

def CLZr32:
    RRForm_1<0b10100101010, (outs R32C:$rT), (ins R32C:$rA),
             "clz\t$rT, $rA", IntegerOp,
             [(set R32C:$rT, (ctlz R32C:$rA))]>;

// cntb: count ones in bytes (aka "population count").
// NOTE: cntb is really a single vector instruction; the custom lowering
// code uses these per-type defs in unorthodox ways to support CTPOP for
// other data types.
def CNTBv16i8:
    RRForm_1<0b00101101010, (outs VECREG:$rT), (ins VECREG:$rA),
             "cntb\t$rT, $rA", IntegerOp,
             [(set (v16i8 VECREG:$rT), (SPUcntb_v16i8 (v16i8 VECREG:$rA)))]>;

def CNTBv8i16:
    RRForm_1<0b00101101010, (outs VECREG:$rT), (ins VECREG:$rA),
             "cntb\t$rT, $rA", IntegerOp,
             [(set (v8i16 VECREG:$rT), (SPUcntb_v8i16 (v8i16 VECREG:$rA)))]>;

def CNTBv4i32:
    RRForm_1<0b00101101010, (outs VECREG:$rT), (ins VECREG:$rA),
             "cntb\t$rT, $rA", IntegerOp,
             [(set (v4i32 VECREG:$rT), (SPUcntb_v4i32 (v4i32 VECREG:$rA)))]>;
961
// Form-select-mask and gather-bits instructions. None of these match a
// selection pattern yet.

// fsmb: form select mask for bytes. N.B.: the input operand, $rA, is
// 16 bits wide.
def FSMB:
    RRForm_1<0b01101101100, (outs VECREG:$rT), (ins R16C:$rA),
             "fsmb\t$rT, $rA", SelectOp,
             []>;

// fsmh: form select mask for halfwords. N.B.: only the low 8 bits of $rA
// are significant (even though it is supplied as a 16-bit register).
def FSMH:
    RRForm_1<0b10101101100, (outs VECREG:$rT), (ins R16C:$rA),
             "fsmh\t$rT, $rA", SelectOp,
             []>;

// fsm: form select mask for words. Like the other fsm* instructions, only
// the low 4 bits of $rA are significant.
def FSM:
    RRForm_1<0b00101101100, (outs VECREG:$rT), (ins R16C:$rA),
             "fsm\t$rT, $rA", SelectOp,
             []>;

// gbb: gather the low-order bit of each byte in $rA into a single 16-bit
// quantity stored in $rT.
def GBB:
    RRForm_1<0b01001101100, (outs R16C:$rT), (ins VECREG:$rA),
             "gbb\t$rT, $rA", GatherOp,
             []>;

// gbh: gather the low-order bit of each halfword in $rA into a single
// 8-bit quantity stored in $rT.
def GBH:
    RRForm_1<0b10001101100, (outs R16C:$rT), (ins VECREG:$rA),
             "gbh\t$rT, $rA", GatherOp,
             []>;

// gb: gather the low-order bit of each word in $rA into a single 4-bit
// quantity stored in $rT.
def GB:
    RRForm_1<0b00001101100, (outs R16C:$rT), (ins VECREG:$rA),
             "gb\t$rT, $rA", GatherOp,
             []>;
1002
// Byte operations; no selection patterns yet.

// avgb: average bytes.
def AVGB:
    RRForm<0b11001011000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
           "avgb\t$rT, $rA, $rB", ByteOp,
           []>;

// absdb: absolute difference of bytes.
def ABSDB:
    RRForm<0b11001010000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
           "absdb\t$rT, $rA, $rB", ByteOp,
           []>;

// sumb: sum bytes into halfwords.
def SUMB:
    RRForm<0b11001010010, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
           "sumb\t$rT, $rA, $rB", ByteOp,
           []>;
1020
//===----------------------------------------------------------------------===//
// Sign extension operations.
// NOTE(review): XSBH and XSHW are declared with the same opcode value
// (0b01101101010) below -- at most one can be the correct encoding; verify
// against the SPU ISA.
//===----------------------------------------------------------------------===//

// NOTE(review): this pattern sign-extends v16i8 to v8i16, which halves the
// element count; confirm this is the node the lowering actually produces.
def XSBHvec:
    RRForm_1<0b01101101010, (outs VECREG:$rDst), (ins VECREG:$rSrc),
             "xsbh\t$rDst, $rSrc", IntegerOp,
             [(set (v8i16 VECREG:$rDst), (sext (v16i8 VECREG:$rSrc)))]>;

// Ordinary form for XSBH: sign-extend the low 8 bits within a 16-bit reg.
def XSBHr16:
    RRForm_1<0b01101101010, (outs R16C:$rDst), (ins R16C:$rSrc),
             "xsbh\t$rDst, $rSrc", IntegerOp,
             [(set R16C:$rDst, (sext_inreg R16C:$rSrc, i8))]>;

def XSBHr8:
    RRForm_1<0b01101101010, (outs R16C:$rDst), (ins R8C:$rSrc),
             "xsbh\t$rDst, $rSrc", IntegerOp,
             [(set R16C:$rDst, (sext R8C:$rSrc))]>;

// 32-bit form for XSBH: used to sign extend 8-bit quantities to 16-bit
// quantities to 32-bit quantities via a 32-bit register (see the sext 8->32
// pattern below). Intentionally doesn't match a pattern because we want the
// sext 8->32 pattern to do the work for us, namely because we need the
// extra XSHWr32.
def XSBHr32:
    RRForm_1<0b01101101010, (outs R32C:$rDst), (ins R32C:$rSrc),
             "xsbh\t$rDst, $rSrc", IntegerOp,
             [(set R32C:$rDst, (sext_inreg R32C:$rSrc, i8))]>;

// Sign extend halfwords to words:
def XSHWvec:
    RRForm_1<0b01101101010, (outs VECREG:$rDest), (ins VECREG:$rSrc),
             "xshw\t$rDest, $rSrc", IntegerOp,
             [(set (v4i32 VECREG:$rDest), (sext (v8i16 VECREG:$rSrc)))]>;

def XSHWr32:
    RRForm_1<0b01101101010, (outs R32C:$rDst), (ins R32C:$rSrc),
             "xshw\t$rDst, $rSrc", IntegerOp,
             [(set R32C:$rDst, (sext_inreg R32C:$rSrc, i16))]>;

def XSHWr16:
    RRForm_1<0b01101101010, (outs R32C:$rDst), (ins R16C:$rSrc),
             "xshw\t$rDst, $rSrc", IntegerOp,
             [(set R32C:$rDst, (sext R16C:$rSrc))]>;

// Sign extend words to doublewords:
def XSWDvec:
    RRForm_1<0b01100101010, (outs VECREG:$rDst), (ins VECREG:$rSrc),
             "xswd\t$rDst, $rSrc", IntegerOp,
             [(set (v2i64 VECREG:$rDst), (sext (v4i32 VECREG:$rSrc)))]>;

def XSWDr64:
    RRForm_1<0b01100101010, (outs R64C:$rDst), (ins R64C:$rSrc),
             "xswd\t$rDst, $rSrc", IntegerOp,
             [(set R64C:$rDst, (sext_inreg R64C:$rSrc, i32))]>;

def XSWDr32:
    RRForm_1<0b01100101010, (outs R64C:$rDst), (ins R32C:$rSrc),
             "xswd\t$rDst, $rSrc", IntegerOp,
             [(set R64C:$rDst, (SPUsext32_to_64 R32C:$rSrc))]>;

def : Pat<(sext R32C:$inp),
          (XSWDr32 R32C:$inp)>;
1081
//===----------------------------------------------------------------------===//
// AND operations.
//===----------------------------------------------------------------------===//

def ANDv16i8:
    RRForm<0b10000011000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
           "and\t$rT, $rA, $rB", IntegerOp,
           [(set (v16i8 VECREG:$rT), (and (v16i8 VECREG:$rA),
                                          (v16i8 VECREG:$rB)))]>;

def ANDv8i16:
    RRForm<0b10000011000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
           "and\t$rT, $rA, $rB", IntegerOp,
           [(set (v8i16 VECREG:$rT), (and (v8i16 VECREG:$rA),
                                          (v8i16 VECREG:$rB)))]>;

def ANDv4i32:
    RRForm<0b10000011000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
           "and\t$rT, $rA, $rB", IntegerOp,
           [(set (v4i32 VECREG:$rT), (and (v4i32 VECREG:$rA),
                                          (v4i32 VECREG:$rB)))]>;

def ANDr32:
    RRForm<0b10000011000, (outs R32C:$rT), (ins R32C:$rA, R32C:$rB),
           "and\t$rT, $rA, $rB", IntegerOp,
           [(set R32C:$rT, (and R32C:$rA, R32C:$rB))]>;

//===---------------------------------------------
// Special AND forms used to implement fabs: masking off the sign bit
// requires bitwise logic on floating-point registers. These intentionally
// do not match patterns (see the fabs lowering).
def ANDfabs32:
    RRForm<0b10000011000, (outs R32FP:$rT), (ins R32FP:$rA, R32C:$rB),
           "and\t$rT, $rA, $rB", IntegerOp,
           [/* Intentionally does not match a pattern */]>;

def ANDfabs64:
    RRForm<0b10000011000, (outs R64FP:$rT), (ins R64FP:$rA, VECREG:$rB),
           "and\t$rT, $rA, $rB", IntegerOp,
           [/* Intentionally does not match a pattern */]>;

// Could use ANDv4i32, but kept separate for clarity.
def ANDfabsvec:
    RRForm<0b10000011000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
           "and\t$rT, $rA, $rB", IntegerOp,
           [/* Intentionally does not match a pattern */]>;
//===---------------------------------------------

def ANDr16:
    RRForm<0b10000011000, (outs R16C:$rT), (ins R16C:$rA, R16C:$rB),
           "and\t$rT, $rA, $rB", IntegerOp,
           [(set R16C:$rT, (and R16C:$rA, R16C:$rB))]>;

def ANDr8:
    RRForm<0b10000011000, (outs R8C:$rT), (ins R8C:$rA, R8C:$rB),
           "and\t$rT, $rA, $rB", IntegerOp,
           [(set R8C:$rT, (and R8C:$rA, R8C:$rB))]>;

// Hacked form of AND used to zero-extend 16-bit quantities to 32-bit
// quantities -- see the 16->32 zext pattern.
//
// This pattern is somewhat artificial, since it might match some compiler
// generated pattern, but it is unlikely to do so.
def AND2To4:
    RRForm<0b10000011000, (outs R32C:$rT), (ins R16C:$rA, R32C:$rB),
           "and\t$rT, $rA, $rB", IntegerOp,
           [(set R32C:$rT, (and (zext R16C:$rA), R32C:$rB))]>;
1145
// N.B.: vnot_conv is one of those special target selection pattern
// fragments in which we expect there to be a bit_convert on the constant.
// Bear in mind that llvm translates "not <reg>" to "xor <reg>, -1" (or, in
// this case, a constant -1 vector.)
def ANDCv16i8:
    RRForm<0b10000011010, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
           "andc\t$rT, $rA, $rB", IntegerOp,
           [(set (v16i8 VECREG:$rT), (and (v16i8 VECREG:$rA),
                                          (vnot (v16i8 VECREG:$rB))))]>;

def ANDCv8i16:
    RRForm<0b10000011010, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
           "andc\t$rT, $rA, $rB", IntegerOp,
           [(set (v8i16 VECREG:$rT), (and (v8i16 VECREG:$rA),
                                          (vnot (v8i16 VECREG:$rB))))]>;

def ANDCv4i32:
    RRForm<0b10000011010, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
           "andc\t$rT, $rA, $rB", IntegerOp,
           [(set (v4i32 VECREG:$rT), (and (v4i32 VECREG:$rA),
                                          (vnot (v4i32 VECREG:$rB))))]>;

def ANDCr32:
    RRForm<0b10000011010, (outs R32C:$rT), (ins R32C:$rA, R32C:$rB),
           "andc\t$rT, $rA, $rB", IntegerOp,
           [(set R32C:$rT, (and R32C:$rA, (not R32C:$rB)))]>;

def ANDCr16:
    RRForm<0b10000011010, (outs R16C:$rT), (ins R16C:$rA, R16C:$rB),
           "andc\t$rT, $rA, $rB", IntegerOp,
           [(set R16C:$rT, (and R16C:$rA, (not R16C:$rB)))]>;

def ANDCr8:
    RRForm<0b10000011010, (outs R8C:$rT), (ins R8C:$rA, R8C:$rB),
           "andc\t$rT, $rA, $rB", IntegerOp,
           [(set R8C:$rT, (and R8C:$rA, (not R8C:$rB)))]>;

// AND with byte immediate:
def ANDBIv16i8:
    RI10Form<0b01101000, (outs VECREG:$rT), (ins VECREG:$rA, u10imm:$val),
             "andbi\t$rT, $rA, $val", IntegerOp,
             [(set (v16i8 VECREG:$rT),
                   (and (v16i8 VECREG:$rA), (v16i8 v16i8U8Imm:$val)))]>;

def ANDBIr8:
    RI10Form<0b01101000, (outs R8C:$rT), (ins R8C:$rA, u10imm_i8:$val),
             "andbi\t$rT, $rA, $val", IntegerOp,
             [(set R8C:$rT, (and R8C:$rA, immU8:$val))]>;

// AND with halfword immediate:
def ANDHIv8i16:
    RI10Form<0b10101000, (outs VECREG:$rT), (ins VECREG:$rA, s10imm:$val),
             "andhi\t$rT, $rA, $val", IntegerOp,
             [(set (v8i16 VECREG:$rT),
                   (and (v8i16 VECREG:$rA), v8i16SExt10Imm:$val))]>;

// NOTE(review): the assembly operand is s10imm while the pattern predicate
// is i16ImmUns10 -- confirm the intended signedness of this immediate.
def ANDHIr16:
    RI10Form<0b10101000, (outs R16C:$rT), (ins R16C:$rA, s10imm:$val),
             "andhi\t$rT, $rA, $val", IntegerOp,
             [(set R16C:$rT, (and R16C:$rA, i16ImmUns10:$val))]>;

// Hacked form of ANDHI used to zero-extend i8 quantities to i16.
def ANDHI1To2:
    RI10Form<0b10101000, (outs R16C:$rT), (ins R8C:$rA, s10imm:$val),
             "andhi\t$rT, $rA, $val", IntegerOp,
             [(set R16C:$rT, (and (zext R8C:$rA), i16ImmSExt10:$val))]>;

// AND with word immediate. N.B.: the scalar "andi" forms previously used
// opcode 0b10101000, which is andhi's opcode (cf. ANDHI* above); corrected
// to 0b00101000 to match ANDIv4i32.
def ANDIv4i32:
    RI10Form<0b00101000, (outs VECREG:$rT), (ins VECREG:$rA, s10imm:$val),
             "andi\t$rT, $rA, $val", IntegerOp,
             [(set (v4i32 VECREG:$rT),
                   (and (v4i32 VECREG:$rA), v4i32SExt10Imm:$val))]>;

def ANDIr32:
    RI10Form<0b00101000, (outs R32C:$rT), (ins R32C:$rA, s10imm_i32:$val),
             "andi\t$rT, $rA, $val", IntegerOp,
             [(set R32C:$rT, (and R32C:$rA, i32ImmSExt10:$val))]>;

// Hacked form of ANDI to zero-extend i8 quantities to i32. See the
// zext 8->32 pattern below.
def ANDI1To4:
    RI10Form<0b00101000, (outs R32C:$rT), (ins R8C:$rA, s10imm_i32:$val),
             "andi\t$rT, $rA, $val", IntegerOp,
             [(set R32C:$rT, (and (zext R8C:$rA), i32ImmSExt10:$val))]>;

// Hacked form of ANDI to zero-extend i16 quantities to i32. See the
// zext 16->32 pattern below.
//
// Note that this pattern is somewhat artificial, since it might match
// something the compiler generates but is unlikely to occur in practice.
def ANDI2To4:
    RI10Form<0b00101000, (outs R32C:$rT), (ins R16C:$rA, s10imm_i32:$val),
             "andi\t$rT, $rA, $val", IntegerOp,
             [(set R32C:$rT, (and (zext R16C:$rA), i32ImmSExt10:$val))]>;
1237
//===----------------------------------------------------------------------===//
// Bitwise OR group.
// N.B.: these also serve as register-to-register copy instructions.
//===----------------------------------------------------------------------===//

def ORv16i8:
    RRForm<0b10000010000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
           "or\t$rT, $rA, $rB", IntegerOp,
           [(set (v16i8 VECREG:$rT),
                 (or (v16i8 VECREG:$rA), (v16i8 VECREG:$rB)))]>;

def ORv8i16:
    RRForm<0b10000010000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
           "or\t$rT, $rA, $rB", IntegerOp,
           [(set (v8i16 VECREG:$rT),
                 (or (v8i16 VECREG:$rA), (v8i16 VECREG:$rB)))]>;

def ORv4i32:
    RRForm<0b10000010000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
           "or\t$rT, $rA, $rB", IntegerOp,
           [(set (v4i32 VECREG:$rT),
                 (or (v4i32 VECREG:$rA), (v4i32 VECREG:$rB)))]>;

// Floating-point vector forms: the OR is done on the integer bit pattern
// and the result reinterpreted.
def ORv4f32:
    RRForm<0b10000010000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
           "or\t$rT, $rA, $rB", IntegerOp,
           [(set (v4f32 VECREG:$rT),
                 (v4f32 (bitconvert (or (v4i32 VECREG:$rA),
                                        (v4i32 VECREG:$rB)))))]>;

def ORv2f64:
    RRForm<0b10000010000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
           "or\t$rT, $rA, $rB", IntegerOp,
           [(set (v2f64 VECREG:$rT),
                 (v2f64 (bitconvert (or (v2i64 VECREG:$rA),
                                        (v2i64 VECREG:$rB)))))]>;

def ORgprc:
    RRForm<0b10000010000, (outs GPRC:$rT), (ins GPRC:$rA, GPRC:$rB),
           "or\t$rT, $rA, $rB", IntegerOp,
           [(set GPRC:$rT, (or GPRC:$rA, GPRC:$rB))]>;

def ORr64:
    RRForm<0b10000010000, (outs R64C:$rT), (ins R64C:$rA, R64C:$rB),
           "or\t$rT, $rA, $rB", IntegerOp,
           [(set R64C:$rT, (or R64C:$rA, R64C:$rB))]>;

def ORr32:
    RRForm<0b10000010000, (outs R32C:$rT), (ins R32C:$rA, R32C:$rB),
           "or\t$rT, $rA, $rB", IntegerOp,
           [(set R32C:$rT, (or R32C:$rA, R32C:$rB))]>;

def ORr16:
    RRForm<0b10000010000, (outs R16C:$rT), (ins R16C:$rA, R16C:$rB),
           "or\t$rT, $rA, $rB", IntegerOp,
           [(set R16C:$rT, (or R16C:$rA, R16C:$rB))]>;

def ORr8:
    RRForm<0b10000010000, (outs R8C:$rT), (ins R8C:$rA, R8C:$rB),
           "or\t$rT, $rA, $rB", IntegerOp,
           [(set R8C:$rT, (or R8C:$rA, R8C:$rB))]>;

// OR instruction forms used to copy f32 and f64 registers.
// They do not match patterns.
def ORf32:
    RRForm<0b10000010000, (outs R32FP:$rT), (ins R32FP:$rA, R32FP:$rB),
           "or\t$rT, $rA, $rB", IntegerOp,
           [/* no pattern */]>;

def ORf64:
    RRForm<0b10000010000, (outs R64FP:$rT), (ins R64FP:$rA, R64FP:$rB),
           "or\t$rT, $rA, $rB", IntegerOp,
           [/* no pattern */]>;
1303
// ORv*_*: scalar -> vector promotions. Each "or"s the scalar with itself
// into a vector register; the accompanying Pat maps SPUpromote_scalar onto
// the instruction.
def ORv16i8_i8:
    RRForm<0b10000010000, (outs VECREG:$rT), (ins R8C:$rA, R8C:$rB),
           "or\t$rT, $rA, $rB", IntegerOp,
           [/* no pattern */]>;

def : Pat<(v16i8 (SPUpromote_scalar R8C:$rA)),
          (ORv16i8_i8 R8C:$rA, R8C:$rA)>;

def ORv8i16_i16:
    RRForm<0b10000010000, (outs VECREG:$rT), (ins R16C:$rA, R16C:$rB),
           "or\t$rT, $rA, $rB", IntegerOp,
           [/* no pattern */]>;

def : Pat<(v8i16 (SPUpromote_scalar R16C:$rA)),
          (ORv8i16_i16 R16C:$rA, R16C:$rA)>;

def ORv4i32_i32:
    RRForm<0b10000010000, (outs VECREG:$rT), (ins R32C:$rA, R32C:$rB),
           "or\t$rT, $rA, $rB", IntegerOp,
           [/* no pattern */]>;

def : Pat<(v4i32 (SPUpromote_scalar R32C:$rA)),
          (ORv4i32_i32 R32C:$rA, R32C:$rA)>;

def ORv2i64_i64:
    RRForm<0b10000010000, (outs VECREG:$rT), (ins R64C:$rA, R64C:$rB),
           "or\t$rT, $rA, $rB", IntegerOp,
           [/* no pattern */]>;

def : Pat<(v2i64 (SPUpromote_scalar R64C:$rA)),
          (ORv2i64_i64 R64C:$rA, R64C:$rA)>;

def ORv4f32_f32:
    RRForm<0b10000010000, (outs VECREG:$rT), (ins R32FP:$rA, R32FP:$rB),
           "or\t$rT, $rA, $rB", IntegerOp,
           [/* no pattern */]>;

def : Pat<(v4f32 (SPUpromote_scalar R32FP:$rA)),
          (ORv4f32_f32 R32FP:$rA, R32FP:$rA)>;

def ORv2f64_f64:
    RRForm<0b10000010000, (outs VECREG:$rT), (ins R64FP:$rA, R64FP:$rB),
           "or\t$rT, $rA, $rB", IntegerOp,
           [/* no pattern */]>;

def : Pat<(v2f64 (SPUpromote_scalar R64FP:$rA)),
          (ORv2f64_f64 R64FP:$rA, R64FP:$rA)>;
1352
// ORi*_v*: extract vector element 0 (the preferred slot) into a scalar
// register by "or"ing the vector with itself.
// NOTE(review): unlike the wider types, the i8 form has no matching
// SPUextract_elt0_chained pattern -- confirm whether that is intentional.
def ORi8_v16i8:
    RRForm<0b10000010000, (outs R8C:$rT), (ins VECREG:$rA, VECREG:$rB),
           "or\t$rT, $rA, $rB", IntegerOp,
           [/* no pattern */]>;

def : Pat<(SPUextract_elt0 (v16i8 VECREG:$rA)),
          (ORi8_v16i8 VECREG:$rA, VECREG:$rA)>;

def ORi16_v8i16:
    RRForm<0b10000010000, (outs R16C:$rT), (ins VECREG:$rA, VECREG:$rB),
           "or\t$rT, $rA, $rB", IntegerOp,
           [/* no pattern */]>;

def : Pat<(SPUextract_elt0 (v8i16 VECREG:$rA)),
          (ORi16_v8i16 VECREG:$rA, VECREG:$rA)>;

def : Pat<(SPUextract_elt0_chained (v8i16 VECREG:$rA)),
          (ORi16_v8i16 VECREG:$rA, VECREG:$rA)>;

def ORi32_v4i32:
    RRForm<0b10000010000, (outs R32C:$rT), (ins VECREG:$rA, VECREG:$rB),
           "or\t$rT, $rA, $rB", IntegerOp,
           [/* no pattern */]>;

def : Pat<(SPUextract_elt0 (v4i32 VECREG:$rA)),
          (ORi32_v4i32 VECREG:$rA, VECREG:$rA)>;

def : Pat<(SPUextract_elt0_chained (v4i32 VECREG:$rA)),
          (ORi32_v4i32 VECREG:$rA, VECREG:$rA)>;

def ORi64_v2i64:
    RRForm<0b10000010000, (outs R64C:$rT), (ins VECREG:$rA, VECREG:$rB),
           "or\t$rT, $rA, $rB", IntegerOp,
           [/* no pattern */]>;

def : Pat<(SPUextract_elt0 (v2i64 VECREG:$rA)),
          (ORi64_v2i64 VECREG:$rA, VECREG:$rA)>;

def : Pat<(SPUextract_elt0_chained (v2i64 VECREG:$rA)),
          (ORi64_v2i64 VECREG:$rA, VECREG:$rA)>;

def ORf32_v4f32:
    RRForm<0b10000010000, (outs R32FP:$rT), (ins VECREG:$rA, VECREG:$rB),
           "or\t$rT, $rA, $rB", IntegerOp,
           [/* no pattern */]>;

def : Pat<(SPUextract_elt0 (v4f32 VECREG:$rA)),
          (ORf32_v4f32 VECREG:$rA, VECREG:$rA)>;

def : Pat<(SPUextract_elt0_chained (v4f32 VECREG:$rA)),
          (ORf32_v4f32 VECREG:$rA, VECREG:$rA)>;

def ORf64_v2f64:
    RRForm<0b10000010000, (outs R64FP:$rT), (ins VECREG:$rA, VECREG:$rB),
           "or\t$rT, $rA, $rB", IntegerOp,
           [/* no pattern */]>;

def : Pat<(SPUextract_elt0 (v2f64 VECREG:$rA)),
          (ORf64_v2f64 VECREG:$rA, VECREG:$rA)>;

def : Pat<(SPUextract_elt0_chained (v2f64 VECREG:$rA)),
          (ORf64_v2f64 VECREG:$rA, VECREG:$rA)>;
1416
// ORC: bitwise "or" with complement (match before ORvec, ORr32).
def ORCv16i8:
    RRForm<0b10010010000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
           "orc\t$rT, $rA, $rB", IntegerOp,
           [(set (v16i8 VECREG:$rT), (or (v16i8 VECREG:$rA),
                                         (vnot (v16i8 VECREG:$rB))))]>;

def ORCv8i16:
    RRForm<0b10010010000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
           "orc\t$rT, $rA, $rB", IntegerOp,
           [(set (v8i16 VECREG:$rT), (or (v8i16 VECREG:$rA),
                                         (vnot (v8i16 VECREG:$rB))))]>;

def ORCv4i32:
    RRForm<0b10010010000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
           "orc\t$rT, $rA, $rB", IntegerOp,
           [(set (v4i32 VECREG:$rT), (or (v4i32 VECREG:$rA),
                                         (vnot (v4i32 VECREG:$rB))))]>;

def ORCr32:
    RRForm<0b10010010000, (outs R32C:$rT), (ins R32C:$rA, R32C:$rB),
           "orc\t$rT, $rA, $rB", IntegerOp,
           [(set R32C:$rT, (or R32C:$rA, (not R32C:$rB)))]>;

def ORCr16:
    RRForm<0b10010010000, (outs R16C:$rT), (ins R16C:$rA, R16C:$rB),
           "orc\t$rT, $rA, $rB", IntegerOp,
           [(set R16C:$rT, (or R16C:$rA, (not R16C:$rB)))]>;

def ORCr8:
    RRForm<0b10010010000, (outs R8C:$rT), (ins R8C:$rA, R8C:$rB),
           "orc\t$rT, $rA, $rB", IntegerOp,
           [(set R8C:$rT, (or R8C:$rA, (not R8C:$rB)))]>;

// OR with byte immediate:
def ORBIv16i8:
    RI10Form<0b01100000, (outs VECREG:$rT), (ins VECREG:$rA, u10imm:$val),
             "orbi\t$rT, $rA, $val", IntegerOp,
             [(set (v16i8 VECREG:$rT),
                   (or (v16i8 VECREG:$rA), (v16i8 v16i8U8Imm:$val)))]>;

def ORBIr8:
    RI10Form<0b01100000, (outs R8C:$rT), (ins R8C:$rA, u10imm_i8:$val),
             "orbi\t$rT, $rA, $val", IntegerOp,
             [(set R8C:$rT, (or R8C:$rA, immU8:$val))]>;

// OR with halfword immediate:
def ORHIv8i16:
    RI10Form<0b10100000, (outs VECREG:$rT), (ins VECREG:$rA, u10imm:$val),
             "orhi\t$rT, $rA, $val", IntegerOp,
             [(set (v8i16 VECREG:$rT), (or (v8i16 VECREG:$rA),
                                           v8i16Uns10Imm:$val))]>;

def ORHIr16:
    RI10Form<0b10100000, (outs R16C:$rT), (ins R16C:$rA, u10imm:$val),
             "orhi\t$rT, $rA, $val", IntegerOp,
             [(set R16C:$rT, (or R16C:$rA, i16ImmUns10:$val))]>;

// Hacked form of ORHI used to promote 8-bit registers to 16-bit.
def ORHI1To2:
    RI10Form<0b10100000, (outs R16C:$rT), (ins R8C:$rA, s10imm:$val),
             "orhi\t$rT, $rA, $val", IntegerOp,
             [(set R16C:$rT, (or (anyext R8C:$rA), i16ImmSExt10:$val))]>;

// Bitwise "or" with word immediate:
def ORIv4i32:
    RI10Form<0b00100000, (outs VECREG:$rT), (ins VECREG:$rA, u10imm:$val),
             "ori\t$rT, $rA, $val", IntegerOp,
             [(set (v4i32 VECREG:$rT), (or (v4i32 VECREG:$rA),
                                           v4i32Uns10Imm:$val))]>;

def ORIr32:
    RI10Form<0b00100000, (outs R32C:$rT), (ins R32C:$rA, u10imm_i32:$val),
             "ori\t$rT, $rA, $val", IntegerOp,
             [(set R32C:$rT, (or R32C:$rA, i32ImmUns10:$val))]>;

def ORIr64:
    RI10Form_1<0b00100000, (outs R64C:$rT), (ins R64C:$rA, s10imm_i32:$val),
               "ori\t$rT, $rA, $val", IntegerOp,
               [/* no pattern */]>;

// ORI2To4: hacked version of the ori instruction to extend 16-bit
// quantities to 32-bit quantities; used exclusively to match "anyext"
// conversions (vide infra, "anyext 16->32" pattern).
def ORI2To4:
    RI10Form<0b00100000, (outs R32C:$rT), (ins R16C:$rA, s10imm_i32:$val),
             "ori\t$rT, $rA, $val", IntegerOp,
             [(set R32C:$rT, (or (anyext R16C:$rA), i32ImmSExt10:$val))]>;

// ORI1To4: hacked version of the ORI instruction to extend 8-bit
// quantities to 32-bit quantities; used exclusively to match "anyext"
// conversions (vide infra, "anyext 8->32" pattern).
def ORI1To4:
    RI10Form<0b00100000, (outs R32C:$rT), (ins R8C:$rA, s10imm_i32:$val),
             "ori\t$rT, $rA, $val", IntegerOp,
             [(set R32C:$rT, (or (anyext R8C:$rA), i32ImmSExt10:$val))]>;
1513
// ORX: "or" across the vector: or's $rA's word slots, leaving the result
// in $rT[0]; slots 1-3 are zeroed.
//
// FIXME: Needs to match an intrinsic pattern.
// NOTE(review): ORX, the XOR forms below, and ORC all share opcode
// 0b10010010000 here; at most one encoding can be correct -- verify
// against the SPU ISA.
def ORXv4i32:
    RRForm<0b10010010000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
           "orx\t$rT, $rA, $rB", IntegerOp,
           []>;

// XOR:
def XORv16i8:
    RRForm<0b10010010000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
           "xor\t$rT, $rA, $rB", IntegerOp,
           [(set (v16i8 VECREG:$rT),
                 (xor (v16i8 VECREG:$rA), (v16i8 VECREG:$rB)))]>;

def XORv8i16:
    RRForm<0b10010010000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
           "xor\t$rT, $rA, $rB", IntegerOp,
           [(set (v8i16 VECREG:$rT),
                 (xor (v8i16 VECREG:$rA), (v8i16 VECREG:$rB)))]>;

def XORv4i32:
    RRForm<0b10010010000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
           "xor\t$rT, $rA, $rB", IntegerOp,
           [(set (v4i32 VECREG:$rT),
                 (xor (v4i32 VECREG:$rA), (v4i32 VECREG:$rB)))]>;

def XORr32:
    RRForm<0b10010010000, (outs R32C:$rT), (ins R32C:$rA, R32C:$rB),
           "xor\t$rT, $rA, $rB", IntegerOp,
           [(set R32C:$rT, (xor R32C:$rA, R32C:$rB))]>;
1543
//==----------------------------------------------------------
// Special forms for floating point instructions.
// Bitwise ORs and ANDs don't make sense for normal floating
// point numbers. These operations (fneg and fabs), however,
// require bitwise logical ops to manipulate the sign bit.
// None of these carry a DAG pattern; they are emitted explicitly by the
// fneg/fabs lowering referenced in each bracket comment.
def XORfneg32:
    RRForm<0b10010010000, (outs R32FP:$rT), (ins R32FP:$rA, R32C:$rB),
      "xor\t$rT, $rA, $rB", IntegerOp,
      [/* Intentionally does not match a pattern, see fneg32 */]>;

// KLUDGY! Better way to do this without a VECREG? bitconvert?
// VECREG is assumed to contain two identical 64-bit masks, so
// it doesn't matter which word we select for the xor
def XORfneg64:
    RRForm<0b10010010000, (outs R64FP:$rT), (ins R64FP:$rA, VECREG:$rB),
      "xor\t$rT, $rA, $rB", IntegerOp,
      [/* Intentionally does not match a pattern, see fneg64 */]>;

// Could use XORv4i32, but will use this for clarity
def XORfnegvec:
    RRForm<0b10010010000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
      "xor\t$rT, $rA, $rB", IntegerOp,
      [/* Intentionally does not match a pattern, see fneg{32,64} */]>;

//==----------------------------------------------------------

// Scalar halfword and byte xor forms.
def XORr16:
    RRForm<0b10010010000, (outs R16C:$rT), (ins R16C:$rA, R16C:$rB),
      "xor\t$rT, $rA, $rB", IntegerOp,
      [(set R16C:$rT, (xor R16C:$rA, R16C:$rB))]>;

def XORr8:
    RRForm<0b10010010000, (outs R8C:$rT), (ins R8C:$rA, R8C:$rB),
      "xor\t$rT, $rA, $rB", IntegerOp,
      [(set R8C:$rT, (xor R8C:$rA, R8C:$rB))]>;

// XORBI: xor with an unsigned 8-bit immediate (byte forms).
def XORBIv16i8:
    RI10Form<0b01100000, (outs VECREG:$rT), (ins VECREG:$rA, u10imm:$val),
      "xorbi\t$rT, $rA, $val", IntegerOp,
      [(set (v16i8 VECREG:$rT), (xor (v16i8 VECREG:$rA), v16i8U8Imm:$val))]>;

def XORBIr8:
    RI10Form<0b01100000, (outs R8C:$rT), (ins R8C:$rA, u10imm_i8:$val),
      "xorbi\t$rT, $rA, $val", IntegerOp,
      [(set R8C:$rT, (xor R8C:$rA, immU8:$val))]>;

// XORHI: xor with a sign-extended 10-bit immediate (halfword forms).
def XORHIv8i16:
    RI10Form<0b10100000, (outs VECREG:$rT), (ins VECREG:$rA, s10imm:$val),
      "xorhi\t$rT, $rA, $val", IntegerOp,
      [(set (v8i16 VECREG:$rT), (xor (v8i16 VECREG:$rA),
                                     v8i16SExt10Imm:$val))]>;

def XORHIr16:
    RI10Form<0b10100000, (outs R16C:$rT), (ins R16C:$rA, s10imm:$val),
      "xorhi\t$rT, $rA, $val", IntegerOp,
      [(set R16C:$rT, (xor R16C:$rA, i16ImmSExt10:$val))]>;

// XORI: xor with a sign-extended 10-bit immediate (word forms).
def XORIv4i32:
    RI10Form<0b00100000, (outs VECREG:$rT), (ins VECREG:$rA, s10imm:$val),
      "xori\t$rT, $rA, $val", IntegerOp,
      [(set (v4i32 VECREG:$rT), (xor (v4i32 VECREG:$rA),
                                     v4i32SExt10Imm:$val))]>;

def XORIr32:
    RI10Form<0b00100000, (outs R32C:$rT), (ins R32C:$rA, s10imm_i32:$val),
      "xori\t$rT, $rA, $val", IntegerOp,
      [(set R32C:$rT, (xor R32C:$rA, i32ImmSExt10:$val))]>;

// NAND: complement of AND. Scalar patterns use `not`; vector patterns use
// `vnot`, the element-wise vector complement.
def NANDv16i8:
    RRForm<0b10010010000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
      "nand\t$rT, $rA, $rB", IntegerOp,
      [(set (v16i8 VECREG:$rT), (vnot (and (v16i8 VECREG:$rA),
                                           (v16i8 VECREG:$rB))))]>;

def NANDv8i16:
    RRForm<0b10010010000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
      "nand\t$rT, $rA, $rB", IntegerOp,
      [(set (v8i16 VECREG:$rT), (vnot (and (v8i16 VECREG:$rA),
                                           (v8i16 VECREG:$rB))))]>;

def NANDv4i32:
    RRForm<0b10010010000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
      "nand\t$rT, $rA, $rB", IntegerOp,
      [(set (v4i32 VECREG:$rT), (vnot (and (v4i32 VECREG:$rA),
                                           (v4i32 VECREG:$rB))))]>;

def NANDr32:
    RRForm<0b10010010000, (outs R32C:$rT), (ins R32C:$rA, R32C:$rB),
      "nand\t$rT, $rA, $rB", IntegerOp,
      [(set R32C:$rT, (not (and R32C:$rA, R32C:$rB)))]>;

def NANDr16:
    RRForm<0b10010010000, (outs R16C:$rT), (ins R16C:$rA, R16C:$rB),
      "nand\t$rT, $rA, $rB", IntegerOp,
      [(set R16C:$rT, (not (and R16C:$rA, R16C:$rB)))]>;

def NANDr8:
    RRForm<0b10010010000, (outs R8C:$rT), (ins R8C:$rA, R8C:$rB),
      "nand\t$rT, $rA, $rB", IntegerOp,
      [(set R8C:$rT, (not (and R8C:$rA, R8C:$rB)))]>;

// NOR: complement of OR, in the same vector/scalar shapes as NAND above.
def NORv16i8:
    RRForm<0b10010010000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
      "nor\t$rT, $rA, $rB", IntegerOp,
      [(set (v16i8 VECREG:$rT), (vnot (or (v16i8 VECREG:$rA),
                                          (v16i8 VECREG:$rB))))]>;

def NORv8i16:
    RRForm<0b10010010000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
      "nor\t$rT, $rA, $rB", IntegerOp,
      [(set (v8i16 VECREG:$rT), (vnot (or (v8i16 VECREG:$rA),
                                          (v8i16 VECREG:$rB))))]>;

def NORv4i32:
    RRForm<0b10010010000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
      "nor\t$rT, $rA, $rB", IntegerOp,
      [(set (v4i32 VECREG:$rT), (vnot (or (v4i32 VECREG:$rA),
                                          (v4i32 VECREG:$rB))))]>;

def NORr32:
    RRForm<0b10010010000, (outs R32C:$rT), (ins R32C:$rA, R32C:$rB),
      "nor\t$rT, $rA, $rB", IntegerOp,
      [(set R32C:$rT, (not (or R32C:$rA, R32C:$rB)))]>;

def NORr16:
    RRForm<0b10010010000, (outs R16C:$rT), (ins R16C:$rA, R16C:$rB),
      "nor\t$rT, $rA, $rB", IntegerOp,
      [(set R16C:$rT, (not (or R16C:$rA, R16C:$rB)))]>;

def NORr8:
    RRForm<0b10010010000, (outs R8C:$rT), (ins R8C:$rA, R8C:$rB),
      "nor\t$rT, $rA, $rB", IntegerOp,
      [(set R8C:$rT, (not (or R8C:$rA, R8C:$rB)))]>;

// EQV: Equivalence (1 for each same bit, otherwise 0), i.e. ~(A xor B).
// The instruction pattern matches the (A&B)|(~A&~B) expansion; the extra
// Pat<> records match the equivalent xor-with-complement forms, in both
// operand orders since xor patterns are not auto-commuted here.
def EQVv16i8:
    RRForm<0b10010010000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
      "eqv\t$rT, $rA, $rB", IntegerOp,
      [(set (v16i8 VECREG:$rT), (or (and (v16i8 VECREG:$rA),
                                         (v16i8 VECREG:$rB)),
                                    (and (vnot (v16i8 VECREG:$rA)),
                                         (vnot (v16i8 VECREG:$rB)))))]>;

def : Pat<(xor (v16i8 VECREG:$rA), (vnot (v16i8 VECREG:$rB))),
          (EQVv16i8 VECREG:$rA, VECREG:$rB)>;

def : Pat<(xor (vnot (v16i8 VECREG:$rA)), (v16i8 VECREG:$rB)),
          (EQVv16i8 VECREG:$rA, VECREG:$rB)>;

def EQVv8i16:
    RRForm<0b10010010000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
      "eqv\t$rT, $rA, $rB", IntegerOp,
      [(set (v8i16 VECREG:$rT), (or (and (v8i16 VECREG:$rA),
                                         (v8i16 VECREG:$rB)),
                                    (and (vnot (v8i16 VECREG:$rA)),
                                         (vnot (v8i16 VECREG:$rB)))))]>;

def : Pat<(xor (v8i16 VECREG:$rA), (vnot (v8i16 VECREG:$rB))),
          (EQVv8i16 VECREG:$rA, VECREG:$rB)>;

def : Pat<(xor (vnot (v8i16 VECREG:$rA)), (v8i16 VECREG:$rB)),
          (EQVv8i16 VECREG:$rA, VECREG:$rB)>;

def EQVv4i32:
    RRForm<0b10010010000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
      "eqv\t$rT, $rA, $rB", IntegerOp,
      [(set (v4i32 VECREG:$rT), (or (and (v4i32 VECREG:$rA),
                                         (v4i32 VECREG:$rB)),
                                    (and (vnot (v4i32 VECREG:$rA)),
                                         (vnot (v4i32 VECREG:$rB)))))]>;

def : Pat<(xor (v4i32 VECREG:$rA), (vnot (v4i32 VECREG:$rB))),
          (EQVv4i32 VECREG:$rA, VECREG:$rB)>;

def : Pat<(xor (vnot (v4i32 VECREG:$rA)), (v4i32 VECREG:$rB)),
          (EQVv4i32 VECREG:$rA, VECREG:$rB)>;

def EQVr32:
    RRForm<0b10010010000, (outs R32C:$rT), (ins R32C:$rA, R32C:$rB),
      "eqv\t$rT, $rA, $rB", IntegerOp,
      [(set R32C:$rT, (or (and R32C:$rA, R32C:$rB),
                          (and (not R32C:$rA), (not R32C:$rB))))]>;

def : Pat<(xor R32C:$rA, (not R32C:$rB)),
          (EQVr32 R32C:$rA, R32C:$rB)>;

def : Pat<(xor (not R32C:$rA), R32C:$rB),
          (EQVr32 R32C:$rA, R32C:$rB)>;

def EQVr16:
    RRForm<0b10010010000, (outs R16C:$rT), (ins R16C:$rA, R16C:$rB),
      "eqv\t$rT, $rA, $rB", IntegerOp,
      [(set R16C:$rT, (or (and R16C:$rA, R16C:$rB),
                          (and (not R16C:$rA), (not R16C:$rB))))]>;

def : Pat<(xor R16C:$rA, (not R16C:$rB)),
          (EQVr16 R16C:$rA, R16C:$rB)>;

def : Pat<(xor (not R16C:$rA), R16C:$rB),
          (EQVr16 R16C:$rA, R16C:$rB)>;

def EQVr8:
    RRForm<0b10010010000, (outs R8C:$rT), (ins R8C:$rA, R8C:$rB),
      "eqv\t$rT, $rA, $rB", IntegerOp,
      [(set R8C:$rT, (or (and R8C:$rA, R8C:$rB),
                         (and (not R8C:$rA), (not R8C:$rB))))]>;

def : Pat<(xor R8C:$rA, (not R8C:$rB)),
          (EQVr8 R8C:$rA, R8C:$rB)>;

def : Pat<(xor (not R8C:$rA), R8C:$rB),
          (EQVr8 R8C:$rA, R8C:$rB)>;

// gcc optimizes (p & q) | (~p & ~q) -> ~(p | q) | (p & q), so match that
// pattern also (one Pat<> per type the EQV instruction supports):
def : Pat<(or (vnot (or (v16i8 VECREG:$rA), (v16i8 VECREG:$rB))),
              (and (v16i8 VECREG:$rA), (v16i8 VECREG:$rB))),
          (EQVv16i8 VECREG:$rA, VECREG:$rB)>;

def : Pat<(or (vnot (or (v8i16 VECREG:$rA), (v8i16 VECREG:$rB))),
              (and (v8i16 VECREG:$rA), (v8i16 VECREG:$rB))),
          (EQVv8i16 VECREG:$rA, VECREG:$rB)>;

def : Pat<(or (vnot (or (v4i32 VECREG:$rA), (v4i32 VECREG:$rB))),
              (and (v4i32 VECREG:$rA), (v4i32 VECREG:$rB))),
          (EQVv4i32 VECREG:$rA, VECREG:$rB)>;

def : Pat<(or (not (or R32C:$rA, R32C:$rB)), (and R32C:$rA, R32C:$rB)),
          (EQVr32 R32C:$rA, R32C:$rB)>;

def : Pat<(or (not (or R16C:$rA, R16C:$rB)), (and R16C:$rA, R16C:$rB)),
          (EQVr16 R16C:$rA, R16C:$rB)>;

def : Pat<(or (not (or R8C:$rA, R8C:$rB)), (and R8C:$rA, R8C:$rB)),
          (EQVr8 R8C:$rA, R8C:$rB)>;

// Select bits: per-bit choice between $rA and $rB under the mask $rC,
// selected through the custom SPUselb_v16i8 node.
def SELBv16i8:
    RRRForm<0b1000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB, VECREG:$rC),
      "selb\t$rT, $rA, $rB, $rC", IntegerOp,
      [(set (v16i8 VECREG:$rT),
            (SPUselb_v16i8 (v16i8 VECREG:$rA), (v16i8 VECREG:$rB),
                           (v16i8 VECREG:$rC)))]>;

// The 8 operand-order variants of (A & C) | (B & ~C) and (A & ~C) | (B & C).
// The original file listed each of these 8 patterns a second time verbatim;
// the redundant duplicates have been removed.
//
// NOTE(review): the (A & C) | (B & ~C) family and the (A & ~C) | (B & C)
// family both map to SELB with the same operand order. For a fixed selb mask
// polarity these cannot both be correct -- verify operand order against the
// SPU ISA's selb description.
def : Pat<(or (and (v16i8 VECREG:$rA), (v16i8 VECREG:$rC)),
              (and (v16i8 VECREG:$rB), (vnot (v16i8 VECREG:$rC)))),
          (SELBv16i8 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;

def : Pat<(or (and (v16i8 VECREG:$rC), (v16i8 VECREG:$rA)),
              (and (v16i8 VECREG:$rB), (vnot (v16i8 VECREG:$rC)))),
          (SELBv16i8 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;

def : Pat<(or (and (v16i8 VECREG:$rA), (v16i8 VECREG:$rC)),
              (and (vnot (v16i8 VECREG:$rC)), (v16i8 VECREG:$rB))),
          (SELBv16i8 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;

def : Pat<(or (and (v16i8 VECREG:$rC), (v16i8 VECREG:$rA)),
              (and (vnot (v16i8 VECREG:$rC)), (v16i8 VECREG:$rB))),
          (SELBv16i8 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;

def : Pat<(or (and (v16i8 VECREG:$rA), (vnot (v16i8 VECREG:$rC))),
              (and (v16i8 VECREG:$rB), (v16i8 VECREG:$rC))),
          (SELBv16i8 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;

def : Pat<(or (and (v16i8 VECREG:$rA), (vnot (v16i8 VECREG:$rC))),
              (and (v16i8 VECREG:$rC), (v16i8 VECREG:$rB))),
          (SELBv16i8 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;

def : Pat<(or (and (vnot (v16i8 VECREG:$rC)), (v16i8 VECREG:$rA)),
              (and (v16i8 VECREG:$rB), (v16i8 VECREG:$rC))),
          (SELBv16i8 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;

def : Pat<(or (and (vnot (v16i8 VECREG:$rC)), (v16i8 VECREG:$rA)),
              (and (v16i8 VECREG:$rC), (v16i8 VECREG:$rB))),
          (SELBv16i8 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;

// SELB for v8i16, selected through the custom SPUselb_v8i16 node.
def SELBv8i16:
    RRRForm<0b1000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB, VECREG:$rC),
      "selb\t$rT, $rA, $rB, $rC", IntegerOp,
      [(set (v8i16 VECREG:$rT),
            (SPUselb_v8i16 (v8i16 VECREG:$rA), (v8i16 VECREG:$rB),
                           (v8i16 VECREG:$rC)))]>;

// The 8 operand-order variants of (A & C) | (B & ~C) and (A & ~C) | (B & C).
// The original file listed each of these 8 patterns a second time verbatim;
// the redundant duplicates have been removed. See the operand-order review
// note on the SELBv16i8 patterns, which applies here as well.
def : Pat<(or (and (v8i16 VECREG:$rA), (v8i16 VECREG:$rC)),
              (and (v8i16 VECREG:$rB), (vnot (v8i16 VECREG:$rC)))),
          (SELBv8i16 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;

def : Pat<(or (and (v8i16 VECREG:$rC), (v8i16 VECREG:$rA)),
              (and (v8i16 VECREG:$rB), (vnot (v8i16 VECREG:$rC)))),
          (SELBv8i16 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;

def : Pat<(or (and (v8i16 VECREG:$rA), (v8i16 VECREG:$rC)),
              (and (vnot (v8i16 VECREG:$rC)), (v8i16 VECREG:$rB))),
          (SELBv8i16 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;

def : Pat<(or (and (v8i16 VECREG:$rC), (v8i16 VECREG:$rA)),
              (and (vnot (v8i16 VECREG:$rC)), (v8i16 VECREG:$rB))),
          (SELBv8i16 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;

def : Pat<(or (and (v8i16 VECREG:$rA), (vnot (v8i16 VECREG:$rC))),
              (and (v8i16 VECREG:$rB), (v8i16 VECREG:$rC))),
          (SELBv8i16 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;

def : Pat<(or (and (v8i16 VECREG:$rA), (vnot (v8i16 VECREG:$rC))),
              (and (v8i16 VECREG:$rC), (v8i16 VECREG:$rB))),
          (SELBv8i16 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;

def : Pat<(or (and (vnot (v8i16 VECREG:$rC)), (v8i16 VECREG:$rA)),
              (and (v8i16 VECREG:$rB), (v8i16 VECREG:$rC))),
          (SELBv8i16 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;

def : Pat<(or (and (vnot (v8i16 VECREG:$rC)), (v8i16 VECREG:$rA)),
              (and (v8i16 VECREG:$rC), (v8i16 VECREG:$rB))),
          (SELBv8i16 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;

// SELB for v4i32, selected through the custom SPUselb_v4i32 node.
def SELBv4i32:
    RRRForm<0b1000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB, VECREG:$rC),
      "selb\t$rT, $rA, $rB, $rC", IntegerOp,
      [(set (v4i32 VECREG:$rT),
            (SPUselb_v4i32 (v4i32 VECREG:$rA), (v4i32 VECREG:$rB),
                           (v4i32 VECREG:$rC)))]>;

// The 8 operand-order variants of (A & C) | (B & ~C) and (A & ~C) | (B & C).
// The original file listed each of these 8 patterns a second time verbatim;
// the redundant duplicates have been removed. See the operand-order review
// note on the SELBv16i8 patterns, which applies here as well.
def : Pat<(or (and (v4i32 VECREG:$rA), (v4i32 VECREG:$rC)),
              (and (v4i32 VECREG:$rB), (vnot (v4i32 VECREG:$rC)))),
          (SELBv4i32 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;

def : Pat<(or (and (v4i32 VECREG:$rC), (v4i32 VECREG:$rA)),
              (and (v4i32 VECREG:$rB), (vnot (v4i32 VECREG:$rC)))),
          (SELBv4i32 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;

def : Pat<(or (and (v4i32 VECREG:$rA), (v4i32 VECREG:$rC)),
              (and (vnot (v4i32 VECREG:$rC)), (v4i32 VECREG:$rB))),
          (SELBv4i32 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;

def : Pat<(or (and (v4i32 VECREG:$rC), (v4i32 VECREG:$rA)),
              (and (vnot (v4i32 VECREG:$rC)), (v4i32 VECREG:$rB))),
          (SELBv4i32 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;

def : Pat<(or (and (v4i32 VECREG:$rA), (vnot (v4i32 VECREG:$rC))),
              (and (v4i32 VECREG:$rB), (v4i32 VECREG:$rC))),
          (SELBv4i32 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;

def : Pat<(or (and (v4i32 VECREG:$rA), (vnot (v4i32 VECREG:$rC))),
              (and (v4i32 VECREG:$rC), (v4i32 VECREG:$rB))),
          (SELBv4i32 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;

def : Pat<(or (and (vnot (v4i32 VECREG:$rC)), (v4i32 VECREG:$rA)),
              (and (v4i32 VECREG:$rB), (v4i32 VECREG:$rC))),
          (SELBv4i32 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;

def : Pat<(or (and (vnot (v4i32 VECREG:$rC)), (v4i32 VECREG:$rA)),
              (and (v4i32 VECREG:$rC), (v4i32 VECREG:$rB))),
          (SELBv4i32 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;

// SELB for 32-bit scalars. No instruction pattern is attached (there is no
// scalar SPUselb node); selection happens only through the Pat<> records.
def SELBr32:
    RRRForm<0b1000, (outs R32C:$rT), (ins R32C:$rA, R32C:$rB, R32C:$rC),
      "selb\t$rT, $rA, $rB, $rC", IntegerOp,
      []>;

// And the various patterns that can be matched... (all 8 of them :-)
// NOTE(review): the first four patterns, (A & C) | (B & ~C), and the last
// four, (A & ~C) | (B & C), both map to SELB with the same operand order;
// for a fixed selb mask polarity these cannot both be correct -- verify
// against the SPU ISA's selb description.
def : Pat<(or (and R32C:$rA, R32C:$rC),
              (and R32C:$rB, (not R32C:$rC))),
          (SELBr32 R32C:$rA, R32C:$rB, R32C:$rC)>;

def : Pat<(or (and R32C:$rC, R32C:$rA),
              (and R32C:$rB, (not R32C:$rC))),
          (SELBr32 R32C:$rA, R32C:$rB, R32C:$rC)>;

def : Pat<(or (and R32C:$rA, R32C:$rC),
              (and (not R32C:$rC), R32C:$rB)),
          (SELBr32 R32C:$rA, R32C:$rB, R32C:$rC)>;

def : Pat<(or (and R32C:$rC, R32C:$rA),
              (and (not R32C:$rC), R32C:$rB)),
          (SELBr32 R32C:$rA, R32C:$rB, R32C:$rC)>;

def : Pat<(or (and R32C:$rA, (not R32C:$rC)),
              (and R32C:$rB, R32C:$rC)),
          (SELBr32 R32C:$rA, R32C:$rB, R32C:$rC)>;

def : Pat<(or (and R32C:$rA, (not R32C:$rC)),
              (and R32C:$rC, R32C:$rB)),
          (SELBr32 R32C:$rA, R32C:$rB, R32C:$rC)>;

def : Pat<(or (and (not R32C:$rC), R32C:$rA),
              (and R32C:$rB, R32C:$rC)),
          (SELBr32 R32C:$rA, R32C:$rB, R32C:$rC)>;

def : Pat<(or (and (not R32C:$rC), R32C:$rA),
              (and R32C:$rC, R32C:$rB)),
          (SELBr32 R32C:$rA, R32C:$rB, R32C:$rC)>;

// SELB for 16-bit scalars; pattern-only selection, mirroring SELBr32.
def SELBr16:
    RRRForm<0b1000, (outs R16C:$rT), (ins R16C:$rA, R16C:$rB, R16C:$rC),
      "selb\t$rT, $rA, $rB, $rC", IntegerOp,
      []>;

// The same 8 operand-order variants as SELBr32 (and the same operand-order
// review concern applies here).
def : Pat<(or (and R16C:$rA, R16C:$rC),
              (and R16C:$rB, (not R16C:$rC))),
          (SELBr16 R16C:$rA, R16C:$rB, R16C:$rC)>;

def : Pat<(or (and R16C:$rC, R16C:$rA),
              (and R16C:$rB, (not R16C:$rC))),
          (SELBr16 R16C:$rA, R16C:$rB, R16C:$rC)>;

def : Pat<(or (and R16C:$rA, R16C:$rC),
              (and (not R16C:$rC), R16C:$rB)),
          (SELBr16 R16C:$rA, R16C:$rB, R16C:$rC)>;

def : Pat<(or (and R16C:$rC, R16C:$rA),
              (and (not R16C:$rC), R16C:$rB)),
          (SELBr16 R16C:$rA, R16C:$rB, R16C:$rC)>;

def : Pat<(or (and R16C:$rA, (not R16C:$rC)),
              (and R16C:$rB, R16C:$rC)),
          (SELBr16 R16C:$rA, R16C:$rB, R16C:$rC)>;

def : Pat<(or (and R16C:$rA, (not R16C:$rC)),
              (and R16C:$rC, R16C:$rB)),
          (SELBr16 R16C:$rA, R16C:$rB, R16C:$rC)>;

def : Pat<(or (and (not R16C:$rC), R16C:$rA),
              (and R16C:$rB, R16C:$rC)),
          (SELBr16 R16C:$rA, R16C:$rB, R16C:$rC)>;

def : Pat<(or (and (not R16C:$rC), R16C:$rA),
              (and R16C:$rC, R16C:$rB)),
          (SELBr16 R16C:$rA, R16C:$rB, R16C:$rC)>;

// SELB for 8-bit scalars; pattern-only selection, mirroring SELBr32.
def SELBr8:
    RRRForm<0b1000, (outs R8C:$rT), (ins R8C:$rA, R8C:$rB, R8C:$rC),
      "selb\t$rT, $rA, $rB, $rC", IntegerOp,
      []>;

// The same 8 operand-order variants as SELBr32 (and the same operand-order
// review concern applies here).
def : Pat<(or (and R8C:$rA, R8C:$rC),
              (and R8C:$rB, (not R8C:$rC))),
          (SELBr8 R8C:$rA, R8C:$rB, R8C:$rC)>;

def : Pat<(or (and R8C:$rC, R8C:$rA),
              (and R8C:$rB, (not R8C:$rC))),
          (SELBr8 R8C:$rA, R8C:$rB, R8C:$rC)>;

def : Pat<(or (and R8C:$rA, R8C:$rC),
              (and (not R8C:$rC), R8C:$rB)),
          (SELBr8 R8C:$rA, R8C:$rB, R8C:$rC)>;

def : Pat<(or (and R8C:$rC, R8C:$rA),
              (and (not R8C:$rC), R8C:$rB)),
          (SELBr8 R8C:$rA, R8C:$rB, R8C:$rC)>;

def : Pat<(or (and R8C:$rA, (not R8C:$rC)),
              (and R8C:$rB, R8C:$rC)),
          (SELBr8 R8C:$rA, R8C:$rB, R8C:$rC)>;

def : Pat<(or (and R8C:$rA, (not R8C:$rC)),
              (and R8C:$rC, R8C:$rB)),
          (SELBr8 R8C:$rA, R8C:$rB, R8C:$rC)>;

def : Pat<(or (and (not R8C:$rC), R8C:$rA),
              (and R8C:$rB, R8C:$rC)),
          (SELBr8 R8C:$rA, R8C:$rB, R8C:$rC)>;

def : Pat<(or (and (not R8C:$rC), R8C:$rA),
              (and R8C:$rC, R8C:$rB)),
          (SELBr8 R8C:$rA, R8C:$rB, R8C:$rC)>;

//===----------------------------------------------------------------------===//
// Vector shuffle...
//===----------------------------------------------------------------------===//

// SHUFB: byte-granularity shuffle of $rA/$rB controlled by $rC. Carries no
// DAG pattern of its own; selection happens via the SPUshuffle Pat<>s below.
def SHUFB:
    RRRForm<0b1000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB, VECREG:$rC),
      "shufb\t$rT, $rA, $rB, $rC", IntegerOp,
      [/* no pattern */]>;

// SPUshuffle is generated in LowerVECTOR_SHUFFLE and gets replaced with SHUFB.
// See the SPUshuffle SDNode operand above, which sets up the DAG pattern
// matcher to emit something when the LowerVECTOR_SHUFFLE generates a node with
// the SPUISD::SHUFB opcode. One Pat<> per legal vector type; all map to the
// same byte-granular SHUFB instruction.
def : Pat<(SPUshuffle (v16i8 VECREG:$rA), (v16i8 VECREG:$rB), VECREG:$rC),
          (SHUFB VECREG:$rA, VECREG:$rB, VECREG:$rC)>;

def : Pat<(SPUshuffle (v8i16 VECREG:$rA), (v8i16 VECREG:$rB), VECREG:$rC),
          (SHUFB VECREG:$rA, VECREG:$rB, VECREG:$rC)>;

def : Pat<(SPUshuffle (v4i32 VECREG:$rA), (v4i32 VECREG:$rB), VECREG:$rC),
          (SHUFB VECREG:$rA, VECREG:$rB, VECREG:$rC)>;

def : Pat<(SPUshuffle (v4f32 VECREG:$rA), (v4f32 VECREG:$rB), VECREG:$rC),
          (SHUFB VECREG:$rA, VECREG:$rB, VECREG:$rC)>;

def : Pat<(SPUshuffle (v2i64 VECREG:$rA), (v2i64 VECREG:$rB), VECREG:$rC),
          (SHUFB VECREG:$rA, VECREG:$rB, VECREG:$rC)>;

def : Pat<(SPUshuffle (v2f64 VECREG:$rA), (v2f64 VECREG:$rB), VECREG:$rC),
          (SHUFB VECREG:$rA, VECREG:$rB, VECREG:$rC)>;

//===----------------------------------------------------------------------===//
// Shift and rotate group:
//===----------------------------------------------------------------------===//

// SHLH: shift left halfword, vector and scalar register forms.
def SHLHv8i16:
    RRForm<0b11111010000, (outs VECREG:$rT), (ins VECREG:$rA, R16C:$rB),
      "shlh\t$rT, $rA, $rB", RotateShift,
      [(set (v8i16 VECREG:$rT),
            (SPUvec_shl_v8i16 (v8i16 VECREG:$rA), R16C:$rB))]>;

// $rB gets promoted to 32-bit register type when confronted with
// this llvm assembly code:
//
// define i16 @shlh_i16_1(i16 %arg1, i16 %arg2) {
//   %A = shl i16 %arg1, %arg2
//   ret i16 %A
// }
//
// However, we will generate this code when lowering 8-bit shifts and rotates.

def SHLHr16:
    RRForm<0b11111010000, (outs R16C:$rT), (ins R16C:$rA, R16C:$rB),
      "shlh\t$rT, $rA, $rB", RotateShift,
      [(set R16C:$rT, (shl R16C:$rA, R16C:$rB))]>;

// Variant accepting the shift amount in a 32-bit register (see note above).
def SHLHr16_r32:
    RRForm<0b11111010000, (outs R16C:$rT), (ins R16C:$rA, R32C:$rB),
      "shlh\t$rT, $rA, $rB", RotateShift,
      [(set R16C:$rT, (shl R16C:$rA, R32C:$rB))]>;

// SHLHI: shift left halfword by a 7-bit immediate. Legalization may present
// the immediate as i8, i16 or i32, so each width gets a pattern.
def SHLHIv8i16:
    RI7Form<0b11111010000, (outs VECREG:$rT), (ins VECREG:$rA, u7imm_i8:$val),
      "shlhi\t$rT, $rA, $val", RotateShift,
      [(set (v8i16 VECREG:$rT),
            (SPUvec_shl_v8i16 (v8i16 VECREG:$rA), (i8 uimm7:$val)))]>;

def : Pat<(SPUvec_shl_v8i16 (v8i16 VECREG:$rA), (i16 uimm7:$val)),
          (SHLHIv8i16 VECREG:$rA, imm:$val)>;

def : Pat<(SPUvec_shl_v8i16 (v8i16 VECREG:$rA), (i32 uimm7:$val)),
          (SHLHIv8i16 VECREG:$rA, imm:$val)>;

def SHLHIr16:
    RI7Form<0b11111010000, (outs R16C:$rT), (ins R16C:$rA, u7imm_i32:$val),
      "shlhi\t$rT, $rA, $val", RotateShift,
      [(set R16C:$rT, (shl R16C:$rA, (i32 uimm7:$val)))]>;

def : Pat<(shl R16C:$rA, (i8 uimm7:$val)),
          (SHLHIr16 R16C:$rA, uimm7:$val)>;

def : Pat<(shl R16C:$rA, (i16 uimm7:$val)),
          (SHLHIr16 R16C:$rA, uimm7:$val)>;

// SHL: shift left word, vector and 32-bit scalar forms.
def SHLv4i32:
    RRForm<0b11111010000, (outs VECREG:$rT), (ins VECREG:$rA, R16C:$rB),
      "shl\t$rT, $rA, $rB", RotateShift,
      [(set (v4i32 VECREG:$rT),
            (SPUvec_shl_v4i32 (v4i32 VECREG:$rA), R16C:$rB))]>;

def SHLr32:
    RRForm<0b11111010000, (outs R32C:$rT), (ins R32C:$rA, R32C:$rB),
      "shl\t$rT, $rA, $rB", RotateShift,
      [(set R32C:$rT, (shl R32C:$rA, R32C:$rB))]>;

// SHLI: shift left word by a 7-bit immediate; patterns cover the i8/i16/i32
// widths the immediate may be legalized to.
def SHLIv4i32:
    RI7Form<0b11111010000, (outs VECREG:$rT), (ins VECREG:$rA, u7imm_i8:$val),
      "shli\t$rT, $rA, $val", RotateShift,
      [(set (v4i32 VECREG:$rT),
            (SPUvec_shl_v4i32 (v4i32 VECREG:$rA), (i8 uimm7:$val)))]>;

def: Pat<(SPUvec_shl_v4i32 (v4i32 VECREG:$rA), (i16 uimm7:$val)),
         (SHLIv4i32 VECREG:$rA, uimm7:$val)>;

def: Pat<(SPUvec_shl_v4i32 (v4i32 VECREG:$rA), (i32 uimm7:$val)),
         (SHLIv4i32 VECREG:$rA, uimm7:$val)>;

def SHLIr32:
    RI7Form<0b11111010000, (outs R32C:$rT), (ins R32C:$rA, u7imm_i32:$val),
      "shli\t$rT, $rA, $val", RotateShift,
      [(set R32C:$rT, (shl R32C:$rA, (i32 uimm7:$val)))]>;

def : Pat<(shl R32C:$rA, (i16 uimm7:$val)),
          (SHLIr32 R32C:$rA, uimm7:$val)>;

def : Pat<(shl R32C:$rA, (i8 uimm7:$val)),
          (SHLIr32 R32C:$rA, uimm7:$val)>;

// SHLQBI vec form: Note that this will shift the entire vector (the 128-bit
// register) to the left. Vector form is here to ensure type correctness.
def SHLQBIvec:
    RRForm<0b11011011100, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
      "shlqbi\t$rT, $rA, $rB", RotateShift,
      [/* intrinsic */]>;

// See note above on SHLQBI.
def SHLQBIIvec:
    RI7Form<0b11011111100, (outs VECREG:$rT), (ins VECREG:$rA, u7imm:$val),
      "shlqbii\t$rT, $rA, $val", RotateShift,
      [/* intrinsic */]>;

// SHLQBY, SHLQBYI vector forms: Shift the entire vector to the left by bytes,
// not by bits.
//
// Fix: the register-operand form must emit the "shlqby" mnemonic; "shlqbyi"
// is the immediate form (emitted by SHLQBYIvec below). Previously both defs
// printed "shlqbyi".
def SHLQBYvec:
    RI7Form<0b11111011100, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
      "shlqby\t$rT, $rA, $rB", RotateShift,
      [/* intrinsic */]>;

def SHLQBYIvec:
    RI7Form<0b11111111100, (outs VECREG:$rT), (ins VECREG:$rA, u7imm:$val),
      "shlqbyi\t$rT, $rA, $val", RotateShift,
      [/* intrinsic */]>;

// ROTH: rotate left halfword.
// ROTH v8i16 form:
def ROTHv8i16:
    RRForm<0b00111010000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
      "roth\t$rT, $rA, $rB", RotateShift,
      [(set (v8i16 VECREG:$rT),
            (SPUvec_rotl_v8i16 VECREG:$rA, VECREG:$rB))]>;

def ROTHr16:
    RRForm<0b00111010000, (outs R16C:$rT), (ins R16C:$rA, R16C:$rB),
      "roth\t$rT, $rA, $rB", RotateShift,
      [(set R16C:$rT, (rotl R16C:$rA, R16C:$rB))]>;

def ROTHr16_r32:
    RRForm<0b00111010000, (outs R16C:$rT), (ins R16C:$rA, R32C:$rB),
      "roth\t$rT, $rA, $rB", RotateShift,
      [(set R16C:$rT, (rotl R16C:$rA, R32C:$rB))]>;

// The rotate amount is in the same bits whether we've got an 8-bit, 16-bit or
// 32-bit register, so sext/zext/anyext of the i8 amount all select the same
// instruction.
def ROTHr16_r8:
    RRForm<0b00111010000, (outs R16C:$rT), (ins R16C:$rA, R8C:$rB),
      "roth\t$rT, $rA, $rB", RotateShift,
      [(set R16C:$rT, (rotl R16C:$rA, (i32 (zext R8C:$rB))))]>;

def : Pat<(rotl R16C:$rA, (i32 (sext R8C:$rB))),
          (ROTHr16_r8 R16C:$rA, R8C:$rB)>;

def : Pat<(rotl R16C:$rA, (i32 (zext R8C:$rB))),
          (ROTHr16_r8 R16C:$rA, R8C:$rB)>;

def : Pat<(rotl R16C:$rA, (i32 (anyext R8C:$rB))),
          (ROTHr16_r8 R16C:$rA, R8C:$rB)>;

// ROTHI: rotate left halfword by a 7-bit immediate; one def/pattern per
// immediate width the legalizer may produce (i8, i16, i32).
def ROTHIv8i16:
    RI7Form<0b00111110000, (outs VECREG:$rT), (ins VECREG:$rA, u7imm_i8:$val),
      "rothi\t$rT, $rA, $val", RotateShift,
      [(set (v8i16 VECREG:$rT),
            (SPUvec_rotl_v8i16 VECREG:$rA, (i8 uimm7:$val)))]>;

def : Pat<(SPUvec_rotl_v8i16 VECREG:$rA, (i16 uimm7:$val)),
          (ROTHIv8i16 VECREG:$rA, imm:$val)>;

def : Pat<(SPUvec_rotl_v8i16 VECREG:$rA, (i32 uimm7:$val)),
          (ROTHIv8i16 VECREG:$rA, imm:$val)>;

def ROTHIr16:
    RI7Form<0b00111110000, (outs R16C:$rT), (ins R16C:$rA, u7imm:$val),
      "rothi\t$rT, $rA, $val", RotateShift,
      [(set R16C:$rT, (rotl R16C:$rA, (i16 uimm7:$val)))]>;

def ROTHIr16_i32:
    RI7Form<0b00111110000, (outs R16C:$rT), (ins R16C:$rA, u7imm_i32:$val),
      "rothi\t$rT, $rA, $val", RotateShift,
      [(set R16C:$rT, (rotl R16C:$rA, (i32 uimm7:$val)))]>;

def ROTHIr16_i8:
    RI7Form<0b00111110000, (outs R16C:$rT), (ins R16C:$rA, u7imm_i8:$val),
      "rothi\t$rT, $rA, $val", RotateShift,
      [(set R16C:$rT, (rotl R16C:$rA, (i8 uimm7:$val)))]>;

// ROT: rotate left word, vector and 32-bit scalar forms.
def ROTv4i32:
    RRForm<0b00011010000, (outs VECREG:$rT), (ins VECREG:$rA, R32C:$rB),
      "rot\t$rT, $rA, $rB", RotateShift,
      [(set (v4i32 VECREG:$rT),
            (SPUvec_rotl_v4i32 (v4i32 VECREG:$rA), R32C:$rB))]>;

def ROTr32:
    RRForm<0b00011010000, (outs R32C:$rT), (ins R32C:$rA, R32C:$rB),
      "rot\t$rT, $rA, $rB", RotateShift,
      [(set R32C:$rT, (rotl R32C:$rA, R32C:$rB))]>;

// The rotate amount is in the same bits whether we've got an 8-bit, 16-bit or
// 32-bit register, so any extension flavor of a narrower amount selects the
// same instruction.
def ROTr32_r16_anyext:
    RRForm<0b00011010000, (outs R32C:$rT), (ins R32C:$rA, R16C:$rB),
      "rot\t$rT, $rA, $rB", RotateShift,
      [(set R32C:$rT, (rotl R32C:$rA, (i32 (anyext R16C:$rB))))]>;

def : Pat<(rotl R32C:$rA, (i32 (zext R16C:$rB))),
          (ROTr32_r16_anyext R32C:$rA, R16C:$rB)>;

def : Pat<(rotl R32C:$rA, (i32 (sext R16C:$rB))),
          (ROTr32_r16_anyext R32C:$rA, R16C:$rB)>;

def ROTr32_r8_anyext:
    RRForm<0b00011010000, (outs R32C:$rT), (ins R32C:$rA, R8C:$rB),
      "rot\t$rT, $rA, $rB", RotateShift,
      [(set R32C:$rT, (rotl R32C:$rA, (i32 (anyext R8C:$rB))))]>;

def : Pat<(rotl R32C:$rA, (i32 (zext R8C:$rB))),
          (ROTr32_r8_anyext R32C:$rA, R8C:$rB)>;

def : Pat<(rotl R32C:$rA, (i32 (sext R8C:$rB))),
          (ROTr32_r8_anyext R32C:$rA, R8C:$rB)>;

// ROTI: full-word rotate left by a 7-bit unsigned immediate, vector form,
// plus patterns accepting i16/i8-typed rotate counts.
def ROTIv4i32:
    RI7Form<0b00011110000, (outs VECREG:$rT), (ins VECREG:$rA, u7imm_i32:$val),
      "roti\t$rT, $rA, $val", RotateShift,
      [(set (v4i32 VECREG:$rT),
            (SPUvec_rotl_v4i32 (v4i32 VECREG:$rA), (i32 uimm7:$val)))]>;

def : Pat<(SPUvec_rotl_v4i32 (v4i32 VECREG:$rA), (i16 uimm7:$val)),
          (ROTIv4i32 VECREG:$rA, imm:$val)>;

def : Pat<(SPUvec_rotl_v4i32 (v4i32 VECREG:$rA), (i8 uimm7:$val)),
          (ROTIv4i32 VECREG:$rA, imm:$val)>;

// Scalar roti, i32 immediate.
def ROTIr32:
    RI7Form<0b00011110000, (outs R32C:$rT), (ins R32C:$rA, u7imm_i32:$val),
      "roti\t$rT, $rA, $val", RotateShift,
      [(set R32C:$rT, (rotl R32C:$rA, (i32 uimm7:$val)))]>;
2362
// Scalar roti variants for i16/i8-typed rotate counts. Both emit "roti" and
// therefore must carry ROTI's opcode (0b00011110000, as ROTIr32 above); they
// previously carried 0b00111110000, which is ROTHI's opcode in this file —
// a copy/paste slip that would mis-encode the instruction.
def ROTIr32_i16:
    RI7Form<0b00011110000, (outs R32C:$rT), (ins R32C:$rA, u7imm:$val),
      "roti\t$rT, $rA, $val", RotateShift,
      [(set R32C:$rT, (rotl R32C:$rA, (i16 uimm7:$val)))]>;

def ROTIr32_i8:
    RI7Form<0b00011110000, (outs R32C:$rT), (ins R32C:$rA, u7imm_i8:$val),
      "roti\t$rT, $rA, $val", RotateShift,
      [(set R32C:$rT, (rotl R32C:$rA, (i8 uimm7:$val)))]>;
2372
// ROTQBY* vector forms: This rotates the entire vector, but vector registers
// are used here for type checking (instances where ROTQBI is used actually
// use vector registers)
def ROTQBYvec:
    RRForm<0b00111011100, (outs VECREG:$rT), (ins VECREG:$rA, R32C:$rB),
      "rotqby\t$rT, $rA, $rB", RotateShift,
      [(set (v16i8 VECREG:$rT), (SPUrotbytes_left (v16i8 VECREG:$rA), R32C:$rB))]>;

// The "chained" variant of the byte-rotate node selects the same instruction.
def : Pat<(SPUrotbytes_left_chained (v16i8 VECREG:$rA), R32C:$rB),
          (ROTQBYvec VECREG:$rA, R32C:$rB)>;

// See ROTQBY note above. Immediate (7-bit) byte-rotate count.
def ROTQBYIvec:
    RI7Form<0b00111111100, (outs VECREG:$rT), (ins VECREG:$rA, u7imm:$val),
      "rotqbyi\t$rT, $rA, $val", RotateShift,
      [(set (v16i8 VECREG:$rT),
            (SPUrotbytes_left (v16i8 VECREG:$rA), (i16 uimm7:$val)))]>;

def : Pat<(SPUrotbytes_left_chained (v16i8 VECREG:$rA), (i16 uimm7:$val)),
          (ROTQBYIvec VECREG:$rA, uimm7:$val)>;

// See ROTQBY note above. No selection pattern: reserved for an intrinsic.
// NOTE(review): declared with an immediate operand although the rotqbybi
// mnemonic suggests the register ("bytes from bit count") form — confirm
// against the ISA before wiring up the intrinsic.
def ROTQBYBIvec:
    RI7Form<0b00110011100, (outs VECREG:$rT), (ins VECREG:$rA, u7imm:$val),
      "rotqbybi\t$rT, $rA, $val", RotateShift,
      [/* intrinsic */]>;

// See ROTQBY note above.
//
// Assume that the user of this instruction knows to shift the rotate count
// into bit 29
def ROTQBIvec:
    RRForm<0b00011011100, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
      "rotqbi\t$rT, $rA, $rB", RotateShift,
      [/* insert intrinsic here */]>;

// See ROTQBY note above.
def ROTQBIIvec:
    RI7Form<0b00011111100, (outs VECREG:$rT), (ins VECREG:$rA, u7imm_i32:$val),
      "rotqbii\t$rT, $rA, $val", RotateShift,
      [/* insert intrinsic here */]>;
2414
// ROTHM v8i16 form:
// NOTE(1): No vector rotate is generated by the C/C++ frontend (today),
//          so this only matches a synthetically generated/lowered code
//          fragment.
// NOTE(2): $rB must be negated before the right rotate!
// The patterns below perform that negation with SFIr32 (0 - rB), widening
// narrower count registers first via XSHW (i16) or XSBH+XSHW (i8).
def ROTHMv8i16:
    RRForm<0b10111010000, (outs VECREG:$rT), (ins VECREG:$rA, R32C:$rB),
      "rothm\t$rT, $rA, $rB", RotateShift,
      [/* see patterns below - $rB must be negated */]>;

def : Pat<(SPUvec_srl_v8i16 (v8i16 VECREG:$rA), R32C:$rB),
          (ROTHMv8i16 VECREG:$rA, (SFIr32 R32C:$rB, 0))>;

def : Pat<(SPUvec_srl_v8i16 (v8i16 VECREG:$rA), R16C:$rB),
          (ROTHMv8i16 VECREG:$rA,
                      (SFIr32 (XSHWr16 R16C:$rB), 0))>;

def : Pat<(SPUvec_srl_v8i16 (v8i16 VECREG:$rA), R8C:$rB),
          (ROTHMv8i16 VECREG:$rA,
                      (SFIr32 (XSHWr16 (XSBHr8 R8C:$rB) ), 0))>;
Scott Michel8b6b4202007-12-04 22:35:58 +00002435
// ROTHM r16 form: Rotate 16-bit quantity to right, zero fill at the left
// Note: This instruction doesn't match a pattern because rB must be negated
// for the instruction to work. Thus, the pattern below the instruction!
def ROTHMr16:
    RRForm<0b10111010000, (outs R16C:$rT), (ins R16C:$rA, R32C:$rB),
      "rothm\t$rT, $rA, $rB", RotateShift,
      [/* see patterns below - $rB must be negated! */]>;

// srl of r16: negate the count with SFIr32, widening a 16-bit (XSHW) or
// 8-bit (XSBH then XSHW) count register first.
def : Pat<(srl R16C:$rA, R32C:$rB),
          (ROTHMr16 R16C:$rA, (SFIr32 R32C:$rB, 0))>;

def : Pat<(srl R16C:$rA, R16C:$rB),
          (ROTHMr16 R16C:$rA,
                    (SFIr32 (XSHWr16 R16C:$rB), 0))>;

def : Pat<(srl R16C:$rA, R8C:$rB),
          (ROTHMr16 R16C:$rA,
                    (SFIr32 (XSHWr16 (XSBHr8 R8C:$rB) ), 0))>;
Scott Michel8b6b4202007-12-04 22:35:58 +00002454
// ROTHMI v8i16 form: See the comment for ROTHM v8i16. The difference here is
// that the immediate can be complemented, so that the user doesn't have to
// worry about it.
def ROTHMIv8i16:
    RI7Form<0b10111110000, (outs VECREG:$rT), (ins VECREG:$rA, rothNeg7imm:$val),
      "rothmi\t$rT, $rA, $val", RotateShift,
      [(set (v8i16 VECREG:$rT),
            (SPUvec_srl_v8i16 (v8i16 VECREG:$rA), (i32 imm:$val)))]>;

// Accept i16/i8-typed shift counts as well.
def: Pat<(SPUvec_srl_v8i16 (v8i16 VECREG:$rA), (i16 imm:$val)),
         (ROTHMIv8i16 VECREG:$rA, imm:$val)>;

def: Pat<(SPUvec_srl_v8i16 (v8i16 VECREG:$rA), (i8 imm:$val)),
         (ROTHMIv8i16 VECREG:$rA, imm:$val)>;

// Scalar form: srl of r16 by a 7-bit immediate (count complemented via the
// rothNeg7imm operand — see the comment above).
def ROTHMIr16:
    RI7Form<0b10111110000, (outs R16C:$rT), (ins R16C:$rA, rothNeg7imm:$val),
      "rothmi\t$rT, $rA, $val", RotateShift,
      [(set R16C:$rT, (srl R16C:$rA, (i32 uimm7:$val)))]>;

def: Pat<(srl R16C:$rA, (i16 uimm7:$val)),
         (ROTHMIr16 R16C:$rA, uimm7:$val)>;

def: Pat<(srl R16C:$rA, (i8 uimm7:$val)),
         (ROTHMIr16 R16C:$rA, uimm7:$val)>;
2480
// ROTM v4i32 form: See the ROTHM v8i16 comments ($rB must be negated; the
// patterns below do so with SFIr32, widening narrower count registers).
def ROTMv4i32:
    RRForm<0b10011010000, (outs VECREG:$rT), (ins VECREG:$rA, R32C:$rB),
      "rotm\t$rT, $rA, $rB", RotateShift,
      [/* see patterns below - $rB must be negated */]>;

def : Pat<(SPUvec_srl_v4i32 VECREG:$rA, R32C:$rB),
          (ROTMv4i32 VECREG:$rA, (SFIr32 R32C:$rB, 0))>;

def : Pat<(SPUvec_srl_v4i32 VECREG:$rA, R16C:$rB),
          (ROTMv4i32 VECREG:$rA,
                     (SFIr32 (XSHWr16 R16C:$rB), 0))>;

// NOTE(review): the commented-out R8C fragments show an intended 8-bit-count
// variant; as written this pattern duplicates the R16C one above — confirm.
def : Pat<(SPUvec_srl_v4i32 VECREG:$rA, /* R8C */ R16C:$rB),
          (ROTMv4i32 VECREG:$rA,
                     (SFIr32 (XSHWr16 /* (XSBHr8 R8C */ R16C:$rB) /*)*/, 0))>;

// Scalar rotm: srl of r32 (count negated by the patterns, as above).
def ROTMr32:
    RRForm<0b10011010000, (outs R32C:$rT), (ins R32C:$rA, R32C:$rB),
      "rotm\t$rT, $rA, $rB", RotateShift,
      [/* see patterns below - $rB must be negated */]>;

def : Pat<(srl R32C:$rA, R32C:$rB),
          (ROTMr32 R32C:$rA, (SFIr32 R32C:$rB, 0))>;

def : Pat<(srl R32C:$rA, R16C:$rB),
          (ROTMr32 R32C:$rA,
                   (SFIr32 (XSHWr16 R16C:$rB), 0))>;

def : Pat<(srl R32C:$rA, R8C:$rB),
          (ROTMr32 R32C:$rA,
                   (SFIr32 (XSHWr16 (XSBHr8 R8C:$rB)), 0))>;
2513
// ROTMI v4i32 form: See the comment for ROTHM v8i16.
def ROTMIv4i32:
    RI7Form<0b10011110000, (outs VECREG:$rT), (ins VECREG:$rA, rotNeg7imm:$val),
      "rotmi\t$rT, $rA, $val", RotateShift,
      [(set (v4i32 VECREG:$rT),
            (SPUvec_srl_v4i32 VECREG:$rA, (i32 uimm7:$val)))]>;

def : Pat<(SPUvec_srl_v4i32 VECREG:$rA, (i16 uimm7:$val)),
          (ROTMIv4i32 VECREG:$rA, uimm7:$val)>;

def : Pat<(SPUvec_srl_v4i32 VECREG:$rA, (i8 uimm7:$val)),
          (ROTMIv4i32 VECREG:$rA, uimm7:$val)>;

// ROTMI r32 form: know how to complement the immediate value.
def ROTMIr32:
    RI7Form<0b10011110000, (outs R32C:$rT), (ins R32C:$rA, rotNeg7imm:$val),
      "rotmi\t$rT, $rA, $val", RotateShift,
      [(set R32C:$rT, (srl R32C:$rA, (i32 uimm7:$val)))]>;

// NOTE(review): these match on the unconstrained `imm` but emit through the
// uimm7 xform, unlike the v4i32 patterns above which also match uimm7 —
// presumably intentional, but confirm.
def : Pat<(srl R32C:$rA, (i16 imm:$val)),
          (ROTMIr32 R32C:$rA, uimm7:$val)>;

def : Pat<(srl R32C:$rA, (i8 imm:$val)),
          (ROTMIr32 R32C:$rA, uimm7:$val)>;
2538
// ROTQMBYvec: This is a vector form merely so that when used in an
// instruction pattern, type checking will succeed. This instruction assumes
// that the user knew to complement $rB.
def ROTQMBYvec:
    RRForm<0b10111011100, (outs VECREG:$rT), (ins VECREG:$rA, R32C:$rB),
      "rotqmby\t$rT, $rA, $rB", RotateShift,
      [(set (v16i8 VECREG:$rT),
            (SPUrotbytes_right_zfill (v16i8 VECREG:$rA), R32C:$rB))]>;

// Immediate form: rotate quadword right by bytes, zero fill.
def ROTQMBYIvec:
    RI7Form<0b10111111100, (outs VECREG:$rT), (ins VECREG:$rA, rotNeg7imm:$val),
      "rotqmbyi\t$rT, $rA, $val", RotateShift,
      [(set (v16i8 VECREG:$rT),
            (SPUrotbytes_right_zfill (v16i8 VECREG:$rA), (i32 uimm7:$val)))]>;

def : Pat<(SPUrotbytes_right_zfill VECREG:$rA, (i16 uimm7:$val)),
          (ROTQMBYIvec VECREG:$rA, uimm7:$val)>;

// The three forms below carry no selection patterns yet; they are
// placeholders for intrinsics.
def ROTQMBYBIvec:
    RRForm<0b10110011100, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
      "rotqmbybi\t$rT, $rA, $rB", RotateShift,
      [/* intrinsic */]>;

def ROTQMBIvec:
    RRForm<0b10011011100, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
      "rotqmbi\t$rT, $rA, $rB", RotateShift,
      [/* intrinsic */]>;

def ROTQMBIIvec:
    RI7Form<0b10011111100, (outs VECREG:$rT), (ins VECREG:$rA, rotNeg7imm:$val),
      "rotqmbii\t$rT, $rA, $val", RotateShift,
      [/* intrinsic */]>;
2571
// ROTMAH: halfword rotate-and-mask algebraic, used to implement arithmetic
// shift right (sra). As with ROTHM/ROTM, $rB must be negated; the patterns
// below do so with SFIr32, widening narrower count registers via XSHW/XSBH.
def ROTMAHv8i16:
    RRForm<0b01111010000, (outs VECREG:$rT), (ins VECREG:$rA, R32C:$rB),
      "rotmah\t$rT, $rA, $rB", RotateShift,
      [/* see patterns below - $rB must be negated */]>;

def : Pat<(SPUvec_sra_v8i16 VECREG:$rA, R32C:$rB),
          (ROTMAHv8i16 VECREG:$rA, (SFIr32 R32C:$rB, 0))>;

def : Pat<(SPUvec_sra_v8i16 VECREG:$rA, R16C:$rB),
          (ROTMAHv8i16 VECREG:$rA,
                       (SFIr32 (XSHWr16 R16C:$rB), 0))>;

def : Pat<(SPUvec_sra_v8i16 VECREG:$rA, R8C:$rB),
          (ROTMAHv8i16 VECREG:$rA,
                       (SFIr32 (XSHWr16 (XSBHr8 R8C:$rB)), 0))>;

// Scalar form: sra of r16.
def ROTMAHr16:
    RRForm<0b01111010000, (outs R16C:$rT), (ins R16C:$rA, R32C:$rB),
      "rotmah\t$rT, $rA, $rB", RotateShift,
      [/* see patterns below - $rB must be negated */]>;

def : Pat<(sra R16C:$rA, R32C:$rB),
          (ROTMAHr16 R16C:$rA, (SFIr32 R32C:$rB, 0))>;

def : Pat<(sra R16C:$rA, R16C:$rB),
          (ROTMAHr16 R16C:$rA,
                     (SFIr32 (XSHWr16 R16C:$rB), 0))>;

def : Pat<(sra R16C:$rA, R8C:$rB),
          (ROTMAHr16 R16C:$rA,
                     (SFIr32 (XSHWr16 (XSBHr8 R8C:$rB)), 0))>;
2603
// ROTMAHI: halfword rotate algebraic by a 7-bit immediate (sra by constant).
// Declared with RI7Form: these take an immediate, not a register. They were
// previously declared with RRForm, which lays the count out as a register
// field; every other immediate rotate in this file (ROTHI, ROTI, ROTHMI,
// ROTMI, ROTQMBYI) uses RI7Form.
def ROTMAHIv8i16:
    RI7Form<0b01111110000, (outs VECREG:$rT), (ins VECREG:$rA, rothNeg7imm:$val),
      "rotmahi\t$rT, $rA, $val", RotateShift,
      [(set (v8i16 VECREG:$rT),
            (SPUvec_sra_v8i16 (v8i16 VECREG:$rA), (i32 uimm7:$val)))]>;

def : Pat<(SPUvec_sra_v8i16 (v8i16 VECREG:$rA), (i16 uimm7:$val)),
          (ROTMAHIv8i16 (v8i16 VECREG:$rA), (i32 uimm7:$val))>;

def : Pat<(SPUvec_sra_v8i16 (v8i16 VECREG:$rA), (i8 uimm7:$val)),
          (ROTMAHIv8i16 (v8i16 VECREG:$rA), (i32 uimm7:$val))>;

// Scalar form: sra of r16 by immediate.
def ROTMAHIr16:
    RI7Form<0b01111110000, (outs R16C:$rT), (ins R16C:$rA, rothNeg7imm_i16:$val),
      "rotmahi\t$rT, $rA, $val", RotateShift,
      [(set R16C:$rT, (sra R16C:$rA, (i16 uimm7:$val)))]>;

def : Pat<(sra R16C:$rA, (i32 imm:$val)),
          (ROTMAHIr16 R16C:$rA, uimm7:$val)>;

def : Pat<(sra R16C:$rA, (i8 imm:$val)),
          (ROTMAHIr16 R16C:$rA, uimm7:$val)>;
2626
// ROTMA: full-word rotate algebraic, used to implement arithmetic shift
// right (sra). $rB must be negated, as for ROTM; narrower count registers
// are widened first (XSHW / XSBH+XSHW).
def ROTMAv4i32:
    RRForm<0b01011010000, (outs VECREG:$rT), (ins VECREG:$rA, R32C:$rB),
      "rotma\t$rT, $rA, $rB", RotateShift,
      [/* see patterns below - $rB must be negated */]>;

def : Pat<(SPUvec_sra_v4i32 VECREG:$rA, R32C:$rB),
          (ROTMAv4i32 (v4i32 VECREG:$rA), (SFIr32 R32C:$rB, 0))>;

def : Pat<(SPUvec_sra_v4i32 VECREG:$rA, R16C:$rB),
          (ROTMAv4i32 (v4i32 VECREG:$rA),
                      (SFIr32 (XSHWr16 R16C:$rB), 0))>;

def : Pat<(SPUvec_sra_v4i32 VECREG:$rA, R8C:$rB),
          (ROTMAv4i32 (v4i32 VECREG:$rA),
                      (SFIr32 (XSHWr16 (XSBHr8 R8C:$rB)), 0))>;

// Scalar form: sra of r32.
def ROTMAr32:
    RRForm<0b01011010000, (outs R32C:$rT), (ins R32C:$rA, R32C:$rB),
      "rotma\t$rT, $rA, $rB", RotateShift,
      [/* see patterns below - $rB must be negated */]>;

def : Pat<(sra R32C:$rA, R32C:$rB),
          (ROTMAr32 R32C:$rA, (SFIr32 R32C:$rB, 0))>;

def : Pat<(sra R32C:$rA, R16C:$rB),
          (ROTMAr32 R32C:$rA,
                    (SFIr32 (XSHWr16 R16C:$rB), 0))>;

def : Pat<(sra R32C:$rA, R8C:$rB),
          (ROTMAr32 R32C:$rA,
                    (SFIr32 (XSHWr16 (XSBHr8 R8C:$rB)), 0))>;
2658
// ROTMAI: full-word rotate algebraic by a 7-bit immediate (sra by constant).
// Declared with RI7Form: these take an immediate, not a register. They were
// previously declared with RRForm, which would encode the count in the rB
// register field; all sibling immediate rotates in this file use RI7Form.
def ROTMAIv4i32:
    RI7Form<0b01011110000, (outs VECREG:$rT), (ins VECREG:$rA, rotNeg7imm:$val),
      "rotmai\t$rT, $rA, $val", RotateShift,
      [(set (v4i32 VECREG:$rT),
            (SPUvec_sra_v4i32 VECREG:$rA, (i32 uimm7:$val)))]>;

def : Pat<(SPUvec_sra_v4i32 VECREG:$rA, (i16 uimm7:$val)),
          (ROTMAIv4i32 VECREG:$rA, uimm7:$val)>;

// Scalar form: sra of r32 by immediate, with i16/i8-typed count patterns.
def ROTMAIr32:
    RI7Form<0b01011110000, (outs R32C:$rT), (ins R32C:$rA, rotNeg7imm:$val),
      "rotmai\t$rT, $rA, $val", RotateShift,
      [(set R32C:$rT, (sra R32C:$rA, (i32 uimm7:$val)))]>;

def : Pat<(sra R32C:$rA, (i16 uimm7:$val)),
          (ROTMAIr32 R32C:$rA, uimm7:$val)>;

def : Pat<(sra R32C:$rA, (i8 uimm7:$val)),
          (ROTMAIr32 R32C:$rA, uimm7:$val)>;
2678
Scott Michel8b6b4202007-12-04 22:35:58 +00002679//===----------------------------------------------------------------------===//
2680// Branch and conditionals:
2681//===----------------------------------------------------------------------===//
2682
// Conditional halt instructions (heq/hgt/hlgt plus 10-bit signed-immediate
// forms). All are terminators/barriers with no selection patterns; they are
// emitted directly rather than matched from DAG nodes.
let isTerminator = 1, isBarrier = 1 in {
  // Halt If Equal (r32 preferred slot only, no vector form)
  def HEQr32:
      RRForm_3<0b00011011110, (outs), (ins R32C:$rA, R32C:$rB),
        "heq\t$rA, $rB", BranchResolv,
        [/* no pattern to match */]>;

  def HEQIr32 :
      RI10Form_2<0b11111110, (outs), (ins R32C:$rA, s10imm:$val),
        "heqi\t$rA, $val", BranchResolv,
        [/* no pattern to match */]>;

  // HGT/HGTI: These instructions use signed arithmetic for the comparison,
  // contrasting with HLGT/HLGTI, which use unsigned comparison:
  def HGTr32:
      RRForm_3<0b00011010010, (outs), (ins R32C:$rA, R32C:$rB),
        "hgt\t$rA, $rB", BranchResolv,
        [/* no pattern to match */]>;

  def HGTIr32:
      RI10Form_2<0b11110010, (outs), (ins R32C:$rA, s10imm:$val),
        "hgti\t$rA, $val", BranchResolv,
        [/* no pattern to match */]>;

  def HLGTr32:
      RRForm_3<0b00011011010, (outs), (ins R32C:$rA, R32C:$rB),
        "hlgt\t$rA, $rB", BranchResolv,
        [/* no pattern to match */]>;

  def HLGTIr32:
      RI10Form_2<0b11111010, (outs), (ins R32C:$rA, s10imm:$val),
        "hlgti\t$rA, $val", BranchResolv,
        [/* no pattern to match */]>;
}
2717
// Comparison operators: ceqb/ceqh/ceq = compare-equal on bytes, halfwords
// and words, each with scalar and vector register forms plus an immediate
// form (7-bit signed for bytes, 10-bit signed otherwise). None carry
// selection patterns here; they are reserved for intrinsics / explicit use.
def CEQBr8:
    RRForm<0b00001011110, (outs R8C:$rT), (ins R8C:$rA, R8C:$rB),
      "ceqb\t$rT, $rA, $rB", ByteOp,
      [/* no pattern to match */]>;

def CEQBv16i8:
    RRForm<0b00001011110, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
      "ceqb\t$rT, $rA, $rB", ByteOp,
      [/* no pattern to match: intrinsic */]>;

def CEQBIr8:
    RI10Form<0b01111110, (outs R8C:$rT), (ins R8C:$rA, s7imm_i8:$val),
      "ceqbi\t$rT, $rA, $val", ByteOp,
      [/* no pattern to match: intrinsic */]>;

def CEQBIv16i8:
    RI10Form<0b01111110, (outs VECREG:$rT), (ins VECREG:$rA, s7imm_i8:$val),
      "ceqbi\t$rT, $rA, $val", ByteOp,
      [/* no pattern to match: intrinsic */]>;

def CEQHr16:
    RRForm<0b00010011110, (outs R16C:$rT), (ins R16C:$rA, R16C:$rB),
      "ceqh\t$rT, $rA, $rB", ByteOp,
      [/* no pattern to match */]>;

def CEQHv8i16:
    RRForm<0b00010011110, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
      "ceqh\t$rT, $rA, $rB", ByteOp,
      [/* no pattern to match: intrinsic */]>;

def CEQHIr16:
    RI10Form<0b10111110, (outs R16C:$rT), (ins R16C:$rA, s10imm:$val),
      "ceqhi\t$rT, $rA, $val", ByteOp,
      [/* no pattern to match: intrinsic */]>;

def CEQHIv8i16:
    RI10Form<0b10111110, (outs VECREG:$rT), (ins VECREG:$rA, s10imm:$val),
      "ceqhi\t$rT, $rA, $val", ByteOp,
      [/* no pattern to match: intrinsic */]>;

def CEQr32:
    RRForm<0b00000011110, (outs R32C:$rT), (ins R32C:$rA, R32C:$rB),
      "ceq\t$rT, $rA, $rB", ByteOp,
      [/* no pattern to match: intrinsic */]>;

def CEQv4i32:
    RRForm<0b00000011110, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
      "ceq\t$rT, $rA, $rB", ByteOp,
      [/* no pattern to match: intrinsic */]>;

def CEQIr32:
    RI10Form<0b00111110, (outs R32C:$rT), (ins R32C:$rA, s10imm:$val),
      "ceqi\t$rT, $rA, $val", ByteOp,
      [/* no pattern to match: intrinsic */]>;

def CEQIv4i32:
    RI10Form<0b00111110, (outs VECREG:$rT), (ins VECREG:$rA, s10imm:$val),
      "ceqi\t$rT, $rA, $val", ByteOp,
      [/* no pattern to match: intrinsic */]>;
2778
// Call instructions. Every call clobbers the full set of non-callee-saved
// registers and uses/defines the link register ($lr = R0).
let isCall = 1,
  // All calls clobber the non-callee-saved registers:
  Defs = [R0, R1, R2, R3, R4, R5, R6, R7, R8, R9,
          R10,R11,R12,R13,R14,R15,R16,R17,R18,R19,
          R20,R21,R22,R23,R24,R25,R26,R27,R28,R29,
          R30,R31,R32,R33,R34,R35,R36,R37,R38,R39,
          R40,R41,R42,R43,R44,R45,R46,R47,R48,R49,
          R50,R51,R52,R53,R54,R55,R56,R57,R58,R59,
          R60,R61,R62,R63,R64,R65,R66,R67,R68,R69,
          R70,R71,R72,R73,R74,R75,R76,R77,R78,R79],
  // All of these instructions use $lr (aka $0)
  Uses = [R0] in {
  // Branch relative and set link: Used if we actually know that the target
  // is within [-32768, 32767] bytes of the target
  def BRSL:
      BranchSetLink<0b011001100, (outs), (ins relcalltarget:$func, variable_ops),
        "brsl\t$$lr, $func",
        [(SPUcall (SPUpcrel tglobaladdr:$func, 0))]>;

  // Branch absolute and set link: Used if we actually know that the target
  // is an absolute address
  // NOTE(review): BRASL carries the same opcode field as BRSL here; brasl is
  // a distinct opcode in the ISA -- verify the encoding.
  def BRASL:
      BranchSetLink<0b011001100, (outs), (ins calltarget:$func, variable_ops),
        "brasl\t$$lr, $func",
        [(SPUcall (SPUaform tglobaladdr:$func, 0))]>;

  // Branch indirect and set link if external data. These instructions are not
  // actually generated, matched by an intrinsic:
  def BISLED_00: BISLEDForm<0b11, "bisled\t$$lr, $func", [/* empty pattern */]>;
  def BISLED_E0: BISLEDForm<0b10, "bisled\t$$lr, $func", [/* empty pattern */]>;
  def BISLED_0D: BISLEDForm<0b01, "bisled\t$$lr, $func", [/* empty pattern */]>;
  def BISLED_ED: BISLEDForm<0b00, "bisled\t$$lr, $func", [/* empty pattern */]>;

  // Branch indirect and set link. This is the "X-form" address version of a
  // function call
  def BISL:
      BIForm<0b10010101100, "bisl\t$$lr, $func", [(SPUcall R32C:$func)]>;
}
2817
// Unconditional branches:
let isBranch = 1, isTerminator = 1, hasCtrlDep = 1, isBarrier = 1 in {
  def BR :
      UncondBranch<0b001001100, (outs), (ins brtarget:$dest),
        "br\t$dest",
        [(br bb:$dest)]>;

  // Unconditional, absolute address branch
  def BRA:
      UncondBranch<0b001100000, (outs), (ins brtarget:$dest),
        "bra\t$dest",
        [/* no pattern */]>;

  // Indirect branch
  def BI:
      BIForm<0b00010101100, "bi\t$func", [(brind R32C:$func)]>;

  // Conditional branches. BRNZ/BRHNZ carry the generic brcond pattern
  // (branch when the condition register is non-zero); BRZ/BRHZ are selected
  // only via the explicit compare-with-zero patterns that follow this block.
  def BRNZ:
      RI16Form<0b010000100, (outs), (ins R32C:$rCond, brtarget:$dest),
        "brnz\t$rCond,$dest",
        BranchResolv,
        [(brcond R32C:$rCond, bb:$dest)]>;

  def BRZ:
      RI16Form<0b000000100, (outs), (ins R32C:$rT, brtarget:$dest),
        "brz\t$rT,$dest",
        BranchResolv,
        [/* no pattern */]>;

  def BRHNZ:
      RI16Form<0b011000100, (outs), (ins R16C:$rCond, brtarget:$dest),
        "brhnz\t$rCond,$dest",
        BranchResolv,
        [(brcond R16C:$rCond, bb:$dest)]>;

  def BRHZ:
      RI16Form<0b001000100, (outs), (ins R16C:$rT, brtarget:$dest),
        "brhz\t$rT,$dest",
        BranchResolv,
        [/* no pattern */]>;

/*
  def BINZ:
      BICondForm<0b10010100100, "binz\t$rA, $func",
        [(SPUbinz R32C:$rA, R32C:$func)]>;

  def BIZ:
      BICondForm<0b00010100100, "biz\t$rA, $func",
        [(SPUbiz R32C:$rA, R32C:$func)]>;
*/
}
2870
// Fold an explicit halfword compare-with-zero into the conditional branch:
// seteq 0 -> brhz, setne 0 -> brhnz.
def : Pat<(brcond (i16 (seteq R16C:$rA, 0)), bb:$dest),
          (BRHZ R16C:$rA, bb:$dest)>;
def : Pat<(brcond (i16 (setne R16C:$rA, 0)), bb:$dest),
          (BRHNZ R16C:$rA, bb:$dest)>;
2875
// Fold a word compare-with-zero into the conditional branch:
// seteq 0 -> brz, setne 0 -> brnz. The setne case previously selected BRZ,
// inverting the branch sense; compare the i16 patterns above
// (seteq -> BRHZ, setne -> BRHNZ).
def : Pat<(brcond (i32 (seteq R32C:$rA, 0)), bb:$dest),
          (BRZ R32C:$rA, bb:$dest)>;
def : Pat<(brcond (i32 (setne R32C:$rA, 0)), bb:$dest),
          (BRNZ R32C:$rA, bb:$dest)>;
2880
// Function return: an indirect branch through the link register, flagged as
// a return/terminator/barrier so epilogue emission handles it correctly.
let isTerminator = 1, isBarrier = 1 in {
  let isReturn = 1 in {
    def RET:
        RETForm<"bi\t$$lr", [(retflag)]>;
  }
}
2887
2888//===----------------------------------------------------------------------===//
2889// Various brcond predicates:
2890//===----------------------------------------------------------------------===//
2891/*
2892def : Pat<(brcond (i32 (seteq R32C:$rA, 0)), bb:$dest),
2893 (BRZ R32C:$rA, bb:$dest)>;
2894
2895def : Pat<(brcond (i32 (seteq R32C:$rA, R32C:$rB)), bb:$dest),
2896 (BRNZ (CEQr32 R32C:$rA, R32C:$rB), bb:$dest)>;
2897
2898def : Pat<(brcond (i16 (seteq R16C:$rA, i16ImmSExt10:$val)), bb:$dest),
2899 (BRHNZ (CEQHIr16 R16C:$rA, i16ImmSExt10:$val), bb:$dest)>;
2900
2901def : Pat<(brcond (i16 (seteq R16C:$rA, R16C:$rB)), bb:$dest),
2902 (BRHNZ (CEQHr16 R16C:$rA, R16C:$rB), bb:$dest)>;
2903*/
2904
2905//===----------------------------------------------------------------------===//
2906// Single precision floating point instructions
2907//===----------------------------------------------------------------------===//
2908
// fa: single-precision floating add, vector and scalar forms (same opcode,
// different register classes).
def FAv4f32:
    RRForm<0b00100011010, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
      "fa\t$rT, $rA, $rB", SPrecFP,
      [(set (v4f32 VECREG:$rT), (fadd (v4f32 VECREG:$rA), (v4f32 VECREG:$rB)))]>;

def FAf32 :
    RRForm<0b00100011010, (outs R32FP:$rT), (ins R32FP:$rA, R32FP:$rB),
      "fa\t$rT, $rA, $rB", SPrecFP,
      [(set R32FP:$rT, (fadd R32FP:$rA, R32FP:$rB))]>;
2918
// fs: single-precision floating subtract, vector form. Shares the scalar
// FSf32 opcode (0b10100011010), matching the vector/scalar pairing used
// throughout this file; it previously carried FA's opcode (0b00100011010),
// duplicating the add encoding.
def FSv4f32:
    RRForm<0b10100011010, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
      "fs\t$rT, $rA, $rB", SPrecFP,
      [(set (v4f32 VECREG:$rT), (fsub (v4f32 VECREG:$rA), (v4f32 VECREG:$rB)))]>;
2923
// fs: single-precision floating subtract, scalar form.
def FSf32 :
    RRForm<0b10100011010, (outs R32FP:$rT), (ins R32FP:$rA, R32FP:$rB),
      "fs\t$rT, $rA, $rB", SPrecFP,
      [(set R32FP:$rT, (fsub R32FP:$rA, R32FP:$rB))]>;
2928
// Floating point reciprocal estimate (frest). Paired with fi (below) to
// refine the estimate -- see the fdiv expansion sketch later in this file.
def FREv4f32 :
    RRForm_1<0b00011101100, (outs VECREG:$rT), (ins VECREG:$rA),
      "frest\t$rT, $rA", SPrecFP,
      [(set (v4f32 VECREG:$rT), (SPUreciprocalEst (v4f32 VECREG:$rA)))]>;

def FREf32 :
    RRForm_1<0b00011101100, (outs R32FP:$rT), (ins R32FP:$rA),
      "frest\t$rT, $rA", SPrecFP,
      [(set R32FP:$rT, (SPUreciprocalEst R32FP:$rA))]>;

// Floating point interpolate (used in conjunction with reciprocal estimate)
def FIv4f32 :
    RRForm<0b00101011110, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
      "fi\t$rT, $rA, $rB", SPrecFP,
      [(set (v4f32 VECREG:$rT), (SPUinterpolate (v4f32 VECREG:$rA),
                                                (v4f32 VECREG:$rB)))]>;

def FIf32 :
    RRForm<0b00101011110, (outs R32FP:$rT), (ins R32FP:$rA, R32FP:$rB),
      "fi\t$rT, $rA, $rB", SPrecFP,
      [(set R32FP:$rT, (SPUinterpolate R32FP:$rA, R32FP:$rB))]>;
2951
// Floating Compare Equal / Greater Than, and the magnitude ("m") variants
// which compare fabs of the operands. All match the ordered setcc nodes and
// deliver the result into an R32C condition register.
def FCEQf32 :
    RRForm<0b01000011110, (outs R32C:$rT), (ins R32FP:$rA, R32FP:$rB),
      "fceq\t$rT, $rA, $rB", SPrecFP,
      [(set R32C:$rT, (setoeq R32FP:$rA, R32FP:$rB))]>;

def FCMEQf32 :
    RRForm<0b01010011110, (outs R32C:$rT), (ins R32FP:$rA, R32FP:$rB),
      "fcmeq\t$rT, $rA, $rB", SPrecFP,
      [(set R32C:$rT, (setoeq (fabs R32FP:$rA), (fabs R32FP:$rB)))]>;

def FCGTf32 :
    RRForm<0b01000011010, (outs R32C:$rT), (ins R32FP:$rA, R32FP:$rB),
      "fcgt\t$rT, $rA, $rB", SPrecFP,
      [(set R32C:$rT, (setogt R32FP:$rA, R32FP:$rB))]>;

def FCMGTf32 :
    RRForm<0b01010011010, (outs R32C:$rT), (ins R32FP:$rA, R32FP:$rB),
      "fcmgt\t$rT, $rA, $rB", SPrecFP,
      [(set R32C:$rT, (setogt (fabs R32FP:$rA), (fabs R32FP:$rB)))]>;
2972
// FP Status and Control Register Write
// Why isn't rT a don't care in the ISA?
// Should we create a special RRForm_3 for this guy and zero out the rT?
def FSCRWf32 :
    RRForm_1<0b01011101110, (outs R32FP:$rT), (ins R32FP:$rA),
      "fscrwr\t$rA", SPrecFP,
      [/* This instruction requires an intrinsic. Note: rT is unused. */]>;

// FP Status and Control Register Read
// NOTE(review): fscrrd shares fscrwr's opcode field here; they are distinct
// opcodes in the ISA -- verify before these are wired to intrinsics.
def FSCRRf32 :
    RRForm_2<0b01011101110, (outs R32FP:$rT), (ins),
      "fscrrd\t$rT", SPrecFP,
      [/* This instruction requires an intrinsic */]>;
2986
2987// llvm instruction space
2988// How do these map onto cell instructions?
2989// fdiv rA rB
2990// frest rC rB # c = 1/b (both lines)
2991// fi rC rB rC
2992// fm rD rA rC # d = a * 1/b
2993// fnms rB rD rB rA # b = - (d * b - a) --should == 0 in a perfect world
2994// fma rB rB rC rD # b = b * c + d
2995// = -(d *b -a) * c + d
2996// = a * c - c ( a *b *c - a)
2997
2998// fcopysign (???)
2999
3000// Library calls:
3001// These llvm instructions will actually map to library calls.
3002// All that's needed, then, is to check that the appropriate library is
3003// imported and do a brsl to the proper function name.
3004// frem # fmod(x, y): x - (x/y) * y
3005// (Note: fmod(double, double), fmodf(float,float)
3006// fsqrt?
3007// fsin?
3008// fcos?
3009// Unimplemented SPU instruction space
3010// floating reciprocal absolute square root estimate (frsqest)
3011
3012// The following are probably just intrinsics
3013// status and control register write
3014// status and control register read
3015
3016//--------------------------------------
3017// Floating point multiply instructions
3018//--------------------------------------
3019
// fm: single-precision floating multiply, vector form. Shares the scalar
// FMf32 opcode (0b01100011010), matching the vector/scalar pairing used
// throughout this file; it previously carried FA's opcode (0b00100011010),
// duplicating the add encoding.
def FMv4f32:
    RRForm<0b01100011010, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
      "fm\t$rT, $rA, $rB", SPrecFP,
      [(set (v4f32 VECREG:$rT), (fmul (v4f32 VECREG:$rA),
                                      (v4f32 VECREG:$rB)))]>;
3025
// fm: single-precision floating multiply, scalar form.
def FMf32 :
    RRForm<0b01100011010, (outs R32FP:$rT), (ins R32FP:$rA, R32FP:$rB),
      "fm\t$rT, $rA, $rB", SPrecFP,
      [(set R32FP:$rT, (fmul R32FP:$rA, R32FP:$rB))]>;
3030
// Floating point multiply and add
// e.g. d = c + (a * b)
def FMAv4f32:
    RRRForm<0b0111, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB, VECREG:$rC),
      "fma\t$rT, $rA, $rB, $rC", SPrecFP,
      [(set (v4f32 VECREG:$rT),
            (fadd (v4f32 VECREG:$rC),
                  (fmul (v4f32 VECREG:$rA), (v4f32 VECREG:$rB))))]>;

def FMAf32:
    RRRForm<0b0111, (outs R32FP:$rT), (ins R32FP:$rA, R32FP:$rB, R32FP:$rC),
      "fma\t$rT, $rA, $rB, $rC", SPrecFP,
      [(set R32FP:$rT, (fadd R32FP:$rC, (fmul R32FP:$rA, R32FP:$rB)))]>;
3044
// FP multiply and subtract
// Subtracts value in rC from product
// res = a * b - c
// Encoding note: fms's RRR opcode is 0b1111 (which reads the same under
// either bit-order convention); it previously shared FMA's 0b0111 above,
// making the two encodings indistinguishable.
def FMSv4f32 :
    RRRForm<0b1111, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB, VECREG:$rC),
      "fms\t$rT, $rA, $rB, $rC", SPrecFP,
      [(set (v4f32 VECREG:$rT),
            (fsub (fmul (v4f32 VECREG:$rA), (v4f32 VECREG:$rB)),
                  (v4f32 VECREG:$rC)))]>;

def FMSf32 :
    RRRForm<0b1111, (outs R32FP:$rT), (ins R32FP:$rA, R32FP:$rB, R32FP:$rC),
      "fms\t$rT, $rA, $rB, $rC", SPrecFP,
      [(set R32FP:$rT,
            (fsub (fmul R32FP:$rA, R32FP:$rB), R32FP:$rC))]>;
3060
// Floating Negative Multiply and Subtract
// Subtracts product from value in rC
// res = fneg(fms a b c)
//     = - (a * b - c)
//     = c - a * b
// NOTE: subtraction order
// fsub a b = a - b
// fs a b = b - a?
def FNMSf32 :
    RRRForm<0b1101, (outs R32FP:$rT), (ins R32FP:$rA, R32FP:$rB, R32FP:$rC),
      "fnms\t$rT, $rA, $rB, $rC", SPrecFP,
      [(set R32FP:$rT, (fsub R32FP:$rC, (fmul R32FP:$rA, R32FP:$rB)))]>;

def FNMSv4f32 :
    RRRForm<0b1101, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB, VECREG:$rC),
      "fnms\t$rT, $rA, $rB, $rC", SPrecFP,
      [(set (v4f32 VECREG:$rT),
            (fsub (v4f32 VECREG:$rC),
                  (fmul (v4f32 VECREG:$rA),
                        (v4f32 VECREG:$rB))))]>;
3081
3082//--------------------------------------
3083// Floating Point Conversions
3084// Signed conversions:
3085def CSiFv4f32:
3086 CVTIntFPForm<0b0101101110, (outs VECREG:$rT), (ins VECREG:$rA),
3087 "csflt\t$rT, $rA, 0", SPrecFP,
3088 [(set (v4f32 VECREG:$rT), (sint_to_fp (v4i32 VECREG:$rA)))]>;
3089
3090// Convert signed integer to floating point
3091def CSiFf32 :
3092 CVTIntFPForm<0b0101101110, (outs R32FP:$rT), (ins R32C:$rA),
3093 "csflt\t$rT, $rA, 0", SPrecFP,
3094 [(set R32FP:$rT, (sint_to_fp R32C:$rA))]>;
3095
3096// Convert unsigned into to float
3097def CUiFv4f32 :
3098 CVTIntFPForm<0b1101101110, (outs VECREG:$rT), (ins VECREG:$rA),
3099 "cuflt\t$rT, $rA, 0", SPrecFP,
3100 [(set (v4f32 VECREG:$rT), (uint_to_fp (v4i32 VECREG:$rA)))]>;
3101
3102def CUiFf32 :
3103 CVTIntFPForm<0b1101101110, (outs R32FP:$rT), (ins R32C:$rA),
3104 "cuflt\t$rT, $rA, 0", SPrecFP,
3105 [(set R32FP:$rT, (uint_to_fp R32C:$rA))]>;
3106
// Convert float to unsigned int (cfltu)
// Assume that scale = 0
// NOTE(review): cfltu here reuses opcode 0b1101101110, the same value as
// cuflt above and cflts below; distinct instructions cannot share an
// opcode, so at least two of these encodings must be wrong -- verify
// against the SPU ISA before enabling binary emission.

def CFUiv4f32 :
    CVTIntFPForm<0b1101101110, (outs VECREG:$rT), (ins VECREG:$rA),
      "cfltu\t$rT, $rA, 0", SPrecFP,
      [(set (v4i32 VECREG:$rT), (fp_to_uint (v4f32 VECREG:$rA)))]>;

// Scalar form of cfltu.
def CFUif32 :
    CVTIntFPForm<0b1101101110, (outs R32C:$rT), (ins R32FP:$rA),
      "cfltu\t$rT, $rA, 0", SPrecFP,
      [(set R32C:$rT, (fp_to_uint R32FP:$rA))]>;
3119
// Convert float to signed int (cflts)
// Assume that scale = 0
// NOTE(review): shares opcode 0b1101101110 with cuflt/cfltu above --
// see the encoding caveat there.

def CFSiv4f32 :
    CVTIntFPForm<0b1101101110, (outs VECREG:$rT), (ins VECREG:$rA),
      "cflts\t$rT, $rA, 0", SPrecFP,
      [(set (v4i32 VECREG:$rT), (fp_to_sint (v4f32 VECREG:$rA)))]>;

// Scalar form of cflts.
def CFSif32 :
    CVTIntFPForm<0b1101101110, (outs R32C:$rT), (ins R32FP:$rA),
      "cflts\t$rT, $rA, 0", SPrecFP,
      [(set R32C:$rT, (fp_to_sint R32FP:$rA))]>;
3132
3133//===----------------------------------------------------------------------==//
3134// Single<->Double precision conversions
3135//===----------------------------------------------------------------------==//
3136
3137// NOTE: We use "vec" name suffix here to avoid confusion (e.g. input is a
3138// v4f32, output is v2f64--which goes in the name?)
3139
3140// Floating point extend single to double
3141// NOTE: Not sure if passing in v4f32 to FESDvec is correct since it
3142// operates on two double-word slots (i.e. 1st and 3rd fp numbers
3143// are ignored).
3144def FESDvec :
3145 RRForm_1<0b00011101110, (outs VECREG:$rT), (ins VECREG:$rA),
3146 "fesd\t$rT, $rA", SPrecFP,
3147 [(set (v2f64 VECREG:$rT), (fextend (v4f32 VECREG:$rA)))]>;
3148
3149def FESDf32 :
3150 RRForm_1<0b00011101110, (outs R64FP:$rT), (ins R32FP:$rA),
3151 "fesd\t$rT, $rA", SPrecFP,
3152 [(set R64FP:$rT, (fextend R32FP:$rA))]>;
3153
// Floating point round double to single
//def FRDSvec :
//    RRForm_1<0b10011101110, (outs VECREG:$rT), (ins VECREG:$rA),
//      "frds\t$rT, $rA,", SPrecFP,
//      [(set (v4f32 R32FP:$rT), (fround (v2f64 R64FP:$rA)))]>;

// frds, scalar form: round the f64 in $rA to an f32 in $rT.
def FRDSf64 :
    RRForm_1<0b10011101110, (outs R32FP:$rT), (ins R64FP:$rA),
      "frds\t$rT, $rA", SPrecFP,
      [(set R32FP:$rT, (fround R64FP:$rA))]>;
3164
3165//ToDo include anyextend?
3166
3167//===----------------------------------------------------------------------==//
3168// Double precision floating point instructions
3169//===----------------------------------------------------------------------==//
3170def FAf64 :
3171 RRForm<0b00110011010, (outs R64FP:$rT), (ins R64FP:$rA, R64FP:$rB),
3172 "dfa\t$rT, $rA, $rB", DPrecFP,
3173 [(set R64FP:$rT, (fadd R64FP:$rA, R64FP:$rB))]>;
3174
3175def FAv2f64 :
3176 RRForm<0b00110011010, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
3177 "dfa\t$rT, $rA, $rB", DPrecFP,
3178 [(set (v2f64 VECREG:$rT), (fadd (v2f64 VECREG:$rA), (v2f64 VECREG:$rB)))]>;
3179
// dfs: double-precision subtract, scalar form: $rT = $rA - $rB.
def FSf64 :
    RRForm<0b10100011010, (outs R64FP:$rT), (ins R64FP:$rA, R64FP:$rB),
      "dfs\t$rT, $rA, $rB", DPrecFP,
      [(set R64FP:$rT, (fsub R64FP:$rA, R64FP:$rB))]>;

// dfs, vector (v2f64) form; same hardware opcode as the scalar form.
def FSv2f64 :
    RRForm<0b10100011010, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
      "dfs\t$rT, $rA, $rB", DPrecFP,
      [(set (v2f64 VECREG:$rT),
            (fsub (v2f64 VECREG:$rA), (v2f64 VECREG:$rB)))]>;
3190
// dfm: double-precision multiply, scalar form: $rT = $rA * $rB.
def FMf64 :
    RRForm<0b01100011010, (outs R64FP:$rT), (ins R64FP:$rA, R64FP:$rB),
      "dfm\t$rT, $rA, $rB", DPrecFP,
      [(set R64FP:$rT, (fmul R64FP:$rA, R64FP:$rB))]>;
3195
// dfm: double-precision multiply, vector (v2f64) form.
// Fixed: the opcode field previously read 0b00100011010, which disagreed
// with the scalar FMf64 form of the same "dfm" mnemonic. Scalar and vector
// forms are the same hardware instruction and must share one opcode, as
// the FAf64/FAv2f64 and FSf64/FSv2f64 pairs above already do.
def FMv2f64:
    RRForm<0b01100011010, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
      "dfm\t$rT, $rA, $rB", DPrecFP,
      [(set (v2f64 VECREG:$rT),
            (fmul (v2f64 VECREG:$rA), (v2f64 VECREG:$rB)))]>;
3201
// dfma: double-precision fused multiply-add, scalar: $rT = $rC + $rA * $rB.
// The hardware instruction accumulates into its target register, so $rC is
// tied to $rT and omitted from the encoding (only rT, rA, rB are printed).
def FMAf64:
    RRForm<0b00111010110, (outs R64FP:$rT),
                          (ins R64FP:$rA, R64FP:$rB, R64FP:$rC),
      "dfma\t$rT, $rA, $rB", DPrecFP,
      [(set R64FP:$rT, (fadd R64FP:$rC, (fmul R64FP:$rA, R64FP:$rB)))]>,
    RegConstraint<"$rC = $rT">,
    NoEncode<"$rC">;
3209
// dfma, vector (v2f64) form; $rC tied to $rT as in the scalar form.
def FMAv2f64:
    RRForm<0b00111010110, (outs VECREG:$rT),
                          (ins VECREG:$rA, VECREG:$rB, VECREG:$rC),
      "dfma\t$rT, $rA, $rB", DPrecFP,
      [(set (v2f64 VECREG:$rT),
            (fadd (v2f64 VECREG:$rC),
                  (fmul (v2f64 VECREG:$rA), (v2f64 VECREG:$rB))))]>,
    RegConstraint<"$rC = $rT">,
    NoEncode<"$rC">;
3219
// dfms: double-precision multiply-subtract, scalar: $rT = $rA * $rB - $rC,
// with $rC tied to $rT (the hardware instruction overwrites its target
// operand, so $rC never appears in the encoding or assembly).
def FMSf64 :
    RRForm<0b10111010110, (outs R64FP:$rT),
                          (ins R64FP:$rA, R64FP:$rB, R64FP:$rC),
      "dfms\t$rT, $rA, $rB", DPrecFP,
      [(set R64FP:$rT, (fsub (fmul R64FP:$rA, R64FP:$rB), R64FP:$rC))]>,
    RegConstraint<"$rC = $rT">,
    NoEncode<"$rC">;
3227
// dfms: double-precision multiply-subtract, vector form:
//   $rT = $rA * $rB - $rC
// Fixed: dfms overwrites its third operand in hardware, so $rC must be
// tied to $rT and excluded from the encoding. The scalar FMSf64 and all
// sibling double-precision fused ops (FMAf64/FMAv2f64, FNMSf64/FNMSv2f64,
// FNMAf64/FNMAv2f64) carry this constraint; it was missing here, which
// would let the register allocator assign $rC and $rT different registers.
def FMSv2f64 :
    RRForm<0b10111010110, (outs VECREG:$rT),
                          (ins VECREG:$rA, VECREG:$rB, VECREG:$rC),
      "dfms\t$rT, $rA, $rB", DPrecFP,
      [(set (v2f64 VECREG:$rT),
            (fsub (fmul (v2f64 VECREG:$rA), (v2f64 VECREG:$rB)),
                  (v2f64 VECREG:$rC)))]>,
    RegConstraint<"$rC = $rT">,
    NoEncode<"$rC">;
3235
// FNMS: - (a * b - c)
// - (a * b) + c => c - (a * b)
// dfnms, scalar form; $rC tied to $rT because the hardware overwrites
// its target operand.
def FNMSf64 :
    RRForm<0b01111010110, (outs R64FP:$rT),
                          (ins R64FP:$rA, R64FP:$rB, R64FP:$rC),
      "dfnms\t$rT, $rA, $rB", DPrecFP,
      [(set R64FP:$rT, (fsub R64FP:$rC, (fmul R64FP:$rA, R64FP:$rB)))]>,
    RegConstraint<"$rC = $rT">,
    NoEncode<"$rC">;

// Also match the algebraically equivalent fneg(fms) form onto dfnms.
def : Pat<(fneg (fsub (fmul R64FP:$rA, R64FP:$rB), R64FP:$rC)),
          (FNMSf64 R64FP:$rA, R64FP:$rB, R64FP:$rC)>;
3248
// dfnms, vector (v2f64) form: $rT = $rC - $rA * $rB, $rC tied to $rT.
def FNMSv2f64 :
    RRForm<0b01111010110, (outs VECREG:$rT),
                          (ins VECREG:$rA, VECREG:$rB, VECREG:$rC),
      "dfnms\t$rT, $rA, $rB", DPrecFP,
      [(set (v2f64 VECREG:$rT),
            (fsub (v2f64 VECREG:$rC),
                  (fmul (v2f64 VECREG:$rA),
                        (v2f64 VECREG:$rB))))]>,
    RegConstraint<"$rC = $rT">,
    NoEncode<"$rC">;

// Also match the algebraically equivalent fneg(fms) form onto dfnms.
def : Pat<(fneg (fsub (fmul (v2f64 VECREG:$rA), (v2f64 VECREG:$rB)),
                 (v2f64 VECREG:$rC))),
          (FNMSv2f64 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;
3263
// - (a * b + c)
// - (a * b) - c
// dfnma, scalar form: $rT = -($rA * $rB + $rC); $rC tied to $rT because
// the hardware overwrites its target operand.
def FNMAf64 :
    RRForm<0b11111010110, (outs R64FP:$rT),
                          (ins R64FP:$rA, R64FP:$rB, R64FP:$rC),
      "dfnma\t$rT, $rA, $rB", DPrecFP,
      [(set R64FP:$rT, (fneg (fadd R64FP:$rC, (fmul R64FP:$rA, R64FP:$rB))))]>,
    RegConstraint<"$rC = $rT">,
    NoEncode<"$rC">;

// dfnma, vector (v2f64) form.
def FNMAv2f64 :
    RRForm<0b11111010110, (outs VECREG:$rT),
                          (ins VECREG:$rA, VECREG:$rB, VECREG:$rC),
      "dfnma\t$rT, $rA, $rB", DPrecFP,
      [(set (v2f64 VECREG:$rT),
            (fneg (fadd (v2f64 VECREG:$rC),
                        (fmul (v2f64 VECREG:$rA),
                              (v2f64 VECREG:$rB)))))]>,
    RegConstraint<"$rC = $rT">,
    NoEncode<"$rC">;
3284
3285//===----------------------------------------------------------------------==//
3286// Floating point negation and absolute value
3287//===----------------------------------------------------------------------==//
3288
3289def : Pat<(fneg (v4f32 VECREG:$rA)),
3290 (XORfnegvec (v4f32 VECREG:$rA),
3291 (v4f32 (ILHUv4i32 0x8000)))>;
3292
3293def : Pat<(fneg R32FP:$rA),
3294 (XORfneg32 R32FP:$rA, (ILHUr32 0x8000))>;
3295
3296def : Pat<(fneg (v2f64 VECREG:$rA)),
3297 (XORfnegvec (v2f64 VECREG:$rA),
3298 (v2f64 (ANDBIv16i8 (FSMBIv16i8 0x8080), 0x80)))>;
3299
3300def : Pat<(fneg R64FP:$rA),
3301 (XORfneg64 R64FP:$rA,
3302 (ANDBIv16i8 (FSMBIv16i8 0x8080), 0x80))>;
3303
// Floating point absolute value

// fabs f32: AND with 0x7fffffff (ILHU upper halfword, IOHL lower halfword)
// to clear only the sign bit.
def : Pat<(fabs R32FP:$rA),
          (ANDfabs32 R32FP:$rA, (IOHLr32 (ILHUr32 0x7fff), 0xffff))>;
3308
// fabs v4f32: clear the sign bit of each 32-bit lane. The mask must be
// 0x7fffffff per word; build it the same way the scalar f32 pattern above
// does (ILHU upper halfword, IOHL lower halfword, splatted across lanes).
// Fixed: the previous operand (ANDBIv16i8 (FSMBIv16i8 0xffff), 0x7f)
// produced 0x7f7f7f7f per word -- ANDBI applies the 8-bit immediate to
// every byte -- which also cleared exponent/mantissa bits 23, 15 and 7
// of every lane, corrupting the value.
def : Pat<(fabs (v4f32 VECREG:$rA)),
          (ANDfabsvec (v4f32 VECREG:$rA),
                      (v4f32 (IOHLvec (v4i32 (ILHUv4i32 0x7fff)), 0xffff)))>;
3312
// NOTE(review): these f64/v2f64 fabs masks look wrong. ANDBI applies the
// 8-bit immediate to *every* byte, so (ANDBIv16i8 (FSMBIv16i8 0xffff), 0x7f)
// yields 0x7f in all 16 bytes; ANDing with that clears the top bit of every
// byte (exponent and mantissa bits included), not just the sign bit. A
// correct f64 mask needs 0x7f in bytes 0 and 8 only and 0xff elsewhere --
// verify and fix before relying on fabs for f64/v2f64.
def : Pat<(fabs R64FP:$rA),
          (ANDfabs64 R64FP:$rA, (ANDBIv16i8 (FSMBIv16i8 0xffff), 0x7f))>;

def : Pat<(fabs (v2f64 VECREG:$rA)),
          (ANDfabsvec (v2f64 VECREG:$rA),
                      (v2f64 (ANDBIv16i8 (FSMBIv16i8 0xffff), 0x7f)))>;
3319
3320//===----------------------------------------------------------------------===//
3321// Execution, Load NOP (execute NOPs belong in even pipeline, load NOPs belong
3322// in the odd pipeline)
3323//===----------------------------------------------------------------------===//
3324
3325def ENOP : I<(outs), (ins), "enop", ExecNOP> {
3326 let Pattern = [];
3327
3328 let Inst{0-10} = 0b10000000010;
3329 let Inst{11-17} = 0;
3330 let Inst{18-24} = 0;
3331 let Inst{25-31} = 0;
3332}
3333
// lnop: no-operation for the odd (load) pipeline. Like ENOP, it has no
// selection pattern and all non-opcode encoding fields are zero.
def LNOP : I<(outs), (ins), "lnop", LoadNOP> {
  let Pattern = [];

  let Inst{0-10} = 0b10000000000;
  let Inst{11-17} = 0;
  let Inst{18-24} = 0;
  let Inst{25-31} = 0;
}
3342
3343//===----------------------------------------------------------------------===//
3344// Bit conversions (type conversions between vector/packed types)
3345// NOTE: Promotions are handled using the XS* instructions. Truncation
3346// is not handled.
3347//===----------------------------------------------------------------------===//
3348def : Pat<(v16i8 (bitconvert (v8i16 VECREG:$src))), (v16i8 VECREG:$src)>;
3349def : Pat<(v16i8 (bitconvert (v4i32 VECREG:$src))), (v16i8 VECREG:$src)>;
3350def : Pat<(v16i8 (bitconvert (v2i64 VECREG:$src))), (v16i8 VECREG:$src)>;
3351def : Pat<(v16i8 (bitconvert (v4f32 VECREG:$src))), (v16i8 VECREG:$src)>;
3352def : Pat<(v16i8 (bitconvert (v2f64 VECREG:$src))), (v16i8 VECREG:$src)>;
3353
3354def : Pat<(v8i16 (bitconvert (v16i8 VECREG:$src))), (v8i16 VECREG:$src)>;
3355def : Pat<(v8i16 (bitconvert (v4i32 VECREG:$src))), (v8i16 VECREG:$src)>;
3356def : Pat<(v8i16 (bitconvert (v2i64 VECREG:$src))), (v8i16 VECREG:$src)>;
3357def : Pat<(v8i16 (bitconvert (v4f32 VECREG:$src))), (v8i16 VECREG:$src)>;
3358def : Pat<(v8i16 (bitconvert (v2f64 VECREG:$src))), (v8i16 VECREG:$src)>;
3359
3360def : Pat<(v4i32 (bitconvert (v16i8 VECREG:$src))), (v4i32 VECREG:$src)>;
3361def : Pat<(v4i32 (bitconvert (v8i16 VECREG:$src))), (v4i32 VECREG:$src)>;
3362def : Pat<(v4i32 (bitconvert (v2i64 VECREG:$src))), (v4i32 VECREG:$src)>;
3363def : Pat<(v4i32 (bitconvert (v4f32 VECREG:$src))), (v4i32 VECREG:$src)>;
3364def : Pat<(v4i32 (bitconvert (v2f64 VECREG:$src))), (v4i32 VECREG:$src)>;
3365
3366def : Pat<(v2i64 (bitconvert (v16i8 VECREG:$src))), (v2i64 VECREG:$src)>;
3367def : Pat<(v2i64 (bitconvert (v8i16 VECREG:$src))), (v2i64 VECREG:$src)>;
3368def : Pat<(v2i64 (bitconvert (v4i32 VECREG:$src))), (v2i64 VECREG:$src)>;
3369def : Pat<(v2i64 (bitconvert (v4f32 VECREG:$src))), (v2i64 VECREG:$src)>;
3370def : Pat<(v2i64 (bitconvert (v2f64 VECREG:$src))), (v2i64 VECREG:$src)>;
3371
3372def : Pat<(v4f32 (bitconvert (v16i8 VECREG:$src))), (v4f32 VECREG:$src)>;
3373def : Pat<(v4f32 (bitconvert (v8i16 VECREG:$src))), (v4f32 VECREG:$src)>;
3374def : Pat<(v4f32 (bitconvert (v2i64 VECREG:$src))), (v4f32 VECREG:$src)>;
3375def : Pat<(v4f32 (bitconvert (v4i32 VECREG:$src))), (v4f32 VECREG:$src)>;
3376def : Pat<(v4f32 (bitconvert (v2f64 VECREG:$src))), (v4f32 VECREG:$src)>;
3377
3378def : Pat<(v2f64 (bitconvert (v16i8 VECREG:$src))), (v2f64 VECREG:$src)>;
3379def : Pat<(v2f64 (bitconvert (v8i16 VECREG:$src))), (v2f64 VECREG:$src)>;
3380def : Pat<(v2f64 (bitconvert (v4i32 VECREG:$src))), (v2f64 VECREG:$src)>;
3381def : Pat<(v2f64 (bitconvert (v2i64 VECREG:$src))), (v2f64 VECREG:$src)>;
3382def : Pat<(v2f64 (bitconvert (v2f64 VECREG:$src))), (v2f64 VECREG:$src)>;
3383
3384def : Pat<(f32 (bitconvert (i32 R32C:$src))), (f32 R32FP:$src)>;
Scott Michel754d8662007-12-20 00:44:13 +00003385def : Pat<(f64 (bitconvert (i64 R64C:$src))), (f64 R64FP:$src)>;
Scott Michel8b6b4202007-12-04 22:35:58 +00003386
3387//===----------------------------------------------------------------------===//
3388// Instruction patterns:
3389//===----------------------------------------------------------------------===//
3390
3391// General 32-bit constants:
3392def : Pat<(i32 imm:$imm),
3393 (IOHLr32 (ILHUr32 (HI16 imm:$imm)), (LO16 imm:$imm))>;
3394
3395// Single precision float constants:
3396def : Pat<(SPUFPconstant (f32 fpimm:$imm)),
3397 (IOHLf32 (ILHUf32 (HI16_f32 fpimm:$imm)), (LO16_f32 fpimm:$imm))>;
3398
3399// General constant 32-bit vectors
3400def : Pat<(v4i32 v4i32Imm:$imm),
3401 (IOHLvec (v4i32 (ILHUv4i32 (HI16_vec v4i32Imm:$imm))),
3402 (LO16_vec v4i32Imm:$imm))>;
Scott Michel438be252007-12-17 22:32:34 +00003403
3404// 8-bit constants
3405def : Pat<(i8 imm:$imm),
3406 (ILHr8 imm:$imm)>;
Scott Michel8b6b4202007-12-04 22:35:58 +00003407
3408//===----------------------------------------------------------------------===//
3409// Call instruction patterns:
3410//===----------------------------------------------------------------------===//
3411// Return void
3412def : Pat<(ret),
3413 (RET)>;
3414
3415//===----------------------------------------------------------------------===//
3416// Zero/Any/Sign extensions
3417//===----------------------------------------------------------------------===//
3418
3419// zext 1->32: Zero extend i1 to i32
3420def : Pat<(SPUextract_i1_zext R32C:$rSrc),
3421 (ANDIr32 R32C:$rSrc, 0x1)>;
3422
3423// sext 8->32: Sign extend bytes to words
3424def : Pat<(sext_inreg R32C:$rSrc, i8),
3425 (XSHWr32 (XSBHr32 R32C:$rSrc))>;
3426
Scott Michel438be252007-12-17 22:32:34 +00003427def : Pat<(i32 (sext R8C:$rSrc)),
3428 (XSHWr16 (XSBHr8 R8C:$rSrc))>;
3429
Scott Michel8b6b4202007-12-04 22:35:58 +00003430def : Pat<(SPUextract_i8_sext VECREG:$rSrc),
3431 (XSHWr32 (XSBHr32 (ORi32_v4i32 (v4i32 VECREG:$rSrc),
3432 (v4i32 VECREG:$rSrc))))>;
3433
Scott Michel438be252007-12-17 22:32:34 +00003434// zext 8->16: Zero extend bytes to halfwords
3435def : Pat<(i16 (zext R8C:$rSrc)),
3436 (ANDHI1To2 R8C:$rSrc, 0xff)>;
3437
3438// zext 8->32 from preferred slot in load/store
Scott Michel8b6b4202007-12-04 22:35:58 +00003439def : Pat<(SPUextract_i8_zext VECREG:$rSrc),
3440 (ANDIr32 (ORi32_v4i32 (v4i32 VECREG:$rSrc), (v4i32 VECREG:$rSrc)),
3441 0xff)>;
3442
Scott Michel438be252007-12-17 22:32:34 +00003443// zext 8->32: Zero extend bytes to words
3444def : Pat<(i32 (zext R8C:$rSrc)),
3445 (ANDI1To4 R8C:$rSrc, 0xff)>;
3446
3447// anyext 8->16: Extend 8->16 bits, irrespective of sign
3448def : Pat<(i16 (anyext R8C:$rSrc)),
3449 (ORHI1To2 R8C:$rSrc, 0)>;
3450
3451// anyext 8->32: Extend 8->32 bits, irrespective of sign
3452def : Pat<(i32 (anyext R8C:$rSrc)),
3453 (ORI1To4 R8C:$rSrc, 0)>;
3454
Scott Michel8b6b4202007-12-04 22:35:58 +00003455// zext 16->32: Zero extend halfwords to words (note that we have to juggle the
3456// 0xffff constant since it will not fit into an immediate.)
3457def : Pat<(i32 (zext R16C:$rSrc)),
3458 (AND2To4 R16C:$rSrc, (ILAr32 0xffff))>;
3459
3460def : Pat<(i32 (zext (and R16C:$rSrc, 0xf))),
3461 (ANDI2To4 R16C:$rSrc, 0xf)>;
3462
3463def : Pat<(i32 (zext (and R16C:$rSrc, 0xff))),
3464 (ANDI2To4 R16C:$rSrc, 0xff)>;
3465
3466def : Pat<(i32 (zext (and R16C:$rSrc, 0xfff))),
3467 (ANDI2To4 R16C:$rSrc, 0xfff)>;
3468
3469// anyext 16->32: Extend 16->32 bits, irrespective of sign
3470def : Pat<(i32 (anyext R16C:$rSrc)),
3471 (ORI2To4 R16C:$rSrc, 0)>;
3472
3473//===----------------------------------------------------------------------===//
3474// Address translation: SPU, like PPC, has to split addresses into high and
3475// low parts in order to load them into a register.
3476//===----------------------------------------------------------------------===//
3477
Scott Micheldbac4cf2008-01-11 02:53:15 +00003478def : Pat<(SPUhi tglobaladdr:$in, 0), (ILHUhi tglobaladdr:$in)>;
3479def : Pat<(SPUlo tglobaladdr:$in, 0), (ILAlo tglobaladdr:$in)>;
3480def : Pat<(SPUaform tglobaladdr:$in, 0), (ILAlsa tglobaladdr:$in)>;
3481def : Pat<(SPUxform tglobaladdr:$in, 0),
3482 (IOHLlo (ILHUhi tglobaladdr:$in), tglobaladdr:$in)>;
3483def : Pat<(SPUhi tjumptable:$in, 0), (ILHUhi tjumptable:$in)>;
3484def : Pat<(SPUlo tjumptable:$in, 0), (ILAlo tjumptable:$in)>;
3485def : Pat<(SPUaform tjumptable:$in, 0), (ILAlsa tjumptable:$in)>;
3486def : Pat<(SPUxform tjumptable:$in, 0),
3487 (IOHLlo (ILHUhi tjumptable:$in), tjumptable:$in)>;
3488def : Pat<(SPUhi tconstpool:$in , 0), (ILHUhi tconstpool:$in)>;
3489def : Pat<(SPUlo tconstpool:$in , 0), (ILAlo tconstpool:$in)>;
3490def : Pat<(SPUaform tconstpool:$in, 0), (ILAlsa tconstpool:$in)>;
3491/* def : Pat<(SPUxform tconstpool:$in, 0),
3492 (IOHLlo (ILHUhi tconstpool:$in), tconstpool:$in)>; */
Scott Michel8b6b4202007-12-04 22:35:58 +00003493
Scott Michel8b6b4202007-12-04 22:35:58 +00003494// Instrinsics:
3495include "CellSDKIntrinsics.td"