//==- SPUInstrInfo.td - Describe the Cell SPU Instructions -*- tablegen -*-==//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
// Cell SPU Instructions:
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// TODO Items (not urgent today, but would be nice, low priority)
//
// ANDBI, ORBI: SPU constructs a 4-byte constant for these instructions by
// concatenating the byte argument b as "bbbb". Could recognize this bit pattern
// in 16-bit and 32-bit constants and reduce instruction count.
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Pseudo instructions:
//===----------------------------------------------------------------------===//

// Call-sequence markers. Both define and use R1 (the stack pointer) and carry
// a control dependence so they cannot be scheduled away from the call they
// bracket. They expand to stack-pointer adjustments (or nothing) later.
let hasCtrlDep = 1, Defs = [R1], Uses = [R1] in {
  def ADJCALLSTACKDOWN : Pseudo<(outs), (ins u16imm:$amt),
                                "${:comment} ADJCALLSTACKDOWN",
                                [(callseq_start imm:$amt)]>;
  def ADJCALLSTACKUP : Pseudo<(outs), (ins u16imm:$amt),
                              "${:comment} ADJCALLSTACKUP",
                              [(callseq_end imm:$amt)]>;
}

//===----------------------------------------------------------------------===//
// DWARF debugging Pseudo Instructions
//===----------------------------------------------------------------------===//

// Emits a source-location comment in the assembly stream; matches the
// dwarf_loc selection-DAG node (line, column, file operands).
def DWARF_LOC : Pseudo<(outs), (ins i32imm:$line, i32imm:$col, i32imm:$file),
                       "${:comment} .loc $file, $line, $col",
                       [(dwarf_loc (i32 imm:$line), (i32 imm:$col),
                                   (i32 imm:$file))]>;

//===----------------------------------------------------------------------===//
// Loads:
// NB: The ordering is actually important, since the instruction selection
// will try each of the instructions in sequence, i.e., the D-form first with
// the 10-bit displacement, then the A-form with the 16 bit displacement, and
// finally the X-form with the register-register.
//===----------------------------------------------------------------------===//

let isSimpleLoad = 1 in {
  // LQD: load quadword, D-form (register base + signed 10-bit displacement).
  def LQDv16i8:
      RI10Form<0b00101100, (outs VECREG:$rT), (ins memri10:$src),
        "lqd\t$rT, $src", LoadStore,
        [(set (v16i8 VECREG:$rT), (load dform_addr:$src))]>;

  def LQDv8i16:
      RI10Form<0b00101100, (outs VECREG:$rT), (ins memri10:$src),
        "lqd\t$rT, $src", LoadStore,
        [(set (v8i16 VECREG:$rT), (load dform_addr:$src))]>;

  def LQDv4i32:
      RI10Form<0b00101100, (outs VECREG:$rT), (ins memri10:$src),
        "lqd\t$rT, $src", LoadStore,
        [(set (v4i32 VECREG:$rT), (load dform_addr:$src))]>;

  def LQDv2i64:
      RI10Form<0b00101100, (outs VECREG:$rT), (ins memri10:$src),
        "lqd\t$rT, $src", LoadStore,
        [(set (v2i64 VECREG:$rT), (load dform_addr:$src))]>;

  def LQDv4f32:
      RI10Form<0b00101100, (outs VECREG:$rT), (ins memri10:$src),
        "lqd\t$rT, $src", LoadStore,
        [(set (v4f32 VECREG:$rT), (load dform_addr:$src))]>;

  def LQDv2f64:
      RI10Form<0b00101100, (outs VECREG:$rT), (ins memri10:$src),
        "lqd\t$rT, $src", LoadStore,
        [(set (v2f64 VECREG:$rT), (load dform_addr:$src))]>;

  def LQDr128:
      RI10Form<0b00101100, (outs GPRC:$rT), (ins memri10:$src),
        "lqd\t$rT, $src", LoadStore,
        [(set GPRC:$rT, (load dform_addr:$src))]>;

  def LQDr64:
      RI10Form<0b00101100, (outs R64C:$rT), (ins memri10:$src),
        "lqd\t$rT, $src", LoadStore,
        [(set R64C:$rT, (load dform_addr:$src))]>;

  def LQDr32:
      RI10Form<0b00101100, (outs R32C:$rT), (ins memri10:$src),
        "lqd\t$rT, $src", LoadStore,
        [(set R32C:$rT, (load dform_addr:$src))]>;

  // Floating Point
  def LQDf32:
      RI10Form<0b00101100, (outs R32FP:$rT), (ins memri10:$src),
        "lqd\t$rT, $src", LoadStore,
        [(set R32FP:$rT, (load dform_addr:$src))]>;

  def LQDf64:
      RI10Form<0b00101100, (outs R64FP:$rT), (ins memri10:$src),
        "lqd\t$rT, $src", LoadStore,
        [(set R64FP:$rT, (load dform_addr:$src))]>;
  // END Floating Point

  def LQDr16:
      RI10Form<0b00101100, (outs R16C:$rT), (ins memri10:$src),
        "lqd\t$rT, $src", LoadStore,
        [(set R16C:$rT, (load dform_addr:$src))]>;

  def LQDr8:
      RI10Form<0b00101100, (outs R8C:$rT), (ins memri10:$src),
        "lqd\t$rT, $src", LoadStore,
        [(set R8C:$rT, (load dform_addr:$src))]>;

  // LQA: load quadword, A-form (absolute 16-bit address into the low 256K).
  def LQAv16i8:
      RI16Form<0b100001100, (outs VECREG:$rT), (ins addr256k:$src),
        "lqa\t$rT, $src", LoadStore,
        [(set (v16i8 VECREG:$rT), (load aform_addr:$src))]>;

  def LQAv8i16:
      RI16Form<0b100001100, (outs VECREG:$rT), (ins addr256k:$src),
        "lqa\t$rT, $src", LoadStore,
        [(set (v8i16 VECREG:$rT), (load aform_addr:$src))]>;

  def LQAv4i32:
      RI16Form<0b100001100, (outs VECREG:$rT), (ins addr256k:$src),
        "lqa\t$rT, $src", LoadStore,
        [(set (v4i32 VECREG:$rT), (load aform_addr:$src))]>;

  def LQAv2i64:
      RI16Form<0b100001100, (outs VECREG:$rT), (ins addr256k:$src),
        "lqa\t$rT, $src", LoadStore,
        [(set (v2i64 VECREG:$rT), (load aform_addr:$src))]>;

  def LQAv4f32:
      RI16Form<0b100001100, (outs VECREG:$rT), (ins addr256k:$src),
        "lqa\t$rT, $src", LoadStore,
        [(set (v4f32 VECREG:$rT), (load aform_addr:$src))]>;

  def LQAv2f64:
      RI16Form<0b100001100, (outs VECREG:$rT), (ins addr256k:$src),
        "lqa\t$rT, $src", LoadStore,
        [(set (v2f64 VECREG:$rT), (load aform_addr:$src))]>;

  def LQAr128:
      RI16Form<0b100001100, (outs GPRC:$rT), (ins addr256k:$src),
        "lqa\t$rT, $src", LoadStore,
        [(set GPRC:$rT, (load aform_addr:$src))]>;

  def LQAr64:
      RI16Form<0b100001100, (outs R64C:$rT), (ins addr256k:$src),
        "lqa\t$rT, $src", LoadStore,
        [(set R64C:$rT, (load aform_addr:$src))]>;

  def LQAr32:
      RI16Form<0b100001100, (outs R32C:$rT), (ins addr256k:$src),
        "lqa\t$rT, $src", LoadStore,
        [(set R32C:$rT, (load aform_addr:$src))]>;

  def LQAf32:
      RI16Form<0b100001100, (outs R32FP:$rT), (ins addr256k:$src),
        "lqa\t$rT, $src", LoadStore,
        [(set R32FP:$rT, (load aform_addr:$src))]>;

  def LQAf64:
      RI16Form<0b100001100, (outs R64FP:$rT), (ins addr256k:$src),
        "lqa\t$rT, $src", LoadStore,
        [(set R64FP:$rT, (load aform_addr:$src))]>;

  def LQAr16:
      RI16Form<0b100001100, (outs R16C:$rT), (ins addr256k:$src),
        "lqa\t$rT, $src", LoadStore,
        [(set R16C:$rT, (load aform_addr:$src))]>;

  def LQAr8:
      RI16Form<0b100001100, (outs R8C:$rT), (ins addr256k:$src),
        "lqa\t$rT, $src", LoadStore,
        [(set R8C:$rT, (load aform_addr:$src))]>;

  // LQX: load quadword, X-form (register + register).
  def LQXv16i8:
      RRForm<0b00100011100, (outs VECREG:$rT), (ins memrr:$src),
        "lqx\t$rT, $src", LoadStore,
        [(set (v16i8 VECREG:$rT), (load xform_addr:$src))]>;

  def LQXv8i16:
      RRForm<0b00100011100, (outs VECREG:$rT), (ins memrr:$src),
        "lqx\t$rT, $src", LoadStore,
        [(set (v8i16 VECREG:$rT), (load xform_addr:$src))]>;

  def LQXv4i32:
      RRForm<0b00100011100, (outs VECREG:$rT), (ins memrr:$src),
        "lqx\t$rT, $src", LoadStore,
        [(set (v4i32 VECREG:$rT), (load xform_addr:$src))]>;

  def LQXv2i64:
      RRForm<0b00100011100, (outs VECREG:$rT), (ins memrr:$src),
        "lqx\t$rT, $src", LoadStore,
        [(set (v2i64 VECREG:$rT), (load xform_addr:$src))]>;

  def LQXv4f32:
      RRForm<0b00100011100, (outs VECREG:$rT), (ins memrr:$src),
        "lqx\t$rT, $src", LoadStore,
        [(set (v4f32 VECREG:$rT), (load xform_addr:$src))]>;

  def LQXv2f64:
      RRForm<0b00100011100, (outs VECREG:$rT), (ins memrr:$src),
        "lqx\t$rT, $src", LoadStore,
        [(set (v2f64 VECREG:$rT), (load xform_addr:$src))]>;

  def LQXr128:
      RRForm<0b00100011100, (outs GPRC:$rT), (ins memrr:$src),
        "lqx\t$rT, $src", LoadStore,
        [(set GPRC:$rT, (load xform_addr:$src))]>;

  def LQXr64:
      RRForm<0b00100011100, (outs R64C:$rT), (ins memrr:$src),
        "lqx\t$rT, $src", LoadStore,
        [(set R64C:$rT, (load xform_addr:$src))]>;

  def LQXr32:
      RRForm<0b00100011100, (outs R32C:$rT), (ins memrr:$src),
        "lqx\t$rT, $src", LoadStore,
        [(set R32C:$rT, (load xform_addr:$src))]>;

  def LQXf32:
      RRForm<0b00100011100, (outs R32FP:$rT), (ins memrr:$src),
        "lqx\t$rT, $src", LoadStore,
        [(set R32FP:$rT, (load xform_addr:$src))]>;

  def LQXf64:
      RRForm<0b00100011100, (outs R64FP:$rT), (ins memrr:$src),
        "lqx\t$rT, $src", LoadStore,
        [(set R64FP:$rT, (load xform_addr:$src))]>;

  def LQXr16:
      RRForm<0b00100011100, (outs R16C:$rT), (ins memrr:$src),
        "lqx\t$rT, $src", LoadStore,
        [(set R16C:$rT, (load xform_addr:$src))]>;

  def LQXr8:
      RRForm<0b00100011100, (outs R8C:$rT), (ins memrr:$src),
        "lqx\t$rT, $src", LoadStore,
        [(set R8C:$rT, (load xform_addr:$src))]>;

/* Load quadword, PC relative: Not much use at this point in time.
   Might be of use later for relocatable code.
  def LQR : RI16Form<0b111001100, (outs VECREG:$rT), (ins s16imm:$disp),
      "lqr\t$rT, $disp", LoadStore,
      [(set VECREG:$rT, (load iaddr:$disp))]>;
 */
}

//===----------------------------------------------------------------------===//
// Stores:
//===----------------------------------------------------------------------===//
// NOTE(review): The STQA* and STQX* records below reuse RI10Form and STQD's
// opcode field (0b00100100) even though their operands are A-form (addr256k)
// and X-form (memrr) addresses. Per the SPU ISA, stqa is an RI16-form and
// stqx an RR-form instruction, so these look like copy/paste from STQD; the
// assembly strings and patterns are consistent, but the binary encodings
// should be verified against the ISA manual. -- TODO confirm

// STQD: store quadword, D-form (register base + signed 10-bit displacement).
def STQDv16i8 : RI10Form<0b00100100, (outs), (ins VECREG:$rT, memri10:$src),
    "stqd\t$rT, $src", LoadStore,
    [(store (v16i8 VECREG:$rT), dform_addr:$src)]>;

def STQDv8i16 : RI10Form<0b00100100, (outs), (ins VECREG:$rT, memri10:$src),
    "stqd\t$rT, $src", LoadStore,
    [(store (v8i16 VECREG:$rT), dform_addr:$src)]>;

def STQDv4i32 : RI10Form<0b00100100, (outs), (ins VECREG:$rT, memri10:$src),
    "stqd\t$rT, $src", LoadStore,
    [(store (v4i32 VECREG:$rT), dform_addr:$src)]>;

def STQDv2i64 : RI10Form<0b00100100, (outs), (ins VECREG:$rT, memri10:$src),
    "stqd\t$rT, $src", LoadStore,
    [(store (v2i64 VECREG:$rT), dform_addr:$src)]>;

def STQDv4f32 : RI10Form<0b00100100, (outs), (ins VECREG:$rT, memri10:$src),
    "stqd\t$rT, $src", LoadStore,
    [(store (v4f32 VECREG:$rT), dform_addr:$src)]>;

def STQDv2f64 : RI10Form<0b00100100, (outs), (ins VECREG:$rT, memri10:$src),
    "stqd\t$rT, $src", LoadStore,
    [(store (v2f64 VECREG:$rT), dform_addr:$src)]>;

def STQDr128 : RI10Form<0b00100100, (outs), (ins GPRC:$rT, memri10:$src),
    "stqd\t$rT, $src", LoadStore,
    [(store GPRC:$rT, dform_addr:$src)]>;

def STQDr64 : RI10Form<0b00100100, (outs), (ins R64C:$rT, memri10:$src),
    "stqd\t$rT, $src", LoadStore,
    [(store R64C:$rT, dform_addr:$src)]>;

def STQDr32 : RI10Form<0b00100100, (outs), (ins R32C:$rT, memri10:$src),
    "stqd\t$rT, $src", LoadStore,
    [(store R32C:$rT, dform_addr:$src)]>;

// Floating Point
def STQDf32 : RI10Form<0b00100100, (outs), (ins R32FP:$rT, memri10:$src),
    "stqd\t$rT, $src", LoadStore,
    [(store R32FP:$rT, dform_addr:$src)]>;

def STQDf64 : RI10Form<0b00100100, (outs), (ins R64FP:$rT, memri10:$src),
    "stqd\t$rT, $src", LoadStore,
    [(store R64FP:$rT, dform_addr:$src)]>;

def STQDr16 : RI10Form<0b00100100, (outs), (ins R16C:$rT, memri10:$src),
    "stqd\t$rT, $src", LoadStore,
    [(store R16C:$rT, dform_addr:$src)]>;

def STQDr8 : RI10Form<0b00100100, (outs), (ins R8C:$rT, memri10:$src),
    "stqd\t$rT, $src", LoadStore,
    [(store R8C:$rT, dform_addr:$src)]>;

// STQA: store quadword, A-form (absolute address into the low 256K).
def STQAv16i8 : RI10Form<0b00100100, (outs), (ins VECREG:$rT, addr256k:$src),
    "stqa\t$rT, $src", LoadStore,
    [(store (v16i8 VECREG:$rT), aform_addr:$src)]>;

def STQAv8i16 : RI10Form<0b00100100, (outs), (ins VECREG:$rT, addr256k:$src),
    "stqa\t$rT, $src", LoadStore,
    [(store (v8i16 VECREG:$rT), aform_addr:$src)]>;

def STQAv4i32 : RI10Form<0b00100100, (outs), (ins VECREG:$rT, addr256k:$src),
    "stqa\t$rT, $src", LoadStore,
    [(store (v4i32 VECREG:$rT), aform_addr:$src)]>;

def STQAv2i64 : RI10Form<0b00100100, (outs), (ins VECREG:$rT, addr256k:$src),
    "stqa\t$rT, $src", LoadStore,
    [(store (v2i64 VECREG:$rT), aform_addr:$src)]>;

def STQAv4f32 : RI10Form<0b00100100, (outs), (ins VECREG:$rT, addr256k:$src),
    "stqa\t$rT, $src", LoadStore,
    [(store (v4f32 VECREG:$rT), aform_addr:$src)]>;

def STQAv2f64 : RI10Form<0b00100100, (outs), (ins VECREG:$rT, addr256k:$src),
    "stqa\t$rT, $src", LoadStore,
    [(store (v2f64 VECREG:$rT), aform_addr:$src)]>;

def STQAr128 : RI10Form<0b00100100, (outs), (ins GPRC:$rT, addr256k:$src),
    "stqa\t$rT, $src", LoadStore,
    [(store GPRC:$rT, aform_addr:$src)]>;

def STQAr64 : RI10Form<0b00100100, (outs), (ins R64C:$rT, addr256k:$src),
    "stqa\t$rT, $src", LoadStore,
    [(store R64C:$rT, aform_addr:$src)]>;

def STQAr32 : RI10Form<0b00100100, (outs), (ins R32C:$rT, addr256k:$src),
    "stqa\t$rT, $src", LoadStore,
    [(store R32C:$rT, aform_addr:$src)]>;

// Floating Point
def STQAf32 : RI10Form<0b00100100, (outs), (ins R32FP:$rT, addr256k:$src),
    "stqa\t$rT, $src", LoadStore,
    [(store R32FP:$rT, aform_addr:$src)]>;

def STQAf64 : RI10Form<0b00100100, (outs), (ins R64FP:$rT, addr256k:$src),
    "stqa\t$rT, $src", LoadStore,
    [(store R64FP:$rT, aform_addr:$src)]>;

def STQAr16 : RI10Form<0b00100100, (outs), (ins R16C:$rT, addr256k:$src),
    "stqa\t$rT, $src", LoadStore,
    [(store R16C:$rT, aform_addr:$src)]>;

def STQAr8 : RI10Form<0b00100100, (outs), (ins R8C:$rT, addr256k:$src),
    "stqa\t$rT, $src", LoadStore,
    [(store R8C:$rT, aform_addr:$src)]>;

// STQX: store quadword, X-form (register + register).
def STQXv16i8 : RI10Form<0b00100100, (outs), (ins VECREG:$rT, memrr:$src),
    "stqx\t$rT, $src", LoadStore,
    [(store (v16i8 VECREG:$rT), xform_addr:$src)]>;

def STQXv8i16 : RI10Form<0b00100100, (outs), (ins VECREG:$rT, memrr:$src),
    "stqx\t$rT, $src", LoadStore,
    [(store (v8i16 VECREG:$rT), xform_addr:$src)]>;

def STQXv4i32 : RI10Form<0b00100100, (outs), (ins VECREG:$rT, memrr:$src),
    "stqx\t$rT, $src", LoadStore,
    [(store (v4i32 VECREG:$rT), xform_addr:$src)]>;

def STQXv2i64 : RI10Form<0b00100100, (outs), (ins VECREG:$rT, memrr:$src),
    "stqx\t$rT, $src", LoadStore,
    [(store (v2i64 VECREG:$rT), xform_addr:$src)]>;

def STQXv4f32 : RI10Form<0b00100100, (outs), (ins VECREG:$rT, memrr:$src),
    "stqx\t$rT, $src", LoadStore,
    [(store (v4f32 VECREG:$rT), xform_addr:$src)]>;

def STQXv2f64 : RI10Form<0b00100100, (outs), (ins VECREG:$rT, memrr:$src),
    "stqx\t$rT, $src", LoadStore,
    [(store (v2f64 VECREG:$rT), xform_addr:$src)]>;

def STQXr128 : RI10Form<0b00100100, (outs), (ins GPRC:$rT, memrr:$src),
    "stqx\t$rT, $src", LoadStore,
    [(store GPRC:$rT, xform_addr:$src)]>;

def STQXr64:
    RI10Form<0b00100100, (outs), (ins R64C:$rT, memrr:$src),
    "stqx\t$rT, $src", LoadStore,
    [(store R64C:$rT, xform_addr:$src)]>;

def STQXr32:
    RI10Form<0b00100100, (outs), (ins R32C:$rT, memrr:$src),
    "stqx\t$rT, $src", LoadStore,
    [(store R32C:$rT, xform_addr:$src)]>;

// Floating Point
def STQXf32:
    RI10Form<0b00100100, (outs), (ins R32FP:$rT, memrr:$src),
    "stqx\t$rT, $src", LoadStore,
    [(store R32FP:$rT, xform_addr:$src)]>;

def STQXf64:
    RI10Form<0b00100100, (outs), (ins R64FP:$rT, memrr:$src),
    "stqx\t$rT, $src", LoadStore,
    [(store R64FP:$rT, xform_addr:$src)]>;

def STQXr16:
    RI10Form<0b00100100, (outs), (ins R16C:$rT, memrr:$src),
    "stqx\t$rT, $src", LoadStore,
    [(store R16C:$rT, xform_addr:$src)]>;

def STQXr8:
    RI10Form<0b00100100, (outs), (ins R8C:$rT, memrr:$src),
    "stqx\t$rT, $src", LoadStore,
    [(store R8C:$rT, xform_addr:$src)]>;

/* Store quadword, PC relative: Not much use at this point in time. Might
   be useful for relocatable code.
def STQR : RI16Form<0b111000100, (outs), (ins VECREG:$rT, s16imm:$disp),
    "stqr\t$rT, $disp", LoadStore,
    [(store VECREG:$rT, iaddr:$disp)]>;
*/

//===----------------------------------------------------------------------===//
// Generate Controls for Insertion:
//===----------------------------------------------------------------------===//
// CBD/CHD/CWD/CDD take a D-form address (7-bit immediate displacement);
// CBX/CHX/CWX/CDX take an X-form (register + register) address. Each builds
// the shuffle mask (SPUvecinsmask) used to insert a byte, halfword, word or
// doubleword into a quadword at the addressed slot.

def CBD :
    RI7Form<0b10101111100, (outs VECREG:$rT), (ins memri7:$src),
      "cbd\t$rT, $src", ShuffleOp,
      [(set (v16i8 VECREG:$rT), (SPUvecinsmask dform2_addr:$src))]>;

def CBX : RRForm<0b00101011100, (outs VECREG:$rT), (ins memrr:$src),
    "cbx\t$rT, $src", ShuffleOp,
    [(set (v16i8 VECREG:$rT), (SPUvecinsmask xform_addr:$src))]>;

def CHD : RI7Form<0b10101111100, (outs VECREG:$rT), (ins memri7:$src),
    "chd\t$rT, $src", ShuffleOp,
    [(set (v8i16 VECREG:$rT), (SPUvecinsmask dform2_addr:$src))]>;

def CHX : RRForm<0b10101011100, (outs VECREG:$rT), (ins memrr:$src),
    "chx\t$rT, $src", ShuffleOp,
    [(set (v8i16 VECREG:$rT), (SPUvecinsmask xform_addr:$src))]>;

def CWD : RI7Form<0b01101111100, (outs VECREG:$rT), (ins memri7:$src),
    "cwd\t$rT, $src", ShuffleOp,
    [(set (v4i32 VECREG:$rT), (SPUvecinsmask dform2_addr:$src))]>;

def CWX : RRForm<0b01101011100, (outs VECREG:$rT), (ins memrr:$src),
    "cwx\t$rT, $src", ShuffleOp,
    [(set (v4i32 VECREG:$rT), (SPUvecinsmask xform_addr:$src))]>;

def CDD : RI7Form<0b11101111100, (outs VECREG:$rT), (ins memri7:$src),
    "cdd\t$rT, $src", ShuffleOp,
    [(set (v2i64 VECREG:$rT), (SPUvecinsmask dform2_addr:$src))]>;

def CDX : RRForm<0b11101011100, (outs VECREG:$rT), (ins memrr:$src),
    "cdx\t$rT, $src", ShuffleOp,
    [(set (v2i64 VECREG:$rT), (SPUvecinsmask xform_addr:$src))]>;

//===----------------------------------------------------------------------===//
// Constant formation:
//===----------------------------------------------------------------------===//

// ILH: immediate load halfword (16-bit immediate replicated per halfword).
def ILHv8i16:
    RI16Form<0b110000010, (outs VECREG:$rT), (ins s16imm:$val),
      "ilh\t$rT, $val", ImmLoad,
      [(set (v8i16 VECREG:$rT), (v8i16 v8i16SExt16Imm:$val))]>;

def ILHr16:
    RI16Form<0b110000010, (outs R16C:$rT), (ins s16imm:$val),
      "ilh\t$rT, $val", ImmLoad,
      [(set R16C:$rT, immSExt16:$val)]>;

// Cell SPU doesn't have a native 8-bit immediate load, but ILH works ("with
// the right constant")
def ILHr8:
    RI16Form<0b110000010, (outs R8C:$rT), (ins s16imm_i8:$val),
      "ilh\t$rT, $val", ImmLoad,
      [(set R8C:$rT, immSExt8:$val)]>;

// IL does sign extension!
def ILr64:
    RI16Form<0b100000010, (outs R64C:$rT), (ins s16imm_i64:$val),
      "il\t$rT, $val", ImmLoad,
      [(set R64C:$rT, immSExt16:$val)]>;

def ILv2i64:
    RI16Form<0b100000010, (outs VECREG:$rT), (ins s16imm_i64:$val),
      "il\t$rT, $val", ImmLoad,
      [(set VECREG:$rT, (v2i64 v2i64SExt16Imm:$val))]>;

def ILv4i32:
    RI16Form<0b100000010, (outs VECREG:$rT), (ins s16imm:$val),
      "il\t$rT, $val", ImmLoad,
      [(set VECREG:$rT, (v4i32 v4i32SExt16Imm:$val))]>;

def ILr32:
    RI16Form<0b100000010, (outs R32C:$rT), (ins s16imm_i32:$val),
      "il\t$rT, $val", ImmLoad,
      [(set R32C:$rT, immSExt16:$val)]>;

def ILf32:
    RI16Form<0b100000010, (outs R32FP:$rT), (ins s16imm_f32:$val),
      "il\t$rT, $val", ImmLoad,
      [(set R32FP:$rT, (SPUFPconstant fpimmSExt16:$val))]>;

def ILf64:
    RI16Form<0b100000010, (outs R64FP:$rT), (ins s16imm_f64:$val),
      "il\t$rT, $val", ImmLoad,
      [(set R64FP:$rT, (SPUFPconstant fpimmSExt16:$val))]>;

// ILHU: immediate load halfword upper (16-bit immediate into the high
// halfword of each word element).
def ILHUv4i32:
    RI16Form<0b010000010, (outs VECREG:$rT), (ins u16imm:$val),
      "ilhu\t$rT, $val", ImmLoad,
      [(set VECREG:$rT, (v4i32 immILHUvec:$val))]>;

def ILHUr32:
    RI16Form<0b010000010, (outs R32C:$rT), (ins u16imm:$val),
      "ilhu\t$rT, $val", ImmLoad,
      [(set R32C:$rT, hi16:$val)]>;

// ILHUf32: Used to custom lower float constant loads
def ILHUf32:
    RI16Form<0b010000010, (outs R32FP:$rT), (ins f16imm:$val),
      "ilhu\t$rT, $val", ImmLoad,
      [(set R32FP:$rT, (SPUFPconstant hi16_f32:$val))]>;

// ILHUhi: Used for loading high portion of an address. Note the symbolHi
// printer used for the operand.
def ILHUhi : RI16Form<0b010000010, (outs R32C:$rT), (ins symbolHi:$val),
    "ilhu\t$rT, $val", ImmLoad,
    [(set R32C:$rT, hi16:$val)]>;

// Immediate load address (can also be used to load 18-bit unsigned constants,
// see the zext 16->32 pattern)
def ILAr64:
    RI18Form<0b1000010, (outs R64C:$rT), (ins u18imm_i64:$val),
      "ila\t$rT, $val", LoadNOP,
      [(set R64C:$rT, imm18:$val)]>;

def ILAv2i64:
    RI18Form<0b1000010, (outs VECREG:$rT), (ins u18imm:$val),
      "ila\t$rT, $val", LoadNOP,
      [(set (v2i64 VECREG:$rT), v2i64Uns18Imm:$val)]>;

def ILAv4i32:
    RI18Form<0b1000010, (outs VECREG:$rT), (ins u18imm:$val),
      "ila\t$rT, $val", LoadNOP,
      [(set (v4i32 VECREG:$rT), v4i32Uns18Imm:$val)]>;

def ILAr32:
    RI18Form<0b1000010, (outs R32C:$rT), (ins u18imm:$val),
      "ila\t$rT, $val", LoadNOP,
      [(set R32C:$rT, imm18:$val)]>;

def ILAf32:
    RI18Form<0b1000010, (outs R32FP:$rT), (ins f18imm:$val),
      "ila\t$rT, $val", LoadNOP,
      [(set R32FP:$rT, (SPUFPconstant fpimm18:$val))]>;

def ILAf64:
    RI18Form<0b1000010, (outs R64FP:$rT), (ins f18imm_f64:$val),
      "ila\t$rT, $val", LoadNOP,
      [(set R64FP:$rT, (SPUFPconstant fpimm18:$val))]>;

// ILAlo: loads the low portion of an address (symbolLo printer); pairs with
// ILHUhi above.
def ILAlo:
    RI18Form<0b1000010, (outs R32C:$rT), (ins symbolLo:$val),
      "ila\t$rT, $val", ImmLoad,
      [(set R32C:$rT, imm18:$val)]>;

// ILAlsa: load a local-store address symbol; selected manually, no pattern.
def ILAlsa:
    RI18Form<0b1000010, (outs R32C:$rT), (ins symbolLSA:$val),
      "ila\t$rT, $val", ImmLoad,
      [/* no pattern */]>;

// Immediate OR, Halfword Lower: The "other" part of loading large constants
// into 32-bit registers. See the anonymous pattern Pat<(i32 imm:$imm), ...>
// Note that these are really two operand instructions, but they're encoded
// as three operands with the first two arguments tied-to each other.

def IOHLvec:
    RI16Form<0b100000110, (outs VECREG:$rT), (ins VECREG:$rS, u16imm:$val),
      "iohl\t$rT, $val", ImmLoad,
      [/* insert intrinsic here */]>,
    RegConstraint<"$rS = $rT">,
    NoEncode<"$rS">;

def IOHLr32:
    RI16Form<0b100000110, (outs R32C:$rT), (ins R32C:$rS, i32imm:$val),
      "iohl\t$rT, $val", ImmLoad,
      [/* insert intrinsic here */]>,
    RegConstraint<"$rS = $rT">,
    NoEncode<"$rS">;

def IOHLf32:
    RI16Form<0b100000110, (outs R32FP:$rT), (ins R32FP:$rS, f32imm:$val),
      "iohl\t$rT, $val", ImmLoad,
      [/* insert intrinsic here */]>,
    RegConstraint<"$rS = $rT">,
    NoEncode<"$rS">;

def IOHLlo:
    RI16Form<0b100000110, (outs R32C:$rT), (ins R32C:$rS, symbolLo:$val),
      "iohl\t$rT, $val", ImmLoad,
      [/* no pattern */]>,
    RegConstraint<"$rS = $rT">,
    NoEncode<"$rS">;

// Form select mask for bytes using immediate, used in conjunction with the
// SELB instruction:

def FSMBIv16i8 : RI16Form<0b101001100, (outs VECREG:$rT), (ins u16imm:$val),
    "fsmbi\t$rT, $val", SelectOp,
    [(set (v16i8 VECREG:$rT), (SPUfsmbi_v16i8 immU16:$val))]>;

def FSMBIv8i16 : RI16Form<0b101001100, (outs VECREG:$rT), (ins u16imm:$val),
    "fsmbi\t$rT, $val", SelectOp,
    [(set (v8i16 VECREG:$rT), (SPUfsmbi_v8i16 immU16:$val))]>;

def FSMBIvecv4i32 : RI16Form<0b101001100, (outs VECREG:$rT), (ins u16imm:$val),
    "fsmbi\t$rT, $val", SelectOp,
    [(set (v4i32 VECREG:$rT), (SPUfsmbi_v4i32 immU16:$val))]>;

//===----------------------------------------------------------------------===//
// Integer and Logical Operations:
//===----------------------------------------------------------------------===//

def AHv8i16:
    RRForm<0b00010011000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
      "ah\t$rT, $rA, $rB", IntegerOp,
      [(set (v8i16 VECREG:$rT), (int_spu_si_ah VECREG:$rA, VECREG:$rB))]>;

// Generic v8i16 add also selects to AH (the def's own pattern matches the
// intrinsic form).
def : Pat<(add (v8i16 VECREG:$rA), (v8i16 VECREG:$rB)),
          (AHv8i16 VECREG:$rA, VECREG:$rB)>;

// [(set (v8i16 VECREG:$rT), (add (v8i16 VECREG:$rA), (v8i16 VECREG:$rB)))]>;

def AHr16:
    RRForm<0b00010011000, (outs R16C:$rT), (ins R16C:$rA, R16C:$rB),
      "ah\t$rT, $rA, $rB", IntegerOp,
      [(set R16C:$rT, (add R16C:$rA, R16C:$rB))]>;

def AHIvec:
    RI10Form<0b10111000, (outs VECREG:$rT), (ins VECREG:$rA, s10imm:$val),
      "ahi\t$rT, $rA, $val", IntegerOp,
      [(set (v8i16 VECREG:$rT), (add (v8i16 VECREG:$rA),
                                     v8i16SExt10Imm:$val))]>;

// NOTE(review): this scalar pattern uses the vector predicate
// v8i16SExt10Imm, while the parallel SFHIr16 below uses i16ImmSExt10 --
// verify which predicate is intended here.
def AHIr16 : RI10Form<0b10111000, (outs R16C:$rT), (ins R16C:$rA, s10imm:$val),
    "ahi\t$rT, $rA, $val", IntegerOp,
    [(set R16C:$rT, (add R16C:$rA, v8i16SExt10Imm:$val))]>;

def Avec : RRForm<0b00000011000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
    "a\t$rT, $rA, $rB", IntegerOp,
    [(set (v4i32 VECREG:$rT), (add (v4i32 VECREG:$rA), (v4i32 VECREG:$rB)))]>;

// Byte-vector add reuses the word-add instruction... no, it selects the same
// "a" opcode via this pattern:
def : Pat<(add (v16i8 VECREG:$rA), (v16i8 VECREG:$rB)),
          (Avec VECREG:$rA, VECREG:$rB)>;

def Ar32 : RRForm<0b00000011000, (outs R32C:$rT), (ins R32C:$rA, R32C:$rB),
    "a\t$rT, $rA, $rB", IntegerOp,
    [(set R32C:$rT, (add R32C:$rA, R32C:$rB))]>;

def Ar8:
    RRForm<0b00000011000, (outs R8C:$rT), (ins R8C:$rA, R8C:$rB),
      "a\t$rT, $rA, $rB", IntegerOp,
      [(set R8C:$rT, (add R8C:$rA, R8C:$rB))]>;

def AIvec:
    RI10Form<0b00111000, (outs VECREG:$rT), (ins VECREG:$rA, s10imm:$val),
      "ai\t$rT, $rA, $val", IntegerOp,
      [(set (v4i32 VECREG:$rT), (add (v4i32 VECREG:$rA),
                                     v4i32SExt10Imm:$val))]>;

def AIr32:
    RI10Form<0b00111000, (outs R32C:$rT), (ins R32C:$rA, s10imm_i32:$val),
      "ai\t$rT, $rA, $val", IntegerOp,
      [(set R32C:$rT, (add R32C:$rA, i32ImmSExt10:$val))]>;

// SF/SFH compute rB - rA ("subtract from"); the immediate forms compute
// imm - rA, which is why the pattern operands are reversed below.
def SFHvec:
    RRForm<0b00010010000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
      "sfh\t$rT, $rA, $rB", IntegerOp,
      [(set (v8i16 VECREG:$rT), (sub (v8i16 VECREG:$rA),
                                     (v8i16 VECREG:$rB)))]>;

def SFHr16:
    RRForm<0b00010010000, (outs R16C:$rT), (ins R16C:$rA, R16C:$rB),
      "sfh\t$rT, $rA, $rB", IntegerOp,
      [(set R16C:$rT, (sub R16C:$rA, R16C:$rB))]>;

def SFHIvec:
    RI10Form<0b10110000, (outs VECREG:$rT), (ins VECREG:$rA, s10imm:$val),
      "sfhi\t$rT, $rA, $val", IntegerOp,
      [(set (v8i16 VECREG:$rT), (sub v8i16SExt10Imm:$val,
                                     (v8i16 VECREG:$rA)))]>;

def SFHIr16 : RI10Form<0b10110000, (outs R16C:$rT), (ins R16C:$rA, s10imm:$val),
    "sfhi\t$rT, $rA, $val", IntegerOp,
    [(set R16C:$rT, (sub i16ImmSExt10:$val, R16C:$rA))]>;

def SFvec : RRForm<0b00000010000, (outs VECREG:$rT),
    (ins VECREG:$rA, VECREG:$rB),
    "sf\t$rT, $rA, $rB", IntegerOp,
    [(set (v4i32 VECREG:$rT), (sub (v4i32 VECREG:$rA), (v4i32 VECREG:$rB)))]>;

def SFr32 : RRForm<0b00000010000, (outs R32C:$rT), (ins R32C:$rA, R32C:$rB),
    "sf\t$rT, $rA, $rB", IntegerOp,
    [(set R32C:$rT, (sub R32C:$rA, R32C:$rB))]>;

def SFIvec:
    RI10Form<0b00110000, (outs VECREG:$rT), (ins VECREG:$rA, s10imm:$val),
      "sfi\t$rT, $rA, $val", IntegerOp,
      [(set (v4i32 VECREG:$rT), (sub v4i32SExt10Imm:$val,
                                     (v4i32 VECREG:$rA)))]>;

def SFIr32 : RI10Form<0b00110000, (outs R32C:$rT),
    (ins R32C:$rA, s10imm_i32:$val),
    "sfi\t$rT, $rA, $val", IntegerOp,
    [(set R32C:$rT, (sub i32ImmSExt10:$val, R32C:$rA))]>;

// Extended-precision add/subtract helpers. Each is only available in vector
// form, takes its carry/borrow input in $rCarry (tied to $rT, not encoded),
// and is selected manually rather than via a pattern.

// ADDX: add with carry.
def ADDXvec:
    RRForm<0b00000010110, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB,
                                                  VECREG:$rCarry),
      "addx\t$rT, $rA, $rB", IntegerOp,
      []>,
    RegConstraint<"$rCarry = $rT">,
    NoEncode<"$rCarry">;

// CG: carry generate.
def CGvec:
    RRForm<0b01000011000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB,
                                                  VECREG:$rCarry),
      "cg\t$rT, $rA, $rB", IntegerOp,
      []>,
    RegConstraint<"$rCarry = $rT">,
    NoEncode<"$rCarry">;

// SFX: subtract from with borrow.
def SFXvec:
    RRForm<0b10000010110, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB,
                                                  VECREG:$rCarry),
      "sfx\t$rT, $rA, $rB", IntegerOp,
      []>,
    RegConstraint<"$rCarry = $rT">,
    NoEncode<"$rCarry">;

// BG: borrow generate.
def BGvec:
    RRForm<0b01000010000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB,
                                                  VECREG:$rCarry),
      "bg\t$rT, $rA, $rB", IntegerOp,
      []>,
    RegConstraint<"$rCarry = $rT">,
    NoEncode<"$rCarry">;

// BGX: borrow generate extended.
def BGXvec:
    RRForm<0b11000010110, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB,
                                                  VECREG:$rCarry),
      "bgx\t$rT, $rA, $rB", IntegerOp,
      []>,
    RegConstraint<"$rCarry = $rT">,
    NoEncode<"$rCarry">;

// Halfword multiply variants.
// N.B.: these can be composed to build larger products (16x16 -> 32).

// mpy: signed 16 x 16 multiply, low halfword of each product.
def MPYv8i16:
    RRForm<0b00100011110, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
      "mpy\t$rT, $rA, $rB", IntegerMulDiv,
      [(set (v8i16 VECREG:$rT),
            (SPUmpy_v8i16 (v8i16 VECREG:$rA), (v8i16 VECREG:$rB)))]>;

def MPYr16:
    RRForm<0b00100011110, (outs R16C:$rT), (ins R16C:$rA, R16C:$rB),
      "mpy\t$rT, $rA, $rB", IntegerMulDiv,
      [(set R16C:$rT, (mul R16C:$rA, R16C:$rB))]>;

// mpyu: unsigned 16 x 16 multiply, 32-bit products.
def MPYUv4i32:
    RRForm<0b00110011110, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
      "mpyu\t$rT, $rA, $rB", IntegerMulDiv,
      [(set (v4i32 VECREG:$rT),
            (SPUmpyu_v4i32 (v4i32 VECREG:$rA), (v4i32 VECREG:$rB)))]>;

def MPYUr16:
    RRForm<0b00110011110, (outs R32C:$rT), (ins R16C:$rA, R16C:$rB),
      "mpyu\t$rT, $rA, $rB", IntegerMulDiv,
      [(set R32C:$rT, (mul (zext R16C:$rA),
                           (zext R16C:$rB)))]>;

def MPYUr32:
    RRForm<0b00110011110, (outs R32C:$rT), (ins R32C:$rA, R32C:$rB),
      "mpyu\t$rT, $rA, $rB", IntegerMulDiv,
      [(set R32C:$rT, (SPUmpyu_i32 R32C:$rA, R32C:$rB))]>;

// mpyi: multiply 16 x s10imm -> 32-bit result (custom lowering handles the
// 32-bit case; this instruction only produces the lower 16 bits).
def MPYIvec:
    RI10Form<0b00101110, (outs VECREG:$rT), (ins VECREG:$rA, s10imm:$val),
      "mpyi\t$rT, $rA, $val", IntegerMulDiv,
      [(set (v8i16 VECREG:$rT),
            (mul (v8i16 VECREG:$rA), v8i16SExt10Imm:$val))]>;

def MPYIr16:
    RI10Form<0b00101110, (outs R16C:$rT), (ins R16C:$rA, s10imm:$val),
      "mpyi\t$rT, $rA, $val", IntegerMulDiv,
      [(set R16C:$rT, (mul R16C:$rA, i16ImmSExt10:$val))]>;

// mpyui: unsigned multiply by immediate.  Same caveats as the other
// multiplies; matches no pattern but may be used during target DAG
// selection or lowering.
def MPYUIvec:
    RI10Form<0b10101110, (outs VECREG:$rT), (ins VECREG:$rA, s10imm:$val),
      "mpyui\t$rT, $rA, $val", IntegerMulDiv,
      []>;

def MPYUIr16:
    RI10Form<0b10101110, (outs R16C:$rT), (ins R16C:$rA, s10imm:$val),
      "mpyui\t$rT, $rA, $val", IntegerMulDiv,
      []>;
831
// mpya: multiply-and-add, 16 x 16 + 32 -> 32-bit result.
def MPYAvec:
    RRRForm<0b0011, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB, VECREG:$rC),
      "mpya\t$rT, $rA, $rB, $rC", IntegerMulDiv,
      [(set (v4i32 VECREG:$rT),
            (add (v4i32 (bitconvert (mul (v8i16 VECREG:$rA),
                                         (v8i16 VECREG:$rB)))),
                 (v4i32 VECREG:$rC)))]>;

def MPYAr32:
    RRRForm<0b0011, (outs R32C:$rT), (ins R16C:$rA, R16C:$rB, R32C:$rC),
      "mpya\t$rT, $rA, $rB, $rC", IntegerMulDiv,
      [(set R32C:$rT, (add (sext (mul R16C:$rA, R16C:$rB)),
                           R32C:$rC))]>;

// Also catch the form where both factors are explicitly sign-extended
// before the multiply.
def : Pat<(add (mul (sext R16C:$rA), (sext R16C:$rB)), R32C:$rC),
          (MPYAr32 R16C:$rA, R16C:$rB, R32C:$rC)>;

// ... and the sext_inreg form on 32-bit registers.
def MPYAr32_sextinreg:
    RRRForm<0b0011, (outs R32C:$rT), (ins R32C:$rA, R32C:$rB, R32C:$rC),
      "mpya\t$rT, $rA, $rB, $rC", IntegerMulDiv,
      [(set R32C:$rT, (add (mul (sext_inreg R32C:$rA, i16),
                                (sext_inreg R32C:$rB, i16)),
                           R32C:$rC))]>;

//def MPYAr32:
//    RRRForm<0b0011, (outs R32C:$rT), (ins R16C:$rA, R16C:$rB, R32C:$rC),
//      "mpya\t$rT, $rA, $rB, $rC", IntegerMulDiv,
//      [(set R32C:$rT, (add (sext (mul R16C:$rA, R16C:$rB)),
//                           R32C:$rC))]>;
861
// mpyh: multiply high; used to synthesize full 32-bit multiplies.
def MPYHv4i32:
    RRForm<0b10100011110, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
      "mpyh\t$rT, $rA, $rB", IntegerMulDiv,
      [(set (v4i32 VECREG:$rT),
            (SPUmpyh_v4i32 (v4i32 VECREG:$rA), (v4i32 VECREG:$rB)))]>;

def MPYHr32:
    RRForm<0b10100011110, (outs R32C:$rT), (ins R32C:$rA, R32C:$rB),
      "mpyh\t$rT, $rA, $rB", IntegerMulDiv,
      [(set R32C:$rT, (SPUmpyh_i32 R32C:$rA, R32C:$rB))]>;

// mpys: multiply and shift right; yields the top half of a 16-bit
// product, sign extended to 32 bits.  No pattern -- selected manually.
def MPYSvec:
    RRForm<0b11100011110, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
      "mpys\t$rT, $rA, $rB", IntegerMulDiv,
      []>;

def MPYSr16:
    RRForm<0b11100011110, (outs R32C:$rT), (ins R16C:$rA, R16C:$rB),
      "mpys\t$rT, $rA, $rB", IntegerMulDiv,
      []>;

// mpyhh: multiply high-high; 32-bit product of the upper 16 bits of
// $rA and $rB.
def MPYHHv8i16:
    RRForm<0b01100011110, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
      "mpyhh\t$rT, $rA, $rB", IntegerMulDiv,
      [(set (v8i16 VECREG:$rT),
            (SPUmpyhh_v8i16 (v8i16 VECREG:$rA), (v8i16 VECREG:$rB)))]>;

def MPYHHr32:
    RRForm<0b01100011110, (outs R32C:$rT), (ins R32C:$rA, R32C:$rB),
      "mpyhh\t$rT, $rA, $rB", IntegerMulDiv,
      []>;

// mpyhha: multiply high-high and accumulate into $rT.
def MPYHHAvec:
    RRForm<0b01100010110, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
      "mpyhha\t$rT, $rA, $rB", IntegerMulDiv,
      []>;

def MPYHHAr32:
    RRForm<0b01100010110, (outs R32C:$rT), (ins R32C:$rA, R32C:$rB),
      "mpyhha\t$rT, $rA, $rB", IntegerMulDiv,
      []>;

// mpyhhu: multiply high-high, unsigned.
def MPYHHUvec:
    RRForm<0b01110011110, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
      "mpyhhu\t$rT, $rA, $rB", IntegerMulDiv,
      []>;

def MPYHHUr32:
    RRForm<0b01110011110, (outs R32C:$rT), (ins R32C:$rA, R32C:$rB),
      "mpyhhu\t$rT, $rA, $rB", IntegerMulDiv,
      []>;

// mpyhhau: multiply high-high unsigned, accumulate into $rT.
def MPYHHAUvec:
    RRForm<0b01110010110, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
      "mpyhhau\t$rT, $rA, $rB", IntegerMulDiv,
      []>;

def MPYHHAUr32:
    RRForm<0b01110010110, (outs R32C:$rT), (ins R32C:$rA, R32C:$rB),
      "mpyhhau\t$rT, $rA, $rB", IntegerMulDiv,
      []>;
931
// clz: count leading zeroes, per 32-bit word.
def CLZv4i32:
    RRForm_1<0b10100101010, (outs VECREG:$rT), (ins VECREG:$rA),
      "clz\t$rT, $rA", IntegerOp,
      [/* intrinsic */]>;

def CLZr32:
    RRForm_1<0b10100101010, (outs R32C:$rT), (ins R32C:$rA),
      "clz\t$rT, $rA", IntegerOp,
      [(set R32C:$rT, (ctlz R32C:$rA))]>;

// cntb: count ones in bytes ("population count").
// NOTE: cntb is really a single vector instruction; the three defs below
// exist because custom lowering uses it in unorthodox ways to implement
// CTPOP for the other data types.
def CNTBv16i8:
    RRForm_1<0b00101101010, (outs VECREG:$rT), (ins VECREG:$rA),
      "cntb\t$rT, $rA", IntegerOp,
      [(set (v16i8 VECREG:$rT), (SPUcntb_v16i8 (v16i8 VECREG:$rA)))]>;

def CNTBv8i16 :
    RRForm_1<0b00101101010, (outs VECREG:$rT), (ins VECREG:$rA),
      "cntb\t$rT, $rA", IntegerOp,
      [(set (v8i16 VECREG:$rT), (SPUcntb_v8i16 (v8i16 VECREG:$rA)))]>;

def CNTBv4i32 :
    RRForm_1<0b00101101010, (outs VECREG:$rT), (ins VECREG:$rA),
      "cntb\t$rT, $rA", IntegerOp,
      [(set (v4i32 VECREG:$rT), (SPUcntb_v4i32 (v4i32 VECREG:$rA)))]>;
961
// Form-select-mask and gather instructions.  None of these match a
// pattern; they are emitted directly by lowering code.

// fsmb: form select mask for bytes.  Input operand $rA is 16 bits.
def FSMB:
    RRForm_1<0b01101101100, (outs VECREG:$rT), (ins R16C:$rA),
      "fsmb\t$rT, $rA", SelectOp,
      []>;

// fsmh: form select mask for halfwords.  Only the low 8 bits of $rA are
// significant, even though it is supplied as a 16-bit register.
def FSMH:
    RRForm_1<0b10101101100, (outs VECREG:$rT), (ins R16C:$rA),
      "fsmh\t$rT, $rA", SelectOp,
      []>;

// fsm: form select mask for words.  As with the other fsm* forms, only
// the low 4 bits of $rA are significant.
def FSM:
    RRForm_1<0b00101101100, (outs VECREG:$rT), (ins R16C:$rA),
      "fsm\t$rT, $rA", SelectOp,
      []>;

// gbb: gather the low-order bit of each byte of $rA into a single
// 16-bit quantity in $rT.
def GBB:
    RRForm_1<0b01001101100, (outs R16C:$rT), (ins VECREG:$rA),
      "gbb\t$rT, $rA", GatherOp,
      []>;

// gbh: gather the low-order bit of each halfword of $rA into a single
// 8-bit quantity in $rT.
def GBH:
    RRForm_1<0b10001101100, (outs R16C:$rT), (ins VECREG:$rA),
      "gbh\t$rT, $rA", GatherOp,
      []>;

// gb: gather the low-order bit of each word of $rA into a single
// 4-bit quantity in $rT.
def GB:
    RRForm_1<0b00001101100, (outs R16C:$rT), (ins VECREG:$rA),
      "gb\t$rT, $rA", GatherOp,
      []>;
1002
// Byte arithmetic; no ISel patterns (selected manually if at all).

// avgb: average of bytes.
def AVGB:
    RRForm<0b11001011000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
      "avgb\t$rT, $rA, $rB", ByteOp,
      []>;

// absdb: absolute difference of bytes.
def ABSDB:
    RRForm<0b11001010000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
      "absdb\t$rT, $rA, $rB", ByteOp,
      []>;

// sumb: sum bytes into halfwords.
def SUMB:
    RRForm<0b11001010010, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
      "sumb\t$rT, $rA, $rB", ByteOp,
      []>;
1020
// Sign extension operations: xsbh extends the byte in each halfword's
// low half to a full signed halfword.
// NOTE(review): the vector pattern writes (sext v16i8) -> v8i16, which
// mismatches element counts; it relies on lax type checking of this
// LLVM era -- confirm against the custom lowering that uses it.
def XSBHvec:
    RRForm_1<0b01101101010, (outs VECREG:$rDst), (ins VECREG:$rSrc),
      "xsbh\t$rDst, $rSrc", IntegerOp,
      [(set (v8i16 VECREG:$rDst), (sext (v16i8 VECREG:$rSrc)))]>;

// Ordinary (in-register) form of XSBH.
def XSBHr16:
    RRForm_1<0b01101101010, (outs R16C:$rDst), (ins R16C:$rSrc),
      "xsbh\t$rDst, $rSrc", IntegerOp,
      [(set R16C:$rDst, (sext_inreg R16C:$rSrc, i8))]>;

def XSBHr8:
    RRForm_1<0b01101101010, (outs R16C:$rDst), (ins R8C:$rSrc),
      "xsbh\t$rDst, $rSrc", IntegerOp,
      [(set R16C:$rDst, (sext R8C:$rSrc))]>;

// 32-bit form of XSBH: used to sign extend 8-bit quantities to 16-bit
// quantities to 32-bit quantities via a 32-bit register (see the sext
// 8->32 pattern below).  Intentionally matches sext_inreg only, because
// the sext 8->32 pattern needs to insert the extra XSHWr32 itself.
def XSBHr32:
    RRForm_1<0b01101101010, (outs R32C:$rDst), (ins R32C:$rSrc),
      "xsbh\t$rDst, $rSrc", IntegerOp,
      [(set R32C:$rDst, (sext_inreg R32C:$rSrc, i8))]>;
1047
// Sign extend halfwords to words.
// FIX: these three defs carried opcode 0b01101101010, which is the
// encoding of xsbh (ISA opcode 0x2B6, LSB-first in this file).  The
// correct xshw encoding is ISA opcode 0x2AE -> 0b01110101010, matching
// the convention used by every other RRForm_1 def in this file.
def XSHWvec:
    RRForm_1<0b01110101010, (outs VECREG:$rDest), (ins VECREG:$rSrc),
      "xshw\t$rDest, $rSrc", IntegerOp,
      [(set (v4i32 VECREG:$rDest), (sext (v8i16 VECREG:$rSrc)))]>;

def XSHWr32:
    RRForm_1<0b01110101010, (outs R32C:$rDst), (ins R32C:$rSrc),
      "xshw\t$rDst, $rSrc", IntegerOp,
      [(set R32C:$rDst, (sext_inreg R32C:$rSrc, i16))]>;

def XSHWr16:
    RRForm_1<0b01110101010, (outs R32C:$rDst), (ins R16C:$rSrc),
      "xshw\t$rDst, $rSrc", IntegerOp,
      [(set R32C:$rDst, (sext R16C:$rSrc))]>;
1063
// xswd: sign extend words to doublewords.
def XSWDvec:
    RRForm_1<0b01100101010, (outs VECREG:$rDst), (ins VECREG:$rSrc),
      "xswd\t$rDst, $rSrc", IntegerOp,
      [(set (v2i64 VECREG:$rDst), (sext (v4i32 VECREG:$rSrc)))]>;

def XSWDr64:
    RRForm_1<0b01100101010, (outs R64C:$rDst), (ins R64C:$rSrc),
      "xswd\t$rDst, $rSrc", IntegerOp,
      [(set R64C:$rDst, (sext_inreg R64C:$rSrc, i32))]>;

def XSWDr32:
    RRForm_1<0b01100101010, (outs R64C:$rDst), (ins R32C:$rSrc),
      "xswd\t$rDst, $rSrc", IntegerOp,
      [(set R64C:$rDst, (SPUsext32_to_64 R32C:$rSrc))]>;

// Plain sext of a 32-bit register also selects xswd.
def : Pat<(sext R32C:$inp),
          (XSWDr32 R32C:$inp)>;
1081
// AND operations, register-register forms.
def ANDv16i8:
    RRForm<0b10000011000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
      "and\t$rT, $rA, $rB", IntegerOp,
      [(set (v16i8 VECREG:$rT), (and (v16i8 VECREG:$rA),
                                     (v16i8 VECREG:$rB)))]>;

def ANDv8i16:
    RRForm<0b10000011000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
      "and\t$rT, $rA, $rB", IntegerOp,
      [(set (v8i16 VECREG:$rT), (and (v8i16 VECREG:$rA),
                                     (v8i16 VECREG:$rB)))]>;

def ANDv4i32:
    RRForm<0b10000011000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
      "and\t$rT, $rA, $rB", IntegerOp,
      [(set (v4i32 VECREG:$rT), (and (v4i32 VECREG:$rA),
                                     (v4i32 VECREG:$rB)))]>;

def ANDr32:
    RRForm<0b10000011000, (outs R32C:$rT), (ins R32C:$rA, R32C:$rB),
      "and\t$rT, $rA, $rB", IntegerOp,
      [(set R32C:$rT, (and R32C:$rA, R32C:$rB))]>;

//===---------------------------------------------
// Special AND forms used to implement fabs: bitwise ops are meaningless
// on floating-point values in general, but fabs only needs the sign bit
// cleared.  None of these match a pattern (see the fabs lowering).
def ANDfabs32:
    RRForm<0b10000011000, (outs R32FP:$rT), (ins R32FP:$rA, R32C:$rB),
      "and\t$rT, $rA, $rB", IntegerOp,
      [/* Intentionally does not match a pattern */]>;

def ANDfabs64:
    RRForm<0b10000011000, (outs R64FP:$rT), (ins R64FP:$rA, VECREG:$rB),
      "and\t$rT, $rA, $rB", IntegerOp,
      [/* Intentionally does not match a pattern */]>;

// Could reuse ANDv4i32, but a distinct def keeps the intent clear.
def ANDfabsvec:
    RRForm<0b10000011000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
      "and\t$rT, $rA, $rB", IntegerOp,
      [/* Intentionally does not match a pattern */]>;

//===---------------------------------------------

def ANDr16:
    RRForm<0b10000011000, (outs R16C:$rT), (ins R16C:$rA, R16C:$rB),
      "and\t$rT, $rA, $rB", IntegerOp,
      [(set R16C:$rT, (and R16C:$rA, R16C:$rB))]>;

def ANDr8:
    RRForm<0b10000011000, (outs R8C:$rT), (ins R8C:$rA, R8C:$rB),
      "and\t$rT, $rA, $rB", IntegerOp,
      [(set R8C:$rT, (and R8C:$rA, R8C:$rB))]>;

// Hacked form of AND used to zero-extend 16-bit quantities to 32-bit
// quantities -- see the 16->32 zext pattern.  The pattern is somewhat
// artificial: it could match compiler-generated code but is unlikely to.
def AND2To4:
    RRForm<0b10000011000, (outs R32C:$rT), (ins R16C:$rA, R32C:$rB),
      "and\t$rT, $rA, $rB", IntegerOp,
      [(set R32C:$rT, (and (zext R16C:$rA), R32C:$rB))]>;
1145
// andc: AND with complement of the second operand.
// N.B.: vnot_conv is one of those special target selection pattern
// fragments in which we expect a bit_convert on the constant; LLVM
// translates "not <reg>" to "xor <reg>, -1" (here, a constant -1 vector).
def ANDCv16i8:
    RRForm<0b10000011010, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
      "andc\t$rT, $rA, $rB", IntegerOp,
      [(set (v16i8 VECREG:$rT), (and (v16i8 VECREG:$rA),
                                     (vnot (v16i8 VECREG:$rB))))]>;

def ANDCv8i16:
    RRForm<0b10000011010, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
      "andc\t$rT, $rA, $rB", IntegerOp,
      [(set (v8i16 VECREG:$rT), (and (v8i16 VECREG:$rA),
                                     (vnot (v8i16 VECREG:$rB))))]>;

def ANDCv4i32:
    RRForm<0b10000011010, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
      "andc\t$rT, $rA, $rB", IntegerOp,
      [(set (v4i32 VECREG:$rT), (and (v4i32 VECREG:$rA),
                                     (vnot (v4i32 VECREG:$rB))))]>;

def ANDCr32:
    RRForm<0b10000011010, (outs R32C:$rT), (ins R32C:$rA, R32C:$rB),
      "andc\t$rT, $rA, $rB", IntegerOp,
      [(set R32C:$rT, (and R32C:$rA, (not R32C:$rB)))]>;

def ANDCr16:
    RRForm<0b10000011010, (outs R16C:$rT), (ins R16C:$rA, R16C:$rB),
      "andc\t$rT, $rA, $rB", IntegerOp,
      [(set R16C:$rT, (and R16C:$rA, (not R16C:$rB)))]>;

def ANDCr8:
    RRForm<0b10000011010, (outs R8C:$rT), (ins R8C:$rA, R8C:$rB),
      "andc\t$rT, $rA, $rB", IntegerOp,
      [(set R8C:$rT, (and R8C:$rA, (not R8C:$rB)))]>;
1182
// andbi/andhi: AND with an immediate, byte and halfword element sizes.
def ANDBIv16i8:
    RI10Form<0b01101000, (outs VECREG:$rT), (ins VECREG:$rA, u10imm:$val),
      "andbi\t$rT, $rA, $val", IntegerOp,
      [(set (v16i8 VECREG:$rT),
            (and (v16i8 VECREG:$rA), (v16i8 v16i8U8Imm:$val)))]>;

def ANDBIr8:
    RI10Form<0b01101000, (outs R8C:$rT), (ins R8C:$rA, u10imm_i8:$val),
      "andbi\t$rT, $rA, $val", IntegerOp,
      [(set R8C:$rT, (and R8C:$rA, immU8:$val))]>;

def ANDHIv8i16:
    RI10Form<0b10101000, (outs VECREG:$rT), (ins VECREG:$rA, s10imm:$val),
      "andhi\t$rT, $rA, $val", IntegerOp,
      [(set (v8i16 VECREG:$rT),
            (and (v8i16 VECREG:$rA), v8i16SExt10Imm:$val))]>;

// NOTE(review): operand is s10imm but the pattern predicate is the
// unsigned i16ImmUns10, unlike ANDHIv8i16's signed predicate -- confirm
// which immediate range is intended.
def ANDHIr16:
    RI10Form<0b10101000, (outs R16C:$rT), (ins R16C:$rA, s10imm:$val),
      "andhi\t$rT, $rA, $val", IntegerOp,
      [(set R16C:$rT, (and R16C:$rA, i16ImmUns10:$val))]>;

// Hacked form of ANDHI used to zero-extend i8 quantities to i16.
def ANDHI1To2:
    RI10Form<0b10101000, (outs R16C:$rT), (ins R8C:$rA, s10imm:$val),
      "andhi\t$rT, $rA, $val", IntegerOp,
      [(set R16C:$rT, (and (zext R8C:$rA), i16ImmSExt10:$val))]>;
Scott Michel8b6b4202007-12-04 22:35:58 +00001209
// andi: AND word with sign-extended 10-bit immediate.
def ANDIv4i32:
    RI10Form<0b00101000, (outs VECREG:$rT), (ins VECREG:$rA, s10imm:$val),
      "andi\t$rT, $rA, $val", IntegerOp,
      [(set (v4i32 VECREG:$rT),
            (and (v4i32 VECREG:$rA), v4i32SExt10Imm:$val))]>;

// FIX: the three scalar forms below carried opcode 0b10101000, which is
// andhi's encoding (ISA 0x15, LSB-first) and contradicted ANDIv4i32.
// andi is ISA opcode 0x14 -> 0b00101000, matching the vector form.
def ANDIr32:
    RI10Form<0b00101000, (outs R32C:$rT), (ins R32C:$rA, s10imm_i32:$val),
      "andi\t$rT, $rA, $val", IntegerOp,
      [(set R32C:$rT, (and R32C:$rA, i32ImmSExt10:$val))]>;

// Hacked form of ANDI to zero-extend i8 quantities to i32.  See the
// zext 8->32 pattern below.
def ANDI1To4:
    RI10Form<0b00101000, (outs R32C:$rT), (ins R8C:$rA, s10imm_i32:$val),
      "andi\t$rT, $rA, $val", IntegerOp,
      [(set R32C:$rT, (and (zext R8C:$rA), i32ImmSExt10:$val))]>;

// Hacked form of ANDI to zero-extend i16 quantities to i32.  See the
// zext 16->32 pattern below.
//
// Note that this pattern is somewhat artificial, since it might match
// something the compiler generates but is unlikely to occur in practice.
def ANDI2To4:
    RI10Form<0b00101000, (outs R32C:$rT), (ins R16C:$rA, s10imm_i32:$val),
      "andi\t$rT, $rA, $val", IntegerOp,
      [(set R32C:$rT, (and (zext R16C:$rA), i32ImmSExt10:$val))]>;
1237
// Bitwise OR group.
// N.B.: these also serve as register-register copy instructions.
def ORv16i8:
    RRForm<0b10000010000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
      "or\t$rT, $rA, $rB", IntegerOp,
      [(set (v16i8 VECREG:$rT), (or (v16i8 VECREG:$rA), (v16i8 VECREG:$rB)))]>;

def ORv8i16:
    RRForm<0b10000010000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
      "or\t$rT, $rA, $rB", IntegerOp,
      [(set (v8i16 VECREG:$rT), (or (v8i16 VECREG:$rA), (v8i16 VECREG:$rB)))]>;

def ORv4i32:
    RRForm<0b10000010000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
      "or\t$rT, $rA, $rB", IntegerOp,
      [(set (v4i32 VECREG:$rT), (or (v4i32 VECREG:$rA), (v4i32 VECREG:$rB)))]>;

// Floating-point vector forms go through a bitconvert of the integer or.
def ORv4f32:
    RRForm<0b10000010000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
      "or\t$rT, $rA, $rB", IntegerOp,
      [(set (v4f32 VECREG:$rT),
            (v4f32 (bitconvert (or (v4i32 VECREG:$rA), (v4i32 VECREG:$rB)))))]>;

def ORv2f64:
    RRForm<0b10000010000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
      "or\t$rT, $rA, $rB", IntegerOp,
      [(set (v2f64 VECREG:$rT),
            (v2f64 (bitconvert (or (v2i64 VECREG:$rA), (v2i64 VECREG:$rB)))))]>;

def ORgprc:
    RRForm<0b10000010000, (outs GPRC:$rT), (ins GPRC:$rA, GPRC:$rB),
      "or\t$rT, $rA, $rB", IntegerOp,
      [(set GPRC:$rT, (or GPRC:$rA, GPRC:$rB))]>;

def ORr64:
    RRForm<0b10000010000, (outs R64C:$rT), (ins R64C:$rA, R64C:$rB),
      "or\t$rT, $rA, $rB", IntegerOp,
      [(set R64C:$rT, (or R64C:$rA, R64C:$rB))]>;

def ORr32:
    RRForm<0b10000010000, (outs R32C:$rT), (ins R32C:$rA, R32C:$rB),
      "or\t$rT, $rA, $rB", IntegerOp,
      [(set R32C:$rT, (or R32C:$rA, R32C:$rB))]>;

def ORr16:
    RRForm<0b10000010000, (outs R16C:$rT), (ins R16C:$rA, R16C:$rB),
      "or\t$rT, $rA, $rB", IntegerOp,
      [(set R16C:$rT, (or R16C:$rA, R16C:$rB))]>;

def ORr8:
    RRForm<0b10000010000, (outs R8C:$rT), (ins R8C:$rA, R8C:$rB),
      "or\t$rT, $rA, $rB", IntegerOp,
      [(set R8C:$rT, (or R8C:$rA, R8C:$rB))]>;

// OR forms used only to copy f32 and f64 registers; no patterns.
def ORf32:
    RRForm<0b10000010000, (outs R32FP:$rT), (ins R32FP:$rA, R32FP:$rB),
      "or\t$rT, $rA, $rB", IntegerOp,
      [/* no pattern */]>;

def ORf64:
    RRForm<0b10000010000, (outs R64FP:$rT), (ins R64FP:$rA, R64FP:$rB),
      "or\t$rT, $rA, $rB", IntegerOp,
      [/* no pattern */]>;
1303
// ORv*_*: scalar->vector promotions, implemented as "or $rA, $rA, $rA".
// The instruction defs carry no pattern; each is selected by the
// SPUpromote_scalar pat that follows it.
def ORv16i8_i8:
    RRForm<0b10000010000, (outs VECREG:$rT), (ins R8C:$rA, R8C:$rB),
      "or\t$rT, $rA, $rB", IntegerOp,
      [/* no pattern */]>;

def : Pat<(v16i8 (SPUpromote_scalar R8C:$rA)),
          (ORv16i8_i8 R8C:$rA, R8C:$rA)>;

def ORv8i16_i16:
    RRForm<0b10000010000, (outs VECREG:$rT), (ins R16C:$rA, R16C:$rB),
      "or\t$rT, $rA, $rB", IntegerOp,
      [/* no pattern */]>;

def : Pat<(v8i16 (SPUpromote_scalar R16C:$rA)),
          (ORv8i16_i16 R16C:$rA, R16C:$rA)>;

def ORv4i32_i32:
    RRForm<0b10000010000, (outs VECREG:$rT), (ins R32C:$rA, R32C:$rB),
      "or\t$rT, $rA, $rB", IntegerOp,
      [/* no pattern */]>;

def : Pat<(v4i32 (SPUpromote_scalar R32C:$rA)),
          (ORv4i32_i32 R32C:$rA, R32C:$rA)>;

def ORv2i64_i64:
    RRForm<0b10000010000, (outs VECREG:$rT), (ins R64C:$rA, R64C:$rB),
      "or\t$rT, $rA, $rB", IntegerOp,
      [/* no pattern */]>;

def : Pat<(v2i64 (SPUpromote_scalar R64C:$rA)),
          (ORv2i64_i64 R64C:$rA, R64C:$rA)>;

def ORv4f32_f32:
    RRForm<0b10000010000, (outs VECREG:$rT), (ins R32FP:$rA, R32FP:$rB),
      "or\t$rT, $rA, $rB", IntegerOp,
      [/* no pattern */]>;

def : Pat<(v4f32 (SPUpromote_scalar R32FP:$rA)),
          (ORv4f32_f32 R32FP:$rA, R32FP:$rA)>;

def ORv2f64_f64:
    RRForm<0b10000010000, (outs VECREG:$rT), (ins R64FP:$rA, R64FP:$rB),
      "or\t$rT, $rA, $rB", IntegerOp,
      [/* no pattern */]>;

def : Pat<(v2f64 (SPUpromote_scalar R64FP:$rA)),
          (ORv2f64_f64 R64FP:$rA, R64FP:$rA)>;
1352
// ORi*_v*: extract vector element 0 (the preferred slot) into a scalar
// register, again via a self-or.  Each def is selected by the pair of
// SPUextract_elt0 / SPUextract_elt0_chained pats that follows it.
def ORi8_v16i8:
    RRForm<0b10000010000, (outs R8C:$rT), (ins VECREG:$rA, VECREG:$rB),
      "or\t$rT, $rA, $rB", IntegerOp,
      [/* no pattern */]>;

def : Pat<(SPUextract_elt0 (v16i8 VECREG:$rA)),
          (ORi8_v16i8 VECREG:$rA, VECREG:$rA)>;

def : Pat<(SPUextract_elt0_chained (v16i8 VECREG:$rA)),
          (ORi8_v16i8 VECREG:$rA, VECREG:$rA)>;

def ORi16_v8i16:
    RRForm<0b10000010000, (outs R16C:$rT), (ins VECREG:$rA, VECREG:$rB),
      "or\t$rT, $rA, $rB", IntegerOp,
      [/* no pattern */]>;

def : Pat<(SPUextract_elt0 (v8i16 VECREG:$rA)),
          (ORi16_v8i16 VECREG:$rA, VECREG:$rA)>;

def : Pat<(SPUextract_elt0_chained (v8i16 VECREG:$rA)),
          (ORi16_v8i16 VECREG:$rA, VECREG:$rA)>;

def ORi32_v4i32:
    RRForm<0b10000010000, (outs R32C:$rT), (ins VECREG:$rA, VECREG:$rB),
      "or\t$rT, $rA, $rB", IntegerOp,
      [/* no pattern */]>;

def : Pat<(SPUextract_elt0 (v4i32 VECREG:$rA)),
          (ORi32_v4i32 VECREG:$rA, VECREG:$rA)>;

def : Pat<(SPUextract_elt0_chained (v4i32 VECREG:$rA)),
          (ORi32_v4i32 VECREG:$rA, VECREG:$rA)>;

def ORi64_v2i64:
    RRForm<0b10000010000, (outs R64C:$rT), (ins VECREG:$rA, VECREG:$rB),
      "or\t$rT, $rA, $rB", IntegerOp,
      [/* no pattern */]>;

def : Pat<(SPUextract_elt0 (v2i64 VECREG:$rA)),
          (ORi64_v2i64 VECREG:$rA, VECREG:$rA)>;

def : Pat<(SPUextract_elt0_chained (v2i64 VECREG:$rA)),
          (ORi64_v2i64 VECREG:$rA, VECREG:$rA)>;

def ORf32_v4f32:
    RRForm<0b10000010000, (outs R32FP:$rT), (ins VECREG:$rA, VECREG:$rB),
      "or\t$rT, $rA, $rB", IntegerOp,
      [/* no pattern */]>;

def : Pat<(SPUextract_elt0 (v4f32 VECREG:$rA)),
          (ORf32_v4f32 VECREG:$rA, VECREG:$rA)>;

def : Pat<(SPUextract_elt0_chained (v4f32 VECREG:$rA)),
          (ORf32_v4f32 VECREG:$rA, VECREG:$rA)>;

def ORf64_v2f64:
    RRForm<0b10000010000, (outs R64FP:$rT), (ins VECREG:$rA, VECREG:$rB),
      "or\t$rT, $rA, $rB", IntegerOp,
      [/* no pattern */]>;

def : Pat<(SPUextract_elt0 (v2f64 VECREG:$rA)),
          (ORf64_v2f64 VECREG:$rA, VECREG:$rA)>;

def : Pat<(SPUextract_elt0_chained (v2f64 VECREG:$rA)),
          (ORf64_v2f64 VECREG:$rA, VECREG:$rA)>;
1419
// ORC: bitwise "or" with complement (match before ORvec, ORr32).
// FIX: these defs carried opcode 0b10010010000, which is nor's encoding
// (ISA 0x049, LSB-first) and also collided with the XOR defs below.
// orc is ISA opcode 0x2C9 -> 0b10010011010, consistent with andc
// (0x2C1 -> 0b10000011010) elsewhere in this file.
def ORCv16i8:
    RRForm<0b10010011010, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
      "orc\t$rT, $rA, $rB", IntegerOp,
      [(set (v16i8 VECREG:$rT), (or (v16i8 VECREG:$rA),
                                    (vnot (v16i8 VECREG:$rB))))]>;

def ORCv8i16:
    RRForm<0b10010011010, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
      "orc\t$rT, $rA, $rB", IntegerOp,
      [(set (v8i16 VECREG:$rT), (or (v8i16 VECREG:$rA),
                                    (vnot (v8i16 VECREG:$rB))))]>;

def ORCv4i32:
    RRForm<0b10010011010, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
      "orc\t$rT, $rA, $rB", IntegerOp,
      [(set (v4i32 VECREG:$rT), (or (v4i32 VECREG:$rA),
                                    (vnot (v4i32 VECREG:$rB))))]>;

def ORCr32:
    RRForm<0b10010011010, (outs R32C:$rT), (ins R32C:$rA, R32C:$rB),
      "orc\t$rT, $rA, $rB", IntegerOp,
      [(set R32C:$rT, (or R32C:$rA, (not R32C:$rB)))]>;

def ORCr16:
    RRForm<0b10010011010, (outs R16C:$rT), (ins R16C:$rA, R16C:$rB),
      "orc\t$rT, $rA, $rB", IntegerOp,
      [(set R16C:$rT, (or R16C:$rA, (not R16C:$rB)))]>;

def ORCr8:
    RRForm<0b10010011010, (outs R8C:$rT), (ins R8C:$rA, R8C:$rB),
      "orc\t$rT, $rA, $rB", IntegerOp,
      [(set R8C:$rT, (or R8C:$rA, (not R8C:$rB)))]>;
1453
// OR with byte immediate.
def ORBIv16i8:
    RI10Form<0b01100000, (outs VECREG:$rT), (ins VECREG:$rA, u10imm:$val),
      "orbi\t$rT, $rA, $val", IntegerOp,
      [(set (v16i8 VECREG:$rT),
            (or (v16i8 VECREG:$rA), (v16i8 v16i8U8Imm:$val)))]>;

def ORBIr8:
    RI10Form<0b01100000, (outs R8C:$rT), (ins R8C:$rA, u10imm_i8:$val),
      "orbi\t$rT, $rA, $val", IntegerOp,
      [(set R8C:$rT, (or R8C:$rA, immU8:$val))]>;

// OR with halfword immediate.
def ORHIv8i16:
    RI10Form<0b10100000, (outs VECREG:$rT), (ins VECREG:$rA, u10imm:$val),
      "orhi\t$rT, $rA, $val", IntegerOp,
      [(set (v8i16 VECREG:$rT), (or (v8i16 VECREG:$rA),
                                    v8i16Uns10Imm:$val))]>;

def ORHIr16:
    RI10Form<0b10100000, (outs R16C:$rT), (ins R16C:$rA, u10imm:$val),
      "orhi\t$rT, $rA, $val", IntegerOp,
      [(set R16C:$rT, (or R16C:$rA, i16ImmUns10:$val))]>;

// Hacked form of ORHI used to promote 8-bit registers to 16-bit.
def ORHI1To2:
    RI10Form<0b10100000, (outs R16C:$rT), (ins R8C:$rA, s10imm:$val),
      "orhi\t$rT, $rA, $val", IntegerOp,
      [(set R16C:$rT, (or (anyext R8C:$rA), i16ImmSExt10:$val))]>;

// OR with word immediate.
def ORIv4i32:
    RI10Form<0b00100000, (outs VECREG:$rT), (ins VECREG:$rA, u10imm:$val),
      "ori\t$rT, $rA, $val", IntegerOp,
      [(set (v4i32 VECREG:$rT), (or (v4i32 VECREG:$rA),
                                    v4i32Uns10Imm:$val))]>;

def ORIr32:
    RI10Form<0b00100000, (outs R32C:$rT), (ins R32C:$rA, u10imm_i32:$val),
      "ori\t$rT, $rA, $val", IntegerOp,
      [(set R32C:$rT, (or R32C:$rA, i32ImmUns10:$val))]>;

// 64-bit form; no pattern (used as a copy/materialization helper).
def ORIr64:
    RI10Form_1<0b00100000, (outs R64C:$rT), (ins R64C:$rA, s10imm_i32:$val),
      "ori\t$rT, $rA, $val", IntegerOp,
      [/* no pattern */]>;

// ORI2To4: hacked version of ori used to extend 16-bit quantities to
// 32-bit quantities; matches only "anyext" conversions (vide infra the
// "anyext 16->32" pattern).
def ORI2To4:
    RI10Form<0b00100000, (outs R32C:$rT), (ins R16C:$rA, s10imm_i32:$val),
      "ori\t$rT, $rA, $val", IntegerOp,
      [(set R32C:$rT, (or (anyext R16C:$rA), i32ImmSExt10:$val))]>;

// ORI1To4: as ORI2To4, but extending 8-bit quantities to 32-bit.
def ORI1To4:
    RI10Form<0b00100000, (outs R32C:$rT), (ins R8C:$rA, s10imm_i32:$val),
      "ori\t$rT, $rA, $val", IntegerOp,
      [(set R32C:$rT, (or (anyext R8C:$rA), i32ImmSExt10:$val))]>;
1516
// orx: "or" across the vector -- ors $rA's four word slots, leaving the
// result in word slot 0 of $rT; slots 1-3 are zeroed.
//
// FIXME: needs to match an intrinsic pattern.
// NOTE(review): opcode 0b10010010000 decodes (LSB-first, per this file's
// convention) to nor (0x049), and was also shared by the ORC/XOR defs --
// verify orx's encoding against the SPU ISA before emitting binaries.
def ORXv4i32:
    RRForm<0b10010010000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
      "orx\t$rT, $rA, $rB", IntegerOp,
      []>;
1525
// XOR, register-register forms.
// FIX: these defs carried opcode 0b10010010000, which is nor's encoding
// (ISA 0x049, LSB-first) and collided with ORC/ORX above.  xor is ISA
// opcode 0x241 -> 0b10000010010, consistent with and (0x0C1) and or
// (0x041) elsewhere in this file.
def XORv16i8:
    RRForm<0b10000010010, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
      "xor\t$rT, $rA, $rB", IntegerOp,
      [(set (v16i8 VECREG:$rT), (xor (v16i8 VECREG:$rA), (v16i8 VECREG:$rB)))]>;

def XORv8i16:
    RRForm<0b10000010010, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
      "xor\t$rT, $rA, $rB", IntegerOp,
      [(set (v8i16 VECREG:$rT), (xor (v8i16 VECREG:$rA), (v8i16 VECREG:$rB)))]>;

def XORv4i32:
    RRForm<0b10000010010, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
      "xor\t$rT, $rA, $rB", IntegerOp,
      [(set (v4i32 VECREG:$rT), (xor (v4i32 VECREG:$rA), (v4i32 VECREG:$rB)))]>;

def XORr32:
    RRForm<0b10000010010, (outs R32C:$rT), (ins R32C:$rA, R32C:$rB),
      "xor\t$rT, $rA, $rB", IntegerOp,
      [(set R32C:$rT, (xor R32C:$rA, R32C:$rB))]>;
1546
//==----------------------------------------------------------
// Special forms for floating point instructions.
// Bitwise ORs and ANDs don't make sense for normal floating
// point numbers. These operations (fneg and fabs), however,
// require bitwise logical ops to manipulate the sign bit.
// These defs deliberately carry empty pattern lists; the lowering code for
// fneg/fabs emits them directly.
def XORfneg32:
    RRForm<0b10010010000, (outs R32FP:$rT), (ins R32FP:$rA, R32C:$rB),
      "xor\t$rT, $rA, $rB", IntegerOp,
      [/* Intentionally does not match a pattern, see fneg32 */]>;

// KLUDGY! Better way to do this without a VECREG? bitconvert?
// VECREG is assumed to contain two identical 64-bit masks, so
// it doesn't matter which word we select for the xor
def XORfneg64:
    RRForm<0b10010010000, (outs R64FP:$rT), (ins R64FP:$rA, VECREG:$rB),
      "xor\t$rT, $rA, $rB", IntegerOp,
      [/* Intentionally does not match a pattern, see fneg64 */]>;

// Could use XORv4i32, but will use this for clarity
def XORfnegvec:
    RRForm<0b10010010000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
      "xor\t$rT, $rA, $rB", IntegerOp,
      [/* Intentionally does not match a pattern, see fneg{32,64} */]>;
1570
1571//==----------------------------------------------------------
1572
// 16- and 8-bit scalar xor; same opcode as the 32-bit form, narrower
// register classes.
def XORr16:
    RRForm<0b10010010000, (outs R16C:$rT), (ins R16C:$rA, R16C:$rB),
      "xor\t$rT, $rA, $rB", IntegerOp,
      [(set R16C:$rT, (xor R16C:$rA, R16C:$rB))]>;

def XORr8:
    RRForm<0b10010010000, (outs R8C:$rT), (ins R8C:$rA, R8C:$rB),
      "xor\t$rT, $rA, $rB", IntegerOp,
      [(set R8C:$rT, (xor R8C:$rA, R8C:$rB))]>;
1582
// Immediate xor forms: xorbi (byte), xorhi (halfword), xori (word).
// Each takes a 10-bit immediate field; the *SExt10 / U8 fragments constrain
// which constants are accepted by instruction selection.
def XORBIv16i8:
    RI10Form<0b01100000, (outs VECREG:$rT), (ins VECREG:$rA, u10imm:$val),
      "xorbi\t$rT, $rA, $val", IntegerOp,
      [(set (v16i8 VECREG:$rT), (xor (v16i8 VECREG:$rA), v16i8U8Imm:$val))]>;

def XORBIr8:
    RI10Form<0b01100000, (outs R8C:$rT), (ins R8C:$rA, u10imm_i8:$val),
      "xorbi\t$rT, $rA, $val", IntegerOp,
      [(set R8C:$rT, (xor R8C:$rA, immU8:$val))]>;

def XORHIv8i16:
    RI10Form<0b10100000, (outs VECREG:$rT), (ins VECREG:$rA, s10imm:$val),
      "xorhi\t$rT, $rA, $val", IntegerOp,
      [(set (v8i16 VECREG:$rT), (xor (v8i16 VECREG:$rA),
                                     v8i16SExt10Imm:$val))]>;

def XORHIr16:
    RI10Form<0b10100000, (outs R16C:$rT), (ins R16C:$rA, s10imm:$val),
      "xorhi\t$rT, $rA, $val", IntegerOp,
      [(set R16C:$rT, (xor R16C:$rA, i16ImmSExt10:$val))]>;

def XORIv4i32:
    RI10Form<0b00100000, (outs VECREG:$rT), (ins VECREG:$rA, s10imm:$val),
      "xori\t$rT, $rA, $val", IntegerOp,
      [(set (v4i32 VECREG:$rT), (xor (v4i32 VECREG:$rA),
                                     v4i32SExt10Imm:$val))]>;

def XORIr32:
    RI10Form<0b00100000, (outs R32C:$rT), (ins R32C:$rA, s10imm_i32:$val),
      "xori\t$rT, $rA, $val", IntegerOp,
      [(set R32C:$rT, (xor R32C:$rA, i32ImmSExt10:$val))]>;
1614
// NAND: rT = ~(rA & rB). Matched as (vnot (and ...)) for vectors and
// (not (and ...)) for scalars.
def NANDv16i8:
    RRForm<0b10010010000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
      "nand\t$rT, $rA, $rB", IntegerOp,
      [(set (v16i8 VECREG:$rT), (vnot (and (v16i8 VECREG:$rA),
                                           (v16i8 VECREG:$rB))))]>;

def NANDv8i16:
    RRForm<0b10010010000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
      "nand\t$rT, $rA, $rB", IntegerOp,
      [(set (v8i16 VECREG:$rT), (vnot (and (v8i16 VECREG:$rA),
                                           (v8i16 VECREG:$rB))))]>;

def NANDv4i32:
    RRForm<0b10010010000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
      "nand\t$rT, $rA, $rB", IntegerOp,
      [(set (v4i32 VECREG:$rT), (vnot (and (v4i32 VECREG:$rA),
                                           (v4i32 VECREG:$rB))))]>;

def NANDr32:
    RRForm<0b10010010000, (outs R32C:$rT), (ins R32C:$rA, R32C:$rB),
      "nand\t$rT, $rA, $rB", IntegerOp,
      [(set R32C:$rT, (not (and R32C:$rA, R32C:$rB)))]>;

def NANDr16:
    RRForm<0b10010010000, (outs R16C:$rT), (ins R16C:$rA, R16C:$rB),
      "nand\t$rT, $rA, $rB", IntegerOp,
      [(set R16C:$rT, (not (and R16C:$rA, R16C:$rB)))]>;

def NANDr8:
    RRForm<0b10010010000, (outs R8C:$rT), (ins R8C:$rA, R8C:$rB),
      "nand\t$rT, $rA, $rB", IntegerOp,
      [(set R8C:$rT, (not (and R8C:$rA, R8C:$rB)))]>;
1648
// NOR: rT = ~(rA | rB). Matched as (vnot (or ...)) for vectors and
// (not (or ...)) for scalars.
def NORv16i8:
    RRForm<0b10010010000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
      "nor\t$rT, $rA, $rB", IntegerOp,
      [(set (v16i8 VECREG:$rT), (vnot (or (v16i8 VECREG:$rA),
                                          (v16i8 VECREG:$rB))))]>;

def NORv8i16:
    RRForm<0b10010010000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
      "nor\t$rT, $rA, $rB", IntegerOp,
      [(set (v8i16 VECREG:$rT), (vnot (or (v8i16 VECREG:$rA),
                                          (v8i16 VECREG:$rB))))]>;

def NORv4i32:
    RRForm<0b10010010000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
      "nor\t$rT, $rA, $rB", IntegerOp,
      [(set (v4i32 VECREG:$rT), (vnot (or (v4i32 VECREG:$rA),
                                          (v4i32 VECREG:$rB))))]>;

def NORr32:
    RRForm<0b10010010000, (outs R32C:$rT), (ins R32C:$rA, R32C:$rB),
      "nor\t$rT, $rA, $rB", IntegerOp,
      [(set R32C:$rT, (not (or R32C:$rA, R32C:$rB)))]>;

def NORr16:
    RRForm<0b10010010000, (outs R16C:$rT), (ins R16C:$rA, R16C:$rB),
      "nor\t$rT, $rA, $rB", IntegerOp,
      [(set R16C:$rT, (not (or R16C:$rA, R16C:$rB)))]>;

def NORr8:
    RRForm<0b10010010000, (outs R8C:$rT), (ins R8C:$rA, R8C:$rB),
      "nor\t$rT, $rA, $rB", IntegerOp,
      [(set R8C:$rT, (not (or R8C:$rA, R8C:$rB)))]>;
1682
// EQV: Equivalence (1 for each same bit, otherwise 0)
//
// The primary patterns match the canonical expansion of ~(rA ^ rB):
// (rA & rB) | (~rA & ~rB). The anonymous Pat<> records after each def
// additionally match xor-with-a-complemented-operand, which is the same
// function.
def EQVv16i8:
    RRForm<0b10010010000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
      "eqv\t$rT, $rA, $rB", IntegerOp,
      [(set (v16i8 VECREG:$rT), (or (and (v16i8 VECREG:$rA),
                                         (v16i8 VECREG:$rB)),
                                    (and (vnot (v16i8 VECREG:$rA)),
                                         (vnot (v16i8 VECREG:$rB)))))]>;

def : Pat<(xor (v16i8 VECREG:$rA), (vnot (v16i8 VECREG:$rB))),
          (EQVv16i8 VECREG:$rA, VECREG:$rB)>;

def : Pat<(xor (vnot (v16i8 VECREG:$rA)), (v16i8 VECREG:$rB)),
          (EQVv16i8 VECREG:$rA, VECREG:$rB)>;

def EQVv8i16:
    RRForm<0b10010010000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
      "eqv\t$rT, $rA, $rB", IntegerOp,
      [(set (v8i16 VECREG:$rT), (or (and (v8i16 VECREG:$rA),
                                         (v8i16 VECREG:$rB)),
                                    (and (vnot (v8i16 VECREG:$rA)),
                                         (vnot (v8i16 VECREG:$rB)))))]>;

def : Pat<(xor (v8i16 VECREG:$rA), (vnot (v8i16 VECREG:$rB))),
          (EQVv8i16 VECREG:$rA, VECREG:$rB)>;

def : Pat<(xor (vnot (v8i16 VECREG:$rA)), (v8i16 VECREG:$rB)),
          (EQVv8i16 VECREG:$rA, VECREG:$rB)>;

def EQVv4i32:
    RRForm<0b10010010000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
      "eqv\t$rT, $rA, $rB", IntegerOp,
      [(set (v4i32 VECREG:$rT), (or (and (v4i32 VECREG:$rA),
                                         (v4i32 VECREG:$rB)),
                                    (and (vnot (v4i32 VECREG:$rA)),
                                         (vnot (v4i32 VECREG:$rB)))))]>;

def : Pat<(xor (v4i32 VECREG:$rA), (vnot (v4i32 VECREG:$rB))),
          (EQVv4i32 VECREG:$rA, VECREG:$rB)>;

def : Pat<(xor (vnot (v4i32 VECREG:$rA)), (v4i32 VECREG:$rB)),
          (EQVv4i32 VECREG:$rA, VECREG:$rB)>;

def EQVr32:
    RRForm<0b10010010000, (outs R32C:$rT), (ins R32C:$rA, R32C:$rB),
      "eqv\t$rT, $rA, $rB", IntegerOp,
      [(set R32C:$rT, (or (and R32C:$rA, R32C:$rB),
                          (and (not R32C:$rA), (not R32C:$rB))))]>;

def : Pat<(xor R32C:$rA, (not R32C:$rB)),
          (EQVr32 R32C:$rA, R32C:$rB)>;

def : Pat<(xor (not R32C:$rA), R32C:$rB),
          (EQVr32 R32C:$rA, R32C:$rB)>;

def EQVr16:
    RRForm<0b10010010000, (outs R16C:$rT), (ins R16C:$rA, R16C:$rB),
      "eqv\t$rT, $rA, $rB", IntegerOp,
      [(set R16C:$rT, (or (and R16C:$rA, R16C:$rB),
                          (and (not R16C:$rA), (not R16C:$rB))))]>;

def : Pat<(xor R16C:$rA, (not R16C:$rB)),
          (EQVr16 R16C:$rA, R16C:$rB)>;

def : Pat<(xor (not R16C:$rA), R16C:$rB),
          (EQVr16 R16C:$rA, R16C:$rB)>;

def EQVr8:
    RRForm<0b10010010000, (outs R8C:$rT), (ins R8C:$rA, R8C:$rB),
      "eqv\t$rT, $rA, $rB", IntegerOp,
      [(set R8C:$rT, (or (and R8C:$rA, R8C:$rB),
                         (and (not R8C:$rA), (not R8C:$rB))))]>;

def : Pat<(xor R8C:$rA, (not R8C:$rB)),
          (EQVr8 R8C:$rA, R8C:$rB)>;

def : Pat<(xor (not R8C:$rA), R8C:$rB),
          (EQVr8 R8C:$rA, R8C:$rB)>;
1761
// gcc optimizes (p & q) | (~p & ~q) -> ~(p | q) | (p & q), so match that
// pattern also:
def : Pat<(or (vnot (or (v16i8 VECREG:$rA), (v16i8 VECREG:$rB))),
              (and (v16i8 VECREG:$rA), (v16i8 VECREG:$rB))),
          (EQVv16i8 VECREG:$rA, VECREG:$rB)>;

def : Pat<(or (vnot (or (v8i16 VECREG:$rA), (v8i16 VECREG:$rB))),
              (and (v8i16 VECREG:$rA), (v8i16 VECREG:$rB))),
          (EQVv8i16 VECREG:$rA, VECREG:$rB)>;

def : Pat<(or (vnot (or (v4i32 VECREG:$rA), (v4i32 VECREG:$rB))),
              (and (v4i32 VECREG:$rA), (v4i32 VECREG:$rB))),
          (EQVv4i32 VECREG:$rA, VECREG:$rB)>;

def : Pat<(or (not (or R32C:$rA, R32C:$rB)), (and R32C:$rA, R32C:$rB)),
          (EQVr32 R32C:$rA, R32C:$rB)>;

def : Pat<(or (not (or R16C:$rA, R16C:$rB)), (and R16C:$rA, R16C:$rB)),
          (EQVr16 R16C:$rA, R16C:$rB)>;

def : Pat<(or (not (or R8C:$rA, R8C:$rB)), (and R8C:$rA, R8C:$rB)),
          (EQVr8 R8C:$rA, R8C:$rB)>;
1784
// Select bits:
// selb rT, rA, rB, rC -- per-bit select between rA and rB controlled by
// the mask in rC. The primary pattern matches the target-specific
// SPUselb_v16i8 node produced during lowering; the and/or expansions are
// matched by the anonymous patterns that follow.
def SELBv16i8:
    RRRForm<0b1000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB, VECREG:$rC),
      "selb\t$rT, $rA, $rB, $rC", IntegerOp,
      [(set (v16i8 VECREG:$rT),
            (SPUselb_v16i8 (v16i8 VECREG:$rA), (v16i8 VECREG:$rB),
                           (v16i8 VECREG:$rC)))]>;
1792
// Match the and/or expansions of a v16i8 bit-select onto SELBv16i8.
// The eight patterns cover every commutation of the two AND operands and
// of the AND inside each OR arm.
//
// FIX(review): the original file listed these same eight patterns twice,
// verbatim; the redundant second copy has been removed.
def : Pat<(or (and (v16i8 VECREG:$rA), (v16i8 VECREG:$rC)),
              (and (v16i8 VECREG:$rB), (vnot (v16i8 VECREG:$rC)))),
          (SELBv16i8 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;

def : Pat<(or (and (v16i8 VECREG:$rC), (v16i8 VECREG:$rA)),
              (and (v16i8 VECREG:$rB), (vnot (v16i8 VECREG:$rC)))),
          (SELBv16i8 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;

def : Pat<(or (and (v16i8 VECREG:$rA), (v16i8 VECREG:$rC)),
              (and (vnot (v16i8 VECREG:$rC)), (v16i8 VECREG:$rB))),
          (SELBv16i8 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;

def : Pat<(or (and (v16i8 VECREG:$rC), (v16i8 VECREG:$rA)),
              (and (vnot (v16i8 VECREG:$rC)), (v16i8 VECREG:$rB))),
          (SELBv16i8 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;

def : Pat<(or (and (v16i8 VECREG:$rA), (vnot (v16i8 VECREG:$rC))),
              (and (v16i8 VECREG:$rB), (v16i8 VECREG:$rC))),
          (SELBv16i8 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;

def : Pat<(or (and (v16i8 VECREG:$rA), (vnot (v16i8 VECREG:$rC))),
              (and (v16i8 VECREG:$rC), (v16i8 VECREG:$rB))),
          (SELBv16i8 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;

def : Pat<(or (and (vnot (v16i8 VECREG:$rC)), (v16i8 VECREG:$rA)),
              (and (v16i8 VECREG:$rB), (v16i8 VECREG:$rC))),
          (SELBv16i8 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;

def : Pat<(or (and (vnot (v16i8 VECREG:$rC)), (v16i8 VECREG:$rA)),
              (and (v16i8 VECREG:$rC), (v16i8 VECREG:$rB))),
          (SELBv16i8 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;
1856
// SELB, v8i16 form; matches the SPUselb_v8i16 node from lowering.
def SELBv8i16:
    RRRForm<0b1000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB, VECREG:$rC),
      "selb\t$rT, $rA, $rB, $rC", IntegerOp,
      [(set (v8i16 VECREG:$rT),
            (SPUselb_v8i16 (v8i16 VECREG:$rA), (v8i16 VECREG:$rB),
                           (v8i16 VECREG:$rC)))]>;
1863
// Match the and/or expansions of a v8i16 bit-select onto SELBv8i16
// (all commutations of the AND/OR operands).
//
// FIX(review): the original file listed these same eight patterns twice,
// verbatim; the redundant second copy has been removed.
def : Pat<(or (and (v8i16 VECREG:$rA), (v8i16 VECREG:$rC)),
              (and (v8i16 VECREG:$rB), (vnot (v8i16 VECREG:$rC)))),
          (SELBv8i16 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;

def : Pat<(or (and (v8i16 VECREG:$rC), (v8i16 VECREG:$rA)),
              (and (v8i16 VECREG:$rB), (vnot (v8i16 VECREG:$rC)))),
          (SELBv8i16 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;

def : Pat<(or (and (v8i16 VECREG:$rA), (v8i16 VECREG:$rC)),
              (and (vnot (v8i16 VECREG:$rC)), (v8i16 VECREG:$rB))),
          (SELBv8i16 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;

def : Pat<(or (and (v8i16 VECREG:$rC), (v8i16 VECREG:$rA)),
              (and (vnot (v8i16 VECREG:$rC)), (v8i16 VECREG:$rB))),
          (SELBv8i16 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;

def : Pat<(or (and (v8i16 VECREG:$rA), (vnot (v8i16 VECREG:$rC))),
              (and (v8i16 VECREG:$rB), (v8i16 VECREG:$rC))),
          (SELBv8i16 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;

def : Pat<(or (and (v8i16 VECREG:$rA), (vnot (v8i16 VECREG:$rC))),
              (and (v8i16 VECREG:$rC), (v8i16 VECREG:$rB))),
          (SELBv8i16 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;

def : Pat<(or (and (vnot (v8i16 VECREG:$rC)), (v8i16 VECREG:$rA)),
              (and (v8i16 VECREG:$rB), (v8i16 VECREG:$rC))),
          (SELBv8i16 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;

def : Pat<(or (and (vnot (v8i16 VECREG:$rC)), (v8i16 VECREG:$rA)),
              (and (v8i16 VECREG:$rC), (v8i16 VECREG:$rB))),
          (SELBv8i16 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;
1927
// SELB, v4i32 form; matches the SPUselb_v4i32 node from lowering.
def SELBv4i32:
    RRRForm<0b1000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB, VECREG:$rC),
      "selb\t$rT, $rA, $rB, $rC", IntegerOp,
      [(set (v4i32 VECREG:$rT),
            (SPUselb_v4i32 (v4i32 VECREG:$rA), (v4i32 VECREG:$rB),
                           (v4i32 VECREG:$rC)))]>;
1934
// Match the and/or expansions of a v4i32 bit-select onto SELBv4i32
// (all commutations of the AND/OR operands).
//
// FIX(review): the original file listed these same eight patterns twice,
// verbatim; the redundant second copy has been removed.
def : Pat<(or (and (v4i32 VECREG:$rA), (v4i32 VECREG:$rC)),
              (and (v4i32 VECREG:$rB), (vnot (v4i32 VECREG:$rC)))),
          (SELBv4i32 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;

def : Pat<(or (and (v4i32 VECREG:$rC), (v4i32 VECREG:$rA)),
              (and (v4i32 VECREG:$rB), (vnot (v4i32 VECREG:$rC)))),
          (SELBv4i32 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;

def : Pat<(or (and (v4i32 VECREG:$rA), (v4i32 VECREG:$rC)),
              (and (vnot (v4i32 VECREG:$rC)), (v4i32 VECREG:$rB))),
          (SELBv4i32 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;

def : Pat<(or (and (v4i32 VECREG:$rC), (v4i32 VECREG:$rA)),
              (and (vnot (v4i32 VECREG:$rC)), (v4i32 VECREG:$rB))),
          (SELBv4i32 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;

def : Pat<(or (and (v4i32 VECREG:$rA), (vnot (v4i32 VECREG:$rC))),
              (and (v4i32 VECREG:$rB), (v4i32 VECREG:$rC))),
          (SELBv4i32 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;

def : Pat<(or (and (v4i32 VECREG:$rA), (vnot (v4i32 VECREG:$rC))),
              (and (v4i32 VECREG:$rC), (v4i32 VECREG:$rB))),
          (SELBv4i32 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;

def : Pat<(or (and (vnot (v4i32 VECREG:$rC)), (v4i32 VECREG:$rA)),
              (and (v4i32 VECREG:$rB), (v4i32 VECREG:$rC))),
          (SELBv4i32 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;

def : Pat<(or (and (vnot (v4i32 VECREG:$rC)), (v4i32 VECREG:$rA)),
              (and (v4i32 VECREG:$rC), (v4i32 VECREG:$rB))),
          (SELBv4i32 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;
1998
// SELB, 32-bit scalar form. No primary pattern; selected only through the
// anonymous patterns below.
def SELBr32:
    RRRForm<0b1000, (outs R32C:$rT), (ins R32C:$rA, R32C:$rB, R32C:$rC),
      "selb\t$rT, $rA, $rB, $rC", IntegerOp,
      []>;

// And the various patterns that can be matched... (all 8 of them :-)
// NOTE(review): per the SPU ISA, selb computes rT = (rA & ~rC) | (rB & rC),
// i.e. a 1 bit in rC selects rB. Patterns 1-4 below match
// (rA & rC) | (rB & ~rC) yet still emit SELB(rA, rB, rC); that looks
// operand-swapped relative to patterns 5-8 -- TODO confirm against the ISA.
def : Pat<(or (and R32C:$rA, R32C:$rC),
              (and R32C:$rB, (not R32C:$rC))),
          (SELBr32 R32C:$rA, R32C:$rB, R32C:$rC)>;

def : Pat<(or (and R32C:$rC, R32C:$rA),
              (and R32C:$rB, (not R32C:$rC))),
          (SELBr32 R32C:$rA, R32C:$rB, R32C:$rC)>;

def : Pat<(or (and R32C:$rA, R32C:$rC),
              (and (not R32C:$rC), R32C:$rB)),
          (SELBr32 R32C:$rA, R32C:$rB, R32C:$rC)>;

def : Pat<(or (and R32C:$rC, R32C:$rA),
              (and (not R32C:$rC), R32C:$rB)),
          (SELBr32 R32C:$rA, R32C:$rB, R32C:$rC)>;

def : Pat<(or (and R32C:$rA, (not R32C:$rC)),
              (and R32C:$rB, R32C:$rC)),
          (SELBr32 R32C:$rA, R32C:$rB, R32C:$rC)>;

def : Pat<(or (and R32C:$rA, (not R32C:$rC)),
              (and R32C:$rC, R32C:$rB)),
          (SELBr32 R32C:$rA, R32C:$rB, R32C:$rC)>;

def : Pat<(or (and (not R32C:$rC), R32C:$rA),
              (and R32C:$rB, R32C:$rC)),
          (SELBr32 R32C:$rA, R32C:$rB, R32C:$rC)>;

def : Pat<(or (and (not R32C:$rC), R32C:$rA),
              (and R32C:$rC, R32C:$rB)),
          (SELBr32 R32C:$rA, R32C:$rB, R32C:$rC)>;
2036
// SELB, 16-bit scalar form; same eight and/or expansions as SELBr32.
def SELBr16:
    RRRForm<0b1000, (outs R16C:$rT), (ins R16C:$rA, R16C:$rB, R16C:$rC),
      "selb\t$rT, $rA, $rB, $rC", IntegerOp,
      []>;

def : Pat<(or (and R16C:$rA, R16C:$rC),
              (and R16C:$rB, (not R16C:$rC))),
          (SELBr16 R16C:$rA, R16C:$rB, R16C:$rC)>;

def : Pat<(or (and R16C:$rC, R16C:$rA),
              (and R16C:$rB, (not R16C:$rC))),
          (SELBr16 R16C:$rA, R16C:$rB, R16C:$rC)>;

def : Pat<(or (and R16C:$rA, R16C:$rC),
              (and (not R16C:$rC), R16C:$rB)),
          (SELBr16 R16C:$rA, R16C:$rB, R16C:$rC)>;

def : Pat<(or (and R16C:$rC, R16C:$rA),
              (and (not R16C:$rC), R16C:$rB)),
          (SELBr16 R16C:$rA, R16C:$rB, R16C:$rC)>;

def : Pat<(or (and R16C:$rA, (not R16C:$rC)),
              (and R16C:$rB, R16C:$rC)),
          (SELBr16 R16C:$rA, R16C:$rB, R16C:$rC)>;

def : Pat<(or (and R16C:$rA, (not R16C:$rC)),
              (and R16C:$rC, R16C:$rB)),
          (SELBr16 R16C:$rA, R16C:$rB, R16C:$rC)>;

def : Pat<(or (and (not R16C:$rC), R16C:$rA),
              (and R16C:$rB, R16C:$rC)),
          (SELBr16 R16C:$rA, R16C:$rB, R16C:$rC)>;

def : Pat<(or (and (not R16C:$rC), R16C:$rA),
              (and R16C:$rC, R16C:$rB)),
          (SELBr16 R16C:$rA, R16C:$rB, R16C:$rC)>;
Scott Michel438be252007-12-17 22:32:34 +00002073
// SELB, 8-bit scalar form; same eight and/or expansions as SELBr32.
def SELBr8:
    RRRForm<0b1000, (outs R8C:$rT), (ins R8C:$rA, R8C:$rB, R8C:$rC),
      "selb\t$rT, $rA, $rB, $rC", IntegerOp,
      []>;

def : Pat<(or (and R8C:$rA, R8C:$rC),
              (and R8C:$rB, (not R8C:$rC))),
          (SELBr8 R8C:$rA, R8C:$rB, R8C:$rC)>;

def : Pat<(or (and R8C:$rC, R8C:$rA),
              (and R8C:$rB, (not R8C:$rC))),
          (SELBr8 R8C:$rA, R8C:$rB, R8C:$rC)>;

def : Pat<(or (and R8C:$rA, R8C:$rC),
              (and (not R8C:$rC), R8C:$rB)),
          (SELBr8 R8C:$rA, R8C:$rB, R8C:$rC)>;

def : Pat<(or (and R8C:$rC, R8C:$rA),
              (and (not R8C:$rC), R8C:$rB)),
          (SELBr8 R8C:$rA, R8C:$rB, R8C:$rC)>;

def : Pat<(or (and R8C:$rA, (not R8C:$rC)),
              (and R8C:$rB, R8C:$rC)),
          (SELBr8 R8C:$rA, R8C:$rB, R8C:$rC)>;

def : Pat<(or (and R8C:$rA, (not R8C:$rC)),
              (and R8C:$rC, R8C:$rB)),
          (SELBr8 R8C:$rA, R8C:$rB, R8C:$rC)>;

def : Pat<(or (and (not R8C:$rC), R8C:$rA),
              (and R8C:$rB, R8C:$rC)),
          (SELBr8 R8C:$rA, R8C:$rB, R8C:$rC)>;

def : Pat<(or (and (not R8C:$rC), R8C:$rA),
              (and R8C:$rC, R8C:$rB)),
          (SELBr8 R8C:$rA, R8C:$rB, R8C:$rC)>;
Scott Michel8b6b4202007-12-04 22:35:58 +00002110
//===----------------------------------------------------------------------===//
// Vector shuffle...
//===----------------------------------------------------------------------===//

// SHUFB: byte shuffle of the (rA, rB) pair, controlled by the byte indices
// in rC. No primary pattern: selection happens via the SPUshuffle patterns
// that follow.
def SHUFB:
    RRRForm<0b1000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB, VECREG:$rC),
      "shufb\t$rT, $rA, $rB, $rC", IntegerOp,
      [/* no pattern */]>;
Scott Michel8b6b4202007-12-04 22:35:58 +00002119
// SPUshuffle is generated in LowerVECTOR_SHUFFLE and gets replaced with SHUFB.
// See the SPUshuffle SDNode operand above, which sets up the DAG pattern
// matcher to emit something when the LowerVECTOR_SHUFFLE generates a node with
// the SPUISD::SHUFB opcode.
// One pattern per legal 128-bit vector type (integer and FP).
def : Pat<(SPUshuffle (v16i8 VECREG:$rA), (v16i8 VECREG:$rB), VECREG:$rC),
          (SHUFB VECREG:$rA, VECREG:$rB, VECREG:$rC)>;

def : Pat<(SPUshuffle (v8i16 VECREG:$rA), (v8i16 VECREG:$rB), VECREG:$rC),
          (SHUFB VECREG:$rA, VECREG:$rB, VECREG:$rC)>;

def : Pat<(SPUshuffle (v4i32 VECREG:$rA), (v4i32 VECREG:$rB), VECREG:$rC),
          (SHUFB VECREG:$rA, VECREG:$rB, VECREG:$rC)>;

def : Pat<(SPUshuffle (v4f32 VECREG:$rA), (v4f32 VECREG:$rB), VECREG:$rC),
          (SHUFB VECREG:$rA, VECREG:$rB, VECREG:$rC)>;

def : Pat<(SPUshuffle (v2i64 VECREG:$rA), (v2i64 VECREG:$rB), VECREG:$rC),
          (SHUFB VECREG:$rA, VECREG:$rB, VECREG:$rC)>;

def : Pat<(SPUshuffle (v2f64 VECREG:$rA), (v2f64 VECREG:$rB), VECREG:$rC),
          (SHUFB VECREG:$rA, VECREG:$rB, VECREG:$rC)>;
2141
//===----------------------------------------------------------------------===//
// Shift and rotate group:
//===----------------------------------------------------------------------===//

// SHLH: halfword (16-bit element) shift left by a register amount.
def SHLHv8i16:
    RRForm<0b11111010000, (outs VECREG:$rT), (ins VECREG:$rA, R16C:$rB),
      "shlh\t$rT, $rA, $rB", RotateShift,
      [(set (v8i16 VECREG:$rT),
            (SPUvec_shl_v8i16 (v8i16 VECREG:$rA), R16C:$rB))]>;

// $rB gets promoted to 32-bit register type when confronted with
// this llvm assembly code:
//
// define i16 @shlh_i16_1(i16 %arg1, i16 %arg2) {
//   %A = shl i16 %arg1, %arg2
//   ret i16 %A
// }
//
// However, we will generate this code when lowering 8-bit shifts and rotates.

def SHLHr16:
    RRForm<0b11111010000, (outs R16C:$rT), (ins R16C:$rA, R16C:$rB),
      "shlh\t$rT, $rA, $rB", RotateShift,
      [(set R16C:$rT, (shl R16C:$rA, R16C:$rB))]>;

// Same operation, shift amount already promoted to a 32-bit register.
def SHLHr16_r32:
    RRForm<0b11111010000, (outs R16C:$rT), (ins R16C:$rA, R32C:$rB),
      "shlh\t$rT, $rA, $rB", RotateShift,
      [(set R16C:$rT, (shl R16C:$rA, R32C:$rB))]>;

// Immediate (7-bit) shift amount; extra Pat<> records accept the same
// immediate at i16 and i32 types.
def SHLHIv8i16:
    RI7Form<0b11111010000, (outs VECREG:$rT), (ins VECREG:$rA, u7imm_i8:$val),
      "shlhi\t$rT, $rA, $val", RotateShift,
      [(set (v8i16 VECREG:$rT),
            (SPUvec_shl_v8i16 (v8i16 VECREG:$rA), (i8 uimm7:$val)))]>;

def : Pat<(SPUvec_shl_v8i16 (v8i16 VECREG:$rA), (i16 uimm7:$val)),
          (SHLHIv8i16 VECREG:$rA, imm:$val)>;

def : Pat<(SPUvec_shl_v8i16 (v8i16 VECREG:$rA), (i32 uimm7:$val)),
          (SHLHIv8i16 VECREG:$rA, imm:$val)>;

def SHLHIr16:
    RI7Form<0b11111010000, (outs R16C:$rT), (ins R16C:$rA, u7imm_i32:$val),
      "shlhi\t$rT, $rA, $val", RotateShift,
      [(set R16C:$rT, (shl R16C:$rA, (i32 uimm7:$val)))]>;

def : Pat<(shl R16C:$rA, (i8 uimm7:$val)),
          (SHLHIr16 R16C:$rA, uimm7:$val)>;

def : Pat<(shl R16C:$rA, (i16 uimm7:$val)),
          (SHLHIr16 R16C:$rA, uimm7:$val)>;
2194
// SHL: word (32-bit element) shift left, register and 7-bit immediate
// forms; extra Pat<> records accept the immediate at other integer types.
def SHLv4i32:
    RRForm<0b11111010000, (outs VECREG:$rT), (ins VECREG:$rA, R16C:$rB),
      "shl\t$rT, $rA, $rB", RotateShift,
      [(set (v4i32 VECREG:$rT),
            (SPUvec_shl_v4i32 (v4i32 VECREG:$rA), R16C:$rB))]>;

def SHLr32:
    RRForm<0b11111010000, (outs R32C:$rT), (ins R32C:$rA, R32C:$rB),
      "shl\t$rT, $rA, $rB", RotateShift,
      [(set R32C:$rT, (shl R32C:$rA, R32C:$rB))]>;

def SHLIv4i32:
    RI7Form<0b11111010000, (outs VECREG:$rT), (ins VECREG:$rA, u7imm_i8:$val),
      "shli\t$rT, $rA, $val", RotateShift,
      [(set (v4i32 VECREG:$rT),
            (SPUvec_shl_v4i32 (v4i32 VECREG:$rA), (i8 uimm7:$val)))]>;

def: Pat<(SPUvec_shl_v4i32 (v4i32 VECREG:$rA), (i16 uimm7:$val)),
         (SHLIv4i32 VECREG:$rA, uimm7:$val)>;

def: Pat<(SPUvec_shl_v4i32 (v4i32 VECREG:$rA), (i32 uimm7:$val)),
         (SHLIv4i32 VECREG:$rA, uimm7:$val)>;

def SHLIr32:
    RI7Form<0b11111010000, (outs R32C:$rT), (ins R32C:$rA, u7imm_i32:$val),
      "shli\t$rT, $rA, $val", RotateShift,
      [(set R32C:$rT, (shl R32C:$rA, (i32 uimm7:$val)))]>;

def : Pat<(shl R32C:$rA, (i16 uimm7:$val)),
          (SHLIr32 R32C:$rA, uimm7:$val)>;

def : Pat<(shl R32C:$rA, (i8 uimm7:$val)),
          (SHLIr32 R32C:$rA, uimm7:$val)>;
2228
// SHLQBI vec form: Note that this will shift the entire vector (the 128-bit
// register) to the left. Vector form is here to ensure type correctness.
// Empty pattern lists: these are only reachable via intrinsics (not yet
// wired up).
def SHLQBIvec:
    RRForm<0b11011011100, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
      "shlqbi\t$rT, $rA, $rB", RotateShift,
      [/* intrinsic */]>;

// See note above on SHLQBI.
def SHLQBIIvec:
    RI7Form<0b11011111100, (outs VECREG:$rT), (ins VECREG:$rA, u7imm:$val),
      "shlqbii\t$rT, $rA, $val", RotateShift,
      [/* intrinsic */]>;
2241
// SHLQBY, SHLQBYI vector forms: Shift the entire vector to the left by bytes,
// not by bits.
//
// FIX(review): the asm string previously read "shlqbyi" -- the immediate
// form's mnemonic -- for this register form (copy-paste from SHLQBYIvec
// below); the register form's mnemonic is "shlqby".
// FIXME: this register-register operation is declared with RI7Form while
// its sibling SHLQBIvec uses RRForm -- audit the encoding class.
def SHLQBYvec:
    RI7Form<0b11111011100, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
      "shlqby\t$rT, $rA, $rB", RotateShift,
      [/* intrinsic */]>;
2248
// SHLQBYI: shift the entire quadword left by an immediate byte count.
def SHLQBYIvec:
    RI7Form<0b11111111100, (outs VECREG:$rT), (ins VECREG:$rA, u7imm:$val),
      "shlqbyi\t$rT, $rA, $val", RotateShift,
      [/* intrinsic */]>;
2253
// ROTH v8i16 form: halfword (16-bit element) rotate left.
def ROTHv8i16:
    RRForm<0b00111010000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
      "roth\t$rT, $rA, $rB", RotateShift,
      [(set (v8i16 VECREG:$rT),
            (SPUvec_rotl_v8i16 VECREG:$rA, VECREG:$rB))]>;

def ROTHr16:
    RRForm<0b00111010000, (outs R16C:$rT), (ins R16C:$rA, R16C:$rB),
      "roth\t$rT, $rA, $rB", RotateShift,
      [(set R16C:$rT, (rotl R16C:$rA, R16C:$rB))]>;

def ROTHr16_r32:
    RRForm<0b00111010000, (outs R16C:$rT), (ins R16C:$rA, R32C:$rB),
      "roth\t$rT, $rA, $rB", RotateShift,
      [(set R16C:$rT, (rotl R16C:$rA, R32C:$rB))]>;

// The rotate amount is in the same bits whether we've got an 8-bit, 16-bit or
// 32-bit register
// (hence sext/zext/anyext of the 8-bit amount all map to the same
// instruction via the Pat<> records below).
def ROTHr16_r8:
    RRForm<0b00111010000, (outs R16C:$rT), (ins R16C:$rA, R8C:$rB),
      "roth\t$rT, $rA, $rB", RotateShift,
      [(set R16C:$rT, (rotl R16C:$rA, (i32 (zext R8C:$rB))))]>;

def : Pat<(rotl R16C:$rA, (i32 (sext R8C:$rB))),
          (ROTHr16_r8 R16C:$rA, R8C:$rB)>;

def : Pat<(rotl R16C:$rA, (i32 (zext R8C:$rB))),
          (ROTHr16_r8 R16C:$rA, R8C:$rB)>;

def : Pat<(rotl R16C:$rA, (i32 (anyext R8C:$rB))),
          (ROTHr16_r8 R16C:$rA, R8C:$rB)>;
2286
// ROTHI: halfword rotate left by a 7-bit immediate; the _i32/_i8 variants
// and the Pat<> records accept the same immediate at different types.
def ROTHIv8i16:
    RI7Form<0b00111110000, (outs VECREG:$rT), (ins VECREG:$rA, u7imm_i8:$val),
      "rothi\t$rT, $rA, $val", RotateShift,
      [(set (v8i16 VECREG:$rT),
            (SPUvec_rotl_v8i16 VECREG:$rA, (i8 uimm7:$val)))]>;

def : Pat<(SPUvec_rotl_v8i16 VECREG:$rA, (i16 uimm7:$val)),
          (ROTHIv8i16 VECREG:$rA, imm:$val)>;

def : Pat<(SPUvec_rotl_v8i16 VECREG:$rA, (i32 uimm7:$val)),
          (ROTHIv8i16 VECREG:$rA, imm:$val)>;

def ROTHIr16:
    RI7Form<0b00111110000, (outs R16C:$rT), (ins R16C:$rA, u7imm:$val),
      "rothi\t$rT, $rA, $val", RotateShift,
      [(set R16C:$rT, (rotl R16C:$rA, (i16 uimm7:$val)))]>;

def ROTHIr16_i32:
    RI7Form<0b00111110000, (outs R16C:$rT), (ins R16C:$rA, u7imm_i32:$val),
      "rothi\t$rT, $rA, $val", RotateShift,
      [(set R16C:$rT, (rotl R16C:$rA, (i32 uimm7:$val)))]>;

def ROTHIr16_i8:
    RI7Form<0b00111110000, (outs R16C:$rT), (ins R16C:$rA, u7imm_i8:$val),
      "rothi\t$rT, $rA, $val", RotateShift,
      [(set R16C:$rT, (rotl R16C:$rA, (i8 uimm7:$val)))]>;
2313
Scott Michel8b6b4202007-12-04 22:35:58 +00002314def ROTv4i32:
2315 RRForm<0b00011010000, (outs VECREG:$rT), (ins VECREG:$rA, R32C:$rB),
2316 "rot\t$rT, $rA, $rB", RotateShift,
2317 [(set (v4i32 VECREG:$rT),
2318 (SPUvec_rotl_v4i32 (v4i32 VECREG:$rA), R32C:$rB))]>;
2319
2320def ROTr32:
2321 RRForm<0b00011010000, (outs R32C:$rT), (ins R32C:$rA, R32C:$rB),
2322 "rot\t$rT, $rA, $rB", RotateShift,
2323 [(set R32C:$rT, (rotl R32C:$rA, R32C:$rB))]>;
2324
Scott Michel438be252007-12-17 22:32:34 +00002325// The rotate amount is in the same bits whether we've got an 8-bit, 16-bit or
2326// 32-bit register
2327def ROTr32_r16_anyext:
2328 RRForm<0b00011010000, (outs R32C:$rT), (ins R32C:$rA, R16C:$rB),
2329 "rot\t$rT, $rA, $rB", RotateShift,
2330 [(set R32C:$rT, (rotl R32C:$rA, (i32 (anyext R16C:$rB))))]>;
2331
2332def : Pat<(rotl R32C:$rA, (i32 (zext R16C:$rB))),
2333 (ROTr32_r16_anyext R32C:$rA, R16C:$rB)>;
2334
2335def : Pat<(rotl R32C:$rA, (i32 (sext R16C:$rB))),
2336 (ROTr32_r16_anyext R32C:$rA, R16C:$rB)>;
2337
2338def ROTr32_r8_anyext:
2339 RRForm<0b00011010000, (outs R32C:$rT), (ins R32C:$rA, R8C:$rB),
2340 "rot\t$rT, $rA, $rB", RotateShift,
2341 [(set R32C:$rT, (rotl R32C:$rA, (i32 (anyext R8C:$rB))))]>;
2342
2343def : Pat<(rotl R32C:$rA, (i32 (zext R8C:$rB))),
2344 (ROTr32_r8_anyext R32C:$rA, R8C:$rB)>;
2345
2346def : Pat<(rotl R32C:$rA, (i32 (sext R8C:$rB))),
2347 (ROTr32_r8_anyext R32C:$rA, R8C:$rB)>;
2348
Scott Michel8b6b4202007-12-04 22:35:58 +00002349def ROTIv4i32:
2350 RI7Form<0b00011110000, (outs VECREG:$rT), (ins VECREG:$rA, u7imm_i32:$val),
2351 "roti\t$rT, $rA, $val", RotateShift,
2352 [(set (v4i32 VECREG:$rT),
2353 (SPUvec_rotl_v4i32 (v4i32 VECREG:$rA), (i32 uimm7:$val)))]>;
2354
2355def : Pat<(SPUvec_rotl_v4i32 (v4i32 VECREG:$rA), (i16 uimm7:$val)),
2356 (ROTIv4i32 VECREG:$rA, imm:$val)>;
2357
Scott Michel438be252007-12-17 22:32:34 +00002358def : Pat<(SPUvec_rotl_v4i32 (v4i32 VECREG:$rA), (i8 uimm7:$val)),
2359 (ROTIv4i32 VECREG:$rA, imm:$val)>;
2360
Scott Michel8b6b4202007-12-04 22:35:58 +00002361def ROTIr32:
2362 RI7Form<0b00011110000, (outs R32C:$rT), (ins R32C:$rA, u7imm_i32:$val),
2363 "roti\t$rT, $rA, $val", RotateShift,
2364 [(set R32C:$rT, (rotl R32C:$rA, (i32 uimm7:$val)))]>;
2365
// ROTI, r32 destination with an i16 rotate count.
// NOTE: "roti" encodes as 0b00011110000 (see ROTIv4i32/ROTIr32 above);
// the previous 0b00111110000 was the ROTHI (halfword) opcode and produced
// a wrong instruction encoding.
def ROTIr32_i16:
    RI7Form<0b00011110000, (outs R32C:$rT), (ins R32C:$rA, u7imm:$val),
      "roti\t$rT, $rA, $val", RotateShift,
      [(set R32C:$rT, (rotl R32C:$rA, (i16 uimm7:$val)))]>;
2370
// ROTI, r32 destination with an i8 rotate count.
// NOTE: "roti" encodes as 0b00011110000 (see ROTIv4i32/ROTIr32 above);
// the previous 0b00111110000 was the ROTHI (halfword) opcode and produced
// a wrong instruction encoding.
def ROTIr32_i8:
    RI7Form<0b00011110000, (outs R32C:$rT), (ins R32C:$rA, u7imm_i8:$val),
      "roti\t$rT, $rA, $val", RotateShift,
      [(set R32C:$rT, (rotl R32C:$rA, (i8 uimm7:$val)))]>;
2375
Scott Michel8b6b4202007-12-04 22:35:58 +00002376// ROTQBY* vector forms: This rotates the entire vector, but vector registers
2377// are used here for type checking (instances where ROTQBI is used actually
2378// use vector registers)
2379def ROTQBYvec:
Scott Micheldbac4cf2008-01-11 02:53:15 +00002380 RRForm<0b00111011100, (outs VECREG:$rT), (ins VECREG:$rA, R32C:$rB),
Scott Michel8b6b4202007-12-04 22:35:58 +00002381 "rotqby\t$rT, $rA, $rB", RotateShift,
Scott Micheldbac4cf2008-01-11 02:53:15 +00002382 [(set (v16i8 VECREG:$rT), (SPUrotbytes_left (v16i8 VECREG:$rA), R32C:$rB))]>;
Scott Michel8b6b4202007-12-04 22:35:58 +00002383
Scott Micheldbac4cf2008-01-11 02:53:15 +00002384def : Pat<(SPUrotbytes_left_chained (v16i8 VECREG:$rA), R32C:$rB),
2385 (ROTQBYvec VECREG:$rA, R32C:$rB)>;
Scott Michel8b6b4202007-12-04 22:35:58 +00002386
2387// See ROTQBY note above.
2388def ROTQBYIvec:
2389 RI7Form<0b00111111100, (outs VECREG:$rT), (ins VECREG:$rA, u7imm:$val),
2390 "rotqbyi\t$rT, $rA, $val", RotateShift,
2391 [(set (v16i8 VECREG:$rT),
2392 (SPUrotbytes_left (v16i8 VECREG:$rA), (i16 uimm7:$val)))]>;
2393
2394def : Pat<(SPUrotbytes_left_chained (v16i8 VECREG:$rA), (i16 uimm7:$val)),
2395 (ROTQBYIvec VECREG:$rA, uimm7:$val)>;
2396
2397// See ROTQBY note above.
2398def ROTQBYBIvec:
2399 RI7Form<0b00110011100, (outs VECREG:$rT), (ins VECREG:$rA, u7imm:$val),
2400 "rotqbybi\t$rT, $rA, $val", RotateShift,
2401 [/* intrinsic */]>;
2402
2403// See ROTQBY note above.
2404//
2405// Assume that the user of this instruction knows to shift the rotate count
2406// into bit 29
2407def ROTQBIvec:
2408 RRForm<0b00011011100, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
2409 "rotqbi\t$rT, $rA, $rB", RotateShift,
2410 [/* insert intrinsic here */]>;
2411
2412// See ROTQBY note above.
2413def ROTQBIIvec:
2414 RI7Form<0b00011111100, (outs VECREG:$rT), (ins VECREG:$rA, u7imm_i32:$val),
2415 "rotqbii\t$rT, $rA, $val", RotateShift,
2416 [/* insert intrinsic here */]>;
2417
2418// ROTHM v8i16 form:
2419// NOTE(1): No vector rotate is generated by the C/C++ frontend (today),
2420// so this only matches a synthetically generated/lowered code
2421// fragment.
2422// NOTE(2): $rB must be negated before the right rotate!
2423def ROTHMv8i16:
2424 RRForm<0b10111010000, (outs VECREG:$rT), (ins VECREG:$rA, R32C:$rB),
2425 "rothm\t$rT, $rA, $rB", RotateShift,
2426 [/* see patterns below - $rB must be negated */]>;
2427
2428def : Pat<(SPUvec_srl_v8i16 (v8i16 VECREG:$rA), R32C:$rB),
2429 (ROTHMv8i16 VECREG:$rA, (SFIr32 R32C:$rB, 0))>;
2430
2431def : Pat<(SPUvec_srl_v8i16 (v8i16 VECREG:$rA), R16C:$rB),
2432 (ROTHMv8i16 VECREG:$rA,
2433 (SFIr32 (XSHWr16 R16C:$rB), 0))>;
2434
Scott Michel438be252007-12-17 22:32:34 +00002435def : Pat<(SPUvec_srl_v8i16 (v8i16 VECREG:$rA), R8C:$rB),
Scott Michel8b6b4202007-12-04 22:35:58 +00002436 (ROTHMv8i16 VECREG:$rA,
Scott Michel438be252007-12-17 22:32:34 +00002437 (SFIr32 (XSHWr16 (XSBHr8 R8C:$rB) ), 0))>;
Scott Michel8b6b4202007-12-04 22:35:58 +00002438
2439// ROTHM r16 form: Rotate 16-bit quantity to right, zero fill at the left
2440// Note: This instruction doesn't match a pattern because rB must be negated
2441// for the instruction to work. Thus, the pattern below the instruction!
2442def ROTHMr16:
2443 RRForm<0b10111010000, (outs R16C:$rT), (ins R16C:$rA, R32C:$rB),
2444 "rothm\t$rT, $rA, $rB", RotateShift,
2445 [/* see patterns below - $rB must be negated! */]>;
2446
2447def : Pat<(srl R16C:$rA, R32C:$rB),
2448 (ROTHMr16 R16C:$rA, (SFIr32 R32C:$rB, 0))>;
2449
2450def : Pat<(srl R16C:$rA, R16C:$rB),
2451 (ROTHMr16 R16C:$rA,
2452 (SFIr32 (XSHWr16 R16C:$rB), 0))>;
2453
Scott Michel438be252007-12-17 22:32:34 +00002454def : Pat<(srl R16C:$rA, R8C:$rB),
Scott Michel8b6b4202007-12-04 22:35:58 +00002455 (ROTHMr16 R16C:$rA,
Scott Michel438be252007-12-17 22:32:34 +00002456 (SFIr32 (XSHWr16 (XSBHr8 R8C:$rB) ), 0))>;
Scott Michel8b6b4202007-12-04 22:35:58 +00002457
2458// ROTHMI v8i16 form: See the comment for ROTHM v8i16. The difference here is
2459// that the immediate can be complemented, so that the user doesn't have to
2460// worry about it.
2461def ROTHMIv8i16:
2462 RI7Form<0b10111110000, (outs VECREG:$rT), (ins VECREG:$rA, rothNeg7imm:$val),
2463 "rothmi\t$rT, $rA, $val", RotateShift,
2464 [(set (v8i16 VECREG:$rT),
2465 (SPUvec_srl_v8i16 (v8i16 VECREG:$rA), (i32 imm:$val)))]>;
2466
2467def: Pat<(SPUvec_srl_v8i16 (v8i16 VECREG:$rA), (i16 imm:$val)),
2468 (ROTHMIv8i16 VECREG:$rA, imm:$val)>;
Scott Michel438be252007-12-17 22:32:34 +00002469
2470def: Pat<(SPUvec_srl_v8i16 (v8i16 VECREG:$rA), (i8 imm:$val)),
2471 (ROTHMIv8i16 VECREG:$rA, imm:$val)>;
Scott Michel8b6b4202007-12-04 22:35:58 +00002472
2473def ROTHMIr16:
2474 RI7Form<0b10111110000, (outs R16C:$rT), (ins R16C:$rA, rothNeg7imm:$val),
2475 "rothmi\t$rT, $rA, $val", RotateShift,
2476 [(set R16C:$rT, (srl R16C:$rA, (i32 uimm7:$val)))]>;
2477
2478def: Pat<(srl R16C:$rA, (i16 uimm7:$val)),
2479 (ROTHMIr16 R16C:$rA, uimm7:$val)>;
2480
Scott Michel438be252007-12-17 22:32:34 +00002481def: Pat<(srl R16C:$rA, (i8 uimm7:$val)),
2482 (ROTHMIr16 R16C:$rA, uimm7:$val)>;
2483
Scott Michel8b6b4202007-12-04 22:35:58 +00002484// ROTM v4i32 form: See the ROTHM v8i16 comments.
2485def ROTMv4i32:
2486 RRForm<0b10011010000, (outs VECREG:$rT), (ins VECREG:$rA, R32C:$rB),
2487 "rotm\t$rT, $rA, $rB", RotateShift,
2488 [/* see patterns below - $rB must be negated */]>;
2489
2490def : Pat<(SPUvec_srl_v4i32 VECREG:$rA, R32C:$rB),
2491 (ROTMv4i32 VECREG:$rA, (SFIr32 R32C:$rB, 0))>;
2492
2493def : Pat<(SPUvec_srl_v4i32 VECREG:$rA, R16C:$rB),
2494 (ROTMv4i32 VECREG:$rA,
2495 (SFIr32 (XSHWr16 R16C:$rB), 0))>;
2496
// v4i32 logical shift right by an i8 count: sign/zero-extend the byte to a
// halfword (XSBHr8), then to a word (XSHWr16), and negate (SFIr32 ..., 0)
// because ROTM rotates right by the two's complement of $rB.
// Previously this duplicated the R16C pattern above with the R8C pieces
// commented out, leaving the i8 shift count unmatched; this now mirrors
// the ROTHMv8i16 and ROTMr32 R8C patterns.
def : Pat<(SPUvec_srl_v4i32 VECREG:$rA, R8C:$rB),
          (ROTMv4i32 VECREG:$rA,
		     (SFIr32 (XSHWr16 (XSBHr8 R8C:$rB)), 0))>;
2500
2501def ROTMr32:
2502 RRForm<0b10011010000, (outs R32C:$rT), (ins R32C:$rA, R32C:$rB),
2503 "rotm\t$rT, $rA, $rB", RotateShift,
2504 [/* see patterns below - $rB must be negated */]>;
2505
2506def : Pat<(srl R32C:$rA, R32C:$rB),
2507 (ROTMr32 R32C:$rA, (SFIr32 R32C:$rB, 0))>;
2508
2509def : Pat<(srl R32C:$rA, R16C:$rB),
2510 (ROTMr32 R32C:$rA,
2511 (SFIr32 (XSHWr16 R16C:$rB), 0))>;
2512
Scott Michel438be252007-12-17 22:32:34 +00002513def : Pat<(srl R32C:$rA, R8C:$rB),
2514 (ROTMr32 R32C:$rA,
2515 (SFIr32 (XSHWr16 (XSBHr8 R8C:$rB)), 0))>;
2516
Scott Michel8b6b4202007-12-04 22:35:58 +00002517// ROTMI v4i32 form: See the comment for ROTHM v8i16.
2518def ROTMIv4i32:
2519 RI7Form<0b10011110000, (outs VECREG:$rT), (ins VECREG:$rA, rotNeg7imm:$val),
2520 "rotmi\t$rT, $rA, $val", RotateShift,
2521 [(set (v4i32 VECREG:$rT),
2522 (SPUvec_srl_v4i32 VECREG:$rA, (i32 uimm7:$val)))]>;
2523
2524def : Pat<(SPUvec_srl_v4i32 VECREG:$rA, (i16 uimm7:$val)),
2525 (ROTMIv4i32 VECREG:$rA, uimm7:$val)>;
Scott Michel438be252007-12-17 22:32:34 +00002526
2527def : Pat<(SPUvec_srl_v4i32 VECREG:$rA, (i8 uimm7:$val)),
2528 (ROTMIv4i32 VECREG:$rA, uimm7:$val)>;
Scott Michel8b6b4202007-12-04 22:35:58 +00002529
2530// ROTMI r32 form: know how to complement the immediate value.
2531def ROTMIr32:
2532 RI7Form<0b10011110000, (outs R32C:$rT), (ins R32C:$rA, rotNeg7imm:$val),
2533 "rotmi\t$rT, $rA, $val", RotateShift,
2534 [(set R32C:$rT, (srl R32C:$rA, (i32 uimm7:$val)))]>;
2535
2536def : Pat<(srl R32C:$rA, (i16 imm:$val)),
2537 (ROTMIr32 R32C:$rA, uimm7:$val)>;
2538
Scott Michel438be252007-12-17 22:32:34 +00002539def : Pat<(srl R32C:$rA, (i8 imm:$val)),
2540 (ROTMIr32 R32C:$rA, uimm7:$val)>;
2541
Scott Michel8b6b4202007-12-04 22:35:58 +00002542// ROTQMBYvec: This is a vector form merely so that when used in an
2543// instruction pattern, type checking will succeed. This instruction assumes
2544// that the user knew to complement $rB.
2545def ROTQMBYvec:
2546 RRForm<0b10111011100, (outs VECREG:$rT), (ins VECREG:$rA, R32C:$rB),
2547 "rotqmby\t$rT, $rA, $rB", RotateShift,
2548 [(set (v16i8 VECREG:$rT),
2549 (SPUrotbytes_right_zfill (v16i8 VECREG:$rA), R32C:$rB))]>;
2550
2551def ROTQMBYIvec:
2552 RI7Form<0b10111111100, (outs VECREG:$rT), (ins VECREG:$rA, rotNeg7imm:$val),
2553 "rotqmbyi\t$rT, $rA, $val", RotateShift,
2554 [(set (v16i8 VECREG:$rT),
2555 (SPUrotbytes_right_zfill (v16i8 VECREG:$rA), (i32 uimm7:$val)))]>;
2556
2557def : Pat<(SPUrotbytes_right_zfill VECREG:$rA, (i16 uimm7:$val)),
2558 (ROTQMBYIvec VECREG:$rA, uimm7:$val)>;
2559
2560def ROTQMBYBIvec:
2561 RRForm<0b10110011100, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
2562 "rotqmbybi\t$rT, $rA, $rB", RotateShift,
2563 [/* intrinsic */]>;
2564
2565def ROTQMBIvec:
2566 RRForm<0b10011011100, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
2567 "rotqmbi\t$rT, $rA, $rB", RotateShift,
2568 [/* intrinsic */]>;
2569
2570def ROTQMBIIvec:
2571 RI7Form<0b10011111100, (outs VECREG:$rT), (ins VECREG:$rA, rotNeg7imm:$val),
2572 "rotqmbii\t$rT, $rA, $val", RotateShift,
2573 [/* intrinsic */]>;
2574
2575def ROTMAHv8i16:
2576 RRForm<0b01111010000, (outs VECREG:$rT), (ins VECREG:$rA, R32C:$rB),
2577 "rotmah\t$rT, $rA, $rB", RotateShift,
2578 [/* see patterns below - $rB must be negated */]>;
2579
2580def : Pat<(SPUvec_sra_v8i16 VECREG:$rA, R32C:$rB),
2581 (ROTMAHv8i16 VECREG:$rA, (SFIr32 R32C:$rB, 0))>;
2582
2583def : Pat<(SPUvec_sra_v8i16 VECREG:$rA, R16C:$rB),
2584 (ROTMAHv8i16 VECREG:$rA,
2585 (SFIr32 (XSHWr16 R16C:$rB), 0))>;
2586
Scott Michel438be252007-12-17 22:32:34 +00002587def : Pat<(SPUvec_sra_v8i16 VECREG:$rA, R8C:$rB),
2588 (ROTMAHv8i16 VECREG:$rA,
2589 (SFIr32 (XSHWr16 (XSBHr8 R8C:$rB)), 0))>;
2590
Scott Michel8b6b4202007-12-04 22:35:58 +00002591def ROTMAHr16:
2592 RRForm<0b01111010000, (outs R16C:$rT), (ins R16C:$rA, R32C:$rB),
2593 "rotmah\t$rT, $rA, $rB", RotateShift,
2594 [/* see patterns below - $rB must be negated */]>;
2595
2596def : Pat<(sra R16C:$rA, R32C:$rB),
2597 (ROTMAHr16 R16C:$rA, (SFIr32 R32C:$rB, 0))>;
2598
2599def : Pat<(sra R16C:$rA, R16C:$rB),
2600 (ROTMAHr16 R16C:$rA,
2601 (SFIr32 (XSHWr16 R16C:$rB), 0))>;
2602
Scott Michel438be252007-12-17 22:32:34 +00002603def : Pat<(sra R16C:$rA, R8C:$rB),
2604 (ROTMAHr16 R16C:$rA,
2605 (SFIr32 (XSHWr16 (XSBHr8 R8C:$rB)), 0))>;
2606
// ROTMAHI v8i16: rotate-and-mask algebraic halfword, immediate count.
// This is a 7-bit-immediate form, so it uses RI7Form like every other
// *I rotate in this file (RRForm was previously used by mistake and would
// mis-encode the immediate operand).
def ROTMAHIv8i16:
    RI7Form<0b01111110000, (outs VECREG:$rT), (ins VECREG:$rA, rothNeg7imm:$val),
      "rotmahi\t$rT, $rA, $val", RotateShift,
      [(set (v8i16 VECREG:$rT),
            (SPUvec_sra_v8i16 (v8i16 VECREG:$rA), (i32 uimm7:$val)))]>;
2612
2613def : Pat<(SPUvec_sra_v8i16 (v8i16 VECREG:$rA), (i16 uimm7:$val)),
2614 (ROTMAHIv8i16 (v8i16 VECREG:$rA), (i32 uimm7:$val))>;
2615
Scott Michel438be252007-12-17 22:32:34 +00002616def : Pat<(SPUvec_sra_v8i16 (v8i16 VECREG:$rA), (i8 uimm7:$val)),
2617 (ROTMAHIv8i16 (v8i16 VECREG:$rA), (i32 uimm7:$val))>;
2618
// ROTMAHI r16: rotate-and-mask algebraic halfword, immediate count.
// Immediate form, so RI7Form (RRForm was previously used by mistake and
// would mis-encode the immediate operand).
def ROTMAHIr16:
    RI7Form<0b01111110000, (outs R16C:$rT), (ins R16C:$rA, rothNeg7imm_i16:$val),
      "rotmahi\t$rT, $rA, $val", RotateShift,
      [(set R16C:$rT, (sra R16C:$rA, (i16 uimm7:$val)))]>;
2623
2624def : Pat<(sra R16C:$rA, (i32 imm:$val)),
2625 (ROTMAHIr16 R16C:$rA, uimm7:$val)>;
2626
Scott Michel438be252007-12-17 22:32:34 +00002627def : Pat<(sra R16C:$rA, (i8 imm:$val)),
2628 (ROTMAHIr16 R16C:$rA, uimm7:$val)>;
2629
Scott Michel8b6b4202007-12-04 22:35:58 +00002630def ROTMAv4i32:
2631 RRForm<0b01011010000, (outs VECREG:$rT), (ins VECREG:$rA, R32C:$rB),
2632 "rotma\t$rT, $rA, $rB", RotateShift,
2633 [/* see patterns below - $rB must be negated */]>;
2634
2635def : Pat<(SPUvec_sra_v4i32 VECREG:$rA, R32C:$rB),
2636 (ROTMAv4i32 (v4i32 VECREG:$rA), (SFIr32 R32C:$rB, 0))>;
2637
2638def : Pat<(SPUvec_sra_v4i32 VECREG:$rA, R16C:$rB),
2639 (ROTMAv4i32 (v4i32 VECREG:$rA),
2640 (SFIr32 (XSHWr16 R16C:$rB), 0))>;
2641
Scott Michel438be252007-12-17 22:32:34 +00002642def : Pat<(SPUvec_sra_v4i32 VECREG:$rA, R8C:$rB),
2643 (ROTMAv4i32 (v4i32 VECREG:$rA),
2644 (SFIr32 (XSHWr16 (XSBHr8 R8C:$rB)), 0))>;
2645
Scott Michel8b6b4202007-12-04 22:35:58 +00002646def ROTMAr32:
2647 RRForm<0b01011010000, (outs R32C:$rT), (ins R32C:$rA, R32C:$rB),
2648 "rotma\t$rT, $rA, $rB", RotateShift,
2649 [/* see patterns below - $rB must be negated */]>;
2650
2651def : Pat<(sra R32C:$rA, R32C:$rB),
2652 (ROTMAr32 R32C:$rA, (SFIr32 R32C:$rB, 0))>;
2653
2654def : Pat<(sra R32C:$rA, R16C:$rB),
2655 (ROTMAr32 R32C:$rA,
2656 (SFIr32 (XSHWr16 R16C:$rB), 0))>;
2657
Scott Michel438be252007-12-17 22:32:34 +00002658def : Pat<(sra R32C:$rA, R8C:$rB),
2659 (ROTMAr32 R32C:$rA,
2660 (SFIr32 (XSHWr16 (XSBHr8 R8C:$rB)), 0))>;
2661
// ROTMAI v4i32: rotate-and-mask algebraic word, immediate count.
// Immediate form, so RI7Form (RRForm was previously used by mistake and
// would mis-encode the immediate operand).
def ROTMAIv4i32:
    RI7Form<0b01011110000, (outs VECREG:$rT), (ins VECREG:$rA, rotNeg7imm:$val),
      "rotmai\t$rT, $rA, $val", RotateShift,
      [(set (v4i32 VECREG:$rT),
            (SPUvec_sra_v4i32 VECREG:$rA, (i32 uimm7:$val)))]>;
2667
2668def : Pat<(SPUvec_sra_v4i32 VECREG:$rA, (i16 uimm7:$val)),
2669 (ROTMAIv4i32 VECREG:$rA, uimm7:$val)>;
2670
// ROTMAI r32: rotate-and-mask algebraic word, immediate count.
// Immediate form, so RI7Form (RRForm was previously used by mistake and
// would mis-encode the immediate operand).
def ROTMAIr32:
    RI7Form<0b01011110000, (outs R32C:$rT), (ins R32C:$rA, rotNeg7imm:$val),
      "rotmai\t$rT, $rA, $val", RotateShift,
      [(set R32C:$rT, (sra R32C:$rA, (i32 uimm7:$val)))]>;
2675
2676def : Pat<(sra R32C:$rA, (i16 uimm7:$val)),
2677 (ROTMAIr32 R32C:$rA, uimm7:$val)>;
2678
Scott Michel438be252007-12-17 22:32:34 +00002679def : Pat<(sra R32C:$rA, (i8 uimm7:$val)),
2680 (ROTMAIr32 R32C:$rA, uimm7:$val)>;
2681
Scott Michel8b6b4202007-12-04 22:35:58 +00002682//===----------------------------------------------------------------------===//
2683// Branch and conditionals:
2684//===----------------------------------------------------------------------===//
2685
2686let isTerminator = 1, isBarrier = 1 in {
2687 // Halt If Equal (r32 preferred slot only, no vector form)
2688 def HEQr32:
2689 RRForm_3<0b00011011110, (outs), (ins R32C:$rA, R32C:$rB),
2690 "heq\t$rA, $rB", BranchResolv,
2691 [/* no pattern to match */]>;
2692
2693 def HEQIr32 :
2694 RI10Form_2<0b11111110, (outs), (ins R32C:$rA, s10imm:$val),
2695 "heqi\t$rA, $val", BranchResolv,
2696 [/* no pattern to match */]>;
2697
2698 // HGT/HGTI: These instructions use signed arithmetic for the comparison,
2699 // contrasting with HLGT/HLGTI, which use unsigned comparison:
2700 def HGTr32:
2701 RRForm_3<0b00011010010, (outs), (ins R32C:$rA, R32C:$rB),
2702 "hgt\t$rA, $rB", BranchResolv,
2703 [/* no pattern to match */]>;
2704
2705 def HGTIr32:
2706 RI10Form_2<0b11110010, (outs), (ins R32C:$rA, s10imm:$val),
2707 "hgti\t$rA, $val", BranchResolv,
2708 [/* no pattern to match */]>;
2709
2710 def HLGTr32:
2711 RRForm_3<0b00011011010, (outs), (ins R32C:$rA, R32C:$rB),
2712 "hlgt\t$rA, $rB", BranchResolv,
2713 [/* no pattern to match */]>;
2714
2715 def HLGTIr32:
2716 RI10Form_2<0b11111010, (outs), (ins R32C:$rA, s10imm:$val),
2717 "hlgti\t$rA, $val", BranchResolv,
2718 [/* no pattern to match */]>;
2719}
2720
2721// Comparison operators:
Scott Michel438be252007-12-17 22:32:34 +00002722def CEQBr8:
2723 RRForm<0b00001011110, (outs R8C:$rT), (ins R8C:$rA, R8C:$rB),
2724 "ceqb\t$rT, $rA, $rB", ByteOp,
2725 [/* no pattern to match */]>;
Scott Michel8b6b4202007-12-04 22:35:58 +00002726
2727def CEQBv16i8:
2728 RRForm<0b00001011110, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
2729 "ceqb\t$rT, $rA, $rB", ByteOp,
2730 [/* no pattern to match: intrinsic */]>;
2731
Scott Michel438be252007-12-17 22:32:34 +00002732def CEQBIr8:
Scott Micheldbac4cf2008-01-11 02:53:15 +00002733 RI10Form<0b01111110, (outs R8C:$rT), (ins R8C:$rA, s7imm_i8:$val),
Scott Michel438be252007-12-17 22:32:34 +00002734 "ceqbi\t$rT, $rA, $val", ByteOp,
2735 [/* no pattern to match: intrinsic */]>;
2736
Scott Michel8b6b4202007-12-04 22:35:58 +00002737def CEQBIv16i8:
Scott Micheldbac4cf2008-01-11 02:53:15 +00002738 RI10Form<0b01111110, (outs VECREG:$rT), (ins VECREG:$rA, s7imm_i8:$val),
Scott Michel8b6b4202007-12-04 22:35:58 +00002739 "ceqbi\t$rT, $rA, $val", ByteOp,
2740 [/* no pattern to match: intrinsic */]>;
2741
2742def CEQHr16:
2743 RRForm<0b00010011110, (outs R16C:$rT), (ins R16C:$rA, R16C:$rB),
2744 "ceqh\t$rT, $rA, $rB", ByteOp,
2745 [/* no pattern to match */]>;
2746
2747def CEQHv8i16:
2748 RRForm<0b00010011110, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
2749 "ceqh\t$rT, $rA, $rB", ByteOp,
2750 [/* no pattern to match: intrinsic */]>;
2751
2752def CEQHIr16:
2753 RI10Form<0b10111110, (outs R16C:$rT), (ins R16C:$rA, s10imm:$val),
2754 "ceqhi\t$rT, $rA, $val", ByteOp,
2755 [/* no pattern to match: intrinsic */]>;
2756
2757def CEQHIv8i16:
2758 RI10Form<0b10111110, (outs VECREG:$rT), (ins VECREG:$rA, s10imm:$val),
2759 "ceqhi\t$rT, $rA, $val", ByteOp,
2760 [/* no pattern to match: intrinsic */]>;
2761
2762def CEQr32:
2763 RRForm<0b00000011110, (outs R32C:$rT), (ins R32C:$rA, R32C:$rB),
2764 "ceq\t$rT, $rA, $rB", ByteOp,
2765 [/* no pattern to match: intrinsic */]>;
2766
2767def CEQv4i32:
2768 RRForm<0b00000011110, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
2769 "ceq\t$rT, $rA, $rB", ByteOp,
2770 [/* no pattern to match: intrinsic */]>;
2771
2772def CEQIr32:
2773 RI10Form<0b00111110, (outs R32C:$rT), (ins R32C:$rA, s10imm:$val),
2774 "ceqi\t$rT, $rA, $val", ByteOp,
2775 [/* no pattern to match: intrinsic */]>;
2776
2777def CEQIv4i32:
2778 RI10Form<0b00111110, (outs VECREG:$rT), (ins VECREG:$rA, s10imm:$val),
2779 "ceqi\t$rT, $rA, $val", ByteOp,
2780 [/* no pattern to match: intrinsic */]>;
2781
2782let isCall = 1,
2783 // All calls clobber the non-callee-saved registers:
2784 Defs = [R0, R1, R2, R3, R4, R5, R6, R7, R8, R9,
2785 R10,R11,R12,R13,R14,R15,R16,R17,R18,R19,
2786 R20,R21,R22,R23,R24,R25,R26,R27,R28,R29,
2787 R30,R31,R32,R33,R34,R35,R36,R37,R38,R39,
2788 R40,R41,R42,R43,R44,R45,R46,R47,R48,R49,
2789 R50,R51,R52,R53,R54,R55,R56,R57,R58,R59,
2790 R60,R61,R62,R63,R64,R65,R66,R67,R68,R69,
2791 R70,R71,R72,R73,R74,R75,R76,R77,R78,R79],
2792 // All of these instructions use $lr (aka $0)
2793 Uses = [R0] in {
2794 // Branch relative and set link: Used if we actually know that the target
2795 // is within [-32768, 32767] bytes of the target
2796 def BRSL:
2797 BranchSetLink<0b011001100, (outs), (ins relcalltarget:$func, variable_ops),
2798 "brsl\t$$lr, $func",
2799 [(SPUcall (SPUpcrel tglobaladdr:$func, 0))]>;
2800
2801 // Branch absolute and set link: Used if we actually know that the target
2802 // is an absolute address
2803 def BRASL:
2804 BranchSetLink<0b011001100, (outs), (ins calltarget:$func, variable_ops),
2805 "brasl\t$$lr, $func",
Scott Micheldbac4cf2008-01-11 02:53:15 +00002806 [(SPUcall (SPUaform tglobaladdr:$func, 0))]>;
Scott Michel8b6b4202007-12-04 22:35:58 +00002807
2808 // Branch indirect and set link if external data. These instructions are not
2809 // actually generated, matched by an intrinsic:
2810 def BISLED_00: BISLEDForm<0b11, "bisled\t$$lr, $func", [/* empty pattern */]>;
2811 def BISLED_E0: BISLEDForm<0b10, "bisled\t$$lr, $func", [/* empty pattern */]>;
2812 def BISLED_0D: BISLEDForm<0b01, "bisled\t$$lr, $func", [/* empty pattern */]>;
2813 def BISLED_ED: BISLEDForm<0b00, "bisled\t$$lr, $func", [/* empty pattern */]>;
2814
2815 // Branch indirect and set link. This is the "X-form" address version of a
2816 // function call
2817 def BISL:
2818 BIForm<0b10010101100, "bisl\t$$lr, $func", [(SPUcall R32C:$func)]>;
2819}
2820
2821// Unconditional branches:
2822let isBranch = 1, isTerminator = 1, hasCtrlDep = 1, isBarrier = 1 in {
2823 def BR :
2824 UncondBranch<0b001001100, (outs), (ins brtarget:$dest),
2825 "br\t$dest",
2826 [(br bb:$dest)]>;
2827
2828 // Unconditional, absolute address branch
2829 def BRA:
2830 UncondBranch<0b001100000, (outs), (ins brtarget:$dest),
2831 "bra\t$dest",
2832 [/* no pattern */]>;
2833
2834 // Indirect branch
2835 def BI:
2836 BIForm<0b00010101100, "bi\t$func", [(brind R32C:$func)]>;
2837
2838 // Various branches:
2839 def BRNZ:
2840 RI16Form<0b010000100, (outs), (ins R32C:$rCond, brtarget:$dest),
2841 "brnz\t$rCond,$dest",
2842 BranchResolv,
2843 [(brcond R32C:$rCond, bb:$dest)]>;
2844
2845 def BRZ:
2846 RI16Form<0b000000100, (outs), (ins R32C:$rT, brtarget:$dest),
2847 "brz\t$rT,$dest",
2848 BranchResolv,
2849 [/* no pattern */]>;
2850
2851 def BRHNZ:
2852 RI16Form<0b011000100, (outs), (ins R16C:$rCond, brtarget:$dest),
2853 "brhnz\t$rCond,$dest",
2854 BranchResolv,
2855 [(brcond R16C:$rCond, bb:$dest)]>;
2856
2857 def BRHZ:
2858 RI16Form<0b001000100, (outs), (ins R16C:$rT, brtarget:$dest),
2859 "brhz\t$rT,$dest",
2860 BranchResolv,
2861 [/* no pattern */]>;
2862
2863/*
2864 def BINZ:
2865 BICondForm<0b10010100100, "binz\t$rA, $func",
2866 [(SPUbinz R32C:$rA, R32C:$func)]>;
2867
2868 def BIZ:
2869 BICondForm<0b00010100100, "biz\t$rA, $func",
2870 [(SPUbiz R32C:$rA, R32C:$func)]>;
2871*/
2872}
2873
//===----------------------------------------------------------------------===//
// brcond predicates:
//===----------------------------------------------------------------------===//
Scott Michel8b6b4202007-12-04 22:35:58 +00002877def : Pat<(brcond (i16 (seteq R16C:$rA, 0)), bb:$dest),
2878 (BRHZ R16C:$rA, bb:$dest)>;
2879def : Pat<(brcond (i16 (setne R16C:$rA, 0)), bb:$dest),
2880 (BRHNZ R16C:$rA, bb:$dest)>;
2881
2882def : Pat<(brcond (i32 (seteq R32C:$rA, 0)), bb:$dest),
2883 (BRZ R32C:$rA, bb:$dest)>;
2884def : Pat<(brcond (i32 (setne R32C:$rA, 0)), bb:$dest),
Scott Michel394e26d2008-01-17 20:38:41 +00002885 (BRNZ R32C:$rA, bb:$dest)>;
Scott Michel8b6b4202007-12-04 22:35:58 +00002886
2887let isTerminator = 1, isBarrier = 1 in {
2888 let isReturn = 1 in {
2889 def RET:
2890 RETForm<"bi\t$$lr", [(retflag)]>;
2891 }
2892}
2893
2894//===----------------------------------------------------------------------===//
Scott Michel8b6b4202007-12-04 22:35:58 +00002895// Single precision floating point instructions
2896//===----------------------------------------------------------------------===//
2897
2898def FAv4f32:
2899 RRForm<0b00100011010, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
2900 "fa\t$rT, $rA, $rB", SPrecFP,
2901 [(set (v4f32 VECREG:$rT), (fadd (v4f32 VECREG:$rA), (v4f32 VECREG:$rB)))]>;
2902
2903def FAf32 :
2904 RRForm<0b00100011010, (outs R32FP:$rT), (ins R32FP:$rA, R32FP:$rB),
2905 "fa\t$rT, $rA, $rB", SPrecFP,
2906 [(set R32FP:$rT, (fadd R32FP:$rA, R32FP:$rB))]>;
2907
// Vector single-precision floating subtract.
// NOTE: "fs" encodes as 0b10100011010, matching the scalar FSf32 below;
// the previous 0b00100011010 was FA's opcode and encoded an add.
def FSv4f32:
    RRForm<0b10100011010, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
      "fs\t$rT, $rA, $rB", SPrecFP,
      [(set (v4f32 VECREG:$rT), (fsub (v4f32 VECREG:$rA), (v4f32 VECREG:$rB)))]>;
2912
2913def FSf32 :
2914 RRForm<0b10100011010, (outs R32FP:$rT), (ins R32FP:$rA, R32FP:$rB),
2915 "fs\t$rT, $rA, $rB", SPrecFP,
2916 [(set R32FP:$rT, (fsub R32FP:$rA, R32FP:$rB))]>;
2917
2918// Floating point reciprocal estimate
2919def FREv4f32 :
2920 RRForm_1<0b00011101100, (outs VECREG:$rT), (ins VECREG:$rA),
2921 "frest\t$rT, $rA", SPrecFP,
2922 [(set (v4f32 VECREG:$rT), (SPUreciprocalEst (v4f32 VECREG:$rA)))]>;
2923
2924def FREf32 :
2925 RRForm_1<0b00011101100, (outs R32FP:$rT), (ins R32FP:$rA),
2926 "frest\t$rT, $rA", SPrecFP,
2927 [(set R32FP:$rT, (SPUreciprocalEst R32FP:$rA))]>;
2928
2929// Floating point interpolate (used in conjunction with reciprocal estimate)
2930def FIv4f32 :
2931 RRForm<0b00101011110, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
2932 "fi\t$rT, $rA, $rB", SPrecFP,
2933 [(set (v4f32 VECREG:$rT), (SPUinterpolate (v4f32 VECREG:$rA),
2934 (v4f32 VECREG:$rB)))]>;
2935
2936def FIf32 :
2937 RRForm<0b00101011110, (outs R32FP:$rT), (ins R32FP:$rA, R32FP:$rB),
2938 "fi\t$rT, $rA, $rB", SPrecFP,
2939 [(set R32FP:$rT, (SPUinterpolate R32FP:$rA, R32FP:$rB))]>;
2940
2941// Floating Compare Equal
2942def FCEQf32 :
2943 RRForm<0b01000011110, (outs R32C:$rT), (ins R32FP:$rA, R32FP:$rB),
2944 "fceq\t$rT, $rA, $rB", SPrecFP,
2945 [(set R32C:$rT, (setoeq R32FP:$rA, R32FP:$rB))]>;
2946
2947def FCMEQf32 :
2948 RRForm<0b01010011110, (outs R32C:$rT), (ins R32FP:$rA, R32FP:$rB),
2949 "fcmeq\t$rT, $rA, $rB", SPrecFP,
2950 [(set R32C:$rT, (setoeq (fabs R32FP:$rA), (fabs R32FP:$rB)))]>;
2951
2952def FCGTf32 :
2953 RRForm<0b01000011010, (outs R32C:$rT), (ins R32FP:$rA, R32FP:$rB),
2954 "fcgt\t$rT, $rA, $rB", SPrecFP,
2955 [(set R32C:$rT, (setogt R32FP:$rA, R32FP:$rB))]>;
2956
2957def FCMGTf32 :
2958 RRForm<0b01010011010, (outs R32C:$rT), (ins R32FP:$rA, R32FP:$rB),
2959 "fcmgt\t$rT, $rA, $rB", SPrecFP,
2960 [(set R32C:$rT, (setogt (fabs R32FP:$rA), (fabs R32FP:$rB)))]>;
2961
2962// FP Status and Control Register Write
2963// Why isn't rT a don't care in the ISA?
2964// Should we create a special RRForm_3 for this guy and zero out the rT?
2965def FSCRWf32 :
2966 RRForm_1<0b01011101110, (outs R32FP:$rT), (ins R32FP:$rA),
2967 "fscrwr\t$rA", SPrecFP,
2968 [/* This instruction requires an intrinsic. Note: rT is unused. */]>;
2969
2970// FP Status and Control Register Read
2971def FSCRRf32 :
2972 RRForm_2<0b01011101110, (outs R32FP:$rT), (ins),
2973 "fscrrd\t$rT", SPrecFP,
2974 [/* This instruction requires an intrinsic */]>;
2975
2976// llvm instruction space
2977// How do these map onto cell instructions?
2978// fdiv rA rB
2979// frest rC rB # c = 1/b (both lines)
2980// fi rC rB rC
2981// fm rD rA rC # d = a * 1/b
2982// fnms rB rD rB rA # b = - (d * b - a) --should == 0 in a perfect world
2983// fma rB rB rC rD # b = b * c + d
2984// = -(d *b -a) * c + d
2985// = a * c - c ( a *b *c - a)
2986
2987// fcopysign (???)
2988
2989// Library calls:
2990// These llvm instructions will actually map to library calls.
2991// All that's needed, then, is to check that the appropriate library is
2992// imported and do a brsl to the proper function name.
2993// frem # fmod(x, y): x - (x/y) * y
2994// (Note: fmod(double, double), fmodf(float,float)
2995// fsqrt?
2996// fsin?
2997// fcos?
2998// Unimplemented SPU instruction space
2999// floating reciprocal absolute square root estimate (frsqest)
3000
3001// The following are probably just intrinsics
3002// status and control register write
3003// status and control register read
3004
3005//--------------------------------------
3006// Floating point multiply instructions
3007//--------------------------------------
3008
// Vector single-precision floating multiply.
// NOTE: "fm" encodes as 0b01100011010, matching the scalar FMf32 below;
// the previous 0b00100011010 was FA's opcode and encoded an add.
def FMv4f32:
    RRForm<0b01100011010, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
      "fm\t$rT, $rA, $rB", SPrecFP,
      [(set (v4f32 VECREG:$rT), (fmul (v4f32 VECREG:$rA),
                                      (v4f32 VECREG:$rB)))]>;
3014
3015def FMf32 :
3016 RRForm<0b01100011010, (outs R32FP:$rT), (ins R32FP:$rA, R32FP:$rB),
3017 "fm\t$rT, $rA, $rB", SPrecFP,
3018 [(set R32FP:$rT, (fmul R32FP:$rA, R32FP:$rB))]>;
3019
3020// Floating point multiply and add
3021// e.g. d = c + (a * b)
3022def FMAv4f32:
3023 RRRForm<0b0111, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB, VECREG:$rC),
3024 "fma\t$rT, $rA, $rB, $rC", SPrecFP,
3025 [(set (v4f32 VECREG:$rT),
3026 (fadd (v4f32 VECREG:$rC),
3027 (fmul (v4f32 VECREG:$rA), (v4f32 VECREG:$rB))))]>;
3028
3029def FMAf32:
3030 RRRForm<0b0111, (outs R32FP:$rT), (ins R32FP:$rA, R32FP:$rB, R32FP:$rC),
3031 "fma\t$rT, $rA, $rB, $rC", SPrecFP,
3032 [(set R32FP:$rT, (fadd R32FP:$rC, (fmul R32FP:$rA, R32FP:$rB)))]>;
3033
3034// FP multiply and subtract
3035// Subtracts value in rC from product
3036// res = a * b - c
3037def FMSv4f32 :
3038 RRRForm<0b0111, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB, VECREG:$rC),
3039 "fms\t$rT, $rA, $rB, $rC", SPrecFP,
3040 [(set (v4f32 VECREG:$rT),
3041 (fsub (fmul (v4f32 VECREG:$rA), (v4f32 VECREG:$rB)),
3042 (v4f32 VECREG:$rC)))]>;
3043
3044def FMSf32 :
3045 RRRForm<0b0111, (outs R32FP:$rT), (ins R32FP:$rA, R32FP:$rB, R32FP:$rC),
3046 "fms\t$rT, $rA, $rB, $rC", SPrecFP,
3047 [(set R32FP:$rT,
3048 (fsub (fmul R32FP:$rA, R32FP:$rB), R32FP:$rC))]>;
3049
3050// Floating Negative Mulitply and Subtract
3051// Subtracts product from value in rC
3052// res = fneg(fms a b c)
3053// = - (a * b - c)
3054// = c - a * b
3055// NOTE: subtraction order
3056// fsub a b = a - b
3057// fs a b = b - a?
3058def FNMSf32 :
3059 RRRForm<0b1101, (outs R32FP:$rT), (ins R32FP:$rA, R32FP:$rB, R32FP:$rC),
3060 "fnms\t$rT, $rA, $rB, $rC", SPrecFP,
3061 [(set R32FP:$rT, (fsub R32FP:$rC, (fmul R32FP:$rA, R32FP:$rB)))]>;
3062
3063def FNMSv4f32 :
3064 RRRForm<0b1101, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB, VECREG:$rC),
3065 "fnms\t$rT, $rA, $rB, $rC", SPrecFP,
3066 [(set (v4f32 VECREG:$rT),
3067 (fsub (v4f32 VECREG:$rC),
3068 (fmul (v4f32 VECREG:$rA),
3069 (v4f32 VECREG:$rB))))]>;
3070
3071//--------------------------------------
3072// Floating Point Conversions
3073// Signed conversions:
3074def CSiFv4f32:
3075 CVTIntFPForm<0b0101101110, (outs VECREG:$rT), (ins VECREG:$rA),
3076 "csflt\t$rT, $rA, 0", SPrecFP,
3077 [(set (v4f32 VECREG:$rT), (sint_to_fp (v4i32 VECREG:$rA)))]>;
3078
3079// Convert signed integer to floating point
3080def CSiFf32 :
3081 CVTIntFPForm<0b0101101110, (outs R32FP:$rT), (ins R32C:$rA),
3082 "csflt\t$rT, $rA, 0", SPrecFP,
3083 [(set R32FP:$rT, (sint_to_fp R32C:$rA))]>;
3084
3085// Convert unsigned into to float
3086def CUiFv4f32 :
3087 CVTIntFPForm<0b1101101110, (outs VECREG:$rT), (ins VECREG:$rA),
3088 "cuflt\t$rT, $rA, 0", SPrecFP,
3089 [(set (v4f32 VECREG:$rT), (uint_to_fp (v4i32 VECREG:$rA)))]>;
3090
3091def CUiFf32 :
3092 CVTIntFPForm<0b1101101110, (outs R32FP:$rT), (ins R32C:$rA),
3093 "cuflt\t$rT, $rA, 0", SPrecFP,
3094 [(set R32FP:$rT, (uint_to_fp R32C:$rA))]>;
3095
// Convert float to unsigned int (cfltu)
// Assume that scale = 0
//
// NOTE(review): these defs reuse opcode 0b1101101110, the same value as the
// CUiF* (cuflt) defs above. cfltu and cuflt are distinct instructions, so at
// least one of these encodings must be wrong -- verify against the SPU ISA
// before trusting the binary encoder for these.

def CFUiv4f32 :
    CVTIntFPForm<0b1101101110, (outs VECREG:$rT), (ins VECREG:$rA),
      "cfltu\t$rT, $rA, 0", SPrecFP,
      [(set (v4i32 VECREG:$rT), (fp_to_uint (v4f32 VECREG:$rA)))]>;

def CFUif32 :
    CVTIntFPForm<0b1101101110, (outs R32C:$rT), (ins R32FP:$rA),
      "cfltu\t$rT, $rA, 0", SPrecFP,
      [(set R32C:$rT, (fp_to_uint R32FP:$rA))]>;
3108
// Convert float to signed int (cflts)
// Assume that scale = 0
//
// NOTE(review): opcode 0b1101101110 here duplicates both the cuflt and cfltu
// defs above; three distinct instructions cannot share one opcode. Assembly
// printing is unaffected, but the encoding needs checking against the ISA.

def CFSiv4f32 :
    CVTIntFPForm<0b1101101110, (outs VECREG:$rT), (ins VECREG:$rA),
      "cflts\t$rT, $rA, 0", SPrecFP,
      [(set (v4i32 VECREG:$rT), (fp_to_sint (v4f32 VECREG:$rA)))]>;

def CFSif32 :
    CVTIntFPForm<0b1101101110, (outs R32C:$rT), (ins R32FP:$rA),
      "cflts\t$rT, $rA, 0", SPrecFP,
      [(set R32C:$rT, (fp_to_sint R32FP:$rA))]>;
3121
3122//===----------------------------------------------------------------------==//
3123// Single<->Double precision conversions
3124//===----------------------------------------------------------------------==//
3125
3126// NOTE: We use "vec" name suffix here to avoid confusion (e.g. input is a
3127// v4f32, output is v2f64--which goes in the name?)
3128
3129// Floating point extend single to double
3130// NOTE: Not sure if passing in v4f32 to FESDvec is correct since it
3131// operates on two double-word slots (i.e. 1st and 3rd fp numbers
3132// are ignored).
3133def FESDvec :
3134 RRForm_1<0b00011101110, (outs VECREG:$rT), (ins VECREG:$rA),
3135 "fesd\t$rT, $rA", SPrecFP,
3136 [(set (v2f64 VECREG:$rT), (fextend (v4f32 VECREG:$rA)))]>;
3137
3138def FESDf32 :
3139 RRForm_1<0b00011101110, (outs R64FP:$rT), (ins R32FP:$rA),
3140 "fesd\t$rT, $rA", SPrecFP,
3141 [(set R64FP:$rT, (fextend R32FP:$rA))]>;
3142
3143// Floating point round double to single
3144//def FRDSvec :
3145// RRForm_1<0b10011101110, (outs VECREG:$rT), (ins VECREG:$rA),
3146// "frds\t$rT, $rA,", SPrecFP,
3147// [(set (v4f32 R32FP:$rT), (fround (v2f64 R64FP:$rA)))]>;
3148
3149def FRDSf64 :
3150 RRForm_1<0b10011101110, (outs R32FP:$rT), (ins R64FP:$rA),
3151 "frds\t$rT, $rA", SPrecFP,
3152 [(set R32FP:$rT, (fround R64FP:$rA))]>;
3153
3154//ToDo include anyextend?
3155
3156//===----------------------------------------------------------------------==//
3157// Double precision floating point instructions
3158//===----------------------------------------------------------------------==//
3159def FAf64 :
3160 RRForm<0b00110011010, (outs R64FP:$rT), (ins R64FP:$rA, R64FP:$rB),
3161 "dfa\t$rT, $rA, $rB", DPrecFP,
3162 [(set R64FP:$rT, (fadd R64FP:$rA, R64FP:$rB))]>;
3163
3164def FAv2f64 :
3165 RRForm<0b00110011010, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
3166 "dfa\t$rT, $rA, $rB", DPrecFP,
3167 [(set (v2f64 VECREG:$rT), (fadd (v2f64 VECREG:$rA), (v2f64 VECREG:$rB)))]>;
3168
// dfs: double-precision subtract, rT = rA - rB.
def FSf64 :
    RRForm<0b10100011010, (outs R64FP:$rT), (ins R64FP:$rA, R64FP:$rB),
      "dfs\t$rT, $rA, $rB", DPrecFP,
      [(set R64FP:$rT, (fsub R64FP:$rA, R64FP:$rB))]>;

// dfs, v2f64 form (same opcode as the scalar form above).
def FSv2f64 :
    RRForm<0b10100011010, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
      "dfs\t$rT, $rA, $rB", DPrecFP,
      [(set (v2f64 VECREG:$rT),
            (fsub (v2f64 VECREG:$rA), (v2f64 VECREG:$rB)))]>;
3179
// dfm: double-precision multiply. The scalar and vector defs describe the
// same hardware instruction and must therefore share the same 11-bit opcode
// (cf. the dfa/dfs pairs above, which do).
def FMf64 :
    RRForm<0b01100011010, (outs R64FP:$rT), (ins R64FP:$rA, R64FP:$rB),
      "dfm\t$rT, $rA, $rB", DPrecFP,
      [(set R64FP:$rT, (fmul R64FP:$rA, R64FP:$rB))]>;

// dfm, v2f64 form. Was encoded as 0b00100011010, which disagreed with the
// scalar dfm above; both now use 0b01100011010 so the two forms emit the
// same instruction bits.
def FMv2f64:
    RRForm<0b01100011010, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
      "dfm\t$rT, $rA, $rB", DPrecFP,
      [(set (v2f64 VECREG:$rT),
            (fmul (v2f64 VECREG:$rA), (v2f64 VECREG:$rB)))]>;
3190
// dfma: rT = rA * rB + rC. Destructive form: rC and rT are the same
// register (hence the constraint), and rC is not separately encoded.
def FMAf64:
    RRForm<0b00111010110, (outs R64FP:$rT),
                          (ins R64FP:$rA, R64FP:$rB, R64FP:$rC),
      "dfma\t$rT, $rA, $rB", DPrecFP,
      [(set R64FP:$rT, (fadd R64FP:$rC, (fmul R64FP:$rA, R64FP:$rB)))]>,
    RegConstraint<"$rC = $rT">,
    NoEncode<"$rC">;

// dfma, v2f64 form; same destructive rC = rT tie as the scalar form.
def FMAv2f64:
    RRForm<0b00111010110, (outs VECREG:$rT),
                          (ins VECREG:$rA, VECREG:$rB, VECREG:$rC),
      "dfma\t$rT, $rA, $rB", DPrecFP,
      [(set (v2f64 VECREG:$rT),
            (fadd (v2f64 VECREG:$rC),
                  (fmul (v2f64 VECREG:$rA), (v2f64 VECREG:$rB))))]>,
    RegConstraint<"$rC = $rT">,
    NoEncode<"$rC">;
3208
// dfms: rT = rA * rB - rC. Destructive form: rC occupies the same register
// as rT, so it is tied via RegConstraint and excluded from encoding.
def FMSf64 :
    RRForm<0b10111010110, (outs R64FP:$rT),
                          (ins R64FP:$rA, R64FP:$rB, R64FP:$rC),
      "dfms\t$rT, $rA, $rB", DPrecFP,
      [(set R64FP:$rT, (fsub (fmul R64FP:$rA, R64FP:$rB), R64FP:$rC))]>,
    RegConstraint<"$rC = $rT">,
    NoEncode<"$rC">;

// dfms, v2f64 form. This def previously lacked the "$rC = $rT" tie carried
// by the scalar form and by every other destructive dfma-family vector def
// (FMAv2f64, FNMSv2f64, FNMAv2f64); without it the register allocator could
// put $rC in a register other than $rT, miscompiling dfms. Constraint and
// NoEncode added to match the siblings.
def FMSv2f64 :
    RRForm<0b10111010110, (outs VECREG:$rT),
                          (ins VECREG:$rA, VECREG:$rB, VECREG:$rC),
      "dfms\t$rT, $rA, $rB", DPrecFP,
      [(set (v2f64 VECREG:$rT),
            (fsub (fmul (v2f64 VECREG:$rA), (v2f64 VECREG:$rB)),
                  (v2f64 VECREG:$rC)))]>,
    RegConstraint<"$rC = $rT">,
    NoEncode<"$rC">;
3224
// FNMS: - (a * b - c)
// - (a * b) + c => c - (a * b)
// dfnms, destructive: rC tied to rT, not separately encoded.
def FNMSf64 :
    RRForm<0b01111010110, (outs R64FP:$rT),
                          (ins R64FP:$rA, R64FP:$rB, R64FP:$rC),
      "dfnms\t$rT, $rA, $rB", DPrecFP,
      [(set R64FP:$rT, (fsub R64FP:$rC, (fmul R64FP:$rA, R64FP:$rB)))]>,
    RegConstraint<"$rC = $rT">,
    NoEncode<"$rC">;

// Match the fneg(fms) formulation of the same computation.
def : Pat<(fneg (fsub (fmul R64FP:$rA, R64FP:$rB), R64FP:$rC)),
          (FNMSf64 R64FP:$rA, R64FP:$rB, R64FP:$rC)>;

// dfnms, v2f64 form; same destructive tie as the scalar form.
def FNMSv2f64 :
    RRForm<0b01111010110, (outs VECREG:$rT),
                          (ins VECREG:$rA, VECREG:$rB, VECREG:$rC),
      "dfnms\t$rT, $rA, $rB", DPrecFP,
      [(set (v2f64 VECREG:$rT),
            (fsub (v2f64 VECREG:$rC),
                  (fmul (v2f64 VECREG:$rA),
                        (v2f64 VECREG:$rB))))]>,
    RegConstraint<"$rC = $rT">,
    NoEncode<"$rC">;

// fneg(fms) formulation, vector form.
def : Pat<(fneg (fsub (fmul (v2f64 VECREG:$rA), (v2f64 VECREG:$rB)),
                      (v2f64 VECREG:$rC))),
          (FNMSv2f64 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;
3252
// dfnma: - (a * b + c)
//      = - (a * b) - c
// Destructive: rC tied to rT, not separately encoded.
def FNMAf64 :
    RRForm<0b11111010110, (outs R64FP:$rT),
                          (ins R64FP:$rA, R64FP:$rB, R64FP:$rC),
      "dfnma\t$rT, $rA, $rB", DPrecFP,
      [(set R64FP:$rT, (fneg (fadd R64FP:$rC, (fmul R64FP:$rA, R64FP:$rB))))]>,
    RegConstraint<"$rC = $rT">,
    NoEncode<"$rC">;

// dfnma, v2f64 form; same destructive tie as the scalar form.
def FNMAv2f64 :
    RRForm<0b11111010110, (outs VECREG:$rT),
                          (ins VECREG:$rA, VECREG:$rB, VECREG:$rC),
      "dfnma\t$rT, $rA, $rB", DPrecFP,
      [(set (v2f64 VECREG:$rT),
            (fneg (fadd (v2f64 VECREG:$rC),
                        (fmul (v2f64 VECREG:$rA),
                              (v2f64 VECREG:$rB)))))]>,
    RegConstraint<"$rC = $rT">,
    NoEncode<"$rC">;
3273
3274//===----------------------------------------------------------------------==//
3275// Floating point negation and absolute value
3276//===----------------------------------------------------------------------==//
3277
3278def : Pat<(fneg (v4f32 VECREG:$rA)),
3279 (XORfnegvec (v4f32 VECREG:$rA),
3280 (v4f32 (ILHUv4i32 0x8000)))>;
3281
3282def : Pat<(fneg R32FP:$rA),
3283 (XORfneg32 R32FP:$rA, (ILHUr32 0x8000))>;
3284
3285def : Pat<(fneg (v2f64 VECREG:$rA)),
3286 (XORfnegvec (v2f64 VECREG:$rA),
3287 (v2f64 (ANDBIv16i8 (FSMBIv16i8 0x8080), 0x80)))>;
3288
3289def : Pat<(fneg R64FP:$rA),
3290 (XORfneg64 R64FP:$rA,
3291 (ANDBIv16i8 (FSMBIv16i8 0x8080), 0x80))>;
3292
// Floating point absolute value
//
// fabs clears only the sign bit, so the AND mask must be 0x7fffffff per f32
// word / 0x7fffffffffffffff per f64 doubleword. The previous vector and f64
// masks were built as (ANDBI (FSMBI 0xffff), 0x7f) = 0x7f7f7f7f..., which
// also cleared the top bit of every non-sign byte, corrupting the exponent
// and mantissa. (The scalar f32 pattern was already correct.)

def : Pat<(fabs R32FP:$rA),
          (ANDfabs32 R32FP:$rA, (IOHLr32 (ILHUr32 0x7fff), 0xffff))>;

// v4f32: 0x7fffffff in every word, built the same way as the general
// 32-bit vector constants below (ILHU high halves, IOHL low halves).
def : Pat<(fabs (v4f32 VECREG:$rA)),
          (ANDfabsvec (v4f32 VECREG:$rA),
                      (v4f32 (IOHLvec (v4i32 (ILHUv4i32 0x7fff)), 0xffff)))>;

// f64/v2f64: bytes 0 and 8 of the qword must be 0x7f, all others 0xff.
// FSMBI 0x7f7f zeroes exactly bytes 0 and 8; ORBI 0x7f then sets their low
// seven bits, yielding 0x7fffffffffffffff per doubleword. (ORBIv16i8 is the
// byte-immediate OR defined earlier in this file, cf. the ANDBI/ORBI TODO
// at the top of the file.)
def : Pat<(fabs R64FP:$rA),
          (ANDfabs64 R64FP:$rA, (ORBIv16i8 (FSMBIv16i8 0x7f7f), 0x7f))>;

def : Pat<(fabs (v2f64 VECREG:$rA)),
          (ANDfabsvec (v2f64 VECREG:$rA),
                      (v2f64 (ORBIv16i8 (FSMBIv16i8 0x7f7f), 0x7f)))>;
3308
3309//===----------------------------------------------------------------------===//
3310// Execution, Load NOP (execute NOPs belong in even pipeline, load NOPs belong
3311// in the odd pipeline)
3312//===----------------------------------------------------------------------===//
3313
3314def ENOP : I<(outs), (ins), "enop", ExecNOP> {
3315 let Pattern = [];
3316
3317 let Inst{0-10} = 0b10000000010;
3318 let Inst{11-17} = 0;
3319 let Inst{18-24} = 0;
3320 let Inst{25-31} = 0;
3321}
3322
// lnop: odd-pipeline (load/store pipe) no-op. Differs from ENOP only in
// bit 10 of the opcode.
def LNOP : I<(outs), (ins), "lnop", LoadNOP> {
  let Pattern = [];

  let Inst{0-10} = 0b10000000000;
  let Inst{11-17} = 0;
  let Inst{18-24} = 0;
  let Inst{25-31} = 0;
}
3331
3332//===----------------------------------------------------------------------===//
3333// Bit conversions (type conversions between vector/packed types)
3334// NOTE: Promotions are handled using the XS* instructions. Truncation
3335// is not handled.
3336//===----------------------------------------------------------------------===//
3337def : Pat<(v16i8 (bitconvert (v8i16 VECREG:$src))), (v16i8 VECREG:$src)>;
3338def : Pat<(v16i8 (bitconvert (v4i32 VECREG:$src))), (v16i8 VECREG:$src)>;
3339def : Pat<(v16i8 (bitconvert (v2i64 VECREG:$src))), (v16i8 VECREG:$src)>;
3340def : Pat<(v16i8 (bitconvert (v4f32 VECREG:$src))), (v16i8 VECREG:$src)>;
3341def : Pat<(v16i8 (bitconvert (v2f64 VECREG:$src))), (v16i8 VECREG:$src)>;
3342
3343def : Pat<(v8i16 (bitconvert (v16i8 VECREG:$src))), (v8i16 VECREG:$src)>;
3344def : Pat<(v8i16 (bitconvert (v4i32 VECREG:$src))), (v8i16 VECREG:$src)>;
3345def : Pat<(v8i16 (bitconvert (v2i64 VECREG:$src))), (v8i16 VECREG:$src)>;
3346def : Pat<(v8i16 (bitconvert (v4f32 VECREG:$src))), (v8i16 VECREG:$src)>;
3347def : Pat<(v8i16 (bitconvert (v2f64 VECREG:$src))), (v8i16 VECREG:$src)>;
3348
3349def : Pat<(v4i32 (bitconvert (v16i8 VECREG:$src))), (v4i32 VECREG:$src)>;
3350def : Pat<(v4i32 (bitconvert (v8i16 VECREG:$src))), (v4i32 VECREG:$src)>;
3351def : Pat<(v4i32 (bitconvert (v2i64 VECREG:$src))), (v4i32 VECREG:$src)>;
3352def : Pat<(v4i32 (bitconvert (v4f32 VECREG:$src))), (v4i32 VECREG:$src)>;
3353def : Pat<(v4i32 (bitconvert (v2f64 VECREG:$src))), (v4i32 VECREG:$src)>;
3354
3355def : Pat<(v2i64 (bitconvert (v16i8 VECREG:$src))), (v2i64 VECREG:$src)>;
3356def : Pat<(v2i64 (bitconvert (v8i16 VECREG:$src))), (v2i64 VECREG:$src)>;
3357def : Pat<(v2i64 (bitconvert (v4i32 VECREG:$src))), (v2i64 VECREG:$src)>;
3358def : Pat<(v2i64 (bitconvert (v4f32 VECREG:$src))), (v2i64 VECREG:$src)>;
3359def : Pat<(v2i64 (bitconvert (v2f64 VECREG:$src))), (v2i64 VECREG:$src)>;
3360
3361def : Pat<(v4f32 (bitconvert (v16i8 VECREG:$src))), (v4f32 VECREG:$src)>;
3362def : Pat<(v4f32 (bitconvert (v8i16 VECREG:$src))), (v4f32 VECREG:$src)>;
3363def : Pat<(v4f32 (bitconvert (v2i64 VECREG:$src))), (v4f32 VECREG:$src)>;
3364def : Pat<(v4f32 (bitconvert (v4i32 VECREG:$src))), (v4f32 VECREG:$src)>;
3365def : Pat<(v4f32 (bitconvert (v2f64 VECREG:$src))), (v4f32 VECREG:$src)>;
3366
3367def : Pat<(v2f64 (bitconvert (v16i8 VECREG:$src))), (v2f64 VECREG:$src)>;
3368def : Pat<(v2f64 (bitconvert (v8i16 VECREG:$src))), (v2f64 VECREG:$src)>;
3369def : Pat<(v2f64 (bitconvert (v4i32 VECREG:$src))), (v2f64 VECREG:$src)>;
3370def : Pat<(v2f64 (bitconvert (v2i64 VECREG:$src))), (v2f64 VECREG:$src)>;
3371def : Pat<(v2f64 (bitconvert (v2f64 VECREG:$src))), (v2f64 VECREG:$src)>;
3372
// Scalar int<->fp bitcasts: no instruction is emitted, the value is simply
// re-typed into the FP register class.
// NOTE(review): the source is matched as R32C/R64C but rewritten as
// R32FP/R64FP with the same $src name -- this relies on tblgen accepting the
// cross-register-class reference; verify it still resolves as intended.
def : Pat<(f32 (bitconvert (i32 R32C:$src))), (f32 R32FP:$src)>;
def : Pat<(f64 (bitconvert (i64 R64C:$src))), (f64 R64FP:$src)>;
Scott Michel8b6b4202007-12-04 22:35:58 +00003375
3376//===----------------------------------------------------------------------===//
3377// Instruction patterns:
3378//===----------------------------------------------------------------------===//
3379
3380// General 32-bit constants:
3381def : Pat<(i32 imm:$imm),
3382 (IOHLr32 (ILHUr32 (HI16 imm:$imm)), (LO16 imm:$imm))>;
3383
3384// Single precision float constants:
3385def : Pat<(SPUFPconstant (f32 fpimm:$imm)),
3386 (IOHLf32 (ILHUf32 (HI16_f32 fpimm:$imm)), (LO16_f32 fpimm:$imm))>;
3387
3388// General constant 32-bit vectors
3389def : Pat<(v4i32 v4i32Imm:$imm),
3390 (IOHLvec (v4i32 (ILHUv4i32 (HI16_vec v4i32Imm:$imm))),
3391 (LO16_vec v4i32Imm:$imm))>;
Scott Michel438be252007-12-17 22:32:34 +00003392
3393// 8-bit constants
3394def : Pat<(i8 imm:$imm),
3395 (ILHr8 imm:$imm)>;
Scott Michel8b6b4202007-12-04 22:35:58 +00003396
3397//===----------------------------------------------------------------------===//
3398// Call instruction patterns:
3399//===----------------------------------------------------------------------===//
3400// Return void
3401def : Pat<(ret),
3402 (RET)>;
3403
3404//===----------------------------------------------------------------------===//
3405// Zero/Any/Sign extensions
3406//===----------------------------------------------------------------------===//
3407
3408// zext 1->32: Zero extend i1 to i32
3409def : Pat<(SPUextract_i1_zext R32C:$rSrc),
3410 (ANDIr32 R32C:$rSrc, 0x1)>;
3411
3412// sext 8->32: Sign extend bytes to words
3413def : Pat<(sext_inreg R32C:$rSrc, i8),
3414 (XSHWr32 (XSBHr32 R32C:$rSrc))>;
3415
Scott Michel438be252007-12-17 22:32:34 +00003416def : Pat<(i32 (sext R8C:$rSrc)),
3417 (XSHWr16 (XSBHr8 R8C:$rSrc))>;
3418
Scott Michel8b6b4202007-12-04 22:35:58 +00003419def : Pat<(SPUextract_i8_sext VECREG:$rSrc),
3420 (XSHWr32 (XSBHr32 (ORi32_v4i32 (v4i32 VECREG:$rSrc),
3421 (v4i32 VECREG:$rSrc))))>;
3422
Scott Michel438be252007-12-17 22:32:34 +00003423// zext 8->16: Zero extend bytes to halfwords
3424def : Pat<(i16 (zext R8C:$rSrc)),
3425 (ANDHI1To2 R8C:$rSrc, 0xff)>;
3426
3427// zext 8->32 from preferred slot in load/store
Scott Michel8b6b4202007-12-04 22:35:58 +00003428def : Pat<(SPUextract_i8_zext VECREG:$rSrc),
3429 (ANDIr32 (ORi32_v4i32 (v4i32 VECREG:$rSrc), (v4i32 VECREG:$rSrc)),
3430 0xff)>;
3431
Scott Michel438be252007-12-17 22:32:34 +00003432// zext 8->32: Zero extend bytes to words
3433def : Pat<(i32 (zext R8C:$rSrc)),
3434 (ANDI1To4 R8C:$rSrc, 0xff)>;
3435
3436// anyext 8->16: Extend 8->16 bits, irrespective of sign
3437def : Pat<(i16 (anyext R8C:$rSrc)),
3438 (ORHI1To2 R8C:$rSrc, 0)>;
3439
3440// anyext 8->32: Extend 8->32 bits, irrespective of sign
3441def : Pat<(i32 (anyext R8C:$rSrc)),
3442 (ORI1To4 R8C:$rSrc, 0)>;
3443
Scott Michel8b6b4202007-12-04 22:35:58 +00003444// zext 16->32: Zero extend halfwords to words (note that we have to juggle the
3445// 0xffff constant since it will not fit into an immediate.)
3446def : Pat<(i32 (zext R16C:$rSrc)),
3447 (AND2To4 R16C:$rSrc, (ILAr32 0xffff))>;
3448
3449def : Pat<(i32 (zext (and R16C:$rSrc, 0xf))),
3450 (ANDI2To4 R16C:$rSrc, 0xf)>;
3451
3452def : Pat<(i32 (zext (and R16C:$rSrc, 0xff))),
3453 (ANDI2To4 R16C:$rSrc, 0xff)>;
3454
3455def : Pat<(i32 (zext (and R16C:$rSrc, 0xfff))),
3456 (ANDI2To4 R16C:$rSrc, 0xfff)>;
3457
3458// anyext 16->32: Extend 16->32 bits, irrespective of sign
3459def : Pat<(i32 (anyext R16C:$rSrc)),
3460 (ORI2To4 R16C:$rSrc, 0)>;
3461
3462//===----------------------------------------------------------------------===//
3463// Address translation: SPU, like PPC, has to split addresses into high and
3464// low parts in order to load them into a register.
3465//===----------------------------------------------------------------------===//
3466
Scott Micheldbac4cf2008-01-11 02:53:15 +00003467def : Pat<(SPUaform tglobaladdr:$in, 0), (ILAlsa tglobaladdr:$in)>;
3468def : Pat<(SPUxform tglobaladdr:$in, 0),
3469 (IOHLlo (ILHUhi tglobaladdr:$in), tglobaladdr:$in)>;
Scott Michel394e26d2008-01-17 20:38:41 +00003470
Scott Micheldbac4cf2008-01-11 02:53:15 +00003471def : Pat<(SPUaform tjumptable:$in, 0), (ILAlsa tjumptable:$in)>;
3472def : Pat<(SPUxform tjumptable:$in, 0),
3473 (IOHLlo (ILHUhi tjumptable:$in), tjumptable:$in)>;
Scott Michel394e26d2008-01-17 20:38:41 +00003474
3475def : Pat<(SPUhi tconstpool:$in , 0), (ILHUhi tconstpool:$in)>;
3476def : Pat<(SPUlo tconstpool:$in , 0), (ILAlsa tconstpool:$in)>;
3477def : Pat<(SPUaform tconstpool:$in, 0), (ILAlsa tconstpool:$in)>;
3478// tblgen bug prevents this from working.
3479// def : Pat<(SPUxform tconstpool:$in, 0),
3480// (IOHLlo (ILHUhi tconstpool:$in), tconstpool:$in)>;
Scott Michel8b6b4202007-12-04 22:35:58 +00003481
Scott Michel8b6b4202007-12-04 22:35:58 +00003482// Instrinsics:
3483include "CellSDKIntrinsics.td"