//==- SPUInstrInfo.td - Describe the Cell SPU Instructions -*- tablegen -*-==//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
// Cell SPU Instructions:
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// TODO Items (not urgent today, but would be nice, low priority)
//
// ANDBI, ORBI: SPU constructs a 4-byte constant for these instructions by
// concatenating the byte argument b as "bbbb". Could recognize this bit pattern
// in 16-bit and 32-bit constants and reduce instruction count.
//===----------------------------------------------------------------------===//
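// Illustrative note (not in the original file): ANDBI/ORBI replicate their
// 8-bit immediate across every byte of the quadword, so per 32-bit word
//
//   andbi   $3, $4, 0x5a    ; roughly $3 = $4 & 0x5a5a5a5a (byte-wise)
//
// Recognizing 16/32-bit masks of that "bbbb" shape would let one ANDBI/ORBI
// replace an ILHU/IOHL constant load followed by AND/OR.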

//===----------------------------------------------------------------------===//
// Pseudo instructions:
//===----------------------------------------------------------------------===//

let hasCtrlDep = 1, Defs = [R1], Uses = [R1] in {
  def ADJCALLSTACKDOWN : Pseudo<(outs), (ins u16imm:$amt),
                                "${:comment} ADJCALLSTACKDOWN",
                                [(callseq_start imm:$amt)]>;
  def ADJCALLSTACKUP : Pseudo<(outs), (ins u16imm:$amt),
                              "${:comment} ADJCALLSTACKUP",
                              [(callseq_end imm:$amt)]>;
}

//===----------------------------------------------------------------------===//
// DWARF debugging Pseudo Instructions
//===----------------------------------------------------------------------===//

def DWARF_LOC : Pseudo<(outs), (ins i32imm:$line, i32imm:$col, i32imm:$file),
                       "${:comment} .loc $file, $line, $col",
                       [(dwarf_loc (i32 imm:$line), (i32 imm:$col),
                                   (i32 imm:$file))]>;

//===----------------------------------------------------------------------===//
// Loads:
// NB: The ordering is actually important, since the instruction selection
// will try each of the instructions in sequence, i.e., the D-form first with
// the 10-bit displacement, then the A-form with the 16-bit displacement, and
// finally the X-form with register-register addressing.
//===----------------------------------------------------------------------===//
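// Illustrative examples of the three addressing forms (not in the original
// file):
//
//   lqd   $3, 16($4)        ; D-form:  10-bit signed displacement off $4
//   lqa   $3, label         ; A-form:  16-bit absolute local-store address
//   lqx   $3, $4, $5        ; X-form:  address is $4 + $5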

let isSimpleLoad = 1 in {
  def LQDv16i8:
      RI10Form<0b00101100, (outs VECREG:$rT), (ins memri10:$src),
               "lqd\t$rT, $src", LoadStore,
               [(set (v16i8 VECREG:$rT), (load dform_addr:$src))]>;

  def LQDv8i16:
      RI10Form<0b00101100, (outs VECREG:$rT), (ins memri10:$src),
               "lqd\t$rT, $src", LoadStore,
               [(set (v8i16 VECREG:$rT), (load dform_addr:$src))]>;

  def LQDv4i32:
      RI10Form<0b00101100, (outs VECREG:$rT), (ins memri10:$src),
               "lqd\t$rT, $src", LoadStore,
               [(set (v4i32 VECREG:$rT), (load dform_addr:$src))]>;

  def LQDv2i64:
      RI10Form<0b00101100, (outs VECREG:$rT), (ins memri10:$src),
               "lqd\t$rT, $src", LoadStore,
               [(set (v2i64 VECREG:$rT), (load dform_addr:$src))]>;

  def LQDv4f32:
      RI10Form<0b00101100, (outs VECREG:$rT), (ins memri10:$src),
               "lqd\t$rT, $src", LoadStore,
               [(set (v4f32 VECREG:$rT), (load dform_addr:$src))]>;

  def LQDv2f64:
      RI10Form<0b00101100, (outs VECREG:$rT), (ins memri10:$src),
               "lqd\t$rT, $src", LoadStore,
               [(set (v2f64 VECREG:$rT), (load dform_addr:$src))]>;

  def LQDr128:
      RI10Form<0b00101100, (outs GPRC:$rT), (ins memri10:$src),
               "lqd\t$rT, $src", LoadStore,
               [(set GPRC:$rT, (load dform_addr:$src))]>;

  def LQDr64:
      RI10Form<0b00101100, (outs R64C:$rT), (ins memri10:$src),
               "lqd\t$rT, $src", LoadStore,
               [(set R64C:$rT, (load dform_addr:$src))]>;

  def LQDr32:
      RI10Form<0b00101100, (outs R32C:$rT), (ins memri10:$src),
               "lqd\t$rT, $src", LoadStore,
               [(set R32C:$rT, (load dform_addr:$src))]>;

  // Floating Point
  def LQDf32:
      RI10Form<0b00101100, (outs R32FP:$rT), (ins memri10:$src),
               "lqd\t$rT, $src", LoadStore,
               [(set R32FP:$rT, (load dform_addr:$src))]>;

  def LQDf64:
      RI10Form<0b00101100, (outs R64FP:$rT), (ins memri10:$src),
               "lqd\t$rT, $src", LoadStore,
               [(set R64FP:$rT, (load dform_addr:$src))]>;
  // END Floating Point

  def LQDr16:
      RI10Form<0b00101100, (outs R16C:$rT), (ins memri10:$src),
               "lqd\t$rT, $src", LoadStore,
               [(set R16C:$rT, (load dform_addr:$src))]>;

  def LQDr8:
      RI10Form<0b00101100, (outs R8C:$rT), (ins memri10:$src),
               "lqd\t$rT, $src", LoadStore,
               [(set R8C:$rT, (load dform_addr:$src))]>;

  def LQAv16i8:
      RI16Form<0b100001100, (outs VECREG:$rT), (ins addr256k:$src),
               "lqa\t$rT, $src", LoadStore,
               [(set (v16i8 VECREG:$rT), (load aform_addr:$src))]>;

  def LQAv8i16:
      RI16Form<0b100001100, (outs VECREG:$rT), (ins addr256k:$src),
               "lqa\t$rT, $src", LoadStore,
               [(set (v8i16 VECREG:$rT), (load aform_addr:$src))]>;

  def LQAv4i32:
      RI16Form<0b100001100, (outs VECREG:$rT), (ins addr256k:$src),
               "lqa\t$rT, $src", LoadStore,
               [(set (v4i32 VECREG:$rT), (load aform_addr:$src))]>;

  def LQAv2i64:
      RI16Form<0b100001100, (outs VECREG:$rT), (ins addr256k:$src),
               "lqa\t$rT, $src", LoadStore,
               [(set (v2i64 VECREG:$rT), (load aform_addr:$src))]>;

  def LQAv4f32:
      RI16Form<0b100001100, (outs VECREG:$rT), (ins addr256k:$src),
               "lqa\t$rT, $src", LoadStore,
               [(set (v4f32 VECREG:$rT), (load aform_addr:$src))]>;

  def LQAv2f64:
      RI16Form<0b100001100, (outs VECREG:$rT), (ins addr256k:$src),
               "lqa\t$rT, $src", LoadStore,
               [(set (v2f64 VECREG:$rT), (load aform_addr:$src))]>;

  def LQAr128:
      RI16Form<0b100001100, (outs GPRC:$rT), (ins addr256k:$src),
               "lqa\t$rT, $src", LoadStore,
               [(set GPRC:$rT, (load aform_addr:$src))]>;

  def LQAr64:
      RI16Form<0b100001100, (outs R64C:$rT), (ins addr256k:$src),
               "lqa\t$rT, $src", LoadStore,
               [(set R64C:$rT, (load aform_addr:$src))]>;

  def LQAr32:
      RI16Form<0b100001100, (outs R32C:$rT), (ins addr256k:$src),
               "lqa\t$rT, $src", LoadStore,
               [(set R32C:$rT, (load aform_addr:$src))]>;

  def LQAf32:
      RI16Form<0b100001100, (outs R32FP:$rT), (ins addr256k:$src),
               "lqa\t$rT, $src", LoadStore,
               [(set R32FP:$rT, (load aform_addr:$src))]>;

  def LQAf64:
      RI16Form<0b100001100, (outs R64FP:$rT), (ins addr256k:$src),
               "lqa\t$rT, $src", LoadStore,
               [(set R64FP:$rT, (load aform_addr:$src))]>;

  def LQAr16:
      RI16Form<0b100001100, (outs R16C:$rT), (ins addr256k:$src),
               "lqa\t$rT, $src", LoadStore,
               [(set R16C:$rT, (load aform_addr:$src))]>;

  def LQAr8:
      RI16Form<0b100001100, (outs R8C:$rT), (ins addr256k:$src),
               "lqa\t$rT, $src", LoadStore,
               [(set R8C:$rT, (load aform_addr:$src))]>;

  def LQXv16i8:
      RRForm<0b00100011100, (outs VECREG:$rT), (ins memrr:$src),
             "lqx\t$rT, $src", LoadStore,
             [(set (v16i8 VECREG:$rT), (load xform_addr:$src))]>;

  def LQXv8i16:
      RRForm<0b00100011100, (outs VECREG:$rT), (ins memrr:$src),
             "lqx\t$rT, $src", LoadStore,
             [(set (v8i16 VECREG:$rT), (load xform_addr:$src))]>;

  def LQXv4i32:
      RRForm<0b00100011100, (outs VECREG:$rT), (ins memrr:$src),
             "lqx\t$rT, $src", LoadStore,
             [(set (v4i32 VECREG:$rT), (load xform_addr:$src))]>;

  def LQXv2i64:
      RRForm<0b00100011100, (outs VECREG:$rT), (ins memrr:$src),
             "lqx\t$rT, $src", LoadStore,
             [(set (v2i64 VECREG:$rT), (load xform_addr:$src))]>;

  def LQXv4f32:
      RRForm<0b00100011100, (outs VECREG:$rT), (ins memrr:$src),
             "lqx\t$rT, $src", LoadStore,
             [(set (v4f32 VECREG:$rT), (load xform_addr:$src))]>;

  def LQXv2f64:
      RRForm<0b00100011100, (outs VECREG:$rT), (ins memrr:$src),
             "lqx\t$rT, $src", LoadStore,
             [(set (v2f64 VECREG:$rT), (load xform_addr:$src))]>;

  def LQXr128:
      RRForm<0b00100011100, (outs GPRC:$rT), (ins memrr:$src),
             "lqx\t$rT, $src", LoadStore,
             [(set GPRC:$rT, (load xform_addr:$src))]>;

  def LQXr64:
      RRForm<0b00100011100, (outs R64C:$rT), (ins memrr:$src),
             "lqx\t$rT, $src", LoadStore,
             [(set R64C:$rT, (load xform_addr:$src))]>;

  def LQXr32:
      RRForm<0b00100011100, (outs R32C:$rT), (ins memrr:$src),
             "lqx\t$rT, $src", LoadStore,
             [(set R32C:$rT, (load xform_addr:$src))]>;

  def LQXf32:
      RRForm<0b00100011100, (outs R32FP:$rT), (ins memrr:$src),
             "lqx\t$rT, $src", LoadStore,
             [(set R32FP:$rT, (load xform_addr:$src))]>;

  def LQXf64:
      RRForm<0b00100011100, (outs R64FP:$rT), (ins memrr:$src),
             "lqx\t$rT, $src", LoadStore,
             [(set R64FP:$rT, (load xform_addr:$src))]>;

  def LQXr16:
      RRForm<0b00100011100, (outs R16C:$rT), (ins memrr:$src),
             "lqx\t$rT, $src", LoadStore,
             [(set R16C:$rT, (load xform_addr:$src))]>;

  def LQXr8:
      RRForm<0b00100011100, (outs R8C:$rT), (ins memrr:$src),
             "lqx\t$rT, $src", LoadStore,
             [(set R8C:$rT, (load xform_addr:$src))]>;

/* Load quadword, PC relative: Not much use at this point in time.
   Might be of use later for relocatable code.
  def LQR : RI16Form<0b111001100, (outs VECREG:$rT), (ins s16imm:$disp),
                     "lqr\t$rT, $disp", LoadStore,
                     [(set VECREG:$rT, (load iaddr:$disp))]>;
 */
}

//===----------------------------------------------------------------------===//
// Stores:
//===----------------------------------------------------------------------===//

def STQDv16i8 : RI10Form<0b00100100, (outs), (ins VECREG:$rT, memri10:$src),
    "stqd\t$rT, $src", LoadStore,
    [(store (v16i8 VECREG:$rT), dform_addr:$src)]>;

def STQDv8i16 : RI10Form<0b00100100, (outs), (ins VECREG:$rT, memri10:$src),
    "stqd\t$rT, $src", LoadStore,
    [(store (v8i16 VECREG:$rT), dform_addr:$src)]>;

def STQDv4i32 : RI10Form<0b00100100, (outs), (ins VECREG:$rT, memri10:$src),
    "stqd\t$rT, $src", LoadStore,
    [(store (v4i32 VECREG:$rT), dform_addr:$src)]>;

def STQDv2i64 : RI10Form<0b00100100, (outs), (ins VECREG:$rT, memri10:$src),
    "stqd\t$rT, $src", LoadStore,
    [(store (v2i64 VECREG:$rT), dform_addr:$src)]>;

def STQDv4f32 : RI10Form<0b00100100, (outs), (ins VECREG:$rT, memri10:$src),
    "stqd\t$rT, $src", LoadStore,
    [(store (v4f32 VECREG:$rT), dform_addr:$src)]>;

def STQDv2f64 : RI10Form<0b00100100, (outs), (ins VECREG:$rT, memri10:$src),
    "stqd\t$rT, $src", LoadStore,
    [(store (v2f64 VECREG:$rT), dform_addr:$src)]>;

def STQDr128 : RI10Form<0b00100100, (outs), (ins GPRC:$rT, memri10:$src),
    "stqd\t$rT, $src", LoadStore,
    [(store GPRC:$rT, dform_addr:$src)]>;

def STQDr64 : RI10Form<0b00100100, (outs), (ins R64C:$rT, memri10:$src),
    "stqd\t$rT, $src", LoadStore,
    [(store R64C:$rT, dform_addr:$src)]>;

def STQDr32 : RI10Form<0b00100100, (outs), (ins R32C:$rT, memri10:$src),
    "stqd\t$rT, $src", LoadStore,
    [(store R32C:$rT, dform_addr:$src)]>;

// Floating Point
def STQDf32 : RI10Form<0b00100100, (outs), (ins R32FP:$rT, memri10:$src),
    "stqd\t$rT, $src", LoadStore,
    [(store R32FP:$rT, dform_addr:$src)]>;

def STQDf64 : RI10Form<0b00100100, (outs), (ins R64FP:$rT, memri10:$src),
    "stqd\t$rT, $src", LoadStore,
    [(store R64FP:$rT, dform_addr:$src)]>;

def STQDr16 : RI10Form<0b00100100, (outs), (ins R16C:$rT, memri10:$src),
    "stqd\t$rT, $src", LoadStore,
    [(store R16C:$rT, dform_addr:$src)]>;

def STQDr8 : RI10Form<0b00100100, (outs), (ins R8C:$rT, memri10:$src),
    "stqd\t$rT, $src", LoadStore,
    [(store R8C:$rT, dform_addr:$src)]>;

def STQAv16i8 : RI10Form<0b00100100, (outs), (ins VECREG:$rT, addr256k:$src),
    "stqa\t$rT, $src", LoadStore,
    [(store (v16i8 VECREG:$rT), aform_addr:$src)]>;

def STQAv8i16 : RI10Form<0b00100100, (outs), (ins VECREG:$rT, addr256k:$src),
    "stqa\t$rT, $src", LoadStore,
    [(store (v8i16 VECREG:$rT), aform_addr:$src)]>;

def STQAv4i32 : RI10Form<0b00100100, (outs), (ins VECREG:$rT, addr256k:$src),
    "stqa\t$rT, $src", LoadStore,
    [(store (v4i32 VECREG:$rT), aform_addr:$src)]>;

def STQAv2i64 : RI10Form<0b00100100, (outs), (ins VECREG:$rT, addr256k:$src),
    "stqa\t$rT, $src", LoadStore,
    [(store (v2i64 VECREG:$rT), aform_addr:$src)]>;

def STQAv4f32 : RI10Form<0b00100100, (outs), (ins VECREG:$rT, addr256k:$src),
    "stqa\t$rT, $src", LoadStore,
    [(store (v4f32 VECREG:$rT), aform_addr:$src)]>;

def STQAv2f64 : RI10Form<0b00100100, (outs), (ins VECREG:$rT, addr256k:$src),
    "stqa\t$rT, $src", LoadStore,
    [(store (v2f64 VECREG:$rT), aform_addr:$src)]>;

def STQAr128 : RI10Form<0b00100100, (outs), (ins GPRC:$rT, addr256k:$src),
    "stqa\t$rT, $src", LoadStore,
    [(store GPRC:$rT, aform_addr:$src)]>;

def STQAr64 : RI10Form<0b00100100, (outs), (ins R64C:$rT, addr256k:$src),
    "stqa\t$rT, $src", LoadStore,
    [(store R64C:$rT, aform_addr:$src)]>;

def STQAr32 : RI10Form<0b00100100, (outs), (ins R32C:$rT, addr256k:$src),
    "stqa\t$rT, $src", LoadStore,
    [(store R32C:$rT, aform_addr:$src)]>;

// Floating Point
def STQAf32 : RI10Form<0b00100100, (outs), (ins R32FP:$rT, addr256k:$src),
    "stqa\t$rT, $src", LoadStore,
    [(store R32FP:$rT, aform_addr:$src)]>;

def STQAf64 : RI10Form<0b00100100, (outs), (ins R64FP:$rT, addr256k:$src),
    "stqa\t$rT, $src", LoadStore,
    [(store R64FP:$rT, aform_addr:$src)]>;

def STQAr16 : RI10Form<0b00100100, (outs), (ins R16C:$rT, addr256k:$src),
    "stqa\t$rT, $src", LoadStore,
    [(store R16C:$rT, aform_addr:$src)]>;

def STQAr8 : RI10Form<0b00100100, (outs), (ins R8C:$rT, addr256k:$src),
    "stqa\t$rT, $src", LoadStore,
    [(store R8C:$rT, aform_addr:$src)]>;

def STQXv16i8 : RI10Form<0b00100100, (outs), (ins VECREG:$rT, memrr:$src),
    "stqx\t$rT, $src", LoadStore,
    [(store (v16i8 VECREG:$rT), xform_addr:$src)]>;

def STQXv8i16 : RI10Form<0b00100100, (outs), (ins VECREG:$rT, memrr:$src),
    "stqx\t$rT, $src", LoadStore,
    [(store (v8i16 VECREG:$rT), xform_addr:$src)]>;

def STQXv4i32 : RI10Form<0b00100100, (outs), (ins VECREG:$rT, memrr:$src),
    "stqx\t$rT, $src", LoadStore,
    [(store (v4i32 VECREG:$rT), xform_addr:$src)]>;

def STQXv2i64 : RI10Form<0b00100100, (outs), (ins VECREG:$rT, memrr:$src),
    "stqx\t$rT, $src", LoadStore,
    [(store (v2i64 VECREG:$rT), xform_addr:$src)]>;

def STQXv4f32 : RI10Form<0b00100100, (outs), (ins VECREG:$rT, memrr:$src),
    "stqx\t$rT, $src", LoadStore,
    [(store (v4f32 VECREG:$rT), xform_addr:$src)]>;

def STQXv2f64 : RI10Form<0b00100100, (outs), (ins VECREG:$rT, memrr:$src),
    "stqx\t$rT, $src", LoadStore,
    [(store (v2f64 VECREG:$rT), xform_addr:$src)]>;

def STQXr128 : RI10Form<0b00100100, (outs), (ins GPRC:$rT, memrr:$src),
    "stqx\t$rT, $src", LoadStore,
    [(store GPRC:$rT, xform_addr:$src)]>;

def STQXr64:
    RI10Form<0b00100100, (outs), (ins R64C:$rT, memrr:$src),
    "stqx\t$rT, $src", LoadStore,
    [(store R64C:$rT, xform_addr:$src)]>;

def STQXr32:
    RI10Form<0b00100100, (outs), (ins R32C:$rT, memrr:$src),
    "stqx\t$rT, $src", LoadStore,
    [(store R32C:$rT, xform_addr:$src)]>;

// Floating Point
def STQXf32:
    RI10Form<0b00100100, (outs), (ins R32FP:$rT, memrr:$src),
    "stqx\t$rT, $src", LoadStore,
    [(store R32FP:$rT, xform_addr:$src)]>;

def STQXf64:
    RI10Form<0b00100100, (outs), (ins R64FP:$rT, memrr:$src),
    "stqx\t$rT, $src", LoadStore,
    [(store R64FP:$rT, xform_addr:$src)]>;

def STQXr16:
    RI10Form<0b00100100, (outs), (ins R16C:$rT, memrr:$src),
    "stqx\t$rT, $src", LoadStore,
    [(store R16C:$rT, xform_addr:$src)]>;

def STQXr8:
    RI10Form<0b00100100, (outs), (ins R8C:$rT, memrr:$src),
    "stqx\t$rT, $src", LoadStore,
    [(store R8C:$rT, xform_addr:$src)]>;

/* Store quadword, PC relative: Not much use at this point in time. Might
   be useful for relocatable code.
def STQR : RI16Form<0b111000100, (outs), (ins VECREG:$rT, s16imm:$disp),
    "stqr\t$rT, $disp", LoadStore,
    [(store VECREG:$rT, iaddr:$disp)]>;
*/

//===----------------------------------------------------------------------===//
// Generate Controls for Insertion:
//===----------------------------------------------------------------------===//

def CBD :
    RI7Form<0b10101111100, (outs VECREG:$rT), (ins memri7:$src),
    "cbd\t$rT, $src", ShuffleOp,
    [(set (v16i8 VECREG:$rT), (SPUvecinsmask dform2_addr:$src))]>;

def CBX : RRForm<0b00101011100, (outs VECREG:$rT), (ins memrr:$src),
    "cbx\t$rT, $src", ShuffleOp,
    [(set (v16i8 VECREG:$rT), (SPUvecinsmask xform_addr:$src))]>;

def CHD : RI7Form<0b10101111100, (outs VECREG:$rT), (ins memri7:$src),
    "chd\t$rT, $src", ShuffleOp,
    [(set (v8i16 VECREG:$rT), (SPUvecinsmask dform2_addr:$src))]>;

def CHX : RRForm<0b10101011100, (outs VECREG:$rT), (ins memrr:$src),
    "chx\t$rT, $src", ShuffleOp,
    [(set (v8i16 VECREG:$rT), (SPUvecinsmask xform_addr:$src))]>;

def CWD : RI7Form<0b01101111100, (outs VECREG:$rT), (ins memri7:$src),
    "cwd\t$rT, $src", ShuffleOp,
    [(set (v4i32 VECREG:$rT), (SPUvecinsmask dform2_addr:$src))]>;

def CWX : RRForm<0b01101011100, (outs VECREG:$rT), (ins memrr:$src),
    "cwx\t$rT, $src", ShuffleOp,
    [(set (v4i32 VECREG:$rT), (SPUvecinsmask xform_addr:$src))]>;

def CDD : RI7Form<0b11101111100, (outs VECREG:$rT), (ins memri7:$src),
    "cdd\t$rT, $src", ShuffleOp,
    [(set (v2i64 VECREG:$rT), (SPUvecinsmask dform2_addr:$src))]>;

def CDX : RRForm<0b11101011100, (outs VECREG:$rT), (ins memrr:$src),
    "cdx\t$rT, $src", ShuffleOp,
    [(set (v2i64 VECREG:$rT), (SPUvecinsmask xform_addr:$src))]>;

//===----------------------------------------------------------------------===//
// Constant formation:
//===----------------------------------------------------------------------===//

def ILHv8i16:
  RI16Form<0b110000010, (outs VECREG:$rT), (ins s16imm:$val),
    "ilh\t$rT, $val", ImmLoad,
    [(set (v8i16 VECREG:$rT), (v8i16 v8i16SExt16Imm:$val))]>;

def ILHr16:
  RI16Form<0b110000010, (outs R16C:$rT), (ins s16imm:$val),
    "ilh\t$rT, $val", ImmLoad,
    [(set R16C:$rT, immSExt16:$val)]>;

// Cell SPU doesn't have a native 8-bit immediate load, but ILH works ("with
// the right constant")
def ILHr8:
  RI16Form<0b110000010, (outs R8C:$rT), (ins s16imm_i8:$val),
    "ilh\t$rT, $val", ImmLoad,
    [(set R8C:$rT, immSExt8:$val)]>;

// IL does sign extension!
def ILr64:
  RI16Form<0b100000010, (outs R64C:$rT), (ins s16imm_i64:$val),
    "il\t$rT, $val", ImmLoad,
    [(set R64C:$rT, immSExt16:$val)]>;

def ILv2i64:
  RI16Form<0b100000010, (outs VECREG:$rT), (ins s16imm_i64:$val),
    "il\t$rT, $val", ImmLoad,
    [(set VECREG:$rT, (v2i64 v2i64SExt16Imm:$val))]>;

def ILv4i32:
  RI16Form<0b100000010, (outs VECREG:$rT), (ins s16imm:$val),
    "il\t$rT, $val", ImmLoad,
    [(set VECREG:$rT, (v4i32 v4i32SExt16Imm:$val))]>;

def ILr32:
  RI16Form<0b100000010, (outs R32C:$rT), (ins s16imm_i32:$val),
    "il\t$rT, $val", ImmLoad,
    [(set R32C:$rT, immSExt16:$val)]>;

def ILf32:
  RI16Form<0b100000010, (outs R32FP:$rT), (ins s16imm_f32:$val),
    "il\t$rT, $val", ImmLoad,
    [(set R32FP:$rT, (SPUFPconstant fpimmSExt16:$val))]>;

def ILf64:
  RI16Form<0b100000010, (outs R64FP:$rT), (ins s16imm_f64:$val),
    "il\t$rT, $val", ImmLoad,
    [(set R64FP:$rT, (SPUFPconstant fpimmSExt16:$val))]>;

def ILHUv4i32:
  RI16Form<0b010000010, (outs VECREG:$rT), (ins u16imm:$val),
    "ilhu\t$rT, $val", ImmLoad,
    [(set VECREG:$rT, (v4i32 immILHUvec:$val))]>;

def ILHUr32:
  RI16Form<0b010000010, (outs R32C:$rT), (ins u16imm:$val),
    "ilhu\t$rT, $val", ImmLoad,
    [(set R32C:$rT, hi16:$val)]>;

// ILHUf32: Used to custom lower float constant loads
def ILHUf32:
  RI16Form<0b010000010, (outs R32FP:$rT), (ins f16imm:$val),
    "ilhu\t$rT, $val", ImmLoad,
    [(set R32FP:$rT, (SPUFPconstant hi16_f32:$val))]>;

// ILHUhi: Used for loading high portion of an address. Note the symbolHi
// printer used for the operand.
def ILHUhi : RI16Form<0b010000010, (outs R32C:$rT), (ins symbolHi:$val),
    "ilhu\t$rT, $val", ImmLoad,
    [(set R32C:$rT, hi16:$val)]>;

// Immediate load address (can also be used to load 18-bit unsigned constants,
// see the zext 16->32 pattern)
def ILAr64:
  RI18Form<0b1000010, (outs R64C:$rT), (ins u18imm_i64:$val),
    "ila\t$rT, $val", LoadNOP,
    [(set R64C:$rT, imm18:$val)]>;

// TODO: ILAv2i64

def ILAv2i64:
  RI18Form<0b1000010, (outs VECREG:$rT), (ins u18imm:$val),
    "ila\t$rT, $val", LoadNOP,
    [(set (v2i64 VECREG:$rT), v2i64Uns18Imm:$val)]>;

def ILAv4i32:
  RI18Form<0b1000010, (outs VECREG:$rT), (ins u18imm:$val),
    "ila\t$rT, $val", LoadNOP,
    [(set (v4i32 VECREG:$rT), v4i32Uns18Imm:$val)]>;

def ILAr32:
  RI18Form<0b1000010, (outs R32C:$rT), (ins u18imm:$val),
    "ila\t$rT, $val", LoadNOP,
    [(set R32C:$rT, imm18:$val)]>;

def ILAf32:
  RI18Form<0b1000010, (outs R32FP:$rT), (ins f18imm:$val),
    "ila\t$rT, $val", LoadNOP,
    [(set R32FP:$rT, (SPUFPconstant fpimm18:$val))]>;

def ILAf64:
  RI18Form<0b1000010, (outs R64FP:$rT), (ins f18imm_f64:$val),
    "ila\t$rT, $val", LoadNOP,
    [(set R64FP:$rT, (SPUFPconstant fpimm18:$val))]>;

def ILAlo:
  RI18Form<0b1000010, (outs R32C:$rT), (ins symbolLo:$val),
    "ila\t$rT, $val", ImmLoad,
    [(set R32C:$rT, imm18:$val)]>;

def ILAlsa:
  RI18Form<0b1000010, (outs R32C:$rT), (ins symbolLSA:$val),
    "ila\t$rT, $val", ImmLoad,
    [/* no pattern */]>;

// Immediate OR, Halfword Lower: The "other" part of loading large constants
// into 32-bit registers. See the anonymous pattern Pat<(i32 imm:$imm), ...>.
// Note that these are really two-operand instructions, but they're encoded
// as three operands with the first two arguments tied to each other.
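// Illustrative sketch (not in the original file): an arbitrary 32-bit
// constant is materialized as an ILHU/IOHL pair, e.g. 0x12345678 becomes
//
//   ilhu  $3, 0x1234        ; $3 = 0x12340000
//   iohl  $3, 0x5678        ; $3 = 0x12345678
//
// which is why IOHL ties $rS to $rT below.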

def IOHLvec:
  RI16Form<0b100000110, (outs VECREG:$rT), (ins VECREG:$rS, u16imm:$val),
    "iohl\t$rT, $val", ImmLoad,
    [/* insert intrinsic here */]>,
    RegConstraint<"$rS = $rT">,
    NoEncode<"$rS">;

def IOHLr32:
  RI16Form<0b100000110, (outs R32C:$rT), (ins R32C:$rS, i32imm:$val),
    "iohl\t$rT, $val", ImmLoad,
    [/* insert intrinsic here */]>,
    RegConstraint<"$rS = $rT">,
    NoEncode<"$rS">;

def IOHLf32:
  RI16Form<0b100000110, (outs R32FP:$rT), (ins R32FP:$rS, f32imm:$val),
    "iohl\t$rT, $val", ImmLoad,
    [/* insert intrinsic here */]>,
    RegConstraint<"$rS = $rT">,
    NoEncode<"$rS">;

// Form select mask for bytes using immediate, used in conjunction with the
// SELB instruction:
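// Illustrative sketch (not in the original file): FSMBI expands each of the
// 16 immediate bits into a byte of 0x00 or 0xff, which SELB then uses as a
// per-byte selector, e.g.
//
//   fsmbi $4, 0x00ff        ; bytes 0-7 = 0x00, bytes 8-15 = 0xff
//   selb  $5, $2, $3, $4    ; bytes 0-7 from $2, bytes 8-15 from $3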

def FSMBIv16i8 : RI16Form<0b101001100, (outs VECREG:$rT), (ins u16imm:$val),
    "fsmbi\t$rT, $val", SelectOp,
    [(set (v16i8 VECREG:$rT), (SPUfsmbi_v16i8 immU16:$val))]>;

def FSMBIv8i16 : RI16Form<0b101001100, (outs VECREG:$rT), (ins u16imm:$val),
    "fsmbi\t$rT, $val", SelectOp,
    [(set (v8i16 VECREG:$rT), (SPUfsmbi_v8i16 immU16:$val))]>;

def FSMBIvecv4i32 : RI16Form<0b101001100, (outs VECREG:$rT), (ins u16imm:$val),
    "fsmbi\t$rT, $val", SelectOp,
    [(set (v4i32 VECREG:$rT), (SPUfsmbi_v4i32 immU16:$val))]>;

//===----------------------------------------------------------------------===//
// Integer and Logical Operations:
//===----------------------------------------------------------------------===//

def AHv8i16:
  RRForm<0b00010011000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
    "ah\t$rT, $rA, $rB", IntegerOp,
    [(set (v8i16 VECREG:$rT), (int_spu_si_ah VECREG:$rA, VECREG:$rB))]>;

def : Pat<(add (v8i16 VECREG:$rA), (v8i16 VECREG:$rB)),
          (AHv8i16 VECREG:$rA, VECREG:$rB)>;

// [(set (v8i16 VECREG:$rT), (add (v8i16 VECREG:$rA), (v8i16 VECREG:$rB)))]>;

def AHr16:
  RRForm<0b00010011000, (outs R16C:$rT), (ins R16C:$rA, R16C:$rB),
    "ah\t$rT, $rA, $rB", IntegerOp,
    [(set R16C:$rT, (add R16C:$rA, R16C:$rB))]>;

def AHIvec:
  RI10Form<0b10111000, (outs VECREG:$rT), (ins VECREG:$rA, s10imm:$val),
    "ahi\t$rT, $rA, $val", IntegerOp,
    [(set (v8i16 VECREG:$rT), (add (v8i16 VECREG:$rA),
                                   v8i16SExt10Imm:$val))]>;

def AHIr16 : RI10Form<0b10111000, (outs R16C:$rT), (ins R16C:$rA, s10imm:$val),
    "ahi\t$rT, $rA, $val", IntegerOp,
    [(set R16C:$rT, (add R16C:$rA, v8i16SExt10Imm:$val))]>;

def Avec : RRForm<0b00000011000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
    "a\t$rT, $rA, $rB", IntegerOp,
    [(set (v4i32 VECREG:$rT), (add (v4i32 VECREG:$rA), (v4i32 VECREG:$rB)))]>;

def : Pat<(add (v16i8 VECREG:$rA), (v16i8 VECREG:$rB)),
          (Avec VECREG:$rA, VECREG:$rB)>;

def Ar32 : RRForm<0b00000011000, (outs R32C:$rT), (ins R32C:$rA, R32C:$rB),
    "a\t$rT, $rA, $rB", IntegerOp,
    [(set R32C:$rT, (add R32C:$rA, R32C:$rB))]>;

def Ar8:
    RRForm<0b00000011000, (outs R8C:$rT), (ins R8C:$rA, R8C:$rB),
    "a\t$rT, $rA, $rB", IntegerOp,
    [(set R8C:$rT, (add R8C:$rA, R8C:$rB))]>;

def AIvec:
    RI10Form<0b00111000, (outs VECREG:$rT), (ins VECREG:$rA, s10imm:$val),
    "ai\t$rT, $rA, $val", IntegerOp,
    [(set (v4i32 VECREG:$rT), (add (v4i32 VECREG:$rA),
                                   v4i32SExt10Imm:$val))]>;

def AIr32:
    RI10Form<0b00111000, (outs R32C:$rT), (ins R32C:$rA, s10imm_i32:$val),
    "ai\t$rT, $rA, $val", IntegerOp,
    [(set R32C:$rT, (add R32C:$rA, i32ImmSExt10:$val))]>;

def SFHvec:
    RRForm<0b00010010000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
    "sfh\t$rT, $rA, $rB", IntegerOp,
    [(set (v8i16 VECREG:$rT), (sub (v8i16 VECREG:$rA),
                                   (v8i16 VECREG:$rB)))]>;

def SFHr16:
    RRForm<0b00010010000, (outs R16C:$rT), (ins R16C:$rA, R16C:$rB),
    "sfh\t$rT, $rA, $rB", IntegerOp,
    [(set R16C:$rT, (sub R16C:$rA, R16C:$rB))]>;

def SFHIvec:
    RI10Form<0b10110000, (outs VECREG:$rT), (ins VECREG:$rA, s10imm:$val),
    "sfhi\t$rT, $rA, $val", IntegerOp,
    [(set (v8i16 VECREG:$rT), (sub v8i16SExt10Imm:$val,
                                   (v8i16 VECREG:$rA)))]>;

def SFHIr16 : RI10Form<0b10110000, (outs R16C:$rT), (ins R16C:$rA, s10imm:$val),
    "sfhi\t$rT, $rA, $val", IntegerOp,
    [(set R16C:$rT, (sub i16ImmSExt10:$val, R16C:$rA))]>;

def SFvec : RRForm<0b00000010000, (outs VECREG:$rT),
                   (ins VECREG:$rA, VECREG:$rB),
    "sf\t$rT, $rA, $rB", IntegerOp,
    [(set (v4i32 VECREG:$rT), (sub (v4i32 VECREG:$rA), (v4i32 VECREG:$rB)))]>;

def SFr32 : RRForm<0b00000010000, (outs R32C:$rT), (ins R32C:$rA, R32C:$rB),
    "sf\t$rT, $rA, $rB", IntegerOp,
    [(set R32C:$rT, (sub R32C:$rA, R32C:$rB))]>;

def SFIvec:
    RI10Form<0b00110000, (outs VECREG:$rT), (ins VECREG:$rA, s10imm:$val),
    "sfi\t$rT, $rA, $val", IntegerOp,
    [(set (v4i32 VECREG:$rT), (sub v4i32SExt10Imm:$val,
                                   (v4i32 VECREG:$rA)))]>;

def SFIr32 : RI10Form<0b00110000, (outs R32C:$rT),
                      (ins R32C:$rA, s10imm_i32:$val),
    "sfi\t$rT, $rA, $val", IntegerOp,
    [(set R32C:$rT, (sub i32ImmSExt10:$val, R32C:$rA))]>;

// ADDX: only available in vector form, doesn't match a pattern.
def ADDXvec:
    RRForm<0b00000010110, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB,
                                                  VECREG:$rCarry),
    "addx\t$rT, $rA, $rB", IntegerOp,
    []>,
    RegConstraint<"$rCarry = $rT">,
    NoEncode<"$rCarry">;

// CG: only available in vector form, doesn't match a pattern.
def CGvec:
    RRForm<0b01000011000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB,
                                                  VECREG:$rCarry),
    "cg\t$rT, $rA, $rB", IntegerOp,
    []>,
    RegConstraint<"$rCarry = $rT">,
    NoEncode<"$rCarry">;

// SFX: only available in vector form, doesn't match a pattern
def SFXvec:
    RRForm<0b10000010110, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB,
                                                  VECREG:$rCarry),
    "sfx\t$rT, $rA, $rB", IntegerOp,
    []>,
    RegConstraint<"$rCarry = $rT">,
    NoEncode<"$rCarry">;

// BG: only available in vector form, doesn't match a pattern.
def BGvec:
    RRForm<0b01000010000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB,
                                                  VECREG:$rCarry),
    "bg\t$rT, $rA, $rB", IntegerOp,
    []>,
    RegConstraint<"$rCarry = $rT">,
    NoEncode<"$rCarry">;

// BGX: only available in vector form, doesn't match a pattern.
def BGXvec:
    RRForm<0b11000010110, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB,
                                                  VECREG:$rCarry),
    "bgx\t$rT, $rA, $rB", IntegerOp,
    []>,
    RegConstraint<"$rCarry = $rT">,
    NoEncode<"$rCarry">;

// Halfword multiply variants:
// N.B: These can be used to build up larger quantities (16x16 -> 32)

def MPYv8i16:
  RRForm<0b00100011110, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
    "mpy\t$rT, $rA, $rB", IntegerMulDiv,
    [(set (v8i16 VECREG:$rT), (SPUmpy_v8i16 (v8i16 VECREG:$rA),
                                            (v8i16 VECREG:$rB)))]>;

def MPYr16:
  RRForm<0b00100011110, (outs R16C:$rT), (ins R16C:$rA, R16C:$rB),
    "mpy\t$rT, $rA, $rB", IntegerMulDiv,
    [(set R16C:$rT, (mul R16C:$rA, R16C:$rB))]>;

def MPYUv4i32:
  RRForm<0b00110011110, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
    "mpyu\t$rT, $rA, $rB", IntegerMulDiv,
    [(set (v4i32 VECREG:$rT),
          (SPUmpyu_v4i32 (v4i32 VECREG:$rA), (v4i32 VECREG:$rB)))]>;

def MPYUr16:
  RRForm<0b00110011110, (outs R32C:$rT), (ins R16C:$rA, R16C:$rB),
    "mpyu\t$rT, $rA, $rB", IntegerMulDiv,
    [(set R32C:$rT, (mul (zext R16C:$rA),
                         (zext R16C:$rB)))]>;

def MPYUr32:
  RRForm<0b00110011110, (outs R32C:$rT), (ins R32C:$rA, R32C:$rB),
    "mpyu\t$rT, $rA, $rB", IntegerMulDiv,
    [(set R32C:$rT, (SPUmpyu_i32 R32C:$rA, R32C:$rB))]>;

// mpyi: multiply 16 x s10imm -> 32 result (custom lowering for 32 bit result,
// this only produces the lower 16 bits)
def MPYIvec:
  RI10Form<0b00101110, (outs VECREG:$rT), (ins VECREG:$rA, s10imm:$val),
    "mpyi\t$rT, $rA, $val", IntegerMulDiv,
    [(set (v8i16 VECREG:$rT), (mul (v8i16 VECREG:$rA), v8i16SExt10Imm:$val))]>;

def MPYIr16:
  RI10Form<0b00101110, (outs R16C:$rT), (ins R16C:$rA, s10imm:$val),
    "mpyi\t$rT, $rA, $val", IntegerMulDiv,
    [(set R16C:$rT, (mul R16C:$rA, i16ImmSExt10:$val))]>;

// mpyui: same issues as other multiplies, plus, this doesn't match a
// pattern... but may be used during target DAG selection or lowering
def MPYUIvec:
  RI10Form<0b10101110, (outs VECREG:$rT), (ins VECREG:$rA, s10imm:$val),
    "mpyui\t$rT, $rA, $val", IntegerMulDiv,
    []>;

def MPYUIr16:
  RI10Form<0b10101110, (outs R16C:$rT), (ins R16C:$rA, s10imm:$val),
    "mpyui\t$rT, $rA, $val", IntegerMulDiv,
    []>;

// mpya: 16 x 16 + 16 -> 32 bit result
def MPYAvec:
  RRRForm<0b0011, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB, VECREG:$rC),
    "mpya\t$rT, $rA, $rB, $rC", IntegerMulDiv,
    [(set (v4i32 VECREG:$rT), (add (v4i32 (bitconvert (mul (v8i16 VECREG:$rA),
                                                           (v8i16 VECREG:$rB)))),
                                   (v4i32 VECREG:$rC)))]>;

def MPYAr32:
  RRRForm<0b0011, (outs R32C:$rT), (ins R16C:$rA, R16C:$rB, R32C:$rC),
    "mpya\t$rT, $rA, $rB, $rC", IntegerMulDiv,
    [(set R32C:$rT, (add (sext (mul R16C:$rA, R16C:$rB)),
                         R32C:$rC))]>;

def : Pat<(add (mul (sext R16C:$rA), (sext R16C:$rB)), R32C:$rC),
          (MPYAr32 R16C:$rA, R16C:$rB, R32C:$rC)>;

def MPYAr32_sextinreg:
  RRRForm<0b0011, (outs R32C:$rT), (ins R32C:$rA, R32C:$rB, R32C:$rC),
    "mpya\t$rT, $rA, $rB, $rC", IntegerMulDiv,
    [(set R32C:$rT, (add (mul (sext_inreg R32C:$rA, i16),
                              (sext_inreg R32C:$rB, i16)),
                         R32C:$rC))]>;

//def MPYAr32:
//  RRRForm<0b0011, (outs R32C:$rT), (ins R16C:$rA, R16C:$rB, R32C:$rC),
//    "mpya\t$rT, $rA, $rB, $rC", IntegerMulDiv,
//    [(set R32C:$rT, (add (sext (mul R16C:$rA, R16C:$rB)),
//                         R32C:$rC))]>;

// mpyh: multiply high, used to synthesize 32-bit multiplies
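// Illustrative sketch (not in the original file): the SPU's multipliers are
// 16x16, so a full 32-bit low multiply is typically synthesized roughly as
//
//   mpyh  $4, $2, $3        ; (hi16($2) * lo16($3)) << 16
//   mpyh  $5, $3, $2        ; (hi16($3) * lo16($2)) << 16
//   mpyu  $6, $2, $3        ; lo16($2) * lo16($3), unsigned
//   a     $4, $4, $5
//   a     $4, $4, $6        ; low 32 bits of $2 * $3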
def MPYHv4i32:
  RRForm<0b10100011110, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
    "mpyh\t$rT, $rA, $rB", IntegerMulDiv,
    [(set (v4i32 VECREG:$rT),
          (SPUmpyh_v4i32 (v4i32 VECREG:$rA), (v4i32 VECREG:$rB)))]>;

def MPYHr32:
  RRForm<0b10100011110, (outs R32C:$rT), (ins R32C:$rA, R32C:$rB),
    "mpyh\t$rT, $rA, $rB", IntegerMulDiv,
    [(set R32C:$rT, (SPUmpyh_i32 R32C:$rA, R32C:$rB))]>;

// mpys: multiply high and shift right (returns the top half of
// a 16-bit multiply, sign extended to 32 bits.)
def MPYSvec:
  RRForm<0b11100011110, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
    "mpys\t$rT, $rA, $rB", IntegerMulDiv,
    []>;

def MPYSr16:
  RRForm<0b11100011110, (outs R32C:$rT), (ins R16C:$rA, R16C:$rB),
    "mpys\t$rT, $rA, $rB", IntegerMulDiv,
    []>;

// mpyhh: multiply high-high (returns the 32-bit result from multiplying
// the top 16 bits of the $rA, $rB)
def MPYHHv8i16:
  RRForm<0b01100011110, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
    "mpyhh\t$rT, $rA, $rB", IntegerMulDiv,
    [(set (v8i16 VECREG:$rT),
          (SPUmpyhh_v8i16 (v8i16 VECREG:$rA), (v8i16 VECREG:$rB)))]>;

def MPYHHr32:
  RRForm<0b01100011110, (outs R32C:$rT), (ins R32C:$rA, R32C:$rB),
    "mpyhh\t$rT, $rA, $rB", IntegerMulDiv,
    []>;

// mpyhha: Multiply high-high, add to $rT:
def MPYHHAvec:
  RRForm<0b01100010110, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
    "mpyhha\t$rT, $rA, $rB", IntegerMulDiv,
    []>;

def MPYHHAr32:
  RRForm<0b01100010110, (outs R32C:$rT), (ins R32C:$rA, R32C:$rB),
    "mpyhha\t$rT, $rA, $rB", IntegerMulDiv,
    []>;

// mpyhhu: Multiply high-high, unsigned
def MPYHHUvec:
  RRForm<0b01110011110, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
    "mpyhhu\t$rT, $rA, $rB", IntegerMulDiv,
    []>;

def MPYHHUr32:
  RRForm<0b01110011110, (outs R32C:$rT), (ins R32C:$rA, R32C:$rB),
    "mpyhhu\t$rT, $rA, $rB", IntegerMulDiv,
    []>;

// mpyhhau: Multiply high-high, unsigned, add to $rT
def MPYHHAUvec:
  RRForm<0b01110010110, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
    "mpyhhau\t$rT, $rA, $rB", IntegerMulDiv,
    []>;

def MPYHHAUr32:
  RRForm<0b01110010110, (outs R32C:$rT), (ins R32C:$rA, R32C:$rB),
    "mpyhhau\t$rT, $rA, $rB", IntegerMulDiv,
    []>;

// clz: Count leading zeroes
def CLZv4i32:
  RRForm_1<0b10100101010, (outs VECREG:$rT), (ins VECREG:$rA),
    "clz\t$rT, $rA", IntegerOp,
    [/* intrinsic */]>;

def CLZr32:
  RRForm_1<0b10100101010, (outs R32C:$rT), (ins R32C:$rA),
    "clz\t$rT, $rA", IntegerOp,
    [(set R32C:$rT, (ctlz R32C:$rA))]>;

// cntb: Count ones in bytes (aka "population count")
// NOTE: This instruction is really a vector instruction, but the custom
// lowering code uses it in unorthodox ways to support CTPOP for other
// data types!
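// Illustrative note (not in the original file): CNTB yields one population
// count per byte, e.g. input bytes {0x00, 0xff, 0x0f, 0x80, ...} produce
// {0, 8, 4, 1, ...}; the CTPOP lowering then sums the byte counts it needs
// (e.g. via SUMB or shifts and adds) to form the i16/i32 result.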
def CNTBv16i8:
  RRForm_1<0b00101101010, (outs VECREG:$rT), (ins VECREG:$rA),
    "cntb\t$rT, $rA", IntegerOp,
    [(set (v16i8 VECREG:$rT), (SPUcntb_v16i8 (v16i8 VECREG:$rA)))]>;

def CNTBv8i16 :
  RRForm_1<0b00101101010, (outs VECREG:$rT), (ins VECREG:$rA),
    "cntb\t$rT, $rA", IntegerOp,
    [(set (v8i16 VECREG:$rT), (SPUcntb_v8i16 (v8i16 VECREG:$rA)))]>;

def CNTBv4i32 :
  RRForm_1<0b00101101010, (outs VECREG:$rT), (ins VECREG:$rA),
    "cntb\t$rT, $rA", IntegerOp,
    [(set (v4i32 VECREG:$rT), (SPUcntb_v4i32 (v4i32 VECREG:$rA)))]>;

// fsmb: Form select mask for bytes. N.B. Input operand, $rA, is 16-bits
def FSMB:
  RRForm_1<0b01101101100, (outs VECREG:$rT), (ins R16C:$rA),
    "fsmb\t$rT, $rA", SelectOp,
    []>;

// fsmh: Form select mask for halfwords. N.B., Input operand, $rA, is
// only 8-bits wide (even though it's input as 16-bits here)
def FSMH:
  RRForm_1<0b10101101100, (outs VECREG:$rT), (ins R16C:$rA),
    "fsmh\t$rT, $rA", SelectOp,
    []>;

// fsm: Form select mask for words. Like the other fsm* instructions,
// only the lower 4 bits of $rA are significant.
def FSM:
  RRForm_1<0b00101101100, (outs VECREG:$rT), (ins R16C:$rA),
    "fsm\t$rT, $rA", SelectOp,
    []>;

// gbb: Gather all low order bits from each byte in $rA into a single 16-bit
// quantity stored into $rT
def GBB:
  RRForm_1<0b01001101100, (outs R16C:$rT), (ins VECREG:$rA),
    "gbb\t$rT, $rA", GatherOp,
    []>;

// gbh: Gather all low order bits from each halfword in $rA into a single
// 8-bit quantity stored in $rT
def GBH:
  RRForm_1<0b10001101100, (outs R16C:$rT), (ins VECREG:$rA),
    "gbh\t$rT, $rA", GatherOp,
    []>;

// gb: Gather all low order bits from each word in $rA into a single
// 4-bit quantity stored in $rT
def GB:
  RRForm_1<0b00001101100, (outs R16C:$rT), (ins VECREG:$rA),
    "gb\t$rT, $rA", GatherOp,
    []>;

// avgb: average bytes
def AVGB:
  RRForm<0b11001011000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
    "avgb\t$rT, $rA, $rB", ByteOp,
    []>;

// absdb: absolute difference of bytes
def ABSDB:
  RRForm<0b11001010000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
    "absdb\t$rT, $rA, $rB", ByteOp,
    []>;

// sumb: sum bytes into halfwords
def SUMB:
  RRForm<0b11001010010, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
    "sumb\t$rT, $rA, $rB", ByteOp,
    []>;

// Sign extension operations:
def XSBHvec:
  RRForm_1<0b01101101010, (outs VECREG:$rDst), (ins VECREG:$rSrc),
    "xsbh\t$rDst, $rSrc", IntegerOp,
    [(set (v8i16 VECREG:$rDst), (sext (v16i8 VECREG:$rSrc)))]>;

// Ordinary form for XSBH
def XSBHr16:
  RRForm_1<0b01101101010, (outs R16C:$rDst), (ins R16C:$rSrc),
    "xsbh\t$rDst, $rSrc", IntegerOp,
    [(set R16C:$rDst, (sext_inreg R16C:$rSrc, i8))]>;

def XSBHr8:
  RRForm_1<0b01101101010, (outs R16C:$rDst), (ins R8C:$rSrc),
    "xsbh\t$rDst, $rSrc", IntegerOp,
    [(set R16C:$rDst, (sext R8C:$rSrc))]>;

// 32-bit form of XSBH: used to sign extend 8-bit quantities to 32-bit
// quantities via a 32-bit register (see the sext 8->32 pattern below).
// Intentionally doesn't match a pattern because we want the sext 8->32
// pattern to do the work for us, namely because we need the extra XSHWr32.
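// Illustrative sketch (not in the original file): the sext 8->32 lowering
// therefore ends up as two back-to-back extensions,
//
//   xsbh  $3, $3            ; i8  -> i16 (within the 32-bit register)
//   xshw  $3, $3            ; i16 -> i32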
def XSBHr32:
  RRForm_1<0b01101101010, (outs R32C:$rDst), (ins R32C:$rSrc),
    "xsbh\t$rDst, $rSrc", IntegerOp,
    [(set R32C:$rDst, (sext_inreg R32C:$rSrc, i8))]>;

// Sign extend halfwords to words:
def XSHWvec:
  RRForm_1<0b01101101010, (outs VECREG:$rDest), (ins VECREG:$rSrc),
    "xshw\t$rDest, $rSrc", IntegerOp,
    [(set (v4i32 VECREG:$rDest), (sext (v8i16 VECREG:$rSrc)))]>;

def XSHWr32:
  RRForm_1<0b01101101010, (outs R32C:$rDst), (ins R32C:$rSrc),
    "xshw\t$rDst, $rSrc", IntegerOp,
    [(set R32C:$rDst, (sext_inreg R32C:$rSrc, i16))]>;

def XSHWr16:
  RRForm_1<0b01101101010, (outs R32C:$rDst), (ins R16C:$rSrc),
    "xshw\t$rDst, $rSrc", IntegerOp,
    [(set R32C:$rDst, (sext R16C:$rSrc))]>;

def XSWDvec:
  RRForm_1<0b01100101010, (outs VECREG:$rDst), (ins VECREG:$rSrc),
    "xswd\t$rDst, $rSrc", IntegerOp,
    [(set (v2i64 VECREG:$rDst), (sext (v4i32 VECREG:$rSrc)))]>;

def XSWDr64:
  RRForm_1<0b01100101010, (outs R64C:$rDst), (ins R64C:$rSrc),
    "xswd\t$rDst, $rSrc", IntegerOp,
    [(set R64C:$rDst, (sext_inreg R64C:$rSrc, i32))]>;

def XSWDr32:
  RRForm_1<0b01100101010, (outs R64C:$rDst), (ins R32C:$rSrc),
    "xswd\t$rDst, $rSrc", IntegerOp,
    [(set R64C:$rDst, (SPUsext32_to_64 R32C:$rSrc))]>;

def : Pat<(sext R32C:$inp),
          (XSWDr32 R32C:$inp)>;

// AND operations
def ANDv16i8:
  RRForm<0b10000011000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
    "and\t$rT, $rA, $rB", IntegerOp,
    [(set (v16i8 VECREG:$rT), (and (v16i8 VECREG:$rA),
                                   (v16i8 VECREG:$rB)))]>;

def ANDv8i16:
  RRForm<0b10000011000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
    "and\t$rT, $rA, $rB", IntegerOp,
    [(set (v8i16 VECREG:$rT), (and (v8i16 VECREG:$rA),
                                   (v8i16 VECREG:$rB)))]>;

def ANDv4i32:
  RRForm<0b10000011000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
    "and\t$rT, $rA, $rB", IntegerOp,
    [(set (v4i32 VECREG:$rT), (and (v4i32 VECREG:$rA),
                                   (v4i32 VECREG:$rB)))]>;

def ANDr32:
  RRForm<0b10000011000, (outs R32C:$rT), (ins R32C:$rA, R32C:$rB),
    "and\t$rT, $rA, $rB", IntegerOp,
    [(set R32C:$rT, (and R32C:$rA, R32C:$rB))]>;

//===---------------------------------------------
// Special instructions to perform the fabs instruction
def ANDfabs32:
  RRForm<0b10000011000, (outs R32FP:$rT), (ins R32FP:$rA, R32C:$rB),
    "and\t$rT, $rA, $rB", IntegerOp,
    [/* Intentionally does not match a pattern */]>;

def ANDfabs64:
  RRForm<0b10000011000, (outs R64FP:$rT), (ins R64FP:$rA, VECREG:$rB),
    "and\t$rT, $rA, $rB", IntegerOp,
    [/* Intentionally does not match a pattern */]>;

// Could use ANDv4i32, but won't for clarity
def ANDfabsvec:
  RRForm<0b10000011000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
    "and\t$rT, $rA, $rB", IntegerOp,
    [/* Intentionally does not match a pattern */]>;

//===---------------------------------------------

def ANDr16:
  RRForm<0b10000011000, (outs R16C:$rT), (ins R16C:$rA, R16C:$rB),
    "and\t$rT, $rA, $rB", IntegerOp,
    [(set R16C:$rT, (and R16C:$rA, R16C:$rB))]>;

def ANDr8:
  RRForm<0b10000011000, (outs R8C:$rT), (ins R8C:$rA, R8C:$rB),
    "and\t$rT, $rA, $rB", IntegerOp,
    [(set R8C:$rT, (and R8C:$rA, R8C:$rB))]>;

// Hacked form of AND to zero-extend 16-bit
// quantities -- see 16->32 zext pattern.
//
// This pattern is somewhat artificial, since it might match some
// compiler generated pattern but it is unlikely to do so.
def AND2To4:
  RRForm<0b10000011000, (outs R32C:$rT), (ins R16C:$rA, R32C:$rB),
    "and\t$rT, $rA, $rB", IntegerOp,
    [(set R32C:$rT, (and (zext R16C:$rA), R32C:$rB))]>;

// N.B.: vnot_conv is one of those special target selection pattern fragments,
// in which we expect there to be a bit_convert on the constant. Bear in mind
// that llvm translates "not <reg>" to "xor <reg>, -1" (or in this case, a
// constant -1 vector.)
def ANDCv16i8:
  RRForm<0b10000011010, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
    "andc\t$rT, $rA, $rB", IntegerOp,
    [(set (v16i8 VECREG:$rT), (and (v16i8 VECREG:$rA),
                                   (vnot (v16i8 VECREG:$rB))))]>;

def ANDCv8i16:
  RRForm<0b10000011010, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
    "andc\t$rT, $rA, $rB", IntegerOp,
    [(set (v8i16 VECREG:$rT), (and (v8i16 VECREG:$rA),
                                   (vnot (v8i16 VECREG:$rB))))]>;

def ANDCv4i32:
  RRForm<0b10000011010, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
    "andc\t$rT, $rA, $rB", IntegerOp,
    [(set (v4i32 VECREG:$rT), (and (v4i32 VECREG:$rA),
                                   (vnot (v4i32 VECREG:$rB))))]>;

def ANDCr32:
  RRForm<0b10000011010, (outs R32C:$rT), (ins R32C:$rA, R32C:$rB),
    "andc\t$rT, $rA, $rB", IntegerOp,
    [(set R32C:$rT, (and R32C:$rA, (not R32C:$rB)))]>;

def ANDCr16:
  RRForm<0b10000011010, (outs R16C:$rT), (ins R16C:$rA, R16C:$rB),
    "andc\t$rT, $rA, $rB", IntegerOp,
    [(set R16C:$rT, (and R16C:$rA, (not R16C:$rB)))]>;

def ANDCr8:
  RRForm<0b10000011010, (outs R8C:$rT), (ins R8C:$rA, R8C:$rB),
    "andc\t$rT, $rA, $rB", IntegerOp,
    [(set R8C:$rT, (and R8C:$rA, (not R8C:$rB)))]>;

def ANDBIv16i8:
  RI10Form<0b01101000, (outs VECREG:$rT), (ins VECREG:$rA, u10imm:$val),
    "andbi\t$rT, $rA, $val", IntegerOp,
    [(set (v16i8 VECREG:$rT),
          (and (v16i8 VECREG:$rA), (v16i8 v16i8U8Imm:$val)))]>;

def ANDBIr8:
  RI10Form<0b01101000, (outs R8C:$rT), (ins R8C:$rA, u10imm_i8:$val),
    "andbi\t$rT, $rA, $val", IntegerOp,
    [(set R8C:$rT, (and R8C:$rA, immU8:$val))]>;

def ANDHIv8i16:
  RI10Form<0b10101000, (outs VECREG:$rT), (ins VECREG:$rA, s10imm:$val),
    "andhi\t$rT, $rA, $val", IntegerOp,
    [(set (v8i16 VECREG:$rT),
          (and (v8i16 VECREG:$rA), v8i16SExt10Imm:$val))]>;

def ANDHIr16:
  RI10Form<0b10101000, (outs R16C:$rT), (ins R16C:$rA, s10imm:$val),
    "andhi\t$rT, $rA, $val", IntegerOp,
    [(set R16C:$rT, (and R16C:$rA, i16ImmUns10:$val))]>;

def ANDHI1To2:
  RI10Form<0b10101000, (outs R16C:$rT), (ins R8C:$rA, s10imm:$val),
    "andhi\t$rT, $rA, $val", IntegerOp,
    [(set R16C:$rT, (and (zext R8C:$rA), i16ImmSExt10:$val))]>;

def ANDIv4i32:
  RI10Form<0b00101000, (outs VECREG:$rT), (ins VECREG:$rA, s10imm:$val),
    "andi\t$rT, $rA, $val", IntegerOp,
    [(set (v4i32 VECREG:$rT),
          (and (v4i32 VECREG:$rA), v4i32SExt10Imm:$val))]>;

def ANDIr32:
  RI10Form<0b10101000, (outs R32C:$rT), (ins R32C:$rA, s10imm_i32:$val),
    "andi\t$rT, $rA, $val", IntegerOp,
    [(set R32C:$rT, (and R32C:$rA, i32ImmSExt10:$val))]>;

// Hacked form of ANDI to zero-extend i8 quantities to i32. See the zext 8->32
// pattern below.
def ANDI1To4:
  RI10Form<0b10101000, (outs R32C:$rT), (ins R8C:$rA, s10imm_i32:$val),
    "andi\t$rT, $rA, $val", IntegerOp,
    [(set R32C:$rT, (and (zext R8C:$rA), i32ImmSExt10:$val))]>;

// Hacked form of ANDI to zero-extend i16 quantities to i32. See the
// zext 16->32 pattern below.
//
// Note that this pattern is somewhat artificial, since it might match
// something the compiler generates but is unlikely to occur in practice.
def ANDI2To4:
  RI10Form<0b10101000, (outs R32C:$rT), (ins R16C:$rA, s10imm_i32:$val),
    "andi\t$rT, $rA, $val", IntegerOp,
    [(set R32C:$rT, (and (zext R16C:$rA), i32ImmSExt10:$val))]>;

// Bitwise OR group:
// Bitwise "or" (N.B.: These are also register-register copy instructions...)
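// Illustrative note (not in the original file): OR-ing a register with
// itself reproduces it unchanged, e.g.
//
//   or  $3, $4, $4          ; copy $4 into $3
//
// which is what the promotion/extraction forms below rely on.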
def ORv16i8:
  RRForm<0b10000010000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
    "or\t$rT, $rA, $rB", IntegerOp,
    [(set (v16i8 VECREG:$rT), (or (v16i8 VECREG:$rA), (v16i8 VECREG:$rB)))]>;

def ORv8i16:
  RRForm<0b10000010000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
    "or\t$rT, $rA, $rB", IntegerOp,
    [(set (v8i16 VECREG:$rT), (or (v8i16 VECREG:$rA), (v8i16 VECREG:$rB)))]>;

def ORv4i32:
  RRForm<0b10000010000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
    "or\t$rT, $rA, $rB", IntegerOp,
    [(set (v4i32 VECREG:$rT), (or (v4i32 VECREG:$rA), (v4i32 VECREG:$rB)))]>;

def ORv4f32:
  RRForm<0b10000010000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
    "or\t$rT, $rA, $rB", IntegerOp,
    [(set (v4f32 VECREG:$rT),
          (v4f32 (bitconvert (or (v4i32 VECREG:$rA), (v4i32 VECREG:$rB)))))]>;

def ORv2f64:
  RRForm<0b10000010000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
    "or\t$rT, $rA, $rB", IntegerOp,
    [(set (v2f64 VECREG:$rT),
          (v2f64 (bitconvert (or (v2i64 VECREG:$rA), (v2i64 VECREG:$rB)))))]>;

def ORgprc:
  RRForm<0b10000010000, (outs GPRC:$rT), (ins GPRC:$rA, GPRC:$rB),
    "or\t$rT, $rA, $rB", IntegerOp,
    [(set GPRC:$rT, (or GPRC:$rA, GPRC:$rB))]>;

def ORr64:
  RRForm<0b10000010000, (outs R64C:$rT), (ins R64C:$rA, R64C:$rB),
    "or\t$rT, $rA, $rB", IntegerOp,
    [(set R64C:$rT, (or R64C:$rA, R64C:$rB))]>;

def ORr32:
  RRForm<0b10000010000, (outs R32C:$rT), (ins R32C:$rA, R32C:$rB),
    "or\t$rT, $rA, $rB", IntegerOp,
    [(set R32C:$rT, (or R32C:$rA, R32C:$rB))]>;

def ORr16:
  RRForm<0b10000010000, (outs R16C:$rT), (ins R16C:$rA, R16C:$rB),
    "or\t$rT, $rA, $rB", IntegerOp,
    [(set R16C:$rT, (or R16C:$rA, R16C:$rB))]>;

def ORr8:
  RRForm<0b10000010000, (outs R8C:$rT), (ins R8C:$rA, R8C:$rB),
    "or\t$rT, $rA, $rB", IntegerOp,
    [(set R8C:$rT, (or R8C:$rA, R8C:$rB))]>;

// OR instruction forms that are used to copy f32 and f64 registers.
// They do not match patterns.
def ORf32:
  RRForm<0b10000010000, (outs R32FP:$rT), (ins R32FP:$rA, R32FP:$rB),
    "or\t$rT, $rA, $rB", IntegerOp,
    [/* no pattern */]>;

def ORf64:
  RRForm<0b10000010000, (outs R64FP:$rT), (ins R64FP:$rA, R64FP:$rB),
    "or\t$rT, $rA, $rB", IntegerOp,
    [/* no pattern */]>;

// ORv*_*: Used in scalar->vector promotions:
def ORv16i8_i8:
  RRForm<0b10000010000, (outs VECREG:$rT), (ins R8C:$rA, R8C:$rB),
    "or\t$rT, $rA, $rB", IntegerOp,
    [/* no pattern */]>;

def : Pat<(v16i8 (SPUpromote_scalar R8C:$rA)),
          (ORv16i8_i8 R8C:$rA, R8C:$rA)>;

def ORv8i16_i16:
  RRForm<0b10000010000, (outs VECREG:$rT), (ins R16C:$rA, R16C:$rB),
    "or\t$rT, $rA, $rB", IntegerOp,
    [/* no pattern */]>;

def : Pat<(v8i16 (SPUpromote_scalar R16C:$rA)),
          (ORv8i16_i16 R16C:$rA, R16C:$rA)>;

def ORv4i32_i32:
  RRForm<0b10000010000, (outs VECREG:$rT), (ins R32C:$rA, R32C:$rB),
    "or\t$rT, $rA, $rB", IntegerOp,
    [/* no pattern */]>;

def : Pat<(v4i32 (SPUpromote_scalar R32C:$rA)),
          (ORv4i32_i32 R32C:$rA, R32C:$rA)>;

def ORv2i64_i64:
  RRForm<0b10000010000, (outs VECREG:$rT), (ins R64C:$rA, R64C:$rB),
    "or\t$rT, $rA, $rB", IntegerOp,
    [/* no pattern */]>;

def : Pat<(v2i64 (SPUpromote_scalar R64C:$rA)),
          (ORv2i64_i64 R64C:$rA, R64C:$rA)>;

def ORv4f32_f32:
  RRForm<0b10000010000, (outs VECREG:$rT), (ins R32FP:$rA, R32FP:$rB),
    "or\t$rT, $rA, $rB", IntegerOp,
    [/* no pattern */]>;

def : Pat<(v4f32 (SPUpromote_scalar R32FP:$rA)),
          (ORv4f32_f32 R32FP:$rA, R32FP:$rA)>;

def ORv2f64_f64:
  RRForm<0b10000010000, (outs VECREG:$rT), (ins R64FP:$rA, R64FP:$rB),
    "or\t$rT, $rA, $rB", IntegerOp,
    [/* no pattern */]>;

def : Pat<(v2f64 (SPUpromote_scalar R64FP:$rA)),
          (ORv2f64_f64 R64FP:$rA, R64FP:$rA)>;

// ORi*_v*: Used to extract vector element 0 (the preferred slot)
def ORi8_v16i8:
  RRForm<0b10000010000, (outs R8C:$rT), (ins VECREG:$rA, VECREG:$rB),
    "or\t$rT, $rA, $rB", IntegerOp,
    [/* no pattern */]>;

def : Pat<(SPUextract_elt0 (v16i8 VECREG:$rA)),
          (ORi8_v16i8 VECREG:$rA, VECREG:$rA)>;

def ORi16_v8i16:
  RRForm<0b10000010000, (outs R16C:$rT), (ins VECREG:$rA, VECREG:$rB),
    "or\t$rT, $rA, $rB", IntegerOp,
    [/* no pattern */]>;

def : Pat<(SPUextract_elt0 (v8i16 VECREG:$rA)),
          (ORi16_v8i16 VECREG:$rA, VECREG:$rA)>;

def : Pat<(SPUextract_elt0_chained (v8i16 VECREG:$rA)),
          (ORi16_v8i16 VECREG:$rA, VECREG:$rA)>;

def ORi32_v4i32:
  RRForm<0b10000010000, (outs R32C:$rT), (ins VECREG:$rA, VECREG:$rB),
    "or\t$rT, $rA, $rB", IntegerOp,
    [/* no pattern */]>;

def : Pat<(SPUextract_elt0 (v4i32 VECREG:$rA)),
          (ORi32_v4i32 VECREG:$rA, VECREG:$rA)>;

def : Pat<(SPUextract_elt0_chained (v4i32 VECREG:$rA)),
          (ORi32_v4i32 VECREG:$rA, VECREG:$rA)>;

def ORi64_v2i64:
  RRForm<0b10000010000, (outs R64C:$rT), (ins VECREG:$rA, VECREG:$rB),
    "or\t$rT, $rA, $rB", IntegerOp,
    [/* no pattern */]>;

def : Pat<(SPUextract_elt0 (v2i64 VECREG:$rA)),
          (ORi64_v2i64 VECREG:$rA, VECREG:$rA)>;

def : Pat<(SPUextract_elt0_chained (v2i64 VECREG:$rA)),
          (ORi64_v2i64 VECREG:$rA, VECREG:$rA)>;

def ORf32_v4f32:
  RRForm<0b10000010000, (outs R32FP:$rT), (ins VECREG:$rA, VECREG:$rB),
    "or\t$rT, $rA, $rB", IntegerOp,
    [/* no pattern */]>;

def : Pat<(SPUextract_elt0 (v4f32 VECREG:$rA)),
          (ORf32_v4f32 VECREG:$rA, VECREG:$rA)>;

def : Pat<(SPUextract_elt0_chained (v4f32 VECREG:$rA)),
          (ORf32_v4f32 VECREG:$rA, VECREG:$rA)>;

def ORf64_v2f64:
  RRForm<0b10000010000, (outs R64FP:$rT), (ins VECREG:$rA, VECREG:$rB),
    "or\t$rT, $rA, $rB", IntegerOp,
    [/* no pattern */]>;

def : Pat<(SPUextract_elt0 (v2f64 VECREG:$rA)),
          (ORf64_v2f64 VECREG:$rA, VECREG:$rA)>;

def : Pat<(SPUextract_elt0_chained (v2f64 VECREG:$rA)),
          (ORf64_v2f64 VECREG:$rA, VECREG:$rA)>;

// ORC: Bitwise "or" with complement (match before ORvec, ORr32)
def ORCv16i8:
  RRForm<0b10010010000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
    "orc\t$rT, $rA, $rB", IntegerOp,
    [(set (v16i8 VECREG:$rT), (or (v16i8 VECREG:$rA),
                                  (vnot (v16i8 VECREG:$rB))))]>;

def ORCv8i16:
  RRForm<0b10010010000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
    "orc\t$rT, $rA, $rB", IntegerOp,
    [(set (v8i16 VECREG:$rT), (or (v8i16 VECREG:$rA),
                                  (vnot (v8i16 VECREG:$rB))))]>;

def ORCv4i32:
  RRForm<0b10010010000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
    "orc\t$rT, $rA, $rB", IntegerOp,
    [(set (v4i32 VECREG:$rT), (or (v4i32 VECREG:$rA),
                                  (vnot (v4i32 VECREG:$rB))))]>;

def ORCr32:
  RRForm<0b10010010000, (outs R32C:$rT), (ins R32C:$rA, R32C:$rB),
    "orc\t$rT, $rA, $rB", IntegerOp,
    [(set R32C:$rT, (or R32C:$rA, (not R32C:$rB)))]>;

def ORCr16:
  RRForm<0b10010010000, (outs R16C:$rT), (ins R16C:$rA, R16C:$rB),
    "orc\t$rT, $rA, $rB", IntegerOp,
    [(set R16C:$rT, (or R16C:$rA, (not R16C:$rB)))]>;

def ORCr8:
  RRForm<0b10010010000, (outs R8C:$rT), (ins R8C:$rA, R8C:$rB),
    "orc\t$rT, $rA, $rB", IntegerOp,
    [(set R8C:$rT, (or R8C:$rA, (not R8C:$rB)))]>;

// OR byte immediate
def ORBIv16i8:
  RI10Form<0b01100000, (outs VECREG:$rT), (ins VECREG:$rA, u10imm:$val),
    "orbi\t$rT, $rA, $val", IntegerOp,
    [(set (v16i8 VECREG:$rT),
          (or (v16i8 VECREG:$rA), (v16i8 v16i8U8Imm:$val)))]>;

def ORBIr8:
  RI10Form<0b01100000, (outs R8C:$rT), (ins R8C:$rA, u10imm_i8:$val),
    "orbi\t$rT, $rA, $val", IntegerOp,
    [(set R8C:$rT, (or R8C:$rA, immU8:$val))]>;

// OR halfword immediate
def ORHIv8i16:
  RI10Form<0b10100000, (outs VECREG:$rT), (ins VECREG:$rA, u10imm:$val),
    "orhi\t$rT, $rA, $val", IntegerOp,
    [(set (v8i16 VECREG:$rT), (or (v8i16 VECREG:$rA),
                                  v8i16Uns10Imm:$val))]>;

def ORHIr16:
  RI10Form<0b10100000, (outs R16C:$rT), (ins R16C:$rA, u10imm:$val),
    "orhi\t$rT, $rA, $val", IntegerOp,
    [(set R16C:$rT, (or R16C:$rA, i16ImmUns10:$val))]>;

// Hacked form of ORHI used to promote 8-bit registers to 16-bit
def ORHI1To2:
  RI10Form<0b10100000, (outs R16C:$rT), (ins R8C:$rA, s10imm:$val),
    "orhi\t$rT, $rA, $val", IntegerOp,
    [(set R16C:$rT, (or (anyext R8C:$rA), i16ImmSExt10:$val))]>;

// Bitwise "or" with immediate
def ORIv4i32:
  RI10Form<0b00100000, (outs VECREG:$rT), (ins VECREG:$rA, u10imm:$val),
Scott Michel8b6b4202007-12-04 22:35:58 +00001477 "ori\t$rT, $rA, $val", IntegerOp,
1478 [(set (v4i32 VECREG:$rT), (or (v4i32 VECREG:$rA),
Scott Michel438be252007-12-17 22:32:34 +00001479 v4i32Uns10Imm:$val))]>;
Scott Michel8b6b4202007-12-04 22:35:58 +00001480
1481def ORIr32:
Scott Michel438be252007-12-17 22:32:34 +00001482 RI10Form<0b00100000, (outs R32C:$rT), (ins R32C:$rA, u10imm_i32:$val),
Scott Michel8b6b4202007-12-04 22:35:58 +00001483 "ori\t$rT, $rA, $val", IntegerOp,
Scott Michel438be252007-12-17 22:32:34 +00001484 [(set R32C:$rT, (or R32C:$rA, i32ImmUns10:$val))]>;
Scott Michel8b6b4202007-12-04 22:35:58 +00001485
Scott Michel8b6b4202007-12-04 22:35:58 +00001486def ORIr64:
1487 RI10Form_1<0b00100000, (outs R64C:$rT), (ins R64C:$rA, s10imm_i32:$val),
1488 "ori\t$rT, $rA, $val", IntegerOp,
1489 [/* no pattern */]>;
1490
// ORI2To4: Hacked version of the ORI instruction to extend 16-bit quantities
// to 32-bit quantities. Used exclusively to match "anyext" conversions (see
// the "anyext 16->32" pattern below).
1494def ORI2To4:
1495 RI10Form<0b00100000, (outs R32C:$rT), (ins R16C:$rA, s10imm_i32:$val),
1496 "ori\t$rT, $rA, $val", IntegerOp,
1497 [(set R32C:$rT, (or (anyext R16C:$rA), i32ImmSExt10:$val))]>;
1498
// ORI1To4: Hacked version of the ORI instruction to extend 8-bit quantities
// to 32-bit quantities. Used exclusively to match "anyext" conversions (see
// the "anyext 8->32" pattern below).
1502def ORI1To4:
1503 RI10Form<0b00100000, (outs R32C:$rT), (ins R8C:$rA, s10imm_i32:$val),
1504 "ori\t$rT, $rA, $val", IntegerOp,
1505 [(set R32C:$rT, (or (anyext R8C:$rA), i32ImmSExt10:$val))]>;
1506
// ORX: "or" across the vector: ORs together $rA's four word slots, leaving
// the result in $rT's word slot 0; slots 1-3 are zeroed.
1509//
Scott Michel438be252007-12-17 22:32:34 +00001510// FIXME: Needs to match an intrinsic pattern.
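//
// Worked example of the description above: if $rA = <0x1, 0x2, 0x4, 0x8>
// (as v4i32), the result is <0xF, 0x0, 0x0, 0x0>.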
Scott Michel8b6b4202007-12-04 22:35:58 +00001511def ORXv4i32:
1512 RRForm<0b10010010000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
1513 "orx\t$rT, $rA, $rB", IntegerOp,
1514 []>;
1515
Scott Michel438be252007-12-17 22:32:34 +00001516// XOR:
Scott Michel8b6b4202007-12-04 22:35:58 +00001517def XORv16i8:
1518 RRForm<0b10010010000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
1519 "xor\t$rT, $rA, $rB", IntegerOp,
1520 [(set (v16i8 VECREG:$rT), (xor (v16i8 VECREG:$rA), (v16i8 VECREG:$rB)))]>;
1521
1522def XORv8i16:
1523 RRForm<0b10010010000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
1524 "xor\t$rT, $rA, $rB", IntegerOp,
1525 [(set (v8i16 VECREG:$rT), (xor (v8i16 VECREG:$rA), (v8i16 VECREG:$rB)))]>;
1526
1527def XORv4i32:
1528 RRForm<0b10010010000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
1529 "xor\t$rT, $rA, $rB", IntegerOp,
1530 [(set (v4i32 VECREG:$rT), (xor (v4i32 VECREG:$rA), (v4i32 VECREG:$rB)))]>;
1531
1532def XORr32:
1533 RRForm<0b10010010000, (outs R32C:$rT), (ins R32C:$rA, R32C:$rB),
1534 "xor\t$rT, $rA, $rB", IntegerOp,
1535 [(set R32C:$rT, (xor R32C:$rA, R32C:$rB))]>;
1536
1537//==----------------------------------------------------------
1538// Special forms for floating point instructions.
1539// Bitwise ORs and ANDs don't make sense for normal floating
1540// point numbers. These operations (fneg and fabs), however,
1541// require bitwise logical ops to manipulate the sign bit.
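//
// For IEEE-754 single precision the sign is the most significant bit, so
// (standard identities, not specific to this backend):
//
//   fneg(x) == x ^ 0x80000000   (flip the sign bit)
//   fabs(x) == x & 0x7fffffff   (clear the sign bit)
//
// XORfneg32 below supplies the xor half of that; the sign-bit mask arrives
// in an ordinary R32C register.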
1542def XORfneg32:
1543 RRForm<0b10010010000, (outs R32FP:$rT), (ins R32FP:$rA, R32C:$rB),
1544 "xor\t$rT, $rA, $rB", IntegerOp,
1545 [/* Intentionally does not match a pattern, see fneg32 */]>;
1546
// KLUDGY! Is there a better way to do this without a VECREG? bitconvert?
// The VECREG is assumed to contain two identical 64-bit masks, so it does
// not matter which word is selected for the xor.
1550def XORfneg64:
1551 RRForm<0b10010010000, (outs R64FP:$rT), (ins R64FP:$rA, VECREG:$rB),
1552 "xor\t$rT, $rA, $rB", IntegerOp,
1553 [/* Intentionally does not match a pattern, see fneg64 */]>;
1554
1555// Could use XORv4i32, but will use this for clarity
1556def XORfnegvec:
1557 RRForm<0b10010010000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
1558 "xor\t$rT, $rA, $rB", IntegerOp,
1559 [/* Intentionally does not match a pattern, see fneg{32,64} */]>;
1560
1561//==----------------------------------------------------------
1562
1563def XORr16:
1564 RRForm<0b10010010000, (outs R16C:$rT), (ins R16C:$rA, R16C:$rB),
1565 "xor\t$rT, $rA, $rB", IntegerOp,
1566 [(set R16C:$rT, (xor R16C:$rA, R16C:$rB))]>;
1567
Scott Michel438be252007-12-17 22:32:34 +00001568def XORr8:
1569 RRForm<0b10010010000, (outs R8C:$rT), (ins R8C:$rA, R8C:$rB),
1570 "xor\t$rT, $rA, $rB", IntegerOp,
1571 [(set R8C:$rT, (xor R8C:$rA, R8C:$rB))]>;
1572
Scott Michel8b6b4202007-12-04 22:35:58 +00001573def XORBIv16i8:
1574 RI10Form<0b01100000, (outs VECREG:$rT), (ins VECREG:$rA, u10imm:$val),
1575 "xorbi\t$rT, $rA, $val", IntegerOp,
1576 [(set (v16i8 VECREG:$rT), (xor (v16i8 VECREG:$rA), v16i8U8Imm:$val))]>;
1577
Scott Michel438be252007-12-17 22:32:34 +00001578def XORBIr8:
1579 RI10Form<0b01100000, (outs R8C:$rT), (ins R8C:$rA, u10imm_i8:$val),
1580 "xorbi\t$rT, $rA, $val", IntegerOp,
1581 [(set R8C:$rT, (xor R8C:$rA, immU8:$val))]>;
1582
Scott Michel8b6b4202007-12-04 22:35:58 +00001583def XORHIv8i16:
1584 RI10Form<0b10100000, (outs VECREG:$rT), (ins VECREG:$rA, s10imm:$val),
1585 "xorhi\t$rT, $rA, $val", IntegerOp,
1586 [(set (v8i16 VECREG:$rT), (xor (v8i16 VECREG:$rA),
1587 v8i16SExt10Imm:$val))]>;
1588
1589def XORHIr16:
1590 RI10Form<0b10100000, (outs R16C:$rT), (ins R16C:$rA, s10imm:$val),
1591 "xorhi\t$rT, $rA, $val", IntegerOp,
1592 [(set R16C:$rT, (xor R16C:$rA, i16ImmSExt10:$val))]>;
1593
1594def XORIv4i32:
1595 RI10Form<0b00100000, (outs VECREG:$rT), (ins VECREG:$rA, s10imm:$val),
1596 "xori\t$rT, $rA, $val", IntegerOp,
1597 [(set (v4i32 VECREG:$rT), (xor (v4i32 VECREG:$rA),
1598 v4i32SExt10Imm:$val))]>;
1599
1600def XORIr32:
1601 RI10Form<0b00100000, (outs R32C:$rT), (ins R32C:$rA, s10imm_i32:$val),
1602 "xori\t$rT, $rA, $val", IntegerOp,
1603 [(set R32C:$rT, (xor R32C:$rA, i32ImmSExt10:$val))]>;
1604
1605// NAND:
1606def NANDv16i8:
1607 RRForm<0b10010010000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
1608 "nand\t$rT, $rA, $rB", IntegerOp,
1609 [(set (v16i8 VECREG:$rT), (vnot (and (v16i8 VECREG:$rA),
1610 (v16i8 VECREG:$rB))))]>;
1611
1612def NANDv8i16:
1613 RRForm<0b10010010000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
1614 "nand\t$rT, $rA, $rB", IntegerOp,
1615 [(set (v8i16 VECREG:$rT), (vnot (and (v8i16 VECREG:$rA),
1616 (v8i16 VECREG:$rB))))]>;
1617
1618def NANDv4i32:
1619 RRForm<0b10010010000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
1620 "nand\t$rT, $rA, $rB", IntegerOp,
1621 [(set (v4i32 VECREG:$rT), (vnot (and (v4i32 VECREG:$rA),
1622 (v4i32 VECREG:$rB))))]>;
1623
1624def NANDr32:
1625 RRForm<0b10010010000, (outs R32C:$rT), (ins R32C:$rA, R32C:$rB),
1626 "nand\t$rT, $rA, $rB", IntegerOp,
1627 [(set R32C:$rT, (not (and R32C:$rA, R32C:$rB)))]>;
1628
1629def NANDr16:
1630 RRForm<0b10010010000, (outs R16C:$rT), (ins R16C:$rA, R16C:$rB),
1631 "nand\t$rT, $rA, $rB", IntegerOp,
1632 [(set R16C:$rT, (not (and R16C:$rA, R16C:$rB)))]>;
1633
Scott Michel438be252007-12-17 22:32:34 +00001634def NANDr8:
1635 RRForm<0b10010010000, (outs R8C:$rT), (ins R8C:$rA, R8C:$rB),
1636 "nand\t$rT, $rA, $rB", IntegerOp,
1637 [(set R8C:$rT, (not (and R8C:$rA, R8C:$rB)))]>;
1638
Scott Michel8b6b4202007-12-04 22:35:58 +00001639// NOR:
1640def NORv16i8:
1641 RRForm<0b10010010000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
1642 "nor\t$rT, $rA, $rB", IntegerOp,
1643 [(set (v16i8 VECREG:$rT), (vnot (or (v16i8 VECREG:$rA),
1644 (v16i8 VECREG:$rB))))]>;
1645
1646def NORv8i16:
1647 RRForm<0b10010010000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
1648 "nor\t$rT, $rA, $rB", IntegerOp,
1649 [(set (v8i16 VECREG:$rT), (vnot (or (v8i16 VECREG:$rA),
1650 (v8i16 VECREG:$rB))))]>;
1651
1652def NORv4i32:
1653 RRForm<0b10010010000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
1654 "nor\t$rT, $rA, $rB", IntegerOp,
1655 [(set (v4i32 VECREG:$rT), (vnot (or (v4i32 VECREG:$rA),
1656 (v4i32 VECREG:$rB))))]>;
1657
1658def NORr32:
1659 RRForm<0b10010010000, (outs R32C:$rT), (ins R32C:$rA, R32C:$rB),
1660 "nor\t$rT, $rA, $rB", IntegerOp,
1661 [(set R32C:$rT, (not (or R32C:$rA, R32C:$rB)))]>;
1662
1663def NORr16:
1664 RRForm<0b10010010000, (outs R16C:$rT), (ins R16C:$rA, R16C:$rB),
1665 "nor\t$rT, $rA, $rB", IntegerOp,
1666 [(set R16C:$rT, (not (or R16C:$rA, R16C:$rB)))]>;
1667
Scott Michel438be252007-12-17 22:32:34 +00001668def NORr8:
1669 RRForm<0b10010010000, (outs R8C:$rT), (ins R8C:$rA, R8C:$rB),
1670 "nor\t$rT, $rA, $rB", IntegerOp,
1671 [(set R8C:$rT, (not (or R8C:$rA, R8C:$rB)))]>;
1672
Scott Michel8b6b4202007-12-04 22:35:58 +00001673// EQV: Equivalence (1 for each same bit, otherwise 0)
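//
// Standard identities (not specific to this backend):
//   eqv(a, b) == ~(a ^ b) == (a & b) | (~a & ~b) == a ^ ~b
// which is why the patterns below also fold "xor with a complemented
// operand" into EQV.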
1674def EQVv16i8:
1675 RRForm<0b10010010000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
1676 "eqv\t$rT, $rA, $rB", IntegerOp,
1677 [(set (v16i8 VECREG:$rT), (or (and (v16i8 VECREG:$rA),
1678 (v16i8 VECREG:$rB)),
1679 (and (vnot (v16i8 VECREG:$rA)),
1680 (vnot (v16i8 VECREG:$rB)))))]>;
1681
1682def : Pat<(xor (v16i8 VECREG:$rA), (vnot (v16i8 VECREG:$rB))),
1683 (EQVv16i8 VECREG:$rA, VECREG:$rB)>;
1684
1685def : Pat<(xor (vnot (v16i8 VECREG:$rA)), (v16i8 VECREG:$rB)),
1686 (EQVv16i8 VECREG:$rA, VECREG:$rB)>;
1687
1688def EQVv8i16:
1689 RRForm<0b10010010000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
1690 "eqv\t$rT, $rA, $rB", IntegerOp,
1691 [(set (v8i16 VECREG:$rT), (or (and (v8i16 VECREG:$rA),
1692 (v8i16 VECREG:$rB)),
1693 (and (vnot (v8i16 VECREG:$rA)),
1694 (vnot (v8i16 VECREG:$rB)))))]>;
1695
1696def : Pat<(xor (v8i16 VECREG:$rA), (vnot (v8i16 VECREG:$rB))),
1697 (EQVv8i16 VECREG:$rA, VECREG:$rB)>;
1698
1699def : Pat<(xor (vnot (v8i16 VECREG:$rA)), (v8i16 VECREG:$rB)),
1700 (EQVv8i16 VECREG:$rA, VECREG:$rB)>;
1701
1702def EQVv4i32:
1703 RRForm<0b10010010000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
1704 "eqv\t$rT, $rA, $rB", IntegerOp,
1705 [(set (v4i32 VECREG:$rT), (or (and (v4i32 VECREG:$rA),
1706 (v4i32 VECREG:$rB)),
1707 (and (vnot (v4i32 VECREG:$rA)),
1708 (vnot (v4i32 VECREG:$rB)))))]>;
1709
1710def : Pat<(xor (v4i32 VECREG:$rA), (vnot (v4i32 VECREG:$rB))),
1711 (EQVv4i32 VECREG:$rA, VECREG:$rB)>;
1712
1713def : Pat<(xor (vnot (v4i32 VECREG:$rA)), (v4i32 VECREG:$rB)),
1714 (EQVv4i32 VECREG:$rA, VECREG:$rB)>;
1715
1716def EQVr32:
1717 RRForm<0b10010010000, (outs R32C:$rT), (ins R32C:$rA, R32C:$rB),
1718 "eqv\t$rT, $rA, $rB", IntegerOp,
1719 [(set R32C:$rT, (or (and R32C:$rA, R32C:$rB),
1720 (and (not R32C:$rA), (not R32C:$rB))))]>;
1721
1722def : Pat<(xor R32C:$rA, (not R32C:$rB)),
1723 (EQVr32 R32C:$rA, R32C:$rB)>;
1724
1725def : Pat<(xor (not R32C:$rA), R32C:$rB),
1726 (EQVr32 R32C:$rA, R32C:$rB)>;
1727
1728def EQVr16:
1729 RRForm<0b10010010000, (outs R16C:$rT), (ins R16C:$rA, R16C:$rB),
1730 "eqv\t$rT, $rA, $rB", IntegerOp,
1731 [(set R16C:$rT, (or (and R16C:$rA, R16C:$rB),
1732 (and (not R16C:$rA), (not R16C:$rB))))]>;
1733
1734def : Pat<(xor R16C:$rA, (not R16C:$rB)),
1735 (EQVr16 R16C:$rA, R16C:$rB)>;
1736
1737def : Pat<(xor (not R16C:$rA), R16C:$rB),
1738 (EQVr16 R16C:$rA, R16C:$rB)>;
1739
Scott Michel438be252007-12-17 22:32:34 +00001740def EQVr8:
1741 RRForm<0b10010010000, (outs R8C:$rT), (ins R8C:$rA, R8C:$rB),
1742 "eqv\t$rT, $rA, $rB", IntegerOp,
1743 [(set R8C:$rT, (or (and R8C:$rA, R8C:$rB),
1744 (and (not R8C:$rA), (not R8C:$rB))))]>;
1745
1746def : Pat<(xor R8C:$rA, (not R8C:$rB)),
1747 (EQVr8 R8C:$rA, R8C:$rB)>;
1748
1749def : Pat<(xor (not R8C:$rA), R8C:$rB),
1750 (EQVr8 R8C:$rA, R8C:$rB)>;
1751
// gcc optimizes (p & q) | (~p & ~q) -> ~(p | q) | (p & q) (the two are equal
// by De Morgan: ~(p | q) == ~p & ~q), so match that pattern also:
1754def : Pat<(or (vnot (or (v16i8 VECREG:$rA), (v16i8 VECREG:$rB))),
1755 (and (v16i8 VECREG:$rA), (v16i8 VECREG:$rB))),
1756 (EQVv16i8 VECREG:$rA, VECREG:$rB)>;
1757
1758def : Pat<(or (vnot (or (v8i16 VECREG:$rA), (v8i16 VECREG:$rB))),
1759 (and (v8i16 VECREG:$rA), (v8i16 VECREG:$rB))),
1760 (EQVv8i16 VECREG:$rA, VECREG:$rB)>;
1761
1762def : Pat<(or (vnot (or (v4i32 VECREG:$rA), (v4i32 VECREG:$rB))),
1763 (and (v4i32 VECREG:$rA), (v4i32 VECREG:$rB))),
1764 (EQVv4i32 VECREG:$rA, VECREG:$rB)>;
1765
1766def : Pat<(or (not (or R32C:$rA, R32C:$rB)), (and R32C:$rA, R32C:$rB)),
1767 (EQVr32 R32C:$rA, R32C:$rB)>;
1768
1769def : Pat<(or (not (or R16C:$rA, R16C:$rB)), (and R16C:$rA, R16C:$rB)),
1770 (EQVr16 R16C:$rA, R16C:$rB)>;
1771
Scott Michel438be252007-12-17 22:32:34 +00001772def : Pat<(or (not (or R8C:$rA, R8C:$rB)), (and R8C:$rA, R8C:$rB)),
1773 (EQVr8 R8C:$rA, R8C:$rB)>;
1774
Scott Michel8b6b4202007-12-04 22:35:58 +00001775// Select bits:
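//
// selb merges $rA and $rB bit-by-bit under control of the mask in $rC. The
// patterns below recognize the usual bitwise-select idiom
//
//   (x & mask) | (y & ~mask)
//
// in its commuted spellings and fold it into a single selb.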
1776def SELBv16i8:
1777 RRRForm<0b1000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB, VECREG:$rC),
1778 "selb\t$rT, $rA, $rB, $rC", IntegerOp,
1779 [(set (v16i8 VECREG:$rT),
1780 (SPUselb_v16i8 (v16i8 VECREG:$rA), (v16i8 VECREG:$rB),
1781 (v16i8 VECREG:$rC)))]>;
1782
1783def : Pat<(or (and (v16i8 VECREG:$rA), (v16i8 VECREG:$rC)),
1784 (and (v16i8 VECREG:$rB), (vnot (v16i8 VECREG:$rC)))),
1785 (SELBv16i8 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;
1786
1787def : Pat<(or (and (v16i8 VECREG:$rC), (v16i8 VECREG:$rA)),
1788 (and (v16i8 VECREG:$rB), (vnot (v16i8 VECREG:$rC)))),
1789 (SELBv16i8 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;
1790
1791def : Pat<(or (and (v16i8 VECREG:$rA), (v16i8 VECREG:$rC)),
1792 (and (vnot (v16i8 VECREG:$rC)), (v16i8 VECREG:$rB))),
1793 (SELBv16i8 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;
1794
1795def : Pat<(or (and (v16i8 VECREG:$rC), (v16i8 VECREG:$rA)),
1796 (and (vnot (v16i8 VECREG:$rC)), (v16i8 VECREG:$rB))),
1797 (SELBv16i8 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;
1798
1799def : Pat<(or (and (v16i8 VECREG:$rA), (vnot (v16i8 VECREG:$rC))),
1800 (and (v16i8 VECREG:$rB), (v16i8 VECREG:$rC))),
1801 (SELBv16i8 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;
1802
1803def : Pat<(or (and (v16i8 VECREG:$rA), (vnot (v16i8 VECREG:$rC))),
1804 (and (v16i8 VECREG:$rC), (v16i8 VECREG:$rB))),
1805 (SELBv16i8 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;
1806
1807def : Pat<(or (and (vnot (v16i8 VECREG:$rC)), (v16i8 VECREG:$rA)),
1808 (and (v16i8 VECREG:$rB), (v16i8 VECREG:$rC))),
1809 (SELBv16i8 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;
1810
1811def : Pat<(or (and (vnot (v16i8 VECREG:$rC)), (v16i8 VECREG:$rA)),
1812 (and (v16i8 VECREG:$rC), (v16i8 VECREG:$rB))),
1813 (SELBv16i8 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;
1814
1847def SELBv8i16:
1848 RRRForm<0b1000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB, VECREG:$rC),
1849 "selb\t$rT, $rA, $rB, $rC", IntegerOp,
1850 [(set (v8i16 VECREG:$rT),
1851 (SPUselb_v8i16 (v8i16 VECREG:$rA), (v8i16 VECREG:$rB),
1852 (v8i16 VECREG:$rC)))]>;
1853
1854def : Pat<(or (and (v8i16 VECREG:$rA), (v8i16 VECREG:$rC)),
1855 (and (v8i16 VECREG:$rB), (vnot (v8i16 VECREG:$rC)))),
1856 (SELBv8i16 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;
1857
1858def : Pat<(or (and (v8i16 VECREG:$rC), (v8i16 VECREG:$rA)),
1859 (and (v8i16 VECREG:$rB), (vnot (v8i16 VECREG:$rC)))),
1860 (SELBv8i16 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;
1861
1862def : Pat<(or (and (v8i16 VECREG:$rA), (v8i16 VECREG:$rC)),
1863 (and (vnot (v8i16 VECREG:$rC)), (v8i16 VECREG:$rB))),
1864 (SELBv8i16 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;
1865
1866def : Pat<(or (and (v8i16 VECREG:$rC), (v8i16 VECREG:$rA)),
1867 (and (vnot (v8i16 VECREG:$rC)), (v8i16 VECREG:$rB))),
1868 (SELBv8i16 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;
1869
1870def : Pat<(or (and (v8i16 VECREG:$rA), (vnot (v8i16 VECREG:$rC))),
1871 (and (v8i16 VECREG:$rB), (v8i16 VECREG:$rC))),
1872 (SELBv8i16 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;
1873
1874def : Pat<(or (and (v8i16 VECREG:$rA), (vnot (v8i16 VECREG:$rC))),
1875 (and (v8i16 VECREG:$rC), (v8i16 VECREG:$rB))),
1876 (SELBv8i16 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;
1877
1878def : Pat<(or (and (vnot (v8i16 VECREG:$rC)), (v8i16 VECREG:$rA)),
1879 (and (v8i16 VECREG:$rB), (v8i16 VECREG:$rC))),
1880 (SELBv8i16 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;
1881
1882def : Pat<(or (and (vnot (v8i16 VECREG:$rC)), (v8i16 VECREG:$rA)),
1883 (and (v8i16 VECREG:$rC), (v8i16 VECREG:$rB))),
1884 (SELBv8i16 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;
1885
1918def SELBv4i32:
1919 RRRForm<0b1000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB, VECREG:$rC),
1920 "selb\t$rT, $rA, $rB, $rC", IntegerOp,
1921 [(set (v4i32 VECREG:$rT),
1922 (SPUselb_v4i32 (v4i32 VECREG:$rA), (v4i32 VECREG:$rB),
1923 (v4i32 VECREG:$rC)))]>;
1924
1925def : Pat<(or (and (v4i32 VECREG:$rA), (v4i32 VECREG:$rC)),
1926 (and (v4i32 VECREG:$rB), (vnot (v4i32 VECREG:$rC)))),
1927 (SELBv4i32 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;
1928
1929def : Pat<(or (and (v4i32 VECREG:$rC), (v4i32 VECREG:$rA)),
1930 (and (v4i32 VECREG:$rB), (vnot (v4i32 VECREG:$rC)))),
1931 (SELBv4i32 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;
1932
1933def : Pat<(or (and (v4i32 VECREG:$rA), (v4i32 VECREG:$rC)),
1934 (and (vnot (v4i32 VECREG:$rC)), (v4i32 VECREG:$rB))),
1935 (SELBv4i32 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;
1936
1937def : Pat<(or (and (v4i32 VECREG:$rC), (v4i32 VECREG:$rA)),
1938 (and (vnot (v4i32 VECREG:$rC)), (v4i32 VECREG:$rB))),
1939 (SELBv4i32 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;
1940
1941def : Pat<(or (and (v4i32 VECREG:$rA), (vnot (v4i32 VECREG:$rC))),
1942 (and (v4i32 VECREG:$rB), (v4i32 VECREG:$rC))),
1943 (SELBv4i32 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;
1944
1945def : Pat<(or (and (v4i32 VECREG:$rA), (vnot (v4i32 VECREG:$rC))),
1946 (and (v4i32 VECREG:$rC), (v4i32 VECREG:$rB))),
1947 (SELBv4i32 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;
1948
1949def : Pat<(or (and (vnot (v4i32 VECREG:$rC)), (v4i32 VECREG:$rA)),
1950 (and (v4i32 VECREG:$rB), (v4i32 VECREG:$rC))),
1951 (SELBv4i32 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;
1952
1953def : Pat<(or (and (vnot (v4i32 VECREG:$rC)), (v4i32 VECREG:$rA)),
1954 (and (v4i32 VECREG:$rC), (v4i32 VECREG:$rB))),
1955 (SELBv4i32 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;
1956
1989def SELBr32:
1990 RRRForm<0b1000, (outs R32C:$rT), (ins R32C:$rA, R32C:$rB, R32C:$rC),
1991 "selb\t$rT, $rA, $rB, $rC", IntegerOp,
1992 []>;
1993
1994// And the various patterns that can be matched... (all 8 of them :-)
1995def : Pat<(or (and R32C:$rA, R32C:$rC),
1996 (and R32C:$rB, (not R32C:$rC))),
1997 (SELBr32 R32C:$rA, R32C:$rB, R32C:$rC)>;
1998
1999def : Pat<(or (and R32C:$rC, R32C:$rA),
2000 (and R32C:$rB, (not R32C:$rC))),
2001 (SELBr32 R32C:$rA, R32C:$rB, R32C:$rC)>;
2002
2003def : Pat<(or (and R32C:$rA, R32C:$rC),
2004 (and (not R32C:$rC), R32C:$rB)),
2005 (SELBr32 R32C:$rA, R32C:$rB, R32C:$rC)>;
2006
2007def : Pat<(or (and R32C:$rC, R32C:$rA),
2008 (and (not R32C:$rC), R32C:$rB)),
2009 (SELBr32 R32C:$rA, R32C:$rB, R32C:$rC)>;
2010
2011def : Pat<(or (and R32C:$rA, (not R32C:$rC)),
2012 (and R32C:$rB, R32C:$rC)),
2013 (SELBr32 R32C:$rA, R32C:$rB, R32C:$rC)>;
2014
2015def : Pat<(or (and R32C:$rA, (not R32C:$rC)),
2016 (and R32C:$rC, R32C:$rB)),
2017 (SELBr32 R32C:$rA, R32C:$rB, R32C:$rC)>;
2018
2019def : Pat<(or (and (not R32C:$rC), R32C:$rA),
2020 (and R32C:$rB, R32C:$rC)),
2021 (SELBr32 R32C:$rA, R32C:$rB, R32C:$rC)>;
2022
2023def : Pat<(or (and (not R32C:$rC), R32C:$rA),
2024 (and R32C:$rC, R32C:$rB)),
2025 (SELBr32 R32C:$rA, R32C:$rB, R32C:$rC)>;
2026
2027def SELBr16:
2028 RRRForm<0b1000, (outs R16C:$rT), (ins R16C:$rA, R16C:$rB, R16C:$rC),
2029 "selb\t$rT, $rA, $rB, $rC", IntegerOp,
2030 []>;
2031
2032def : Pat<(or (and R16C:$rA, R16C:$rC),
2033 (and R16C:$rB, (not R16C:$rC))),
2034 (SELBr16 R16C:$rA, R16C:$rB, R16C:$rC)>;
2035
2036def : Pat<(or (and R16C:$rC, R16C:$rA),
2037 (and R16C:$rB, (not R16C:$rC))),
2038 (SELBr16 R16C:$rA, R16C:$rB, R16C:$rC)>;
2039
2040def : Pat<(or (and R16C:$rA, R16C:$rC),
2041 (and (not R16C:$rC), R16C:$rB)),
2042 (SELBr16 R16C:$rA, R16C:$rB, R16C:$rC)>;
2043
2044def : Pat<(or (and R16C:$rC, R16C:$rA),
2045 (and (not R16C:$rC), R16C:$rB)),
2046 (SELBr16 R16C:$rA, R16C:$rB, R16C:$rC)>;
2047
2048def : Pat<(or (and R16C:$rA, (not R16C:$rC)),
2049 (and R16C:$rB, R16C:$rC)),
2050 (SELBr16 R16C:$rA, R16C:$rB, R16C:$rC)>;
2051
2052def : Pat<(or (and R16C:$rA, (not R16C:$rC)),
2053 (and R16C:$rC, R16C:$rB)),
2054 (SELBr16 R16C:$rA, R16C:$rB, R16C:$rC)>;
2055
2056def : Pat<(or (and (not R16C:$rC), R16C:$rA),
2057 (and R16C:$rB, R16C:$rC)),
2058 (SELBr16 R16C:$rA, R16C:$rB, R16C:$rC)>;
2059
2060def : Pat<(or (and (not R16C:$rC), R16C:$rA),
2061 (and R16C:$rC, R16C:$rB)),
2062 (SELBr16 R16C:$rA, R16C:$rB, R16C:$rC)>;
Scott Michel438be252007-12-17 22:32:34 +00002063
2064def SELBr8:
2065 RRRForm<0b1000, (outs R8C:$rT), (ins R8C:$rA, R8C:$rB, R8C:$rC),
2066 "selb\t$rT, $rA, $rB, $rC", IntegerOp,
2067 []>;
2068
2069def : Pat<(or (and R8C:$rA, R8C:$rC),
2070 (and R8C:$rB, (not R8C:$rC))),
2071 (SELBr8 R8C:$rA, R8C:$rB, R8C:$rC)>;
2072
2073def : Pat<(or (and R8C:$rC, R8C:$rA),
2074 (and R8C:$rB, (not R8C:$rC))),
2075 (SELBr8 R8C:$rA, R8C:$rB, R8C:$rC)>;
2076
2077def : Pat<(or (and R8C:$rA, R8C:$rC),
2078 (and (not R8C:$rC), R8C:$rB)),
2079 (SELBr8 R8C:$rA, R8C:$rB, R8C:$rC)>;
2080
2081def : Pat<(or (and R8C:$rC, R8C:$rA),
2082 (and (not R8C:$rC), R8C:$rB)),
2083 (SELBr8 R8C:$rA, R8C:$rB, R8C:$rC)>;
2084
2085def : Pat<(or (and R8C:$rA, (not R8C:$rC)),
2086 (and R8C:$rB, R8C:$rC)),
2087 (SELBr8 R8C:$rA, R8C:$rB, R8C:$rC)>;
2088
2089def : Pat<(or (and R8C:$rA, (not R8C:$rC)),
2090 (and R8C:$rC, R8C:$rB)),
2091 (SELBr8 R8C:$rA, R8C:$rB, R8C:$rC)>;
2092
2093def : Pat<(or (and (not R8C:$rC), R8C:$rA),
2094 (and R8C:$rB, R8C:$rC)),
2095 (SELBr8 R8C:$rA, R8C:$rB, R8C:$rC)>;
2096
2097def : Pat<(or (and (not R8C:$rC), R8C:$rA),
2098 (and R8C:$rC, R8C:$rB)),
2099 (SELBr8 R8C:$rA, R8C:$rB, R8C:$rC)>;
Scott Michel8b6b4202007-12-04 22:35:58 +00002100
2101//===----------------------------------------------------------------------===//
2102// Vector shuffle...
2103//===----------------------------------------------------------------------===//
2104
2105def SHUFB:
2106 RRRForm<0b1000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB, VECREG:$rC),
2107 "shufb\t$rT, $rA, $rB, $rC", IntegerOp,
Scott Michel754d8662007-12-20 00:44:13 +00002108 [/* no pattern */]>;
Scott Michel8b6b4202007-12-04 22:35:58 +00002109
// SPUshuffle is generated in LowerVECTOR_SHUFFLE and gets replaced with SHUFB.
// See the SPUshuffle SDNode operator above, which sets up the DAG pattern
// matcher to emit something when LowerVECTOR_SHUFFLE generates a node with
// the SPUISD::SHUFB opcode.
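//
// Background (SPU ISA, assumed rather than restated in this file): each byte
// of $rC selects one byte of the 32-byte concatenation $rA||$rB, with a few
// special control values yielding constant 0x00, 0xff or 0x80 bytes. Because
// the selection is byte-granular, the one SHUFB instruction serves all of the
// element widths matched below.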
2114def : Pat<(SPUshuffle (v16i8 VECREG:$rA), (v16i8 VECREG:$rB), VECREG:$rC),
2115 (SHUFB VECREG:$rA, VECREG:$rB, VECREG:$rC)>;
2116
2117def : Pat<(SPUshuffle (v8i16 VECREG:$rA), (v8i16 VECREG:$rB), VECREG:$rC),
2118 (SHUFB VECREG:$rA, VECREG:$rB, VECREG:$rC)>;
2119
2120def : Pat<(SPUshuffle (v4i32 VECREG:$rA), (v4i32 VECREG:$rB), VECREG:$rC),
2121 (SHUFB VECREG:$rA, VECREG:$rB, VECREG:$rC)>;
2122
Scott Michel754d8662007-12-20 00:44:13 +00002123def : Pat<(SPUshuffle (v4f32 VECREG:$rA), (v4f32 VECREG:$rB), VECREG:$rC),
2124 (SHUFB VECREG:$rA, VECREG:$rB, VECREG:$rC)>;
2125
Scott Michel8b6b4202007-12-04 22:35:58 +00002126def : Pat<(SPUshuffle (v2i64 VECREG:$rA), (v2i64 VECREG:$rB), VECREG:$rC),
2127 (SHUFB VECREG:$rA, VECREG:$rB, VECREG:$rC)>;
2128
Scott Michel754d8662007-12-20 00:44:13 +00002129def : Pat<(SPUshuffle (v2f64 VECREG:$rA), (v2f64 VECREG:$rB), VECREG:$rC),
2130 (SHUFB VECREG:$rA, VECREG:$rB, VECREG:$rC)>;
2131
Scott Michel8b6b4202007-12-04 22:35:58 +00002132//===----------------------------------------------------------------------===//
2133// Shift and rotate group:
2134//===----------------------------------------------------------------------===//
2135
2136def SHLHv8i16:
2137 RRForm<0b11111010000, (outs VECREG:$rT), (ins VECREG:$rA, R16C:$rB),
2138 "shlh\t$rT, $rA, $rB", RotateShift,
2139 [(set (v8i16 VECREG:$rT),
2140 (SPUvec_shl_v8i16 (v8i16 VECREG:$rA), R16C:$rB))]>;
2141
2142// $rB gets promoted to 32-bit register type when confronted with
2143// this llvm assembly code:
2144//
2145// define i16 @shlh_i16_1(i16 %arg1, i16 %arg2) {
2146// %A = shl i16 %arg1, %arg2
2147// ret i16 %A
2148// }
2149//
2150// However, we will generate this code when lowering 8-bit shifts and rotates.
2151
2152def SHLHr16:
2153 RRForm<0b11111010000, (outs R16C:$rT), (ins R16C:$rA, R16C:$rB),
2154 "shlh\t$rT, $rA, $rB", RotateShift,
2155 [(set R16C:$rT, (shl R16C:$rA, R16C:$rB))]>;
2156
2157def SHLHr16_r32:
2158 RRForm<0b11111010000, (outs R16C:$rT), (ins R16C:$rA, R32C:$rB),
2159 "shlh\t$rT, $rA, $rB", RotateShift,
2160 [(set R16C:$rT, (shl R16C:$rA, R32C:$rB))]>;
2161
2162def SHLHIv8i16:
Scott Michel438be252007-12-17 22:32:34 +00002163 RI7Form<0b11111010000, (outs VECREG:$rT), (ins VECREG:$rA, u7imm_i8:$val),
Scott Michel8b6b4202007-12-04 22:35:58 +00002164 "shlhi\t$rT, $rA, $val", RotateShift,
2165 [(set (v8i16 VECREG:$rT),
Scott Michel438be252007-12-17 22:32:34 +00002166 (SPUvec_shl_v8i16 (v8i16 VECREG:$rA), (i8 uimm7:$val)))]>;
2167
2168def : Pat<(SPUvec_shl_v8i16 (v8i16 VECREG:$rA), (i16 uimm7:$val)),
2169 (SHLHIv8i16 VECREG:$rA, imm:$val)>;
Scott Michel8b6b4202007-12-04 22:35:58 +00002170
2171def : Pat<(SPUvec_shl_v8i16 (v8i16 VECREG:$rA), (i32 uimm7:$val)),
2172 (SHLHIv8i16 VECREG:$rA, imm:$val)>;
2173
2174def SHLHIr16:
2175 RI7Form<0b11111010000, (outs R16C:$rT), (ins R16C:$rA, u7imm_i32:$val),
2176 "shlhi\t$rT, $rA, $val", RotateShift,
2177 [(set R16C:$rT, (shl R16C:$rA, (i32 uimm7:$val)))]>;
Scott Michel438be252007-12-17 22:32:34 +00002178
2179def : Pat<(shl R16C:$rA, (i8 uimm7:$val)),
2180 (SHLHIr16 R16C:$rA, uimm7:$val)>;
Scott Michel8b6b4202007-12-04 22:35:58 +00002181
2182def : Pat<(shl R16C:$rA, (i16 uimm7:$val)),
2183 (SHLHIr16 R16C:$rA, uimm7:$val)>;
2184
2185def SHLv4i32:
2186 RRForm<0b11111010000, (outs VECREG:$rT), (ins VECREG:$rA, R16C:$rB),
2187 "shl\t$rT, $rA, $rB", RotateShift,
2188 [(set (v4i32 VECREG:$rT),
2189 (SPUvec_shl_v4i32 (v4i32 VECREG:$rA), R16C:$rB))]>;
2190
2191def SHLr32:
2192 RRForm<0b11111010000, (outs R32C:$rT), (ins R32C:$rA, R32C:$rB),
2193 "shl\t$rT, $rA, $rB", RotateShift,
2194 [(set R32C:$rT, (shl R32C:$rA, R32C:$rB))]>;
2195
2196def SHLIv4i32:
Scott Michel438be252007-12-17 22:32:34 +00002197 RI7Form<0b11111010000, (outs VECREG:$rT), (ins VECREG:$rA, u7imm_i8:$val),
Scott Michel8b6b4202007-12-04 22:35:58 +00002198 "shli\t$rT, $rA, $val", RotateShift,
2199 [(set (v4i32 VECREG:$rT),
Scott Michel438be252007-12-17 22:32:34 +00002200 (SPUvec_shl_v4i32 (v4i32 VECREG:$rA), (i8 uimm7:$val)))]>;
2201
2202def: Pat<(SPUvec_shl_v4i32 (v4i32 VECREG:$rA), (i16 uimm7:$val)),
2203 (SHLIv4i32 VECREG:$rA, uimm7:$val)>;
Scott Michel8b6b4202007-12-04 22:35:58 +00002204
2205def: Pat<(SPUvec_shl_v4i32 (v4i32 VECREG:$rA), (i32 uimm7:$val)),
2206 (SHLIv4i32 VECREG:$rA, uimm7:$val)>;
2207
2208def SHLIr32:
2209 RI7Form<0b11111010000, (outs R32C:$rT), (ins R32C:$rA, u7imm_i32:$val),
2210 "shli\t$rT, $rA, $val", RotateShift,
2211 [(set R32C:$rT, (shl R32C:$rA, (i32 uimm7:$val)))]>;
2212
2213def : Pat<(shl R32C:$rA, (i16 uimm7:$val)),
2214 (SHLIr32 R32C:$rA, uimm7:$val)>;
2215
Scott Michel438be252007-12-17 22:32:34 +00002216def : Pat<(shl R32C:$rA, (i8 uimm7:$val)),
2217 (SHLIr32 R32C:$rA, uimm7:$val)>;
2218
Scott Michel8b6b4202007-12-04 22:35:58 +00002219// SHLQBI vec form: Note that this will shift the entire vector (the 128-bit
2220// register) to the left. Vector form is here to ensure type correctness.
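//
// Background (SPU ISA, assumed here): shlqbi shifts the whole 128-bit
// quadword left by 0-7 bits, while shlqby/shlqbyi below shift it left by
// whole bytes; an arbitrary quadword shift is composed from a byte shift
// followed by a bit shift.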
2221def SHLQBIvec:
2222 RRForm<0b11011011100, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
2223 "shlqbi\t$rT, $rA, $rB", RotateShift,
2224 [/* intrinsic */]>;
2225
2226// See note above on SHLQBI.
2227def SHLQBIIvec:
2228 RI7Form<0b11011111100, (outs VECREG:$rT), (ins VECREG:$rA, u7imm:$val),
2229 "shlqbii\t$rT, $rA, $val", RotateShift,
2230 [/* intrinsic */]>;
2231
2232// SHLQBY, SHLQBYI vector forms: Shift the entire vector to the left by bytes,
2233// not by bits.
2234def SHLQBYvec:
2235 RI7Form<0b11111011100, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
2236 "shlqbyi\t$rT, $rA, $rB", RotateShift,
2237 [/* intrinsic */]>;
2238
2239def SHLQBYIvec:
2240 RI7Form<0b11111111100, (outs VECREG:$rT), (ins VECREG:$rA, u7imm:$val),
2241 "shlqbyi\t$rT, $rA, $val", RotateShift,
2242 [/* intrinsic */]>;
2243
2244// ROTH v8i16 form:
2245def ROTHv8i16:
2246 RRForm<0b00111010000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
2247 "roth\t$rT, $rA, $rB", RotateShift,
2248 [(set (v8i16 VECREG:$rT),
2249 (SPUvec_rotl_v8i16 VECREG:$rA, VECREG:$rB))]>;
2250
2251def ROTHr16:
2252 RRForm<0b00111010000, (outs R16C:$rT), (ins R16C:$rA, R16C:$rB),
2253 "roth\t$rT, $rA, $rB", RotateShift,
2254 [(set R16C:$rT, (rotl R16C:$rA, R16C:$rB))]>;
2255
2256def ROTHr16_r32:
2257 RRForm<0b00111010000, (outs R16C:$rT), (ins R16C:$rA, R32C:$rB),
2258 "roth\t$rT, $rA, $rB", RotateShift,
2259 [(set R16C:$rT, (rotl R16C:$rA, R32C:$rB))]>;
2260
Scott Michel438be252007-12-17 22:32:34 +00002261// The rotate amount is in the same bits whether we've got an 8-bit, 16-bit or
2262// 32-bit register
2263def ROTHr16_r8:
2264 RRForm<0b00111010000, (outs R16C:$rT), (ins R16C:$rA, R8C:$rB),
2265 "roth\t$rT, $rA, $rB", RotateShift,
2266 [(set R16C:$rT, (rotl R16C:$rA, (i32 (zext R8C:$rB))))]>;
2267
2268def : Pat<(rotl R16C:$rA, (i32 (sext R8C:$rB))),
2269 (ROTHr16_r8 R16C:$rA, R8C:$rB)>;
2270
2271def : Pat<(rotl R16C:$rA, (i32 (zext R8C:$rB))),
2272 (ROTHr16_r8 R16C:$rA, R8C:$rB)>;
2273
2274def : Pat<(rotl R16C:$rA, (i32 (anyext R8C:$rB))),
2275 (ROTHr16_r8 R16C:$rA, R8C:$rB)>;
2276
Scott Michel8b6b4202007-12-04 22:35:58 +00002277def ROTHIv8i16:
Scott Michel438be252007-12-17 22:32:34 +00002278 RI7Form<0b00111110000, (outs VECREG:$rT), (ins VECREG:$rA, u7imm_i8:$val),
Scott Michel8b6b4202007-12-04 22:35:58 +00002279 "rothi\t$rT, $rA, $val", RotateShift,
2280 [(set (v8i16 VECREG:$rT),
Scott Michel438be252007-12-17 22:32:34 +00002281 (SPUvec_rotl_v8i16 VECREG:$rA, (i8 uimm7:$val)))]>;
Scott Michel8b6b4202007-12-04 22:35:58 +00002282
2283def : Pat<(SPUvec_rotl_v8i16 VECREG:$rA, (i16 uimm7:$val)),
2284 (ROTHIv8i16 VECREG:$rA, imm:$val)>;
2285
2286def : Pat<(SPUvec_rotl_v8i16 VECREG:$rA, (i32 uimm7:$val)),
2287 (ROTHIv8i16 VECREG:$rA, imm:$val)>;
2288
2289def ROTHIr16:
2290 RI7Form<0b00111110000, (outs R16C:$rT), (ins R16C:$rA, u7imm:$val),
2291 "rothi\t$rT, $rA, $val", RotateShift,
2292 [(set R16C:$rT, (rotl R16C:$rA, (i16 uimm7:$val)))]>;
2293
2294def ROTHIr16_i32:
2295 RI7Form<0b00111110000, (outs R16C:$rT), (ins R16C:$rA, u7imm_i32:$val),
2296 "rothi\t$rT, $rA, $val", RotateShift,
2297 [(set R16C:$rT, (rotl R16C:$rA, (i32 uimm7:$val)))]>;
2298
Scott Michel438be252007-12-17 22:32:34 +00002299def ROTHIr16_i8:
2300 RI7Form<0b00111110000, (outs R16C:$rT), (ins R16C:$rA, u7imm_i8:$val),
2301 "rothi\t$rT, $rA, $val", RotateShift,
2302 [(set R16C:$rT, (rotl R16C:$rA, (i8 uimm7:$val)))]>;
2303
Scott Michel8b6b4202007-12-04 22:35:58 +00002304def ROTv4i32:
2305 RRForm<0b00011010000, (outs VECREG:$rT), (ins VECREG:$rA, R32C:$rB),
2306 "rot\t$rT, $rA, $rB", RotateShift,
2307 [(set (v4i32 VECREG:$rT),
2308 (SPUvec_rotl_v4i32 (v4i32 VECREG:$rA), R32C:$rB))]>;
2309
2310def ROTr32:
2311 RRForm<0b00011010000, (outs R32C:$rT), (ins R32C:$rA, R32C:$rB),
2312 "rot\t$rT, $rA, $rB", RotateShift,
2313 [(set R32C:$rT, (rotl R32C:$rA, R32C:$rB))]>;
2314
Scott Michel438be252007-12-17 22:32:34 +00002315// The rotate amount is in the same bits whether we've got an 8-bit, 16-bit or
2316// 32-bit register
2317def ROTr32_r16_anyext:
2318 RRForm<0b00011010000, (outs R32C:$rT), (ins R32C:$rA, R16C:$rB),
2319 "rot\t$rT, $rA, $rB", RotateShift,
2320 [(set R32C:$rT, (rotl R32C:$rA, (i32 (anyext R16C:$rB))))]>;
2321
2322def : Pat<(rotl R32C:$rA, (i32 (zext R16C:$rB))),
2323 (ROTr32_r16_anyext R32C:$rA, R16C:$rB)>;
2324
2325def : Pat<(rotl R32C:$rA, (i32 (sext R16C:$rB))),
2326 (ROTr32_r16_anyext R32C:$rA, R16C:$rB)>;
2327
2328def ROTr32_r8_anyext:
2329 RRForm<0b00011010000, (outs R32C:$rT), (ins R32C:$rA, R8C:$rB),
2330 "rot\t$rT, $rA, $rB", RotateShift,
2331 [(set R32C:$rT, (rotl R32C:$rA, (i32 (anyext R8C:$rB))))]>;
2332
2333def : Pat<(rotl R32C:$rA, (i32 (zext R8C:$rB))),
2334 (ROTr32_r8_anyext R32C:$rA, R8C:$rB)>;
2335
2336def : Pat<(rotl R32C:$rA, (i32 (sext R8C:$rB))),
2337 (ROTr32_r8_anyext R32C:$rA, R8C:$rB)>;
2338
Scott Michel8b6b4202007-12-04 22:35:58 +00002339def ROTIv4i32:
2340 RI7Form<0b00011110000, (outs VECREG:$rT), (ins VECREG:$rA, u7imm_i32:$val),
2341 "roti\t$rT, $rA, $val", RotateShift,
2342 [(set (v4i32 VECREG:$rT),
2343 (SPUvec_rotl_v4i32 (v4i32 VECREG:$rA), (i32 uimm7:$val)))]>;
2344
2345def : Pat<(SPUvec_rotl_v4i32 (v4i32 VECREG:$rA), (i16 uimm7:$val)),
2346 (ROTIv4i32 VECREG:$rA, imm:$val)>;
2347
Scott Michel438be252007-12-17 22:32:34 +00002348def : Pat<(SPUvec_rotl_v4i32 (v4i32 VECREG:$rA), (i8 uimm7:$val)),
2349 (ROTIv4i32 VECREG:$rA, imm:$val)>;
2350
Scott Michel8b6b4202007-12-04 22:35:58 +00002351def ROTIr32:
2352 RI7Form<0b00011110000, (outs R32C:$rT), (ins R32C:$rA, u7imm_i32:$val),
2353 "roti\t$rT, $rA, $val", RotateShift,
2354 [(set R32C:$rT, (rotl R32C:$rA, (i32 uimm7:$val)))]>;
2355
2356def ROTIr32_i16:
    RI7Form<0b00011110000, (outs R32C:$rT), (ins R32C:$rA, u7imm:$val),
2358 "roti\t$rT, $rA, $val", RotateShift,
2359 [(set R32C:$rT, (rotl R32C:$rA, (i16 uimm7:$val)))]>;
2360
Scott Michel438be252007-12-17 22:32:34 +00002361def ROTIr32_i8:
    RI7Form<0b00011110000, (outs R32C:$rT), (ins R32C:$rA, u7imm_i8:$val),
2363 "roti\t$rT, $rA, $val", RotateShift,
2364 [(set R32C:$rT, (rotl R32C:$rA, (i8 uimm7:$val)))]>;
2365
Scott Michel8b6b4202007-12-04 22:35:58 +00002366// ROTQBY* vector forms: This rotates the entire vector, but vector registers
2367// are used here for type checking (instances where ROTQBI is used actually
2368// use vector registers)
2369def ROTQBYvec:
2370 RRForm<0b00111011100, (outs VECREG:$rT), (ins VECREG:$rA, R16C:$rB),
2371 "rotqby\t$rT, $rA, $rB", RotateShift,
2372 [(set (v16i8 VECREG:$rT), (SPUrotbytes_left (v16i8 VECREG:$rA), R16C:$rB))]>;
2373
2374def : Pat<(SPUrotbytes_left_chained (v16i8 VECREG:$rA), R16C:$rB),
2375 (ROTQBYvec VECREG:$rA, R16C:$rB)>;
2376
2377// See ROTQBY note above.
2378def ROTQBYIvec:
2379 RI7Form<0b00111111100, (outs VECREG:$rT), (ins VECREG:$rA, u7imm:$val),
2380 "rotqbyi\t$rT, $rA, $val", RotateShift,
2381 [(set (v16i8 VECREG:$rT),
2382 (SPUrotbytes_left (v16i8 VECREG:$rA), (i16 uimm7:$val)))]>;
2383
2384def : Pat<(SPUrotbytes_left_chained (v16i8 VECREG:$rA), (i16 uimm7:$val)),
2385 (ROTQBYIvec VECREG:$rA, uimm7:$val)>;
2386
2387// See ROTQBY note above.
2388def ROTQBYBIvec:
2389 RI7Form<0b00110011100, (outs VECREG:$rT), (ins VECREG:$rA, u7imm:$val),
2390 "rotqbybi\t$rT, $rA, $val", RotateShift,
2391 [/* intrinsic */]>;
2392
2393// See ROTQBY note above.
2394//
2395// Assume that the user of this instruction knows to shift the rotate count
2396// into bit 29
2397def ROTQBIvec:
2398 RRForm<0b00011011100, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
2399 "rotqbi\t$rT, $rA, $rB", RotateShift,
2400 [/* insert intrinsic here */]>;
2401
2402// See ROTQBY note above.
2403def ROTQBIIvec:
2404 RI7Form<0b00011111100, (outs VECREG:$rT), (ins VECREG:$rA, u7imm_i32:$val),
2405 "rotqbii\t$rT, $rA, $val", RotateShift,
2406 [/* insert intrinsic here */]>;
2407
2408// ROTHM v8i16 form:
2409// NOTE(1): No vector rotate is generated by the C/C++ frontend (today),
2410// so this only matches a synthetically generated/lowered code
2411// fragment.
2412// NOTE(2): $rB must be negated before the right rotate!
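//
// The negation is done with "sfi $rB, 0" (subtract from immediate, i.e.
// 0 - $rB), which is why the patterns below wrap the shift amount in
// (SFIr32 ..., 0) before handing it to the rotate instruction.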
2413def ROTHMv8i16:
2414 RRForm<0b10111010000, (outs VECREG:$rT), (ins VECREG:$rA, R32C:$rB),
2415 "rothm\t$rT, $rA, $rB", RotateShift,
2416 [/* see patterns below - $rB must be negated */]>;
2417
2418def : Pat<(SPUvec_srl_v8i16 (v8i16 VECREG:$rA), R32C:$rB),
2419 (ROTHMv8i16 VECREG:$rA, (SFIr32 R32C:$rB, 0))>;
2420
2421def : Pat<(SPUvec_srl_v8i16 (v8i16 VECREG:$rA), R16C:$rB),
2422 (ROTHMv8i16 VECREG:$rA,
2423 (SFIr32 (XSHWr16 R16C:$rB), 0))>;
2424
Scott Michel438be252007-12-17 22:32:34 +00002425def : Pat<(SPUvec_srl_v8i16 (v8i16 VECREG:$rA), R8C:$rB),
Scott Michel8b6b4202007-12-04 22:35:58 +00002426 (ROTHMv8i16 VECREG:$rA,
Scott Michel438be252007-12-17 22:32:34 +00002427 (SFIr32 (XSHWr16 (XSBHr8 R8C:$rB) ), 0))>;
Scott Michel8b6b4202007-12-04 22:35:58 +00002428
// ROTHM r16 form: Rotate a 16-bit quantity to the right, zero-filling at the
// left.
// Note: This instruction doesn't match a pattern directly because $rB must be
// negated for the instruction to work. Hence the patterns below the
// instruction!
2432def ROTHMr16:
2433 RRForm<0b10111010000, (outs R16C:$rT), (ins R16C:$rA, R32C:$rB),
2434 "rothm\t$rT, $rA, $rB", RotateShift,
2435 [/* see patterns below - $rB must be negated! */]>;
2436
2437def : Pat<(srl R16C:$rA, R32C:$rB),
2438 (ROTHMr16 R16C:$rA, (SFIr32 R32C:$rB, 0))>;
2439
2440def : Pat<(srl R16C:$rA, R16C:$rB),
2441 (ROTHMr16 R16C:$rA,
2442 (SFIr32 (XSHWr16 R16C:$rB), 0))>;
2443
Scott Michel438be252007-12-17 22:32:34 +00002444def : Pat<(srl R16C:$rA, R8C:$rB),
Scott Michel8b6b4202007-12-04 22:35:58 +00002445 (ROTHMr16 R16C:$rA,
Scott Michel438be252007-12-17 22:32:34 +00002446 (SFIr32 (XSHWr16 (XSBHr8 R8C:$rB) ), 0))>;
Scott Michel8b6b4202007-12-04 22:35:58 +00002447
2448// ROTHMI v8i16 form: See the comment for ROTHM v8i16. The difference here is
2449// that the immediate can be complemented, so that the user doesn't have to
2450// worry about it.
2451def ROTHMIv8i16:
2452 RI7Form<0b10111110000, (outs VECREG:$rT), (ins VECREG:$rA, rothNeg7imm:$val),
2453 "rothmi\t$rT, $rA, $val", RotateShift,
2454 [(set (v8i16 VECREG:$rT),
2455 (SPUvec_srl_v8i16 (v8i16 VECREG:$rA), (i32 imm:$val)))]>;
2456
2457def: Pat<(SPUvec_srl_v8i16 (v8i16 VECREG:$rA), (i16 imm:$val)),
2458 (ROTHMIv8i16 VECREG:$rA, imm:$val)>;
Scott Michel438be252007-12-17 22:32:34 +00002459
2460def: Pat<(SPUvec_srl_v8i16 (v8i16 VECREG:$rA), (i8 imm:$val)),
2461 (ROTHMIv8i16 VECREG:$rA, imm:$val)>;
Scott Michel8b6b4202007-12-04 22:35:58 +00002462
2463def ROTHMIr16:
2464 RI7Form<0b10111110000, (outs R16C:$rT), (ins R16C:$rA, rothNeg7imm:$val),
2465 "rothmi\t$rT, $rA, $val", RotateShift,
2466 [(set R16C:$rT, (srl R16C:$rA, (i32 uimm7:$val)))]>;
2467
2468def: Pat<(srl R16C:$rA, (i16 uimm7:$val)),
2469 (ROTHMIr16 R16C:$rA, uimm7:$val)>;
2470
Scott Michel438be252007-12-17 22:32:34 +00002471def: Pat<(srl R16C:$rA, (i8 uimm7:$val)),
2472 (ROTHMIr16 R16C:$rA, uimm7:$val)>;
2473
Scott Michel8b6b4202007-12-04 22:35:58 +00002474// ROTM v4i32 form: See the ROTHM v8i16 comments.
2475def ROTMv4i32:
2476 RRForm<0b10011010000, (outs VECREG:$rT), (ins VECREG:$rA, R32C:$rB),
2477 "rotm\t$rT, $rA, $rB", RotateShift,
2478 [/* see patterns below - $rB must be negated */]>;
2479
2480def : Pat<(SPUvec_srl_v4i32 VECREG:$rA, R32C:$rB),
2481 (ROTMv4i32 VECREG:$rA, (SFIr32 R32C:$rB, 0))>;
2482
2483def : Pat<(SPUvec_srl_v4i32 VECREG:$rA, R16C:$rB),
2484 (ROTMv4i32 VECREG:$rA,
2485 (SFIr32 (XSHWr16 R16C:$rB), 0))>;
2486
def : Pat<(SPUvec_srl_v4i32 VECREG:$rA, R8C:$rB),
          (ROTMv4i32 VECREG:$rA,
                     (SFIr32 (XSHWr16 (XSBHr8 R8C:$rB)), 0))>;
2490
2491def ROTMr32:
2492 RRForm<0b10011010000, (outs R32C:$rT), (ins R32C:$rA, R32C:$rB),
2493 "rotm\t$rT, $rA, $rB", RotateShift,
2494 [/* see patterns below - $rB must be negated */]>;
2495
2496def : Pat<(srl R32C:$rA, R32C:$rB),
2497 (ROTMr32 R32C:$rA, (SFIr32 R32C:$rB, 0))>;
2498
2499def : Pat<(srl R32C:$rA, R16C:$rB),
2500 (ROTMr32 R32C:$rA,
2501 (SFIr32 (XSHWr16 R16C:$rB), 0))>;
2502
Scott Michel438be252007-12-17 22:32:34 +00002503def : Pat<(srl R32C:$rA, R8C:$rB),
2504 (ROTMr32 R32C:$rA,
2505 (SFIr32 (XSHWr16 (XSBHr8 R8C:$rB)), 0))>;
2506
Scott Michel8b6b4202007-12-04 22:35:58 +00002507// ROTMI v4i32 form: See the comment for ROTHM v8i16.
2508def ROTMIv4i32:
2509 RI7Form<0b10011110000, (outs VECREG:$rT), (ins VECREG:$rA, rotNeg7imm:$val),
2510 "rotmi\t$rT, $rA, $val", RotateShift,
2511 [(set (v4i32 VECREG:$rT),
2512 (SPUvec_srl_v4i32 VECREG:$rA, (i32 uimm7:$val)))]>;
2513
2514def : Pat<(SPUvec_srl_v4i32 VECREG:$rA, (i16 uimm7:$val)),
2515 (ROTMIv4i32 VECREG:$rA, uimm7:$val)>;
Scott Michel438be252007-12-17 22:32:34 +00002516
2517def : Pat<(SPUvec_srl_v4i32 VECREG:$rA, (i8 uimm7:$val)),
2518 (ROTMIv4i32 VECREG:$rA, uimm7:$val)>;
Scott Michel8b6b4202007-12-04 22:35:58 +00002519
// ROTMI r32 form: knows how to complement the immediate value.
2521def ROTMIr32:
2522 RI7Form<0b10011110000, (outs R32C:$rT), (ins R32C:$rA, rotNeg7imm:$val),
2523 "rotmi\t$rT, $rA, $val", RotateShift,
2524 [(set R32C:$rT, (srl R32C:$rA, (i32 uimm7:$val)))]>;
2525
2526def : Pat<(srl R32C:$rA, (i16 imm:$val)),
2527 (ROTMIr32 R32C:$rA, uimm7:$val)>;
2528
Scott Michel438be252007-12-17 22:32:34 +00002529def : Pat<(srl R32C:$rA, (i8 imm:$val)),
2530 (ROTMIr32 R32C:$rA, uimm7:$val)>;
2531
// ROTQMBYvec: This is a vector form merely so that, when used in an
// instruction pattern, type checking will succeed. This instruction assumes
// that the user knows to complement $rB.
2535def ROTQMBYvec:
2536 RRForm<0b10111011100, (outs VECREG:$rT), (ins VECREG:$rA, R32C:$rB),
2537 "rotqmby\t$rT, $rA, $rB", RotateShift,
2538 [(set (v16i8 VECREG:$rT),
2539 (SPUrotbytes_right_zfill (v16i8 VECREG:$rA), R32C:$rB))]>;
2540
2541def ROTQMBYIvec:
2542 RI7Form<0b10111111100, (outs VECREG:$rT), (ins VECREG:$rA, rotNeg7imm:$val),
2543 "rotqmbyi\t$rT, $rA, $val", RotateShift,
2544 [(set (v16i8 VECREG:$rT),
2545 (SPUrotbytes_right_zfill (v16i8 VECREG:$rA), (i32 uimm7:$val)))]>;
2546
2547def : Pat<(SPUrotbytes_right_zfill VECREG:$rA, (i16 uimm7:$val)),
2548 (ROTQMBYIvec VECREG:$rA, uimm7:$val)>;
2549
2550def ROTQMBYBIvec:
2551 RRForm<0b10110011100, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
2552 "rotqmbybi\t$rT, $rA, $rB", RotateShift,
2553 [/* intrinsic */]>;
2554
2555def ROTQMBIvec:
2556 RRForm<0b10011011100, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
2557 "rotqmbi\t$rT, $rA, $rB", RotateShift,
2558 [/* intrinsic */]>;
2559
2560def ROTQMBIIvec:
2561 RI7Form<0b10011111100, (outs VECREG:$rT), (ins VECREG:$rA, rotNeg7imm:$val),
2562 "rotqmbii\t$rT, $rA, $val", RotateShift,
2563 [/* intrinsic */]>;
2564
2565def ROTMAHv8i16:
2566 RRForm<0b01111010000, (outs VECREG:$rT), (ins VECREG:$rA, R32C:$rB),
2567 "rotmah\t$rT, $rA, $rB", RotateShift,
2568 [/* see patterns below - $rB must be negated */]>;
2569
2570def : Pat<(SPUvec_sra_v8i16 VECREG:$rA, R32C:$rB),
2571 (ROTMAHv8i16 VECREG:$rA, (SFIr32 R32C:$rB, 0))>;
2572
2573def : Pat<(SPUvec_sra_v8i16 VECREG:$rA, R16C:$rB),
2574 (ROTMAHv8i16 VECREG:$rA,
2575 (SFIr32 (XSHWr16 R16C:$rB), 0))>;
2576
Scott Michel438be252007-12-17 22:32:34 +00002577def : Pat<(SPUvec_sra_v8i16 VECREG:$rA, R8C:$rB),
2578 (ROTMAHv8i16 VECREG:$rA,
2579 (SFIr32 (XSHWr16 (XSBHr8 R8C:$rB)), 0))>;
2580
Scott Michel8b6b4202007-12-04 22:35:58 +00002581def ROTMAHr16:
2582 RRForm<0b01111010000, (outs R16C:$rT), (ins R16C:$rA, R32C:$rB),
2583 "rotmah\t$rT, $rA, $rB", RotateShift,
2584 [/* see patterns below - $rB must be negated */]>;
2585
2586def : Pat<(sra R16C:$rA, R32C:$rB),
2587 (ROTMAHr16 R16C:$rA, (SFIr32 R32C:$rB, 0))>;
2588
2589def : Pat<(sra R16C:$rA, R16C:$rB),
2590 (ROTMAHr16 R16C:$rA,
2591 (SFIr32 (XSHWr16 R16C:$rB), 0))>;
2592
Scott Michel438be252007-12-17 22:32:34 +00002593def : Pat<(sra R16C:$rA, R8C:$rB),
2594 (ROTMAHr16 R16C:$rA,
2595 (SFIr32 (XSHWr16 (XSBHr8 R8C:$rB)), 0))>;
2596
Scott Michel8b6b4202007-12-04 22:35:58 +00002597def ROTMAHIv8i16:
2598 RRForm<0b01111110000, (outs VECREG:$rT), (ins VECREG:$rA, rothNeg7imm:$val),
2599 "rotmahi\t$rT, $rA, $val", RotateShift,
2600 [(set (v8i16 VECREG:$rT),
2601 (SPUvec_sra_v8i16 (v8i16 VECREG:$rA), (i32 uimm7:$val)))]>;
2602
2603def : Pat<(SPUvec_sra_v8i16 (v8i16 VECREG:$rA), (i16 uimm7:$val)),
2604 (ROTMAHIv8i16 (v8i16 VECREG:$rA), (i32 uimm7:$val))>;
2605
Scott Michel438be252007-12-17 22:32:34 +00002606def : Pat<(SPUvec_sra_v8i16 (v8i16 VECREG:$rA), (i8 uimm7:$val)),
2607 (ROTMAHIv8i16 (v8i16 VECREG:$rA), (i32 uimm7:$val))>;
2608
Scott Michel8b6b4202007-12-04 22:35:58 +00002609def ROTMAHIr16:
2610 RRForm<0b01111110000, (outs R16C:$rT), (ins R16C:$rA, rothNeg7imm_i16:$val),
2611 "rotmahi\t$rT, $rA, $val", RotateShift,
2612 [(set R16C:$rT, (sra R16C:$rA, (i16 uimm7:$val)))]>;
2613
2614def : Pat<(sra R16C:$rA, (i32 imm:$val)),
2615 (ROTMAHIr16 R16C:$rA, uimm7:$val)>;
2616
Scott Michel438be252007-12-17 22:32:34 +00002617def : Pat<(sra R16C:$rA, (i8 imm:$val)),
2618 (ROTMAHIr16 R16C:$rA, uimm7:$val)>;
2619
Scott Michel8b6b4202007-12-04 22:35:58 +00002620def ROTMAv4i32:
2621 RRForm<0b01011010000, (outs VECREG:$rT), (ins VECREG:$rA, R32C:$rB),
2622 "rotma\t$rT, $rA, $rB", RotateShift,
2623 [/* see patterns below - $rB must be negated */]>;
2624
2625def : Pat<(SPUvec_sra_v4i32 VECREG:$rA, R32C:$rB),
2626 (ROTMAv4i32 (v4i32 VECREG:$rA), (SFIr32 R32C:$rB, 0))>;
2627
2628def : Pat<(SPUvec_sra_v4i32 VECREG:$rA, R16C:$rB),
2629 (ROTMAv4i32 (v4i32 VECREG:$rA),
2630 (SFIr32 (XSHWr16 R16C:$rB), 0))>;
2631
Scott Michel438be252007-12-17 22:32:34 +00002632def : Pat<(SPUvec_sra_v4i32 VECREG:$rA, R8C:$rB),
2633 (ROTMAv4i32 (v4i32 VECREG:$rA),
2634 (SFIr32 (XSHWr16 (XSBHr8 R8C:$rB)), 0))>;
2635
Scott Michel8b6b4202007-12-04 22:35:58 +00002636def ROTMAr32:
2637 RRForm<0b01011010000, (outs R32C:$rT), (ins R32C:$rA, R32C:$rB),
2638 "rotma\t$rT, $rA, $rB", RotateShift,
2639 [/* see patterns below - $rB must be negated */]>;
2640
2641def : Pat<(sra R32C:$rA, R32C:$rB),
2642 (ROTMAr32 R32C:$rA, (SFIr32 R32C:$rB, 0))>;
2643
2644def : Pat<(sra R32C:$rA, R16C:$rB),
2645 (ROTMAr32 R32C:$rA,
2646 (SFIr32 (XSHWr16 R16C:$rB), 0))>;
2647
Scott Michel438be252007-12-17 22:32:34 +00002648def : Pat<(sra R32C:$rA, R8C:$rB),
2649 (ROTMAr32 R32C:$rA,
2650 (SFIr32 (XSHWr16 (XSBHr8 R8C:$rB)), 0))>;
2651
Scott Michel8b6b4202007-12-04 22:35:58 +00002652def ROTMAIv4i32:
2653 RRForm<0b01011110000, (outs VECREG:$rT), (ins VECREG:$rA, rotNeg7imm:$val),
2654 "rotmai\t$rT, $rA, $val", RotateShift,
2655 [(set (v4i32 VECREG:$rT),
2656 (SPUvec_sra_v4i32 VECREG:$rA, (i32 uimm7:$val)))]>;
2657
2658def : Pat<(SPUvec_sra_v4i32 VECREG:$rA, (i16 uimm7:$val)),
2659 (ROTMAIv4i32 VECREG:$rA, uimm7:$val)>;
2660
2661def ROTMAIr32:
2662 RRForm<0b01011110000, (outs R32C:$rT), (ins R32C:$rA, rotNeg7imm:$val),
2663 "rotmai\t$rT, $rA, $val", RotateShift,
2664 [(set R32C:$rT, (sra R32C:$rA, (i32 uimm7:$val)))]>;
2665
2666def : Pat<(sra R32C:$rA, (i16 uimm7:$val)),
2667 (ROTMAIr32 R32C:$rA, uimm7:$val)>;
2668
Scott Michel438be252007-12-17 22:32:34 +00002669def : Pat<(sra R32C:$rA, (i8 uimm7:$val)),
2670 (ROTMAIr32 R32C:$rA, uimm7:$val)>;
2671
Scott Michel8b6b4202007-12-04 22:35:58 +00002672//===----------------------------------------------------------------------===//
2673// Branch and conditionals:
2674//===----------------------------------------------------------------------===//
2675
2676let isTerminator = 1, isBarrier = 1 in {
2677 // Halt If Equal (r32 preferred slot only, no vector form)
2678 def HEQr32:
2679 RRForm_3<0b00011011110, (outs), (ins R32C:$rA, R32C:$rB),
2680 "heq\t$rA, $rB", BranchResolv,
2681 [/* no pattern to match */]>;
2682
2683 def HEQIr32 :
2684 RI10Form_2<0b11111110, (outs), (ins R32C:$rA, s10imm:$val),
2685 "heqi\t$rA, $val", BranchResolv,
2686 [/* no pattern to match */]>;
2687
2688 // HGT/HGTI: These instructions use signed arithmetic for the comparison,
2689 // contrasting with HLGT/HLGTI, which use unsigned comparison:
2690 def HGTr32:
2691 RRForm_3<0b00011010010, (outs), (ins R32C:$rA, R32C:$rB),
2692 "hgt\t$rA, $rB", BranchResolv,
2693 [/* no pattern to match */]>;
2694
2695 def HGTIr32:
2696 RI10Form_2<0b11110010, (outs), (ins R32C:$rA, s10imm:$val),
2697 "hgti\t$rA, $val", BranchResolv,
2698 [/* no pattern to match */]>;
2699
2700 def HLGTr32:
2701 RRForm_3<0b00011011010, (outs), (ins R32C:$rA, R32C:$rB),
2702 "hlgt\t$rA, $rB", BranchResolv,
2703 [/* no pattern to match */]>;
2704
2705 def HLGTIr32:
2706 RI10Form_2<0b11111010, (outs), (ins R32C:$rA, s10imm:$val),
2707 "hlgti\t$rA, $val", BranchResolv,
2708 [/* no pattern to match */]>;
2709}
2710
2711// Comparison operators:
Scott Michel438be252007-12-17 22:32:34 +00002712def CEQBr8:
2713 RRForm<0b00001011110, (outs R8C:$rT), (ins R8C:$rA, R8C:$rB),
2714 "ceqb\t$rT, $rA, $rB", ByteOp,
2715 [/* no pattern to match */]>;
Scott Michel8b6b4202007-12-04 22:35:58 +00002716
2717def CEQBv16i8:
2718 RRForm<0b00001011110, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
2719 "ceqb\t$rT, $rA, $rB", ByteOp,
2720 [/* no pattern to match: intrinsic */]>;
2721
Scott Michel438be252007-12-17 22:32:34 +00002722def CEQBIr8:
2723 RI10Form<0b01111110, (outs R8C:$rT), (ins R8C:$rA, s7imm:$val),
2724 "ceqbi\t$rT, $rA, $val", ByteOp,
2725 [/* no pattern to match: intrinsic */]>;
2726
Scott Michel8b6b4202007-12-04 22:35:58 +00002727def CEQBIv16i8:
2728 RI10Form<0b01111110, (outs VECREG:$rT), (ins VECREG:$rA, s7imm:$val),
2729 "ceqbi\t$rT, $rA, $val", ByteOp,
2730 [/* no pattern to match: intrinsic */]>;
2731
2732def CEQHr16:
2733 RRForm<0b00010011110, (outs R16C:$rT), (ins R16C:$rA, R16C:$rB),
2734 "ceqh\t$rT, $rA, $rB", ByteOp,
2735 [/* no pattern to match */]>;
2736
2737def CEQHv8i16:
2738 RRForm<0b00010011110, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
2739 "ceqh\t$rT, $rA, $rB", ByteOp,
2740 [/* no pattern to match: intrinsic */]>;
2741
2742def CEQHIr16:
2743 RI10Form<0b10111110, (outs R16C:$rT), (ins R16C:$rA, s10imm:$val),
2744 "ceqhi\t$rT, $rA, $val", ByteOp,
2745 [/* no pattern to match: intrinsic */]>;
2746
2747def CEQHIv8i16:
2748 RI10Form<0b10111110, (outs VECREG:$rT), (ins VECREG:$rA, s10imm:$val),
2749 "ceqhi\t$rT, $rA, $val", ByteOp,
2750 [/* no pattern to match: intrinsic */]>;
2751
2752def CEQr32:
2753 RRForm<0b00000011110, (outs R32C:$rT), (ins R32C:$rA, R32C:$rB),
2754 "ceq\t$rT, $rA, $rB", ByteOp,
2755 [/* no pattern to match: intrinsic */]>;
2756
2757def CEQv4i32:
2758 RRForm<0b00000011110, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
2759 "ceq\t$rT, $rA, $rB", ByteOp,
2760 [/* no pattern to match: intrinsic */]>;
2761
2762def CEQIr32:
2763 RI10Form<0b00111110, (outs R32C:$rT), (ins R32C:$rA, s10imm:$val),
2764 "ceqi\t$rT, $rA, $val", ByteOp,
2765 [/* no pattern to match: intrinsic */]>;
2766
2767def CEQIv4i32:
2768 RI10Form<0b00111110, (outs VECREG:$rT), (ins VECREG:$rA, s10imm:$val),
2769 "ceqi\t$rT, $rA, $val", ByteOp,
2770 [/* no pattern to match: intrinsic */]>;
2771
2772let isCall = 1,
2773 // All calls clobber the non-callee-saved registers:
2774 Defs = [R0, R1, R2, R3, R4, R5, R6, R7, R8, R9,
2775 R10,R11,R12,R13,R14,R15,R16,R17,R18,R19,
2776 R20,R21,R22,R23,R24,R25,R26,R27,R28,R29,
2777 R30,R31,R32,R33,R34,R35,R36,R37,R38,R39,
2778 R40,R41,R42,R43,R44,R45,R46,R47,R48,R49,
2779 R50,R51,R52,R53,R54,R55,R56,R57,R58,R59,
2780 R60,R61,R62,R63,R64,R65,R66,R67,R68,R69,
2781 R70,R71,R72,R73,R74,R75,R76,R77,R78,R79],
2782 // All of these instructions use $lr (aka $0)
2783 Uses = [R0] in {
2784 // Branch relative and set link: used when we know that the target is
2785 // within [-32768, 32767] bytes of the call site
2786 def BRSL:
2787 BranchSetLink<0b011001100, (outs), (ins relcalltarget:$func, variable_ops),
2788 "brsl\t$$lr, $func",
2789 [(SPUcall (SPUpcrel tglobaladdr:$func, 0))]>;
2790
2791 // Branch absolute and set link: Used if we actually know that the target
2792 // is an absolute address
2793 def BRASL:
2794 BranchSetLink<0b011001100, (outs), (ins calltarget:$func, variable_ops),
2795 "brasl\t$$lr, $func",
2796 [(SPUcall tglobaladdr:$func)]>;
2797
2798 // Branch indirect and set link if external data. These instructions are not
2799 // generated directly; they are matched via an intrinsic:
2800 def BISLED_00: BISLEDForm<0b11, "bisled\t$$lr, $func", [/* empty pattern */]>;
2801 def BISLED_E0: BISLEDForm<0b10, "bisled\t$$lr, $func", [/* empty pattern */]>;
2802 def BISLED_0D: BISLEDForm<0b01, "bisled\t$$lr, $func", [/* empty pattern */]>;
2803 def BISLED_ED: BISLEDForm<0b00, "bisled\t$$lr, $func", [/* empty pattern */]>;
2804
2805 // Branch indirect and set link. This is the "X-form" address version of a
2806 // function call
2807 def BISL:
2808 BIForm<0b10010101100, "bisl\t$$lr, $func", [(SPUcall R32C:$func)]>;
2809}
2810
2811// Unconditional branches:
2812let isBranch = 1, isTerminator = 1, hasCtrlDep = 1, isBarrier = 1 in {
2813 def BR :
2814 UncondBranch<0b001001100, (outs), (ins brtarget:$dest),
2815 "br\t$dest",
2816 [(br bb:$dest)]>;
2817
2818 // Unconditional, absolute address branch
2819 def BRA:
2820 UncondBranch<0b001100000, (outs), (ins brtarget:$dest),
2821 "bra\t$dest",
2822 [/* no pattern */]>;
2823
2824 // Indirect branch
2825 def BI:
2826 BIForm<0b00010101100, "bi\t$func", [(brind R32C:$func)]>;
2827
2828 // Various branches:
2829 def BRNZ:
2830 RI16Form<0b010000100, (outs), (ins R32C:$rCond, brtarget:$dest),
2831 "brnz\t$rCond,$dest",
2832 BranchResolv,
2833 [(brcond R32C:$rCond, bb:$dest)]>;
2834
2835 def BRZ:
2836 RI16Form<0b000000100, (outs), (ins R32C:$rT, brtarget:$dest),
2837 "brz\t$rT,$dest",
2838 BranchResolv,
2839 [/* no pattern */]>;
2840
2841 def BRHNZ:
2842 RI16Form<0b011000100, (outs), (ins R16C:$rCond, brtarget:$dest),
2843 "brhnz\t$rCond,$dest",
2844 BranchResolv,
2845 [(brcond R16C:$rCond, bb:$dest)]>;
2846
2847 def BRHZ:
2848 RI16Form<0b001000100, (outs), (ins R16C:$rT, brtarget:$dest),
2849 "brhz\t$rT,$dest",
2850 BranchResolv,
2851 [/* no pattern */]>;
2852
2853/*
2854 def BINZ:
2855 BICondForm<0b10010100100, "binz\t$rA, $func",
2856 [(SPUbinz R32C:$rA, R32C:$func)]>;
2857
2858 def BIZ:
2859 BICondForm<0b00010100100, "biz\t$rA, $func",
2860 [(SPUbiz R32C:$rA, R32C:$func)]>;
2861*/
2862}
2863
2864def : Pat<(brcond (i16 (seteq R16C:$rA, 0)), bb:$dest),
2865 (BRHZ R16C:$rA, bb:$dest)>;
2866def : Pat<(brcond (i16 (setne R16C:$rA, 0)), bb:$dest),
2867 (BRHNZ R16C:$rA, bb:$dest)>;
2868
2869def : Pat<(brcond (i32 (seteq R32C:$rA, 0)), bb:$dest),
2870 (BRZ R32C:$rA, bb:$dest)>;
2871def : Pat<(brcond (i32 (setne R32C:$rA, 0)), bb:$dest),
2872 (BRNZ R32C:$rA, bb:$dest)>;
2873
2874let isTerminator = 1, isBarrier = 1 in {
2875 let isReturn = 1 in {
2876 def RET:
2877 RETForm<"bi\t$$lr", [(retflag)]>;
2878 }
2879}
2880
2881//===----------------------------------------------------------------------===//
2882// Various brcond predicates:
2883//===----------------------------------------------------------------------===//
2884/*
2885def : Pat<(brcond (i32 (seteq R32C:$rA, 0)), bb:$dest),
2886 (BRZ R32C:$rA, bb:$dest)>;
2887
2888def : Pat<(brcond (i32 (seteq R32C:$rA, R32C:$rB)), bb:$dest),
2889 (BRNZ (CEQr32 R32C:$rA, R32C:$rB), bb:$dest)>;
2890
2891def : Pat<(brcond (i16 (seteq R16C:$rA, i16ImmSExt10:$val)), bb:$dest),
2892 (BRHNZ (CEQHIr16 R16C:$rA, i16ImmSExt10:$val), bb:$dest)>;
2893
2894def : Pat<(brcond (i16 (seteq R16C:$rA, R16C:$rB)), bb:$dest),
2895 (BRHNZ (CEQHr16 R16C:$rA, R16C:$rB), bb:$dest)>;
2896*/
2897
2898//===----------------------------------------------------------------------===//
2899// Single precision floating point instructions
2900//===----------------------------------------------------------------------===//
2901
2902def FAv4f32:
2903 RRForm<0b00100011010, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
2904 "fa\t$rT, $rA, $rB", SPrecFP,
2905 [(set (v4f32 VECREG:$rT), (fadd (v4f32 VECREG:$rA), (v4f32 VECREG:$rB)))]>;
2906
2907def FAf32 :
2908 RRForm<0b00100011010, (outs R32FP:$rT), (ins R32FP:$rA, R32FP:$rB),
2909 "fa\t$rT, $rA, $rB", SPrecFP,
2910 [(set R32FP:$rT, (fadd R32FP:$rA, R32FP:$rB))]>;
2911
2912def FSv4f32:
2913 RRForm<0b00100011010, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
2914 "fs\t$rT, $rA, $rB", SPrecFP,
2915 [(set (v4f32 VECREG:$rT), (fsub (v4f32 VECREG:$rA), (v4f32 VECREG:$rB)))]>;
2916
2917def FSf32 :
2918 RRForm<0b10100011010, (outs R32FP:$rT), (ins R32FP:$rA, R32FP:$rB),
2919 "fs\t$rT, $rA, $rB", SPrecFP,
2920 [(set R32FP:$rT, (fsub R32FP:$rA, R32FP:$rB))]>;
2921
2922// Floating point reciprocal estimate
2923def FREv4f32 :
2924 RRForm_1<0b00011101100, (outs VECREG:$rT), (ins VECREG:$rA),
2925 "frest\t$rT, $rA", SPrecFP,
2926 [(set (v4f32 VECREG:$rT), (SPUreciprocalEst (v4f32 VECREG:$rA)))]>;
2927
2928def FREf32 :
2929 RRForm_1<0b00011101100, (outs R32FP:$rT), (ins R32FP:$rA),
2930 "frest\t$rT, $rA", SPrecFP,
2931 [(set R32FP:$rT, (SPUreciprocalEst R32FP:$rA))]>;
2932
2933// Floating point interpolate (used in conjunction with reciprocal estimate)
2934def FIv4f32 :
2935 RRForm<0b00101011110, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
2936 "fi\t$rT, $rA, $rB", SPrecFP,
2937 [(set (v4f32 VECREG:$rT), (SPUinterpolate (v4f32 VECREG:$rA),
2938 (v4f32 VECREG:$rB)))]>;
2939
2940def FIf32 :
2941 RRForm<0b00101011110, (outs R32FP:$rT), (ins R32FP:$rA, R32FP:$rB),
2942 "fi\t$rT, $rA, $rB", SPrecFP,
2943 [(set R32FP:$rT, (SPUinterpolate R32FP:$rA, R32FP:$rB))]>;
2944
2945// Floating Compare Equal
2946def FCEQf32 :
2947 RRForm<0b01000011110, (outs R32C:$rT), (ins R32FP:$rA, R32FP:$rB),
2948 "fceq\t$rT, $rA, $rB", SPrecFP,
2949 [(set R32C:$rT, (setoeq R32FP:$rA, R32FP:$rB))]>;
2950
2951def FCMEQf32 :
2952 RRForm<0b01010011110, (outs R32C:$rT), (ins R32FP:$rA, R32FP:$rB),
2953 "fcmeq\t$rT, $rA, $rB", SPrecFP,
2954 [(set R32C:$rT, (setoeq (fabs R32FP:$rA), (fabs R32FP:$rB)))]>;
2955
2956def FCGTf32 :
2957 RRForm<0b01000011010, (outs R32C:$rT), (ins R32FP:$rA, R32FP:$rB),
2958 "fcgt\t$rT, $rA, $rB", SPrecFP,
2959 [(set R32C:$rT, (setogt R32FP:$rA, R32FP:$rB))]>;
2960
2961def FCMGTf32 :
2962 RRForm<0b01010011010, (outs R32C:$rT), (ins R32FP:$rA, R32FP:$rB),
2963 "fcmgt\t$rT, $rA, $rB", SPrecFP,
2964 [(set R32C:$rT, (setogt (fabs R32FP:$rA), (fabs R32FP:$rB)))]>;
2965
2966// FP Status and Control Register Write
2967// Why isn't rT a don't care in the ISA?
2968// Should we create a special RRForm_3 for this guy and zero out the rT?
2969def FSCRWf32 :
2970 RRForm_1<0b01011101110, (outs R32FP:$rT), (ins R32FP:$rA),
2971 "fscrwr\t$rA", SPrecFP,
2972 [/* This instruction requires an intrinsic. Note: rT is unused. */]>;
2973
2974// FP Status and Control Register Read
2975def FSCRRf32 :
2976 RRForm_2<0b01011101110, (outs R32FP:$rT), (ins),
2977 "fscrrd\t$rT", SPrecFP,
2978 [/* This instruction requires an intrinsic */]>;
2979
2980// llvm instruction space
2981// How do these map onto cell instructions?
2982// fdiv rA rB
2983// frest rC rB # c = 1/b (both lines)
2984// fi rC rB rC
2985// fm rD rA rC # d = a * 1/b
2986// fnms rB rD rB rA # b = - (d * b - a) --should == 0 in a perfect world
2987// fma rB rB rC rD # b = b * c + d
2988//                       = -(d * b - a) * c + d
2989//                       = a * c - c * (a * b * c - a)
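// In other words (a sketch of the algebra, not generated code): with
// c ~= 1/b from frest+fi and d = a*c, the fnms residual is r = a - d*b, and
// the final fma produces d + r*c, i.e. a Newton-Raphson corrected a/b.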
2990
2991// fcopysign (???)
2992
2993// Library calls:
2994// These llvm instructions will actually map to library calls.
2995// All that's needed, then, is to check that the appropriate library is
2996// imported and do a brsl to the proper function name.
2997// frem # fmod(x, y): x - (x/y) * y
2998// (Note: fmod(double, double) and fmodf(float, float))
2999// fsqrt?
3000// fsin?
3001// fcos?
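// For example (a sketch only, nothing emits this yet): a single-precision
// frem would lower to a call such as "brsl $lr, fmodf", assuming an fmodf
// implementation is linked in.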
3002// Unimplemented SPU instruction space
3003// floating reciprocal absolute square root estimate (frsqest)
3004
3005// The following are probably just intrinsics
3006// status and control register write
3007// status and control register read
3008
3009//--------------------------------------
3010// Floating point multiply instructions
3011//--------------------------------------
3012
3013def FMv4f32:
3014 RRForm<0b00100011010, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
3015 "fm\t$rT, $rA, $rB", SPrecFP,
3016 [(set (v4f32 VECREG:$rT), (fmul (v4f32 VECREG:$rA),
3017 (v4f32 VECREG:$rB)))]>;
3018
3019def FMf32 :
3020 RRForm<0b01100011010, (outs R32FP:$rT), (ins R32FP:$rA, R32FP:$rB),
3021 "fm\t$rT, $rA, $rB", SPrecFP,
3022 [(set R32FP:$rT, (fmul R32FP:$rA, R32FP:$rB))]>;
3023
3024// Floating point multiply and add
3025// e.g. d = c + (a * b)
3026def FMAv4f32:
3027 RRRForm<0b0111, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB, VECREG:$rC),
3028 "fma\t$rT, $rA, $rB, $rC", SPrecFP,
3029 [(set (v4f32 VECREG:$rT),
3030 (fadd (v4f32 VECREG:$rC),
3031 (fmul (v4f32 VECREG:$rA), (v4f32 VECREG:$rB))))]>;
3032
3033def FMAf32:
3034 RRRForm<0b0111, (outs R32FP:$rT), (ins R32FP:$rA, R32FP:$rB, R32FP:$rC),
3035 "fma\t$rT, $rA, $rB, $rC", SPrecFP,
3036 [(set R32FP:$rT, (fadd R32FP:$rC, (fmul R32FP:$rA, R32FP:$rB)))]>;
3037
3038// FP multiply and subtract
3039// Subtracts value in rC from product
3040// res = a * b - c
3041def FMSv4f32 :
3042 RRRForm<0b0111, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB, VECREG:$rC),
3043 "fms\t$rT, $rA, $rB, $rC", SPrecFP,
3044 [(set (v4f32 VECREG:$rT),
3045 (fsub (fmul (v4f32 VECREG:$rA), (v4f32 VECREG:$rB)),
3046 (v4f32 VECREG:$rC)))]>;
3047
3048def FMSf32 :
3049 RRRForm<0b0111, (outs R32FP:$rT), (ins R32FP:$rA, R32FP:$rB, R32FP:$rC),
3050 "fms\t$rT, $rA, $rB, $rC", SPrecFP,
3051 [(set R32FP:$rT,
3052 (fsub (fmul R32FP:$rA, R32FP:$rB), R32FP:$rC))]>;
3053
3054// Floating Negative Multiply and Subtract
3055// Subtracts product from value in rC
3056// res = fneg(fms a b c)
3057// = - (a * b - c)
3058// = c - a * b
3059// NOTE: subtraction order
3060// fsub a b = a - b
3061// fs a b = b - a?
3062def FNMSf32 :
3063 RRRForm<0b1101, (outs R32FP:$rT), (ins R32FP:$rA, R32FP:$rB, R32FP:$rC),
3064 "fnms\t$rT, $rA, $rB, $rC", SPrecFP,
3065 [(set R32FP:$rT, (fsub R32FP:$rC, (fmul R32FP:$rA, R32FP:$rB)))]>;
3066
3067def FNMSv4f32 :
3068 RRRForm<0b1101, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB, VECREG:$rC),
3069 "fnms\t$rT, $rA, $rB, $rC", SPrecFP,
3070 [(set (v4f32 VECREG:$rT),
3071 (fsub (v4f32 VECREG:$rC),
3072 (fmul (v4f32 VECREG:$rA),
3073 (v4f32 VECREG:$rB))))]>;
3074
3075//--------------------------------------
3076// Floating Point Conversions
3077// Signed conversions:
3078def CSiFv4f32:
3079 CVTIntFPForm<0b0101101110, (outs VECREG:$rT), (ins VECREG:$rA),
3080 "csflt\t$rT, $rA, 0", SPrecFP,
3081 [(set (v4f32 VECREG:$rT), (sint_to_fp (v4i32 VECREG:$rA)))]>;
3082
3083// Convert signed integer to floating point
3084def CSiFf32 :
3085 CVTIntFPForm<0b0101101110, (outs R32FP:$rT), (ins R32C:$rA),
3086 "csflt\t$rT, $rA, 0", SPrecFP,
3087 [(set R32FP:$rT, (sint_to_fp R32C:$rA))]>;
3088
3089// Convert unsigned integer to float
3090def CUiFv4f32 :
3091 CVTIntFPForm<0b1101101110, (outs VECREG:$rT), (ins VECREG:$rA),
3092 "cuflt\t$rT, $rA, 0", SPrecFP,
3093 [(set (v4f32 VECREG:$rT), (uint_to_fp (v4i32 VECREG:$rA)))]>;
3094
3095def CUiFf32 :
3096 CVTIntFPForm<0b1101101110, (outs R32FP:$rT), (ins R32C:$rA),
3097 "cuflt\t$rT, $rA, 0", SPrecFP,
3098 [(set R32FP:$rT, (uint_to_fp R32C:$rA))]>;
3099
3100// Convert float to unsigned int
3101// Assume that scale = 0
3102
3103def CFUiv4f32 :
3104 CVTIntFPForm<0b1101101110, (outs VECREG:$rT), (ins VECREG:$rA),
3105 "cfltu\t$rT, $rA, 0", SPrecFP,
3106 [(set (v4i32 VECREG:$rT), (fp_to_uint (v4f32 VECREG:$rA)))]>;
3107
3108def CFUif32 :
3109 CVTIntFPForm<0b1101101110, (outs R32C:$rT), (ins R32FP:$rA),
3110 "cfltu\t$rT, $rA, 0", SPrecFP,
3111 [(set R32C:$rT, (fp_to_uint R32FP:$rA))]>;
3112
3113// Convert float to signed int
3114// Assume that scale = 0
3115
3116def CFSiv4f32 :
3117 CVTIntFPForm<0b1101101110, (outs VECREG:$rT), (ins VECREG:$rA),
3118 "cflts\t$rT, $rA, 0", SPrecFP,
3119 [(set (v4i32 VECREG:$rT), (fp_to_sint (v4f32 VECREG:$rA)))]>;
3120
3121def CFSif32 :
3122 CVTIntFPForm<0b1101101110, (outs R32C:$rT), (ins R32FP:$rA),
3123 "cflts\t$rT, $rA, 0", SPrecFP,
3124 [(set R32C:$rT, (fp_to_sint R32FP:$rA))]>;
3125
3126//===----------------------------------------------------------------------==//
3127// Single<->Double precision conversions
3128//===----------------------------------------------------------------------==//
3129
3130// NOTE: We use "vec" name suffix here to avoid confusion (e.g. input is a
3131// v4f32, output is v2f64--which goes in the name?)
3132
3133// Floating point extend single to double
3134// NOTE: Not sure if passing in v4f32 to FESDvec is correct since it
3135// operates on two double-word slots (i.e. 1st and 3rd fp numbers
3136// are ignored).
3137def FESDvec :
3138 RRForm_1<0b00011101110, (outs VECREG:$rT), (ins VECREG:$rA),
3139 "fesd\t$rT, $rA", SPrecFP,
3140 [(set (v2f64 VECREG:$rT), (fextend (v4f32 VECREG:$rA)))]>;
3141
3142def FESDf32 :
3143 RRForm_1<0b00011101110, (outs R64FP:$rT), (ins R32FP:$rA),
3144 "fesd\t$rT, $rA", SPrecFP,
3145 [(set R64FP:$rT, (fextend R32FP:$rA))]>;
3146
3147// Floating point round double to single
3148//def FRDSvec :
3149// RRForm_1<0b10011101110, (outs VECREG:$rT), (ins VECREG:$rA),
3150// "frds\t$rT, $rA,", SPrecFP,
3151// [(set (v4f32 R32FP:$rT), (fround (v2f64 R64FP:$rA)))]>;
3152
3153def FRDSf64 :
3154 RRForm_1<0b10011101110, (outs R32FP:$rT), (ins R64FP:$rA),
3155 "frds\t$rT, $rA", SPrecFP,
3156 [(set R32FP:$rT, (fround R64FP:$rA))]>;
3157
3158// TODO: include anyextend?
3159
3160//===----------------------------------------------------------------------==//
3161// Double precision floating point instructions
3162//===----------------------------------------------------------------------==//
3163def FAf64 :
3164 RRForm<0b00110011010, (outs R64FP:$rT), (ins R64FP:$rA, R64FP:$rB),
3165 "dfa\t$rT, $rA, $rB", DPrecFP,
3166 [(set R64FP:$rT, (fadd R64FP:$rA, R64FP:$rB))]>;
3167
3168def FAv2f64 :
3169 RRForm<0b00110011010, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
3170 "dfa\t$rT, $rA, $rB", DPrecFP,
3171 [(set (v2f64 VECREG:$rT), (fadd (v2f64 VECREG:$rA), (v2f64 VECREG:$rB)))]>;
3172
3173def FSf64 :
3174 RRForm<0b10100011010, (outs R64FP:$rT), (ins R64FP:$rA, R64FP:$rB),
3175 "dfs\t$rT, $rA, $rB", DPrecFP,
3176 [(set R64FP:$rT, (fsub R64FP:$rA, R64FP:$rB))]>;
3177
3178def FSv2f64 :
3179 RRForm<0b10100011010, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
3180 "dfs\t$rT, $rA, $rB", DPrecFP,
3181 [(set (v2f64 VECREG:$rT),
3182 (fsub (v2f64 VECREG:$rA), (v2f64 VECREG:$rB)))]>;
3183
3184def FMf64 :
3185 RRForm<0b01100011010, (outs R64FP:$rT), (ins R64FP:$rA, R64FP:$rB),
3186 "dfm\t$rT, $rA, $rB", DPrecFP,
3187 [(set R64FP:$rT, (fmul R64FP:$rA, R64FP:$rB))]>;
3188
3189def FMv2f64:
3190 RRForm<0b00100011010, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
3191 "dfm\t$rT, $rA, $rB", DPrecFP,
3192 [(set (v2f64 VECREG:$rT),
3193 (fmul (v2f64 VECREG:$rA), (v2f64 VECREG:$rB)))]>;
3194
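// The double-precision multiply-add family is destructive: dfma/dfms/dfnms/
// dfnma accumulate into $rT, so $rC is tied to $rT (RegConstraint) and is
// dropped from the encoding (NoEncode).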
3195def FMAf64:
3196 RRForm<0b00111010110, (outs R64FP:$rT),
3197 (ins R64FP:$rA, R64FP:$rB, R64FP:$rC),
3198 "dfma\t$rT, $rA, $rB", DPrecFP,
3199 [(set R64FP:$rT, (fadd R64FP:$rC, (fmul R64FP:$rA, R64FP:$rB)))]>,
3200 RegConstraint<"$rC = $rT">,
3201 NoEncode<"$rC">;
3202
3203def FMAv2f64:
3204 RRForm<0b00111010110, (outs VECREG:$rT),
3205 (ins VECREG:$rA, VECREG:$rB, VECREG:$rC),
3206 "dfma\t$rT, $rA, $rB", DPrecFP,
3207 [(set (v2f64 VECREG:$rT),
3208 (fadd (v2f64 VECREG:$rC),
3209 (fmul (v2f64 VECREG:$rA), (v2f64 VECREG:$rB))))]>,
3210 RegConstraint<"$rC = $rT">,
3211 NoEncode<"$rC">;
3212
3213def FMSf64 :
3214 RRForm<0b10111010110, (outs R64FP:$rT),
3215 (ins R64FP:$rA, R64FP:$rB, R64FP:$rC),
3216 "dfms\t$rT, $rA, $rB", DPrecFP,
3217 [(set R64FP:$rT, (fsub (fmul R64FP:$rA, R64FP:$rB), R64FP:$rC))]>,
3218 RegConstraint<"$rC = $rT">,
3219 NoEncode<"$rC">;
3220
3221def FMSv2f64 :
3222 RRForm<0b10111010110, (outs VECREG:$rT),
3223 (ins VECREG:$rA, VECREG:$rB, VECREG:$rC),
3224 "dfms\t$rT, $rA, $rB", DPrecFP,
3225 [(set (v2f64 VECREG:$rT),
3226 (fsub (fmul (v2f64 VECREG:$rA), (v2f64 VECREG:$rB)),
3227 (v2f64 VECREG:$rC)))]>,
          RegConstraint<"$rC = $rT">,
          NoEncode<"$rC">;
3228
3229// FNMS: - (a * b - c)
3230// - (a * b) + c => c - (a * b)
3231def FNMSf64 :
3232 RRForm<0b01111010110, (outs R64FP:$rT),
3233 (ins R64FP:$rA, R64FP:$rB, R64FP:$rC),
3234 "dfnms\t$rT, $rA, $rB", DPrecFP,
3235 [(set R64FP:$rT, (fsub R64FP:$rC, (fmul R64FP:$rA, R64FP:$rB)))]>,
3236 RegConstraint<"$rC = $rT">,
3237 NoEncode<"$rC">;
3238
3239def : Pat<(fneg (fsub (fmul R64FP:$rA, R64FP:$rB), R64FP:$rC)),
3240 (FNMSf64 R64FP:$rA, R64FP:$rB, R64FP:$rC)>;
3241
3242def FNMSv2f64 :
3243 RRForm<0b01111010110, (outs VECREG:$rT),
3244 (ins VECREG:$rA, VECREG:$rB, VECREG:$rC),
3245 "dfnms\t$rT, $rA, $rB", DPrecFP,
3246 [(set (v2f64 VECREG:$rT),
3247 (fsub (v2f64 VECREG:$rC),
3248 (fmul (v2f64 VECREG:$rA),
3249 (v2f64 VECREG:$rB))))]>,
3250 RegConstraint<"$rC = $rT">,
3251 NoEncode<"$rC">;
3252
3253def : Pat<(fneg (fsub (fmul (v2f64 VECREG:$rA), (v2f64 VECREG:$rB)),
3254 (v2f64 VECREG:$rC))),
3255 (FNMSv2f64 VECREG:$rA, VECREG:$rB, VECREG:$rC)>;
3256
3257// - (a * b + c)
3258// - (a * b) - c
3259def FNMAf64 :
3260 RRForm<0b11111010110, (outs R64FP:$rT),
3261 (ins R64FP:$rA, R64FP:$rB, R64FP:$rC),
3262 "dfnma\t$rT, $rA, $rB", DPrecFP,
3263 [(set R64FP:$rT, (fneg (fadd R64FP:$rC, (fmul R64FP:$rA, R64FP:$rB))))]>,
3264 RegConstraint<"$rC = $rT">,
3265 NoEncode<"$rC">;
3266
3267def FNMAv2f64 :
3268 RRForm<0b11111010110, (outs VECREG:$rT),
3269 (ins VECREG:$rA, VECREG:$rB, VECREG:$rC),
3270 "dfnma\t$rT, $rA, $rB", DPrecFP,
3271 [(set (v2f64 VECREG:$rT),
3272 (fneg (fadd (v2f64 VECREG:$rC),
3273 (fmul (v2f64 VECREG:$rA),
3274 (v2f64 VECREG:$rB)))))]>,
3275 RegConstraint<"$rC = $rT">,
3276 NoEncode<"$rC">;
3277
3278//===----------------------------------------------------------------------==//
3279// Floating point negation and absolute value
3280//===----------------------------------------------------------------------==//
3281
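// fneg flips the sign bit of each element: XOR with a mask that has only the
// sign bit set (ILHU 0x8000 yields 0x80000000 per 32-bit word; FSMBI/ANDBI
// below builds 0x80 in the high byte of each doubleword).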
3282def : Pat<(fneg (v4f32 VECREG:$rA)),
3283 (XORfnegvec (v4f32 VECREG:$rA),
3284 (v4f32 (ILHUv4i32 0x8000)))>;
3285
3286def : Pat<(fneg R32FP:$rA),
3287 (XORfneg32 R32FP:$rA, (ILHUr32 0x8000))>;
3288
3289def : Pat<(fneg (v2f64 VECREG:$rA)),
3290 (XORfnegvec (v2f64 VECREG:$rA),
3291 (v2f64 (ANDBIv16i8 (FSMBIv16i8 0x8080), 0x80)))>;
3292
3293def : Pat<(fneg R64FP:$rA),
3294 (XORfneg64 R64FP:$rA,
3295 (ANDBIv16i8 (FSMBIv16i8 0x8080), 0x80))>;
3296
3297// Floating point absolute value
3298
3299def : Pat<(fabs R32FP:$rA),
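// The scalar f32 pattern ANDs with 0x7fffffff (built via ILHU/IOHL) to clear
// the sign bit; the f64 and vector patterns use a byte mask built with
// FSMBI/ANDBI.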
3300 (ANDfabs32 R32FP:$rA, (IOHLr32 (ILHUr32 0x7fff), 0xffff))>;
3301
3302def : Pat<(fabs (v4f32 VECREG:$rA)),
3303 (ANDfabsvec (v4f32 VECREG:$rA),
3304 (v4f32 (ANDBIv16i8 (FSMBIv16i8 0xffff), 0x7f)))>;
3305
3306def : Pat<(fabs R64FP:$rA),
3307 (ANDfabs64 R64FP:$rA, (ANDBIv16i8 (FSMBIv16i8 0xffff), 0x7f))>;
3308
3309def : Pat<(fabs (v2f64 VECREG:$rA)),
3310 (ANDfabsvec (v2f64 VECREG:$rA),
3311 (v2f64 (ANDBIv16i8 (FSMBIv16i8 0xffff), 0x7f)))>;
3312
3313//===----------------------------------------------------------------------===//
3314// Execute NOP and Load NOP (execute NOPs belong in the even pipeline, load
3315// NOPs belong in the odd pipeline)
3316//===----------------------------------------------------------------------===//
3317
3318def ENOP : I<(outs), (ins), "enop", ExecNOP> {
3319 let Pattern = [];
3320
3321 let Inst{0-10} = 0b10000000010;
3322 let Inst{11-17} = 0;
3323 let Inst{18-24} = 0;
3324 let Inst{25-31} = 0;
3325}
3326
3327def LNOP : I<(outs), (ins), "lnop", LoadNOP> {
3328 let Pattern = [];
3329
3330 let Inst{0-10} = 0b10000000000;
3331 let Inst{11-17} = 0;
3332 let Inst{18-24} = 0;
3333 let Inst{25-31} = 0;
3334}
3335
3336//===----------------------------------------------------------------------===//
3337// Bit conversions (type conversions between vector/packed types)
3338// NOTE: Promotions are handled using the XS* instructions. Truncation
3339// is not handled.
3340//===----------------------------------------------------------------------===//
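// All vector types share the single 128-bit VECREG register class, so these
// bitconverts are register-level no-ops; each pattern just re-tags $src with
// the destination type.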
3341def : Pat<(v16i8 (bitconvert (v8i16 VECREG:$src))), (v16i8 VECREG:$src)>;
3342def : Pat<(v16i8 (bitconvert (v4i32 VECREG:$src))), (v16i8 VECREG:$src)>;
3343def : Pat<(v16i8 (bitconvert (v2i64 VECREG:$src))), (v16i8 VECREG:$src)>;
3344def : Pat<(v16i8 (bitconvert (v4f32 VECREG:$src))), (v16i8 VECREG:$src)>;
3345def : Pat<(v16i8 (bitconvert (v2f64 VECREG:$src))), (v16i8 VECREG:$src)>;
3346
3347def : Pat<(v8i16 (bitconvert (v16i8 VECREG:$src))), (v8i16 VECREG:$src)>;
3348def : Pat<(v8i16 (bitconvert (v4i32 VECREG:$src))), (v8i16 VECREG:$src)>;
3349def : Pat<(v8i16 (bitconvert (v2i64 VECREG:$src))), (v8i16 VECREG:$src)>;
3350def : Pat<(v8i16 (bitconvert (v4f32 VECREG:$src))), (v8i16 VECREG:$src)>;
3351def : Pat<(v8i16 (bitconvert (v2f64 VECREG:$src))), (v8i16 VECREG:$src)>;
3352
3353def : Pat<(v4i32 (bitconvert (v16i8 VECREG:$src))), (v4i32 VECREG:$src)>;
3354def : Pat<(v4i32 (bitconvert (v8i16 VECREG:$src))), (v4i32 VECREG:$src)>;
3355def : Pat<(v4i32 (bitconvert (v2i64 VECREG:$src))), (v4i32 VECREG:$src)>;
3356def : Pat<(v4i32 (bitconvert (v4f32 VECREG:$src))), (v4i32 VECREG:$src)>;
3357def : Pat<(v4i32 (bitconvert (v2f64 VECREG:$src))), (v4i32 VECREG:$src)>;
3358
3359def : Pat<(v2i64 (bitconvert (v16i8 VECREG:$src))), (v2i64 VECREG:$src)>;
3360def : Pat<(v2i64 (bitconvert (v8i16 VECREG:$src))), (v2i64 VECREG:$src)>;
3361def : Pat<(v2i64 (bitconvert (v4i32 VECREG:$src))), (v2i64 VECREG:$src)>;
3362def : Pat<(v2i64 (bitconvert (v4f32 VECREG:$src))), (v2i64 VECREG:$src)>;
3363def : Pat<(v2i64 (bitconvert (v2f64 VECREG:$src))), (v2i64 VECREG:$src)>;
3364
3365def : Pat<(v4f32 (bitconvert (v16i8 VECREG:$src))), (v4f32 VECREG:$src)>;
3366def : Pat<(v4f32 (bitconvert (v8i16 VECREG:$src))), (v4f32 VECREG:$src)>;
3367def : Pat<(v4f32 (bitconvert (v2i64 VECREG:$src))), (v4f32 VECREG:$src)>;
3368def : Pat<(v4f32 (bitconvert (v4i32 VECREG:$src))), (v4f32 VECREG:$src)>;
3369def : Pat<(v4f32 (bitconvert (v2f64 VECREG:$src))), (v4f32 VECREG:$src)>;
3370
3371def : Pat<(v2f64 (bitconvert (v16i8 VECREG:$src))), (v2f64 VECREG:$src)>;
3372def : Pat<(v2f64 (bitconvert (v8i16 VECREG:$src))), (v2f64 VECREG:$src)>;
3373def : Pat<(v2f64 (bitconvert (v4i32 VECREG:$src))), (v2f64 VECREG:$src)>;
3374def : Pat<(v2f64 (bitconvert (v2i64 VECREG:$src))), (v2f64 VECREG:$src)>;
3375def : Pat<(v2f64 (bitconvert (v2f64 VECREG:$src))), (v2f64 VECREG:$src)>;
3376
3377def : Pat<(f32 (bitconvert (i32 R32C:$src))), (f32 R32FP:$src)>;
3378def : Pat<(f64 (bitconvert (i64 R64C:$src))), (f64 R64FP:$src)>;
3379
3380//===----------------------------------------------------------------------===//
3381// Instruction patterns:
3382//===----------------------------------------------------------------------===//
3383
3384// General 32-bit constants:
3385def : Pat<(i32 imm:$imm),
3386 (IOHLr32 (ILHUr32 (HI16 imm:$imm)), (LO16 imm:$imm))>;
3387
3388// Single precision float constants:
3389def : Pat<(SPUFPconstant (f32 fpimm:$imm)),
3390 (IOHLf32 (ILHUf32 (HI16_f32 fpimm:$imm)), (LO16_f32 fpimm:$imm))>;
3391
3392// General constant 32-bit vectors
3393def : Pat<(v4i32 v4i32Imm:$imm),
3394 (IOHLvec (v4i32 (ILHUv4i32 (HI16_vec v4i32Imm:$imm))),
3395 (LO16_vec v4i32Imm:$imm))>;
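// The three constant patterns above share the same two-instruction idiom:
// ILHU materializes the upper 16 bits, then IOHL ORs in the lower 16 bits.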
3396
3397// 8-bit constants
3398def : Pat<(i8 imm:$imm),
3399 (ILHr8 imm:$imm)>;
3400
3401//===----------------------------------------------------------------------===//
3402// Call instruction patterns:
3403//===----------------------------------------------------------------------===//
3404// Return void
3405def : Pat<(ret),
3406 (RET)>;
3407
3408//===----------------------------------------------------------------------===//
3409// Zero/Any/Sign extensions
3410//===----------------------------------------------------------------------===//
3411
3412// zext 1->32: Zero extend i1 to i32
3413def : Pat<(SPUextract_i1_zext R32C:$rSrc),
3414 (ANDIr32 R32C:$rSrc, 0x1)>;
3415
3416// sext 8->32: Sign extend bytes to words
3417def : Pat<(sext_inreg R32C:$rSrc, i8),
3418 (XSHWr32 (XSBHr32 R32C:$rSrc))>;
3419
3420def : Pat<(i32 (sext R8C:$rSrc)),
3421 (XSHWr16 (XSBHr8 R8C:$rSrc))>;
3422
3423def : Pat<(SPUextract_i8_sext VECREG:$rSrc),
3424 (XSHWr32 (XSBHr32 (ORi32_v4i32 (v4i32 VECREG:$rSrc),
3425 (v4i32 VECREG:$rSrc))))>;
3426
3427// zext 8->16: Zero extend bytes to halfwords
3428def : Pat<(i16 (zext R8C:$rSrc)),
3429 (ANDHI1To2 R8C:$rSrc, 0xff)>;
3430
3431// zext 8->32 from preferred slot in load/store
3432def : Pat<(SPUextract_i8_zext VECREG:$rSrc),
3433 (ANDIr32 (ORi32_v4i32 (v4i32 VECREG:$rSrc), (v4i32 VECREG:$rSrc)),
3434 0xff)>;
3435
3436// zext 8->32: Zero extend bytes to words
3437def : Pat<(i32 (zext R8C:$rSrc)),
3438 (ANDI1To4 R8C:$rSrc, 0xff)>;
3439
3440// anyext 8->16: Extend 8->16 bits, irrespective of sign
3441def : Pat<(i16 (anyext R8C:$rSrc)),
3442 (ORHI1To2 R8C:$rSrc, 0)>;
3443
3444// anyext 8->32: Extend 8->32 bits, irrespective of sign
3445def : Pat<(i32 (anyext R8C:$rSrc)),
3446 (ORI1To4 R8C:$rSrc, 0)>;
3447
3448// zext 16->32: Zero extend halfwords to words (note that we have to juggle the
3449// 0xffff constant since it will not fit into an immediate.)
3450def : Pat<(i32 (zext R16C:$rSrc)),
3451 (AND2To4 R16C:$rSrc, (ILAr32 0xffff))>;
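// (ANDI only takes a 10-bit signed immediate, so 0xffff is first materialized
// with ILA, which has an 18-bit immediate field, and then ANDed in.)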
3452
3453def : Pat<(i32 (zext (and R16C:$rSrc, 0xf))),
3454 (ANDI2To4 R16C:$rSrc, 0xf)>;
3455
3456def : Pat<(i32 (zext (and R16C:$rSrc, 0xff))),
3457 (ANDI2To4 R16C:$rSrc, 0xff)>;
3458
3459def : Pat<(i32 (zext (and R16C:$rSrc, 0xfff))),
3460 (ANDI2To4 R16C:$rSrc, 0xfff)>;
3461
3462// anyext 16->32: Extend 16->32 bits, irrespective of sign
3463def : Pat<(i32 (anyext R16C:$rSrc)),
3464 (ORI2To4 R16C:$rSrc, 0)>;
3465
3466//===----------------------------------------------------------------------===//
3467// Address translation: SPU, like PPC, has to split addresses into high and
3468// low parts in order to load them into a register.
3469//===----------------------------------------------------------------------===//
3470
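// The SPUhi half of an address maps to ILHUhi, the SPUlo half to ILAlo, and
// SPUdform addresses to ILAlsa, uniformly for global addresses, constant-pool
// entries, and jump tables.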
3471def : Pat<(SPUhi tglobaladdr:$in, 0), (ILHUhi tglobaladdr:$in)>;
3472def : Pat<(SPUlo tglobaladdr:$in, 0), (ILAlo tglobaladdr:$in)>;
3473def : Pat<(SPUdform tglobaladdr:$in, imm:$imm), (ILAlsa tglobaladdr:$in)>;
3474def : Pat<(SPUhi tconstpool:$in , 0), (ILHUhi tconstpool:$in)>;
3475def : Pat<(SPUlo tconstpool:$in , 0), (ILAlo tconstpool:$in)>;
3476def : Pat<(SPUdform tconstpool:$in, imm:$imm), (ILAlsa tconstpool:$in)>;
3477def : Pat<(SPUhi tjumptable:$in, 0), (ILHUhi tjumptable:$in)>;
3478def : Pat<(SPUlo tjumptable:$in, 0), (ILAlo tjumptable:$in)>;
3479def : Pat<(SPUdform tjumptable:$in, imm:$imm), (ILAlsa tjumptable:$in)>;
3480
3481// Force load of global address to a register. These forms show up in
3482// SPUISD::DFormAddr pseudo instructions:
3483def : Pat<(add tglobaladdr:$in, 0), (ILAlsa tglobaladdr:$in)>;
3484def : Pat<(add tconstpool:$in, 0), (ILAlsa tconstpool:$in)>;
3485def : Pat<(add tjumptable:$in, 0), (ILAlsa tjumptable:$in)>;
3486// Intrinsics:
3487include "CellSDKIntrinsics.td"