//===-- SIInstructions.td - SI Instruction Definitions --------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
// This file was originally auto-generated from a GPU register header file and
// all the instruction definitions were originally commented out. Instructions
// that are not yet supported remain commented out.
//===----------------------------------------------------------------------===//

def isGCN : Predicate<"Subtarget->getGeneration() "
                      ">= SISubtarget::SOUTHERN_ISLANDS">,
            AssemblerPredicate<"FeatureGCN">;
def isSI : Predicate<"Subtarget->getGeneration() "
                     "== SISubtarget::SOUTHERN_ISLANDS">,
           AssemblerPredicate<"FeatureSouthernIslands">;

def has16BankLDS : Predicate<"Subtarget->getLDSBankCount() == 16">;
def has32BankLDS : Predicate<"Subtarget->getLDSBankCount() == 32">;
def HasVGPRIndexMode : Predicate<"Subtarget->hasVGPRIndexMode()">,
                       AssemblerPredicate<"FeatureVGPRIndexMode">;
def HasMovrel : Predicate<"Subtarget->hasMovrel()">,
                AssemblerPredicate<"FeatureMovrel">;

include "VOPInstructions.td"
include "SOPInstructions.td"
include "SMInstructions.td"
include "FLATInstructions.td"
include "BUFInstructions.td"

let SubtargetPredicate = isGCN in {

//===----------------------------------------------------------------------===//
// EXP Instructions
//===----------------------------------------------------------------------===//

defm EXP : EXP_m<0, AMDGPUexport>;
defm EXP_DONE : EXP_m<1, AMDGPUexport_done>;

//===----------------------------------------------------------------------===//
// VINTRP Instructions
//===----------------------------------------------------------------------===//

let Uses = [M0, EXEC] in {

// FIXME: Specify SchedRW for VINTRP instructions.

multiclass V_INTERP_P1_F32_m : VINTRP_m <
  0x00000000,
  (outs VGPR_32:$vdst),
  (ins VGPR_32:$vsrc, Attr:$attr, AttrChan:$attrchan),
  "v_interp_p1_f32 $vdst, $vsrc, $attr$attrchan",
  [(set f32:$vdst, (AMDGPUinterp_p1 f32:$vsrc, (i32 imm:$attrchan),
                                    (i32 imm:$attr)))]
>;

let OtherPredicates = [has32BankLDS] in {

defm V_INTERP_P1_F32 : V_INTERP_P1_F32_m;

} // End OtherPredicates = [has32BankLDS]

let OtherPredicates = [has16BankLDS], Constraints = "@earlyclobber $vdst", isAsmParserOnly=1 in {

defm V_INTERP_P1_F32_16bank : V_INTERP_P1_F32_m;

} // End OtherPredicates = [has16BankLDS], Constraints = "@earlyclobber $vdst", isAsmParserOnly=1

let DisableEncoding = "$src0", Constraints = "$src0 = $vdst" in {

defm V_INTERP_P2_F32 : VINTRP_m <
  0x00000001,
  (outs VGPR_32:$vdst),
  (ins VGPR_32:$src0, VGPR_32:$vsrc, Attr:$attr, AttrChan:$attrchan),
  "v_interp_p2_f32 $vdst, $vsrc, $attr$attrchan",
  [(set f32:$vdst, (AMDGPUinterp_p2 f32:$src0, f32:$vsrc, (i32 imm:$attrchan),
                                    (i32 imm:$attr)))]>;

} // End DisableEncoding = "$src0", Constraints = "$src0 = $vdst"

defm V_INTERP_MOV_F32 : VINTRP_m <
  0x00000002,
  (outs VGPR_32:$vdst),
  (ins InterpSlot:$vsrc, Attr:$attr, AttrChan:$attrchan),
  "v_interp_mov_f32 $vdst, $vsrc, $attr$attrchan",
  [(set f32:$vdst, (AMDGPUinterp_mov (i32 imm:$vsrc), (i32 imm:$attrchan),
                                     (i32 imm:$attr)))]>;

} // End Uses = [M0, EXEC]

//===----------------------------------------------------------------------===//
// Pseudo Instructions
//===----------------------------------------------------------------------===//

let hasSideEffects = 0, mayLoad = 0, mayStore = 0, Uses = [EXEC] in {

// For use in patterns
def V_CNDMASK_B64_PSEUDO : VOP3Common <(outs VReg_64:$vdst),
  (ins VSrc_b64:$src0, VSrc_b64:$src1, SSrc_b64:$src2), "", []> {
  let isPseudo = 1;
  let isCodeGenOnly = 1;
  let usesCustomInserter = 1;
}

// 64-bit vector move instruction. This is mainly used by the SIFoldOperands
// pass to enable folding of inline immediates.
def V_MOV_B64_PSEUDO : VPseudoInstSI <(outs VReg_64:$vdst),
                                      (ins VSrc_b64:$src0)>;
} // End let hasSideEffects = 0, mayLoad = 0, mayStore = 0, Uses = [EXEC]

def S_TRAP_PSEUDO : VPseudoInstSI <(outs), (ins),
  [(trap)]> {
  let hasSideEffects = 1;
  let SALU = 1;
  let usesCustomInserter = 1;
}

let usesCustomInserter = 1, SALU = 1 in {
def GET_GROUPSTATICSIZE : PseudoInstSI <(outs SReg_32:$sdst), (ins),
  [(set SReg_32:$sdst, (int_amdgcn_groupstaticsize))]>;
} // End let usesCustomInserter = 1, SALU = 1

def S_MOV_B64_term : PseudoInstSI<(outs SReg_64:$dst),
  (ins SSrc_b64:$src0)> {
  let SALU = 1;
  let isAsCheapAsAMove = 1;
  let isTerminator = 1;
}

def S_XOR_B64_term : PseudoInstSI<(outs SReg_64:$dst),
  (ins SSrc_b64:$src0, SSrc_b64:$src1)> {
  let SALU = 1;
  let isAsCheapAsAMove = 1;
  let isTerminator = 1;
}

def S_ANDN2_B64_term : PseudoInstSI<(outs SReg_64:$dst),
  (ins SSrc_b64:$src0, SSrc_b64:$src1)> {
  let SALU = 1;
  let isAsCheapAsAMove = 1;
  let isTerminator = 1;
}

def WAVE_BARRIER : SPseudoInstSI<(outs), (ins),
  [(int_amdgcn_wave_barrier)]> {
  let SchedRW = [];
  let hasNoSchedulingInfo = 1;
  let hasSideEffects = 1;
  let mayLoad = 1;
  let mayStore = 1;
  let isBarrier = 1;
  let isConvergent = 1;
}

// SI pseudo instructions. These are used by the CFG structurizer pass
// and should be lowered to ISA instructions prior to codegen.

// Dummy terminator instruction to use after control flow instructions
// replaced with exec mask operations.
def SI_MASK_BRANCH : PseudoInstSI <
  (outs), (ins brtarget:$target)> {
  let isBranch = 0;
  let isTerminator = 1;
  let isBarrier = 0;
  let Uses = [EXEC];
  let SchedRW = [];
  let hasNoSchedulingInfo = 1;
}

let isTerminator = 1 in {

def SI_IF : CFPseudoInstSI <
  (outs SReg_64:$dst), (ins SReg_64:$vcc, brtarget:$target),
  [(set i64:$dst, (int_amdgcn_if i1:$vcc, bb:$target))], 1, 1> {
  let Constraints = "";
  let Size = 12;
  let mayLoad = 1;
  let mayStore = 1;
  let hasSideEffects = 1;
}

def SI_ELSE : CFPseudoInstSI <
  (outs SReg_64:$dst), (ins SReg_64:$src, brtarget:$target, i1imm:$execfix), [], 1, 1> {
  let Constraints = "$src = $dst";
  let Size = 12;
  let mayStore = 1;
  let mayLoad = 1;
  let hasSideEffects = 1;
}

def SI_LOOP : CFPseudoInstSI <
  (outs), (ins SReg_64:$saved, brtarget:$target),
  [(int_amdgcn_loop i64:$saved, bb:$target)], 1, 1> {
  let Size = 8;
  let isBranch = 1;
  let hasSideEffects = 1;
  let mayLoad = 1;
  let mayStore = 1;
}

} // End isTerminator = 1

def SI_END_CF : CFPseudoInstSI <
  (outs), (ins SReg_64:$saved),
  [(int_amdgcn_end_cf i64:$saved)], 1, 1> {
  let Size = 4;
  let isAsCheapAsAMove = 1;
  let isReMaterializable = 1;
  let mayLoad = 1;
  let mayStore = 1;
  let hasSideEffects = 1;
}

def SI_BREAK : CFPseudoInstSI <
  (outs SReg_64:$dst), (ins SReg_64:$src),
  [(set i64:$dst, (int_amdgcn_break i64:$src))], 1> {
  let Size = 4;
  let isAsCheapAsAMove = 1;
  let isReMaterializable = 1;
}

def SI_IF_BREAK : CFPseudoInstSI <
  (outs SReg_64:$dst), (ins SReg_64:$vcc, SReg_64:$src),
  [(set i64:$dst, (int_amdgcn_if_break i1:$vcc, i64:$src))]> {
  let Size = 4;
  let isAsCheapAsAMove = 1;
  let isReMaterializable = 1;
}

def SI_ELSE_BREAK : CFPseudoInstSI <
  (outs SReg_64:$dst), (ins SReg_64:$src0, SReg_64:$src1),
  [(set i64:$dst, (int_amdgcn_else_break i64:$src0, i64:$src1))]> {
  let Size = 4;
  let isAsCheapAsAMove = 1;
  let isReMaterializable = 1;
}

let Uses = [EXEC], Defs = [EXEC,VCC] in {
def SI_KILL : PseudoInstSI <
  (outs), (ins VSrc_b32:$src),
  [(AMDGPUkill i32:$src)]> {
  let isConvergent = 1;
  let usesCustomInserter = 1;
}

def SI_KILL_TERMINATOR : SPseudoInstSI <
  (outs), (ins VSrc_b32:$src)> {
  let isTerminator = 1;
}

} // End Uses = [EXEC], Defs = [EXEC,VCC]

// Branch on undef scc. Used to avoid intermediate copy from
// IMPLICIT_DEF to SCC.
def SI_BR_UNDEF : SPseudoInstSI <(outs), (ins sopp_brtarget:$simm16)> {
  let isTerminator = 1;
  let usesCustomInserter = 1;
}

def SI_PS_LIVE : PseudoInstSI <
  (outs SReg_64:$dst), (ins),
  [(set i1:$dst, (int_amdgcn_ps_live))]> {
  let SALU = 1;
}

// Used as an isel pseudo to directly emit initialization with an
// s_mov_b32 rather than a copy of another initialized
// register. MachineCSE skips copies, and we don't want to have to
// fold operands before it runs.
def SI_INIT_M0 : SPseudoInstSI <(outs), (ins SSrc_b32:$src)> {
  let Defs = [M0];
  let usesCustomInserter = 1;
  let isAsCheapAsAMove = 1;
  let isReMaterializable = 1;
}

def SI_RETURN : SPseudoInstSI <
  (outs), (ins variable_ops), [(AMDGPUreturn)]> {
  let isTerminator = 1;
  let isBarrier = 1;
  let isReturn = 1;
  let hasSideEffects = 1;
  let hasNoSchedulingInfo = 1;
  let DisableWQM = 1;
}

let Defs = [M0, EXEC],
    UseNamedOperandTable = 1 in {

class SI_INDIRECT_SRC<RegisterClass rc> : VPseudoInstSI <
  (outs VGPR_32:$vdst),
  (ins rc:$src, VS_32:$idx, i32imm:$offset)> {
  let usesCustomInserter = 1;
}

class SI_INDIRECT_DST<RegisterClass rc> : VPseudoInstSI <
  (outs rc:$vdst),
  (ins rc:$src, VS_32:$idx, i32imm:$offset, VGPR_32:$val)> {
  let Constraints = "$src = $vdst";
  let usesCustomInserter = 1;
}

// TODO: We can support indirect SGPR access.
def SI_INDIRECT_SRC_V1 : SI_INDIRECT_SRC<VGPR_32>;
def SI_INDIRECT_SRC_V2 : SI_INDIRECT_SRC<VReg_64>;
def SI_INDIRECT_SRC_V4 : SI_INDIRECT_SRC<VReg_128>;
def SI_INDIRECT_SRC_V8 : SI_INDIRECT_SRC<VReg_256>;
def SI_INDIRECT_SRC_V16 : SI_INDIRECT_SRC<VReg_512>;

def SI_INDIRECT_DST_V1 : SI_INDIRECT_DST<VGPR_32>;
def SI_INDIRECT_DST_V2 : SI_INDIRECT_DST<VReg_64>;
def SI_INDIRECT_DST_V4 : SI_INDIRECT_DST<VReg_128>;
def SI_INDIRECT_DST_V8 : SI_INDIRECT_DST<VReg_256>;
def SI_INDIRECT_DST_V16 : SI_INDIRECT_DST<VReg_512>;

} // End Defs = [M0, EXEC], UseNamedOperandTable = 1

multiclass SI_SPILL_SGPR <RegisterClass sgpr_class> {
  let UseNamedOperandTable = 1, SGPRSpill = 1, Uses = [EXEC] in {
    def _SAVE : PseudoInstSI <
      (outs),
      (ins sgpr_class:$data, i32imm:$addr)> {
      let mayStore = 1;
      let mayLoad = 0;
    }

    def _RESTORE : PseudoInstSI <
      (outs sgpr_class:$data),
      (ins i32imm:$addr)> {
      let mayStore = 0;
      let mayLoad = 1;
    }
  } // End UseNamedOperandTable = 1
}

// You cannot use M0 as the output of v_readlane_b32 instructions or
// use it in the sdata operand of SMEM instructions. We still need to
// be able to spill the physical register m0, so allow it for
// SI_SPILL_32_* instructions.
defm SI_SPILL_S32 : SI_SPILL_SGPR <SReg_32>;
defm SI_SPILL_S64 : SI_SPILL_SGPR <SReg_64>;
defm SI_SPILL_S128 : SI_SPILL_SGPR <SReg_128>;
defm SI_SPILL_S256 : SI_SPILL_SGPR <SReg_256>;
defm SI_SPILL_S512 : SI_SPILL_SGPR <SReg_512>;

multiclass SI_SPILL_VGPR <RegisterClass vgpr_class> {
  let UseNamedOperandTable = 1, VGPRSpill = 1,
      SchedRW = [WriteVMEM] in {
    def _SAVE : VPseudoInstSI <
      (outs),
      (ins vgpr_class:$vdata, i32imm:$vaddr, SReg_128:$srsrc,
           SReg_32:$soffset, i32imm:$offset)> {
      let mayStore = 1;
      let mayLoad = 0;
      // (2 * 4) + (8 * num_subregs) bytes maximum
      let Size = !add(!shl(!srl(vgpr_class.Size, 5), 3), 8);
    }

    def _RESTORE : VPseudoInstSI <
      (outs vgpr_class:$vdata),
      (ins i32imm:$vaddr, SReg_128:$srsrc, SReg_32:$soffset,
           i32imm:$offset)> {
      let mayStore = 0;
      let mayLoad = 1;

      // (2 * 4) + (8 * num_subregs) bytes maximum
      let Size = !add(!shl(!srl(vgpr_class.Size, 5), 3), 8);
    }
  } // End UseNamedOperandTable = 1, VGPRSpill = 1, SchedRW = [WriteVMEM]
}
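
// A worked example of the Size formula above (illustrative, not taken from
// the original sources): for VReg_128, vgpr_class.Size is 128 bits, so
// !srl(128, 5) = 4 sub-registers, !shl(4, 3) = 32 bytes for the per-subreg
// memory instructions, plus the fixed (2 * 4) = 8 bytes, i.e. a 40-byte
// maximum for the expanded spill sequence.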

defm SI_SPILL_V32 : SI_SPILL_VGPR <VGPR_32>;
defm SI_SPILL_V64 : SI_SPILL_VGPR <VReg_64>;
defm SI_SPILL_V96 : SI_SPILL_VGPR <VReg_96>;
defm SI_SPILL_V128 : SI_SPILL_VGPR <VReg_128>;
defm SI_SPILL_V256 : SI_SPILL_VGPR <VReg_256>;
defm SI_SPILL_V512 : SI_SPILL_VGPR <VReg_512>;

def SI_PC_ADD_REL_OFFSET : SPseudoInstSI <
  (outs SReg_64:$dst),
  (ins si_ga:$ptr_lo, si_ga:$ptr_hi),
  [(set SReg_64:$dst,
    (i64 (SIpc_add_rel_offset (tglobaladdr:$ptr_lo), (tglobaladdr:$ptr_hi))))]> {
  let Defs = [SCC];
}

} // End SubtargetPredicate = isGCN

let Predicates = [isGCN] in {

def : Pat<
  (int_amdgcn_else i64:$src, bb:$target),
  (SI_ELSE $src, $target, 0)
>;

def : Pat <
  (int_AMDGPU_kilp),
  (SI_KILL (i32 0xbf800000))
>;

//===----------------------------------------------------------------------===//
// VOP1 Patterns
//===----------------------------------------------------------------------===//

let Predicates = [UnsafeFPMath] in {

//def : RcpPat<V_RCP_F64_e32, f64>;
//defm : RsqPat<V_RSQ_F64_e32, f64>;
//defm : RsqPat<V_RSQ_F32_e32, f32>;

def : RsqPat<V_RSQ_F32_e32, f32>;
def : RsqPat<V_RSQ_F64_e32, f64>;

// Convert (x - floor(x)) to fract(x)
def : Pat <
  (f32 (fsub (f32 (VOP3Mods f32:$x, i32:$mods)),
             (f32 (ffloor (f32 (VOP3Mods f32:$x, i32:$mods)))))),
  (V_FRACT_F32_e64 $mods, $x, DSTCLAMP.NONE, DSTOMOD.NONE)
>;

// Convert (x + (-floor(x))) to fract(x)
def : Pat <
  (f64 (fadd (f64 (VOP3Mods f64:$x, i32:$mods)),
             (f64 (fneg (f64 (ffloor (f64 (VOP3Mods f64:$x, i32:$mods)))))))),
  (V_FRACT_F64_e64 $mods, $x, DSTCLAMP.NONE, DSTOMOD.NONE)
>;

} // End Predicates = [UnsafeFPMath]

def : Pat <
  (f32 (fpextend f16:$src)),
  (V_CVT_F32_F16_e32 $src)
>;

def : Pat <
  (f64 (fpextend f16:$src)),
  (V_CVT_F64_F32_e32 (V_CVT_F32_F16_e32 $src))
>;

def : Pat <
  (f16 (fpround f32:$src)),
  (V_CVT_F16_F32_e32 $src)
>;

def : Pat <
  (i32 (fp_to_sint f16:$src)),
  (V_CVT_I32_F32_e32 (V_CVT_F32_F16_e32 $src))
>;

def : Pat <
  (i32 (fp_to_uint f16:$src)),
  (V_CVT_U32_F32_e32 (V_CVT_F32_F16_e32 $src))
>;

def : Pat <
  (f16 (sint_to_fp i32:$src)),
  (V_CVT_F16_F32_e32 (V_CVT_F32_I32_e32 $src))
>;

def : Pat <
  (f16 (uint_to_fp i32:$src)),
  (V_CVT_F16_F32_e32 (V_CVT_F32_U32_e32 $src))
>;

//===----------------------------------------------------------------------===//
// VOP2 Patterns
//===----------------------------------------------------------------------===//

multiclass FMADPat <ValueType vt, Instruction inst> {
  def : Pat <
    (vt (fmad (VOP3NoMods0 vt:$src0, i32:$src0_modifiers, i1:$clamp, i32:$omod),
              (VOP3NoMods vt:$src1, i32:$src1_modifiers),
              (VOP3NoMods vt:$src2, i32:$src2_modifiers))),
    (inst $src0_modifiers, $src0, $src1_modifiers, $src1,
          $src2_modifiers, $src2, $clamp, $omod)
  >;
}

defm : FMADPat <f16, V_MAC_F16_e64>;
defm : FMADPat <f32, V_MAC_F32_e64>;

multiclass SelectPat <ValueType vt, Instruction inst> {
  def : Pat <
    (vt (select i1:$src0, vt:$src1, vt:$src2)),
    (inst $src2, $src1, $src0)
  >;
}

defm : SelectPat <i16, V_CNDMASK_B32_e64>;
defm : SelectPat <i32, V_CNDMASK_B32_e64>;
defm : SelectPat <f16, V_CNDMASK_B32_e64>;
defm : SelectPat <f32, V_CNDMASK_B32_e64>;

def : Pat <
  (i32 (add (i32 (ctpop i32:$popcnt)), i32:$val)),
  (V_BCNT_U32_B32_e64 $popcnt, $val)
>;

/********** ============================================ **********/
/********** Extraction, Insertion, Building and Casting  **********/
/********** ============================================ **********/

foreach Index = 0-2 in {
  def Extract_Element_v2i32_#Index : Extract_Element <
    i32, v2i32, Index, !cast<SubRegIndex>(sub#Index)
  >;
  def Insert_Element_v2i32_#Index : Insert_Element <
    i32, v2i32, Index, !cast<SubRegIndex>(sub#Index)
  >;

  def Extract_Element_v2f32_#Index : Extract_Element <
    f32, v2f32, Index, !cast<SubRegIndex>(sub#Index)
  >;
  def Insert_Element_v2f32_#Index : Insert_Element <
    f32, v2f32, Index, !cast<SubRegIndex>(sub#Index)
  >;
}

foreach Index = 0-3 in {
  def Extract_Element_v4i32_#Index : Extract_Element <
    i32, v4i32, Index, !cast<SubRegIndex>(sub#Index)
  >;
  def Insert_Element_v4i32_#Index : Insert_Element <
    i32, v4i32, Index, !cast<SubRegIndex>(sub#Index)
  >;

  def Extract_Element_v4f32_#Index : Extract_Element <
    f32, v4f32, Index, !cast<SubRegIndex>(sub#Index)
  >;
  def Insert_Element_v4f32_#Index : Insert_Element <
    f32, v4f32, Index, !cast<SubRegIndex>(sub#Index)
  >;
}

foreach Index = 0-7 in {
  def Extract_Element_v8i32_#Index : Extract_Element <
    i32, v8i32, Index, !cast<SubRegIndex>(sub#Index)
  >;
  def Insert_Element_v8i32_#Index : Insert_Element <
    i32, v8i32, Index, !cast<SubRegIndex>(sub#Index)
  >;

  def Extract_Element_v8f32_#Index : Extract_Element <
    f32, v8f32, Index, !cast<SubRegIndex>(sub#Index)
  >;
  def Insert_Element_v8f32_#Index : Insert_Element <
    f32, v8f32, Index, !cast<SubRegIndex>(sub#Index)
  >;
}

foreach Index = 0-15 in {
  def Extract_Element_v16i32_#Index : Extract_Element <
    i32, v16i32, Index, !cast<SubRegIndex>(sub#Index)
  >;
  def Insert_Element_v16i32_#Index : Insert_Element <
    i32, v16i32, Index, !cast<SubRegIndex>(sub#Index)
  >;

  def Extract_Element_v16f32_#Index : Extract_Element <
    f32, v16f32, Index, !cast<SubRegIndex>(sub#Index)
  >;
  def Insert_Element_v16f32_#Index : Insert_Element <
    f32, v16f32, Index, !cast<SubRegIndex>(sub#Index)
  >;
}

// FIXME: Why are only some of these type combinations handled for SReg and
// VReg?
// 16-bit bitcast
def : BitConvert <i16, f16, VGPR_32>;
def : BitConvert <f16, i16, VGPR_32>;
def : BitConvert <i16, f16, SReg_32>;
def : BitConvert <f16, i16, SReg_32>;

// 32-bit bitcast
def : BitConvert <i32, f32, VGPR_32>;
def : BitConvert <f32, i32, VGPR_32>;
def : BitConvert <i32, f32, SReg_32>;
def : BitConvert <f32, i32, SReg_32>;

// 64-bit bitcast
def : BitConvert <i64, f64, VReg_64>;
def : BitConvert <f64, i64, VReg_64>;
def : BitConvert <v2i32, v2f32, VReg_64>;
def : BitConvert <v2f32, v2i32, VReg_64>;
def : BitConvert <i64, v2i32, VReg_64>;
def : BitConvert <v2i32, i64, VReg_64>;
def : BitConvert <i64, v2f32, VReg_64>;
def : BitConvert <v2f32, i64, VReg_64>;
def : BitConvert <f64, v2f32, VReg_64>;
def : BitConvert <v2f32, f64, VReg_64>;
def : BitConvert <f64, v2i32, VReg_64>;
def : BitConvert <v2i32, f64, VReg_64>;
def : BitConvert <v4i32, v4f32, VReg_128>;
def : BitConvert <v4f32, v4i32, VReg_128>;

// 128-bit bitcast
def : BitConvert <v2i64, v4i32, SReg_128>;
def : BitConvert <v4i32, v2i64, SReg_128>;
def : BitConvert <v2f64, v4f32, VReg_128>;
def : BitConvert <v2f64, v4i32, VReg_128>;
def : BitConvert <v4f32, v2f64, VReg_128>;
def : BitConvert <v4i32, v2f64, VReg_128>;
def : BitConvert <v2i64, v2f64, VReg_128>;
def : BitConvert <v2f64, v2i64, VReg_128>;

// 256-bit bitcast
def : BitConvert <v8i32, v8f32, SReg_256>;
def : BitConvert <v8f32, v8i32, SReg_256>;
def : BitConvert <v8i32, v8f32, VReg_256>;
def : BitConvert <v8f32, v8i32, VReg_256>;

// 512-bit bitcast
def : BitConvert <v16i32, v16f32, VReg_512>;
def : BitConvert <v16f32, v16i32, VReg_512>;

/********** =================== **********/
/********** Src & Dst modifiers **********/
/********** =================== **********/

def : Pat <
  (AMDGPUclamp (VOP3Mods0Clamp f32:$src0, i32:$src0_modifiers, i32:$omod),
               (f32 FP_ZERO), (f32 FP_ONE)),
  (V_ADD_F32_e64 $src0_modifiers, $src0, 0, (i32 0), 1, $omod)
>;

/********** ================================ **********/
/********** Floating point absolute/negative **********/
/********** ================================ **********/

// Prevent expanding both fneg and fabs.

def : Pat <
  (fneg (fabs f32:$src)),
  (S_OR_B32 $src, (S_MOV_B32 (i32 0x80000000))) // Set sign bit
>;

// FIXME: Should use S_OR_B32
def : Pat <
  (fneg (fabs f64:$src)),
  (REG_SEQUENCE VReg_64,
    (i32 (EXTRACT_SUBREG f64:$src, sub0)),
    sub0,
    (V_OR_B32_e32 (i32 (EXTRACT_SUBREG f64:$src, sub1)),
                  (V_MOV_B32_e32 (i32 0x80000000))), // Set sign bit.
    sub1)
>;

def : Pat <
  (fabs f32:$src),
  (V_AND_B32_e64 $src, (V_MOV_B32_e32 (i32 0x7fffffff)))
>;

def : Pat <
  (fneg f32:$src),
  (V_XOR_B32_e32 $src, (V_MOV_B32_e32 (i32 0x80000000)))
>;

def : Pat <
  (fabs f64:$src),
  (REG_SEQUENCE VReg_64,
    (i32 (EXTRACT_SUBREG f64:$src, sub0)),
    sub0,
    (V_AND_B32_e64 (i32 (EXTRACT_SUBREG f64:$src, sub1)),
                   (V_MOV_B32_e32 (i32 0x7fffffff))), // Clear sign bit.
    sub1)
>;

def : Pat <
  (fneg f64:$src),
  (REG_SEQUENCE VReg_64,
    (i32 (EXTRACT_SUBREG f64:$src, sub0)),
    sub0,
    (V_XOR_B32_e32 (i32 (EXTRACT_SUBREG f64:$src, sub1)),
                   (i32 (V_MOV_B32_e32 (i32 0x80000000)))),
    sub1)
>;

def : Pat <
  (fcopysign f16:$src0, f16:$src1),
  (V_BFI_B32 (S_MOV_B32 (i32 0x00007fff)), $src0, $src1)
>;

def : Pat <
  (fcopysign f32:$src0, f16:$src1),
  (V_BFI_B32 (S_MOV_B32 (i32 0x7fffffff)), $src0,
             (V_LSHLREV_B32_e64 (i32 16), $src1))
>;

def : Pat <
  (fcopysign f64:$src0, f16:$src1),
  (REG_SEQUENCE SReg_64,
    (i32 (EXTRACT_SUBREG $src0, sub0)), sub0,
    (V_BFI_B32 (S_MOV_B32 (i32 0x7fffffff)), (i32 (EXTRACT_SUBREG $src0, sub1)),
               (V_LSHLREV_B32_e64 (i32 16), $src1)), sub1)
>;

def : Pat <
  (fcopysign f16:$src0, f32:$src1),
  (V_BFI_B32 (S_MOV_B32 (i32 0x00007fff)), $src0,
             (V_LSHRREV_B32_e64 (i32 16), $src1))
>;

def : Pat <
  (fcopysign f16:$src0, f64:$src1),
  (V_BFI_B32 (S_MOV_B32 (i32 0x00007fff)), $src0,
             (V_LSHRREV_B32_e64 (i32 16), (EXTRACT_SUBREG $src1, sub1)))
>;

def : Pat <
  (fneg f16:$src),
  (V_XOR_B32_e32 $src, (V_MOV_B32_e32 (i32 0x00008000)))
>;

def : Pat <
  (fabs f16:$src),
  (V_AND_B32_e64 $src, (V_MOV_B32_e32 (i32 0x00007fff)))
>;

def : Pat <
  (fneg (fabs f16:$src)),
  (S_OR_B32 $src, (S_MOV_B32 (i32 0x00008000))) // Set sign bit
>;

/********** ================== **********/
/********** Immediate Patterns **********/
/********** ================== **********/

def : Pat <
  (VGPRImm<(i32 imm)>:$imm),
  (V_MOV_B32_e32 imm:$imm)
>;

def : Pat <
  (VGPRImm<(f32 fpimm)>:$imm),
  (V_MOV_B32_e32 (f32 (bitcast_fpimm_to_i32 $imm)))
>;

def : Pat <
  (i32 imm:$imm),
  (S_MOV_B32 imm:$imm)
>;

// FIXME: Workaround for ordering issue with peephole optimizer where
// a register class copy interferes with immediate folding. Should
// use s_mov_b32, which can be shrunk to s_movk_i32
def : Pat <
  (VGPRImm<(f16 fpimm)>:$imm),
  (V_MOV_B32_e32 (f16 (bitcast_fpimm_to_i32 $imm)))
>;

def : Pat <
  (f32 fpimm:$imm),
  (S_MOV_B32 (f32 (bitcast_fpimm_to_i32 $imm)))
>;

def : Pat <
  (f16 fpimm:$imm),
  (S_MOV_B32 (i32 (bitcast_fpimm_to_i32 $imm)))
>;

def : Pat <
  (i32 frameindex:$fi),
  (V_MOV_B32_e32 (i32 (frameindex_to_targetframeindex $fi)))
>;

def : Pat <
  (i64 InlineImm<i64>:$imm),
  (S_MOV_B64 InlineImm<i64>:$imm)
>;

// XXX - Should this use a s_cmp to set SCC?

// Set to sign-extended 64-bit value (true = -1, false = 0)
def : Pat <
  (i1 imm:$imm),
  (S_MOV_B64 (i64 (as_i64imm $imm)))
>;

def : Pat <
  (f64 InlineFPImm<f64>:$imm),
  (S_MOV_B64 (f64 (bitcast_fpimm_to_i64 InlineFPImm<f64>:$imm)))
>;

/********** ================== **********/
/********** Intrinsic Patterns **********/
/********** ================== **********/

def : POW_Common <V_LOG_F32_e32, V_EXP_F32_e32, V_MUL_LEGACY_F32_e32>;

def : Pat <
  (int_AMDGPU_cube v4f32:$src),
  (REG_SEQUENCE VReg_128,
    (V_CUBETC_F32 0 /* src0_modifiers */, (f32 (EXTRACT_SUBREG $src, sub0)),
                  0 /* src1_modifiers */, (f32 (EXTRACT_SUBREG $src, sub1)),
                  0 /* src2_modifiers */, (f32 (EXTRACT_SUBREG $src, sub2)),
                  0 /* clamp */, 0 /* omod */), sub0,
    (V_CUBESC_F32 0 /* src0_modifiers */, (f32 (EXTRACT_SUBREG $src, sub0)),
                  0 /* src1_modifiers */, (f32 (EXTRACT_SUBREG $src, sub1)),
                  0 /* src2_modifiers */, (f32 (EXTRACT_SUBREG $src, sub2)),
                  0 /* clamp */, 0 /* omod */), sub1,
    (V_CUBEMA_F32 0 /* src0_modifiers */, (f32 (EXTRACT_SUBREG $src, sub0)),
                  0 /* src1_modifiers */, (f32 (EXTRACT_SUBREG $src, sub1)),
                  0 /* src2_modifiers */, (f32 (EXTRACT_SUBREG $src, sub2)),
                  0 /* clamp */, 0 /* omod */), sub2,
    (V_CUBEID_F32 0 /* src0_modifiers */, (f32 (EXTRACT_SUBREG $src, sub0)),
                  0 /* src1_modifiers */, (f32 (EXTRACT_SUBREG $src, sub1)),
                  0 /* src2_modifiers */, (f32 (EXTRACT_SUBREG $src, sub2)),
                  0 /* clamp */, 0 /* omod */), sub3)
>;

def : Pat <
  (i32 (sext i1:$src0)),
  (V_CNDMASK_B32_e64 (i32 0), (i32 -1), $src0)
>;

class Ext32Pat <SDNode ext> : Pat <
  (i32 (ext i1:$src0)),
  (V_CNDMASK_B32_e64 (i32 0), (i32 1), $src0)
>;

def : Ext32Pat <zext>;
def : Ext32Pat <anyext>;

// The multiplication scales from [0,1] to the unsigned integer range.
def : Pat <
  (AMDGPUurecip i32:$src0),
  (V_CVT_U32_F32_e32
    (V_MUL_F32_e32 (i32 CONST.FP_UINT_MAX_PLUS_1),
                   (V_RCP_IFLAG_F32_e32 (V_CVT_F32_U32_e32 $src0))))
>;

//===----------------------------------------------------------------------===//
// VOP3 Patterns
//===----------------------------------------------------------------------===//

def : IMad24Pat<V_MAD_I32_I24>;
def : UMad24Pat<V_MAD_U32_U24>;

defm : BFIPatterns <V_BFI_B32, S_MOV_B32, SReg_64>;
def : ROTRPattern <V_ALIGNBIT_B32>;

/********** ====================== **********/
/**********  Indirect addressing   **********/
/********** ====================== **********/

multiclass SI_INDIRECT_Pattern <ValueType vt, ValueType eltvt, string VecSize> {
  // Extract with offset
  def : Pat<
    (eltvt (extractelt vt:$src, (MOVRELOffset i32:$idx, (i32 imm:$offset)))),
    (!cast<Instruction>("SI_INDIRECT_SRC_"#VecSize) $src, $idx, imm:$offset)
  >;

  // Insert with offset
  def : Pat<
    (insertelt vt:$src, eltvt:$val, (MOVRELOffset i32:$idx, (i32 imm:$offset))),
    (!cast<Instruction>("SI_INDIRECT_DST_"#VecSize) $src, $idx, imm:$offset, $val)
  >;
}

defm : SI_INDIRECT_Pattern <v2f32, f32, "V2">;
defm : SI_INDIRECT_Pattern <v4f32, f32, "V4">;
defm : SI_INDIRECT_Pattern <v8f32, f32, "V8">;
defm : SI_INDIRECT_Pattern <v16f32, f32, "V16">;

defm : SI_INDIRECT_Pattern <v2i32, i32, "V2">;
defm : SI_INDIRECT_Pattern <v4i32, i32, "V4">;
defm : SI_INDIRECT_Pattern <v8i32, i32, "V8">;
defm : SI_INDIRECT_Pattern <v16i32, i32, "V16">;

//===----------------------------------------------------------------------===//
// SAD Patterns
//===----------------------------------------------------------------------===//

def : Pat <
  (add (sub_oneuse (umax i32:$src0, i32:$src1),
                   (umin i32:$src0, i32:$src1)),
       i32:$src2),
  (V_SAD_U32 $src0, $src1, $src2)
>;

def : Pat <
  (add (select_oneuse (i1 (setugt i32:$src0, i32:$src1)),
                      (sub i32:$src0, i32:$src1),
                      (sub i32:$src1, i32:$src0)),
       i32:$src2),
  (V_SAD_U32 $src0, $src1, $src2)
>;

//===----------------------------------------------------------------------===//
// Conversion Patterns
//===----------------------------------------------------------------------===//

def : Pat<(i32 (sext_inreg i32:$src, i1)),
  (S_BFE_I32 i32:$src, (i32 65536))>; // 0 | 1 << 16
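
// As the "0 | width << 16" comments on these BFE patterns indicate, the second
// S_BFE operand packs the bitfield as (width << 16) | offset: (i32 65536)
// above selects a 1-bit field at offset 0, and (i32 0x80000) below selects an
// 8-bit field at offset 0.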

// Handle sext_inreg in i64
def : Pat <
  (i64 (sext_inreg i64:$src, i1)),
  (S_BFE_I64 i64:$src, (i32 0x10000)) // 0 | 1 << 16
>;

def : Pat <
  (i16 (sext_inreg i16:$src, i1)),
  (S_BFE_I32 $src, (i32 0x00010000)) // 0 | 1 << 16
>;

def : Pat <
  (i16 (sext_inreg i16:$src, i8)),
  (S_BFE_I32 $src, (i32 0x80000)) // 0 | 8 << 16
>;

def : Pat <
  (i64 (sext_inreg i64:$src, i8)),
  (S_BFE_I64 i64:$src, (i32 0x80000)) // 0 | 8 << 16
>;

def : Pat <
  (i64 (sext_inreg i64:$src, i16)),
  (S_BFE_I64 i64:$src, (i32 0x100000)) // 0 | 16 << 16
>;

def : Pat <
  (i64 (sext_inreg i64:$src, i32)),
  (S_BFE_I64 i64:$src, (i32 0x200000)) // 0 | 32 << 16
>;

def : Pat <
  (i64 (zext i32:$src)),
  (REG_SEQUENCE SReg_64, $src, sub0, (S_MOV_B32 (i32 0)), sub1)
>;

def : Pat <
  (i64 (anyext i32:$src)),
  (REG_SEQUENCE SReg_64, $src, sub0, (i32 (IMPLICIT_DEF)), sub1)
>;

class ZExt_i64_i1_Pat <SDNode ext> : Pat <
  (i64 (ext i1:$src)),
  (REG_SEQUENCE VReg_64,
    (V_CNDMASK_B32_e64 (i32 0), (i32 1), $src), sub0,
    (S_MOV_B32 (i32 0)), sub1)
>;


def : ZExt_i64_i1_Pat<zext>;
def : ZExt_i64_i1_Pat<anyext>;

// FIXME: We need to use COPY_TO_REGCLASS to work-around the fact that
// REG_SEQUENCE patterns don't support instructions with multiple outputs.
def : Pat <
  (i64 (sext i32:$src)),
  (REG_SEQUENCE SReg_64, $src, sub0,
    (i32 (COPY_TO_REGCLASS (S_ASHR_I32 $src, (i32 31)), SReg_32_XM0)), sub1)
>;

def : Pat <
  (i64 (sext i1:$src)),
  (REG_SEQUENCE VReg_64,
    (V_CNDMASK_B32_e64 (i32 0), (i32 -1), $src), sub0,
    (V_CNDMASK_B32_e64 (i32 0), (i32 -1), $src), sub1)
>;

class FPToI1Pat<Instruction Inst, int KOne, ValueType kone_type, ValueType vt,
                SDPatternOperator fp_to_int> : Pat <
  (i1 (fp_to_int (vt (VOP3Mods vt:$src0, i32:$src0_modifiers)))),
  (i1 (Inst 0, (kone_type KOne), $src0_modifiers, $src0, DSTCLAMP.NONE, DSTOMOD.NONE))
>;

def : FPToI1Pat<V_CMP_EQ_F32_e64, CONST.FP32_ONE, i32, f32, fp_to_uint>;
def : FPToI1Pat<V_CMP_EQ_F32_e64, CONST.FP32_NEG_ONE, i32, f32, fp_to_sint>;
def : FPToI1Pat<V_CMP_EQ_F64_e64, CONST.FP64_ONE, i64, f64, fp_to_uint>;
def : FPToI1Pat<V_CMP_EQ_F64_e64, CONST.FP64_NEG_ONE, i64, f64, fp_to_sint>;

// If we need to perform a logical operation on i1 values, we need to
// use vector comparisons since there is only one SCC register. Vector
// comparisons still write to a pair of SGPRs, so treat these as
// 64-bit comparisons. When legalizing SGPR copies, instructions
// resulting in the copies from SCC to these instructions will be
// moved to the VALU.
def : Pat <
  (i1 (and i1:$src0, i1:$src1)),
  (S_AND_B64 $src0, $src1)
>;

def : Pat <
  (i1 (or i1:$src0, i1:$src1)),
  (S_OR_B64 $src0, $src1)
>;

def : Pat <
  (i1 (xor i1:$src0, i1:$src1)),
  (S_XOR_B64 $src0, $src1)
>;

def : Pat <
  (f32 (sint_to_fp i1:$src)),
  (V_CNDMASK_B32_e64 (i32 0), (i32 CONST.FP32_NEG_ONE), $src)
>;

def : Pat <
  (f32 (uint_to_fp i1:$src)),
  (V_CNDMASK_B32_e64 (i32 0), (i32 CONST.FP32_ONE), $src)
>;

def : Pat <
  (f64 (sint_to_fp i1:$src)),
  (V_CVT_F64_I32_e32 (V_CNDMASK_B32_e64 (i32 0), (i32 -1), $src))
>;

def : Pat <
  (f64 (uint_to_fp i1:$src)),
  (V_CVT_F64_U32_e32 (V_CNDMASK_B32_e64 (i32 0), (i32 1), $src))
>;

//===----------------------------------------------------------------------===//
// Miscellaneous Patterns
//===----------------------------------------------------------------------===//

def : Pat <
  (i32 (trunc i64:$a)),
  (EXTRACT_SUBREG $a, sub0)
>;

def : Pat <
  (i1 (trunc i32:$a)),
  (V_CMP_EQ_U32_e64 (S_AND_B32 (i32 1), $a), (i32 1))
>;

def : Pat <
  (i1 (trunc i64:$a)),
  (V_CMP_EQ_U32_e64 (S_AND_B32 (i32 1),
                    (i32 (EXTRACT_SUBREG $a, sub0))), (i32 1))
>;

def : Pat <
  (i32 (bswap i32:$a)),
  (V_BFI_B32 (S_MOV_B32 (i32 0x00ff00ff)),
             (V_ALIGNBIT_B32 $a, $a, (i32 24)),
             (V_ALIGNBIT_B32 $a, $a, (i32 8)))
>;
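
// In the bswap expansion above, each rotate already places two of the four
// bytes in their swapped positions: the 0x00ff00ff mask makes V_BFI_B32 take
// result bytes 0 and 2 from rotr(a, 24) and result bytes 1 and 3 from
// rotr(a, 8), producing the fully byte-reversed value.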

multiclass BFMPatterns <ValueType vt, InstSI BFM, InstSI MOV> {
  def : Pat <
    (vt (shl (vt (add (vt (shl 1, vt:$a)), -1)), vt:$b)),
    (BFM $a, $b)
  >;

  def : Pat <
    (vt (add (vt (shl 1, vt:$a)), -1)),
    (BFM $a, (MOV (i32 0)))
  >;
}
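
// Illustrative example (not from the original comments): with $a = 4 and
// $b = 8, ((1 << 4) - 1) << 8 = 0x00000f00, i.e. a 4-bit mask at offset 8,
// which a single bitfield-mask instruction produces directly.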

defm : BFMPatterns <i32, S_BFM_B32, S_MOV_B32>;
// FIXME: defm : BFMPatterns <i64, S_BFM_B64, S_MOV_B64>;

def : BFEPattern <V_BFE_U32, S_MOV_B32>;

def : Pat<
  (fcanonicalize f16:$src),
  (V_MUL_F16_e64 0, (i32 CONST.FP16_ONE), 0, $src, 0, 0)
>;

def : Pat<
  (fcanonicalize f32:$src),
  (V_MUL_F32_e64 0, (i32 CONST.FP32_ONE), 0, $src, 0, 0)
>;

def : Pat<
  (fcanonicalize f64:$src),
  (V_MUL_F64 0, CONST.FP64_ONE, 0, $src, 0, 0)
>;

// Allow integer inputs
class ExpPattern<SDPatternOperator node, ValueType vt, Instruction Inst> : Pat<
  (node (i8 timm:$tgt), (i8 timm:$en), vt:$src0, vt:$src1, vt:$src2, vt:$src3, (i1 timm:$compr), (i1 timm:$vm)),
  (Inst i8:$tgt, vt:$src0, vt:$src1, vt:$src2, vt:$src3, i1:$vm, i1:$compr, i8:$en)
>;

def : ExpPattern<AMDGPUexport, i32, EXP>;
def : ExpPattern<AMDGPUexport_done, i32, EXP_DONE>;

//===----------------------------------------------------------------------===//
// Fract Patterns
//===----------------------------------------------------------------------===//

let Predicates = [isSI] in {

// V_FRACT is buggy on SI, so the F32 version is never used and (x-floor(x)) is
// used instead. However, SI doesn't have V_FLOOR_F64, so the most efficient
// way to implement it is using V_FRACT_F64.
// The workaround for the V_FRACT bug is:
//    fract(x) = isnan(x) ? x : min(V_FRACT(x), 0.99999999999999999)

// Convert floor(x) to (x - fract(x))
def : Pat <
  (f64 (ffloor (f64 (VOP3Mods f64:$x, i32:$mods)))),
  (V_ADD_F64
      $mods,
      $x,
      SRCMODS.NEG,
      (V_CNDMASK_B64_PSEUDO
         (V_MIN_F64
             SRCMODS.NONE,
             (V_FRACT_F64_e64 $mods, $x, DSTCLAMP.NONE, DSTOMOD.NONE),
             SRCMODS.NONE,
             (V_MOV_B64_PSEUDO 0x3fefffffffffffff),
             DSTCLAMP.NONE, DSTOMOD.NONE),
         $x,
         (V_CMP_CLASS_F64_e64 SRCMODS.NONE, $x, (i32 3 /*NaN*/))),
      DSTCLAMP.NONE, DSTOMOD.NONE)
>;
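
// 0x3fefffffffffffff above is the largest f64 value below 1.0, i.e. the
// 0.99999999999999999 clamp value mentioned in the workaround note.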

} // End Predicates = [isSI]

//============================================================================//
// Miscellaneous Optimization Patterns
//============================================================================//

def : SHA256MaPattern <V_BFI_B32, V_XOR_B32_e64>;

def : IntMed3Pat<V_MED3_I32, smax, smax_oneuse, smin_oneuse>;
def : IntMed3Pat<V_MED3_U32, umax, umax_oneuse, umin_oneuse>;


// Undo sub x, c -> add x, -c canonicalization since c is more likely
// an inline immediate than -c.
// TODO: Also do for 64-bit.
def : Pat<
  (add i32:$src0, (i32 NegSubInlineConst32:$src1)),
  (S_SUB_I32 $src0, NegSubInlineConst32:$src1)
>;
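
// For example (illustration only): (sub x, 64) is canonicalized to
// (add x, -64); -64 is not an inline constant but 64 is, so selecting
// s_sub_i32 x, 64 here avoids a literal operand.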

//============================================================================//
// Assembler aliases
//============================================================================//

def : MnemonicAlias<"v_add_u32", "v_add_i32">;
def : MnemonicAlias<"v_sub_u32", "v_sub_i32">;
def : MnemonicAlias<"v_subrev_u32", "v_subrev_i32">;

} // End isGCN predicate