//===-- SIInstructions.td - SI Instruction Definitions --------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
// This file was originally auto-generated from a GPU register header file and
// all the instruction definitions were originally commented out. Instructions
// that are not yet supported remain commented out.
//===----------------------------------------------------------------------===//

def isGCN : Predicate<"Subtarget->getGeneration() "
                      ">= SISubtarget::SOUTHERN_ISLANDS">,
            AssemblerPredicate<"FeatureGCN">;
def isSI : Predicate<"Subtarget->getGeneration() "
                     "== SISubtarget::SOUTHERN_ISLANDS">,
           AssemblerPredicate<"FeatureSouthernIslands">;

def has16BankLDS : Predicate<"Subtarget->getLDSBankCount() == 16">;
def has32BankLDS : Predicate<"Subtarget->getLDSBankCount() == 32">;
def HasVGPRIndexMode : Predicate<"Subtarget->hasVGPRIndexMode()">,
                       AssemblerPredicate<"FeatureVGPRIndexMode">;
def HasMovrel : Predicate<"Subtarget->hasMovrel()">,
                AssemblerPredicate<"FeatureMovrel">;

include "VOPInstructions.td"
include "SOPInstructions.td"
include "SMInstructions.td"
include "FLATInstructions.td"
include "BUFInstructions.td"

let SubtargetPredicate = isGCN in {

//===----------------------------------------------------------------------===//
// EXP Instructions
//===----------------------------------------------------------------------===//

defm EXP : EXP_m<0, AMDGPUexport>;
defm EXP_DONE : EXP_m<1, AMDGPUexport_done>;
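// EXP_DONE is the variant with the export "done" bit set (AMDGPUexport_done),
// marking a wave's final export.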

//===----------------------------------------------------------------------===//
// VINTRP Instructions
//===----------------------------------------------------------------------===//

let Uses = [M0, EXEC] in {

// FIXME: Specify SchedRW for VINTRP instructions.

multiclass V_INTERP_P1_F32_m : VINTRP_m <
  0x00000000,
  (outs VGPR_32:$vdst),
  (ins VGPR_32:$vsrc, Attr:$attr, AttrChan:$attrchan),
  "v_interp_p1_f32 $vdst, $vsrc, $attr$attrchan",
  [(set f32:$vdst, (AMDGPUinterp_p1 f32:$vsrc, (i32 imm:$attrchan),
                                    (i32 imm:$attr)))]
>;

let OtherPredicates = [has32BankLDS] in {

defm V_INTERP_P1_F32 : V_INTERP_P1_F32_m;

} // End OtherPredicates = [has32BankLDS]

let OtherPredicates = [has16BankLDS], Constraints = "@earlyclobber $vdst", isAsmParserOnly=1 in {

defm V_INTERP_P1_F32_16bank : V_INTERP_P1_F32_m;

} // End OtherPredicates = [has16BankLDS], Constraints = "@earlyclobber $vdst", isAsmParserOnly=1

let DisableEncoding = "$src0", Constraints = "$src0 = $vdst" in {

defm V_INTERP_P2_F32 : VINTRP_m <
  0x00000001,
  (outs VGPR_32:$vdst),
  (ins VGPR_32:$src0, VGPR_32:$vsrc, Attr:$attr, AttrChan:$attrchan),
  "v_interp_p2_f32 $vdst, $vsrc, $attr$attrchan",
  [(set f32:$vdst, (AMDGPUinterp_p2 f32:$src0, f32:$vsrc, (i32 imm:$attrchan),
                                    (i32 imm:$attr)))]>;

} // End DisableEncoding = "$src0", Constraints = "$src0 = $vdst"

defm V_INTERP_MOV_F32 : VINTRP_m <
  0x00000002,
  (outs VGPR_32:$vdst),
  (ins InterpSlot:$vsrc, Attr:$attr, AttrChan:$attrchan),
  "v_interp_mov_f32 $vdst, $vsrc, $attr$attrchan",
  [(set f32:$vdst, (AMDGPUinterp_mov (i32 imm:$vsrc), (i32 imm:$attrchan),
                                     (i32 imm:$attr)))]>;

} // End Uses = [M0, EXEC]

//===----------------------------------------------------------------------===//
// Pseudo Instructions
//===----------------------------------------------------------------------===//

let hasSideEffects = 0, mayLoad = 0, mayStore = 0, Uses = [EXEC] in {

// For use in patterns
def V_CNDMASK_B64_PSEUDO : VOP3Common <(outs VReg_64:$vdst),
  (ins VSrc_b64:$src0, VSrc_b64:$src1, SSrc_b64:$src2), "", []> {
  let isPseudo = 1;
  let isCodeGenOnly = 1;
  let usesCustomInserter = 1;
}

// 64-bit vector move instruction. This is mainly used by the SIFoldOperands
// pass to enable folding of inline immediates.
def V_MOV_B64_PSEUDO : VPseudoInstSI <(outs VReg_64:$vdst),
                                      (ins VSrc_b64:$src0)>;
} // End let hasSideEffects = 0, mayLoad = 0, mayStore = 0, Uses = [EXEC]

let usesCustomInserter = 1, SALU = 1 in {
def GET_GROUPSTATICSIZE : PseudoInstSI <(outs SReg_32:$sdst), (ins),
  [(set SReg_32:$sdst, (int_amdgcn_groupstaticsize))]>;
} // End let usesCustomInserter = 1, SALU = 1

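// Terminator forms of EXEC-manipulating SALU instructions, so lowered control
// flow can rewrite the exec mask at the end of a block without placing a
// non-terminator instruction after other terminators.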
def S_MOV_B64_term : PseudoInstSI<(outs SReg_64:$dst),
   (ins SSrc_b64:$src0)> {
  let SALU = 1;
  let isAsCheapAsAMove = 1;
  let isTerminator = 1;
}

def S_XOR_B64_term : PseudoInstSI<(outs SReg_64:$dst),
   (ins SSrc_b64:$src0, SSrc_b64:$src1)> {
  let SALU = 1;
  let isAsCheapAsAMove = 1;
  let isTerminator = 1;
}

def S_ANDN2_B64_term : PseudoInstSI<(outs SReg_64:$dst),
   (ins SSrc_b64:$src0, SSrc_b64:$src1)> {
  let SALU = 1;
  let isAsCheapAsAMove = 1;
  let isTerminator = 1;
}

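// Pseudo for the llvm.amdgcn.wave.barrier intrinsic. The conservative
// mayLoad/mayStore/hasSideEffects flags below keep memory operations from
// being reordered across it.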
def WAVE_BARRIER : SPseudoInstSI<(outs), (ins),
  [(int_amdgcn_wave_barrier)]> {
  let SchedRW = [];
  let hasNoSchedulingInfo = 1;
  let hasSideEffects = 1;
  let mayLoad = 1;
  let mayStore = 1;
  let isBarrier = 1;
  let isConvergent = 1;
}

// SI pseudo instructions. These are used by the CFG structurizer pass
// and should be lowered to ISA instructions prior to codegen.

// Dummy terminator instruction to use after control flow instructions are
// replaced with exec mask operations.
def SI_MASK_BRANCH : PseudoInstSI <
  (outs), (ins brtarget:$target)> {
  let isBranch = 0;
  let isTerminator = 1;
  let isBarrier = 0;
  let Uses = [EXEC];
  let SchedRW = [];
  let hasNoSchedulingInfo = 1;
}

let isTerminator = 1 in {

def SI_IF : CFPseudoInstSI <
  (outs SReg_64:$dst), (ins SReg_64:$vcc, brtarget:$target),
  [(set i64:$dst, (int_amdgcn_if i1:$vcc, bb:$target))], 1, 1> {
  let Constraints = "";
  let Size = 12;
  let mayLoad = 1;
  let mayStore = 1;
  let hasSideEffects = 1;
}

def SI_ELSE : CFPseudoInstSI <
  (outs SReg_64:$dst), (ins SReg_64:$src, brtarget:$target, i1imm:$execfix), [], 1, 1> {
  let Constraints = "$src = $dst";
  let Size = 12;
  let mayStore = 1;
  let mayLoad = 1;
  let hasSideEffects = 1;
}

def SI_LOOP : CFPseudoInstSI <
  (outs), (ins SReg_64:$saved, brtarget:$target),
  [(int_amdgcn_loop i64:$saved, bb:$target)], 1, 1> {
  let Size = 8;
  let isBranch = 1;
  let hasSideEffects = 1;
  let mayLoad = 1;
  let mayStore = 1;
}

} // End isTerminator = 1

def SI_END_CF : CFPseudoInstSI <
  (outs), (ins SReg_64:$saved),
  [(int_amdgcn_end_cf i64:$saved)], 1, 1> {
  let Size = 4;
  let isAsCheapAsAMove = 1;
  let isReMaterializable = 1;
  let mayLoad = 1;
  let mayStore = 1;
  let hasSideEffects = 1;
}

def SI_BREAK : CFPseudoInstSI <
  (outs SReg_64:$dst), (ins SReg_64:$src),
  [(set i64:$dst, (int_amdgcn_break i64:$src))], 1> {
  let Size = 4;
  let isAsCheapAsAMove = 1;
  let isReMaterializable = 1;
}

def SI_IF_BREAK : CFPseudoInstSI <
  (outs SReg_64:$dst), (ins SReg_64:$vcc, SReg_64:$src),
  [(set i64:$dst, (int_amdgcn_if_break i1:$vcc, i64:$src))]> {
  let Size = 4;
  let isAsCheapAsAMove = 1;
  let isReMaterializable = 1;
}

def SI_ELSE_BREAK : CFPseudoInstSI <
  (outs SReg_64:$dst), (ins SReg_64:$src0, SReg_64:$src1),
  [(set i64:$dst, (int_amdgcn_else_break i64:$src0, i64:$src1))]> {
  let Size = 4;
  let isAsCheapAsAMove = 1;
  let isReMaterializable = 1;
}

let Uses = [EXEC], Defs = [EXEC,VCC] in {
def SI_KILL : PseudoInstSI <
  (outs), (ins VSrc_b32:$src),
  [(AMDGPUkill i32:$src)]> {
  let isConvergent = 1;
  let usesCustomInserter = 1;
}

def SI_KILL_TERMINATOR : SPseudoInstSI <
  (outs), (ins VSrc_b32:$src)> {
  let isTerminator = 1;
}

} // End Uses = [EXEC], Defs = [EXEC,VCC]

// Branch on undef scc. Used to avoid intermediate copy from
// IMPLICIT_DEF to SCC.
def SI_BR_UNDEF : SPseudoInstSI <(outs), (ins sopp_brtarget:$simm16)> {
  let isTerminator = 1;
  let usesCustomInserter = 1;
}

def SI_PS_LIVE : PseudoInstSI <
  (outs SReg_64:$dst), (ins),
  [(set i1:$dst, (int_amdgcn_ps_live))]> {
  let SALU = 1;
}

// Used as an isel pseudo to directly emit initialization with an
// s_mov_b32 rather than a copy of another initialized
// register. MachineCSE skips copies, and we don't want to have to
// fold operands before it runs.
def SI_INIT_M0 : SPseudoInstSI <(outs), (ins SSrc_b32:$src)> {
  let Defs = [M0];
  let usesCustomInserter = 1;
  let isAsCheapAsAMove = 1;
  let isReMaterializable = 1;
}

def SI_RETURN : SPseudoInstSI <
  (outs), (ins variable_ops), [(AMDGPUreturn)]> {
  let isTerminator = 1;
  let isBarrier = 1;
  let isReturn = 1;
  let hasSideEffects = 1;
  let hasNoSchedulingInfo = 1;
  let DisableWQM = 1;
}

let Defs = [M0, EXEC],
    UseNamedOperandTable = 1 in {

class SI_INDIRECT_SRC<RegisterClass rc> : VPseudoInstSI <
  (outs VGPR_32:$vdst),
  (ins rc:$src, VS_32:$idx, i32imm:$offset)> {
  let usesCustomInserter = 1;
}

class SI_INDIRECT_DST<RegisterClass rc> : VPseudoInstSI <
  (outs rc:$vdst),
  (ins rc:$src, VS_32:$idx, i32imm:$offset, VGPR_32:$val)> {
  let Constraints = "$src = $vdst";
  let usesCustomInserter = 1;
}

// TODO: We can support indirect SGPR access.
def SI_INDIRECT_SRC_V1 : SI_INDIRECT_SRC<VGPR_32>;
def SI_INDIRECT_SRC_V2 : SI_INDIRECT_SRC<VReg_64>;
def SI_INDIRECT_SRC_V4 : SI_INDIRECT_SRC<VReg_128>;
def SI_INDIRECT_SRC_V8 : SI_INDIRECT_SRC<VReg_256>;
def SI_INDIRECT_SRC_V16 : SI_INDIRECT_SRC<VReg_512>;

def SI_INDIRECT_DST_V1 : SI_INDIRECT_DST<VGPR_32>;
def SI_INDIRECT_DST_V2 : SI_INDIRECT_DST<VReg_64>;
def SI_INDIRECT_DST_V4 : SI_INDIRECT_DST<VReg_128>;
def SI_INDIRECT_DST_V8 : SI_INDIRECT_DST<VReg_256>;
def SI_INDIRECT_DST_V16 : SI_INDIRECT_DST<VReg_512>;

} // End Defs = [M0, EXEC], UseNamedOperandTable = 1

multiclass SI_SPILL_SGPR <RegisterClass sgpr_class> {
  let UseNamedOperandTable = 1, SGPRSpill = 1, Uses = [EXEC] in {
    def _SAVE : PseudoInstSI <
      (outs),
      (ins sgpr_class:$data, i32imm:$addr)> {
      let mayStore = 1;
      let mayLoad = 0;
    }

    def _RESTORE : PseudoInstSI <
      (outs sgpr_class:$data),
      (ins i32imm:$addr)> {
      let mayStore = 0;
      let mayLoad = 1;
    }
  } // End UseNamedOperandTable = 1, SGPRSpill = 1, Uses = [EXEC]
}

// You cannot use M0 as the output of v_readlane_b32 instructions or
// use it in the sdata operand of SMEM instructions. We still need to
// be able to spill the physical register m0, so allow it for
// SI_SPILL_32_* instructions.
defm SI_SPILL_S32 : SI_SPILL_SGPR <SReg_32>;
defm SI_SPILL_S64 : SI_SPILL_SGPR <SReg_64>;
defm SI_SPILL_S128 : SI_SPILL_SGPR <SReg_128>;
defm SI_SPILL_S256 : SI_SPILL_SGPR <SReg_256>;
defm SI_SPILL_S512 : SI_SPILL_SGPR <SReg_512>;

multiclass SI_SPILL_VGPR <RegisterClass vgpr_class> {
  let UseNamedOperandTable = 1, VGPRSpill = 1,
      SchedRW = [WriteVMEM] in {
    def _SAVE : VPseudoInstSI <
      (outs),
      (ins vgpr_class:$vdata, i32imm:$vaddr, SReg_128:$srsrc,
           SReg_32:$soffset, i32imm:$offset)> {
      let mayStore = 1;
      let mayLoad = 0;
      // (2 * 4) + (8 * num_subregs) bytes maximum
      let Size = !add(!shl(!srl(vgpr_class.Size, 5), 3), 8);
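      // vgpr_class.Size is in bits: !srl(..., 5) gives the number of 32-bit
      // subregisters, !shl(..., 3) budgets 8 bytes of encoding per subregister,
      // and the final !add(..., 8) is the (2 * 4) byte overhead noted above.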
    }

    def _RESTORE : VPseudoInstSI <
      (outs vgpr_class:$vdata),
      (ins i32imm:$vaddr, SReg_128:$srsrc, SReg_32:$soffset,
           i32imm:$offset)> {
      let mayStore = 0;
      let mayLoad = 1;

      // (2 * 4) + (8 * num_subregs) bytes maximum
      let Size = !add(!shl(!srl(vgpr_class.Size, 5), 3), 8);
    }
  } // End UseNamedOperandTable = 1, VGPRSpill = 1, SchedRW = [WriteVMEM]
}

defm SI_SPILL_V32 : SI_SPILL_VGPR <VGPR_32>;
defm SI_SPILL_V64 : SI_SPILL_VGPR <VReg_64>;
defm SI_SPILL_V96 : SI_SPILL_VGPR <VReg_96>;
defm SI_SPILL_V128 : SI_SPILL_VGPR <VReg_128>;
defm SI_SPILL_V256 : SI_SPILL_VGPR <VReg_256>;
defm SI_SPILL_V512 : SI_SPILL_VGPR <VReg_512>;

def SI_PC_ADD_REL_OFFSET : SPseudoInstSI <
  (outs SReg_64:$dst),
  (ins si_ga:$ptr_lo, si_ga:$ptr_hi),
  [(set SReg_64:$dst,
      (i64 (SIpc_add_rel_offset (tglobaladdr:$ptr_lo), (tglobaladdr:$ptr_hi))))]> {
  let Defs = [SCC];
}

} // End SubtargetPredicate = isGCN

let Predicates = [isGCN] in {

def : Pat<
  (int_amdgcn_else i64:$src, bb:$target),
  (SI_ELSE $src, $target, 0)
>;

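// int_AMDGPU_kilp is an unconditional kill: 0xbf800000 is -1.0f, and SI_KILL
// discards the lane when its operand is negative.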
def : Pat <
  (int_AMDGPU_kilp),
  (SI_KILL (i32 0xbf800000))
>;

//===----------------------------------------------------------------------===//
// VOP1 Patterns
//===----------------------------------------------------------------------===//

let Predicates = [UnsafeFPMath] in {

//def : RcpPat<V_RCP_F64_e32, f64>;
//defm : RsqPat<V_RSQ_F64_e32, f64>;
//defm : RsqPat<V_RSQ_F32_e32, f32>;

def : RsqPat<V_RSQ_F32_e32, f32>;
def : RsqPat<V_RSQ_F64_e32, f64>;

// Convert (x - floor(x)) to fract(x)
def : Pat <
  (f32 (fsub (f32 (VOP3Mods f32:$x, i32:$mods)),
             (f32 (ffloor (f32 (VOP3Mods f32:$x, i32:$mods)))))),
  (V_FRACT_F32_e64 $mods, $x, DSTCLAMP.NONE, DSTOMOD.NONE)
>;

// Convert (x + (-floor(x))) to fract(x)
def : Pat <
  (f64 (fadd (f64 (VOP3Mods f64:$x, i32:$mods)),
             (f64 (fneg (f64 (ffloor (f64 (VOP3Mods f64:$x, i32:$mods)))))))),
  (V_FRACT_F64_e64 $mods, $x, DSTCLAMP.NONE, DSTOMOD.NONE)
>;

} // End Predicates = [UnsafeFPMath]

def : Pat <
  (f32 (fpextend f16:$src)),
  (V_CVT_F32_F16_e32 $src)
>;

def : Pat <
  (f64 (fpextend f16:$src)),
  (V_CVT_F64_F32_e32 (V_CVT_F32_F16_e32 $src))
>;

def : Pat <
  (f16 (fpround f32:$src)),
  (V_CVT_F16_F32_e32 $src)
>;

def : Pat <
  (f16 (fpround f64:$src)),
  (V_CVT_F16_F32_e32 (V_CVT_F32_F64_e32 $src))
>;

def : Pat <
  (i32 (fp_to_sint f16:$src)),
  (V_CVT_I32_F32_e32 (V_CVT_F32_F16_e32 $src))
>;

def : Pat <
  (i32 (fp_to_uint f16:$src)),
  (V_CVT_U32_F32_e32 (V_CVT_F32_F16_e32 $src))
>;

def : Pat <
  (f16 (sint_to_fp i32:$src)),
  (V_CVT_F16_F32_e32 (V_CVT_F32_I32_e32 $src))
>;

def : Pat <
  (f16 (uint_to_fp i32:$src)),
  (V_CVT_F16_F32_e32 (V_CVT_F32_U32_e32 $src))
>;

//===----------------------------------------------------------------------===//
// VOP2 Patterns
//===----------------------------------------------------------------------===//

multiclass FMADPat <ValueType vt, Instruction inst> {
  def : Pat <
    (vt (fmad (VOP3NoMods0 vt:$src0, i32:$src0_modifiers, i1:$clamp, i32:$omod),
              (VOP3NoMods vt:$src1, i32:$src1_modifiers),
              (VOP3NoMods vt:$src2, i32:$src2_modifiers))),
    (inst $src0_modifiers, $src0, $src1_modifiers, $src1,
          $src2_modifiers, $src2, $clamp, $omod)
  >;
}

defm : FMADPat <f16, V_MAC_F16_e64>;
defm : FMADPat <f32, V_MAC_F32_e64>;

multiclass SelectPat <ValueType vt, Instruction inst> {
  def : Pat <
    (vt (select i1:$src0, vt:$src1, vt:$src2)),
    (inst $src2, $src1, $src0)
  >;
}

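// The 16-bit selects reuse the 32-bit V_CNDMASK_B32; i16/f16 values occupy
// the low half of a 32-bit VGPR.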
defm : SelectPat <i16, V_CNDMASK_B32_e64>;
defm : SelectPat <i32, V_CNDMASK_B32_e64>;
defm : SelectPat <f16, V_CNDMASK_B32_e64>;
defm : SelectPat <f32, V_CNDMASK_B32_e64>;

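// V_BCNT_U32_B32 computes countbits(src0) + src1, so the add folds into the
// instruction.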
def : Pat <
  (i32 (add (i32 (ctpop i32:$popcnt)), i32:$val)),
  (V_BCNT_U32_B32_e64 $popcnt, $val)
>;

/********** ============================================ **********/
/********** Extraction, Insertion, Building and Casting  **********/
/********** ============================================ **********/

foreach Index = 0-2 in {
  def Extract_Element_v2i32_#Index : Extract_Element <
    i32, v2i32, Index, !cast<SubRegIndex>(sub#Index)
  >;
  def Insert_Element_v2i32_#Index : Insert_Element <
    i32, v2i32, Index, !cast<SubRegIndex>(sub#Index)
  >;

  def Extract_Element_v2f32_#Index : Extract_Element <
    f32, v2f32, Index, !cast<SubRegIndex>(sub#Index)
  >;
  def Insert_Element_v2f32_#Index : Insert_Element <
    f32, v2f32, Index, !cast<SubRegIndex>(sub#Index)
  >;
}

foreach Index = 0-3 in {
  def Extract_Element_v4i32_#Index : Extract_Element <
    i32, v4i32, Index, !cast<SubRegIndex>(sub#Index)
  >;
  def Insert_Element_v4i32_#Index : Insert_Element <
    i32, v4i32, Index, !cast<SubRegIndex>(sub#Index)
  >;

  def Extract_Element_v4f32_#Index : Extract_Element <
    f32, v4f32, Index, !cast<SubRegIndex>(sub#Index)
  >;
  def Insert_Element_v4f32_#Index : Insert_Element <
    f32, v4f32, Index, !cast<SubRegIndex>(sub#Index)
  >;
}

foreach Index = 0-7 in {
  def Extract_Element_v8i32_#Index : Extract_Element <
    i32, v8i32, Index, !cast<SubRegIndex>(sub#Index)
  >;
  def Insert_Element_v8i32_#Index : Insert_Element <
    i32, v8i32, Index, !cast<SubRegIndex>(sub#Index)
  >;

  def Extract_Element_v8f32_#Index : Extract_Element <
    f32, v8f32, Index, !cast<SubRegIndex>(sub#Index)
  >;
  def Insert_Element_v8f32_#Index : Insert_Element <
    f32, v8f32, Index, !cast<SubRegIndex>(sub#Index)
  >;
}

foreach Index = 0-15 in {
  def Extract_Element_v16i32_#Index : Extract_Element <
    i32, v16i32, Index, !cast<SubRegIndex>(sub#Index)
  >;
  def Insert_Element_v16i32_#Index : Insert_Element <
    i32, v16i32, Index, !cast<SubRegIndex>(sub#Index)
  >;

  def Extract_Element_v16f32_#Index : Extract_Element <
    f32, v16f32, Index, !cast<SubRegIndex>(sub#Index)
  >;
  def Insert_Element_v16f32_#Index : Insert_Element <
    f32, v16f32, Index, !cast<SubRegIndex>(sub#Index)
  >;
}

// FIXME: Why do only some of these type combinations exist for SReg and VReg?

// 16-bit bitcast
def : BitConvert <i16, f16, VGPR_32>;
def : BitConvert <f16, i16, VGPR_32>;
def : BitConvert <i16, f16, SReg_32>;
def : BitConvert <f16, i16, SReg_32>;

// 32-bit bitcast
def : BitConvert <i32, f32, VGPR_32>;
def : BitConvert <f32, i32, VGPR_32>;
def : BitConvert <i32, f32, SReg_32>;
def : BitConvert <f32, i32, SReg_32>;

// 64-bit bitcast
def : BitConvert <i64, f64, VReg_64>;
def : BitConvert <f64, i64, VReg_64>;
def : BitConvert <v2i32, v2f32, VReg_64>;
def : BitConvert <v2f32, v2i32, VReg_64>;
def : BitConvert <i64, v2i32, VReg_64>;
def : BitConvert <v2i32, i64, VReg_64>;
def : BitConvert <i64, v2f32, VReg_64>;
def : BitConvert <v2f32, i64, VReg_64>;
def : BitConvert <f64, v2f32, VReg_64>;
def : BitConvert <v2f32, f64, VReg_64>;
def : BitConvert <f64, v2i32, VReg_64>;
def : BitConvert <v2i32, f64, VReg_64>;
def : BitConvert <v4i32, v4f32, VReg_128>;
def : BitConvert <v4f32, v4i32, VReg_128>;

// 128-bit bitcast
def : BitConvert <v2i64, v4i32, SReg_128>;
def : BitConvert <v4i32, v2i64, SReg_128>;
def : BitConvert <v2f64, v4f32, VReg_128>;
def : BitConvert <v2f64, v4i32, VReg_128>;
def : BitConvert <v4f32, v2f64, VReg_128>;
def : BitConvert <v4i32, v2f64, VReg_128>;
def : BitConvert <v2i64, v2f64, VReg_128>;
def : BitConvert <v2f64, v2i64, VReg_128>;

// 256-bit bitcast
def : BitConvert <v8i32, v8f32, SReg_256>;
def : BitConvert <v8f32, v8i32, SReg_256>;
def : BitConvert <v8i32, v8f32, VReg_256>;
def : BitConvert <v8f32, v8i32, VReg_256>;

// 512-bit bitcast
def : BitConvert <v16i32, v16f32, VReg_512>;
def : BitConvert <v16f32, v16i32, VReg_512>;

/********** =================== **********/
/********** Src & Dst modifiers **********/
/********** =================== **********/

def : Pat <
  (AMDGPUclamp (VOP3Mods0Clamp f32:$src0, i32:$src0_modifiers, i32:$omod),
               (f32 FP_ZERO), (f32 FP_ONE)),
  (V_ADD_F32_e64 $src0_modifiers, $src0, 0, (i32 0), 1, $omod)
>;

/********** ================================ **********/
/********** Floating point absolute/negative **********/
/********** ================================ **********/

// Prevent expanding both fneg and fabs.

def : Pat <
  (fneg (fabs f32:$src)),
  (S_OR_B32 $src, (S_MOV_B32 (i32 0x80000000))) // Set sign bit
>;

// FIXME: Should use S_OR_B32
def : Pat <
  (fneg (fabs f64:$src)),
  (REG_SEQUENCE VReg_64,
    (i32 (EXTRACT_SUBREG f64:$src, sub0)),
    sub0,
    (V_OR_B32_e32 (i32 (EXTRACT_SUBREG f64:$src, sub1)),
                  (V_MOV_B32_e32 (i32 0x80000000))), // Set sign bit.
    sub1)
>;

def : Pat <
  (fabs f32:$src),
  (V_AND_B32_e64 $src, (V_MOV_B32_e32 (i32 0x7fffffff)))
>;

def : Pat <
  (fneg f32:$src),
  (V_XOR_B32_e32 $src, (V_MOV_B32_e32 (i32 0x80000000)))
>;

def : Pat <
  (fabs f64:$src),
  (REG_SEQUENCE VReg_64,
    (i32 (EXTRACT_SUBREG f64:$src, sub0)),
    sub0,
    (V_AND_B32_e64 (i32 (EXTRACT_SUBREG f64:$src, sub1)),
                   (V_MOV_B32_e32 (i32 0x7fffffff))), // Clear sign bit.
    sub1)
>;

def : Pat <
  (fneg f64:$src),
  (REG_SEQUENCE VReg_64,
    (i32 (EXTRACT_SUBREG f64:$src, sub0)),
    sub0,
    (V_XOR_B32_e32 (i32 (EXTRACT_SUBREG f64:$src, sub1)),
                   (i32 (V_MOV_B32_e32 (i32 0x80000000)))),
    sub1)
>;

def : Pat <
  (fcopysign f16:$src0, f16:$src1),
  (V_BFI_B32 (S_MOV_B32 (i32 0x00007fff)), $src0, $src1)
>;

def : Pat <
  (fcopysign f32:$src0, f16:$src1),
  (V_BFI_B32 (S_MOV_B32 (i32 0x7fffffff)), $src0,
             (V_LSHLREV_B32_e64 (i32 16), $src1))
>;

def : Pat <
  (fcopysign f64:$src0, f16:$src1),
  (REG_SEQUENCE SReg_64,
    (i32 (EXTRACT_SUBREG $src0, sub0)), sub0,
    (V_BFI_B32 (S_MOV_B32 (i32 0x7fffffff)), (i32 (EXTRACT_SUBREG $src0, sub1)),
               (V_LSHLREV_B32_e64 (i32 16), $src1)), sub1)
>;

def : Pat <
  (fcopysign f16:$src0, f32:$src1),
  (V_BFI_B32 (S_MOV_B32 (i32 0x00007fff)), $src0,
             (V_LSHRREV_B32_e64 (i32 16), $src1))
>;

def : Pat <
  (fcopysign f16:$src0, f64:$src1),
  (V_BFI_B32 (S_MOV_B32 (i32 0x00007fff)), $src0,
             (V_LSHRREV_B32_e64 (i32 16), (EXTRACT_SUBREG $src1, sub1)))
>;

def : Pat <
  (fneg f16:$src),
  (V_XOR_B32_e32 $src, (V_MOV_B32_e32 (i32 0x00008000)))
>;

def : Pat <
  (fabs f16:$src),
  (V_AND_B32_e64 $src, (V_MOV_B32_e32 (i32 0x00007fff)))
>;

def : Pat <
  (fneg (fabs f16:$src)),
  (S_OR_B32 $src, (S_MOV_B32 (i32 0x00008000))) // Set sign bit
>;

/********** ================== **********/
/********** Immediate Patterns **********/
/********** ================== **********/

def : Pat <
  (VGPRImm<(i32 imm)>:$imm),
  (V_MOV_B32_e32 imm:$imm)
>;

def : Pat <
  (VGPRImm<(f32 fpimm)>:$imm),
  (V_MOV_B32_e32 (f32 (bitcast_fpimm_to_i32 $imm)))
>;

def : Pat <
  (i32 imm:$imm),
  (S_MOV_B32 imm:$imm)
>;

// FIXME: Workaround for ordering issue with peephole optimizer where
// a register class copy interferes with immediate folding. Should
// use s_mov_b32, which can be shrunk to s_movk_i32
def : Pat <
  (VGPRImm<(f16 fpimm)>:$imm),
  (V_MOV_B32_e32 (f16 (bitcast_fpimm_to_i32 $imm)))
>;

def : Pat <
  (f32 fpimm:$imm),
  (S_MOV_B32 (f32 (bitcast_fpimm_to_i32 $imm)))
>;

def : Pat <
  (f16 fpimm:$imm),
  (S_MOV_B32 (i32 (bitcast_fpimm_to_i32 $imm)))
>;

def : Pat <
  (i32 frameindex:$fi),
  (V_MOV_B32_e32 (i32 (frameindex_to_targetframeindex $fi)))
>;

def : Pat <
  (i64 InlineImm<i64>:$imm),
  (S_MOV_B64 InlineImm<i64>:$imm)
>;

// XXX - Should this use an s_cmp to set SCC?

// Set to sign-extended 64-bit value (true = -1, false = 0)
def : Pat <
  (i1 imm:$imm),
  (S_MOV_B64 (i64 (as_i64imm $imm)))
>;

def : Pat <
  (f64 InlineFPImm<f64>:$imm),
  (S_MOV_B64 (f64 (bitcast_fpimm_to_i64 InlineFPImm<f64>:$imm)))
>;

/********** ================== **********/
/********** Intrinsic Patterns **********/
/********** ================== **********/

def : POW_Common <V_LOG_F32_e32, V_EXP_F32_e32, V_MUL_LEGACY_F32_e32>;

def : Pat <
  (int_AMDGPU_cube v4f32:$src),
  (REG_SEQUENCE VReg_128,
    (V_CUBETC_F32 0 /* src0_modifiers */, (f32 (EXTRACT_SUBREG $src, sub0)),
                  0 /* src1_modifiers */, (f32 (EXTRACT_SUBREG $src, sub1)),
                  0 /* src2_modifiers */, (f32 (EXTRACT_SUBREG $src, sub2)),
                  0 /* clamp */, 0 /* omod */), sub0,
    (V_CUBESC_F32 0 /* src0_modifiers */, (f32 (EXTRACT_SUBREG $src, sub0)),
                  0 /* src1_modifiers */, (f32 (EXTRACT_SUBREG $src, sub1)),
                  0 /* src2_modifiers */, (f32 (EXTRACT_SUBREG $src, sub2)),
                  0 /* clamp */, 0 /* omod */), sub1,
    (V_CUBEMA_F32 0 /* src0_modifiers */, (f32 (EXTRACT_SUBREG $src, sub0)),
                  0 /* src1_modifiers */, (f32 (EXTRACT_SUBREG $src, sub1)),
                  0 /* src2_modifiers */, (f32 (EXTRACT_SUBREG $src, sub2)),
                  0 /* clamp */, 0 /* omod */), sub2,
    (V_CUBEID_F32 0 /* src0_modifiers */, (f32 (EXTRACT_SUBREG $src, sub0)),
                  0 /* src1_modifiers */, (f32 (EXTRACT_SUBREG $src, sub1)),
                  0 /* src2_modifiers */, (f32 (EXTRACT_SUBREG $src, sub2)),
                  0 /* clamp */, 0 /* omod */), sub3)
>;

def : Pat <
  (i32 (sext i1:$src0)),
  (V_CNDMASK_B32_e64 (i32 0), (i32 -1), $src0)
>;

class Ext32Pat <SDNode ext> : Pat <
  (i32 (ext i1:$src0)),
  (V_CNDMASK_B32_e64 (i32 0), (i32 1), $src0)
>;

def : Ext32Pat <zext>;
def : Ext32Pat <anyext>;

// The multiplication scales from [0,1] to the unsigned integer range
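// (CONST.FP_UINT_MAX_PLUS_1 is 2^32 in single-precision encoding, so the
// reciprocal in [0,1] is rescaled to the full 32-bit unsigned range before
// converting back to an integer.)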
def : Pat <
  (AMDGPUurecip i32:$src0),
  (V_CVT_U32_F32_e32
    (V_MUL_F32_e32 (i32 CONST.FP_UINT_MAX_PLUS_1),
                   (V_RCP_IFLAG_F32_e32 (V_CVT_F32_U32_e32 $src0))))
>;

//===----------------------------------------------------------------------===//
// VOP3 Patterns
//===----------------------------------------------------------------------===//

def : IMad24Pat<V_MAD_I32_I24>;
def : UMad24Pat<V_MAD_U32_U24>;

defm : BFIPatterns <V_BFI_B32, S_MOV_B32, SReg_64>;
def : ROTRPattern <V_ALIGNBIT_B32>;

/********** ====================== **********/
/**********  Indirect addressing   **********/
/********** ====================== **********/

multiclass SI_INDIRECT_Pattern <ValueType vt, ValueType eltvt, string VecSize> {
  // Extract with offset
  def : Pat<
    (eltvt (extractelt vt:$src, (MOVRELOffset i32:$idx, (i32 imm:$offset)))),
    (!cast<Instruction>("SI_INDIRECT_SRC_"#VecSize) $src, $idx, imm:$offset)
  >;

  // Insert with offset
  def : Pat<
    (insertelt vt:$src, eltvt:$val, (MOVRELOffset i32:$idx, (i32 imm:$offset))),
    (!cast<Instruction>("SI_INDIRECT_DST_"#VecSize) $src, $idx, imm:$offset, $val)
  >;
}

defm : SI_INDIRECT_Pattern <v2f32, f32, "V2">;
defm : SI_INDIRECT_Pattern <v4f32, f32, "V4">;
defm : SI_INDIRECT_Pattern <v8f32, f32, "V8">;
defm : SI_INDIRECT_Pattern <v16f32, f32, "V16">;

defm : SI_INDIRECT_Pattern <v2i32, i32, "V2">;
defm : SI_INDIRECT_Pattern <v4i32, i32, "V4">;
defm : SI_INDIRECT_Pattern <v8i32, i32, "V8">;
defm : SI_INDIRECT_Pattern <v16i32, i32, "V16">;

//===----------------------------------------------------------------------===//
// SAD Patterns
//===----------------------------------------------------------------------===//

def : Pat <
  (add (sub_oneuse (umax i32:$src0, i32:$src1),
                   (umin i32:$src0, i32:$src1)),
       i32:$src2),
  (V_SAD_U32 $src0, $src1, $src2)
>;

def : Pat <
  (add (select_oneuse (i1 (setugt i32:$src0, i32:$src1)),
                      (sub i32:$src0, i32:$src1),
                      (sub i32:$src1, i32:$src0)),
       i32:$src2),
  (V_SAD_U32 $src0, $src1, $src2)
>;

//===----------------------------------------------------------------------===//
// Conversion Patterns
//===----------------------------------------------------------------------===//

def : Pat<(i32 (sext_inreg i32:$src, i1)),
  (S_BFE_I32 i32:$src, (i32 65536))>; // 0 | 1 << 16
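// S_BFE takes a packed operand: the field offset in the low bits and the
// field width shifted left by 16, which is why the constants in these
// patterns are written as (width << 16) | offset.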

// Handle sext_inreg in i64
def : Pat <
  (i64 (sext_inreg i64:$src, i1)),
  (S_BFE_I64 i64:$src, (i32 0x10000)) // 0 | 1 << 16
>;

def : Pat <
  (i16 (sext_inreg i16:$src, i1)),
  (S_BFE_I32 $src, (i32 0x00010000)) // 0 | 1 << 16
>;

def : Pat <
  (i16 (sext_inreg i16:$src, i8)),
  (S_BFE_I32 $src, (i32 0x80000)) // 0 | 8 << 16
>;

def : Pat <
  (i64 (sext_inreg i64:$src, i8)),
  (S_BFE_I64 i64:$src, (i32 0x80000)) // 0 | 8 << 16
>;

def : Pat <
  (i64 (sext_inreg i64:$src, i16)),
  (S_BFE_I64 i64:$src, (i32 0x100000)) // 0 | 16 << 16
>;

def : Pat <
  (i64 (sext_inreg i64:$src, i32)),
  (S_BFE_I64 i64:$src, (i32 0x200000)) // 0 | 32 << 16
>;

def : Pat <
  (i64 (zext i32:$src)),
  (REG_SEQUENCE SReg_64, $src, sub0, (S_MOV_B32 (i32 0)), sub1)
>;

def : Pat <
  (i64 (anyext i32:$src)),
  (REG_SEQUENCE SReg_64, $src, sub0, (i32 (IMPLICIT_DEF)), sub1)
>;

class ZExt_i64_i1_Pat <SDNode ext> : Pat <
  (i64 (ext i1:$src)),
  (REG_SEQUENCE VReg_64,
    (V_CNDMASK_B32_e64 (i32 0), (i32 1), $src), sub0,
    (S_MOV_B32 (i32 0)), sub1)
>;

def : ZExt_i64_i1_Pat<zext>;
def : ZExt_i64_i1_Pat<anyext>;

// FIXME: We need to use COPY_TO_REGCLASS to work-around the fact that
// REG_SEQUENCE patterns don't support instructions with multiple outputs.
def : Pat <
  (i64 (sext i32:$src)),
  (REG_SEQUENCE SReg_64, $src, sub0,
    (i32 (COPY_TO_REGCLASS (S_ASHR_I32 $src, (i32 31)), SReg_32_XM0)), sub1)
>;

def : Pat <
  (i64 (sext i1:$src)),
  (REG_SEQUENCE VReg_64,
    (V_CNDMASK_B32_e64 (i32 0), (i32 -1), $src), sub0,
    (V_CNDMASK_B32_e64 (i32 0), (i32 -1), $src), sub1)
>;

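// FP-to-i1 conversions are only defined for inputs the result type can
// represent (0.0 and 1.0 for unsigned, 0.0 and -1.0 for signed), so a single
// equality compare against the non-zero representable value suffices.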
class FPToI1Pat<Instruction Inst, int KOne, ValueType kone_type, ValueType vt, SDPatternOperator fp_to_int> : Pat <
  (i1 (fp_to_int (vt (VOP3Mods vt:$src0, i32:$src0_modifiers)))),
  (i1 (Inst 0, (kone_type KOne), $src0_modifiers, $src0, DSTCLAMP.NONE, DSTOMOD.NONE))
>;

def : FPToI1Pat<V_CMP_EQ_F32_e64, CONST.FP32_ONE, i32, f32, fp_to_uint>;
def : FPToI1Pat<V_CMP_EQ_F32_e64, CONST.FP32_NEG_ONE, i32, f32, fp_to_sint>;
def : FPToI1Pat<V_CMP_EQ_F64_e64, CONST.FP64_ONE, i64, f64, fp_to_uint>;
def : FPToI1Pat<V_CMP_EQ_F64_e64, CONST.FP64_NEG_ONE, i64, f64, fp_to_sint>;

// If we need to perform a logical operation on i1 values, we need to
// use vector comparisons since there is only one SCC register. Vector
// comparisons still write to a pair of SGPRs, so treat these as
// 64-bit comparisons. When legalizing SGPR copies, instructions
// resulting in the copies from SCC to these instructions will be
// moved to the VALU.
def : Pat <
  (i1 (and i1:$src0, i1:$src1)),
  (S_AND_B64 $src0, $src1)
>;

def : Pat <
  (i1 (or i1:$src0, i1:$src1)),
  (S_OR_B64 $src0, $src1)
>;

def : Pat <
  (i1 (xor i1:$src0, i1:$src1)),
  (S_XOR_B64 $src0, $src1)
>;

def : Pat <
  (f32 (sint_to_fp i1:$src)),
  (V_CNDMASK_B32_e64 (i32 0), (i32 CONST.FP32_NEG_ONE), $src)
>;

def : Pat <
  (f32 (uint_to_fp i1:$src)),
  (V_CNDMASK_B32_e64 (i32 0), (i32 CONST.FP32_ONE), $src)
>;

def : Pat <
  (f64 (sint_to_fp i1:$src)),
  (V_CVT_F64_I32_e32 (V_CNDMASK_B32_e64 (i32 0), (i32 -1), $src))
>;

def : Pat <
  (f64 (uint_to_fp i1:$src)),
  (V_CVT_F64_U32_e32 (V_CNDMASK_B32_e64 (i32 0), (i32 1), $src))
>;

//===----------------------------------------------------------------------===//
// Miscellaneous Patterns
//===----------------------------------------------------------------------===//

def : Pat <
  (i32 (trunc i64:$a)),
  (EXTRACT_SUBREG $a, sub0)
>;

def : Pat <
  (i1 (trunc i32:$a)),
  (V_CMP_EQ_U32_e64 (S_AND_B32 (i32 1), $a), (i32 1))
>;

def : Pat <
  (i1 (trunc i64:$a)),
  (V_CMP_EQ_U32_e64 (S_AND_B32 (i32 1),
                    (i32 (EXTRACT_SUBREG $a, sub0))), (i32 1))
>;

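// bswap: V_ALIGNBIT_B32 with both sources equal is a rotate. The rotate by 24
// supplies the even bytes of the swapped value, the rotate by 8 the odd bytes,
// and V_BFI_B32 with mask 0x00ff00ff merges the two.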
def : Pat <
  (i32 (bswap i32:$a)),
  (V_BFI_B32 (S_MOV_B32 (i32 0x00ff00ff)),
             (V_ALIGNBIT_B32 $a, $a, (i32 24)),
             (V_ALIGNBIT_B32 $a, $a, (i32 8)))
>;

multiclass BFMPatterns <ValueType vt, InstSI BFM, InstSI MOV> {
  def : Pat <
    (vt (shl (vt (add (vt (shl 1, vt:$a)), -1)), vt:$b)),
    (BFM $a, $b)
  >;

  def : Pat <
    (vt (add (vt (shl 1, vt:$a)), -1)),
    (BFM $a, (MOV (i32 0)))
  >;
}

defm : BFMPatterns <i32, S_BFM_B32, S_MOV_B32>;
// FIXME: defm : BFMPatterns <i64, S_BFM_B64, S_MOV_B64>;

def : BFEPattern <V_BFE_U32, S_MOV_B32>;

def : Pat<
  (fcanonicalize f16:$src),
  (V_MUL_F16_e64 0, (i32 CONST.FP16_ONE), 0, $src, 0, 0)
>;

def : Pat<
  (fcanonicalize f32:$src),
  (V_MUL_F32_e64 0, (i32 CONST.FP32_ONE), 0, $src, 0, 0)
>;

def : Pat<
  (fcanonicalize f64:$src),
  (V_MUL_F64 0, CONST.FP64_ONE, 0, $src, 0, 0)
>;

//===----------------------------------------------------------------------===//
// Fract Patterns
//===----------------------------------------------------------------------===//

let Predicates = [isSI] in {

// V_FRACT is buggy on SI, so the F32 version is never used and (x-floor(x)) is
// used instead. However, SI doesn't have V_FLOOR_F64, so the most efficient
// way to implement it is using V_FRACT_F64.
// The workaround for the V_FRACT bug is:
// fract(x) = isnan(x) ? x : min(V_FRACT(x), 0.99999999999999999)
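// (0.99999999999999999 is encoded below as 0x3fefffffffffffff, the largest
// double strictly less than 1.0.)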

// Convert floor(x) to (x - fract(x))
def : Pat <
  (f64 (ffloor (f64 (VOP3Mods f64:$x, i32:$mods)))),
  (V_ADD_F64
      $mods,
      $x,
      SRCMODS.NEG,
      (V_CNDMASK_B64_PSEUDO
         (V_MIN_F64
             SRCMODS.NONE,
             (V_FRACT_F64_e64 $mods, $x, DSTCLAMP.NONE, DSTOMOD.NONE),
             SRCMODS.NONE,
             (V_MOV_B64_PSEUDO 0x3fefffffffffffff),
             DSTCLAMP.NONE, DSTOMOD.NONE),
         $x,
         (V_CMP_CLASS_F64_e64 SRCMODS.NONE, $x, (i32 3 /*NaN*/))),
      DSTCLAMP.NONE, DSTOMOD.NONE)
>;

} // End Predicates = [isSI]

//============================================================================//
// Miscellaneous Optimization Patterns
//============================================================================//

def : SHA256MaPattern <V_BFI_B32, V_XOR_B32_e64>;

def : IntMed3Pat<V_MED3_I32, smax, smax_oneuse, smin_oneuse>;
def : IntMed3Pat<V_MED3_U32, umax, umax_oneuse, umin_oneuse>;

//============================================================================//
// Assembler aliases
//============================================================================//

def : MnemonicAlias<"v_add_u32", "v_add_i32">;
def : MnemonicAlias<"v_sub_u32", "v_sub_i32">;
def : MnemonicAlias<"v_subrev_u32", "v_subrev_i32">;

} // End isGCN predicate