//===-- SIInstructions.td - SI Instruction Definitions --------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
// This file was originally auto-generated from a GPU register header file and
// all the instruction definitions were originally commented out. Instructions
// that are not yet supported remain commented out.
//===----------------------------------------------------------------------===//

def isGCN : Predicate<"Subtarget->getGeneration() "
                      ">= SISubtarget::SOUTHERN_ISLANDS">,
            AssemblerPredicate<"FeatureGCN">;
def isSI : Predicate<"Subtarget->getGeneration() "
                     "== SISubtarget::SOUTHERN_ISLANDS">,
           AssemblerPredicate<"FeatureSouthernIslands">;

def has16BankLDS : Predicate<"Subtarget->getLDSBankCount() == 16">;
def has32BankLDS : Predicate<"Subtarget->getLDSBankCount() == 32">;
def HasVGPRIndexMode : Predicate<"Subtarget->hasVGPRIndexMode()">,
                       AssemblerPredicate<"FeatureVGPRIndexMode">;
def HasMovrel : Predicate<"Subtarget->hasMovrel()">,
                AssemblerPredicate<"FeatureMovrel">;

include "VOPInstructions.td"
include "SOPInstructions.td"
include "SMInstructions.td"
include "FLATInstructions.td"
include "BUFInstructions.td"

let SubtargetPredicate = isGCN in {

//===----------------------------------------------------------------------===//
// EXP Instructions
//===----------------------------------------------------------------------===//

defm EXP : EXP_m<0, AMDGPUexport>;
defm EXP_DONE : EXP_m<1, AMDGPUexport_done>;

//===----------------------------------------------------------------------===//
// VINTRP Instructions
//===----------------------------------------------------------------------===//

let Uses = [M0, EXEC] in {

// FIXME: Specify SchedRW for VINTRP instructions.

multiclass V_INTERP_P1_F32_m : VINTRP_m <
  0x00000000,
  (outs VGPR_32:$vdst),
  (ins VGPR_32:$vsrc, Attr:$attr, AttrChan:$attrchan),
  "v_interp_p1_f32 $vdst, $vsrc, $attr$attrchan",
  [(set f32:$vdst, (AMDGPUinterp_p1 f32:$vsrc, (i32 imm:$attrchan),
                                    (i32 imm:$attr)))]
>;

let OtherPredicates = [has32BankLDS] in {

defm V_INTERP_P1_F32 : V_INTERP_P1_F32_m;

} // End OtherPredicates = [has32BankLDS]

let OtherPredicates = [has16BankLDS], Constraints = "@earlyclobber $vdst", isAsmParserOnly=1 in {

defm V_INTERP_P1_F32_16bank : V_INTERP_P1_F32_m;

} // End OtherPredicates = [has16BankLDS], Constraints = "@earlyclobber $vdst", isAsmParserOnly=1

let DisableEncoding = "$src0", Constraints = "$src0 = $vdst" in {

defm V_INTERP_P2_F32 : VINTRP_m <
  0x00000001,
  (outs VGPR_32:$vdst),
  (ins VGPR_32:$src0, VGPR_32:$vsrc, Attr:$attr, AttrChan:$attrchan),
  "v_interp_p2_f32 $vdst, $vsrc, $attr$attrchan",
  [(set f32:$vdst, (AMDGPUinterp_p2 f32:$src0, f32:$vsrc, (i32 imm:$attrchan),
                                    (i32 imm:$attr)))]>;

} // End DisableEncoding = "$src0", Constraints = "$src0 = $vdst"

defm V_INTERP_MOV_F32 : VINTRP_m <
  0x00000002,
  (outs VGPR_32:$vdst),
  (ins InterpSlot:$vsrc, Attr:$attr, AttrChan:$attrchan),
  "v_interp_mov_f32 $vdst, $vsrc, $attr$attrchan",
  [(set f32:$vdst, (AMDGPUinterp_mov (i32 imm:$vsrc), (i32 imm:$attrchan),
                                     (i32 imm:$attr)))]>;

} // End Uses = [M0, EXEC]

//===----------------------------------------------------------------------===//
// Pseudo Instructions
//===----------------------------------------------------------------------===//

let hasSideEffects = 0, mayLoad = 0, mayStore = 0, Uses = [EXEC] in {

// For use in patterns
def V_CNDMASK_B64_PSEUDO : VOP3Common <(outs VReg_64:$vdst),
  (ins VSrc_b64:$src0, VSrc_b64:$src1, SSrc_b64:$src2), "", []> {
  let isPseudo = 1;
  let isCodeGenOnly = 1;
  let usesCustomInserter = 1;
}

// 64-bit vector move instruction. This is mainly used by the SIFoldOperands
// pass to enable folding of inline immediates.
def V_MOV_B64_PSEUDO : VPseudoInstSI <(outs VReg_64:$vdst),
                                      (ins VSrc_b64:$src0)>;
} // End let hasSideEffects = 0, mayLoad = 0, mayStore = 0, Uses = [EXEC]

def S_TRAP_PSEUDO : SPseudoInstSI <(outs), (ins i16imm:$simm16)> {
  let hasSideEffects = 1;
  let SALU = 1;
  let usesCustomInserter = 1;
}

let usesCustomInserter = 1, SALU = 1 in {
def GET_GROUPSTATICSIZE : PseudoInstSI <(outs SReg_32:$sdst), (ins),
  [(set SReg_32:$sdst, (int_amdgcn_groupstaticsize))]>;
} // End let usesCustomInserter = 1, SALU = 1

def S_MOV_B64_term : PseudoInstSI<(outs SReg_64:$dst),
  (ins SSrc_b64:$src0)> {
  let SALU = 1;
  let isAsCheapAsAMove = 1;
  let isTerminator = 1;
}

def S_XOR_B64_term : PseudoInstSI<(outs SReg_64:$dst),
  (ins SSrc_b64:$src0, SSrc_b64:$src1)> {
  let SALU = 1;
  let isAsCheapAsAMove = 1;
  let isTerminator = 1;
}

def S_ANDN2_B64_term : PseudoInstSI<(outs SReg_64:$dst),
  (ins SSrc_b64:$src0, SSrc_b64:$src1)> {
  let SALU = 1;
  let isAsCheapAsAMove = 1;
  let isTerminator = 1;
}

def WAVE_BARRIER : SPseudoInstSI<(outs), (ins),
  [(int_amdgcn_wave_barrier)]> {
  let SchedRW = [];
  let hasNoSchedulingInfo = 1;
  let hasSideEffects = 1;
  let mayLoad = 1;
  let mayStore = 1;
  let isBarrier = 1;
  let isConvergent = 1;
}

// SI pseudo instructions. These are used by the CFG structurizer pass
// and should be lowered to ISA instructions prior to codegen.

// Dummy terminator instruction to use after control flow instructions
// replaced with exec mask operations.
def SI_MASK_BRANCH : PseudoInstSI <
  (outs), (ins brtarget:$target)> {
  let isBranch = 0;
  let isTerminator = 1;
  let isBarrier = 0;
  let Uses = [EXEC];
  let SchedRW = [];
  let hasNoSchedulingInfo = 1;
}

let isTerminator = 1 in {

def SI_IF: CFPseudoInstSI <
  (outs SReg_64:$dst), (ins SReg_64:$vcc, brtarget:$target),
  [(set i64:$dst, (int_amdgcn_if i1:$vcc, bb:$target))], 1, 1> {
  let Constraints = "";
  let Size = 12;
  let mayLoad = 1;
  let mayStore = 1;
  let hasSideEffects = 1;
}

def SI_ELSE : CFPseudoInstSI <
  (outs SReg_64:$dst), (ins SReg_64:$src, brtarget:$target, i1imm:$execfix), [], 1, 1> {
  let Constraints = "$src = $dst";
  let Size = 12;
  let mayStore = 1;
  let mayLoad = 1;
  let hasSideEffects = 1;
}

def SI_LOOP : CFPseudoInstSI <
  (outs), (ins SReg_64:$saved, brtarget:$target),
  [(int_amdgcn_loop i64:$saved, bb:$target)], 1, 1> {
  let Size = 8;
  let isBranch = 1;
  let hasSideEffects = 1;
  let mayLoad = 1;
  let mayStore = 1;
}

} // End isTerminator = 1

def SI_END_CF : CFPseudoInstSI <
  (outs), (ins SReg_64:$saved),
  [(int_amdgcn_end_cf i64:$saved)], 1, 1> {
  let Size = 4;
  let isAsCheapAsAMove = 1;
  let isReMaterializable = 1;
  let mayLoad = 1;
  let mayStore = 1;
  let hasSideEffects = 1;
}

def SI_BREAK : CFPseudoInstSI <
  (outs SReg_64:$dst), (ins SReg_64:$src),
  [(set i64:$dst, (int_amdgcn_break i64:$src))], 1> {
  let Size = 4;
  let isAsCheapAsAMove = 1;
  let isReMaterializable = 1;
}

def SI_IF_BREAK : CFPseudoInstSI <
  (outs SReg_64:$dst), (ins SReg_64:$vcc, SReg_64:$src),
  [(set i64:$dst, (int_amdgcn_if_break i1:$vcc, i64:$src))]> {
  let Size = 4;
  let isAsCheapAsAMove = 1;
  let isReMaterializable = 1;
}

def SI_ELSE_BREAK : CFPseudoInstSI <
  (outs SReg_64:$dst), (ins SReg_64:$src0, SReg_64:$src1),
  [(set i64:$dst, (int_amdgcn_else_break i64:$src0, i64:$src1))]> {
  let Size = 4;
  let isAsCheapAsAMove = 1;
  let isReMaterializable = 1;
}

let Uses = [EXEC], Defs = [EXEC,VCC] in {
def SI_KILL : PseudoInstSI <
  (outs), (ins VSrc_b32:$src),
  [(AMDGPUkill i32:$src)]> {
  let isConvergent = 1;
  let usesCustomInserter = 1;
}

def SI_KILL_TERMINATOR : SPseudoInstSI <
  (outs), (ins VSrc_b32:$src)> {
  let isTerminator = 1;
}

} // End Uses = [EXEC], Defs = [EXEC,VCC]

// Branch on undef scc. Used to avoid intermediate copy from
// IMPLICIT_DEF to SCC.
def SI_BR_UNDEF : SPseudoInstSI <(outs), (ins sopp_brtarget:$simm16)> {
  let isTerminator = 1;
  let usesCustomInserter = 1;
}

def SI_PS_LIVE : PseudoInstSI <
  (outs SReg_64:$dst), (ins),
  [(set i1:$dst, (int_amdgcn_ps_live))]> {
  let SALU = 1;
}

// Used as an isel pseudo to directly emit initialization with an
// s_mov_b32 rather than a copy of another initialized
// register. MachineCSE skips copies, and we don't want to have to
// fold operands before it runs.
def SI_INIT_M0 : SPseudoInstSI <(outs), (ins SSrc_b32:$src)> {
  let Defs = [M0];
  let usesCustomInserter = 1;
  let isAsCheapAsAMove = 1;
  let isReMaterializable = 1;
}

def SI_RETURN : SPseudoInstSI <
  (outs), (ins variable_ops), [(AMDGPUreturn)]> {
  let isTerminator = 1;
  let isBarrier = 1;
  let isReturn = 1;
  let hasSideEffects = 1;
  let hasNoSchedulingInfo = 1;
  let DisableWQM = 1;
}

let Defs = [M0, EXEC],
    UseNamedOperandTable = 1 in {

class SI_INDIRECT_SRC<RegisterClass rc> : VPseudoInstSI <
  (outs VGPR_32:$vdst),
  (ins rc:$src, VS_32:$idx, i32imm:$offset)> {
  let usesCustomInserter = 1;
}

class SI_INDIRECT_DST<RegisterClass rc> : VPseudoInstSI <
  (outs rc:$vdst),
  (ins rc:$src, VS_32:$idx, i32imm:$offset, VGPR_32:$val)> {
  let Constraints = "$src = $vdst";
  let usesCustomInserter = 1;
}

// TODO: We can support indirect SGPR access.
def SI_INDIRECT_SRC_V1 : SI_INDIRECT_SRC<VGPR_32>;
def SI_INDIRECT_SRC_V2 : SI_INDIRECT_SRC<VReg_64>;
def SI_INDIRECT_SRC_V4 : SI_INDIRECT_SRC<VReg_128>;
def SI_INDIRECT_SRC_V8 : SI_INDIRECT_SRC<VReg_256>;
def SI_INDIRECT_SRC_V16 : SI_INDIRECT_SRC<VReg_512>;

def SI_INDIRECT_DST_V1 : SI_INDIRECT_DST<VGPR_32>;
def SI_INDIRECT_DST_V2 : SI_INDIRECT_DST<VReg_64>;
def SI_INDIRECT_DST_V4 : SI_INDIRECT_DST<VReg_128>;
def SI_INDIRECT_DST_V8 : SI_INDIRECT_DST<VReg_256>;
def SI_INDIRECT_DST_V16 : SI_INDIRECT_DST<VReg_512>;

} // End Defs = [M0, EXEC], UseNamedOperandTable = 1

multiclass SI_SPILL_SGPR <RegisterClass sgpr_class> {
  let UseNamedOperandTable = 1, SGPRSpill = 1, Uses = [EXEC] in {
    def _SAVE : PseudoInstSI <
      (outs),
      (ins sgpr_class:$data, i32imm:$addr)> {
      let mayStore = 1;
      let mayLoad = 0;
    }

    def _RESTORE : PseudoInstSI <
      (outs sgpr_class:$data),
      (ins i32imm:$addr)> {
      let mayStore = 0;
      let mayLoad = 1;
    }
  } // End UseNamedOperandTable = 1, SGPRSpill = 1, Uses = [EXEC]
}

// You cannot use M0 as the output of v_readlane_b32 instructions or
// use it in the sdata operand of SMEM instructions. We still need to
// be able to spill the physical register m0, so allow it for
// SI_SPILL_32_* instructions.
defm SI_SPILL_S32 : SI_SPILL_SGPR <SReg_32>;
defm SI_SPILL_S64 : SI_SPILL_SGPR <SReg_64>;
defm SI_SPILL_S128 : SI_SPILL_SGPR <SReg_128>;
defm SI_SPILL_S256 : SI_SPILL_SGPR <SReg_256>;
defm SI_SPILL_S512 : SI_SPILL_SGPR <SReg_512>;

multiclass SI_SPILL_VGPR <RegisterClass vgpr_class> {
  let UseNamedOperandTable = 1, VGPRSpill = 1,
      SchedRW = [WriteVMEM] in {
    def _SAVE : VPseudoInstSI <
      (outs),
      (ins vgpr_class:$vdata, i32imm:$vaddr, SReg_128:$srsrc,
           SReg_32:$soffset, i32imm:$offset)> {
      let mayStore = 1;
      let mayLoad = 0;
      // (2 * 4) + (8 * num_subregs) bytes maximum
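      // Sketch of the arithmetic (derived from the formula below, not an
      // original comment): vgpr_class.Size is in bits, so !srl(Size, 5) is the
      // number of 32-bit subregisters; e.g. a 128-bit class gives
      // (128 >> 5) = 4 subregs, (4 << 3) + 8 = 40 bytes.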
      let Size = !add(!shl(!srl(vgpr_class.Size, 5), 3), 8);
    }

    def _RESTORE : VPseudoInstSI <
      (outs vgpr_class:$vdata),
      (ins i32imm:$vaddr, SReg_128:$srsrc, SReg_32:$soffset,
           i32imm:$offset)> {
      let mayStore = 0;
      let mayLoad = 1;

      // (2 * 4) + (8 * num_subregs) bytes maximum
      let Size = !add(!shl(!srl(vgpr_class.Size, 5), 3), 8);
    }
  } // End UseNamedOperandTable = 1, VGPRSpill = 1, SchedRW = [WriteVMEM]
}

defm SI_SPILL_V32 : SI_SPILL_VGPR <VGPR_32>;
defm SI_SPILL_V64 : SI_SPILL_VGPR <VReg_64>;
defm SI_SPILL_V96 : SI_SPILL_VGPR <VReg_96>;
defm SI_SPILL_V128 : SI_SPILL_VGPR <VReg_128>;
defm SI_SPILL_V256 : SI_SPILL_VGPR <VReg_256>;
defm SI_SPILL_V512 : SI_SPILL_VGPR <VReg_512>;

def SI_PC_ADD_REL_OFFSET : SPseudoInstSI <
  (outs SReg_64:$dst),
  (ins si_ga:$ptr_lo, si_ga:$ptr_hi),
  [(set SReg_64:$dst,
    (i64 (SIpc_add_rel_offset (tglobaladdr:$ptr_lo), (tglobaladdr:$ptr_hi))))]> {
  let Defs = [SCC];
}

} // End SubtargetPredicate = isGCN

let Predicates = [isGCN] in {
def : Pat<
  (trap),
  (S_TRAP_PSEUDO TRAPID.LLVM_TRAP)
>;

def : Pat<
  (debugtrap),
  (S_TRAP_PSEUDO TRAPID.LLVM_DEBUG_TRAP)
>;

def : Pat<
  (int_amdgcn_else i64:$src, bb:$target),
  (SI_ELSE $src, $target, 0)
>;

def : Pat <
  (int_AMDGPU_kilp),
  (SI_KILL (i32 0xbf800000))
>;

//===----------------------------------------------------------------------===//
// VOP1 Patterns
//===----------------------------------------------------------------------===//

let Predicates = [UnsafeFPMath] in {

//def : RcpPat<V_RCP_F64_e32, f64>;
//defm : RsqPat<V_RSQ_F64_e32, f64>;
//defm : RsqPat<V_RSQ_F32_e32, f32>;

def : RsqPat<V_RSQ_F32_e32, f32>;
def : RsqPat<V_RSQ_F64_e32, f64>;

// Convert (x - floor(x)) to fract(x)
def : Pat <
  (f32 (fsub (f32 (VOP3Mods f32:$x, i32:$mods)),
             (f32 (ffloor (f32 (VOP3Mods f32:$x, i32:$mods)))))),
  (V_FRACT_F32_e64 $mods, $x, DSTCLAMP.NONE, DSTOMOD.NONE)
>;

// Convert (x + (-floor(x))) to fract(x)
def : Pat <
  (f64 (fadd (f64 (VOP3Mods f64:$x, i32:$mods)),
             (f64 (fneg (f64 (ffloor (f64 (VOP3Mods f64:$x, i32:$mods)))))))),
  (V_FRACT_F64_e64 $mods, $x, DSTCLAMP.NONE, DSTOMOD.NONE)
>;

} // End Predicates = [UnsafeFPMath]


// f16_to_fp patterns
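// The masked forms below appear to fold f16 sign/magnitude bit tricks into
// source modifiers: masking with 0x7fff clears the sign bit (abs), 0x8000
// flips or sets it (neg / neg-abs), so the conversion can use SRCMODS rather
// than a separate ALU operation on the integer bits.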
def : Pat <
  (f32 (f16_to_fp i32:$src0)),
  (V_CVT_F32_F16_e64 SRCMODS.NONE, $src0, DSTCLAMP.NONE, DSTOMOD.NONE)
>;

def : Pat <
  (f32 (f16_to_fp (and_oneuse i32:$src0, 0x7fff))),
  (V_CVT_F32_F16_e64 SRCMODS.ABS, $src0, DSTCLAMP.NONE, DSTOMOD.NONE)
>;

def : Pat <
  (f32 (f16_to_fp (or_oneuse i32:$src0, 0x8000))),
  (V_CVT_F32_F16_e64 SRCMODS.NEG_ABS, $src0, DSTCLAMP.NONE, DSTOMOD.NONE)
>;

def : Pat <
  (f32 (f16_to_fp (xor_oneuse i32:$src0, 0x8000))),
  (V_CVT_F32_F16_e64 SRCMODS.NEG, $src0, DSTCLAMP.NONE, DSTOMOD.NONE)
>;

def : Pat <
  (f64 (fpextend f16:$src)),
  (V_CVT_F64_F32_e32 (V_CVT_F32_F16_e32 $src))
>;

// fp_to_fp16 patterns
def : Pat <
  (i32 (fp_to_f16 (f32 (VOP3Mods0 f32:$src0, i32:$src0_modifiers, i1:$clamp, i32:$omod)))),
  (V_CVT_F16_F32_e64 $src0_modifiers, f32:$src0, $clamp, $omod)
>;

def : Pat <
  (i32 (fp_to_sint f16:$src)),
  (V_CVT_I32_F32_e32 (V_CVT_F32_F16_e32 $src))
>;

def : Pat <
  (i32 (fp_to_uint f16:$src)),
  (V_CVT_U32_F32_e32 (V_CVT_F32_F16_e32 $src))
>;

def : Pat <
  (f16 (sint_to_fp i32:$src)),
  (V_CVT_F16_F32_e32 (V_CVT_F32_I32_e32 $src))
>;

def : Pat <
  (f16 (uint_to_fp i32:$src)),
  (V_CVT_F16_F32_e32 (V_CVT_F32_U32_e32 $src))
>;

//===----------------------------------------------------------------------===//
// VOP2 Patterns
//===----------------------------------------------------------------------===//

multiclass FMADPat <ValueType vt, Instruction inst> {
  def : Pat <
    (vt (fmad (VOP3NoMods0 vt:$src0, i32:$src0_modifiers, i1:$clamp, i32:$omod),
              (VOP3NoMods vt:$src1, i32:$src1_modifiers),
              (VOP3NoMods vt:$src2, i32:$src2_modifiers))),
    (inst $src0_modifiers, $src0, $src1_modifiers, $src1,
          $src2_modifiers, $src2, $clamp, $omod)
  >;
}

defm : FMADPat <f16, V_MAC_F16_e64>;
defm : FMADPat <f32, V_MAC_F32_e64>;

class FMADModsPat<Instruction inst, SDPatternOperator mad_opr> : Pat<
  (f32 (mad_opr (VOP3Mods f32:$src0, i32:$src0_mod),
                (VOP3Mods f32:$src1, i32:$src1_mod),
                (VOP3Mods f32:$src2, i32:$src2_mod))),
  (inst $src0_mod, $src0, $src1_mod, $src1,
        $src2_mod, $src2, DSTCLAMP.NONE, DSTOMOD.NONE)
>;

def : FMADModsPat<V_MAD_F32, AMDGPUfmad_ftz>;

multiclass SelectPat <ValueType vt, Instruction inst> {
  def : Pat <
    (vt (select i1:$src0, vt:$src1, vt:$src2)),
    (inst $src2, $src1, $src0)
  >;
}
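// The operand swap in SelectPat reflects V_CNDMASK_B32 semantics: it selects
// its second value operand when the condition bit is set and the first
// otherwise, so the select's "false" value ($src2) is passed in the src0 slot
// and the "true" value ($src1) in the src1 slot.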
527
Konstantin Zhuravlyovbf998c72016-11-16 03:39:12 +0000528defm : SelectPat <i16, V_CNDMASK_B32_e64>;
529defm : SelectPat <i32, V_CNDMASK_B32_e64>;
530defm : SelectPat <f16, V_CNDMASK_B32_e64>;
531defm : SelectPat <f32, V_CNDMASK_B32_e64>;
Konstantin Zhuravlyov2a87a422016-11-16 03:16:26 +0000532
Tom Stellardae4c9e72014-06-20 17:06:11 +0000533def : Pat <
534 (i32 (add (i32 (ctpop i32:$popcnt)), i32:$val)),
Matt Arsenault49dd4282014-09-15 17:15:02 +0000535 (V_BCNT_U32_B32_e64 $popcnt, $val)
Tom Stellardae4c9e72014-06-20 17:06:11 +0000536>;
537
Christian Konig4a1b9c32013-03-18 11:34:10 +0000538/********** ============================================ **********/
539/********** Extraction, Insertion, Building and Casting **********/
540/********** ============================================ **********/
Tom Stellard75aadc22012-12-11 21:25:42 +0000541
Christian Konig4a1b9c32013-03-18 11:34:10 +0000542foreach Index = 0-2 in {
543 def Extract_Element_v2i32_#Index : Extract_Element <
Tom Stellard40b7f1f2013-05-02 15:30:12 +0000544 i32, v2i32, Index, !cast<SubRegIndex>(sub#Index)
Christian Konig4a1b9c32013-03-18 11:34:10 +0000545 >;
546 def Insert_Element_v2i32_#Index : Insert_Element <
Tom Stellard40b7f1f2013-05-02 15:30:12 +0000547 i32, v2i32, Index, !cast<SubRegIndex>(sub#Index)
Christian Konig4a1b9c32013-03-18 11:34:10 +0000548 >;
549
550 def Extract_Element_v2f32_#Index : Extract_Element <
Tom Stellard40b7f1f2013-05-02 15:30:12 +0000551 f32, v2f32, Index, !cast<SubRegIndex>(sub#Index)
Christian Konig4a1b9c32013-03-18 11:34:10 +0000552 >;
553 def Insert_Element_v2f32_#Index : Insert_Element <
Tom Stellard40b7f1f2013-05-02 15:30:12 +0000554 f32, v2f32, Index, !cast<SubRegIndex>(sub#Index)
Christian Konig4a1b9c32013-03-18 11:34:10 +0000555 >;
556}
557
558foreach Index = 0-3 in {
559 def Extract_Element_v4i32_#Index : Extract_Element <
Tom Stellard40b7f1f2013-05-02 15:30:12 +0000560 i32, v4i32, Index, !cast<SubRegIndex>(sub#Index)
Christian Konig4a1b9c32013-03-18 11:34:10 +0000561 >;
562 def Insert_Element_v4i32_#Index : Insert_Element <
Tom Stellard40b7f1f2013-05-02 15:30:12 +0000563 i32, v4i32, Index, !cast<SubRegIndex>(sub#Index)
Christian Konig4a1b9c32013-03-18 11:34:10 +0000564 >;
565
566 def Extract_Element_v4f32_#Index : Extract_Element <
Tom Stellard40b7f1f2013-05-02 15:30:12 +0000567 f32, v4f32, Index, !cast<SubRegIndex>(sub#Index)
Christian Konig4a1b9c32013-03-18 11:34:10 +0000568 >;
569 def Insert_Element_v4f32_#Index : Insert_Element <
Tom Stellard40b7f1f2013-05-02 15:30:12 +0000570 f32, v4f32, Index, !cast<SubRegIndex>(sub#Index)
Christian Konig4a1b9c32013-03-18 11:34:10 +0000571 >;
572}
573
574foreach Index = 0-7 in {
575 def Extract_Element_v8i32_#Index : Extract_Element <
Tom Stellard40b7f1f2013-05-02 15:30:12 +0000576 i32, v8i32, Index, !cast<SubRegIndex>(sub#Index)
Christian Konig4a1b9c32013-03-18 11:34:10 +0000577 >;
578 def Insert_Element_v8i32_#Index : Insert_Element <
Tom Stellard40b7f1f2013-05-02 15:30:12 +0000579 i32, v8i32, Index, !cast<SubRegIndex>(sub#Index)
Christian Konig4a1b9c32013-03-18 11:34:10 +0000580 >;
581
582 def Extract_Element_v8f32_#Index : Extract_Element <
Tom Stellard40b7f1f2013-05-02 15:30:12 +0000583 f32, v8f32, Index, !cast<SubRegIndex>(sub#Index)
Christian Konig4a1b9c32013-03-18 11:34:10 +0000584 >;
585 def Insert_Element_v8f32_#Index : Insert_Element <
Tom Stellard40b7f1f2013-05-02 15:30:12 +0000586 f32, v8f32, Index, !cast<SubRegIndex>(sub#Index)
Christian Konig4a1b9c32013-03-18 11:34:10 +0000587 >;
588}
589
590foreach Index = 0-15 in {
591 def Extract_Element_v16i32_#Index : Extract_Element <
Tom Stellard40b7f1f2013-05-02 15:30:12 +0000592 i32, v16i32, Index, !cast<SubRegIndex>(sub#Index)
Christian Konig4a1b9c32013-03-18 11:34:10 +0000593 >;
594 def Insert_Element_v16i32_#Index : Insert_Element <
Tom Stellard40b7f1f2013-05-02 15:30:12 +0000595 i32, v16i32, Index, !cast<SubRegIndex>(sub#Index)
Christian Konig4a1b9c32013-03-18 11:34:10 +0000596 >;
597
598 def Extract_Element_v16f32_#Index : Extract_Element <
Tom Stellard40b7f1f2013-05-02 15:30:12 +0000599 f32, v16f32, Index, !cast<SubRegIndex>(sub#Index)
Christian Konig4a1b9c32013-03-18 11:34:10 +0000600 >;
601 def Insert_Element_v16f32_#Index : Insert_Element <
Tom Stellard40b7f1f2013-05-02 15:30:12 +0000602 f32, v16f32, Index, !cast<SubRegIndex>(sub#Index)
Christian Konig4a1b9c32013-03-18 11:34:10 +0000603 >;
604}
Tom Stellard75aadc22012-12-11 21:25:42 +0000605
Matt Arsenault382d9452016-01-26 04:49:22 +0000606// FIXME: Why do only some of these type combinations for SReg and
607// VReg?
Konstantin Zhuravlyovf86e4b72016-11-13 07:01:11 +0000608// 16-bit bitcast
609def : BitConvert <i16, f16, VGPR_32>;
610def : BitConvert <f16, i16, VGPR_32>;
611def : BitConvert <i16, f16, SReg_32>;
612def : BitConvert <f16, i16, SReg_32>;
613
Matt Arsenault382d9452016-01-26 04:49:22 +0000614// 32-bit bitcast
Tom Stellard45c0b3a2015-01-07 20:59:25 +0000615def : BitConvert <i32, f32, VGPR_32>;
Tom Stellard45c0b3a2015-01-07 20:59:25 +0000616def : BitConvert <f32, i32, VGPR_32>;
Matt Arsenault382d9452016-01-26 04:49:22 +0000617def : BitConvert <i32, f32, SReg_32>;
618def : BitConvert <f32, i32, SReg_32>;
Tom Stellard75aadc22012-12-11 21:25:42 +0000619
Matt Arsenault382d9452016-01-26 04:49:22 +0000620// 64-bit bitcast
Tom Stellard7512c082013-07-12 18:14:56 +0000621def : BitConvert <i64, f64, VReg_64>;
Tom Stellard7512c082013-07-12 18:14:56 +0000622def : BitConvert <f64, i64, VReg_64>;
Tom Stellarded2f6142013-07-18 21:43:42 +0000623def : BitConvert <v2i32, v2f32, VReg_64>;
Matt Arsenault382d9452016-01-26 04:49:22 +0000624def : BitConvert <v2f32, v2i32, VReg_64>;
Tom Stellard7ea3d6d2014-03-31 14:01:55 +0000625def : BitConvert <i64, v2i32, VReg_64>;
Matt Arsenault382d9452016-01-26 04:49:22 +0000626def : BitConvert <v2i32, i64, VReg_64>;
Matt Arsenault064c2062014-06-11 17:40:32 +0000627def : BitConvert <i64, v2f32, VReg_64>;
Matt Arsenault382d9452016-01-26 04:49:22 +0000628def : BitConvert <v2f32, i64, VReg_64>;
Tom Stellard8f307212015-12-15 17:11:17 +0000629def : BitConvert <f64, v2f32, VReg_64>;
Matt Arsenault382d9452016-01-26 04:49:22 +0000630def : BitConvert <v2f32, f64, VReg_64>;
Matt Arsenault2acc7a42014-06-11 19:31:13 +0000631def : BitConvert <f64, v2i32, VReg_64>;
Matt Arsenault382d9452016-01-26 04:49:22 +0000632def : BitConvert <v2i32, f64, VReg_64>;
Tom Stellard83747202013-07-18 21:43:53 +0000633def : BitConvert <v4i32, v4f32, VReg_128>;
Matt Arsenault382d9452016-01-26 04:49:22 +0000634def : BitConvert <v4f32, v4i32, VReg_128>;
Tom Stellard83747202013-07-18 21:43:53 +0000635
Matt Arsenault382d9452016-01-26 04:49:22 +0000636// 128-bit bitcast
Matt Arsenault61001bb2015-11-25 19:58:34 +0000637def : BitConvert <v2i64, v4i32, SReg_128>;
638def : BitConvert <v4i32, v2i64, SReg_128>;
Tom Stellard8f307212015-12-15 17:11:17 +0000639def : BitConvert <v2f64, v4f32, VReg_128>;
Matt Arsenault61001bb2015-11-25 19:58:34 +0000640def : BitConvert <v2f64, v4i32, VReg_128>;
Tom Stellard8f307212015-12-15 17:11:17 +0000641def : BitConvert <v4f32, v2f64, VReg_128>;
Matt Arsenault61001bb2015-11-25 19:58:34 +0000642def : BitConvert <v4i32, v2f64, VReg_128>;
Matt Arsenaulte57206d2016-05-25 18:07:36 +0000643def : BitConvert <v2i64, v2f64, VReg_128>;
644def : BitConvert <v2f64, v2i64, VReg_128>;
Matt Arsenault61001bb2015-11-25 19:58:34 +0000645
Matt Arsenault382d9452016-01-26 04:49:22 +0000646// 256-bit bitcast
Tom Stellard967bf582014-02-13 23:34:15 +0000647def : BitConvert <v8i32, v8f32, SReg_256>;
Matt Arsenault382d9452016-01-26 04:49:22 +0000648def : BitConvert <v8f32, v8i32, SReg_256>;
Matt Arsenaultf5958dd2014-02-02 00:05:35 +0000649def : BitConvert <v8i32, v8f32, VReg_256>;
650def : BitConvert <v8f32, v8i32, VReg_256>;
Tom Stellard20ee94f2013-08-14 22:22:09 +0000651
Matt Arsenault382d9452016-01-26 04:49:22 +0000652// 512-bit bitcast
Matt Arsenaultf5958dd2014-02-02 00:05:35 +0000653def : BitConvert <v16i32, v16f32, VReg_512>;
654def : BitConvert <v16f32, v16i32, VReg_512>;
655
Christian Konig8dbe6f62013-02-21 15:17:27 +0000656/********** =================== **********/
657/********** Src & Dst modifiers **********/
658/********** =================== **********/
659
Matt Arsenault2fdf2a12017-02-21 23:35:48 +0000660
661// If denormals are not enabled, it only impacts the compare of the
662// inputs. The output result is not flushed.
663class ClampPat<Instruction inst, ValueType vt> : Pat <
664 (vt (AMDGPUclamp
665 (VOP3Mods0Clamp vt:$src0, i32:$src0_modifiers, i32:$omod))),
666 (inst i32:$src0_modifiers, vt:$src0,
667 i32:$src0_modifiers, vt:$src0, DSTCLAMP.ENABLE, $omod)
Christian Konig8dbe6f62013-02-21 15:17:27 +0000668>;
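// The pattern above implements clamp as a max of the source with itself with
// the VOP3 clamp bit set; with DSTCLAMP.ENABLE the hardware clamps the result
// to the [0.0, 1.0] range expected by AMDGPUclamp.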

def : ClampPat<V_MAX_F32_e64, f32>;
def : ClampPat<V_MAX_F64, f64>;
def : ClampPat<V_MAX_F16_e64, f16>;

/********** ================================ **********/
/********** Floating point absolute/negative **********/
/********** ================================ **********/

// Prevent expanding both fneg and fabs.

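// The patterns below operate directly on the IEEE bit pattern: fabs clears
// bit 31 (and with 0x7fffffff), fneg flips it (xor with 0x80000000), and
// fneg(fabs x) simply sets it (or with 0x80000000), so no floating-point
// operation is needed.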
def : Pat <
  (fneg (fabs f32:$src)),
  (S_OR_B32 $src, (S_MOV_B32 (i32 0x80000000))) // Set sign bit
>;

// FIXME: Should use S_OR_B32
def : Pat <
  (fneg (fabs f64:$src)),
  (REG_SEQUENCE VReg_64,
    (i32 (EXTRACT_SUBREG f64:$src, sub0)),
    sub0,
    (V_OR_B32_e32 (i32 (EXTRACT_SUBREG f64:$src, sub1)),
                  (V_MOV_B32_e32 (i32 0x80000000))), // Set sign bit.
    sub1)
>;

def : Pat <
  (fabs f32:$src),
  (V_AND_B32_e64 $src, (V_MOV_B32_e32 (i32 0x7fffffff)))
>;

def : Pat <
  (fneg f32:$src),
  (V_XOR_B32_e32 $src, (V_MOV_B32_e32 (i32 0x80000000)))
>;

def : Pat <
  (fabs f64:$src),
  (REG_SEQUENCE VReg_64,
    (i32 (EXTRACT_SUBREG f64:$src, sub0)),
    sub0,
    (V_AND_B32_e64 (i32 (EXTRACT_SUBREG f64:$src, sub1)),
                   (V_MOV_B32_e32 (i32 0x7fffffff))), // Clear sign bit.
    sub1)
>;

def : Pat <
  (fneg f64:$src),
  (REG_SEQUENCE VReg_64,
    (i32 (EXTRACT_SUBREG f64:$src, sub0)),
    sub0,
    (V_XOR_B32_e32 (i32 (EXTRACT_SUBREG f64:$src, sub1)),
                   (i32 (V_MOV_B32_e32 (i32 0x80000000)))),
    sub1)
>;

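// Why V_BFI_B32 works for fcopysign (a reading of the patterns below):
// V_BFI_B32 is a bitfield insert, dst = (op0 & op1) | (~op0 & op2), with op0
// acting as the mask. With a 0x7fffffff (f32/f64 high half) or 0x00007fff
// (f16) mask the magnitude bits come from the first value operand and the
// sign bit from the second; the 16-bit shifts move a sign bit between bit 15
// of an f16 and bit 31 of a 32-bit word.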
def : Pat <
  (fcopysign f16:$src0, f16:$src1),
  (V_BFI_B32 (S_MOV_B32 (i32 0x00007fff)), $src0, $src1)
>;

def : Pat <
  (fcopysign f32:$src0, f16:$src1),
  (V_BFI_B32 (S_MOV_B32 (i32 0x7fffffff)), $src0,
             (V_LSHLREV_B32_e64 (i32 16), $src1))
>;

def : Pat <
  (fcopysign f64:$src0, f16:$src1),
  (REG_SEQUENCE SReg_64,
    (i32 (EXTRACT_SUBREG $src0, sub0)), sub0,
    (V_BFI_B32 (S_MOV_B32 (i32 0x7fffffff)), (i32 (EXTRACT_SUBREG $src0, sub1)),
               (V_LSHLREV_B32_e64 (i32 16), $src1)), sub1)
>;

def : Pat <
  (fcopysign f16:$src0, f32:$src1),
  (V_BFI_B32 (S_MOV_B32 (i32 0x00007fff)), $src0,
             (V_LSHRREV_B32_e64 (i32 16), $src1))
>;

def : Pat <
  (fcopysign f16:$src0, f64:$src1),
  (V_BFI_B32 (S_MOV_B32 (i32 0x00007fff)), $src0,
             (V_LSHRREV_B32_e64 (i32 16), (EXTRACT_SUBREG $src1, sub1)))
>;

def : Pat <
  (fneg f16:$src),
  (V_XOR_B32_e32 $src, (V_MOV_B32_e32 (i32 0x00008000)))
>;

def : Pat <
  (fabs f16:$src),
  (V_AND_B32_e64 $src, (V_MOV_B32_e32 (i32 0x00007fff)))
>;

def : Pat <
  (fneg (fabs f16:$src)),
  (S_OR_B32 $src, (S_MOV_B32 (i32 0x00008000))) // Set sign bit
>;

/********** ================== **********/
/********** Immediate Patterns **********/
/********** ================== **********/

def : Pat <
  (VGPRImm<(i32 imm)>:$imm),
  (V_MOV_B32_e32 imm:$imm)
>;

def : Pat <
  (VGPRImm<(f32 fpimm)>:$imm),
  (V_MOV_B32_e32 (f32 (bitcast_fpimm_to_i32 $imm)))
>;

def : Pat <
  (i32 imm:$imm),
  (S_MOV_B32 imm:$imm)
>;

// FIXME: Workaround for ordering issue with peephole optimizer where
// a register class copy interferes with immediate folding. Should
// use s_mov_b32, which can be shrunk to s_movk_i32
def : Pat <
  (VGPRImm<(f16 fpimm)>:$imm),
  (V_MOV_B32_e32 (f16 (bitcast_fpimm_to_i32 $imm)))
>;

def : Pat <
  (f32 fpimm:$imm),
  (S_MOV_B32 (f32 (bitcast_fpimm_to_i32 $imm)))
>;

def : Pat <
  (f16 fpimm:$imm),
  (S_MOV_B32 (i32 (bitcast_fpimm_to_i32 $imm)))
>;

def : Pat <
  (i32 frameindex:$fi),
  (V_MOV_B32_e32 (i32 (frameindex_to_targetframeindex $fi)))
>;

def : Pat <
  (i64 InlineImm<i64>:$imm),
  (S_MOV_B64 InlineImm<i64>:$imm)
>;

// XXX - Should this use an s_cmp to set SCC?

// Set to sign-extended 64-bit value (true = -1, false = 0)
def : Pat <
  (i1 imm:$imm),
  (S_MOV_B64 (i64 (as_i64imm $imm)))
>;

def : Pat <
  (f64 InlineFPImm<f64>:$imm),
  (S_MOV_B64 (f64 (bitcast_fpimm_to_i64 InlineFPImm<f64>:$imm)))
>;

/********** ================== **********/
/********** Intrinsic Patterns **********/
/********** ================== **********/

def : POW_Common <V_LOG_F32_e32, V_EXP_F32_e32, V_MUL_LEGACY_F32_e32>;

def : Pat <
  (i32 (sext i1:$src0)),
  (V_CNDMASK_B32_e64 (i32 0), (i32 -1), $src0)
>;

class Ext32Pat <SDNode ext> : Pat <
  (i32 (ext i1:$src0)),
  (V_CNDMASK_B32_e64 (i32 0), (i32 1), $src0)
>;

def : Ext32Pat <zext>;
def : Ext32Pat <anyext>;

// The multiplication scales from [0,1] to the unsigned integer range
def : Pat <
  (AMDGPUurecip i32:$src0),
  (V_CVT_U32_F32_e32
    (V_MUL_F32_e32 (i32 CONST.FP_UINT_MAX_PLUS_1),
                   (V_RCP_IFLAG_F32_e32 (V_CVT_F32_U32_e32 $src0))))
>;
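// Rough numeric sketch (assuming CONST.FP_UINT_MAX_PLUS_1 is 2^32 as a
// float): urecip(x) ~= u32(2^32 * (1.0 / x)), i.e. an approximate 2^32 / x
// used when expanding unsigned 32-bit division.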

//===----------------------------------------------------------------------===//
// VOP3 Patterns
//===----------------------------------------------------------------------===//

def : IMad24Pat<V_MAD_I32_I24>;
def : UMad24Pat<V_MAD_U32_U24>;

defm : BFIPatterns <V_BFI_B32, S_MOV_B32, SReg_64>;
def : ROTRPattern <V_ALIGNBIT_B32>;

/********** ====================== **********/
/**********  Indirect addressing   **********/
/********** ====================== **********/

multiclass SI_INDIRECT_Pattern <ValueType vt, ValueType eltvt, string VecSize> {
  // Extract with offset
  def : Pat<
    (eltvt (extractelt vt:$src, (MOVRELOffset i32:$idx, (i32 imm:$offset)))),
    (!cast<Instruction>("SI_INDIRECT_SRC_"#VecSize) $src, $idx, imm:$offset)
  >;

  // Insert with offset
  def : Pat<
    (insertelt vt:$src, eltvt:$val, (MOVRELOffset i32:$idx, (i32 imm:$offset))),
    (!cast<Instruction>("SI_INDIRECT_DST_"#VecSize) $src, $idx, imm:$offset, $val)
  >;
}

defm : SI_INDIRECT_Pattern <v2f32, f32, "V2">;
defm : SI_INDIRECT_Pattern <v4f32, f32, "V4">;
defm : SI_INDIRECT_Pattern <v8f32, f32, "V8">;
defm : SI_INDIRECT_Pattern <v16f32, f32, "V16">;

defm : SI_INDIRECT_Pattern <v2i32, i32, "V2">;
defm : SI_INDIRECT_Pattern <v4i32, i32, "V4">;
defm : SI_INDIRECT_Pattern <v8i32, i32, "V8">;
defm : SI_INDIRECT_Pattern <v16i32, i32, "V16">;

//===----------------------------------------------------------------------===//
// SAD Patterns
//===----------------------------------------------------------------------===//

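// V_SAD_U32 computes |src0 - src1| + src2; the two patterns below cover the
// two common IR shapes of an absolute difference (max minus min, and a
// compare selecting between the two subtraction orders) followed by an
// accumulate.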
def : Pat <
  (add (sub_oneuse (umax i32:$src0, i32:$src1),
                   (umin i32:$src0, i32:$src1)),
       i32:$src2),
  (V_SAD_U32 $src0, $src1, $src2)
>;

def : Pat <
  (add (select_oneuse (i1 (setugt i32:$src0, i32:$src1)),
                      (sub i32:$src0, i32:$src1),
                      (sub i32:$src1, i32:$src0)),
       i32:$src2),
  (V_SAD_U32 $src0, $src1, $src2)
>;

//===----------------------------------------------------------------------===//
// Conversion Patterns
//===----------------------------------------------------------------------===//

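// For the S_BFE patterns below, the immediate packs the bitfield as
// (width << 16) | offset, which is what the "0 | N << 16" comments spell out;
// e.g. 0x10000 extracts a 1-bit field at offset 0 and sign-extends it.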
def : Pat<(i32 (sext_inreg i32:$src, i1)),
  (S_BFE_I32 i32:$src, (i32 65536))>; // 0 | 1 << 16

// Handle sext_inreg in i64
def : Pat <
  (i64 (sext_inreg i64:$src, i1)),
  (S_BFE_I64 i64:$src, (i32 0x10000)) // 0 | 1 << 16
>;

def : Pat <
  (i16 (sext_inreg i16:$src, i1)),
  (S_BFE_I32 $src, (i32 0x00010000)) // 0 | 1 << 16
>;

def : Pat <
  (i16 (sext_inreg i16:$src, i8)),
  (S_BFE_I32 $src, (i32 0x80000)) // 0 | 8 << 16
>;

def : Pat <
  (i64 (sext_inreg i64:$src, i8)),
  (S_BFE_I64 i64:$src, (i32 0x80000)) // 0 | 8 << 16
>;

def : Pat <
  (i64 (sext_inreg i64:$src, i16)),
  (S_BFE_I64 i64:$src, (i32 0x100000)) // 0 | 16 << 16
>;

def : Pat <
  (i64 (sext_inreg i64:$src, i32)),
  (S_BFE_I64 i64:$src, (i32 0x200000)) // 0 | 32 << 16
>;

def : Pat <
  (i64 (zext i32:$src)),
  (REG_SEQUENCE SReg_64, $src, sub0, (S_MOV_B32 (i32 0)), sub1)
>;

def : Pat <
  (i64 (anyext i32:$src)),
  (REG_SEQUENCE SReg_64, $src, sub0, (i32 (IMPLICIT_DEF)), sub1)
>;

class ZExt_i64_i1_Pat <SDNode ext> : Pat <
  (i64 (ext i1:$src)),
  (REG_SEQUENCE VReg_64,
    (V_CNDMASK_B32_e64 (i32 0), (i32 1), $src), sub0,
    (S_MOV_B32 (i32 0)), sub1)
>;


def : ZExt_i64_i1_Pat<zext>;
def : ZExt_i64_i1_Pat<anyext>;

// FIXME: We need to use COPY_TO_REGCLASS to work-around the fact that
// REG_SEQUENCE patterns don't support instructions with multiple outputs.
def : Pat <
  (i64 (sext i32:$src)),
  (REG_SEQUENCE SReg_64, $src, sub0,
    (i32 (COPY_TO_REGCLASS (S_ASHR_I32 $src, (i32 31)), SReg_32_XM0)), sub1)
>;

def : Pat <
  (i64 (sext i1:$src)),
  (REG_SEQUENCE VReg_64,
    (V_CNDMASK_B32_e64 (i32 0), (i32 -1), $src), sub0,
    (V_CNDMASK_B32_e64 (i32 0), (i32 -1), $src), sub1)
>;

class FPToI1Pat<Instruction Inst, int KOne, ValueType kone_type, ValueType vt, SDPatternOperator fp_to_int> : Pat <
  (i1 (fp_to_int (vt (VOP3Mods vt:$src0, i32:$src0_modifiers)))),
  (i1 (Inst 0, (kone_type KOne), $src0_modifiers, $src0, DSTCLAMP.NONE, DSTOMOD.NONE))
>;

def : FPToI1Pat<V_CMP_EQ_F32_e64, CONST.FP32_ONE, i32, f32, fp_to_uint>;
def : FPToI1Pat<V_CMP_EQ_F32_e64, CONST.FP32_NEG_ONE, i32, f32, fp_to_sint>;
def : FPToI1Pat<V_CMP_EQ_F64_e64, CONST.FP64_ONE, i64, f64, fp_to_uint>;
def : FPToI1Pat<V_CMP_EQ_F64_e64, CONST.FP64_NEG_ONE, i64, f64, fp_to_sint>;

// If we need to perform a logical operation on i1 values, we need to
// use vector comparisons since there is only one SCC register. Vector
// comparisons still write to a pair of SGPRs, so treat these as
// 64-bit comparisons. When legalizing SGPR copies, instructions
// resulting in the copies from SCC to these instructions will be
// moved to the VALU.
def : Pat <
  (i1 (and i1:$src0, i1:$src1)),
  (S_AND_B64 $src0, $src1)
>;

def : Pat <
  (i1 (or i1:$src0, i1:$src1)),
  (S_OR_B64 $src0, $src1)
>;

def : Pat <
  (i1 (xor i1:$src0, i1:$src1)),
  (S_XOR_B64 $src0, $src1)
>;

def : Pat <
  (f32 (sint_to_fp i1:$src)),
  (V_CNDMASK_B32_e64 (i32 0), (i32 CONST.FP32_NEG_ONE), $src)
>;

def : Pat <
  (f32 (uint_to_fp i1:$src)),
  (V_CNDMASK_B32_e64 (i32 0), (i32 CONST.FP32_ONE), $src)
>;

def : Pat <
  (f64 (sint_to_fp i1:$src)),
  (V_CVT_F64_I32_e32 (V_CNDMASK_B32_e64 (i32 0), (i32 -1), $src))
>;

def : Pat <
  (f64 (uint_to_fp i1:$src)),
  (V_CVT_F64_U32_e32 (V_CNDMASK_B32_e64 (i32 0), (i32 1), $src))
>;

//===----------------------------------------------------------------------===//
// Miscellaneous Patterns
//===----------------------------------------------------------------------===//

def : Pat <
  (i32 (trunc i64:$a)),
  (EXTRACT_SUBREG $a, sub0)
>;

def : Pat <
  (i1 (trunc i32:$a)),
  (V_CMP_EQ_U32_e64 (S_AND_B32 (i32 1), $a), (i32 1))
>;

def : Pat <
  (i1 (trunc i16:$a)),
  (V_CMP_EQ_U32_e64 (S_AND_B32 (i32 1), $a), (i32 1))
>;

def : Pat <
  (i1 (trunc i64:$a)),
  (V_CMP_EQ_U32_e64 (S_AND_B32 (i32 1),
                    (i32 (EXTRACT_SUBREG $a, sub0))), (i32 1))
>;

def : Pat <
  (i32 (bswap i32:$a)),
  (V_BFI_B32 (S_MOV_B32 (i32 0x00ff00ff)),
             (V_ALIGNBIT_B32 $a, $a, (i32 24)),
             (V_ALIGNBIT_B32 $a, $a, (i32 8)))
>;

multiclass BFMPatterns <ValueType vt, InstSI BFM, InstSI MOV> {
  def : Pat <
    (vt (shl (vt (add (vt (shl 1, vt:$a)), -1)), vt:$b)),
    (BFM $a, $b)
  >;

  def : Pat <
    (vt (add (vt (shl 1, vt:$a)), -1)),
    (BFM $a, (MOV (i32 0)))
  >;
}

defm : BFMPatterns <i32, S_BFM_B32, S_MOV_B32>;
// FIXME: defm : BFMPatterns <i64, S_BFM_B64, S_MOV_B64>;
defm : BFEPattern <V_BFE_U32, V_BFE_I32, S_MOV_B32>;

def : Pat<
  (fcanonicalize (f16 (VOP3Mods f16:$src, i32:$src_mods))),
  (V_MUL_F16_e64 0, (i32 CONST.FP16_ONE), $src_mods, $src, 0, 0)
>;

def : Pat<
  (fcanonicalize (f32 (VOP3Mods f32:$src, i32:$src_mods))),
  (V_MUL_F32_e64 0, (i32 CONST.FP32_ONE), $src_mods, $src, 0, 0)
>;

def : Pat<
  (fcanonicalize (f64 (VOP3Mods f64:$src, i32:$src_mods))),
  (V_MUL_F64 0, CONST.FP64_ONE, $src_mods, $src, 0, 0)
>;

// Allow integer inputs
class ExpPattern<SDPatternOperator node, ValueType vt, Instruction Inst> : Pat<
  (node (i8 timm:$tgt), (i8 timm:$en), vt:$src0, vt:$src1, vt:$src2, vt:$src3, (i1 timm:$compr), (i1 timm:$vm)),
  (Inst i8:$tgt, vt:$src0, vt:$src1, vt:$src2, vt:$src3, i1:$vm, i1:$compr, i8:$en)
>;

def : ExpPattern<AMDGPUexport, i32, EXP>;
def : ExpPattern<AMDGPUexport_done, i32, EXP_DONE>;

//===----------------------------------------------------------------------===//
// Fract Patterns
//===----------------------------------------------------------------------===//

let Predicates = [isSI] in {

// V_FRACT is buggy on SI, so the F32 version is never used and (x-floor(x)) is
// used instead. However, SI doesn't have V_FLOOR_F64, so the most efficient
// way to implement it is using V_FRACT_F64.
// The workaround for the V_FRACT bug is:
// fract(x) = isnan(x) ? x : min(V_FRACT(x), 0.99999999999999999)
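// (The 0.99999999999999999 bound appears below as the bit pattern
// 0x3fefffffffffffff, the largest f64 value strictly less than 1.0.)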

// Convert floor(x) to (x - fract(x))
def : Pat <
  (f64 (ffloor (f64 (VOP3Mods f64:$x, i32:$mods)))),
  (V_ADD_F64
      $mods,
      $x,
      SRCMODS.NEG,
      (V_CNDMASK_B64_PSEUDO
         (V_MIN_F64
             SRCMODS.NONE,
             (V_FRACT_F64_e64 $mods, $x, DSTCLAMP.NONE, DSTOMOD.NONE),
             SRCMODS.NONE,
             (V_MOV_B64_PSEUDO 0x3fefffffffffffff),
             DSTCLAMP.NONE, DSTOMOD.NONE),
         $x,
         (V_CMP_CLASS_F64_e64 SRCMODS.NONE, $x, (i32 3 /*NaN*/))),
      DSTCLAMP.NONE, DSTOMOD.NONE)
>;

} // End Predicates = [isSI]

//============================================================================//
// Miscellaneous Optimization Patterns
//============================================================================//

def : SHA256MaPattern <V_BFI_B32, V_XOR_B32_e64>;

def : IntMed3Pat<V_MED3_I32, smax, smax_oneuse, smin_oneuse>;
def : IntMed3Pat<V_MED3_U32, umax, umax_oneuse, umin_oneuse>;

// This matches 16 permutations of
// max(min(x, y), min(max(x, y), z))
class FPMed3Pat<ValueType vt,
                Instruction med3Inst> : Pat<
  (fmaxnum (fminnum_oneuse (VOP3Mods_nnan vt:$src0, i32:$src0_mods),
                           (VOP3Mods_nnan vt:$src1, i32:$src1_mods)),
           (fminnum_oneuse (fmaxnum_oneuse (VOP3Mods_nnan vt:$src0, i32:$src0_mods),
                                           (VOP3Mods_nnan vt:$src1, i32:$src1_mods)),
                           (vt (VOP3Mods_nnan vt:$src2, i32:$src2_mods)))),
  (med3Inst $src0_mods, $src0, $src1_mods, $src1, $src2_mods, $src2, DSTCLAMP.NONE, DSTOMOD.NONE)
>;

def : FPMed3Pat<f32, V_MED3_F32>;
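// max(min(x, y), min(max(x, y), z)) is the median of the three operands as
// long as no input is NaN, which is why every operand is wrapped in
// VOP3Mods_nnan; V_MED3_F32 then computes that median directly.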

// Undo sub x, c -> add x, -c canonicalization since c is more likely
// an inline immediate than -c.
// TODO: Also do for 64-bit.
def : Pat<
  (add i32:$src0, (i32 NegSubInlineConst32:$src1)),
  (S_SUB_I32 $src0, NegSubInlineConst32:$src1)
>;
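// e.g. (add x, -64) can instead be selected as s_sub_i32 x, 64, since 64 is
// in the inline-constant range while -64 is not.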
1177
Tom Stellard245c15f2015-05-26 15:55:52 +00001178//============================================================================//
1179// Assembler aliases
1180//============================================================================//
1181
1182def : MnemonicAlias<"v_add_u32", "v_add_i32">;
1183def : MnemonicAlias<"v_sub_u32", "v_sub_i32">;
1184def : MnemonicAlias<"v_subrev_u32", "v_subrev_i32">;
1185
Marek Olsak5df00d62014-12-07 12:18:57 +00001186} // End isGCN predicate