//===-- SIInstructions.td - SI Instruction Definitions --------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
// This file was originally auto-generated from a GPU register header file and
// all the instruction definitions were originally commented out. Instructions
// that are not yet supported remain commented out.
//===----------------------------------------------------------------------===//

def isGCN : Predicate<"Subtarget->getGeneration() "
                      ">= SISubtarget::SOUTHERN_ISLANDS">,
            AssemblerPredicate<"FeatureGCN">;
def isSI : Predicate<"Subtarget->getGeneration() "
                     "== SISubtarget::SOUTHERN_ISLANDS">,
           AssemblerPredicate<"FeatureSouthernIslands">;

def has16BankLDS : Predicate<"Subtarget->getLDSBankCount() == 16">;
def has32BankLDS : Predicate<"Subtarget->getLDSBankCount() == 32">;
def HasVGPRIndexMode : Predicate<"Subtarget->hasVGPRIndexMode()">,
                       AssemblerPredicate<"FeatureVGPRIndexMode">;
def HasMovrel : Predicate<"Subtarget->hasMovrel()">,
                AssemblerPredicate<"FeatureMovrel">;

include "VOPInstructions.td"
include "SOPInstructions.td"
include "SMInstructions.td"
include "FLATInstructions.td"
include "BUFInstructions.td"

let SubtargetPredicate = isGCN in {

//===----------------------------------------------------------------------===//
// EXP Instructions
//===----------------------------------------------------------------------===//

defm EXP : EXP_m<0, AMDGPUexport>;
defm EXP_DONE : EXP_m<1, AMDGPUexport_done>;

//===----------------------------------------------------------------------===//
// VINTRP Instructions
//===----------------------------------------------------------------------===//

let Uses = [M0, EXEC] in {

// FIXME: Specify SchedRW for VINTRP instructions.

multiclass V_INTERP_P1_F32_m : VINTRP_m <
  0x00000000,
  (outs VGPR_32:$vdst),
  (ins VGPR_32:$vsrc, Attr:$attr, AttrChan:$attrchan),
  "v_interp_p1_f32 $vdst, $vsrc, $attr$attrchan",
  [(set f32:$vdst, (AMDGPUinterp_p1 f32:$vsrc, (i32 imm:$attrchan),
                                    (i32 imm:$attr)))]
>;

let OtherPredicates = [has32BankLDS] in {

defm V_INTERP_P1_F32 : V_INTERP_P1_F32_m;

} // End OtherPredicates = [has32BankLDS]

let OtherPredicates = [has16BankLDS], Constraints = "@earlyclobber $vdst", isAsmParserOnly=1 in {

defm V_INTERP_P1_F32_16bank : V_INTERP_P1_F32_m;

} // End OtherPredicates = [has16BankLDS], Constraints = "@earlyclobber $vdst", isAsmParserOnly=1

let DisableEncoding = "$src0", Constraints = "$src0 = $vdst" in {

defm V_INTERP_P2_F32 : VINTRP_m <
  0x00000001,
  (outs VGPR_32:$vdst),
  (ins VGPR_32:$src0, VGPR_32:$vsrc, Attr:$attr, AttrChan:$attrchan),
  "v_interp_p2_f32 $vdst, $vsrc, $attr$attrchan",
  [(set f32:$vdst, (AMDGPUinterp_p2 f32:$src0, f32:$vsrc, (i32 imm:$attrchan),
                                    (i32 imm:$attr)))]>;

} // End DisableEncoding = "$src0", Constraints = "$src0 = $vdst"

defm V_INTERP_MOV_F32 : VINTRP_m <
  0x00000002,
  (outs VGPR_32:$vdst),
  (ins InterpSlot:$vsrc, Attr:$attr, AttrChan:$attrchan),
  "v_interp_mov_f32 $vdst, $vsrc, $attr$attrchan",
  [(set f32:$vdst, (AMDGPUinterp_mov (i32 imm:$vsrc), (i32 imm:$attrchan),
                                     (i32 imm:$attr)))]>;

} // End Uses = [M0, EXEC]

//===----------------------------------------------------------------------===//
// Pseudo Instructions
//===----------------------------------------------------------------------===//

let hasSideEffects = 0, mayLoad = 0, mayStore = 0, Uses = [EXEC] in {

// For use in patterns
def V_CNDMASK_B64_PSEUDO : VOP3Common <(outs VReg_64:$vdst),
  (ins VSrc_b64:$src0, VSrc_b64:$src1, SSrc_b64:$src2), "", []> {
  let isPseudo = 1;
  let isCodeGenOnly = 1;
  let usesCustomInserter = 1;
}

// 64-bit vector move instruction. This is mainly used by the SIFoldOperands
// pass to enable folding of inline immediates.
def V_MOV_B64_PSEUDO : VPseudoInstSI <(outs VReg_64:$vdst),
                                      (ins VSrc_b64:$src0)>;
} // End let hasSideEffects = 0, mayLoad = 0, mayStore = 0, Uses = [EXEC]

def S_TRAP_PSEUDO : SPseudoInstSI <(outs), (ins i16imm:$simm16)> {
  let hasSideEffects = 1;
  let SALU = 1;
  let usesCustomInserter = 1;
}

let usesCustomInserter = 1, SALU = 1 in {
def GET_GROUPSTATICSIZE : PseudoInstSI <(outs SReg_32:$sdst), (ins),
  [(set SReg_32:$sdst, (int_amdgcn_groupstaticsize))]>;
} // End let usesCustomInserter = 1, SALU = 1

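// Terminator forms of scalar mask updates. Keeping them terminators lets an
// exec-mask manipulation sit in a block's terminator position (for instance
// when saving or restoring exec around structured control flow); this note
// summarizes how the pseudos are used, it is not a constraint encoded here.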
def S_MOV_B64_term : PseudoInstSI<(outs SReg_64:$dst),
      (ins SSrc_b64:$src0)> {
  let SALU = 1;
  let isAsCheapAsAMove = 1;
  let isTerminator = 1;
}

def S_XOR_B64_term : PseudoInstSI<(outs SReg_64:$dst),
      (ins SSrc_b64:$src0, SSrc_b64:$src1)> {
  let SALU = 1;
  let isAsCheapAsAMove = 1;
  let isTerminator = 1;
}

def S_ANDN2_B64_term : PseudoInstSI<(outs SReg_64:$dst),
      (ins SSrc_b64:$src0, SSrc_b64:$src1)> {
  let SALU = 1;
  let isAsCheapAsAMove = 1;
  let isTerminator = 1;
}

def WAVE_BARRIER : SPseudoInstSI<(outs), (ins),
  [(int_amdgcn_wave_barrier)]> {
  let SchedRW = [];
  let hasNoSchedulingInfo = 1;
  let hasSideEffects = 1;
  let mayLoad = 1;
  let mayStore = 1;
  let isBarrier = 1;
  let isConvergent = 1;
}

// SI pseudo instructions. These are used by the CFG structurizer pass
// and should be lowered to ISA instructions prior to codegen.

// Dummy terminator instruction to use after control flow instructions
// replaced with exec mask operations.
def SI_MASK_BRANCH : PseudoInstSI <
  (outs), (ins brtarget:$target)> {
  let isBranch = 0;
  let isTerminator = 1;
  let isBarrier = 0;
  let Uses = [EXEC];
  let SchedRW = [];
  let hasNoSchedulingInfo = 1;
}

let isTerminator = 1 in {

def SI_IF: CFPseudoInstSI <
  (outs SReg_64:$dst), (ins SReg_64:$vcc, brtarget:$target),
  [(set i64:$dst, (AMDGPUif i1:$vcc, bb:$target))], 1, 1> {
  let Constraints = "";
  let Size = 12;
  let hasSideEffects = 1;
}

def SI_ELSE : CFPseudoInstSI <
  (outs SReg_64:$dst),
  (ins SReg_64:$src, brtarget:$target, i1imm:$execfix), [], 1, 1> {
  let Constraints = "$src = $dst";
  let Size = 12;
  let hasSideEffects = 1;
}

def SI_LOOP : CFPseudoInstSI <
  (outs), (ins SReg_64:$saved, brtarget:$target),
  [(AMDGPUloop i64:$saved, bb:$target)], 1, 1> {
  let Size = 8;
  let isBranch = 0;
  let hasSideEffects = 1;
}

} // End isTerminator = 1

def SI_END_CF : CFPseudoInstSI <
  (outs), (ins SReg_64:$saved),
  [(int_amdgcn_end_cf i64:$saved)], 1, 1> {
  let Size = 4;
  let isAsCheapAsAMove = 1;
  let isReMaterializable = 1;
  let hasSideEffects = 1;
  let mayLoad = 1; // FIXME: Should not need memory flags
  let mayStore = 1;
}

def SI_BREAK : CFPseudoInstSI <
  (outs SReg_64:$dst), (ins SReg_64:$src),
  [(set i64:$dst, (int_amdgcn_break i64:$src))], 1> {
  let Size = 4;
  let isAsCheapAsAMove = 1;
  let isReMaterializable = 1;
}

def SI_IF_BREAK : CFPseudoInstSI <
  (outs SReg_64:$dst), (ins SReg_64:$vcc, SReg_64:$src),
  [(set i64:$dst, (int_amdgcn_if_break i1:$vcc, i64:$src))]> {
  let Size = 4;
  let isAsCheapAsAMove = 1;
  let isReMaterializable = 1;
}

def SI_ELSE_BREAK : CFPseudoInstSI <
  (outs SReg_64:$dst), (ins SReg_64:$src0, SReg_64:$src1),
  [(set i64:$dst, (int_amdgcn_else_break i64:$src0, i64:$src1))]> {
  let Size = 4;
  let isAsCheapAsAMove = 1;
  let isReMaterializable = 1;
}

let Uses = [EXEC], Defs = [EXEC,VCC] in {
def SI_KILL : PseudoInstSI <
  (outs), (ins VSrc_b32:$src),
  [(AMDGPUkill i32:$src)]> {
  let isConvergent = 1;
  let usesCustomInserter = 1;
}

def SI_KILL_TERMINATOR : SPseudoInstSI <
  (outs), (ins VSrc_b32:$src)> {
  let isTerminator = 1;
}

} // End Uses = [EXEC], Defs = [EXEC,VCC]

// Branch on undef scc. Used to avoid intermediate copy from
// IMPLICIT_DEF to SCC.
def SI_BR_UNDEF : SPseudoInstSI <(outs), (ins sopp_brtarget:$simm16)> {
  let isTerminator = 1;
  let usesCustomInserter = 1;
}

def SI_PS_LIVE : PseudoInstSI <
  (outs SReg_64:$dst), (ins),
  [(set i1:$dst, (int_amdgcn_ps_live))]> {
  let SALU = 1;
}

// Used as an isel pseudo to directly emit initialization with an
// s_mov_b32 rather than a copy of another initialized
// register. MachineCSE skips copies, and we don't want to have to
// fold operands before it runs.
def SI_INIT_M0 : SPseudoInstSI <(outs), (ins SSrc_b32:$src)> {
  let Defs = [M0];
  let usesCustomInserter = 1;
  let isAsCheapAsAMove = 1;
  let isReMaterializable = 1;
}

def SI_RETURN : SPseudoInstSI <
  (outs), (ins variable_ops), [(AMDGPUreturn)]> {
  let isTerminator = 1;
  let isBarrier = 1;
  let isReturn = 1;
  let hasSideEffects = 1;
  let hasNoSchedulingInfo = 1;
  let DisableWQM = 1;
}

let Defs = [M0, EXEC],
    UseNamedOperandTable = 1 in {

class SI_INDIRECT_SRC<RegisterClass rc> : VPseudoInstSI <
  (outs VGPR_32:$vdst),
  (ins rc:$src, VS_32:$idx, i32imm:$offset)> {
  let usesCustomInserter = 1;
}

class SI_INDIRECT_DST<RegisterClass rc> : VPseudoInstSI <
  (outs rc:$vdst),
  (ins rc:$src, VS_32:$idx, i32imm:$offset, VGPR_32:$val)> {
  let Constraints = "$src = $vdst";
  let usesCustomInserter = 1;
}

// TODO: We can support indirect SGPR access.
def SI_INDIRECT_SRC_V1 : SI_INDIRECT_SRC<VGPR_32>;
def SI_INDIRECT_SRC_V2 : SI_INDIRECT_SRC<VReg_64>;
def SI_INDIRECT_SRC_V4 : SI_INDIRECT_SRC<VReg_128>;
def SI_INDIRECT_SRC_V8 : SI_INDIRECT_SRC<VReg_256>;
def SI_INDIRECT_SRC_V16 : SI_INDIRECT_SRC<VReg_512>;

def SI_INDIRECT_DST_V1 : SI_INDIRECT_DST<VGPR_32>;
def SI_INDIRECT_DST_V2 : SI_INDIRECT_DST<VReg_64>;
def SI_INDIRECT_DST_V4 : SI_INDIRECT_DST<VReg_128>;
def SI_INDIRECT_DST_V8 : SI_INDIRECT_DST<VReg_256>;
def SI_INDIRECT_DST_V16 : SI_INDIRECT_DST<VReg_512>;

} // End Defs = [M0, EXEC], UseNamedOperandTable = 1

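// Spill / restore pseudos for SGPR register classes. These are expanded when
// frame indices are eliminated; on this generation that typically means
// v_writelane_b32 / v_readlane_b32 into a scratch VGPR (a note about the
// expected lowering, not something encoded in these definitions).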
multiclass SI_SPILL_SGPR <RegisterClass sgpr_class> {
  let UseNamedOperandTable = 1, SGPRSpill = 1, Uses = [EXEC] in {
    def _SAVE : PseudoInstSI <
      (outs),
      (ins sgpr_class:$data, i32imm:$addr)> {
      let mayStore = 1;
      let mayLoad = 0;
    }

    def _RESTORE : PseudoInstSI <
      (outs sgpr_class:$data),
      (ins i32imm:$addr)> {
      let mayStore = 0;
      let mayLoad = 1;
    }
  } // End UseNamedOperandTable = 1, SGPRSpill = 1, Uses = [EXEC]
}

// You cannot use M0 as the output of v_readlane_b32 instructions or
// use it in the sdata operand of SMEM instructions. We still need to
// be able to spill the physical register m0, so allow it for
// SI_SPILL_32_* instructions.
defm SI_SPILL_S32 : SI_SPILL_SGPR <SReg_32>;
defm SI_SPILL_S64 : SI_SPILL_SGPR <SReg_64>;
defm SI_SPILL_S128 : SI_SPILL_SGPR <SReg_128>;
defm SI_SPILL_S256 : SI_SPILL_SGPR <SReg_256>;
defm SI_SPILL_S512 : SI_SPILL_SGPR <SReg_512>;

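// Spill / restore pseudos for VGPR register classes. Frame-index elimination
// replaces each of these with one scratch buffer access per 32-bit
// sub-register, which is what the Size bound below accounts for.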
multiclass SI_SPILL_VGPR <RegisterClass vgpr_class> {
  let UseNamedOperandTable = 1, VGPRSpill = 1,
      SchedRW = [WriteVMEM] in {
    def _SAVE : VPseudoInstSI <
      (outs),
      (ins vgpr_class:$vdata, i32imm:$vaddr, SReg_128:$srsrc,
           SReg_32:$soffset, i32imm:$offset)> {
      let mayStore = 1;
      let mayLoad = 0;
      // (2 * 4) + (8 * num_subregs) bytes maximum
      let Size = !add(!shl(!srl(vgpr_class.Size, 5), 3), 8);
    }

    def _RESTORE : VPseudoInstSI <
      (outs vgpr_class:$vdata),
      (ins i32imm:$vaddr, SReg_128:$srsrc, SReg_32:$soffset,
           i32imm:$offset)> {
      let mayStore = 0;
      let mayLoad = 1;

      // (2 * 4) + (8 * num_subregs) bytes maximum
      let Size = !add(!shl(!srl(vgpr_class.Size, 5), 3), 8);
    }
  } // End UseNamedOperandTable = 1, VGPRSpill = 1, SchedRW = [WriteVMEM]
}

defm SI_SPILL_V32 : SI_SPILL_VGPR <VGPR_32>;
defm SI_SPILL_V64 : SI_SPILL_VGPR <VReg_64>;
defm SI_SPILL_V96 : SI_SPILL_VGPR <VReg_96>;
defm SI_SPILL_V128 : SI_SPILL_VGPR <VReg_128>;
defm SI_SPILL_V256 : SI_SPILL_VGPR <VReg_256>;
defm SI_SPILL_V512 : SI_SPILL_VGPR <VReg_512>;

def SI_PC_ADD_REL_OFFSET : SPseudoInstSI <
  (outs SReg_64:$dst),
  (ins si_ga:$ptr_lo, si_ga:$ptr_hi),
  [(set SReg_64:$dst,
      (i64 (SIpc_add_rel_offset (tglobaladdr:$ptr_lo), (tglobaladdr:$ptr_hi))))]> {
  let Defs = [SCC];
}

} // End SubtargetPredicate = isGCN

let Predicates = [isGCN] in {
def : Pat<
  (trap),
  (S_TRAP_PSEUDO TRAPID.LLVM_TRAP)
>;

def : Pat<
  (debugtrap),
  (S_TRAP_PSEUDO TRAPID.LLVM_DEBUG_TRAP)
>;

def : Pat<
  (AMDGPUelse i64:$src, bb:$target),
  (SI_ELSE $src, $target, 0)
>;

def : Pat <
  (int_AMDGPU_kilp),
  (SI_KILL (i32 0xbf800000))
>;

//===----------------------------------------------------------------------===//
// VOP1 Patterns
//===----------------------------------------------------------------------===//

let Predicates = [UnsafeFPMath] in {

//def : RcpPat<V_RCP_F64_e32, f64>;
//defm : RsqPat<V_RSQ_F64_e32, f64>;
//defm : RsqPat<V_RSQ_F32_e32, f32>;

def : RsqPat<V_RSQ_F32_e32, f32>;
def : RsqPat<V_RSQ_F64_e32, f64>;

// Convert (x - floor(x)) to fract(x)
def : Pat <
  (f32 (fsub (f32 (VOP3Mods f32:$x, i32:$mods)),
             (f32 (ffloor (f32 (VOP3Mods f32:$x, i32:$mods)))))),
  (V_FRACT_F32_e64 $mods, $x, DSTCLAMP.NONE, DSTOMOD.NONE)
>;

// Convert (x + (-floor(x))) to fract(x)
def : Pat <
  (f64 (fadd (f64 (VOP3Mods f64:$x, i32:$mods)),
             (f64 (fneg (f64 (ffloor (f64 (VOP3Mods f64:$x, i32:$mods)))))))),
  (V_FRACT_F64_e64 $mods, $x, DSTCLAMP.NONE, DSTOMOD.NONE)
>;

} // End Predicates = [UnsafeFPMath]


// f16_to_fp patterns
def : Pat <
  (f32 (f16_to_fp i32:$src0)),
  (V_CVT_F32_F16_e64 SRCMODS.NONE, $src0, DSTCLAMP.NONE, DSTOMOD.NONE)
>;

def : Pat <
  (f32 (f16_to_fp (and_oneuse i32:$src0, 0x7fff))),
  (V_CVT_F32_F16_e64 SRCMODS.ABS, $src0, DSTCLAMP.NONE, DSTOMOD.NONE)
>;

def : Pat <
  (f32 (f16_to_fp (or_oneuse i32:$src0, 0x8000))),
  (V_CVT_F32_F16_e64 SRCMODS.NEG_ABS, $src0, DSTCLAMP.NONE, DSTOMOD.NONE)
>;

def : Pat <
  (f32 (f16_to_fp (xor_oneuse i32:$src0, 0x8000))),
  (V_CVT_F32_F16_e64 SRCMODS.NEG, $src0, DSTCLAMP.NONE, DSTOMOD.NONE)
>;

def : Pat <
  (f64 (fpextend f16:$src)),
  (V_CVT_F64_F32_e32 (V_CVT_F32_F16_e32 $src))
>;

// fp_to_fp16 patterns
def : Pat <
  (i32 (AMDGPUfp_to_f16 (f32 (VOP3Mods0 f32:$src0, i32:$src0_modifiers, i1:$clamp, i32:$omod)))),
  (V_CVT_F16_F32_e64 $src0_modifiers, f32:$src0, $clamp, $omod)
>;

def : Pat <
  (i32 (fp_to_sint f16:$src)),
  (V_CVT_I32_F32_e32 (V_CVT_F32_F16_e32 $src))
>;

def : Pat <
  (i32 (fp_to_uint f16:$src)),
  (V_CVT_U32_F32_e32 (V_CVT_F32_F16_e32 $src))
>;

def : Pat <
  (f16 (sint_to_fp i32:$src)),
  (V_CVT_F16_F32_e32 (V_CVT_F32_I32_e32 $src))
>;

def : Pat <
  (f16 (uint_to_fp i32:$src)),
  (V_CVT_F16_F32_e32 (V_CVT_F32_U32_e32 $src))
>;

//===----------------------------------------------------------------------===//
// VOP2 Patterns
//===----------------------------------------------------------------------===//

multiclass FMADPat <ValueType vt, Instruction inst> {
  def : Pat <
    (vt (fmad (VOP3NoMods0 vt:$src0, i32:$src0_modifiers, i1:$clamp, i32:$omod),
              (VOP3NoMods vt:$src1, i32:$src1_modifiers),
              (VOP3NoMods vt:$src2, i32:$src2_modifiers))),
    (inst $src0_modifiers, $src0, $src1_modifiers, $src1,
          $src2_modifiers, $src2, $clamp, $omod)
  >;
}

defm : FMADPat <f16, V_MAC_F16_e64>;
defm : FMADPat <f32, V_MAC_F32_e64>;

class FMADModsPat<Instruction inst, SDPatternOperator mad_opr> : Pat<
  (f32 (mad_opr (VOP3Mods f32:$src0, i32:$src0_mod),
                (VOP3Mods f32:$src1, i32:$src1_mod),
                (VOP3Mods f32:$src2, i32:$src2_mod))),
  (inst $src0_mod, $src0, $src1_mod, $src1,
        $src2_mod, $src2, DSTCLAMP.NONE, DSTOMOD.NONE)
>;

def : FMADModsPat<V_MAD_F32, AMDGPUfmad_ftz>;

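// select on a condition-mask i1 becomes v_cndmask_b32. Note the operand order
// in the result: false value, true value, then the condition.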
multiclass SelectPat <ValueType vt, Instruction inst> {
  def : Pat <
    (vt (select i1:$src0, vt:$src1, vt:$src2)),
    (inst $src2, $src1, $src0)
  >;
}

defm : SelectPat <i16, V_CNDMASK_B32_e64>;
defm : SelectPat <i32, V_CNDMASK_B32_e64>;
defm : SelectPat <f16, V_CNDMASK_B32_e64>;
defm : SelectPat <f32, V_CNDMASK_B32_e64>;

def : Pat <
  (i32 (add (i32 (ctpop i32:$popcnt)), i32:$val)),
  (V_BCNT_U32_B32_e64 $popcnt, $val)
>;

/********** ============================================ **********/
/********** Extraction, Insertion, Building and Casting  **********/
/********** ============================================ **********/

foreach Index = 0-2 in {
  def Extract_Element_v2i32_#Index : Extract_Element <
    i32, v2i32, Index, !cast<SubRegIndex>(sub#Index)
  >;
  def Insert_Element_v2i32_#Index : Insert_Element <
    i32, v2i32, Index, !cast<SubRegIndex>(sub#Index)
  >;

  def Extract_Element_v2f32_#Index : Extract_Element <
    f32, v2f32, Index, !cast<SubRegIndex>(sub#Index)
  >;
  def Insert_Element_v2f32_#Index : Insert_Element <
    f32, v2f32, Index, !cast<SubRegIndex>(sub#Index)
  >;
}

foreach Index = 0-3 in {
  def Extract_Element_v4i32_#Index : Extract_Element <
    i32, v4i32, Index, !cast<SubRegIndex>(sub#Index)
  >;
  def Insert_Element_v4i32_#Index : Insert_Element <
    i32, v4i32, Index, !cast<SubRegIndex>(sub#Index)
  >;

  def Extract_Element_v4f32_#Index : Extract_Element <
    f32, v4f32, Index, !cast<SubRegIndex>(sub#Index)
  >;
  def Insert_Element_v4f32_#Index : Insert_Element <
    f32, v4f32, Index, !cast<SubRegIndex>(sub#Index)
  >;
}

foreach Index = 0-7 in {
  def Extract_Element_v8i32_#Index : Extract_Element <
    i32, v8i32, Index, !cast<SubRegIndex>(sub#Index)
  >;
  def Insert_Element_v8i32_#Index : Insert_Element <
    i32, v8i32, Index, !cast<SubRegIndex>(sub#Index)
  >;

  def Extract_Element_v8f32_#Index : Extract_Element <
    f32, v8f32, Index, !cast<SubRegIndex>(sub#Index)
  >;
  def Insert_Element_v8f32_#Index : Insert_Element <
    f32, v8f32, Index, !cast<SubRegIndex>(sub#Index)
  >;
}

foreach Index = 0-15 in {
  def Extract_Element_v16i32_#Index : Extract_Element <
    i32, v16i32, Index, !cast<SubRegIndex>(sub#Index)
  >;
  def Insert_Element_v16i32_#Index : Insert_Element <
    i32, v16i32, Index, !cast<SubRegIndex>(sub#Index)
  >;

  def Extract_Element_v16f32_#Index : Extract_Element <
    f32, v16f32, Index, !cast<SubRegIndex>(sub#Index)
  >;
  def Insert_Element_v16f32_#Index : Insert_Element <
    f32, v16f32, Index, !cast<SubRegIndex>(sub#Index)
  >;
}

// FIXME: Why do only some of these type combinations for SReg and
// VReg?
// 16-bit bitcast
def : BitConvert <i16, f16, VGPR_32>;
def : BitConvert <f16, i16, VGPR_32>;
def : BitConvert <i16, f16, SReg_32>;
def : BitConvert <f16, i16, SReg_32>;

// 32-bit bitcast
def : BitConvert <i32, f32, VGPR_32>;
def : BitConvert <f32, i32, VGPR_32>;
def : BitConvert <i32, f32, SReg_32>;
def : BitConvert <f32, i32, SReg_32>;
def : BitConvert <v2i16, i32, SReg_32>;
def : BitConvert <i32, v2i16, SReg_32>;
def : BitConvert <v2f16, i32, SReg_32>;
def : BitConvert <i32, v2f16, SReg_32>;
def : BitConvert <v2i16, v2f16, SReg_32>;
def : BitConvert <v2f16, v2i16, SReg_32>;
def : BitConvert <v2f16, f32, SReg_32>;
def : BitConvert <f32, v2f16, SReg_32>;
def : BitConvert <v2i16, f32, SReg_32>;
def : BitConvert <f32, v2i16, SReg_32>;

// 64-bit bitcast
def : BitConvert <i64, f64, VReg_64>;
def : BitConvert <f64, i64, VReg_64>;
def : BitConvert <v2i32, v2f32, VReg_64>;
def : BitConvert <v2f32, v2i32, VReg_64>;
def : BitConvert <i64, v2i32, VReg_64>;
def : BitConvert <v2i32, i64, VReg_64>;
def : BitConvert <i64, v2f32, VReg_64>;
def : BitConvert <v2f32, i64, VReg_64>;
def : BitConvert <f64, v2f32, VReg_64>;
def : BitConvert <v2f32, f64, VReg_64>;
def : BitConvert <f64, v2i32, VReg_64>;
def : BitConvert <v2i32, f64, VReg_64>;
def : BitConvert <v4i32, v4f32, VReg_128>;
def : BitConvert <v4f32, v4i32, VReg_128>;

// 128-bit bitcast
def : BitConvert <v2i64, v4i32, SReg_128>;
def : BitConvert <v4i32, v2i64, SReg_128>;
def : BitConvert <v2f64, v4f32, VReg_128>;
def : BitConvert <v2f64, v4i32, VReg_128>;
def : BitConvert <v4f32, v2f64, VReg_128>;
def : BitConvert <v4i32, v2f64, VReg_128>;
def : BitConvert <v2i64, v2f64, VReg_128>;
def : BitConvert <v2f64, v2i64, VReg_128>;

// 256-bit bitcast
def : BitConvert <v8i32, v8f32, SReg_256>;
def : BitConvert <v8f32, v8i32, SReg_256>;
def : BitConvert <v8i32, v8f32, VReg_256>;
def : BitConvert <v8f32, v8i32, VReg_256>;

// 512-bit bitcast
def : BitConvert <v16i32, v16f32, VReg_512>;
def : BitConvert <v16f32, v16i32, VReg_512>;

/********** =================== **********/
/********** Src & Dst modifiers **********/
/********** =================== **********/


// If denormals are not enabled, it only impacts the compare of the
// inputs. The output result is not flushed.
class ClampPat<Instruction inst, ValueType vt> : Pat <
  (vt (AMDGPUclamp
    (VOP3Mods0Clamp vt:$src0, i32:$src0_modifiers, i32:$omod))),
  (inst i32:$src0_modifiers, vt:$src0,
        i32:$src0_modifiers, vt:$src0, DSTCLAMP.ENABLE, $omod)
>;

def : ClampPat<V_MAX_F32_e64, f32>;
def : ClampPat<V_MAX_F64, f64>;
def : ClampPat<V_MAX_F16_e64, f16>;

/********** ================================ **********/
/********** Floating point absolute/negative **********/
/********** ================================ **********/

// Prevent expanding both fneg and fabs.

def : Pat <
  (fneg (fabs f32:$src)),
  (S_OR_B32 $src, (S_MOV_B32 (i32 0x80000000))) // Set sign bit
>;

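// For f64, the sign bit lives in the high 32-bit sub-register, so the
// patterns below rebuild the register pair with sub0 unchanged and apply the
// bit operation only to sub1.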
// FIXME: Should use S_OR_B32
def : Pat <
  (fneg (fabs f64:$src)),
  (REG_SEQUENCE VReg_64,
    (i32 (EXTRACT_SUBREG f64:$src, sub0)),
    sub0,
    (V_OR_B32_e32 (i32 (EXTRACT_SUBREG f64:$src, sub1)),
                  (V_MOV_B32_e32 (i32 0x80000000))), // Set sign bit.
    sub1)
>;

def : Pat <
  (fabs f32:$src),
  (V_AND_B32_e64 $src, (V_MOV_B32_e32 (i32 0x7fffffff)))
>;

def : Pat <
  (fneg f32:$src),
  (V_XOR_B32_e32 $src, (V_MOV_B32_e32 (i32 0x80000000)))
>;

def : Pat <
  (fabs f64:$src),
  (REG_SEQUENCE VReg_64,
    (i32 (EXTRACT_SUBREG f64:$src, sub0)),
    sub0,
    (V_AND_B32_e64 (i32 (EXTRACT_SUBREG f64:$src, sub1)),
                   (V_MOV_B32_e32 (i32 0x7fffffff))), // Clear sign bit.
    sub1)
>;

def : Pat <
  (fneg f64:$src),
  (REG_SEQUENCE VReg_64,
    (i32 (EXTRACT_SUBREG f64:$src, sub0)),
    sub0,
    (V_XOR_B32_e32 (i32 (EXTRACT_SUBREG f64:$src, sub1)),
                   (i32 (V_MOV_B32_e32 (i32 0x80000000)))),
    sub1)
>;

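// fcopysign is implemented with v_bfi_b32: (mask & $src0) | (~mask & $src1),
// so the magnitude mask keeps $src0's magnitude and takes the sign from
// $src1. When the operands have different widths, the f16 sign bit is first
// shifted to or from bit 15 of the wider value's high word.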
def : Pat <
  (fcopysign f16:$src0, f16:$src1),
  (V_BFI_B32 (S_MOV_B32 (i32 0x00007fff)), $src0, $src1)
>;

def : Pat <
  (fcopysign f32:$src0, f16:$src1),
  (V_BFI_B32 (S_MOV_B32 (i32 0x7fffffff)), $src0,
             (V_LSHLREV_B32_e64 (i32 16), $src1))
>;

def : Pat <
  (fcopysign f64:$src0, f16:$src1),
  (REG_SEQUENCE SReg_64,
    (i32 (EXTRACT_SUBREG $src0, sub0)), sub0,
    (V_BFI_B32 (S_MOV_B32 (i32 0x7fffffff)), (i32 (EXTRACT_SUBREG $src0, sub1)),
               (V_LSHLREV_B32_e64 (i32 16), $src1)), sub1)
>;

def : Pat <
  (fcopysign f16:$src0, f32:$src1),
  (V_BFI_B32 (S_MOV_B32 (i32 0x00007fff)), $src0,
             (V_LSHRREV_B32_e64 (i32 16), $src1))
>;

def : Pat <
  (fcopysign f16:$src0, f64:$src1),
  (V_BFI_B32 (S_MOV_B32 (i32 0x00007fff)), $src0,
             (V_LSHRREV_B32_e64 (i32 16), (EXTRACT_SUBREG $src1, sub1)))
>;

def : Pat <
  (fneg f16:$src),
  (V_XOR_B32_e32 $src, (V_MOV_B32_e32 (i32 0x00008000)))
>;

def : Pat <
  (fabs f16:$src),
  (V_AND_B32_e64 $src, (V_MOV_B32_e32 (i32 0x00007fff)))
>;

def : Pat <
  (fneg (fabs f16:$src)),
  (S_OR_B32 $src, (S_MOV_B32 (i32 0x00008000))) // Set sign bit
>;

def : Pat <
  (fneg v2f16:$src),
  (V_XOR_B32_e64 (S_MOV_B32 (i32 0x80008000)), $src)
>;

def : Pat <
  (fabs v2f16:$src),
  (V_AND_B32_e64 (S_MOV_B32 (i32 0x7fff7fff)), $src)
>;

// This is really (fneg (fabs v2f16:$src))
//
// fabs is not reported as free because there is modifier for it in
// VOP3P instructions, so it is turned into the bit op.
def : Pat <
  (fneg (v2f16 (bitconvert (and_oneuse i32:$src, 0x7fff7fff)))),
  (S_OR_B32 (S_MOV_B32 (i32 0x80008000)), $src) // Set sign bit
>;

/********** ================== **********/
/********** Immediate Patterns **********/
/********** ================== **********/

def : Pat <
  (VGPRImm<(i32 imm)>:$imm),
  (V_MOV_B32_e32 imm:$imm)
>;

def : Pat <
  (VGPRImm<(f32 fpimm)>:$imm),
  (V_MOV_B32_e32 (f32 (bitcast_fpimm_to_i32 $imm)))
>;

def : Pat <
  (i32 imm:$imm),
  (S_MOV_B32 imm:$imm)
>;

// FIXME: Workaround for ordering issue with peephole optimizer where
// a register class copy interferes with immediate folding. Should
// use s_mov_b32, which can be shrunk to s_movk_i32
def : Pat <
  (VGPRImm<(f16 fpimm)>:$imm),
  (V_MOV_B32_e32 (f16 (bitcast_fpimm_to_i32 $imm)))
>;

def : Pat <
  (f32 fpimm:$imm),
  (S_MOV_B32 (f32 (bitcast_fpimm_to_i32 $imm)))
>;

def : Pat <
  (f16 fpimm:$imm),
  (S_MOV_B32 (i32 (bitcast_fpimm_to_i32 $imm)))
>;

def : Pat <
  (i32 frameindex:$fi),
  (V_MOV_B32_e32 (i32 (frameindex_to_targetframeindex $fi)))
>;

def : Pat <
  (i64 InlineImm<i64>:$imm),
  (S_MOV_B64 InlineImm<i64>:$imm)
>;

// XXX - Should this use a s_cmp to set SCC?

// Set to sign-extended 64-bit value (true = -1, false = 0)
def : Pat <
  (i1 imm:$imm),
  (S_MOV_B64 (i64 (as_i64imm $imm)))
>;

def : Pat <
  (f64 InlineFPImm<f64>:$imm),
  (S_MOV_B64 (f64 (bitcast_fpimm_to_i64 InlineFPImm<f64>:$imm)))
>;

/********** ================== **********/
/********** Intrinsic Patterns **********/
/********** ================== **********/

def : POW_Common <V_LOG_F32_e32, V_EXP_F32_e32, V_MUL_LEGACY_F32_e32>;

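// An i1 value is carried in a 64-bit condition mask (one bit per lane), so
// integer extension of i1 is a per-lane select between 0 and 1, or 0 and -1
// for sign extension.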
def : Pat <
  (i32 (sext i1:$src0)),
  (V_CNDMASK_B32_e64 (i32 0), (i32 -1), $src0)
>;

class Ext32Pat <SDNode ext> : Pat <
  (i32 (ext i1:$src0)),
  (V_CNDMASK_B32_e64 (i32 0), (i32 1), $src0)
>;

def : Ext32Pat <zext>;
def : Ext32Pat <anyext>;

// The multiplication scales from [0,1] to the unsigned integer range
def : Pat <
  (AMDGPUurecip i32:$src0),
  (V_CVT_U32_F32_e32
    (V_MUL_F32_e32 (i32 CONST.FP_UINT_MAX_PLUS_1),
                   (V_RCP_IFLAG_F32_e32 (V_CVT_F32_U32_e32 $src0))))
>;

//===----------------------------------------------------------------------===//
// VOP3 Patterns
//===----------------------------------------------------------------------===//

def : IMad24Pat<V_MAD_I32_I24>;
def : UMad24Pat<V_MAD_U32_U24>;

defm : BFIPatterns <V_BFI_B32, S_MOV_B32, SReg_64>;
def : ROTRPattern <V_ALIGNBIT_B32>;

/********** ====================== **********/
/********** Indirect addressing    **********/
/********** ====================== **********/

multiclass SI_INDIRECT_Pattern <ValueType vt, ValueType eltvt, string VecSize> {
  // Extract with offset
  def : Pat<
    (eltvt (extractelt vt:$src, (MOVRELOffset i32:$idx, (i32 imm:$offset)))),
    (!cast<Instruction>("SI_INDIRECT_SRC_"#VecSize) $src, $idx, imm:$offset)
  >;

  // Insert with offset
  def : Pat<
    (insertelt vt:$src, eltvt:$val, (MOVRELOffset i32:$idx, (i32 imm:$offset))),
    (!cast<Instruction>("SI_INDIRECT_DST_"#VecSize) $src, $idx, imm:$offset, $val)
  >;
}

defm : SI_INDIRECT_Pattern <v2f32, f32, "V2">;
defm : SI_INDIRECT_Pattern <v4f32, f32, "V4">;
defm : SI_INDIRECT_Pattern <v8f32, f32, "V8">;
defm : SI_INDIRECT_Pattern <v16f32, f32, "V16">;

defm : SI_INDIRECT_Pattern <v2i32, i32, "V2">;
defm : SI_INDIRECT_Pattern <v4i32, i32, "V4">;
defm : SI_INDIRECT_Pattern <v8i32, i32, "V8">;
defm : SI_INDIRECT_Pattern <v16i32, i32, "V16">;

//===----------------------------------------------------------------------===//
// SAD Patterns
//===----------------------------------------------------------------------===//

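// Both forms compute |src0 - src1| + src2, which is what v_sad_u32 does; the
// first uses umax/umin, the second an explicit unsigned compare-and-select.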
def : Pat <
  (add (sub_oneuse (umax i32:$src0, i32:$src1),
                   (umin i32:$src0, i32:$src1)),
       i32:$src2),
  (V_SAD_U32 $src0, $src1, $src2)
>;

def : Pat <
  (add (select_oneuse (i1 (setugt i32:$src0, i32:$src1)),
                      (sub i32:$src0, i32:$src1),
                      (sub i32:$src1, i32:$src0)),
       i32:$src2),
  (V_SAD_U32 $src0, $src1, $src2)
>;

//===----------------------------------------------------------------------===//
// Conversion Patterns
//===----------------------------------------------------------------------===//

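// S_BFE's second operand packs the bit offset in bits [5:0] and the field
// width in bits [22:16], hence the "width << 16 | offset" immediates below.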
def : Pat<(i32 (sext_inreg i32:$src, i1)),
  (S_BFE_I32 i32:$src, (i32 65536))>; // 0 | 1 << 16

// Handle sext_inreg in i64
def : Pat <
  (i64 (sext_inreg i64:$src, i1)),
  (S_BFE_I64 i64:$src, (i32 0x10000)) // 0 | 1 << 16
>;

def : Pat <
  (i16 (sext_inreg i16:$src, i1)),
  (S_BFE_I32 $src, (i32 0x00010000)) // 0 | 1 << 16
>;

def : Pat <
  (i16 (sext_inreg i16:$src, i8)),
  (S_BFE_I32 $src, (i32 0x80000)) // 0 | 8 << 16
>;

def : Pat <
  (i64 (sext_inreg i64:$src, i8)),
  (S_BFE_I64 i64:$src, (i32 0x80000)) // 0 | 8 << 16
>;

def : Pat <
  (i64 (sext_inreg i64:$src, i16)),
  (S_BFE_I64 i64:$src, (i32 0x100000)) // 0 | 16 << 16
>;

def : Pat <
  (i64 (sext_inreg i64:$src, i32)),
  (S_BFE_I64 i64:$src, (i32 0x200000)) // 0 | 32 << 16
>;

def : Pat <
  (i64 (zext i32:$src)),
  (REG_SEQUENCE SReg_64, $src, sub0, (S_MOV_B32 (i32 0)), sub1)
>;

def : Pat <
  (i64 (anyext i32:$src)),
  (REG_SEQUENCE SReg_64, $src, sub0, (i32 (IMPLICIT_DEF)), sub1)
>;

class ZExt_i64_i1_Pat <SDNode ext> : Pat <
  (i64 (ext i1:$src)),
  (REG_SEQUENCE VReg_64,
    (V_CNDMASK_B32_e64 (i32 0), (i32 1), $src), sub0,
    (S_MOV_B32 (i32 0)), sub1)
>;

def : ZExt_i64_i1_Pat<zext>;
def : ZExt_i64_i1_Pat<anyext>;

// FIXME: We need to use COPY_TO_REGCLASS to work-around the fact that
// REG_SEQUENCE patterns don't support instructions with multiple outputs.
def : Pat <
  (i64 (sext i32:$src)),
  (REG_SEQUENCE SReg_64, $src, sub0,
    (i32 (COPY_TO_REGCLASS (S_ASHR_I32 $src, (i32 31)), SReg_32_XM0)), sub1)
>;

def : Pat <
  (i64 (sext i1:$src)),
  (REG_SEQUENCE VReg_64,
    (V_CNDMASK_B32_e64 (i32 0), (i32 -1), $src), sub0,
    (V_CNDMASK_B32_e64 (i32 0), (i32 -1), $src), sub1)
>;

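// Converting an FP value to i1 is selected as an equality compare against 1.0
// (unsigned) or -1.0 (signed); KOne is the integer bit pattern of that
// constant.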
class FPToI1Pat<Instruction Inst, int KOne, ValueType kone_type, ValueType vt, SDPatternOperator fp_to_int> : Pat <
  (i1 (fp_to_int (vt (VOP3Mods vt:$src0, i32:$src0_modifiers)))),
  (i1 (Inst 0, (kone_type KOne), $src0_modifiers, $src0, DSTCLAMP.NONE, DSTOMOD.NONE))
>;

def : FPToI1Pat<V_CMP_EQ_F32_e64, CONST.FP32_ONE, i32, f32, fp_to_uint>;
def : FPToI1Pat<V_CMP_EQ_F32_e64, CONST.FP32_NEG_ONE, i32, f32, fp_to_sint>;
def : FPToI1Pat<V_CMP_EQ_F64_e64, CONST.FP64_ONE, i64, f64, fp_to_uint>;
def : FPToI1Pat<V_CMP_EQ_F64_e64, CONST.FP64_NEG_ONE, i64, f64, fp_to_sint>;

// If we need to perform a logical operation on i1 values, we need to
// use vector comparisons since there is only one SCC register. Vector
// comparisons still write to a pair of SGPRs, so treat these as
// 64-bit comparisons. When legalizing SGPR copies, instructions
// resulting in the copies from SCC to these instructions will be
// moved to the VALU.
def : Pat <
  (i1 (and i1:$src0, i1:$src1)),
  (S_AND_B64 $src0, $src1)
>;

def : Pat <
  (i1 (or i1:$src0, i1:$src1)),
  (S_OR_B64 $src0, $src1)
>;

def : Pat <
  (i1 (xor i1:$src0, i1:$src1)),
  (S_XOR_B64 $src0, $src1)
>;

def : Pat <
  (f32 (sint_to_fp i1:$src)),
  (V_CNDMASK_B32_e64 (i32 0), (i32 CONST.FP32_NEG_ONE), $src)
>;

def : Pat <
  (f32 (uint_to_fp i1:$src)),
  (V_CNDMASK_B32_e64 (i32 0), (i32 CONST.FP32_ONE), $src)
>;

def : Pat <
  (f64 (sint_to_fp i1:$src)),
  (V_CVT_F64_I32_e32 (V_CNDMASK_B32_e64 (i32 0), (i32 -1), $src))
>;

def : Pat <
  (f64 (uint_to_fp i1:$src)),
  (V_CVT_F64_U32_e32 (V_CNDMASK_B32_e64 (i32 0), (i32 1), $src))
>;

//===----------------------------------------------------------------------===//
// Miscellaneous Patterns
//===----------------------------------------------------------------------===//

def : Pat <
  (i32 (trunc i64:$a)),
  (EXTRACT_SUBREG $a, sub0)
>;

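// Truncation to i1 only observes the low bit: lower it as a compare of
// (x & 1) against 1, producing a condition mask.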
def : Pat <
  (i1 (trunc i32:$a)),
  (V_CMP_EQ_U32_e64 (S_AND_B32 (i32 1), $a), (i32 1))
>;

def : Pat <
  (i1 (trunc i16:$a)),
  (V_CMP_EQ_U32_e64 (S_AND_B32 (i32 1), $a), (i32 1))
>;

def : Pat <
  (i1 (trunc i64:$a)),
  (V_CMP_EQ_U32_e64 (S_AND_B32 (i32 1),
                    (i32 (EXTRACT_SUBREG $a, sub0))), (i32 1))
>;

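// bswap via two rotates: rotr(x, 24) supplies bytes 0 and 2 of the result,
// rotr(x, 8) supplies bytes 1 and 3, and v_bfi_b32 merges them under the
// 0x00ff00ff mask.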
def : Pat <
  (i32 (bswap i32:$a)),
  (V_BFI_B32 (S_MOV_B32 (i32 0x00ff00ff)),
             (V_ALIGNBIT_B32 $a, $a, (i32 24)),
             (V_ALIGNBIT_B32 $a, $a, (i32 8)))
>;

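// s_bfm_b32 dst, src0, src1 computes ((1 << src0) - 1) << src1, i.e. a field
// of src0 one-bits starting at bit src1, which is exactly the expression
// matched here.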
multiclass BFMPatterns <ValueType vt, InstSI BFM, InstSI MOV> {
  def : Pat <
    (vt (shl (vt (add (vt (shl 1, vt:$a)), -1)), vt:$b)),
    (BFM $a, $b)
  >;

  def : Pat <
    (vt (add (vt (shl 1, vt:$a)), -1)),
    (BFM $a, (MOV (i32 0)))
  >;
}

defm : BFMPatterns <i32, S_BFM_B32, S_MOV_B32>;
// FIXME: defm : BFMPatterns <i64, S_BFM_B64, S_MOV_B64>;
defm : BFEPattern <V_BFE_U32, V_BFE_I32, S_MOV_B32>;

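// fcanonicalize is lowered as a multiply by 1.0, which quiets signaling NaNs
// and flushes denormals according to the current float mode.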
def : Pat<
  (fcanonicalize (f16 (VOP3Mods f16:$src, i32:$src_mods))),
  (V_MUL_F16_e64 0, (i32 CONST.FP16_ONE), $src_mods, $src, 0, 0)
>;

def : Pat<
  (fcanonicalize (f32 (VOP3Mods f32:$src, i32:$src_mods))),
  (V_MUL_F32_e64 0, (i32 CONST.FP32_ONE), $src_mods, $src, 0, 0)
>;

def : Pat<
  (fcanonicalize (f64 (VOP3Mods f64:$src, i32:$src_mods))),
  (V_MUL_F64 0, CONST.FP64_ONE, $src_mods, $src, 0, 0)
>;

def : Pat<
  (fcanonicalize (v2f16 (VOP3PMods v2f16:$src, i32:$src_mods))),
  (V_PK_MUL_F16 SRCMODS.OP_SEL_1, (i32 CONST.V2FP16_ONE), $src_mods, $src, DSTCLAMP.NONE)
>;


// Allow integer inputs
class ExpPattern<SDPatternOperator node, ValueType vt, Instruction Inst> : Pat<
  (node (i8 timm:$tgt), (i8 timm:$en), vt:$src0, vt:$src1, vt:$src2, vt:$src3, (i1 timm:$compr), (i1 timm:$vm)),
  (Inst i8:$tgt, vt:$src0, vt:$src1, vt:$src2, vt:$src3, i1:$vm, i1:$compr, i8:$en)
>;

def : ExpPattern<AMDGPUexport, i32, EXP>;
def : ExpPattern<AMDGPUexport_done, i32, EXP_DONE>;

def : Pat <
  (v2i16 (build_vector i16:$src0, i16:$src1)),
  (v2i16 (S_PACK_LL_B32_B16 $src0, $src1))
>;

// With multiple uses of the shift, this will duplicate the shift and
// increase register pressure.
def : Pat <
  (v2i16 (build_vector i16:$src0, (i16 (trunc (srl_oneuse i32:$src1, (i32 16)))))),
  (v2i16 (S_PACK_LH_B32_B16 i16:$src0, i32:$src1))
>;

def : Pat <
  (v2i16 (build_vector (i16 (trunc (srl_oneuse i32:$src0, (i32 16)))),
                       (i16 (trunc (srl_oneuse i32:$src1, (i32 16)))))),
  (v2i16 (S_PACK_HH_B32_B16 $src0, $src1))
>;

// TODO: Should source modifiers be matched to v_pack_b32_f16?
def : Pat <
  (v2f16 (build_vector f16:$src0, f16:$src1)),
  (v2f16 (S_PACK_LL_B32_B16 $src0, $src1))
>;

// def : Pat <
//   (v2f16 (scalar_to_vector f16:$src0)),
//   (COPY $src0)
// >;

// def : Pat <
//   (v2i16 (scalar_to_vector i16:$src0)),
//   (COPY $src0)
// >;

//===----------------------------------------------------------------------===//
// Fract Patterns
//===----------------------------------------------------------------------===//

let Predicates = [isSI] in {

// V_FRACT is buggy on SI, so the F32 version is never used and (x-floor(x)) is
// used instead. However, SI doesn't have V_FLOOR_F64, so the most efficient
// way to implement it is using V_FRACT_F64.
// The workaround for the V_FRACT bug is:
//    fract(x) = isnan(x) ? x : min(V_FRACT(x), 0.99999999999999999)

// Convert floor(x) to (x - fract(x))
def : Pat <
  (f64 (ffloor (f64 (VOP3Mods f64:$x, i32:$mods)))),
  (V_ADD_F64
      $mods,
      $x,
      SRCMODS.NEG,
      (V_CNDMASK_B64_PSEUDO
         (V_MIN_F64
             SRCMODS.NONE,
             (V_FRACT_F64_e64 $mods, $x, DSTCLAMP.NONE, DSTOMOD.NONE),
             SRCMODS.NONE,
             (V_MOV_B64_PSEUDO 0x3fefffffffffffff),
             DSTCLAMP.NONE, DSTOMOD.NONE),
         $x,
         (V_CMP_CLASS_F64_e64 SRCMODS.NONE, $x, (i32 3 /*NaN*/))),
      DSTCLAMP.NONE, DSTOMOD.NONE)
>;

} // End Predicates = [isSI]

//============================================================================//
// Miscellaneous Optimization Patterns
//============================================================================//

// Undo sub x, c -> add x, -c canonicalization since c is more likely
// an inline immediate than -c.
// TODO: Also do for 64-bit.
def : Pat<
  (add i32:$src0, (i32 NegSubInlineConst32:$src1)),
  (S_SUB_I32 $src0, NegSubInlineConst32:$src1)
>;

def : SHA256MaPattern <V_BFI_B32, V_XOR_B32_e64>;

def : IntMed3Pat<V_MED3_I32, smax, smax_oneuse, smin_oneuse>;
def : IntMed3Pat<V_MED3_U32, umax, umax_oneuse, umin_oneuse>;

// This matches 16 permutations of
// max(min(x, y), min(max(x, y), z))
class FPMed3Pat<ValueType vt,
                Instruction med3Inst> : Pat<
  (fmaxnum (fminnum_oneuse (VOP3Mods_nnan vt:$src0, i32:$src0_mods),
                           (VOP3Mods_nnan vt:$src1, i32:$src1_mods)),
           (fminnum_oneuse (fmaxnum_oneuse (VOP3Mods_nnan vt:$src0, i32:$src0_mods),
                                           (VOP3Mods_nnan vt:$src1, i32:$src1_mods)),
                           (vt (VOP3Mods_nnan vt:$src2, i32:$src2_mods)))),
  (med3Inst $src0_mods, $src0, $src1_mods, $src1, $src2_mods, $src2, DSTCLAMP.NONE, DSTOMOD.NONE)
>;

def : FPMed3Pat<f32, V_MED3_F32>;

let Predicates = [isGFX9] in {
def : FPMed3Pat<f16, V_MED3_F16>;
def : IntMed3Pat<V_MED3_I16, smax, smax_oneuse, smin_oneuse, i16>;
def : IntMed3Pat<V_MED3_U16, umax, umax_oneuse, umin_oneuse, i16>;
} // End Predicates = [isGFX9]

//============================================================================//
// Assembler aliases
//============================================================================//

def : MnemonicAlias<"v_add_u32", "v_add_i32">;
def : MnemonicAlias<"v_sub_u32", "v_sub_i32">;
def : MnemonicAlias<"v_subrev_u32", "v_subrev_i32">;

} // End isGCN predicate