blob: be82c9f03e7cfd00b10f0b452954ae7c09f65995 [file] [log] [blame]
Tom Stellard75aadc22012-12-11 21:25:42 +00001//===-- SIInstructions.td - SI Instruction Defintions ---------------------===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9// This file was originally auto-generated from a GPU register header file and
10// all the instruction definitions were originally commented out. Instructions
11// that are not yet supported remain commented out.
12//===----------------------------------------------------------------------===//
13
Eric Christopher7792e322015-01-30 23:24:40 +000014def isGCN : Predicate<"Subtarget->getGeneration() "
Matt Arsenault43e92fe2016-06-24 06:30:11 +000015 ">= SISubtarget::SOUTHERN_ISLANDS">,
Tom Stellardd7e6f132015-04-08 01:09:26 +000016 AssemblerPredicate<"FeatureGCN">;
Marek Olsak7d777282015-03-24 13:40:15 +000017def isSI : Predicate<"Subtarget->getGeneration() "
Matt Arsenault43e92fe2016-06-24 06:30:11 +000018 "== SISubtarget::SOUTHERN_ISLANDS">,
Matt Arsenaultd6adfb42015-09-24 19:52:21 +000019 AssemblerPredicate<"FeatureSouthernIslands">;
20
Tom Stellardec87f842015-05-25 16:15:54 +000021def has16BankLDS : Predicate<"Subtarget->getLDSBankCount() == 16">;
22def has32BankLDS : Predicate<"Subtarget->getLDSBankCount() == 32">;
Matt Arsenaultcc88ce32016-10-12 18:00:51 +000023def HasVGPRIndexMode : Predicate<"Subtarget->hasVGPRIndexMode()">,
24 AssemblerPredicate<"FeatureVGPRIndexMode">;
25def HasMovrel : Predicate<"Subtarget->hasMovrel()">,
26 AssemblerPredicate<"FeatureMovrel">;
Tom Stellardec87f842015-05-25 16:15:54 +000027
Valery Pykhtin2828b9b2016-09-19 14:39:49 +000028include "VOPInstructions.td"
Valery Pykhtina34fb492016-08-30 15:20:31 +000029include "SOPInstructions.td"
Valery Pykhtin1b138862016-09-01 09:56:47 +000030include "SMInstructions.td"
Valery Pykhtin8bc65962016-09-05 11:22:51 +000031include "FLATInstructions.td"
Valery Pykhtinb66e5eb2016-09-10 13:09:16 +000032include "BUFInstructions.td"
Valery Pykhtina34fb492016-08-30 15:20:31 +000033
Marek Olsak5df00d62014-12-07 12:18:57 +000034let SubtargetPredicate = isGCN in {
Tom Stellard0e70de52014-05-16 20:56:45 +000035
Tom Stellard8d6d4492014-04-22 16:33:57 +000036//===----------------------------------------------------------------------===//
Tom Stellard3a35d8f2014-10-01 14:44:45 +000037// EXP Instructions
38//===----------------------------------------------------------------------===//
39
Matt Arsenault7bee6ac2016-12-05 20:23:10 +000040defm EXP : EXP_m<0, AMDGPUexport>;
41defm EXP_DONE : EXP_m<1, AMDGPUexport_done>;
Tom Stellard3a35d8f2014-10-01 14:44:45 +000042
43//===----------------------------------------------------------------------===//
Tom Stellard8d6d4492014-04-22 16:33:57 +000044// VINTRP Instructions
45//===----------------------------------------------------------------------===//
46
Matt Arsenault80f766a2015-09-10 01:23:28 +000047let Uses = [M0, EXEC] in {
Tom Stellard2a9d9472015-05-12 15:00:46 +000048
Tom Stellardae38f302015-01-14 01:13:19 +000049// FIXME: Specify SchedRW for VINTRP insturctions.
Tom Stellardec87f842015-05-25 16:15:54 +000050
51multiclass V_INTERP_P1_F32_m : VINTRP_m <
52 0x00000000,
Matt Arsenaultac066f32016-12-06 22:29:43 +000053 (outs VGPR_32:$vdst),
Matt Arsenault0e8a2992016-12-15 20:40:20 +000054 (ins VGPR_32:$vsrc, Attr:$attr, AttrChan:$attrchan),
55 "v_interp_p1_f32 $vdst, $vsrc, $attr$attrchan",
Matt Arsenaultf0c86252016-12-10 00:29:55 +000056 [(set f32:$vdst, (AMDGPUinterp_p1 f32:$vsrc, (i32 imm:$attrchan),
57 (i32 imm:$attr)))]
Tom Stellardec87f842015-05-25 16:15:54 +000058>;
59
60let OtherPredicates = [has32BankLDS] in {
61
62defm V_INTERP_P1_F32 : V_INTERP_P1_F32_m;
63
64} // End OtherPredicates = [has32BankLDS]
65
Matt Arsenaultac066f32016-12-06 22:29:43 +000066let OtherPredicates = [has16BankLDS], Constraints = "@earlyclobber $vdst", isAsmParserOnly=1 in {
Tom Stellardec87f842015-05-25 16:15:54 +000067
68defm V_INTERP_P1_F32_16bank : V_INTERP_P1_F32_m;
69
Matt Arsenaultac066f32016-12-06 22:29:43 +000070} // End OtherPredicates = [has32BankLDS], Constraints = "@earlyclobber $vdst", isAsmParserOnly=1
Tom Stellard75aadc22012-12-11 21:25:42 +000071
Matt Arsenaultac066f32016-12-06 22:29:43 +000072let DisableEncoding = "$src0", Constraints = "$src0 = $vdst" in {
Tom Stellard50828162015-05-25 16:15:56 +000073
Marek Olsak5df00d62014-12-07 12:18:57 +000074defm V_INTERP_P2_F32 : VINTRP_m <
Tom Stellardc70cf902015-05-25 16:15:50 +000075 0x00000001,
Matt Arsenaultac066f32016-12-06 22:29:43 +000076 (outs VGPR_32:$vdst),
Matt Arsenault0e8a2992016-12-15 20:40:20 +000077 (ins VGPR_32:$src0, VGPR_32:$vsrc, Attr:$attr, AttrChan:$attrchan),
78 "v_interp_p2_f32 $vdst, $vsrc, $attr$attrchan",
Matt Arsenaultf0c86252016-12-10 00:29:55 +000079 [(set f32:$vdst, (AMDGPUinterp_p2 f32:$src0, f32:$vsrc, (i32 imm:$attrchan),
80 (i32 imm:$attr)))]>;
Tom Stellard50828162015-05-25 16:15:56 +000081
Matt Arsenaultac066f32016-12-06 22:29:43 +000082} // End DisableEncoding = "$src0", Constraints = "$src0 = $vdst"
Tom Stellard75aadc22012-12-11 21:25:42 +000083
Marek Olsak5df00d62014-12-07 12:18:57 +000084defm V_INTERP_MOV_F32 : VINTRP_m <
Tom Stellardc70cf902015-05-25 16:15:50 +000085 0x00000002,
Matt Arsenaultac066f32016-12-06 22:29:43 +000086 (outs VGPR_32:$vdst),
Matt Arsenault0e8a2992016-12-15 20:40:20 +000087 (ins InterpSlot:$vsrc, Attr:$attr, AttrChan:$attrchan),
88 "v_interp_mov_f32 $vdst, $vsrc, $attr$attrchan",
Matt Arsenaultf0c86252016-12-10 00:29:55 +000089 [(set f32:$vdst, (AMDGPUinterp_mov (i32 imm:$vsrc), (i32 imm:$attrchan),
Matt Arsenaultac066f32016-12-06 22:29:43 +000090 (i32 imm:$attr)))]>;
Tom Stellard2a9d9472015-05-12 15:00:46 +000091
Matt Arsenault80f766a2015-09-10 01:23:28 +000092} // End Uses = [M0, EXEC]
Tom Stellard75aadc22012-12-11 21:25:42 +000093
Tom Stellard8d6d4492014-04-22 16:33:57 +000094//===----------------------------------------------------------------------===//
Tom Stellard8d6d4492014-04-22 16:33:57 +000095// Pseudo Instructions
96//===----------------------------------------------------------------------===//
Matt Arsenaultfc7e6a02016-07-12 00:23:17 +000097
98let hasSideEffects = 0, mayLoad = 0, mayStore = 0, Uses = [EXEC] in {
Tom Stellard75aadc22012-12-11 21:25:42 +000099
Marek Olsak7d777282015-03-24 13:40:15 +0000100// For use in patterns
Tom Stellardcc4c8712016-02-16 18:14:56 +0000101def V_CNDMASK_B64_PSEUDO : VOP3Common <(outs VReg_64:$vdst),
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000102 (ins VSrc_b64:$src0, VSrc_b64:$src1, SSrc_b64:$src2), "", []> {
Matt Arsenaultfc7e6a02016-07-12 00:23:17 +0000103 let isPseudo = 1;
104 let isCodeGenOnly = 1;
Matt Arsenault22e41792016-08-27 01:00:37 +0000105 let usesCustomInserter = 1;
Tom Stellard60024a02014-09-24 01:33:24 +0000106}
107
Matt Arsenaultfc7e6a02016-07-12 00:23:17 +0000108// 64-bit vector move instruction. This is mainly used by the SIFoldOperands
109// pass to enable folding of inline immediates.
Matt Arsenault4bd72362016-12-10 00:39:12 +0000110def V_MOV_B64_PSEUDO : VPseudoInstSI <(outs VReg_64:$vdst),
111 (ins VSrc_b64:$src0)>;
Matt Arsenaultfc7e6a02016-07-12 00:23:17 +0000112} // End let hasSideEffects = 0, mayLoad = 0, mayStore = 0, Uses = [EXEC]
113
Wei Ding205bfdb2017-02-10 02:15:29 +0000114def S_TRAP_PSEUDO : SPseudoInstSI <(outs), (ins i16imm:$simm16)> {
Wei Dingee21a362017-01-24 06:41:21 +0000115 let hasSideEffects = 1;
116 let SALU = 1;
117 let usesCustomInserter = 1;
118}
119
Changpeng Fang01f60622016-03-15 17:28:44 +0000120let usesCustomInserter = 1, SALU = 1 in {
Matt Arsenaultfc7e6a02016-07-12 00:23:17 +0000121def GET_GROUPSTATICSIZE : PseudoInstSI <(outs SReg_32:$sdst), (ins),
Changpeng Fang01f60622016-03-15 17:28:44 +0000122 [(set SReg_32:$sdst, (int_amdgcn_groupstaticsize))]>;
123} // End let usesCustomInserter = 1, SALU = 1
124
Matt Arsenaulte6740752016-09-29 01:44:16 +0000125def S_MOV_B64_term : PseudoInstSI<(outs SReg_64:$dst),
126 (ins SSrc_b64:$src0)> {
127 let SALU = 1;
128 let isAsCheapAsAMove = 1;
129 let isTerminator = 1;
130}
131
132def S_XOR_B64_term : PseudoInstSI<(outs SReg_64:$dst),
133 (ins SSrc_b64:$src0, SSrc_b64:$src1)> {
134 let SALU = 1;
135 let isAsCheapAsAMove = 1;
136 let isTerminator = 1;
137}
138
139def S_ANDN2_B64_term : PseudoInstSI<(outs SReg_64:$dst),
140 (ins SSrc_b64:$src0, SSrc_b64:$src1)> {
141 let SALU = 1;
142 let isAsCheapAsAMove = 1;
143 let isTerminator = 1;
144}
145
Stanislav Mekhanoshinea91cca2016-11-15 19:00:15 +0000146def WAVE_BARRIER : SPseudoInstSI<(outs), (ins),
147 [(int_amdgcn_wave_barrier)]> {
148 let SchedRW = [];
149 let hasNoSchedulingInfo = 1;
150 let hasSideEffects = 1;
151 let mayLoad = 1;
152 let mayStore = 1;
153 let isBarrier = 1;
154 let isConvergent = 1;
155}
156
Matt Arsenault8fb37382013-10-11 21:03:36 +0000157// SI pseudo instructions. These are used by the CFG structurizer pass
Tom Stellard75aadc22012-12-11 21:25:42 +0000158// and should be lowered to ISA instructions prior to codegen.
159
Matt Arsenault9babdf42016-06-22 20:15:28 +0000160// Dummy terminator instruction to use after control flow instructions
161// replaced with exec mask operations.
Matt Arsenaultfc7e6a02016-07-12 00:23:17 +0000162def SI_MASK_BRANCH : PseudoInstSI <
Matt Arsenaultf98a5962016-08-27 00:42:21 +0000163 (outs), (ins brtarget:$target)> {
Matt Arsenault57431c92016-08-10 19:11:42 +0000164 let isBranch = 0;
Matt Arsenault9babdf42016-06-22 20:15:28 +0000165 let isTerminator = 1;
Matt Arsenault57431c92016-08-10 19:11:42 +0000166 let isBarrier = 0;
Matt Arsenault78fc9da2016-08-22 19:33:16 +0000167 let Uses = [EXEC];
Matt Arsenaultc59a9232016-10-06 18:12:07 +0000168 let SchedRW = [];
169 let hasNoSchedulingInfo = 1;
Matt Arsenault9babdf42016-06-22 20:15:28 +0000170}
171
Matt Arsenault71ed8a62016-08-27 03:00:51 +0000172let isTerminator = 1 in {
Tom Stellardf8794352012-12-19 22:10:31 +0000173
Matt Arsenault71ed8a62016-08-27 03:00:51 +0000174def SI_IF: CFPseudoInstSI <
Matt Arsenaultfc7e6a02016-07-12 00:23:17 +0000175 (outs SReg_64:$dst), (ins SReg_64:$vcc, brtarget:$target),
Matt Arsenault71ed8a62016-08-27 03:00:51 +0000176 [(set i64:$dst, (int_amdgcn_if i1:$vcc, bb:$target))], 1, 1> {
Matt Arsenaultfc7e6a02016-07-12 00:23:17 +0000177 let Constraints = "";
Matt Arsenaulte6740752016-09-29 01:44:16 +0000178 let Size = 12;
Matt Arsenault6408c912016-09-16 22:11:18 +0000179 let mayLoad = 1;
Matt Arsenaulte6740752016-09-29 01:44:16 +0000180 let mayStore = 1;
Matt Arsenault6408c912016-09-16 22:11:18 +0000181 let hasSideEffects = 1;
Matt Arsenaultfc7e6a02016-07-12 00:23:17 +0000182}
Tom Stellard75aadc22012-12-11 21:25:42 +0000183
Matt Arsenault71ed8a62016-08-27 03:00:51 +0000184def SI_ELSE : CFPseudoInstSI <
185 (outs SReg_64:$dst), (ins SReg_64:$src, brtarget:$target, i1imm:$execfix), [], 1, 1> {
Tom Stellardf8794352012-12-19 22:10:31 +0000186 let Constraints = "$src = $dst";
Matt Arsenaultc6b13502016-08-10 19:11:51 +0000187 let Size = 12;
Matt Arsenault6408c912016-09-16 22:11:18 +0000188 let mayStore = 1;
189 let mayLoad = 1;
190 let hasSideEffects = 1;
Tom Stellardf8794352012-12-19 22:10:31 +0000191}
192
Matt Arsenault71ed8a62016-08-27 03:00:51 +0000193def SI_LOOP : CFPseudoInstSI <
Matt Arsenaultfc7e6a02016-07-12 00:23:17 +0000194 (outs), (ins SReg_64:$saved, brtarget:$target),
Matt Arsenault71ed8a62016-08-27 03:00:51 +0000195 [(int_amdgcn_loop i64:$saved, bb:$target)], 1, 1> {
Matt Arsenaultc6b13502016-08-10 19:11:51 +0000196 let Size = 8;
Matt Arsenault71ed8a62016-08-27 03:00:51 +0000197 let isBranch = 1;
Matt Arsenault6408c912016-09-16 22:11:18 +0000198 let hasSideEffects = 1;
199 let mayLoad = 1;
200 let mayStore = 1;
Matt Arsenaultc6b13502016-08-10 19:11:51 +0000201}
Tom Stellardf8794352012-12-19 22:10:31 +0000202
Matt Arsenault382d9452016-01-26 04:49:22 +0000203} // End isBranch = 1, isTerminator = 1
Tom Stellardf8794352012-12-19 22:10:31 +0000204
Matt Arsenault71ed8a62016-08-27 03:00:51 +0000205def SI_END_CF : CFPseudoInstSI <
206 (outs), (ins SReg_64:$saved),
207 [(int_amdgcn_end_cf i64:$saved)], 1, 1> {
208 let Size = 4;
Matt Arsenault6408c912016-09-16 22:11:18 +0000209 let isAsCheapAsAMove = 1;
210 let isReMaterializable = 1;
211 let mayLoad = 1;
212 let mayStore = 1;
213 let hasSideEffects = 1;
Matt Arsenault71ed8a62016-08-27 03:00:51 +0000214}
Matt Arsenaultfc7e6a02016-07-12 00:23:17 +0000215
Matt Arsenault71ed8a62016-08-27 03:00:51 +0000216def SI_BREAK : CFPseudoInstSI <
Matt Arsenaultfc7e6a02016-07-12 00:23:17 +0000217 (outs SReg_64:$dst), (ins SReg_64:$src),
Matt Arsenault71ed8a62016-08-27 03:00:51 +0000218 [(set i64:$dst, (int_amdgcn_break i64:$src))], 1> {
Matt Arsenaultc6b13502016-08-10 19:11:51 +0000219 let Size = 4;
Matt Arsenault6408c912016-09-16 22:11:18 +0000220 let isAsCheapAsAMove = 1;
221 let isReMaterializable = 1;
Matt Arsenaultc6b13502016-08-10 19:11:51 +0000222}
Matt Arsenault48d70cb2016-07-09 17:18:39 +0000223
Matt Arsenault71ed8a62016-08-27 03:00:51 +0000224def SI_IF_BREAK : CFPseudoInstSI <
Matt Arsenaultfc7e6a02016-07-12 00:23:17 +0000225 (outs SReg_64:$dst), (ins SReg_64:$vcc, SReg_64:$src),
Matt Arsenault71ed8a62016-08-27 03:00:51 +0000226 [(set i64:$dst, (int_amdgcn_if_break i1:$vcc, i64:$src))]> {
Matt Arsenaultc6b13502016-08-10 19:11:51 +0000227 let Size = 4;
Matt Arsenault6408c912016-09-16 22:11:18 +0000228 let isAsCheapAsAMove = 1;
229 let isReMaterializable = 1;
Matt Arsenaultc6b13502016-08-10 19:11:51 +0000230}
Tom Stellardf8794352012-12-19 22:10:31 +0000231
Matt Arsenault71ed8a62016-08-27 03:00:51 +0000232def SI_ELSE_BREAK : CFPseudoInstSI <
Matt Arsenaultfc7e6a02016-07-12 00:23:17 +0000233 (outs SReg_64:$dst), (ins SReg_64:$src0, SReg_64:$src1),
Matt Arsenaultc6b13502016-08-10 19:11:51 +0000234 [(set i64:$dst, (int_amdgcn_else_break i64:$src0, i64:$src1))]> {
235 let Size = 4;
Matt Arsenault6408c912016-09-16 22:11:18 +0000236 let isAsCheapAsAMove = 1;
237 let isReMaterializable = 1;
Matt Arsenaultc6b13502016-08-10 19:11:51 +0000238}
Tom Stellardf8794352012-12-19 22:10:31 +0000239
Tom Stellardaa798342015-05-01 03:44:09 +0000240let Uses = [EXEC], Defs = [EXEC,VCC] in {
Matt Arsenaultfc7e6a02016-07-12 00:23:17 +0000241def SI_KILL : PseudoInstSI <
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000242 (outs), (ins VSrc_b32:$src),
Matt Arsenault03006fd2016-07-19 16:27:56 +0000243 [(AMDGPUkill i32:$src)]> {
Matt Arsenault786724a2016-07-12 21:41:32 +0000244 let isConvergent = 1;
245 let usesCustomInserter = 1;
246}
247
Matt Arsenault71ed8a62016-08-27 03:00:51 +0000248def SI_KILL_TERMINATOR : SPseudoInstSI <
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000249 (outs), (ins VSrc_b32:$src)> {
Matt Arsenault786724a2016-07-12 21:41:32 +0000250 let isTerminator = 1;
251}
252
Tom Stellardaa798342015-05-01 03:44:09 +0000253} // End Uses = [EXEC], Defs = [EXEC,VCC]
Tom Stellardbe8ebee2013-01-18 21:15:50 +0000254
Matt Arsenault327188a2016-12-15 21:57:11 +0000255// Branch on undef scc. Used to avoid intermediate copy from
256// IMPLICIT_DEF to SCC.
257def SI_BR_UNDEF : SPseudoInstSI <(outs), (ins sopp_brtarget:$simm16)> {
258 let isTerminator = 1;
259 let usesCustomInserter = 1;
260}
Tom Stellardf8794352012-12-19 22:10:31 +0000261
Matt Arsenaultfc7e6a02016-07-12 00:23:17 +0000262def SI_PS_LIVE : PseudoInstSI <
263 (outs SReg_64:$dst), (ins),
Matt Arsenault9babdf42016-06-22 20:15:28 +0000264 [(set i1:$dst, (int_amdgcn_ps_live))]> {
265 let SALU = 1;
266}
Nicolai Haehnleb0c97482016-04-22 04:04:08 +0000267
Matt Arsenault4ac341c2016-04-14 21:58:15 +0000268// Used as an isel pseudo to directly emit initialization with an
269// s_mov_b32 rather than a copy of another initialized
270// register. MachineCSE skips copies, and we don't want to have to
271// fold operands before it runs.
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000272def SI_INIT_M0 : SPseudoInstSI <(outs), (ins SSrc_b32:$src)> {
Matt Arsenault4ac341c2016-04-14 21:58:15 +0000273 let Defs = [M0];
274 let usesCustomInserter = 1;
Matt Arsenault4ac341c2016-04-14 21:58:15 +0000275 let isAsCheapAsAMove = 1;
Matt Arsenault4ac341c2016-04-14 21:58:15 +0000276 let isReMaterializable = 1;
277}
278
Matt Arsenault71ed8a62016-08-27 03:00:51 +0000279def SI_RETURN : SPseudoInstSI <
Matt Arsenaultfc7e6a02016-07-12 00:23:17 +0000280 (outs), (ins variable_ops), [(AMDGPUreturn)]> {
Matt Arsenault9babdf42016-06-22 20:15:28 +0000281 let isTerminator = 1;
282 let isBarrier = 1;
283 let isReturn = 1;
Matt Arsenault9babdf42016-06-22 20:15:28 +0000284 let hasSideEffects = 1;
Matt Arsenault9babdf42016-06-22 20:15:28 +0000285 let hasNoSchedulingInfo = 1;
Nicolai Haehnlea246dcc2016-09-03 12:26:32 +0000286 let DisableWQM = 1;
Matt Arsenault9babdf42016-06-22 20:15:28 +0000287}
288
Matt Arsenault71ed8a62016-08-27 03:00:51 +0000289let Defs = [M0, EXEC],
Matt Arsenault3cb4dde2016-06-22 23:40:57 +0000290 UseNamedOperandTable = 1 in {
Christian Konig2989ffc2013-03-18 11:34:16 +0000291
Matt Arsenault71ed8a62016-08-27 03:00:51 +0000292class SI_INDIRECT_SRC<RegisterClass rc> : VPseudoInstSI <
Matt Arsenaultcb540bc2016-07-19 00:35:03 +0000293 (outs VGPR_32:$vdst),
294 (ins rc:$src, VS_32:$idx, i32imm:$offset)> {
295 let usesCustomInserter = 1;
296}
Christian Konig2989ffc2013-03-18 11:34:16 +0000297
Matt Arsenault71ed8a62016-08-27 03:00:51 +0000298class SI_INDIRECT_DST<RegisterClass rc> : VPseudoInstSI <
Matt Arsenaultcb540bc2016-07-19 00:35:03 +0000299 (outs rc:$vdst),
300 (ins rc:$src, VS_32:$idx, i32imm:$offset, VGPR_32:$val)> {
Matt Arsenault3cb4dde2016-06-22 23:40:57 +0000301 let Constraints = "$src = $vdst";
Matt Arsenaultcb540bc2016-07-19 00:35:03 +0000302 let usesCustomInserter = 1;
Christian Konig2989ffc2013-03-18 11:34:16 +0000303}
304
Matt Arsenault28419272015-10-07 00:42:51 +0000305// TODO: We can support indirect SGPR access.
306def SI_INDIRECT_SRC_V1 : SI_INDIRECT_SRC<VGPR_32>;
307def SI_INDIRECT_SRC_V2 : SI_INDIRECT_SRC<VReg_64>;
308def SI_INDIRECT_SRC_V4 : SI_INDIRECT_SRC<VReg_128>;
309def SI_INDIRECT_SRC_V8 : SI_INDIRECT_SRC<VReg_256>;
310def SI_INDIRECT_SRC_V16 : SI_INDIRECT_SRC<VReg_512>;
311
Tom Stellard45c0b3a2015-01-07 20:59:25 +0000312def SI_INDIRECT_DST_V1 : SI_INDIRECT_DST<VGPR_32>;
Christian Konig2989ffc2013-03-18 11:34:16 +0000313def SI_INDIRECT_DST_V2 : SI_INDIRECT_DST<VReg_64>;
314def SI_INDIRECT_DST_V4 : SI_INDIRECT_DST<VReg_128>;
315def SI_INDIRECT_DST_V8 : SI_INDIRECT_DST<VReg_256>;
316def SI_INDIRECT_DST_V16 : SI_INDIRECT_DST<VReg_512>;
317
Matt Arsenaultcb540bc2016-07-19 00:35:03 +0000318} // End Uses = [EXEC], Defs = [M0, EXEC]
Christian Konig2989ffc2013-03-18 11:34:16 +0000319
Tom Stellardeba61072014-05-02 15:41:42 +0000320multiclass SI_SPILL_SGPR <RegisterClass sgpr_class> {
Matt Arsenault3354f422016-09-10 01:20:33 +0000321 let UseNamedOperandTable = 1, SGPRSpill = 1, Uses = [EXEC] in {
Matt Arsenaultfc7e6a02016-07-12 00:23:17 +0000322 def _SAVE : PseudoInstSI <
Tom Stellard42fb60e2015-01-14 15:42:31 +0000323 (outs),
Matt Arsenault3354f422016-09-10 01:20:33 +0000324 (ins sgpr_class:$data, i32imm:$addr)> {
Matt Arsenault9a32cd32015-08-29 06:48:57 +0000325 let mayStore = 1;
326 let mayLoad = 0;
327 }
Tom Stellardeba61072014-05-02 15:41:42 +0000328
Matt Arsenaultfc7e6a02016-07-12 00:23:17 +0000329 def _RESTORE : PseudoInstSI <
Matt Arsenault3354f422016-09-10 01:20:33 +0000330 (outs sgpr_class:$data),
331 (ins i32imm:$addr)> {
Matt Arsenault9a32cd32015-08-29 06:48:57 +0000332 let mayStore = 0;
333 let mayLoad = 1;
334 }
Tom Stellard42fb60e2015-01-14 15:42:31 +0000335 } // End UseNamedOperandTable = 1
Tom Stellardeba61072014-05-02 15:41:42 +0000336}
337
Matt Arsenault2510a312016-09-03 06:57:55 +0000338// You cannot use M0 as the output of v_readlane_b32 instructions or
339// use it in the sdata operand of SMEM instructions. We still need to
340// be able to spill the physical register m0, so allow it for
341// SI_SPILL_32_* instructions.
342defm SI_SPILL_S32 : SI_SPILL_SGPR <SReg_32>;
Tom Stellardeba61072014-05-02 15:41:42 +0000343defm SI_SPILL_S64 : SI_SPILL_SGPR <SReg_64>;
344defm SI_SPILL_S128 : SI_SPILL_SGPR <SReg_128>;
345defm SI_SPILL_S256 : SI_SPILL_SGPR <SReg_256>;
346defm SI_SPILL_S512 : SI_SPILL_SGPR <SReg_512>;
347
Tom Stellard96468902014-09-24 01:33:17 +0000348multiclass SI_SPILL_VGPR <RegisterClass vgpr_class> {
Matt Arsenault7348a7e2016-09-10 01:20:28 +0000349 let UseNamedOperandTable = 1, VGPRSpill = 1,
350 SchedRW = [WriteVMEM] in {
Matt Arsenault71ed8a62016-08-27 03:00:51 +0000351 def _SAVE : VPseudoInstSI <
Tom Stellard42fb60e2015-01-14 15:42:31 +0000352 (outs),
Matt Arsenaultbcfd94c2016-09-17 15:52:37 +0000353 (ins vgpr_class:$vdata, i32imm:$vaddr, SReg_128:$srsrc,
354 SReg_32:$soffset, i32imm:$offset)> {
Matt Arsenault9a32cd32015-08-29 06:48:57 +0000355 let mayStore = 1;
356 let mayLoad = 0;
Matt Arsenaultac42ba82016-09-03 17:25:44 +0000357 // (2 * 4) + (8 * num_subregs) bytes maximum
358 let Size = !add(!shl(!srl(vgpr_class.Size, 5), 3), 8);
Matt Arsenault9a32cd32015-08-29 06:48:57 +0000359 }
Tom Stellard96468902014-09-24 01:33:17 +0000360
Matt Arsenault71ed8a62016-08-27 03:00:51 +0000361 def _RESTORE : VPseudoInstSI <
Matt Arsenault3354f422016-09-10 01:20:33 +0000362 (outs vgpr_class:$vdata),
Matt Arsenaultbcfd94c2016-09-17 15:52:37 +0000363 (ins i32imm:$vaddr, SReg_128:$srsrc, SReg_32:$soffset,
Matt Arsenault9babdf42016-06-22 20:15:28 +0000364 i32imm:$offset)> {
Matt Arsenault9a32cd32015-08-29 06:48:57 +0000365 let mayStore = 0;
366 let mayLoad = 1;
Matt Arsenaultac42ba82016-09-03 17:25:44 +0000367
368 // (2 * 4) + (8 * num_subregs) bytes maximum
369 let Size = !add(!shl(!srl(vgpr_class.Size, 5), 3), 8);
Matt Arsenault9a32cd32015-08-29 06:48:57 +0000370 }
Matt Arsenault7348a7e2016-09-10 01:20:28 +0000371 } // End UseNamedOperandTable = 1, VGPRSpill = 1, SchedRW = [WriteVMEM]
Tom Stellard96468902014-09-24 01:33:17 +0000372}
373
Tom Stellard45c0b3a2015-01-07 20:59:25 +0000374defm SI_SPILL_V32 : SI_SPILL_VGPR <VGPR_32>;
Tom Stellard96468902014-09-24 01:33:17 +0000375defm SI_SPILL_V64 : SI_SPILL_VGPR <VReg_64>;
376defm SI_SPILL_V96 : SI_SPILL_VGPR <VReg_96>;
377defm SI_SPILL_V128 : SI_SPILL_VGPR <VReg_128>;
378defm SI_SPILL_V256 : SI_SPILL_VGPR <VReg_256>;
379defm SI_SPILL_V512 : SI_SPILL_VGPR <VReg_512>;
380
Matt Arsenault71ed8a62016-08-27 03:00:51 +0000381def SI_PC_ADD_REL_OFFSET : SPseudoInstSI <
Tom Stellard067c8152014-07-21 14:01:14 +0000382 (outs SReg_64:$dst),
Konstantin Zhuravlyovc96b5d72016-10-14 04:37:34 +0000383 (ins si_ga:$ptr_lo, si_ga:$ptr_hi),
384 [(set SReg_64:$dst,
385 (i64 (SIpc_add_rel_offset (tglobaladdr:$ptr_lo), (tglobaladdr:$ptr_hi))))]> {
Matt Arsenault71ed8a62016-08-27 03:00:51 +0000386 let Defs = [SCC];
Matt Arsenaultd092a062015-10-02 18:58:37 +0000387}
Tom Stellard067c8152014-07-21 14:01:14 +0000388
Matt Arsenault382d9452016-01-26 04:49:22 +0000389} // End SubtargetPredicate = isGCN
Tom Stellard0e70de52014-05-16 20:56:45 +0000390
Marek Olsak5df00d62014-12-07 12:18:57 +0000391let Predicates = [isGCN] in {
Wei Ding205bfdb2017-02-10 02:15:29 +0000392def : Pat<
393 (trap),
394 (S_TRAP_PSEUDO TRAPTYPE.LLVM_TRAP)
395>;
396
397def : Pat<
398 (debugtrap),
399 (S_TRAP_PSEUDO TRAPTYPE.LLVM_DEBUG_TRAP)
400>;
Tom Stellard0e70de52014-05-16 20:56:45 +0000401
Nicolai Haehnle3b572002016-07-28 11:39:24 +0000402def : Pat<
403 (int_amdgcn_else i64:$src, bb:$target),
404 (SI_ELSE $src, $target, 0)
405>;
406
Tom Stellardbe8ebee2013-01-18 21:15:50 +0000407def : Pat <
408 (int_AMDGPU_kilp),
Tom Stellard115a6152016-11-10 16:02:37 +0000409 (SI_KILL (i32 0xbf800000))
Tom Stellardbe8ebee2013-01-18 21:15:50 +0000410>;
411
Tom Stellard8d6d4492014-04-22 16:33:57 +0000412//===----------------------------------------------------------------------===//
Matt Arsenaulta0050b02014-06-19 01:19:19 +0000413// VOP1 Patterns
414//===----------------------------------------------------------------------===//
415
Matt Arsenault22ca3f82014-07-15 23:50:10 +0000416let Predicates = [UnsafeFPMath] in {
Matt Arsenault0bbcd8b2015-02-14 04:30:08 +0000417
418//def : RcpPat<V_RCP_F64_e32, f64>;
419//defm : RsqPat<V_RSQ_F64_e32, f64>;
420//defm : RsqPat<V_RSQ_F32_e32, f32>;
421
422def : RsqPat<V_RSQ_F32_e32, f32>;
423def : RsqPat<V_RSQ_F64_e32, f64>;
Matt Arsenault74015162016-05-28 00:19:52 +0000424
425// Convert (x - floor(x)) to fract(x)
426def : Pat <
427 (f32 (fsub (f32 (VOP3Mods f32:$x, i32:$mods)),
428 (f32 (ffloor (f32 (VOP3Mods f32:$x, i32:$mods)))))),
429 (V_FRACT_F32_e64 $mods, $x, DSTCLAMP.NONE, DSTOMOD.NONE)
430>;
431
432// Convert (x + (-floor(x))) to fract(x)
433def : Pat <
434 (f64 (fadd (f64 (VOP3Mods f64:$x, i32:$mods)),
435 (f64 (fneg (f64 (ffloor (f64 (VOP3Mods f64:$x, i32:$mods)))))))),
436 (V_FRACT_F64_e64 $mods, $x, DSTCLAMP.NONE, DSTOMOD.NONE)
437>;
438
439} // End Predicates = [UnsafeFPMath]
Matt Arsenaulte9fa3b82014-07-15 20:18:31 +0000440
Matt Arsenault9dba9bd2017-02-02 02:27:04 +0000441
442// f16_to_fp patterns
Konstantin Zhuravlyovf86e4b72016-11-13 07:01:11 +0000443def : Pat <
Matt Arsenault9dba9bd2017-02-02 02:27:04 +0000444 (f32 (f16_to_fp i32:$src0)),
445 (V_CVT_F32_F16_e64 SRCMODS.NONE, $src0, DSTCLAMP.NONE, DSTOMOD.NONE)
446>;
447
448def : Pat <
449 (f32 (f16_to_fp (and_oneuse i32:$src0, 0x7fff))),
450 (V_CVT_F32_F16_e64 SRCMODS.ABS, $src0, DSTCLAMP.NONE, DSTOMOD.NONE)
451>;
452
453def : Pat <
454 (f32 (f16_to_fp (or_oneuse i32:$src0, 0x8000))),
455 (V_CVT_F32_F16_e64 SRCMODS.NEG_ABS, $src0, DSTCLAMP.NONE, DSTOMOD.NONE)
456>;
457
458def : Pat <
459 (f32 (f16_to_fp (xor_oneuse i32:$src0, 0x8000))),
460 (V_CVT_F32_F16_e64 SRCMODS.NEG, $src0, DSTCLAMP.NONE, DSTOMOD.NONE)
Konstantin Zhuravlyovf86e4b72016-11-13 07:01:11 +0000461>;
462
463def : Pat <
464 (f64 (fpextend f16:$src)),
465 (V_CVT_F64_F32_e32 (V_CVT_F32_F16_e32 $src))
466>;
467
Matt Arsenault9dba9bd2017-02-02 02:27:04 +0000468// fp_to_fp16 patterns
Konstantin Zhuravlyovf86e4b72016-11-13 07:01:11 +0000469def : Pat <
Matt Arsenault9dba9bd2017-02-02 02:27:04 +0000470 (i32 (fp_to_f16 (f32 (VOP3Mods0 f32:$src0, i32:$src0_modifiers, i1:$clamp, i32:$omod)))),
471 (V_CVT_F16_F32_e64 $src0_modifiers, f32:$src0, $clamp, $omod)
Konstantin Zhuravlyovf86e4b72016-11-13 07:01:11 +0000472>;
473
474def : Pat <
Konstantin Zhuravlyovf86e4b72016-11-13 07:01:11 +0000475 (i32 (fp_to_sint f16:$src)),
476 (V_CVT_I32_F32_e32 (V_CVT_F32_F16_e32 $src))
477>;
478
479def : Pat <
480 (i32 (fp_to_uint f16:$src)),
481 (V_CVT_U32_F32_e32 (V_CVT_F32_F16_e32 $src))
482>;
483
484def : Pat <
485 (f16 (sint_to_fp i32:$src)),
486 (V_CVT_F16_F32_e32 (V_CVT_F32_I32_e32 $src))
487>;
488
489def : Pat <
490 (f16 (uint_to_fp i32:$src)),
491 (V_CVT_F16_F32_e32 (V_CVT_F32_U32_e32 $src))
492>;
493
Matt Arsenaulta0050b02014-06-19 01:19:19 +0000494//===----------------------------------------------------------------------===//
Tom Stellard58ac7442014-04-29 23:12:48 +0000495// VOP2 Patterns
496//===----------------------------------------------------------------------===//
497
Konstantin Zhuravlyovbf998c72016-11-16 03:39:12 +0000498multiclass FMADPat <ValueType vt, Instruction inst> {
499 def : Pat <
500 (vt (fmad (VOP3NoMods0 vt:$src0, i32:$src0_modifiers, i1:$clamp, i32:$omod),
501 (VOP3NoMods vt:$src1, i32:$src1_modifiers),
502 (VOP3NoMods vt:$src2, i32:$src2_modifiers))),
503 (inst $src0_modifiers, $src0, $src1_modifiers, $src1,
504 $src2_modifiers, $src2, $clamp, $omod)
505 >;
506}
507
508defm : FMADPat <f16, V_MAC_F16_e64>;
509defm : FMADPat <f32, V_MAC_F32_e64>;
510
511multiclass SelectPat <ValueType vt, Instruction inst> {
Konstantin Zhuravlyov2a87a422016-11-16 03:16:26 +0000512 def : Pat <
513 (vt (select i1:$src0, vt:$src1, vt:$src2)),
514 (inst $src2, $src1, $src0)
515 >;
516}
517
Konstantin Zhuravlyovbf998c72016-11-16 03:39:12 +0000518defm : SelectPat <i16, V_CNDMASK_B32_e64>;
519defm : SelectPat <i32, V_CNDMASK_B32_e64>;
520defm : SelectPat <f16, V_CNDMASK_B32_e64>;
521defm : SelectPat <f32, V_CNDMASK_B32_e64>;
Konstantin Zhuravlyov2a87a422016-11-16 03:16:26 +0000522
Tom Stellardae4c9e72014-06-20 17:06:11 +0000523def : Pat <
524 (i32 (add (i32 (ctpop i32:$popcnt)), i32:$val)),
Matt Arsenault49dd4282014-09-15 17:15:02 +0000525 (V_BCNT_U32_B32_e64 $popcnt, $val)
Tom Stellardae4c9e72014-06-20 17:06:11 +0000526>;
527
Christian Konig4a1b9c32013-03-18 11:34:10 +0000528/********** ============================================ **********/
529/********** Extraction, Insertion, Building and Casting **********/
530/********** ============================================ **********/
Tom Stellard75aadc22012-12-11 21:25:42 +0000531
Christian Konig4a1b9c32013-03-18 11:34:10 +0000532foreach Index = 0-2 in {
533 def Extract_Element_v2i32_#Index : Extract_Element <
Tom Stellard40b7f1f2013-05-02 15:30:12 +0000534 i32, v2i32, Index, !cast<SubRegIndex>(sub#Index)
Christian Konig4a1b9c32013-03-18 11:34:10 +0000535 >;
536 def Insert_Element_v2i32_#Index : Insert_Element <
Tom Stellard40b7f1f2013-05-02 15:30:12 +0000537 i32, v2i32, Index, !cast<SubRegIndex>(sub#Index)
Christian Konig4a1b9c32013-03-18 11:34:10 +0000538 >;
539
540 def Extract_Element_v2f32_#Index : Extract_Element <
Tom Stellard40b7f1f2013-05-02 15:30:12 +0000541 f32, v2f32, Index, !cast<SubRegIndex>(sub#Index)
Christian Konig4a1b9c32013-03-18 11:34:10 +0000542 >;
543 def Insert_Element_v2f32_#Index : Insert_Element <
Tom Stellard40b7f1f2013-05-02 15:30:12 +0000544 f32, v2f32, Index, !cast<SubRegIndex>(sub#Index)
Christian Konig4a1b9c32013-03-18 11:34:10 +0000545 >;
546}
547
548foreach Index = 0-3 in {
549 def Extract_Element_v4i32_#Index : Extract_Element <
Tom Stellard40b7f1f2013-05-02 15:30:12 +0000550 i32, v4i32, Index, !cast<SubRegIndex>(sub#Index)
Christian Konig4a1b9c32013-03-18 11:34:10 +0000551 >;
552 def Insert_Element_v4i32_#Index : Insert_Element <
Tom Stellard40b7f1f2013-05-02 15:30:12 +0000553 i32, v4i32, Index, !cast<SubRegIndex>(sub#Index)
Christian Konig4a1b9c32013-03-18 11:34:10 +0000554 >;
555
556 def Extract_Element_v4f32_#Index : Extract_Element <
Tom Stellard40b7f1f2013-05-02 15:30:12 +0000557 f32, v4f32, Index, !cast<SubRegIndex>(sub#Index)
Christian Konig4a1b9c32013-03-18 11:34:10 +0000558 >;
559 def Insert_Element_v4f32_#Index : Insert_Element <
Tom Stellard40b7f1f2013-05-02 15:30:12 +0000560 f32, v4f32, Index, !cast<SubRegIndex>(sub#Index)
Christian Konig4a1b9c32013-03-18 11:34:10 +0000561 >;
562}
563
564foreach Index = 0-7 in {
565 def Extract_Element_v8i32_#Index : Extract_Element <
Tom Stellard40b7f1f2013-05-02 15:30:12 +0000566 i32, v8i32, Index, !cast<SubRegIndex>(sub#Index)
Christian Konig4a1b9c32013-03-18 11:34:10 +0000567 >;
568 def Insert_Element_v8i32_#Index : Insert_Element <
Tom Stellard40b7f1f2013-05-02 15:30:12 +0000569 i32, v8i32, Index, !cast<SubRegIndex>(sub#Index)
Christian Konig4a1b9c32013-03-18 11:34:10 +0000570 >;
571
572 def Extract_Element_v8f32_#Index : Extract_Element <
Tom Stellard40b7f1f2013-05-02 15:30:12 +0000573 f32, v8f32, Index, !cast<SubRegIndex>(sub#Index)
Christian Konig4a1b9c32013-03-18 11:34:10 +0000574 >;
575 def Insert_Element_v8f32_#Index : Insert_Element <
Tom Stellard40b7f1f2013-05-02 15:30:12 +0000576 f32, v8f32, Index, !cast<SubRegIndex>(sub#Index)
Christian Konig4a1b9c32013-03-18 11:34:10 +0000577 >;
578}
579
// Extract/insert patterns for 16-element 32-bit vectors; same scheme as the
// v8 case above, using subregister indices sub0..sub15.
foreach Index = 0-15 in {
  def Extract_Element_v16i32_#Index : Extract_Element <
    i32, v16i32, Index, !cast<SubRegIndex>(sub#Index)
  >;
  def Insert_Element_v16i32_#Index : Insert_Element <
    i32, v16i32, Index, !cast<SubRegIndex>(sub#Index)
  >;

  def Extract_Element_v16f32_#Index : Extract_Element <
    f32, v16f32, Index, !cast<SubRegIndex>(sub#Index)
  >;
  def Insert_Element_v16f32_#Index : Insert_Element <
    f32, v16f32, Index, !cast<SubRegIndex>(sub#Index)
  >;
}
Tom Stellard75aadc22012-12-11 21:25:42 +0000595
// Bitcast patterns: a same-width bitcast is a no-op at the register level,
// so each (src-type, dst-type) pair is legal within the given register class.
// FIXME: Why do only some of these type combinations for SReg and
// VReg?
// 16-bit bitcast
def : BitConvert <i16, f16, VGPR_32>;
def : BitConvert <f16, i16, VGPR_32>;
def : BitConvert <i16, f16, SReg_32>;
def : BitConvert <f16, i16, SReg_32>;

// 32-bit bitcast
def : BitConvert <i32, f32, VGPR_32>;
def : BitConvert <f32, i32, VGPR_32>;
def : BitConvert <i32, f32, SReg_32>;
def : BitConvert <f32, i32, SReg_32>;

// 64-bit bitcast
def : BitConvert <i64, f64, VReg_64>;
def : BitConvert <f64, i64, VReg_64>;
def : BitConvert <v2i32, v2f32, VReg_64>;
def : BitConvert <v2f32, v2i32, VReg_64>;
def : BitConvert <i64, v2i32, VReg_64>;
def : BitConvert <v2i32, i64, VReg_64>;
def : BitConvert <i64, v2f32, VReg_64>;
def : BitConvert <v2f32, i64, VReg_64>;
def : BitConvert <f64, v2f32, VReg_64>;
def : BitConvert <v2f32, f64, VReg_64>;
def : BitConvert <f64, v2i32, VReg_64>;
def : BitConvert <v2i32, f64, VReg_64>;
// NOTE(review): the two v4*32 casts below are 128-bit and logically belong
// with the "128-bit bitcast" group that follows.
def : BitConvert <v4i32, v4f32, VReg_128>;
def : BitConvert <v4f32, v4i32, VReg_128>;

// 128-bit bitcast
def : BitConvert <v2i64, v4i32, SReg_128>;
def : BitConvert <v4i32, v2i64, SReg_128>;
def : BitConvert <v2f64, v4f32, VReg_128>;
def : BitConvert <v2f64, v4i32, VReg_128>;
def : BitConvert <v4f32, v2f64, VReg_128>;
def : BitConvert <v4i32, v2f64, VReg_128>;
def : BitConvert <v2i64, v2f64, VReg_128>;
def : BitConvert <v2f64, v2i64, VReg_128>;

// 256-bit bitcast
def : BitConvert <v8i32, v8f32, SReg_256>;
def : BitConvert <v8f32, v8i32, SReg_256>;
def : BitConvert <v8i32, v8f32, VReg_256>;
def : BitConvert <v8f32, v8i32, VReg_256>;

// 512-bit bitcast
def : BitConvert <v16i32, v16f32, VReg_512>;
def : BitConvert <v16f32, v16i32, VReg_512>;
645
/********** =================== **********/
/********** Src & Dst modifiers **********/
/********** =================== **********/

// clamp(x) to [0.0, 1.0] is implemented as v_add_f32 x, 0 with the
// instruction's clamp bit (the literal 1 before $omod) set; source
// modifiers are carried through from the matched VOP3Mods0Clamp.
def : Pat <
  (AMDGPUclamp (VOP3Mods0Clamp f32:$src0, i32:$src0_modifiers, i32:$omod),
               (f32 FP_ZERO), (f32 FP_ONE)),
  (V_ADD_F32_e64 $src0_modifiers, $src0, 0, (i32 0), 1, $omod)
>;
655
/********** ================================ **********/
/********** Floating point absolute/negative **********/
/********** ================================ **********/

// Prevent expanding both fneg and fabs.

// fneg(fabs(x)): unconditionally set the sign bit.
def : Pat <
  (fneg (fabs f32:$src)),
  (S_OR_B32 $src, (S_MOV_B32(i32 0x80000000))) // Set sign bit
>;

// FIXME: Should use S_OR_B32
def : Pat <
  (fneg (fabs f64:$src)),
  (REG_SEQUENCE VReg_64,
    (i32 (EXTRACT_SUBREG f64:$src, sub0)),
    sub0,
    (V_OR_B32_e32 (i32 (EXTRACT_SUBREG f64:$src, sub1)),
                  (V_MOV_B32_e32 (i32 0x80000000))), // Set sign bit.
    sub1)
>;

def : Pat <
  (fabs f32:$src),
  (V_AND_B32_e64 $src, (V_MOV_B32_e32 (i32 0x7fffffff))) // Clear sign bit.
>;

def : Pat <
  (fneg f32:$src),
  (V_XOR_B32_e32 $src, (V_MOV_B32_e32 (i32 0x80000000))) // Toggle sign bit.
>;

// f64 sign-bit tricks operate only on the high 32 bits (sub1); the low
// half is passed through unchanged via REG_SEQUENCE.
def : Pat <
  (fabs f64:$src),
  (REG_SEQUENCE VReg_64,
    (i32 (EXTRACT_SUBREG f64:$src, sub0)),
    sub0,
    (V_AND_B32_e64 (i32 (EXTRACT_SUBREG f64:$src, sub1)),
                   (V_MOV_B32_e32 (i32 0x7fffffff))), // Clear sign bit.
    sub1)
>;

def : Pat <
  (fneg f64:$src),
  (REG_SEQUENCE VReg_64,
    (i32 (EXTRACT_SUBREG f64:$src, sub0)),
    sub0,
    (V_XOR_B32_e32 (i32 (EXTRACT_SUBREG f64:$src, sub1)),
                   (i32 (V_MOV_B32_e32 (i32 0x80000000)))), // Toggle sign bit.
    sub1)
>;

// fcopysign: V_BFI_B32 (bitfield insert) with a magnitude mask keeps the
// mantissa/exponent bits of $src0 and takes the sign bit from $src1.
// Mixed-width forms shift the sign source so its sign bit lines up with
// the destination's (f16 sign is bit 15, f32/f64-high is bit 31).
def : Pat <
  (fcopysign f16:$src0, f16:$src1),
  (V_BFI_B32 (S_MOV_B32 (i32 0x00007fff)), $src0, $src1)
>;

def : Pat <
  (fcopysign f32:$src0, f16:$src1),
  (V_BFI_B32 (S_MOV_B32 (i32 0x7fffffff)), $src0,
             (V_LSHLREV_B32_e64 (i32 16), $src1))
>;

def : Pat <
  (fcopysign f64:$src0, f16:$src1),
  (REG_SEQUENCE SReg_64,
    (i32 (EXTRACT_SUBREG $src0, sub0)), sub0,
    (V_BFI_B32 (S_MOV_B32 (i32 0x7fffffff)), (i32 (EXTRACT_SUBREG $src0, sub1)),
               (V_LSHLREV_B32_e64 (i32 16), $src1)), sub1)
>;

def : Pat <
  (fcopysign f16:$src0, f32:$src1),
  (V_BFI_B32 (S_MOV_B32 (i32 0x00007fff)), $src0,
             (V_LSHRREV_B32_e64 (i32 16), $src1))
>;

def : Pat <
  (fcopysign f16:$src0, f64:$src1),
  (V_BFI_B32 (S_MOV_B32 (i32 0x00007fff)), $src0,
             (V_LSHRREV_B32_e64 (i32 16), (EXTRACT_SUBREG $src1, sub1)))
>;

// f16 sign-bit tricks: sign is bit 15, magnitude mask is 0x7fff.
def : Pat <
  (fneg f16:$src),
  (V_XOR_B32_e32 $src, (V_MOV_B32_e32 (i32 0x00008000)))
>;

def : Pat <
  (fabs f16:$src),
  (V_AND_B32_e64 $src, (V_MOV_B32_e32 (i32 0x00007fff)))
>;

def : Pat <
  (fneg (fabs f16:$src)),
  (S_OR_B32 $src, (S_MOV_B32 (i32 0x00008000))) // Set sign bit
>;
753
/********** ================== **********/
/********** Immediate Patterns **********/
/********** ================== **********/

// Materialize immediates: VGPRImm-wrapped immediates (results needed in a
// VGPR) use v_mov_b32; otherwise scalar moves are preferred. FP immediates
// are bitcast to integers since the move instructions are typeless.
def : Pat <
  (VGPRImm<(i32 imm)>:$imm),
  (V_MOV_B32_e32 imm:$imm)
>;

def : Pat <
  (VGPRImm<(f32 fpimm)>:$imm),
  (V_MOV_B32_e32 (f32 (bitcast_fpimm_to_i32 $imm)))
>;

def : Pat <
  (i32 imm:$imm),
  (S_MOV_B32 imm:$imm)
>;

// FIXME: Workaround for ordering issue with peephole optimizer where
// a register class copy interferes with immediate folding. Should
// use s_mov_b32, which can be shrunk to s_movk_i32
def : Pat <
  (VGPRImm<(f16 fpimm)>:$imm),
  (V_MOV_B32_e32 (f16 (bitcast_fpimm_to_i32 $imm)))
>;

def : Pat <
  (f32 fpimm:$imm),
  (S_MOV_B32 (f32 (bitcast_fpimm_to_i32 $imm)))
>;

def : Pat <
  (f16 fpimm:$imm),
  (S_MOV_B32 (i32 (bitcast_fpimm_to_i32 $imm)))
>;

// Frame indices are resolved to a target frame index and materialized in a
// VGPR.
def : Pat <
  (i32 frameindex:$fi),
  (V_MOV_B32_e32 (i32 (frameindex_to_targetframeindex $fi)))
>;

def : Pat <
  (i64 InlineImm<i64>:$imm),
  (S_MOV_B64 InlineImm<i64>:$imm)
>;

// XXX - Should this use a s_cmp to set SCC?

// Set to sign-extended 64-bit value (true = -1, false = 0)
def : Pat <
  (i1 imm:$imm),
  (S_MOV_B64 (i64 (as_i64imm $imm)))
>;

def : Pat <
  (f64 InlineFPImm<f64>:$imm),
  (S_MOV_B64 (f64 (bitcast_fpimm_to_i64 InlineFPImm<f64>:$imm)))
>;
813
/********** ================== **********/
/********** Intrinsic Patterns **********/
/********** ================== **********/

// pow(x, y) = exp2(y * log2(x)), using the legacy (DX9 zero-semantics)
// multiply.
def : POW_Common <V_LOG_F32_e32, V_EXP_F32_e32, V_MUL_LEGACY_F32_e32>;

// Cube-map intrinsic: the four result lanes (tc, sc, ma, id) are each
// computed by a dedicated VOP3 instruction from the first three input
// elements, then assembled into a v4f32 with REG_SEQUENCE.
def : Pat <
  (int_AMDGPU_cube v4f32:$src),
  (REG_SEQUENCE VReg_128,
    (V_CUBETC_F32 0 /* src0_modifiers */, (f32 (EXTRACT_SUBREG $src, sub0)),
                  0 /* src1_modifiers */, (f32 (EXTRACT_SUBREG $src, sub1)),
                  0 /* src2_modifiers */, (f32 (EXTRACT_SUBREG $src, sub2)),
                  0 /* clamp */, 0 /* omod */), sub0,
    (V_CUBESC_F32 0 /* src0_modifiers */, (f32 (EXTRACT_SUBREG $src, sub0)),
                  0 /* src1_modifiers */, (f32 (EXTRACT_SUBREG $src, sub1)),
                  0 /* src2_modifiers */, (f32 (EXTRACT_SUBREG $src, sub2)),
                  0 /* clamp */, 0 /* omod */), sub1,
    (V_CUBEMA_F32 0 /* src0_modifiers */, (f32 (EXTRACT_SUBREG $src, sub0)),
                  0 /* src1_modifiers */, (f32 (EXTRACT_SUBREG $src, sub1)),
                  0 /* src2_modifiers */, (f32 (EXTRACT_SUBREG $src, sub2)),
                  0 /* clamp */, 0 /* omod */), sub2,
    (V_CUBEID_F32 0 /* src0_modifiers */, (f32 (EXTRACT_SUBREG $src, sub0)),
                  0 /* src1_modifiers */, (f32 (EXTRACT_SUBREG $src, sub1)),
                  0 /* src2_modifiers */, (f32 (EXTRACT_SUBREG $src, sub2)),
                  0 /* clamp */, 0 /* omod */), sub3)
>;

// i1 -> i32 sign extension: select -1 (all ones) or 0.
def : Pat <
  (i32 (sext i1:$src0)),
  (V_CNDMASK_B32_e64 (i32 0), (i32 -1), $src0)
>;

// i1 -> i32 zero/any extension: select 1 or 0.
class Ext32Pat <SDNode ext> : Pat <
  (i32 (ext i1:$src0)),
  (V_CNDMASK_B32_e64 (i32 0), (i32 1), $src0)
>;

def : Ext32Pat <zext>;
def : Ext32Pat <anyext>;

// Unsigned reciprocal: convert to float, take the fast reciprocal, and
// rescale into the integer domain.
// The multiplication scales from [0,1] to the unsigned integer range
def : Pat <
  (AMDGPUurecip i32:$src0),
  (V_CVT_U32_F32_e32
    (V_MUL_F32_e32 (i32 CONST.FP_UINT_MAX_PLUS_1),
                   (V_RCP_IFLAG_F32_e32 (V_CVT_F32_U32_e32 $src0))))
>;
861
//===----------------------------------------------------------------------===//
// VOP3 Patterns
//===----------------------------------------------------------------------===//

// 24-bit signed/unsigned multiply-add.
def : IMad24Pat<V_MAD_I32_I24>;
def : UMad24Pat<V_MAD_U32_U24>;

// Bitfield insert and rotate-right selection helpers.
defm : BFIPatterns <V_BFI_B32, S_MOV_B32, SReg_64>;
def : ROTRPattern <V_ALIGNBIT_B32>;
871
/********** ====================== **********/
/**********  Indirect addressing  **********/
/********** ====================== **********/

// Dynamic (variable-index) vector element access: a MOVRELOffset of a
// variable index plus a constant offset selects the pseudo-instruction for
// the given vector width, looked up by name with !cast.
multiclass SI_INDIRECT_Pattern <ValueType vt, ValueType eltvt, string VecSize> {
  // Extract with offset
  def : Pat<
    (eltvt (extractelt vt:$src, (MOVRELOffset i32:$idx, (i32 imm:$offset)))),
    (!cast<Instruction>("SI_INDIRECT_SRC_"#VecSize) $src, $idx, imm:$offset)
  >;

  // Insert with offset
  def : Pat<
    (insertelt vt:$src, eltvt:$val, (MOVRELOffset i32:$idx, (i32 imm:$offset))),
    (!cast<Instruction>("SI_INDIRECT_DST_"#VecSize) $src, $idx, imm:$offset, $val)
  >;
}

defm : SI_INDIRECT_Pattern <v2f32, f32, "V2">;
defm : SI_INDIRECT_Pattern <v4f32, f32, "V4">;
defm : SI_INDIRECT_Pattern <v8f32, f32, "V8">;
defm : SI_INDIRECT_Pattern <v16f32, f32, "V16">;

defm : SI_INDIRECT_Pattern <v2i32, i32, "V2">;
defm : SI_INDIRECT_Pattern <v4i32, i32, "V4">;
defm : SI_INDIRECT_Pattern <v8i32, i32, "V8">;
defm : SI_INDIRECT_Pattern <v16i32, i32, "V16">;
Christian Konig2989ffc2013-03-18 11:34:16 +0000899
//===----------------------------------------------------------------------===//
// SAD Patterns
//===----------------------------------------------------------------------===//

// Sum-of-absolute-differences: |a - b| + c, where |a - b| is expressed
// either as max-min or as a compare-selected subtraction. The _oneuse
// predicates prevent matching when the intermediate has other users.
def : Pat <
  (add (sub_oneuse (umax i32:$src0, i32:$src1),
                   (umin i32:$src0, i32:$src1)),
       i32:$src2),
  (V_SAD_U32 $src0, $src1, $src2)
>;

def : Pat <
  (add (select_oneuse (i1 (setugt i32:$src0, i32:$src1)),
                      (sub i32:$src0, i32:$src1),
                      (sub i32:$src1, i32:$src0)),
       i32:$src2),
  (V_SAD_U32 $src0, $src1, $src2)
>;
918
//===----------------------------------------------------------------------===//
// Conversion Patterns
//===----------------------------------------------------------------------===//

// sext_inreg via S_BFE (bitfield extract). The second operand packs
// offset | (width << 16), as the trailing comments show: offset 0 and
// the source width in bits 22:16.
def : Pat<(i32 (sext_inreg i32:$src, i1)),
  (S_BFE_I32 i32:$src, (i32 65536))>; // 0 | 1 << 16

// Handle sext_inreg in i64
def : Pat <
  (i64 (sext_inreg i64:$src, i1)),
  (S_BFE_I64 i64:$src, (i32 0x10000)) // 0 | 1 << 16
>;

def : Pat <
  (i16 (sext_inreg i16:$src, i1)),
  (S_BFE_I32 $src, (i32 0x00010000)) // 0 | 1 << 16
>;

def : Pat <
  (i16 (sext_inreg i16:$src, i8)),
  (S_BFE_I32 $src, (i32 0x80000)) // 0 | 8 << 16
>;

def : Pat <
  (i64 (sext_inreg i64:$src, i8)),
  (S_BFE_I64 i64:$src, (i32 0x80000)) // 0 | 8 << 16
>;

def : Pat <
  (i64 (sext_inreg i64:$src, i16)),
  (S_BFE_I64 i64:$src, (i32 0x100000)) // 0 | 16 << 16
>;

def : Pat <
  (i64 (sext_inreg i64:$src, i32)),
  (S_BFE_I64 i64:$src, (i32 0x200000)) // 0 | 32 << 16
>;

// i32 -> i64 widening: build the pair with REG_SEQUENCE; zext fills the
// high half with 0, anyext leaves it undefined.
def : Pat <
  (i64 (zext i32:$src)),
  (REG_SEQUENCE SReg_64, $src, sub0, (S_MOV_B32 (i32 0)), sub1)
>;

def : Pat <
  (i64 (anyext i32:$src)),
  (REG_SEQUENCE SReg_64, $src, sub0, (i32 (IMPLICIT_DEF)), sub1)
>;

// i1 -> i64 zero/any extension: low half selects 0/1, high half is 0.
class ZExt_i64_i1_Pat <SDNode ext> : Pat <
  (i64 (ext i1:$src)),
  (REG_SEQUENCE VReg_64,
    (V_CNDMASK_B32_e64 (i32 0), (i32 1), $src), sub0,
    (S_MOV_B32 (i32 0)), sub1)
>;


def : ZExt_i64_i1_Pat<zext>;
def : ZExt_i64_i1_Pat<anyext>;

// FIXME: We need to use COPY_TO_REGCLASS to work-around the fact that
// REG_SEQUENCE patterns don't support instructions with multiple outputs.
def : Pat <
  (i64 (sext i32:$src)),
  (REG_SEQUENCE SReg_64, $src, sub0,
    (i32 (COPY_TO_REGCLASS (S_ASHR_I32 $src, (i32 31)), SReg_32_XM0)), sub1)
>;

// i1 -> i64 sign extension: both halves are all-ones when true.
def : Pat <
  (i64 (sext i1:$src)),
  (REG_SEQUENCE VReg_64,
    (V_CNDMASK_B32_e64 (i32 0), (i32 -1), $src), sub0,
    (V_CNDMASK_B32_e64 (i32 0), (i32 -1), $src), sub1)
>;

// fp -> i1 conversion: a boolean is produced by comparing the source for
// equality with the constant 1.0 (fp_to_uint) or -1.0 (fp_to_sint).
class FPToI1Pat<Instruction Inst, int KOne, ValueType kone_type, ValueType vt, SDPatternOperator fp_to_int> : Pat <
  (i1 (fp_to_int (vt (VOP3Mods vt:$src0, i32:$src0_modifiers)))),
  (i1 (Inst 0, (kone_type KOne), $src0_modifiers, $src0, DSTCLAMP.NONE, DSTOMOD.NONE))
>;

def : FPToI1Pat<V_CMP_EQ_F32_e64, CONST.FP32_ONE, i32, f32, fp_to_uint>;
def : FPToI1Pat<V_CMP_EQ_F32_e64, CONST.FP32_NEG_ONE, i32, f32, fp_to_sint>;
def : FPToI1Pat<V_CMP_EQ_F64_e64, CONST.FP64_ONE, i64, f64, fp_to_uint>;
def : FPToI1Pat<V_CMP_EQ_F64_e64, CONST.FP64_NEG_ONE, i64, f64, fp_to_sint>;
Matt Arsenault7fb961f2016-07-22 17:01:21 +00001002
// If we need to perform a logical operation on i1 values, we need to
// use vector comparisons since there is only one SCC register. Vector
// comparisons still write to a pair of SGPRs, so treat these as
// 64-bit comparisons. When legalizing SGPR copies, instructions
// resulting in the copies from SCC to these instructions will be
// moved to the VALU.
def : Pat <
  (i1 (and i1:$src0, i1:$src1)),
  (S_AND_B64 $src0, $src1)
>;

def : Pat <
  (i1 (or i1:$src0, i1:$src1)),
  (S_OR_B64 $src0, $src1)
>;

def : Pat <
  (i1 (xor i1:$src0, i1:$src1)),
  (S_XOR_B64 $src0, $src1)
>;

// i1 -> f32: select the FP constant directly (no conversion instruction
// needed); signed gives -1.0, unsigned gives 1.0 for true.
def : Pat <
  (f32 (sint_to_fp i1:$src)),
  (V_CNDMASK_B32_e64 (i32 0), (i32 CONST.FP32_NEG_ONE), $src)
>;

def : Pat <
  (f32 (uint_to_fp i1:$src)),
  (V_CNDMASK_B32_e64 (i32 0), (i32 CONST.FP32_ONE), $src)
>;

// i1 -> f64: select the integer -1/0 (or 1/0) then convert.
def : Pat <
  (f64 (sint_to_fp i1:$src)),
  (V_CVT_F64_I32_e32 (V_CNDMASK_B32_e64 (i32 0), (i32 -1), $src))
>;

def : Pat <
  (f64 (uint_to_fp i1:$src)),
  (V_CVT_F64_U32_e32 (V_CNDMASK_B32_e64 (i32 0), (i32 1), $src))
>;
1043
//===----------------------------------------------------------------------===//
// Miscellaneous Patterns
//===----------------------------------------------------------------------===//

// i64 -> i32 truncation is just the low subregister.
def : Pat <
  (i32 (trunc i64:$a)),
  (EXTRACT_SUBREG $a, sub0)
>;

// Truncation to i1 tests the low bit: (a & 1) == 1.
def : Pat <
  (i1 (trunc i32:$a)),
  (V_CMP_EQ_U32_e64 (S_AND_B32 (i32 1), $a), (i32 1))
>;

def : Pat <
  (i1 (trunc i64:$a)),
  (V_CMP_EQ_U32_e64 (S_AND_B32 (i32 1),
                    (i32 (EXTRACT_SUBREG $a, sub0))), (i32 1))
>;

// bswap with two rotates and a bitfield insert: rotate-right by 24
// supplies bytes 0 and 2 of the result, rotate-right by 8 supplies
// bytes 1 and 3, merged under mask 0x00ff00ff.
def : Pat <
  (i32 (bswap i32:$a)),
  (V_BFI_B32 (S_MOV_B32 (i32 0x00ff00ff)),
             (V_ALIGNBIT_B32 $a, $a, (i32 24)),
             (V_ALIGNBIT_B32 $a, $a, (i32 8)))
>;

// Bitfield mask creation: ((1 << a) - 1) << b matches S_BFM directly;
// the unshifted form uses a zero shift operand.
multiclass BFMPatterns <ValueType vt, InstSI BFM, InstSI MOV> {
  def : Pat <
    (vt (shl (vt (add (vt (shl 1, vt:$a)), -1)), vt:$b)),
    (BFM $a, $b)
  >;

  def : Pat <
    (vt (add (vt (shl 1, vt:$a)), -1)),
    (BFM $a, (MOV (i32 0)))
  >;
}

defm : BFMPatterns <i32, S_BFM_B32, S_MOV_B32>;
// FIXME: defm : BFMPatterns <i64, S_BFM_B64, S_MOV_B64>;

def : BFEPattern <V_BFE_U32, S_MOV_B32>;

// fcanonicalize is implemented as a multiply by 1.0, preserving any
// source modifiers matched by VOP3Mods.
def : Pat<
  (fcanonicalize (f16 (VOP3Mods f16:$src, i32:$src_mods))),
  (V_MUL_F16_e64 0, (i32 CONST.FP16_ONE), $src_mods, $src, 0, 0)
>;

def : Pat<
  (fcanonicalize (f32 (VOP3Mods f32:$src, i32:$src_mods))),
  (V_MUL_F32_e64 0, (i32 CONST.FP32_ONE), $src_mods, $src, 0, 0)
>;

def : Pat<
  (fcanonicalize (f64 (VOP3Mods f64:$src, i32:$src_mods))),
  (V_MUL_F64 0, CONST.FP64_ONE, $src_mods, $src, 0, 0)
>;

// Allow integer inputs
class ExpPattern<SDPatternOperator node, ValueType vt, Instruction Inst> : Pat<
  (node (i8 timm:$tgt), (i8 timm:$en), vt:$src0, vt:$src1, vt:$src2, vt:$src3, (i1 timm:$compr), (i1 timm:$vm)),
  (Inst i8:$tgt, vt:$src0, vt:$src1, vt:$src2, vt:$src3, i1:$vm, i1:$compr, i8:$en)
>;

def : ExpPattern<AMDGPUexport, i32, EXP>;
def : ExpPattern<AMDGPUexport_done, i32, EXP_DONE>;
1111
//===----------------------------------------------------------------------===//
// Fract Patterns
//===----------------------------------------------------------------------===//

let Predicates = [isSI] in {

// V_FRACT is buggy on SI, so the F32 version is never used and (x-floor(x)) is
// used instead. However, SI doesn't have V_FLOOR_F64, so the most efficient
// way to implement it is using V_FRACT_F64.
// The workaround for the V_FRACT bug is:
// fract(x) = isnan(x) ? x : min(V_FRACT(x), 0.99999999999999999)

// Convert floor(x) to (x - fract(x))
// 0x3fefffffffffffff is the largest double strictly less than 1.0, and
// class mask 3 selects signaling|quiet NaN for the V_CMP_CLASS test.
def : Pat <
  (f64 (ffloor (f64 (VOP3Mods f64:$x, i32:$mods)))),
  (V_ADD_F64
      $mods,
      $x,
      SRCMODS.NEG,
      (V_CNDMASK_B64_PSEUDO
         (V_MIN_F64
             SRCMODS.NONE,
             (V_FRACT_F64_e64 $mods, $x, DSTCLAMP.NONE, DSTOMOD.NONE),
             SRCMODS.NONE,
             (V_MOV_B64_PSEUDO 0x3fefffffffffffff),
             DSTCLAMP.NONE, DSTOMOD.NONE),
         $x,
         (V_CMP_CLASS_F64_e64 SRCMODS.NONE, $x, (i32 3 /*NaN*/))),
      DSTCLAMP.NONE, DSTOMOD.NONE)
>;

} // End Predicates = [isSI]
1144
//============================================================================//
// Miscellaneous Optimization Patterns
//============================================================================//

// SHA-256 majority function via bitfield insert + xor.
def : SHA256MaPattern <V_BFI_B32, V_XOR_B32_e64>;

// Integer median-of-three: max/min trees collapse to a single V_MED3.
def : IntMed3Pat<V_MED3_I32, smax, smax_oneuse, smin_oneuse>;
def : IntMed3Pat<V_MED3_U32, umax, umax_oneuse, umin_oneuse>;

// This matches 16 permutations of
// max(min(x, y), min(max(x, y), z))
// The nnan-qualified VOP3Mods restrict the match to no-NaN inputs.
class FPMed3Pat<ValueType vt,
                Instruction med3Inst> : Pat<
  (fmaxnum (fminnum_oneuse (VOP3Mods_nnan vt:$src0, i32:$src0_mods),
                           (VOP3Mods_nnan vt:$src1, i32:$src1_mods)),
           (fminnum_oneuse (fmaxnum_oneuse (VOP3Mods_nnan vt:$src0, i32:$src0_mods),
                                           (VOP3Mods_nnan vt:$src1, i32:$src1_mods)),
           (vt (VOP3Mods_nnan vt:$src2, i32:$src2_mods)))),
  (med3Inst $src0_mods, $src0, $src1_mods, $src1, $src2_mods, $src2, DSTCLAMP.NONE, DSTOMOD.NONE)
>;

def : FPMed3Pat<f32, V_MED3_F32>;


// Undo sub x, c -> add x, -c canonicalization since c is more likely
// an inline immediate than -c.
// TODO: Also do for 64-bit.
def : Pat<
  (add i32:$src0, (i32 NegSubInlineConst32:$src1)),
  (S_SUB_I32 $src0, NegSubInlineConst32:$src1)
>;
1176
//============================================================================//
// Assembler aliases
//============================================================================//

// Accept the *_u32 spellings in assembly for the *_i32 VALU add/sub
// instructions.
def : MnemonicAlias<"v_add_u32", "v_add_i32">;
def : MnemonicAlias<"v_sub_u32", "v_sub_i32">;
def : MnemonicAlias<"v_subrev_u32", "v_subrev_i32">;
1184
Marek Olsak5df00d62014-12-07 12:18:57 +00001185} // End isGCN predicate