//===-- AMDGPUInstructions.td - Common instruction defs ---*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains instruction defs that are common to all hw codegen
// targets.
//
//===----------------------------------------------------------------------===//

class AddressSpacesImpl {
  int Flat = 0;
  int Global = 1;
  int Region = 2;
  int Local = 3;
  int Constant = 4;
  int Private = 5;
}

def AddrSpaces : AddressSpacesImpl;
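// NOTE: the values above are assumed to line up with the AMDGPUAS::* address
// space numbering (FLAT_ADDRESS = 0, GLOBAL_ADDRESS = 1, ...) that the C++
// predicates later in this file compare against.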


class AMDGPUInst <dag outs, dag ins, string asm = "",
  list<dag> pattern = []> : Instruction {
  field bit isRegisterLoad = 0;
  field bit isRegisterStore = 0;

  let Namespace = "AMDGPU";
  let OutOperandList = outs;
  let InOperandList = ins;
  let AsmString = asm;
  let Pattern = pattern;
  let Itinerary = NullALU;

  // SoftFail is a field the disassembler can use to provide a way for
  // instructions to not match without killing the whole decode process. It is
  // mainly used for ARM, but Tablegen expects this field to exist or it fails
  // to build the decode table.
  field bits<64> SoftFail = 0;

  let DecoderNamespace = Namespace;

  let TSFlags{63} = isRegisterLoad;
  let TSFlags{62} = isRegisterStore;
}

class AMDGPUShaderInst <dag outs, dag ins, string asm = "",
  list<dag> pattern = []> : AMDGPUInst<outs, ins, asm, pattern> {

  field bits<32> Inst = 0xffffffff;
}

//===---------------------------------------------------------------------===//
// Return instruction
//===---------------------------------------------------------------------===//

class ILFormat<dag outs, dag ins, string asmstr, list<dag> pattern>
: Instruction {

  let Namespace = "AMDGPU";
  dag OutOperandList = outs;
  dag InOperandList = ins;
  let Pattern = pattern;
  let AsmString = !strconcat(asmstr, "\n");
  let isPseudo = 1;
  let Itinerary = NullALU;
  bit hasIEEEFlag = 0;
  bit hasZeroOpFlag = 0;
  let mayLoad = 0;
  let mayStore = 0;
  let hasSideEffects = 0;
  let isCodeGenOnly = 1;
}

def TruePredicate : Predicate<"">;

class PredicateControl {
  Predicate SubtargetPredicate = TruePredicate;
  list<Predicate> AssemblerPredicates = [];
  Predicate AssemblerPredicate = TruePredicate;
  Predicate WaveSizePredicate = TruePredicate;
  list<Predicate> OtherPredicates = [];
  list<Predicate> Predicates = !listconcat([SubtargetPredicate,
                                            AssemblerPredicate,
                                            WaveSizePredicate],
                                           AssemblerPredicates,
                                           OtherPredicates);
}
class AMDGPUPat<dag pattern, dag result> : Pat<pattern, result>,
  PredicateControl;

def FP16Denormals : Predicate<"Subtarget->hasFP16Denormals()">;
def FP32Denormals : Predicate<"Subtarget->hasFP32Denormals()">;
def FP64Denormals : Predicate<"Subtarget->hasFP64Denormals()">;
def NoFP16Denormals : Predicate<"!Subtarget->hasFP16Denormals()">;
def NoFP32Denormals : Predicate<"!Subtarget->hasFP32Denormals()">;
def NoFP64Denormals : Predicate<"!Subtarget->hasFP64Denormals()">;
def UnsafeFPMath : Predicate<"TM.Options.UnsafeFPMath">;
def FMA : Predicate<"Subtarget->hasFMA()">;

def InstFlag : OperandWithDefaultOps <i32, (ops (i32 0))>;

def u16ImmTarget : AsmOperandClass {
  let Name = "U16Imm";
  let RenderMethod = "addImmOperands";
}

def s16ImmTarget : AsmOperandClass {
  let Name = "S16Imm";
  let RenderMethod = "addImmOperands";
}

let OperandType = "OPERAND_IMMEDIATE" in {

def u32imm : Operand<i32> {
  let PrintMethod = "printU32ImmOperand";
}

def u16imm : Operand<i16> {
  let PrintMethod = "printU16ImmOperand";
  let ParserMatchClass = u16ImmTarget;
}

def s16imm : Operand<i16> {
  let PrintMethod = "printU16ImmOperand";
  let ParserMatchClass = s16ImmTarget;
}

def u8imm : Operand<i8> {
  let PrintMethod = "printU8ImmOperand";
}

} // End OperandType = "OPERAND_IMMEDIATE"

//===--------------------------------------------------------------------===//
// Custom Operands
//===--------------------------------------------------------------------===//
def brtarget   : Operand<OtherVT>;

//===----------------------------------------------------------------------===//
// Misc. PatFrags
//===----------------------------------------------------------------------===//

class HasOneUseUnaryOp<SDPatternOperator op> : PatFrag<
  (ops node:$src0),
  (op $src0),
  [{ return N->hasOneUse(); }]
>;

class HasOneUseBinOp<SDPatternOperator op> : PatFrag<
  (ops node:$src0, node:$src1),
  (op $src0, $src1),
  [{ return N->hasOneUse(); }]
>;

class HasOneUseTernaryOp<SDPatternOperator op> : PatFrag<
  (ops node:$src0, node:$src1, node:$src2),
  (op $src0, $src1, $src2),
  [{ return N->hasOneUse(); }]
>;

let Properties = [SDNPCommutative, SDNPAssociative] in {
def smax_oneuse : HasOneUseBinOp<smax>;
def smin_oneuse : HasOneUseBinOp<smin>;
def umax_oneuse : HasOneUseBinOp<umax>;
def umin_oneuse : HasOneUseBinOp<umin>;

def fminnum_oneuse : HasOneUseBinOp<fminnum>;
def fmaxnum_oneuse : HasOneUseBinOp<fmaxnum>;

def fminnum_ieee_oneuse : HasOneUseBinOp<fminnum_ieee>;
def fmaxnum_ieee_oneuse : HasOneUseBinOp<fmaxnum_ieee>;


def and_oneuse : HasOneUseBinOp<and>;
def or_oneuse : HasOneUseBinOp<or>;
def xor_oneuse : HasOneUseBinOp<xor>;
} // Properties = [SDNPCommutative, SDNPAssociative]

def not_oneuse : HasOneUseUnaryOp<not>;

def add_oneuse : HasOneUseBinOp<add>;
def sub_oneuse : HasOneUseBinOp<sub>;

def srl_oneuse : HasOneUseBinOp<srl>;
def shl_oneuse : HasOneUseBinOp<shl>;

def select_oneuse : HasOneUseTernaryOp<select>;

def AMDGPUmul_u24_oneuse : HasOneUseBinOp<AMDGPUmul_u24>;
def AMDGPUmul_i24_oneuse : HasOneUseBinOp<AMDGPUmul_i24>;

def srl_16 : PatFrag<
  (ops node:$src0), (srl_oneuse node:$src0, (i32 16))
>;


def hi_i16_elt : PatFrag<
  (ops node:$src0), (i16 (trunc (i32 (srl_16 node:$src0))))
>;
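// In other words: the high 16 bits of a single-use 32-bit value, reinterpreted
// as an i16.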


def hi_f16_elt : PatLeaf<
  (vt), [{
  if (N->getOpcode() != ISD::BITCAST)
    return false;
  SDValue Tmp = N->getOperand(0);

  if (Tmp.getOpcode() != ISD::SRL)
    return false;
  if (const auto *RHS = dyn_cast<ConstantSDNode>(Tmp.getOperand(1)))
    return RHS->getZExtValue() == 16;
  return false;
}]>;

//===----------------------------------------------------------------------===//
// PatLeafs for floating-point comparisons
//===----------------------------------------------------------------------===//

def COND_OEQ : PatFrags<(ops), [(OtherVT SETOEQ), (OtherVT SETEQ)]>;
def COND_ONE : PatFrags<(ops), [(OtherVT SETONE), (OtherVT SETNE)]>;
def COND_OGT : PatFrags<(ops), [(OtherVT SETOGT), (OtherVT SETGT)]>;
def COND_OGE : PatFrags<(ops), [(OtherVT SETOGE), (OtherVT SETGE)]>;
def COND_OLT : PatFrags<(ops), [(OtherVT SETOLT), (OtherVT SETLT)]>;
def COND_OLE : PatFrags<(ops), [(OtherVT SETOLE), (OtherVT SETLE)]>;
def COND_O : PatFrags<(ops), [(OtherVT SETO)]>;
def COND_UO : PatFrags<(ops), [(OtherVT SETUO)]>;

//===----------------------------------------------------------------------===//
// PatLeafs for unsigned / unordered comparisons
//===----------------------------------------------------------------------===//

def COND_UEQ : PatFrag<(ops), (OtherVT SETUEQ)>;
def COND_UNE : PatFrag<(ops), (OtherVT SETUNE)>;
def COND_UGT : PatFrag<(ops), (OtherVT SETUGT)>;
def COND_UGE : PatFrag<(ops), (OtherVT SETUGE)>;
def COND_ULT : PatFrag<(ops), (OtherVT SETULT)>;
def COND_ULE : PatFrag<(ops), (OtherVT SETULE)>;

// XXX - For some reason the R600 version prefers to use unordered for setne?
def COND_UNE_NE : PatFrags<(ops), [(OtherVT SETUNE), (OtherVT SETNE)]>;

//===----------------------------------------------------------------------===//
// PatLeafs for signed comparisons
//===----------------------------------------------------------------------===//

def COND_SGT : PatFrag<(ops), (OtherVT SETGT)>;
def COND_SGE : PatFrag<(ops), (OtherVT SETGE)>;
def COND_SLT : PatFrag<(ops), (OtherVT SETLT)>;
def COND_SLE : PatFrag<(ops), (OtherVT SETLE)>;

//===----------------------------------------------------------------------===//
// PatLeafs for integer equality
//===----------------------------------------------------------------------===//

def COND_EQ : PatFrags<(ops), [(OtherVT SETEQ), (OtherVT SETUEQ)]>;
def COND_NE : PatFrags<(ops), [(OtherVT SETNE), (OtherVT SETUNE)]>;

// FIXME: Should not need code predicate
//def COND_NULL : PatLeaf<(OtherVT null_frag)>;
def COND_NULL : PatLeaf <
  (cond),
  [{(void)N; return false;}]
>;

//===----------------------------------------------------------------------===//
// PatLeafs for Texture Constants
//===----------------------------------------------------------------------===//

def TEX_ARRAY : PatLeaf<
  (imm),
  [{uint32_t TType = (uint32_t)N->getZExtValue();
    return TType == 9 || TType == 10 || TType == 16;
  }]
>;

def TEX_RECT : PatLeaf<
  (imm),
  [{uint32_t TType = (uint32_t)N->getZExtValue();
    return TType == 5;
  }]
>;

def TEX_SHADOW : PatLeaf<
  (imm),
  [{uint32_t TType = (uint32_t)N->getZExtValue();
    return (TType >= 6 && TType <= 8) || TType == 13;
  }]
>;

def TEX_SHADOW_ARRAY : PatLeaf<
  (imm),
  [{uint32_t TType = (uint32_t)N->getZExtValue();
    return TType == 11 || TType == 12 || TType == 17;
  }]
>;

//===----------------------------------------------------------------------===//
// Load/Store Pattern Fragments
//===----------------------------------------------------------------------===//

class AddressSpaceList<list<int> AS> {
  list<int> AddrSpaces = AS;
}

class Aligned<int Bytes> {
  int MinAlignment = Bytes;
}

class LoadFrag <SDPatternOperator op> : PatFrag<(ops node:$ptr), (op node:$ptr)>;

class StoreFrag<SDPatternOperator op> : PatFrag <
  (ops node:$value, node:$ptr), (op node:$value, node:$ptr)
>;

class StoreHi16<SDPatternOperator op> : PatFrag <
  (ops node:$value, node:$ptr), (op (srl node:$value, (i32 16)), node:$ptr)
>;

def LoadAddress_constant : AddressSpaceList<[ AddrSpaces.Constant ]>;
def LoadAddress_global : AddressSpaceList<[ AddrSpaces.Global, AddrSpaces.Constant ]>;
def StoreAddress_global : AddressSpaceList<[ AddrSpaces.Global ]>;

def LoadAddress_flat : AddressSpaceList<[ AddrSpaces.Flat,
                                          AddrSpaces.Global,
                                          AddrSpaces.Constant ]>;
def StoreAddress_flat : AddressSpaceList<[ AddrSpaces.Flat, AddrSpaces.Global ]>;

def LoadAddress_private : AddressSpaceList<[ AddrSpaces.Private ]>;
def StoreAddress_private : AddressSpaceList<[ AddrSpaces.Private ]>;

def LoadAddress_local : AddressSpaceList<[ AddrSpaces.Local ]>;
def StoreAddress_local : AddressSpaceList<[ AddrSpaces.Local ]>;

def LoadAddress_region : AddressSpaceList<[ AddrSpaces.Region ]>;
def StoreAddress_region : AddressSpaceList<[ AddrSpaces.Region ]>;



class GlobalLoadAddress : CodePatPred<[{
  auto AS = cast<MemSDNode>(N)->getAddressSpace();
  return AS == AMDGPUAS::GLOBAL_ADDRESS || AS == AMDGPUAS::CONSTANT_ADDRESS;
}]>;

class FlatLoadAddress : CodePatPred<[{
  const auto AS = cast<MemSDNode>(N)->getAddressSpace();
  return AS == AMDGPUAS::FLAT_ADDRESS ||
         AS == AMDGPUAS::GLOBAL_ADDRESS ||
         AS == AMDGPUAS::CONSTANT_ADDRESS;
}]>;

class GlobalAddress : CodePatPred<[{
  return cast<MemSDNode>(N)->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS;
}]>;

class PrivateAddress : CodePatPred<[{
  return cast<MemSDNode>(N)->getAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS;
}]>;

class LocalAddress : CodePatPred<[{
  return cast<MemSDNode>(N)->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS;
}]>;

class RegionAddress : CodePatPred<[{
  return cast<MemSDNode>(N)->getAddressSpace() == AMDGPUAS::REGION_ADDRESS;
}]>;

class FlatStoreAddress : CodePatPred<[{
  const auto AS = cast<MemSDNode>(N)->getAddressSpace();
  return AS == AMDGPUAS::FLAT_ADDRESS ||
         AS == AMDGPUAS::GLOBAL_ADDRESS;
}]>;

// TODO: Remove these once stores are converted to the new PatFrag format.
class PrivateStore <SDPatternOperator op> : StoreFrag <op>, PrivateAddress;
class LocalStore <SDPatternOperator op> : StoreFrag <op>, LocalAddress;
class RegionStore <SDPatternOperator op> : StoreFrag <op>, RegionAddress;
class GlobalStore <SDPatternOperator op> : StoreFrag<op>, GlobalAddress;
class FlatStore <SDPatternOperator op> : StoreFrag <op>, FlatStoreAddress;


foreach as = [ "global", "flat", "constant", "local", "private", "region" ] in {
let AddressSpaces = !cast<AddressSpaceList>("LoadAddress_"#as).AddrSpaces in {

def load_#as : PatFrag<(ops node:$ptr), (unindexedload node:$ptr)> {
  let IsLoad = 1;
  let IsNonExtLoad = 1;
}

def extloadi8_#as : PatFrag<(ops node:$ptr), (extload node:$ptr)> {
  let IsLoad = 1;
  let MemoryVT = i8;
}

def extloadi16_#as : PatFrag<(ops node:$ptr), (extload node:$ptr)> {
  let IsLoad = 1;
  let MemoryVT = i16;
}

def sextloadi8_#as : PatFrag<(ops node:$ptr), (sextload node:$ptr)> {
  let IsLoad = 1;
  let MemoryVT = i8;
}

def sextloadi16_#as : PatFrag<(ops node:$ptr), (sextload node:$ptr)> {
  let IsLoad = 1;
  let MemoryVT = i16;
}

def zextloadi8_#as : PatFrag<(ops node:$ptr), (zextload node:$ptr)> {
  let IsLoad = 1;
  let MemoryVT = i8;
}

def zextloadi16_#as : PatFrag<(ops node:$ptr), (zextload node:$ptr)> {
  let IsLoad = 1;
  let MemoryVT = i16;
}

def atomic_load_32_#as : PatFrag<(ops node:$ptr), (atomic_load_32 node:$ptr)> {
  let IsAtomic = 1;
  let MemoryVT = i32;
}

def atomic_load_64_#as : PatFrag<(ops node:$ptr), (atomic_load_64 node:$ptr)> {
  let IsAtomic = 1;
  let MemoryVT = i64;
}

def store_#as : PatFrag<(ops node:$val, node:$ptr),
                        (unindexedstore node:$val, node:$ptr)> {
  let IsStore = 1;
  let IsTruncStore = 0;
}

// truncstore fragments.
def truncstore_#as : PatFrag<(ops node:$val, node:$ptr),
                             (unindexedstore node:$val, node:$ptr)> {
  let IsStore = 1;
  let IsTruncStore = 1;
}

// TODO: We don't really need the truncstore here. We can use
// unindexedstore with MemoryVT directly, which will save an
// unnecessary check that the memory size is less than the value type
// in the generated matcher table.
def truncstorei8_#as : PatFrag<(ops node:$val, node:$ptr),
                               (truncstore node:$val, node:$ptr)> {
  let IsStore = 1;
  let MemoryVT = i8;
}

def truncstorei16_#as : PatFrag<(ops node:$val, node:$ptr),
                                (truncstore node:$val, node:$ptr)> {
  let IsStore = 1;
  let MemoryVT = i16;
}

defm atomic_store_#as : binary_atomic_op<atomic_store>;

} // End let AddressSpaces = ...
} // End foreach AddrSpace
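// For illustration, the as = "local" iteration of the loop above produces
// fragments named load_local, extloadi8_local, sextloadi16_local,
// zextloadi8_local, atomic_load_32_local, store_local, truncstorei8_local,
// truncstorei16_local, etc., each restricted to AddrSpaces.Local; the aligned
// local loads and stores defined below refine load_local / store_local with a
// MinAlignment requirement.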


def store_hi16_private : StoreHi16 <truncstorei16>, PrivateAddress;
def truncstorei8_hi16_private : StoreHi16<truncstorei8>, PrivateAddress;

def store_atomic_global : GlobalStore<atomic_store>;
def truncstorei8_hi16_global : StoreHi16 <truncstorei8>, GlobalAddress;
def truncstorei16_hi16_global : StoreHi16 <truncstorei16>, GlobalAddress;

def store_local_hi16 : StoreHi16 <truncstorei16>, LocalAddress;
def truncstorei8_local_hi16 : StoreHi16<truncstorei8>, LocalAddress;
def atomic_store_local : LocalStore <atomic_store>;


def load_align8_local : PatFrag <(ops node:$ptr), (load_local node:$ptr)> {
  let IsLoad = 1;
  let IsNonExtLoad = 1;
  let MinAlignment = 8;
}

def load_align16_local : PatFrag <(ops node:$ptr), (load_local node:$ptr)> {
  let IsLoad = 1;
  let IsNonExtLoad = 1;
  let MinAlignment = 16;
}

def store_align8_local: PatFrag<(ops node:$val, node:$ptr),
                                (store_local node:$val, node:$ptr)>, Aligned<8> {
  let IsStore = 1;
  let IsTruncStore = 0;
}

def store_align16_local: PatFrag<(ops node:$val, node:$ptr),
                                 (store_local node:$val, node:$ptr)>, Aligned<16> {
  let IsStore = 1;
  let IsTruncStore = 0;
}


def atomic_store_flat : FlatStore <atomic_store>;
def truncstorei8_hi16_flat : StoreHi16<truncstorei8>, FlatStoreAddress;
def truncstorei16_hi16_flat : StoreHi16<truncstorei16>, FlatStoreAddress;


class local_binary_atomic_op<SDNode atomic_op> :
  PatFrag<(ops node:$ptr, node:$value),
    (atomic_op node:$ptr, node:$value), [{
  return cast<MemSDNode>(N)->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS;
}]>;

class region_binary_atomic_op<SDNode atomic_op> :
  PatFrag<(ops node:$ptr, node:$value),
    (atomic_op node:$ptr, node:$value), [{
  return cast<MemSDNode>(N)->getAddressSpace() == AMDGPUAS::REGION_ADDRESS;
}]>;


def atomic_swap_local : local_binary_atomic_op<atomic_swap>;
def atomic_load_add_local : local_binary_atomic_op<atomic_load_add>;
def atomic_load_sub_local : local_binary_atomic_op<atomic_load_sub>;
def atomic_load_and_local : local_binary_atomic_op<atomic_load_and>;
def atomic_load_or_local : local_binary_atomic_op<atomic_load_or>;
def atomic_load_xor_local : local_binary_atomic_op<atomic_load_xor>;
def atomic_load_nand_local : local_binary_atomic_op<atomic_load_nand>;
def atomic_load_min_local : local_binary_atomic_op<atomic_load_min>;
def atomic_load_max_local : local_binary_atomic_op<atomic_load_max>;
def atomic_load_umin_local : local_binary_atomic_op<atomic_load_umin>;
def atomic_load_umax_local : local_binary_atomic_op<atomic_load_umax>;

def mskor_global : PatFrag<(ops node:$val, node:$ptr),
                           (AMDGPUstore_mskor node:$val, node:$ptr), [{
  return cast<MemSDNode>(N)->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS;
}]>;

class AtomicCmpSwapLocal <SDNode cmp_swap_node> : PatFrag<
  (ops node:$ptr, node:$cmp, node:$swap),
  (cmp_swap_node node:$ptr, node:$cmp, node:$swap), [{
  AtomicSDNode *AN = cast<AtomicSDNode>(N);
  return AN->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS;
}]>;

class AtomicCmpSwapRegion <SDNode cmp_swap_node> : PatFrag<
  (ops node:$ptr, node:$cmp, node:$swap),
  (cmp_swap_node node:$ptr, node:$cmp, node:$swap), [{
  AtomicSDNode *AN = cast<AtomicSDNode>(N);
  return AN->getAddressSpace() == AMDGPUAS::REGION_ADDRESS;
}]>;

def atomic_cmp_swap_local : AtomicCmpSwapLocal <atomic_cmp_swap>;

class global_binary_atomic_op_frag<SDNode atomic_op> : PatFrag<
    (ops node:$ptr, node:$value),
    (atomic_op node:$ptr, node:$value),
    [{return cast<MemSDNode>(N)->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS;}]>;

multiclass global_binary_atomic_op<SDNode atomic_op> {
  def "" : global_binary_atomic_op_frag<atomic_op>;

  def _noret : PatFrag<
            (ops node:$ptr, node:$value),
            (atomic_op node:$ptr, node:$value),
            [{return cast<MemSDNode>(N)->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS && (SDValue(N, 0).use_empty());}]>;

  def _ret : PatFrag<
            (ops node:$ptr, node:$value),
            (atomic_op node:$ptr, node:$value),
            [{return cast<MemSDNode>(N)->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS && (!SDValue(N, 0).use_empty());}]>;
}
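// For example, "defm atomic_add_global" below expands to three fragments:
// atomic_add_global (no use restriction), atomic_add_global_noret (the atomic
// result is unused) and atomic_add_global_ret (the result is used), all
// limited to the global address space.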

defm atomic_swap_global : global_binary_atomic_op<atomic_swap>;
defm atomic_add_global : global_binary_atomic_op<atomic_load_add>;
defm atomic_and_global : global_binary_atomic_op<atomic_load_and>;
defm atomic_max_global : global_binary_atomic_op<atomic_load_max>;
defm atomic_min_global : global_binary_atomic_op<atomic_load_min>;
defm atomic_or_global : global_binary_atomic_op<atomic_load_or>;
defm atomic_sub_global : global_binary_atomic_op<atomic_load_sub>;
defm atomic_umax_global : global_binary_atomic_op<atomic_load_umax>;
defm atomic_umin_global : global_binary_atomic_op<atomic_load_umin>;
defm atomic_xor_global : global_binary_atomic_op<atomic_load_xor>;

// Legacy.
def AMDGPUatomic_cmp_swap_global : PatFrag<
  (ops node:$ptr, node:$value),
  (AMDGPUatomic_cmp_swap node:$ptr, node:$value)>, GlobalAddress;

def atomic_cmp_swap_global : PatFrag<
  (ops node:$ptr, node:$cmp, node:$value),
  (atomic_cmp_swap node:$ptr, node:$cmp, node:$value)>, GlobalAddress;


def atomic_cmp_swap_global_noret : PatFrag<
  (ops node:$ptr, node:$cmp, node:$value),
  (atomic_cmp_swap node:$ptr, node:$cmp, node:$value),
  [{return cast<MemSDNode>(N)->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS && (SDValue(N, 0).use_empty());}]>;

def atomic_cmp_swap_global_ret : PatFrag<
  (ops node:$ptr, node:$cmp, node:$value),
  (atomic_cmp_swap node:$ptr, node:$cmp, node:$value),
  [{return cast<MemSDNode>(N)->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS && (!SDValue(N, 0).use_empty());}]>;

//===----------------------------------------------------------------------===//
// Misc Pattern Fragments
//===----------------------------------------------------------------------===//

class Constants {
int TWO_PI = 0x40c90fdb;
int PI = 0x40490fdb;
int TWO_PI_INV = 0x3e22f983;
int FP_UINT_MAX_PLUS_1 = 0x4f800000;    // 1 << 32 in floating point encoding
int FP16_ONE = 0x3C00;
int FP16_NEG_ONE = 0xBC00;
int FP32_ONE = 0x3f800000;
int FP32_NEG_ONE = 0xbf800000;
int FP64_ONE = 0x3ff0000000000000;
int FP64_NEG_ONE = 0xbff0000000000000;
}
def CONST : Constants;

def FP_ZERO : PatLeaf <
  (fpimm),
  [{return N->getValueAPF().isZero();}]
>;

def FP_ONE : PatLeaf <
  (fpimm),
  [{return N->isExactlyValue(1.0);}]
>;

def FP_HALF : PatLeaf <
  (fpimm),
  [{return N->isExactlyValue(0.5);}]
>;

/* Generic helper patterns for intrinsics */
/* -------------------------------------- */

class POW_Common <AMDGPUInst log_ieee, AMDGPUInst exp_ieee, AMDGPUInst mul>
  : AMDGPUPat <
  (fpow f32:$src0, f32:$src1),
  (exp_ieee (mul f32:$src1, (log_ieee f32:$src0)))
>;

/* Other helper patterns */
/* --------------------- */

/* Extract element pattern */
class Extract_Element <ValueType sub_type, ValueType vec_type, int sub_idx,
                       SubRegIndex sub_reg>
  : AMDGPUPat<
  (sub_type (extractelt vec_type:$src, sub_idx)),
  (EXTRACT_SUBREG $src, sub_reg)
>;

/* Insert element pattern */
class Insert_Element <ValueType elem_type, ValueType vec_type,
                      int sub_idx, SubRegIndex sub_reg>
  : AMDGPUPat <
  (insertelt vec_type:$vec, elem_type:$elem, sub_idx),
  (INSERT_SUBREG $vec, $elem, sub_reg)
>;

// XXX: Convert to new syntax and use COPY_TO_REG, once the DFAPacketizer
// can handle COPY instructions.
// bitconvert pattern
class BitConvert <ValueType dt, ValueType st, RegisterClass rc> : AMDGPUPat <
  (dt (bitconvert (st rc:$src0))),
  (dt rc:$src0)
>;

// XXX: Convert to new syntax and use COPY_TO_REG, once the DFAPacketizer
// can handle COPY instructions.
class DwordAddrPat<ValueType vt, RegisterClass rc> : AMDGPUPat <
  (vt (AMDGPUdwordaddr (vt rc:$addr))),
  (vt rc:$addr)
>;

// BFI_INT patterns

multiclass BFIPatterns <Instruction BFI_INT,
                        Instruction LoadImm32,
                        RegisterClass RC64> {
  // Definition from ISA doc:
  // (y & x) | (z & ~x)
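  // In other words, (BFI_INT $x, $y, $z) is a bitwise select controlled by
  // $x: the result takes its bits from $y where $x is 1 and from $z where
  // $x is 0.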
  def : AMDGPUPat <
    (or (and i32:$y, i32:$x), (and i32:$z, (not i32:$x))),
    (BFI_INT $x, $y, $z)
  >;

  // 64-bit version
  def : AMDGPUPat <
    (or (and i64:$y, i64:$x), (and i64:$z, (not i64:$x))),
    (REG_SEQUENCE RC64,
      (BFI_INT (i32 (EXTRACT_SUBREG $x, sub0)),
               (i32 (EXTRACT_SUBREG $y, sub0)),
               (i32 (EXTRACT_SUBREG $z, sub0))), sub0,
      (BFI_INT (i32 (EXTRACT_SUBREG $x, sub1)),
               (i32 (EXTRACT_SUBREG $y, sub1)),
               (i32 (EXTRACT_SUBREG $z, sub1))), sub1)
  >;

  // SHA-256 Ch function
  // z ^ (x & (y ^ z))
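  // Quick check that this is the same select as above: distributing the xor
  // gives z ^ (x & y) ^ (x & z), which is y where x is 1 and z where x is 0,
  // i.e. (y & x) | (z & ~x) again.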
  def : AMDGPUPat <
    (xor i32:$z, (and i32:$x, (xor i32:$y, i32:$z))),
    (BFI_INT $x, $y, $z)
  >;

  // 64-bit version
  def : AMDGPUPat <
    (xor i64:$z, (and i64:$x, (xor i64:$y, i64:$z))),
    (REG_SEQUENCE RC64,
      (BFI_INT (i32 (EXTRACT_SUBREG $x, sub0)),
               (i32 (EXTRACT_SUBREG $y, sub0)),
               (i32 (EXTRACT_SUBREG $z, sub0))), sub0,
      (BFI_INT (i32 (EXTRACT_SUBREG $x, sub1)),
               (i32 (EXTRACT_SUBREG $y, sub1)),
               (i32 (EXTRACT_SUBREG $z, sub1))), sub1)
  >;

  def : AMDGPUPat <
    (fcopysign f32:$src0, f32:$src1),
    (BFI_INT (LoadImm32 (i32 0x7fffffff)), $src0, $src1)
  >;
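  // The 0x7fffffff mask keeps the magnitude bits of $src0 and takes only the
  // sign bit from $src1, which is exactly fcopysign. The same trick is used
  // for the mixed-width variants below.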

  def : AMDGPUPat <
    (f32 (fcopysign f32:$src0, f64:$src1)),
    (BFI_INT (LoadImm32 (i32 0x7fffffff)), $src0,
             (i32 (EXTRACT_SUBREG $src1, sub1)))
  >;

  def : AMDGPUPat <
    (f64 (fcopysign f64:$src0, f64:$src1)),
    (REG_SEQUENCE RC64,
      (i32 (EXTRACT_SUBREG $src0, sub0)), sub0,
      (BFI_INT (LoadImm32 (i32 0x7fffffff)),
               (i32 (EXTRACT_SUBREG $src0, sub1)),
               (i32 (EXTRACT_SUBREG $src1, sub1))), sub1)
  >;

  def : AMDGPUPat <
    (f64 (fcopysign f64:$src0, f32:$src1)),
    (REG_SEQUENCE RC64,
      (i32 (EXTRACT_SUBREG $src0, sub0)), sub0,
      (BFI_INT (LoadImm32 (i32 0x7fffffff)),
               (i32 (EXTRACT_SUBREG $src0, sub1)),
               $src1), sub1)
  >;
}

// SHA-256 Ma patterns

// ((x & z) | (y & (x | z))) -> BFI_INT (XOR x, y), z, y
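// A sanity check of the rewrite: with select mask x ^ y the BFI result is
// (z & (x ^ y)) | (y & ~(x ^ y)); where x and y agree the result is y (== x),
// and where they differ it is z, which is the majority function Ma(x, y, z).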
multiclass SHA256MaPattern <Instruction BFI_INT, Instruction XOR, RegisterClass RC64> {
  def : AMDGPUPat <
    (or (and i32:$x, i32:$z), (and i32:$y, (or i32:$x, i32:$z))),
    (BFI_INT (XOR i32:$x, i32:$y), i32:$z, i32:$y)
  >;

  def : AMDGPUPat <
    (or (and i64:$x, i64:$z), (and i64:$y, (or i64:$x, i64:$z))),
    (REG_SEQUENCE RC64,
      (BFI_INT (XOR (i32 (EXTRACT_SUBREG $x, sub0)),
                    (i32 (EXTRACT_SUBREG $y, sub0))),
               (i32 (EXTRACT_SUBREG $z, sub0)),
               (i32 (EXTRACT_SUBREG $y, sub0))), sub0,
      (BFI_INT (XOR (i32 (EXTRACT_SUBREG $x, sub1)),
                    (i32 (EXTRACT_SUBREG $y, sub1))),
               (i32 (EXTRACT_SUBREG $z, sub1)),
               (i32 (EXTRACT_SUBREG $y, sub1))), sub1)
  >;
}

// Bitfield extract patterns

def IMMZeroBasedBitfieldMask : PatLeaf <(imm), [{
  return isMask_32(N->getZExtValue());
}]>;

def IMMPopCount : SDNodeXForm<imm, [{
  return CurDAG->getTargetConstant(countPopulation(N->getZExtValue()), SDLoc(N),
                                   MVT::i32);
}]>;

multiclass BFEPattern <Instruction UBFE, Instruction SBFE, Instruction MOV> {
  def : AMDGPUPat <
    (i32 (and (i32 (srl i32:$src, i32:$rshift)), IMMZeroBasedBitfieldMask:$mask)),
    (UBFE $src, $rshift, (MOV (i32 (IMMPopCount $mask))))
  >;

  // x & ((1 << y) - 1)
  def : AMDGPUPat <
    (and i32:$src, (add_oneuse (shl_oneuse 1, i32:$width), -1)),
    (UBFE $src, (MOV (i32 0)), $width)
  >;

  // x & ~(-1 << y)
  def : AMDGPUPat <
    (and i32:$src, (xor_oneuse (shl_oneuse -1, i32:$width), -1)),
    (UBFE $src, (MOV (i32 0)), $width)
  >;

  // x & (-1 >> (bitwidth - y))
  def : AMDGPUPat <
    (and i32:$src, (srl_oneuse -1, (sub 32, i32:$width))),
    (UBFE $src, (MOV (i32 0)), $width)
  >;

  // x << (bitwidth - y) >> (bitwidth - y)
  def : AMDGPUPat <
    (srl (shl_oneuse i32:$src, (sub 32, i32:$width)), (sub 32, i32:$width)),
    (UBFE $src, (MOV (i32 0)), $width)
  >;

  def : AMDGPUPat <
    (sra (shl_oneuse i32:$src, (sub 32, i32:$width)), (sub 32, i32:$width)),
    (SBFE $src, (MOV (i32 0)), $width)
  >;
}

// rotr pattern
class ROTRPattern <Instruction BIT_ALIGN> : AMDGPUPat <
  (rotr i32:$src0, i32:$src1),
  (BIT_ALIGN $src0, $src0, $src1)
>;

multiclass IntMed3Pat<Instruction med3Inst,
                 SDPatternOperator min,
                 SDPatternOperator max,
                 SDPatternOperator min_oneuse,
                 SDPatternOperator max_oneuse,
                 ValueType vt = i32> {

  // This matches 16 permutations of
  // min(max(a, b), max(min(a, b), c))
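  // (i.e. the median of the three operands; the 16 forms come from commuting
  // the operands of the min/max nodes, assuming the supplied fragments are
  // commutative like the *_oneuse ones defined above.)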
  def : AMDGPUPat <
  (min (max_oneuse vt:$src0, vt:$src1),
       (max_oneuse (min_oneuse vt:$src0, vt:$src1), vt:$src2)),
  (med3Inst vt:$src0, vt:$src1, vt:$src2)
>;

  // This matches 16 permutations of
  // max(min(x, y), min(max(x, y), z))
  def : AMDGPUPat <
  (max (min_oneuse vt:$src0, vt:$src1),
       (min_oneuse (max_oneuse vt:$src0, vt:$src1), vt:$src2)),
  (med3Inst $src0, $src1, $src2)
>;
}

// Special conversion patterns

def cvt_rpi_i32_f32 : PatFrag <
  (ops node:$src),
  (fp_to_sint (ffloor (fadd $src, FP_HALF))),
  [{ (void) N; return TM.Options.NoNaNsFPMath; }]
>;

def cvt_flr_i32_f32 : PatFrag <
  (ops node:$src),
  (fp_to_sint (ffloor $src)),
  [{ (void)N; return TM.Options.NoNaNsFPMath; }]
>;

let AddedComplexity = 2 in {
class IMad24Pat<Instruction Inst, bit HasClamp = 0> : AMDGPUPat <
  (add (AMDGPUmul_i24 i32:$src0, i32:$src1), i32:$src2),
  !if(HasClamp, (Inst $src0, $src1, $src2, (i1 0)),
                (Inst $src0, $src1, $src2))
>;

class UMad24Pat<Instruction Inst, bit HasClamp = 0> : AMDGPUPat <
  (add (AMDGPUmul_u24 i32:$src0, i32:$src1), i32:$src2),
  !if(HasClamp, (Inst $src0, $src1, $src2, (i1 0)),
                (Inst $src0, $src1, $src2))
>;
} // AddedComplexity.

class RcpPat<Instruction RcpInst, ValueType vt> : AMDGPUPat <
  (fdiv FP_ONE, vt:$src),
  (RcpInst $src)
>;

class RsqPat<Instruction RsqInst, ValueType vt> : AMDGPUPat <
  (AMDGPUrcp (fsqrt vt:$src)),
  (RsqInst $src)
>;

// Instructions which select to the same v_min_f*
def fminnum_like : PatFrags<(ops node:$src0, node:$src1),
  [(fminnum_ieee node:$src0, node:$src1),
   (fminnum node:$src0, node:$src1)]
>;

// Instructions which select to the same v_max_f*
def fmaxnum_like : PatFrags<(ops node:$src0, node:$src1),
  [(fmaxnum_ieee node:$src0, node:$src1),
   (fmaxnum node:$src0, node:$src1)]
>;

def fminnum_like_oneuse : PatFrags<(ops node:$src0, node:$src1),
  [(fminnum_ieee_oneuse node:$src0, node:$src1),
   (fminnum_oneuse node:$src0, node:$src1)]
>;

def fmaxnum_like_oneuse : PatFrags<(ops node:$src0, node:$src1),
  [(fmaxnum_ieee_oneuse node:$src0, node:$src1),
   (fmaxnum_oneuse node:$src0, node:$src1)]
>;