//===- AMDGPULegalizerInfo.cpp -----------------------------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the targeting of the MachineLegalizer class for
/// AMDGPU.
/// \todo This should be generated by TableGen.
//===----------------------------------------------------------------------===//

#if defined(_MSC_VER) || defined(__MINGW32__)
// According to Microsoft, one must set _USE_MATH_DEFINES in order to get M_PI
// from the Visual C++ cmath / math.h headers:
// https://docs.microsoft.com/en-us/cpp/c-runtime-library/math-constants?view=vs-2019
#define _USE_MATH_DEFINES
#endif

#include "AMDGPU.h"
#include "AMDGPULegalizerInfo.h"
#include "AMDGPUTargetMachine.h"
#include "SIMachineFunctionInfo.h"
#include "llvm/CodeGen/GlobalISel/LegalizerHelper.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/Debug.h"

#define DEBUG_TYPE "amdgpu-legalinfo"

using namespace llvm;
using namespace LegalizeActions;
using namespace LegalizeMutations;
using namespace LegalityPredicates;


static LegalityPredicate isMultiple32(unsigned TypeIdx,
                                      unsigned MaxSize = 512) {
  return [=](const LegalityQuery &Query) {
    const LLT Ty = Query.Types[TypeIdx];
    const LLT EltTy = Ty.getScalarType();
    return Ty.getSizeInBits() <= MaxSize && EltTy.getSizeInBits() % 32 == 0;
  };
}

static LegalityPredicate isSmallOddVector(unsigned TypeIdx) {
  return [=](const LegalityQuery &Query) {
    const LLT Ty = Query.Types[TypeIdx];
    return Ty.isVector() &&
           Ty.getNumElements() % 2 != 0 &&
           Ty.getElementType().getSizeInBits() < 32;
  };
}

static LegalizeMutation oneMoreElement(unsigned TypeIdx) {
  return [=](const LegalityQuery &Query) {
    const LLT Ty = Query.Types[TypeIdx];
    const LLT EltTy = Ty.getElementType();
    return std::make_pair(TypeIdx, LLT::vector(Ty.getNumElements() + 1, EltTy));
  };
}

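// Reduce the number of vector elements so that each resulting piece fits in
// (at most) 64 bits.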
static LegalizeMutation fewerEltsToSize64Vector(unsigned TypeIdx) {
  return [=](const LegalityQuery &Query) {
    const LLT Ty = Query.Types[TypeIdx];
    const LLT EltTy = Ty.getElementType();
    unsigned Size = Ty.getSizeInBits();
    unsigned Pieces = (Size + 63) / 64;
    unsigned NewNumElts = (Ty.getNumElements() + 1) / Pieces;
    return std::make_pair(TypeIdx, LLT::scalarOrVector(NewNumElts, EltTy));
  };
}

static LegalityPredicate vectorWiderThan(unsigned TypeIdx, unsigned Size) {
  return [=](const LegalityQuery &Query) {
    const LLT QueryTy = Query.Types[TypeIdx];
    return QueryTy.isVector() && QueryTy.getSizeInBits() > Size;
  };
}

static LegalityPredicate numElementsNotEven(unsigned TypeIdx) {
  return [=](const LegalityQuery &Query) {
    const LLT QueryTy = Query.Types[TypeIdx];
    return QueryTy.isVector() && QueryTy.getNumElements() % 2 != 0;
  };
}

// Any combination of 32 or 64-bit elements up to 512 bits, and multiples of
// v2s16.
static LegalityPredicate isRegisterType(unsigned TypeIdx) {
  return [=](const LegalityQuery &Query) {
    const LLT Ty = Query.Types[TypeIdx];
    if (Ty.isVector()) {
      const int EltSize = Ty.getElementType().getSizeInBits();
      return EltSize == 32 || EltSize == 64 ||
             (EltSize == 16 && Ty.getNumElements() % 2 == 0) ||
             EltSize == 128 || EltSize == 256;
    }

    return Ty.getSizeInBits() % 32 == 0 && Ty.getSizeInBits() <= 512;
  };
}

static LegalityPredicate elementTypeIs(unsigned TypeIdx, LLT Type) {
  return [=](const LegalityQuery &Query) {
    return Query.Types[TypeIdx].getElementType() == Type;
  };
}

AMDGPULegalizerInfo::AMDGPULegalizerInfo(const GCNSubtarget &ST_,
                                         const GCNTargetMachine &TM)
  : ST(ST_) {
  using namespace TargetOpcode;

  auto GetAddrSpacePtr = [&TM](unsigned AS) {
    return LLT::pointer(AS, TM.getPointerSizeInBits(AS));
  };

  const LLT S1 = LLT::scalar(1);
  const LLT S8 = LLT::scalar(8);
  const LLT S16 = LLT::scalar(16);
  const LLT S32 = LLT::scalar(32);
  const LLT S64 = LLT::scalar(64);
  const LLT S128 = LLT::scalar(128);
  const LLT S256 = LLT::scalar(256);
  const LLT S512 = LLT::scalar(512);

  const LLT V2S16 = LLT::vector(2, 16);
  const LLT V4S16 = LLT::vector(4, 16);

  const LLT V2S32 = LLT::vector(2, 32);
  const LLT V3S32 = LLT::vector(3, 32);
  const LLT V4S32 = LLT::vector(4, 32);
  const LLT V5S32 = LLT::vector(5, 32);
  const LLT V6S32 = LLT::vector(6, 32);
  const LLT V7S32 = LLT::vector(7, 32);
  const LLT V8S32 = LLT::vector(8, 32);
  const LLT V9S32 = LLT::vector(9, 32);
  const LLT V10S32 = LLT::vector(10, 32);
  const LLT V11S32 = LLT::vector(11, 32);
  const LLT V12S32 = LLT::vector(12, 32);
  const LLT V13S32 = LLT::vector(13, 32);
  const LLT V14S32 = LLT::vector(14, 32);
  const LLT V15S32 = LLT::vector(15, 32);
  const LLT V16S32 = LLT::vector(16, 32);

  const LLT V2S64 = LLT::vector(2, 64);
  const LLT V3S64 = LLT::vector(3, 64);
  const LLT V4S64 = LLT::vector(4, 64);
  const LLT V5S64 = LLT::vector(5, 64);
  const LLT V6S64 = LLT::vector(6, 64);
  const LLT V7S64 = LLT::vector(7, 64);
  const LLT V8S64 = LLT::vector(8, 64);

  std::initializer_list<LLT> AllS32Vectors =
    {V2S32, V3S32, V4S32, V5S32, V6S32, V7S32, V8S32,
     V9S32, V10S32, V11S32, V12S32, V13S32, V14S32, V15S32, V16S32};
  std::initializer_list<LLT> AllS64Vectors =
    {V2S64, V3S64, V4S64, V5S64, V6S64, V7S64, V8S64};

  const LLT GlobalPtr = GetAddrSpacePtr(AMDGPUAS::GLOBAL_ADDRESS);
  const LLT ConstantPtr = GetAddrSpacePtr(AMDGPUAS::CONSTANT_ADDRESS);
  const LLT Constant32Ptr = GetAddrSpacePtr(AMDGPUAS::CONSTANT_ADDRESS_32BIT);
  const LLT LocalPtr = GetAddrSpacePtr(AMDGPUAS::LOCAL_ADDRESS);
  const LLT RegionPtr = GetAddrSpacePtr(AMDGPUAS::REGION_ADDRESS);
  const LLT FlatPtr = GetAddrSpacePtr(AMDGPUAS::FLAT_ADDRESS);
  const LLT PrivatePtr = GetAddrSpacePtr(AMDGPUAS::PRIVATE_ADDRESS);

  const LLT CodePtr = FlatPtr;

  const std::initializer_list<LLT> AddrSpaces64 = {
    GlobalPtr, ConstantPtr, FlatPtr
  };

  const std::initializer_list<LLT> AddrSpaces32 = {
    LocalPtr, PrivatePtr, Constant32Ptr, RegionPtr
  };

  const std::initializer_list<LLT> FPTypesBase = {
    S32, S64
  };

  const std::initializer_list<LLT> FPTypes16 = {
    S32, S64, S16
  };

  const std::initializer_list<LLT> FPTypesPK16 = {
    S32, S64, S16, V2S16
  };

  setAction({G_BRCOND, S1}, Legal);

  // TODO: All multiples of 32, vectors of pointers, all v2s16 pairs, more
  // elements for v3s16
  getActionDefinitionsBuilder(G_PHI)
    .legalFor({S32, S64, V2S16, V4S16, S1, S128, S256})
    .legalFor(AllS32Vectors)
    .legalFor(AllS64Vectors)
    .legalFor(AddrSpaces64)
    .legalFor(AddrSpaces32)
    .clampScalar(0, S32, S256)
    .widenScalarToNextPow2(0, 32)
    .clampMaxNumElements(0, S32, 16)
    .moreElementsIf(isSmallOddVector(0), oneMoreElement(0))
    .legalIf(isPointer(0));

  if (ST.has16BitInsts()) {
    getActionDefinitionsBuilder({G_ADD, G_SUB, G_MUL})
      .legalFor({S32, S16})
      .clampScalar(0, S16, S32)
      .scalarize(0);
  } else {
    getActionDefinitionsBuilder({G_ADD, G_SUB, G_MUL})
      .legalFor({S32})
      .clampScalar(0, S32, S32)
      .scalarize(0);
  }

  getActionDefinitionsBuilder({G_UMULH, G_SMULH})
    .legalFor({S32})
    .clampScalar(0, S32, S32)
    .scalarize(0);

  // Report legal for any types we can handle anywhere. For the cases only legal
  // on the SALU, RegBankSelect will be able to re-legalize.
  getActionDefinitionsBuilder({G_AND, G_OR, G_XOR})
    .legalFor({S32, S1, S64, V2S32, S16, V2S16, V4S16})
    .clampScalar(0, S32, S64)
    .moreElementsIf(isSmallOddVector(0), oneMoreElement(0))
    .fewerElementsIf(vectorWiderThan(0, 32), fewerEltsToSize64Vector(0))
    .widenScalarToNextPow2(0)
    .scalarize(0);

  getActionDefinitionsBuilder({G_UADDO, G_SADDO, G_USUBO, G_SSUBO,
                               G_UADDE, G_SADDE, G_USUBE, G_SSUBE})
    .legalFor({{S32, S1}})
    .clampScalar(0, S32, S32);

  getActionDefinitionsBuilder(G_BITCAST)
    .legalForCartesianProduct({S32, V2S16})
    .legalForCartesianProduct({S64, V2S32, V4S16})
    .legalForCartesianProduct({V2S64, V4S32})
    // Don't worry about the size constraint.
    .legalIf(all(isPointer(0), isPointer(1)));

  getActionDefinitionsBuilder(G_FCONSTANT)
    .legalFor({S32, S64, S16})
    .clampScalar(0, S16, S64);

  getActionDefinitionsBuilder(G_IMPLICIT_DEF)
    .legalFor({S1, S32, S64, S16, V2S32, V4S32, V2S16, V4S16, GlobalPtr,
               ConstantPtr, LocalPtr, FlatPtr, PrivatePtr})
    .moreElementsIf(isSmallOddVector(0), oneMoreElement(0))
    .clampScalarOrElt(0, S32, S512)
    .legalIf(isMultiple32(0))
    .widenScalarToNextPow2(0, 32)
    .clampMaxNumElements(0, S32, 16);


  // FIXME: i1 operands to intrinsics should always be legal, but other i1
  // values may not be legal. We need to figure out how to distinguish
  // between these two scenarios.
  getActionDefinitionsBuilder(G_CONSTANT)
    .legalFor({S1, S32, S64, S16, GlobalPtr,
               LocalPtr, ConstantPtr, PrivatePtr, FlatPtr})
    .clampScalar(0, S32, S64)
    .widenScalarToNextPow2(0)
    .legalIf(isPointer(0));

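  // Frame indexes are always pointers into the private (scratch) address
  // space.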
  setAction({G_FRAME_INDEX, PrivatePtr}, Legal);

  auto &FPOpActions = getActionDefinitionsBuilder(
    { G_FADD, G_FMUL, G_FNEG, G_FABS, G_FMA, G_FCANONICALIZE})
    .legalFor({S32, S64});
  auto &TrigActions = getActionDefinitionsBuilder({G_FSIN, G_FCOS})
    .customFor({S32, S64});

  if (ST.has16BitInsts()) {
    if (ST.hasVOP3PInsts())
      FPOpActions.legalFor({S16, V2S16});
    else
      FPOpActions.legalFor({S16});

    TrigActions.customFor({S16});
  }

  auto &MinNumMaxNum = getActionDefinitionsBuilder({
      G_FMINNUM, G_FMAXNUM, G_FMINNUM_IEEE, G_FMAXNUM_IEEE});

  if (ST.hasVOP3PInsts()) {
    MinNumMaxNum.customFor(FPTypesPK16)
      .clampMaxNumElements(0, S16, 2)
      .clampScalar(0, S16, S64)
      .scalarize(0);
  } else if (ST.has16BitInsts()) {
    MinNumMaxNum.customFor(FPTypes16)
      .clampScalar(0, S16, S64)
      .scalarize(0);
  } else {
    MinNumMaxNum.customFor(FPTypesBase)
      .clampScalar(0, S32, S64)
      .scalarize(0);
  }

  // TODO: Implement
  getActionDefinitionsBuilder({G_FMINIMUM, G_FMAXIMUM}).lower();

  if (ST.hasVOP3PInsts())
    FPOpActions.clampMaxNumElements(0, S16, 2);

  FPOpActions
    .scalarize(0)
    .clampScalar(0, ST.has16BitInsts() ? S16 : S32, S64);

  TrigActions
    .scalarize(0)
    .clampScalar(0, ST.has16BitInsts() ? S16 : S32, S64);

  if (ST.has16BitInsts()) {
    getActionDefinitionsBuilder(G_FSQRT)
      .legalFor({S32, S64, S16})
      .scalarize(0)
      .clampScalar(0, S16, S64);
  } else {
    getActionDefinitionsBuilder(G_FSQRT)
      .legalFor({S32, S64})
      .scalarize(0)
      .clampScalar(0, S32, S64);
  }

  getActionDefinitionsBuilder(G_FPTRUNC)
    .legalFor({{S32, S64}, {S16, S32}})
    .scalarize(0);

  getActionDefinitionsBuilder(G_FPEXT)
    .legalFor({{S64, S32}, {S32, S16}})
    .lowerFor({{S64, S16}}) // FIXME: Implement
    .scalarize(0);

  // TODO: Verify V_BFI_B32 is generated from expanded bit ops.
  getActionDefinitionsBuilder(G_FCOPYSIGN).lower();

  getActionDefinitionsBuilder(G_FSUB)
    // Use actual fsub instruction
    .legalFor({S32})
    // Must use fadd + fneg
    .lowerFor({S64, S16, V2S16})
    .scalarize(0)
    .clampScalar(0, S32, S64);

  getActionDefinitionsBuilder({G_SEXT, G_ZEXT, G_ANYEXT})
    .legalFor({{S64, S32}, {S32, S16}, {S64, S16},
               {S32, S1}, {S64, S1}, {S16, S1},
               // FIXME: Hack
               {S64, LLT::scalar(33)},
               {S32, S8}, {S128, S32}, {S128, S64}, {S32, LLT::scalar(24)}})
    .scalarize(0);

  getActionDefinitionsBuilder({G_SITOFP, G_UITOFP})
    .legalFor({{S32, S32}, {S64, S32}})
    .lowerFor({{S32, S64}})
    .customFor({{S64, S64}})
    .scalarize(0);

  getActionDefinitionsBuilder({G_FPTOSI, G_FPTOUI})
    .legalFor({{S32, S32}, {S32, S64}})
    .scalarize(0);

  getActionDefinitionsBuilder(G_INTRINSIC_ROUND)
    .legalFor({S32, S64})
    .scalarize(0);

  if (ST.getGeneration() >= AMDGPUSubtarget::SEA_ISLANDS) {
    getActionDefinitionsBuilder({G_INTRINSIC_TRUNC, G_FCEIL, G_FRINT})
      .legalFor({S32, S64})
      .clampScalar(0, S32, S64)
      .scalarize(0);
  } else {
    getActionDefinitionsBuilder({G_INTRINSIC_TRUNC, G_FCEIL, G_FRINT})
      .legalFor({S32})
      .customFor({S64})
      .clampScalar(0, S32, S64)
      .scalarize(0);
  }

  getActionDefinitionsBuilder(G_GEP)
    .legalForCartesianProduct(AddrSpaces64, {S64})
    .legalForCartesianProduct(AddrSpaces32, {S32})
    .scalarize(0);

  getActionDefinitionsBuilder(G_PTR_MASK)
    .scalarize(0)
    .alwaysLegal();

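  // Block addresses are code pointers, i.e. 64-bit flat pointers here.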
  setAction({G_BLOCK_ADDR, CodePtr}, Legal);

  auto &CmpBuilder =
    getActionDefinitionsBuilder(G_ICMP)
    .legalForCartesianProduct(
      {S1}, {S32, S64, GlobalPtr, LocalPtr, ConstantPtr, PrivatePtr, FlatPtr})
    .legalFor({{S1, S32}, {S1, S64}});
  if (ST.has16BitInsts()) {
    CmpBuilder.legalFor({{S1, S16}});
  }

  CmpBuilder
    .widenScalarToNextPow2(1)
    .clampScalar(1, S32, S64)
    .scalarize(0)
    .legalIf(all(typeIs(0, S1), isPointer(1)));

  getActionDefinitionsBuilder(G_FCMP)
    .legalForCartesianProduct({S1}, ST.has16BitInsts() ? FPTypes16 : FPTypesBase)
    .widenScalarToNextPow2(1)
    .clampScalar(1, S32, S64)
    .scalarize(0);

  // FIXME: fexp, flog2, flog10 need to be custom lowered.
  getActionDefinitionsBuilder({G_FPOW, G_FEXP, G_FEXP2,
                               G_FLOG, G_FLOG2, G_FLOG10})
    .legalFor({S32})
    .scalarize(0);

  // The 64-bit versions produce 32-bit results, but only on the SALU.
  getActionDefinitionsBuilder({G_CTLZ, G_CTLZ_ZERO_UNDEF,
                               G_CTTZ, G_CTTZ_ZERO_UNDEF,
                               G_CTPOP})
    .legalFor({{S32, S32}, {S32, S64}})
    .clampScalar(0, S32, S32)
    .clampScalar(1, S32, S64)
    .scalarize(0)
    .widenScalarToNextPow2(0, 32)
    .widenScalarToNextPow2(1, 32);

  // TODO: Expand for > s32
  getActionDefinitionsBuilder({G_BSWAP, G_BITREVERSE})
    .legalFor({S32})
    .clampScalar(0, S32, S32)
    .scalarize(0);

  if (ST.has16BitInsts()) {
    if (ST.hasVOP3PInsts()) {
      getActionDefinitionsBuilder({G_SMIN, G_SMAX, G_UMIN, G_UMAX})
        .legalFor({S32, S16, V2S16})
        .moreElementsIf(isSmallOddVector(0), oneMoreElement(0))
        .clampMaxNumElements(0, S16, 2)
        .clampScalar(0, S16, S32)
        .widenScalarToNextPow2(0)
        .scalarize(0);
    } else {
      getActionDefinitionsBuilder({G_SMIN, G_SMAX, G_UMIN, G_UMAX})
        .legalFor({S32, S16})
        .widenScalarToNextPow2(0)
        .clampScalar(0, S16, S32)
        .scalarize(0);
    }
  } else {
    getActionDefinitionsBuilder({G_SMIN, G_SMAX, G_UMIN, G_UMAX})
      .legalFor({S32})
      .clampScalar(0, S32, S32)
      .widenScalarToNextPow2(0)
      .scalarize(0);
  }

  auto smallerThan = [](unsigned TypeIdx0, unsigned TypeIdx1) {
    return [=](const LegalityQuery &Query) {
      return Query.Types[TypeIdx0].getSizeInBits() <
             Query.Types[TypeIdx1].getSizeInBits();
    };
  };

  auto greaterThan = [](unsigned TypeIdx0, unsigned TypeIdx1) {
    return [=](const LegalityQuery &Query) {
      return Query.Types[TypeIdx0].getSizeInBits() >
             Query.Types[TypeIdx1].getSizeInBits();
    };
  };

  getActionDefinitionsBuilder(G_INTTOPTR)
    // List the common cases
    .legalForCartesianProduct(AddrSpaces64, {S64})
    .legalForCartesianProduct(AddrSpaces32, {S32})
    .scalarize(0)
    // Accept any address space as long as the size matches
    .legalIf(sameSize(0, 1))
    .widenScalarIf(smallerThan(1, 0),
      [](const LegalityQuery &Query) {
        return std::make_pair(1, LLT::scalar(Query.Types[0].getSizeInBits()));
      })
    .narrowScalarIf(greaterThan(1, 0),
      [](const LegalityQuery &Query) {
        return std::make_pair(1, LLT::scalar(Query.Types[0].getSizeInBits()));
      });

  getActionDefinitionsBuilder(G_PTRTOINT)
    // List the common cases
    .legalForCartesianProduct(AddrSpaces64, {S64})
    .legalForCartesianProduct(AddrSpaces32, {S32})
    .scalarize(0)
    // Accept any address space as long as the size matches
    .legalIf(sameSize(0, 1))
    .widenScalarIf(smallerThan(0, 1),
      [](const LegalityQuery &Query) {
        return std::make_pair(0, LLT::scalar(Query.Types[1].getSizeInBits()));
      })
    .narrowScalarIf(
      greaterThan(0, 1),
      [](const LegalityQuery &Query) {
        return std::make_pair(0, LLT::scalar(Query.Types[1].getSizeInBits()));
      });

  getActionDefinitionsBuilder(G_ADDRSPACE_CAST)
    .scalarize(0)
    .custom();

  // TODO: Should load to s16 be legal? Most loads extend to 32-bits, but we
  // handle some operations by just promoting the register during
  // selection. There are also d16 loads on GFX9+ which preserve the high bits.
  getActionDefinitionsBuilder({G_LOAD, G_STORE})
    .narrowScalarIf([](const LegalityQuery &Query) {
        unsigned Size = Query.Types[0].getSizeInBits();
        unsigned MemSize = Query.MMODescrs[0].SizeInBits;
        return (Size > 32 && MemSize < Size);
      },
      [](const LegalityQuery &Query) {
        return std::make_pair(0, LLT::scalar(32));
      })
    .moreElementsIf(isSmallOddVector(0), oneMoreElement(0))
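    // Split 96-bit vector accesses on subtargets without dwordx3 load/store
    // instructions.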
    .fewerElementsIf([=](const LegalityQuery &Query) {
        unsigned MemSize = Query.MMODescrs[0].SizeInBits;
        return (MemSize == 96) &&
               Query.Types[0].isVector() &&
               !ST.hasDwordx3LoadStores();
      },
      [=](const LegalityQuery &Query) {
        return std::make_pair(0, V2S32);
      })
    .legalIf([=](const LegalityQuery &Query) {
        const LLT &Ty0 = Query.Types[0];

        unsigned Size = Ty0.getSizeInBits();
        unsigned MemSize = Query.MMODescrs[0].SizeInBits;
        if (Size < 32 || (Size > 32 && MemSize < Size))
          return false;

        if (Ty0.isVector() && Size != MemSize)
          return false;

        // TODO: Decompose private loads into 4-byte components.
        // TODO: Illegal flat loads on SI
        switch (MemSize) {
        case 8:
        case 16:
          return Size == 32;
        case 32:
        case 64:
        case 128:
          return true;

        case 96:
          return ST.hasDwordx3LoadStores();

        case 256:
        case 512:
          // TODO: Possibly support loads of i256 and i512. This will require
          // adding i256 and i512 types to MVT in order to be able to use
          // TableGen.
          // TODO: Add support for other vector types, this will require
          //       defining more value mappings for the new types.
          return Ty0.isVector() && (Ty0.getScalarType().getSizeInBits() == 32 ||
                                    Ty0.getScalarType().getSizeInBits() == 64);

        default:
          return false;
        }
      })
    .clampScalar(0, S32, S64);


  // FIXME: Handle alignment requirements.
  auto &ExtLoads = getActionDefinitionsBuilder({G_SEXTLOAD, G_ZEXTLOAD})
    .legalForTypesWithMemDesc({
        {S32, GlobalPtr, 8, 8},
        {S32, GlobalPtr, 16, 8},
        {S32, LocalPtr, 8, 8},
        {S32, LocalPtr, 16, 8},
        {S32, PrivatePtr, 8, 8},
        {S32, PrivatePtr, 16, 8}});
  if (ST.hasFlatAddressSpace()) {
    ExtLoads.legalForTypesWithMemDesc({{S32, FlatPtr, 8, 8},
                                       {S32, FlatPtr, 16, 8}});
  }

  ExtLoads.clampScalar(0, S32, S32)
          .widenScalarToNextPow2(0)
          .unsupportedIfMemSizeNotPow2()
          .lower();

  auto &Atomics = getActionDefinitionsBuilder(
    {G_ATOMICRMW_XCHG, G_ATOMICRMW_ADD, G_ATOMICRMW_SUB,
     G_ATOMICRMW_AND, G_ATOMICRMW_OR, G_ATOMICRMW_XOR,
     G_ATOMICRMW_MAX, G_ATOMICRMW_MIN, G_ATOMICRMW_UMAX,
     G_ATOMICRMW_UMIN, G_ATOMIC_CMPXCHG})
    .legalFor({{S32, GlobalPtr}, {S32, LocalPtr},
               {S64, GlobalPtr}, {S64, LocalPtr}});
  if (ST.hasFlatAddressSpace()) {
    Atomics.legalFor({{S32, FlatPtr}, {S64, FlatPtr}});
  }

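  // Floating-point atomic add is only handled as a 32-bit LDS operation.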
  getActionDefinitionsBuilder(G_ATOMICRMW_FADD)
    .legalFor({{S32, LocalPtr}});

  // TODO: Pointer types, any 32-bit or 64-bit vector
  getActionDefinitionsBuilder(G_SELECT)
    .legalForCartesianProduct({S32, S64, S16, V2S32, V2S16, V4S16,
          GlobalPtr, LocalPtr, FlatPtr, PrivatePtr,
          LLT::vector(2, LocalPtr), LLT::vector(2, PrivatePtr)}, {S1})
    .clampScalar(0, S16, S64)
    .moreElementsIf(isSmallOddVector(0), oneMoreElement(0))
    .fewerElementsIf(numElementsNotEven(0), scalarize(0))
    .scalarize(1)
    .clampMaxNumElements(0, S32, 2)
    .clampMaxNumElements(0, LocalPtr, 2)
    .clampMaxNumElements(0, PrivatePtr, 2)
    .scalarize(0)
    .widenScalarToNextPow2(0)
    .legalIf(all(isPointer(0), typeIs(1, S1)));

  // TODO: Only the low 4/5/6 bits of the shift amount are observed, so we can
  // be more flexible with the shift amount type.
  auto &Shifts = getActionDefinitionsBuilder({G_SHL, G_LSHR, G_ASHR})
    .legalFor({{S32, S32}, {S64, S32}});
  if (ST.has16BitInsts()) {
    if (ST.hasVOP3PInsts()) {
      Shifts.legalFor({{S16, S32}, {S16, S16}, {V2S16, V2S16}})
            .clampMaxNumElements(0, S16, 2);
    } else
      Shifts.legalFor({{S16, S32}, {S16, S16}});

    Shifts.clampScalar(1, S16, S32);
    Shifts.clampScalar(0, S16, S64);
    Shifts.widenScalarToNextPow2(0, 16);
  } else {
    // Make sure we legalize the shift amount type first, as the general
    // expansion for the shifted type will produce much worse code if it hasn't
    // been truncated already.
    Shifts.clampScalar(1, S32, S32);
    Shifts.clampScalar(0, S32, S64);
    Shifts.widenScalarToNextPow2(0, 32);
  }
  Shifts.scalarize(0);

  for (unsigned Op : {G_EXTRACT_VECTOR_ELT, G_INSERT_VECTOR_ELT}) {
    unsigned VecTypeIdx = Op == G_EXTRACT_VECTOR_ELT ? 1 : 0;
    unsigned EltTypeIdx = Op == G_EXTRACT_VECTOR_ELT ? 0 : 1;
    unsigned IdxTypeIdx = 2;

    getActionDefinitionsBuilder(Op)
      .customIf([=](const LegalityQuery &Query) {
          const LLT EltTy = Query.Types[EltTypeIdx];
          const LLT VecTy = Query.Types[VecTypeIdx];
          const LLT IdxTy = Query.Types[IdxTypeIdx];
          return (EltTy.getSizeInBits() == 16 ||
                  EltTy.getSizeInBits() % 32 == 0) &&
                 VecTy.getSizeInBits() % 32 == 0 &&
                 VecTy.getSizeInBits() <= 512 &&
                 IdxTy.getSizeInBits() == 32;
        })
      .clampScalar(EltTypeIdx, S32, S64)
      .clampScalar(VecTypeIdx, S32, S64)
      .clampScalar(IdxTypeIdx, S32, S32);
  }

  getActionDefinitionsBuilder(G_EXTRACT_VECTOR_ELT)
    .unsupportedIf([=](const LegalityQuery &Query) {
        const LLT &EltTy = Query.Types[1].getElementType();
        return Query.Types[0] != EltTy;
      });

  for (unsigned Op : {G_EXTRACT, G_INSERT}) {
    unsigned BigTyIdx = Op == G_EXTRACT ? 1 : 0;
    unsigned LitTyIdx = Op == G_EXTRACT ? 0 : 1;

    // FIXME: Doesn't handle extract of illegal sizes.
    getActionDefinitionsBuilder(Op)
      .legalIf([=](const LegalityQuery &Query) {
          const LLT BigTy = Query.Types[BigTyIdx];
          const LLT LitTy = Query.Types[LitTyIdx];
          return (BigTy.getSizeInBits() % 32 == 0) &&
                 (LitTy.getSizeInBits() % 16 == 0);
        })
      .widenScalarIf(
        [=](const LegalityQuery &Query) {
          const LLT BigTy = Query.Types[BigTyIdx];
          return (BigTy.getScalarSizeInBits() < 16);
        },
        LegalizeMutations::widenScalarOrEltToNextPow2(BigTyIdx, 16))
      .widenScalarIf(
        [=](const LegalityQuery &Query) {
          const LLT LitTy = Query.Types[LitTyIdx];
          return (LitTy.getScalarSizeInBits() < 16);
        },
        LegalizeMutations::widenScalarOrEltToNextPow2(LitTyIdx, 16))
      .moreElementsIf(isSmallOddVector(BigTyIdx), oneMoreElement(BigTyIdx))
      .widenScalarToNextPow2(BigTyIdx, 32);

  }

  getActionDefinitionsBuilder(G_BUILD_VECTOR)
    .legalForCartesianProduct(AllS32Vectors, {S32})
    .legalForCartesianProduct(AllS64Vectors, {S64})
    .clampNumElements(0, V16S32, V16S32)
    .clampNumElements(0, V2S64, V8S64)
    .minScalarSameAs(1, 0)
    .legalIf(isRegisterType(0))
    .minScalarOrElt(0, S32);

  getActionDefinitionsBuilder(G_CONCAT_VECTORS)
    .legalIf(isRegisterType(0));

  // TODO: Don't fully scalarize v2s16 pieces
  getActionDefinitionsBuilder(G_SHUFFLE_VECTOR).lower();

  // Merge/Unmerge
  for (unsigned Op : {G_MERGE_VALUES, G_UNMERGE_VALUES}) {
    unsigned BigTyIdx = Op == G_MERGE_VALUES ? 0 : 1;
    unsigned LitTyIdx = Op == G_MERGE_VALUES ? 1 : 0;

    auto notValidElt = [=](const LegalityQuery &Query, unsigned TypeIdx) {
      const LLT &Ty = Query.Types[TypeIdx];
      if (Ty.isVector()) {
        const LLT &EltTy = Ty.getElementType();
        if (EltTy.getSizeInBits() < 8 || EltTy.getSizeInBits() > 64)
          return true;
        if (!isPowerOf2_32(EltTy.getSizeInBits()))
          return true;
      }
      return false;
    };

    getActionDefinitionsBuilder(Op)
      .widenScalarToNextPow2(LitTyIdx, /*Min*/ 16)
      // Clamp the little scalar to s8-s256 and make it a power of 2. It's not
      // worth considering the multiples of 64 since 2*192 and 2*384 are not
      // valid.
      .clampScalar(LitTyIdx, S16, S256)
      .widenScalarToNextPow2(LitTyIdx, /*Min*/ 32)
      .moreElementsIf(isSmallOddVector(BigTyIdx), oneMoreElement(BigTyIdx))
      .fewerElementsIf(all(typeIs(0, S16), vectorWiderThan(1, 32),
                           elementTypeIs(1, S16)),
                       changeTo(1, V2S16))
      // Break up vectors with weird elements into scalars
      .fewerElementsIf(
        [=](const LegalityQuery &Query) { return notValidElt(Query, 0); },
        scalarize(0))
      .fewerElementsIf(
        [=](const LegalityQuery &Query) { return notValidElt(Query, 1); },
        scalarize(1))
      .clampScalar(BigTyIdx, S32, S512)
      .lowerFor({{S16, V2S16}})
      .widenScalarIf(
        [=](const LegalityQuery &Query) {
          const LLT &Ty = Query.Types[BigTyIdx];
          return !isPowerOf2_32(Ty.getSizeInBits()) &&
                 Ty.getSizeInBits() % 16 != 0;
        },
        [=](const LegalityQuery &Query) {
          // Pick the next power of 2, or a multiple of 64 over 128.
          // Whichever is smaller.
          const LLT &Ty = Query.Types[BigTyIdx];
          unsigned NewSizeInBits = 1 << Log2_32_Ceil(Ty.getSizeInBits() + 1);
          if (NewSizeInBits >= 256) {
            unsigned RoundedTo = alignTo<64>(Ty.getSizeInBits() + 1);
            if (RoundedTo < NewSizeInBits)
              NewSizeInBits = RoundedTo;
          }
          return std::make_pair(BigTyIdx, LLT::scalar(NewSizeInBits));
        })
      .legalIf([=](const LegalityQuery &Query) {
          const LLT &BigTy = Query.Types[BigTyIdx];
          const LLT &LitTy = Query.Types[LitTyIdx];

          if (BigTy.isVector() && BigTy.getSizeInBits() < 32)
            return false;
          if (LitTy.isVector() && LitTy.getSizeInBits() < 32)
            return false;

          return BigTy.getSizeInBits() % 16 == 0 &&
                 LitTy.getSizeInBits() % 16 == 0 &&
                 BigTy.getSizeInBits() <= 512;
        })
      // Any vectors left are the wrong size. Scalarize them.
      .scalarize(0)
      .scalarize(1);
  }

  getActionDefinitionsBuilder(G_SEXT_INREG).lower();

  computeTables();
  verify(*ST.getInstrInfo());
}

bool AMDGPULegalizerInfo::legalizeCustom(MachineInstr &MI,
                                         MachineRegisterInfo &MRI,
                                         MachineIRBuilder &MIRBuilder,
                                         GISelChangeObserver &Observer) const {
  switch (MI.getOpcode()) {
  case TargetOpcode::G_ADDRSPACE_CAST:
    return legalizeAddrSpaceCast(MI, MRI, MIRBuilder);
  case TargetOpcode::G_FRINT:
    return legalizeFrint(MI, MRI, MIRBuilder);
  case TargetOpcode::G_FCEIL:
    return legalizeFceil(MI, MRI, MIRBuilder);
  case TargetOpcode::G_INTRINSIC_TRUNC:
    return legalizeIntrinsicTrunc(MI, MRI, MIRBuilder);
  case TargetOpcode::G_SITOFP:
    return legalizeITOFP(MI, MRI, MIRBuilder, true);
  case TargetOpcode::G_UITOFP:
    return legalizeITOFP(MI, MRI, MIRBuilder, false);
  case TargetOpcode::G_FMINNUM:
  case TargetOpcode::G_FMAXNUM:
  case TargetOpcode::G_FMINNUM_IEEE:
  case TargetOpcode::G_FMAXNUM_IEEE:
    return legalizeMinNumMaxNum(MI, MRI, MIRBuilder);
  case TargetOpcode::G_EXTRACT_VECTOR_ELT:
    return legalizeExtractVectorElt(MI, MRI, MIRBuilder);
  case TargetOpcode::G_INSERT_VECTOR_ELT:
    return legalizeInsertVectorElt(MI, MRI, MIRBuilder);
  case TargetOpcode::G_FSIN:
  case TargetOpcode::G_FCOS:
    return legalizeSinCos(MI, MRI, MIRBuilder);
  default:
    return false;
  }

  llvm_unreachable("expected switch to return");
}

Register AMDGPULegalizerInfo::getSegmentAperture(
  unsigned AS,
  MachineRegisterInfo &MRI,
  MachineIRBuilder &MIRBuilder) const {
  MachineFunction &MF = MIRBuilder.getMF();
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const LLT S32 = LLT::scalar(32);

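  // The aperture is the upper half of the 64-bit flat address range that
  // aliases the LOCAL or PRIVATE segment; the lower half is the segment
  // offset itself.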
  if (ST.hasApertureRegs()) {
    // FIXME: Use inline constants (src_{shared, private}_base) instead of
    // getreg.
    unsigned Offset = AS == AMDGPUAS::LOCAL_ADDRESS ?
        AMDGPU::Hwreg::OFFSET_SRC_SHARED_BASE :
        AMDGPU::Hwreg::OFFSET_SRC_PRIVATE_BASE;
    unsigned WidthM1 = AS == AMDGPUAS::LOCAL_ADDRESS ?
        AMDGPU::Hwreg::WIDTH_M1_SRC_SHARED_BASE :
        AMDGPU::Hwreg::WIDTH_M1_SRC_PRIVATE_BASE;
    unsigned Encoding =
        AMDGPU::Hwreg::ID_MEM_BASES << AMDGPU::Hwreg::ID_SHIFT_ |
        Offset << AMDGPU::Hwreg::OFFSET_SHIFT_ |
        WidthM1 << AMDGPU::Hwreg::WIDTH_M1_SHIFT_;

    Register ApertureReg = MRI.createGenericVirtualRegister(S32);
    Register GetReg = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);

    MIRBuilder.buildInstr(AMDGPU::S_GETREG_B32)
      .addDef(GetReg)
      .addImm(Encoding);
    MRI.setType(GetReg, S32);

    auto ShiftAmt = MIRBuilder.buildConstant(S32, WidthM1 + 1);
    MIRBuilder.buildInstr(TargetOpcode::G_SHL)
      .addDef(ApertureReg)
      .addUse(GetReg)
      .addUse(ShiftAmt.getReg(0));

    return ApertureReg;
  }

  Register QueuePtr = MRI.createGenericVirtualRegister(
    LLT::pointer(AMDGPUAS::CONSTANT_ADDRESS, 64));

  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  if (!loadInputValue(QueuePtr, MIRBuilder, &MFI->getArgInfo().QueuePtr))
    return Register();

  // Offset into amd_queue_t for group_segment_aperture_base_hi /
  // private_segment_aperture_base_hi.
  uint32_t StructOffset = (AS == AMDGPUAS::LOCAL_ADDRESS) ? 0x40 : 0x44;

  // FIXME: Don't use undef
  Value *V = UndefValue::get(PointerType::get(
    Type::getInt8Ty(MF.getFunction().getContext()),
    AMDGPUAS::CONSTANT_ADDRESS));

  MachinePointerInfo PtrInfo(V, StructOffset);
  MachineMemOperand *MMO = MF.getMachineMemOperand(
    PtrInfo,
    MachineMemOperand::MOLoad |
    MachineMemOperand::MODereferenceable |
    MachineMemOperand::MOInvariant,
    4,
    MinAlign(64, StructOffset));

  Register LoadResult = MRI.createGenericVirtualRegister(S32);
  Register LoadAddr;

  MIRBuilder.materializeGEP(LoadAddr, QueuePtr, LLT::scalar(64), StructOffset);
  MIRBuilder.buildLoad(LoadResult, LoadAddr, *MMO);
  return LoadResult;
}

bool AMDGPULegalizerInfo::legalizeAddrSpaceCast(
  MachineInstr &MI, MachineRegisterInfo &MRI,
  MachineIRBuilder &MIRBuilder) const {
  MachineFunction &MF = MIRBuilder.getMF();

  MIRBuilder.setInstr(MI);

  const LLT S32 = LLT::scalar(32);
  Register Dst = MI.getOperand(0).getReg();
  Register Src = MI.getOperand(1).getReg();

  LLT DstTy = MRI.getType(Dst);
  LLT SrcTy = MRI.getType(Src);
  unsigned DestAS = DstTy.getAddressSpace();
  unsigned SrcAS = SrcTy.getAddressSpace();

  // TODO: Avoid reloading from the queue ptr for each cast, or at least each
  // vector element.
  assert(!DstTy.isVector());

  const AMDGPUTargetMachine &TM
    = static_cast<const AMDGPUTargetMachine &>(MF.getTarget());

  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  if (ST.getTargetLowering()->isNoopAddrSpaceCast(SrcAS, DestAS)) {
    MI.setDesc(MIRBuilder.getTII().get(TargetOpcode::G_BITCAST));
    return true;
  }

  if (DestAS == AMDGPUAS::CONSTANT_ADDRESS_32BIT) {
    // Truncate.
    MIRBuilder.buildExtract(Dst, Src, 0);
    MI.eraseFromParent();
    return true;
  }

  if (SrcAS == AMDGPUAS::CONSTANT_ADDRESS_32BIT) {
    const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
    uint32_t AddrHiVal = Info->get32BitAddressHighBits();

    // FIXME: This is a bit ugly due to creating a merge of 2 pointers to
    // another. Merge operands are required to be the same type, but creating an
    // extra ptrtoint would be kind of pointless.
    auto HighAddr = MIRBuilder.buildConstant(
      LLT::pointer(AMDGPUAS::CONSTANT_ADDRESS_32BIT, 32), AddrHiVal);
    MIRBuilder.buildMerge(Dst, {Src, HighAddr.getReg(0)});
    MI.eraseFromParent();
    return true;
  }

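  // Casting flat to LDS/private keeps the low 32 bits of the pointer, except
  // that the flat null value must map to the segment's null value.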
  if (SrcAS == AMDGPUAS::FLAT_ADDRESS) {
    assert(DestAS == AMDGPUAS::LOCAL_ADDRESS ||
           DestAS == AMDGPUAS::PRIVATE_ADDRESS);
    unsigned NullVal = TM.getNullPointerValue(DestAS);

    auto SegmentNull = MIRBuilder.buildConstant(DstTy, NullVal);
    auto FlatNull = MIRBuilder.buildConstant(SrcTy, 0);

    Register PtrLo32 = MRI.createGenericVirtualRegister(DstTy);

    // Extract low 32-bits of the pointer.
    MIRBuilder.buildExtract(PtrLo32, Src, 0);

    Register CmpRes = MRI.createGenericVirtualRegister(LLT::scalar(1));
    MIRBuilder.buildICmp(CmpInst::ICMP_NE, CmpRes, Src, FlatNull.getReg(0));
    MIRBuilder.buildSelect(Dst, CmpRes, PtrLo32, SegmentNull.getReg(0));

    MI.eraseFromParent();
    return true;
  }

  if (SrcAS != AMDGPUAS::LOCAL_ADDRESS && SrcAS != AMDGPUAS::PRIVATE_ADDRESS)
    return false;

  if (!ST.hasFlatAddressSpace())
    return false;

  auto SegmentNull =
      MIRBuilder.buildConstant(SrcTy, TM.getNullPointerValue(SrcAS));
  auto FlatNull =
      MIRBuilder.buildConstant(DstTy, TM.getNullPointerValue(DestAS));

  Register ApertureReg = getSegmentAperture(DestAS, MRI, MIRBuilder);
  if (!ApertureReg.isValid())
    return false;

  Register CmpRes = MRI.createGenericVirtualRegister(LLT::scalar(1));
  MIRBuilder.buildICmp(CmpInst::ICMP_NE, CmpRes, Src, SegmentNull.getReg(0));

  Register BuildPtr = MRI.createGenericVirtualRegister(DstTy);

  // Coerce the type of the low half of the result so we can use merge_values.
  Register SrcAsInt = MRI.createGenericVirtualRegister(S32);
  MIRBuilder.buildInstr(TargetOpcode::G_PTRTOINT)
    .addDef(SrcAsInt)
    .addUse(Src);

  // TODO: Should we allow mismatched types but matching sizes in merges to
  // avoid the ptrtoint?
  MIRBuilder.buildMerge(BuildPtr, {SrcAsInt, ApertureReg});
  MIRBuilder.buildSelect(Dst, CmpRes, BuildPtr, FlatNull.getReg(0));

  MI.eraseFromParent();
  return true;
}

bool AMDGPULegalizerInfo::legalizeFrint(
  MachineInstr &MI, MachineRegisterInfo &MRI,
  MachineIRBuilder &MIRBuilder) const {
  MIRBuilder.setInstr(MI);

  Register Src = MI.getOperand(1).getReg();
  LLT Ty = MRI.getType(Src);
  assert(Ty.isScalar() && Ty.getSizeInBits() == 64);

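  // Adding and subtracting 2^52 (with the sign of the input) forces f64
  // rounding to an integer; inputs with a magnitude above
  // 0x1.fffffffffffffp+51 are already integral and are returned as-is.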
  APFloat C1Val(APFloat::IEEEdouble(), "0x1.0p+52");
  APFloat C2Val(APFloat::IEEEdouble(), "0x1.fffffffffffffp+51");

  auto C1 = MIRBuilder.buildFConstant(Ty, C1Val);
  auto CopySign = MIRBuilder.buildFCopysign(Ty, C1, Src);

  // TODO: Should this propagate fast-math-flags?
  auto Tmp1 = MIRBuilder.buildFAdd(Ty, Src, CopySign);
  auto Tmp2 = MIRBuilder.buildFSub(Ty, Tmp1, CopySign);

  auto C2 = MIRBuilder.buildFConstant(Ty, C2Val);
  auto Fabs = MIRBuilder.buildFAbs(Ty, Src);

  auto Cond = MIRBuilder.buildFCmp(CmpInst::FCMP_OGT, LLT::scalar(1), Fabs, C2);
  MIRBuilder.buildSelect(MI.getOperand(0).getReg(), Cond, Src, Tmp2);
  return true;
}

bool AMDGPULegalizerInfo::legalizeFceil(
  MachineInstr &MI, MachineRegisterInfo &MRI,
  MachineIRBuilder &B) const {
  B.setInstr(MI);

  const LLT S1 = LLT::scalar(1);
  const LLT S64 = LLT::scalar(64);

  Register Src = MI.getOperand(1).getReg();
  assert(MRI.getType(Src) == S64);

  // result = trunc(src)
  // if (src > 0.0 && src != result)
  //   result += 1.0

  auto Trunc = B.buildInstr(TargetOpcode::G_INTRINSIC_TRUNC, {S64}, {Src});

  const auto Zero = B.buildFConstant(S64, 0.0);
  const auto One = B.buildFConstant(S64, 1.0);
  auto Lt0 = B.buildFCmp(CmpInst::FCMP_OGT, S1, Src, Zero);
  auto NeTrunc = B.buildFCmp(CmpInst::FCMP_ONE, S1, Src, Trunc);
  auto And = B.buildAnd(S1, Lt0, NeTrunc);
  auto Add = B.buildSelect(S64, And, One, Zero);

  // TODO: Should this propagate fast-math-flags?
  B.buildFAdd(MI.getOperand(0).getReg(), Trunc, Add);
  return true;
}

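// An IEEE f64 has 1 sign bit, an 11-bit exponent, and a 52-bit fraction, so
// the exponent field occupies bits [20:30] of the high 32-bit word. Extract
// it and subtract the 1023 bias to get the unbiased exponent.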
static MachineInstrBuilder extractF64Exponent(unsigned Hi,
                                              MachineIRBuilder &B) {
  const unsigned FractBits = 52;
  const unsigned ExpBits = 11;
  LLT S32 = LLT::scalar(32);

  auto Const0 = B.buildConstant(S32, FractBits - 32);
  auto Const1 = B.buildConstant(S32, ExpBits);

  auto ExpPart = B.buildIntrinsic(Intrinsic::amdgcn_ubfe, {S32}, false)
    .addUse(Const0.getReg(0))
    .addUse(Const1.getReg(0));

  return B.buildSub(S32, ExpPart, B.buildConstant(S32, 1023));
}

bool AMDGPULegalizerInfo::legalizeIntrinsicTrunc(
  MachineInstr &MI, MachineRegisterInfo &MRI,
  MachineIRBuilder &B) const {
  B.setInstr(MI);

  const LLT S1 = LLT::scalar(1);
  const LLT S32 = LLT::scalar(32);
  const LLT S64 = LLT::scalar(64);

  Register Src = MI.getOperand(1).getReg();
  assert(MRI.getType(Src) == S64);

  // TODO: Should this use extract since the low half is unused?
  auto Unmerge = B.buildUnmerge({S32, S32}, Src);
  Register Hi = Unmerge.getReg(1);

  // Extract the upper half, since this is where we will find the sign and
  // exponent.
  auto Exp = extractF64Exponent(Hi, B);

  const unsigned FractBits = 52;

  // Extract the sign bit.
  const auto SignBitMask = B.buildConstant(S32, UINT32_C(1) << 31);
  auto SignBit = B.buildAnd(S32, Hi, SignBitMask);

  const auto FractMask = B.buildConstant(S64, (UINT64_C(1) << FractBits) - 1);

  const auto Zero32 = B.buildConstant(S32, 0);

  // Extend back to 64-bits.
  auto SignBit64 = B.buildMerge(S64, {Zero32.getReg(0), SignBit.getReg(0)});

  auto Shr = B.buildAShr(S64, FractMask, Exp);
  auto Not = B.buildNot(S64, Shr);
  auto Tmp0 = B.buildAnd(S64, Src, Not);
  auto FiftyOne = B.buildConstant(S32, FractBits - 1);

  auto ExpLt0 = B.buildICmp(CmpInst::ICMP_SLT, S1, Exp, Zero32);
  auto ExpGt51 = B.buildICmp(CmpInst::ICMP_SGT, S1, Exp, FiftyOne);

  auto Tmp1 = B.buildSelect(S64, ExpLt0, SignBit64, Tmp0);
  B.buildSelect(MI.getOperand(0).getReg(), ExpGt51, Src, Tmp1);
  return true;
}

bool AMDGPULegalizerInfo::legalizeITOFP(
  MachineInstr &MI, MachineRegisterInfo &MRI,
  MachineIRBuilder &B, bool Signed) const {
  B.setInstr(MI);

  Register Dst = MI.getOperand(0).getReg();
  Register Src = MI.getOperand(1).getReg();

  const LLT S64 = LLT::scalar(64);
  const LLT S32 = LLT::scalar(32);

  assert(MRI.getType(Src) == S64 && MRI.getType(Dst) == S64);

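  // Convert the two 32-bit halves individually, scale the converted high half
  // by 2^32 with ldexp, and sum the pieces. Only the high half carries the
  // sign, so the low half is always converted unsigned.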
1152 auto Unmerge = B.buildUnmerge({S32, S32}, Src);
1153
1154 auto CvtHi = Signed ?
1155 B.buildSITOFP(S64, Unmerge.getReg(1)) :
1156 B.buildUITOFP(S64, Unmerge.getReg(1));
1157
1158 auto CvtLo = B.buildUITOFP(S64, Unmerge.getReg(0));
1159
1160 auto ThirtyTwo = B.buildConstant(S32, 32);
1161 auto LdExp = B.buildIntrinsic(Intrinsic::amdgcn_ldexp, {S64}, false)
1162 .addUse(CvtHi.getReg(0))
1163 .addUse(ThirtyTwo.getReg(0));
1164
1165 // TODO: Should this propagate fast-math-flags?
1166 B.buildFAdd(Dst, LdExp, CvtLo);
1167 MI.eraseFromParent();
1168 return true;
1169}
Matt Arsenaulte15770a2019-07-01 18:40:23 +00001170
bool AMDGPULegalizerInfo::legalizeMinNumMaxNum(
  MachineInstr &MI, MachineRegisterInfo &MRI,
  MachineIRBuilder &B) const {
  MachineFunction &MF = B.getMF();
  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

  const bool IsIEEEOp = MI.getOpcode() == AMDGPU::G_FMINNUM_IEEE ||
                        MI.getOpcode() == AMDGPU::G_FMAXNUM_IEEE;

  // With ieee_mode disabled, the instructions already have the correct
  // behavior for G_FMINNUM/G_FMAXNUM.
  if (!MFI->getMode().IEEE)
    return !IsIEEEOp;

  if (IsIEEEOp)
    return true;

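  // What remains is G_FMINNUM/G_FMAXNUM with ieee_mode enabled, which must
  // quiet signaling NaN inputs; defer to the generic lowering, which expands
  // these in terms of the IEEE variants.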
  MachineIRBuilder HelperBuilder(MI);
  GISelObserverWrapper DummyObserver;
  LegalizerHelper Helper(MF, DummyObserver, HelperBuilder);
  HelperBuilder.setMBB(*MI.getParent());
  return Helper.lowerFMinNumMaxNum(MI) == LegalizerHelper::Legalized;
}

bool AMDGPULegalizerInfo::legalizeExtractVectorElt(
  MachineInstr &MI, MachineRegisterInfo &MRI,
  MachineIRBuilder &B) const {
  // TODO: Should move some of this into LegalizerHelper.

  // TODO: Promote dynamic indexing of s16 to s32
  // TODO: Dynamic s64 indexing is only legal for SGPR.
  Optional<int64_t> IdxVal = getConstantVRegVal(MI.getOperand(2).getReg(), MRI);
  if (!IdxVal) // Dynamic case will be selected to register indexing.
    return true;

  Register Dst = MI.getOperand(0).getReg();
  Register Vec = MI.getOperand(1).getReg();

  LLT VecTy = MRI.getType(Vec);
  LLT EltTy = VecTy.getElementType();
  assert(EltTy == MRI.getType(Dst));

  B.setInstr(MI);

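  // A constant in-bounds index becomes a static extract at the element's bit
  // offset; a constant out-of-bounds index reads undef. The insert case below
  // is handled the same way.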
  if (IdxVal.getValue() < VecTy.getNumElements())
    B.buildExtract(Dst, Vec, IdxVal.getValue() * EltTy.getSizeInBits());
  else
    B.buildUndef(Dst);

  MI.eraseFromParent();
  return true;
}

bool AMDGPULegalizerInfo::legalizeInsertVectorElt(
  MachineInstr &MI, MachineRegisterInfo &MRI,
  MachineIRBuilder &B) const {
  // TODO: Should move some of this into LegalizerHelper.

  // TODO: Promote dynamic indexing of s16 to s32
  // TODO: Dynamic s64 indexing is only legal for SGPR.
  Optional<int64_t> IdxVal = getConstantVRegVal(MI.getOperand(3).getReg(), MRI);
  if (!IdxVal) // Dynamic case will be selected to register indexing.
    return true;

  Register Dst = MI.getOperand(0).getReg();
  Register Vec = MI.getOperand(1).getReg();
  Register Ins = MI.getOperand(2).getReg();

  LLT VecTy = MRI.getType(Vec);
  LLT EltTy = VecTy.getElementType();
  assert(EltTy == MRI.getType(Ins));

  B.setInstr(MI);

  if (IdxVal.getValue() < VecTy.getNumElements())
    B.buildInsert(Dst, Vec, Ins, IdxVal.getValue() * EltTy.getSizeInBits());
  else
    B.buildUndef(Dst);

  MI.eraseFromParent();
  return true;
}

bool AMDGPULegalizerInfo::legalizeSinCos(
  MachineInstr &MI, MachineRegisterInfo &MRI,
  MachineIRBuilder &B) const {
  B.setInstr(MI);

  Register DstReg = MI.getOperand(0).getReg();
  Register SrcReg = MI.getOperand(1).getReg();
  LLT Ty = MRI.getType(DstReg);
  unsigned Flags = MI.getFlags();

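  // The hardware sin/cos expect an input pre-scaled by 1/(2*pi), i.e.
  // measured in turns rather than radians. Subtargets with a reduced valid
  // input range additionally need the scaled operand wrapped into [0, 1)
  // with fract first.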
  Register TrigVal;
  auto OneOver2Pi = B.buildFConstant(Ty, 0.5 / M_PI);
  if (ST.hasTrigReducedRange()) {
    auto MulVal = B.buildFMul(Ty, SrcReg, OneOver2Pi, Flags);
    TrigVal = B.buildIntrinsic(Intrinsic::amdgcn_fract, {Ty}, false)
      .addUse(MulVal.getReg(0))
      .setMIFlags(Flags).getReg(0);
  } else
    TrigVal = B.buildFMul(Ty, SrcReg, OneOver2Pi, Flags).getReg(0);

  Intrinsic::ID TrigIntrin = MI.getOpcode() == AMDGPU::G_FSIN ?
    Intrinsic::amdgcn_sin : Intrinsic::amdgcn_cos;
  B.buildIntrinsic(TrigIntrin, makeArrayRef<Register>(DstReg), false)
    .addUse(TrigVal)
    .setMIFlags(Flags);
  MI.eraseFromParent();
  return true;
}

// Return the G_BRCOND that consumes the condition output, or null if the
// usage is invalid (more than one use, or the use is in another block).
static MachineInstr *verifyCFIntrinsic(MachineInstr &MI,
                                       MachineRegisterInfo &MRI) {
  Register CondDef = MI.getOperand(0).getReg();
  if (!MRI.hasOneNonDBGUse(CondDef))
    return nullptr;

  MachineInstr &UseMI = *MRI.use_instr_nodbg_begin(CondDef);
  return UseMI.getParent() == MI.getParent() &&
         UseMI.getOpcode() == AMDGPU::G_BRCOND ? &UseMI : nullptr;
}

Register AMDGPULegalizerInfo::getLiveInRegister(MachineRegisterInfo &MRI,
                                                Register Reg, LLT Ty) const {
  Register LiveIn = MRI.getLiveInVirtReg(Reg);
  if (LiveIn)
    return LiveIn;

  Register NewReg = MRI.createGenericVirtualRegister(Ty);
  MRI.addLiveIn(Reg, NewReg);
  return NewReg;
}

bool AMDGPULegalizerInfo::loadInputValue(Register DstReg, MachineIRBuilder &B,
                                         const ArgDescriptor *Arg) const {
  if (!Arg->isRegister() || !Arg->getRegister().isValid())
    return false; // TODO: Handle these

  assert(Arg->getRegister().isPhysical());

  MachineRegisterInfo &MRI = *B.getMRI();

  LLT Ty = MRI.getType(DstReg);
  Register LiveIn = getLiveInRegister(MRI, Arg->getRegister(), Ty);

  if (Arg->isMasked()) {
    // TODO: Should we try to emit this once in the entry block?
    const LLT S32 = LLT::scalar(32);
    const unsigned Mask = Arg->getMask();
    const unsigned Shift = countTrailingZeros<unsigned>(Mask);

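    // A masked argument is a bitfield packed with others into a single
    // 32-bit register (e.g. the packed work-item IDs); extract it as
    // (LiveIn >> Shift) & (Mask >> Shift).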
    auto ShiftAmt = B.buildConstant(S32, Shift);
    auto LShr = B.buildLShr(S32, LiveIn, ShiftAmt);
    B.buildAnd(DstReg, LShr, B.buildConstant(S32, Mask >> Shift));
  } else
    B.buildCopy(DstReg, LiveIn);

  // Insert the argument copy if it doesn't already exist.
  // FIXME: It seems EmitLiveInCopies isn't called anywhere?
  if (!MRI.getVRegDef(LiveIn)) {
    // FIXME: Should have scoped insert pt
    MachineBasicBlock &OrigInsBB = B.getMBB();
    auto OrigInsPt = B.getInsertPt();

    MachineBasicBlock &EntryMBB = B.getMF().front();
    EntryMBB.addLiveIn(Arg->getRegister());
    B.setInsertPt(EntryMBB, EntryMBB.begin());
    B.buildCopy(LiveIn, Arg->getRegister());

    B.setInsertPt(OrigInsBB, OrigInsPt);
  }

  return true;
}

bool AMDGPULegalizerInfo::legalizePreloadedArgIntrin(
  MachineInstr &MI,
  MachineRegisterInfo &MRI,
  MachineIRBuilder &B,
  AMDGPUFunctionArgInfo::PreloadedValue ArgType) const {
  B.setInstr(MI);

  const SIMachineFunctionInfo *MFI = B.getMF().getInfo<SIMachineFunctionInfo>();

  const ArgDescriptor *Arg;
  const TargetRegisterClass *RC;
  std::tie(Arg, RC) = MFI->getPreloadedValue(ArgType);
  if (!Arg) {
    LLVM_DEBUG(dbgs() << "Required arg register missing\n");
    return false;
  }

  if (loadInputValue(MI.getOperand(0).getReg(), B, Arg)) {
    MI.eraseFromParent();
    return true;
  }

  return false;
}

bool AMDGPULegalizerInfo::legalizeFDIVFast(MachineInstr &MI,
                                           MachineRegisterInfo &MRI,
                                           MachineIRBuilder &B) const {
  B.setInstr(MI);
  Register Res = MI.getOperand(0).getReg();
  Register LHS = MI.getOperand(2).getReg();
  Register RHS = MI.getOperand(3).getReg();
  uint16_t Flags = MI.getFlags();

  LLT S32 = LLT::scalar(32);
  LLT S1 = LLT::scalar(1);

  auto Abs = B.buildFAbs(S32, RHS, Flags);

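  // 0x6f800000 is 0x1p+96f and 0x2f800000 is 0x1p-32f. If |RHS| exceeds
  // 2^96, pre-scale the denominator by 2^-32 so the reciprocal does not
  // flush to zero, then multiply the quotient by the same factor (Sel) to
  // compensate; otherwise the scale is 1.0 and is a no-op.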
  auto C0 = B.buildConstant(S32, 0x6f800000);
  auto C1 = B.buildConstant(S32, 0x2f800000);
  auto C2 = B.buildConstant(S32, FloatToBits(1.0f));

  auto CmpRes = B.buildFCmp(CmpInst::FCMP_OGT, S1, Abs, C0, Flags);
  auto Sel = B.buildSelect(S32, CmpRes, C1, C2, Flags);

  auto Mul0 = B.buildFMul(S32, RHS, Sel, Flags);

  auto RCP = B.buildIntrinsic(Intrinsic::amdgcn_rcp, {S32}, false)
    .addUse(Mul0.getReg(0))
    .setMIFlags(Flags);

  auto Mul1 = B.buildFMul(S32, LHS, RCP, Flags);

  B.buildFMul(Res, Sel, Mul1, Flags);

  MI.eraseFromParent();
  return true;
}

bool AMDGPULegalizerInfo::legalizeImplicitArgPtr(MachineInstr &MI,
                                                 MachineRegisterInfo &MRI,
                                                 MachineIRBuilder &B) const {
  const SIMachineFunctionInfo *MFI = B.getMF().getInfo<SIMachineFunctionInfo>();
  if (!MFI->isEntryFunction()) {
    return legalizePreloadedArgIntrin(MI, MRI, B,
                                      AMDGPUFunctionArgInfo::IMPLICIT_ARG_PTR);
  }

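  // In an entry function the implicit arguments are laid out directly after
  // the explicit kernel arguments, so the implicit argument pointer is the
  // kernarg segment pointer plus the implicit parameter offset.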
  B.setInstr(MI);

  uint64_t Offset =
    ST.getTargetLowering()->getImplicitParameterOffset(
      B.getMF(), AMDGPUTargetLowering::FIRST_IMPLICIT);
  Register DstReg = MI.getOperand(0).getReg();
  LLT DstTy = MRI.getType(DstReg);
  LLT IdxTy = LLT::scalar(DstTy.getSizeInBits());

  const ArgDescriptor *Arg;
  const TargetRegisterClass *RC;
  std::tie(Arg, RC)
    = MFI->getPreloadedValue(AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR);
  if (!Arg)
    return false;

  Register KernargPtrReg = MRI.createGenericVirtualRegister(DstTy);
  if (!loadInputValue(KernargPtrReg, B, Arg))
    return false;

  B.buildGEP(DstReg, KernargPtrReg, B.buildConstant(IdxTy, Offset).getReg(0));
  MI.eraseFromParent();
  return true;
}

bool AMDGPULegalizerInfo::legalizeIsAddrSpace(MachineInstr &MI,
                                              MachineRegisterInfo &MRI,
                                              MachineIRBuilder &B,
                                              unsigned AddrSpace) const {
  B.setInstr(MI);
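  // A flat pointer lies in the given segment exactly when its high 32 bits
  // match the segment's aperture base, so compare that half against the
  // aperture register.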
  Register ApertureReg = getSegmentAperture(AddrSpace, MRI, B);
  auto Hi32 = B.buildExtract(LLT::scalar(32), MI.getOperand(2).getReg(), 32);
  B.buildICmp(ICmpInst::ICMP_EQ, MI.getOperand(0), Hi32, ApertureReg);
  MI.eraseFromParent();
  return true;
}

bool AMDGPULegalizerInfo::legalizeIntrinsic(MachineInstr &MI,
                                            MachineRegisterInfo &MRI,
                                            MachineIRBuilder &B) const {
  // Replace the consuming G_BRCOND with the exec-manipulating branch pseudos.
  switch (MI.getOperand(MI.getNumExplicitDefs()).getIntrinsicID()) {
  case Intrinsic::amdgcn_if: {
    if (MachineInstr *BrCond = verifyCFIntrinsic(MI, MRI)) {
      const SIRegisterInfo *TRI
        = static_cast<const SIRegisterInfo *>(MRI.getTargetRegisterInfo());

      B.setInstr(*BrCond);
      Register Def = MI.getOperand(1).getReg();
      Register Use = MI.getOperand(3).getReg();
      B.buildInstr(AMDGPU::SI_IF)
        .addDef(Def)
        .addUse(Use)
        .addMBB(BrCond->getOperand(1).getMBB());

      MRI.setRegClass(Def, TRI->getWaveMaskRegClass());
      MRI.setRegClass(Use, TRI->getWaveMaskRegClass());
      MI.eraseFromParent();
      BrCond->eraseFromParent();
      return true;
    }

    return false;
  }
  case Intrinsic::amdgcn_loop: {
    if (MachineInstr *BrCond = verifyCFIntrinsic(MI, MRI)) {
      const SIRegisterInfo *TRI
        = static_cast<const SIRegisterInfo *>(MRI.getTargetRegisterInfo());

      B.setInstr(*BrCond);
      Register Reg = MI.getOperand(2).getReg();
      B.buildInstr(AMDGPU::SI_LOOP)
        .addUse(Reg)
        .addMBB(BrCond->getOperand(1).getMBB());
      MI.eraseFromParent();
      BrCond->eraseFromParent();
      MRI.setRegClass(Reg, TRI->getWaveMaskRegClass());
      return true;
    }

    return false;
  }
  case Intrinsic::amdgcn_kernarg_segment_ptr:
    return legalizePreloadedArgIntrin(
      MI, MRI, B, AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR);
  case Intrinsic::amdgcn_implicitarg_ptr:
    return legalizeImplicitArgPtr(MI, MRI, B);
  case Intrinsic::amdgcn_workitem_id_x:
    return legalizePreloadedArgIntrin(MI, MRI, B,
                                      AMDGPUFunctionArgInfo::WORKITEM_ID_X);
  case Intrinsic::amdgcn_workitem_id_y:
    return legalizePreloadedArgIntrin(MI, MRI, B,
                                      AMDGPUFunctionArgInfo::WORKITEM_ID_Y);
  case Intrinsic::amdgcn_workitem_id_z:
    return legalizePreloadedArgIntrin(MI, MRI, B,
                                      AMDGPUFunctionArgInfo::WORKITEM_ID_Z);
  case Intrinsic::amdgcn_workgroup_id_x:
    return legalizePreloadedArgIntrin(MI, MRI, B,
                                      AMDGPUFunctionArgInfo::WORKGROUP_ID_X);
  case Intrinsic::amdgcn_workgroup_id_y:
    return legalizePreloadedArgIntrin(MI, MRI, B,
                                      AMDGPUFunctionArgInfo::WORKGROUP_ID_Y);
  case Intrinsic::amdgcn_workgroup_id_z:
    return legalizePreloadedArgIntrin(MI, MRI, B,
                                      AMDGPUFunctionArgInfo::WORKGROUP_ID_Z);
  case Intrinsic::amdgcn_dispatch_ptr:
    return legalizePreloadedArgIntrin(MI, MRI, B,
                                      AMDGPUFunctionArgInfo::DISPATCH_PTR);
  case Intrinsic::amdgcn_queue_ptr:
    return legalizePreloadedArgIntrin(MI, MRI, B,
                                      AMDGPUFunctionArgInfo::QUEUE_PTR);
  case Intrinsic::amdgcn_implicit_buffer_ptr:
    return legalizePreloadedArgIntrin(
      MI, MRI, B, AMDGPUFunctionArgInfo::IMPLICIT_BUFFER_PTR);
  case Intrinsic::amdgcn_dispatch_id:
    return legalizePreloadedArgIntrin(MI, MRI, B,
                                      AMDGPUFunctionArgInfo::DISPATCH_ID);
  case Intrinsic::amdgcn_fdiv_fast:
    return legalizeFDIVFast(MI, MRI, B);
  case Intrinsic::amdgcn_is_shared:
    return legalizeIsAddrSpace(MI, MRI, B, AMDGPUAS::LOCAL_ADDRESS);
  case Intrinsic::amdgcn_is_private:
    return legalizeIsAddrSpace(MI, MRI, B, AMDGPUAS::PRIVATE_ADDRESS);
  case Intrinsic::amdgcn_wavefrontsize: {
    B.setInstr(MI);
    B.buildConstant(MI.getOperand(0), ST.getWavefrontSize());
    MI.eraseFromParent();
    return true;
  }
  default:
    return true;
  }

  return true;
}