//===- AMDGPULegalizerInfo.cpp -----------------------------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the targeting of the MachineLegalizer class for
/// AMDGPU.
/// \todo This should be generated by TableGen.
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPULegalizerInfo.h"
#include "AMDGPUTargetMachine.h"
#include "SIMachineFunctionInfo.h"
#include "llvm/CodeGen/GlobalISel/LegalizerHelper.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/Debug.h"

#define DEBUG_TYPE "amdgpu-legalinfo"

using namespace llvm;
using namespace LegalizeActions;
using namespace LegalizeMutations;
using namespace LegalityPredicates;

static LegalityPredicate isMultiple32(unsigned TypeIdx,
                                      unsigned MaxSize = 512) {
  return [=](const LegalityQuery &Query) {
    const LLT Ty = Query.Types[TypeIdx];
    const LLT EltTy = Ty.getScalarType();
    return Ty.getSizeInBits() <= MaxSize && EltTy.getSizeInBits() % 32 == 0;
  };
}
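
// These helpers build the callbacks that LegalizeRuleSet evaluates against
// each LegalityQuery. A minimal usage sketch (G_FOO is a placeholder opcode;
// real uses appear in the constructor below, e.g. for G_IMPLICIT_DEF):
//   getActionDefinitionsBuilder(G_FOO).legalIf(isMultiple32(0));
// This marks type index 0 legal whenever each scalar element is a multiple
// of 32 bits wide and the whole type is at most MaxSize bits.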

static LegalityPredicate isSmallOddVector(unsigned TypeIdx) {
  return [=](const LegalityQuery &Query) {
    const LLT Ty = Query.Types[TypeIdx];
    return Ty.isVector() &&
           Ty.getNumElements() % 2 != 0 &&
           Ty.getElementType().getSizeInBits() < 32;
  };
}

static LegalizeMutation oneMoreElement(unsigned TypeIdx) {
  return [=](const LegalityQuery &Query) {
    const LLT Ty = Query.Types[TypeIdx];
    const LLT EltTy = Ty.getElementType();
    return std::make_pair(TypeIdx, LLT::vector(Ty.getNumElements() + 1, EltTy));
  };
}

static LegalizeMutation fewerEltsToSize64Vector(unsigned TypeIdx) {
  return [=](const LegalityQuery &Query) {
    const LLT Ty = Query.Types[TypeIdx];
    const LLT EltTy = Ty.getElementType();
    unsigned Size = Ty.getSizeInBits();
    unsigned Pieces = (Size + 63) / 64;
    unsigned NewNumElts = (Ty.getNumElements() + 1) / Pieces;
    return std::make_pair(TypeIdx, LLT::scalarOrVector(NewNumElts, EltTy));
  };
}
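
// Worked examples of the mutation above, derived from its arithmetic: v4s32
// is 128 bits, so Pieces = 2 and the type is rebuilt as v2s32; v8s16 (128
// bits) likewise becomes v4s16; v3s32 (96 bits) also splits into two pieces
// and becomes v2s32.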

static LegalityPredicate vectorWiderThan(unsigned TypeIdx, unsigned Size) {
  return [=](const LegalityQuery &Query) {
    const LLT QueryTy = Query.Types[TypeIdx];
    return QueryTy.isVector() && QueryTy.getSizeInBits() > Size;
  };
}

static LegalityPredicate numElementsNotEven(unsigned TypeIdx) {
  return [=](const LegalityQuery &Query) {
    const LLT QueryTy = Query.Types[TypeIdx];
    return QueryTy.isVector() && QueryTy.getNumElements() % 2 != 0;
  };
}

// Any combination of 32 or 64-bit elements up to 512 bits, and multiples of
// v2s16.
static LegalityPredicate isRegisterType(unsigned TypeIdx) {
  return [=](const LegalityQuery &Query) {
    const LLT Ty = Query.Types[TypeIdx];
    if (Ty.isVector()) {
      const int EltSize = Ty.getElementType().getSizeInBits();
      return EltSize == 32 || EltSize == 64 ||
             (EltSize == 16 && Ty.getNumElements() % 2 == 0) ||
             EltSize == 128 || EltSize == 256;
    }

    return Ty.getSizeInBits() % 32 == 0 && Ty.getSizeInBits() <= 512;
  };
}
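
// Concretely, this accepts any vector of 32- or 64-bit elements (v2s32,
// v4s32, v2s64, ...), even-length 16-bit vectors such as v2s16 and v4s16,
// vectors of 128- or 256-bit elements, and any scalar whose size is a
// multiple of 32 bits up to 512 (s32, s64, s96, ..., s512).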

AMDGPULegalizerInfo::AMDGPULegalizerInfo(const GCNSubtarget &ST_,
                                         const GCNTargetMachine &TM)
  : ST(ST_) {
  using namespace TargetOpcode;

  auto GetAddrSpacePtr = [&TM](unsigned AS) {
    return LLT::pointer(AS, TM.getPointerSizeInBits(AS));
  };

  const LLT S1 = LLT::scalar(1);
  const LLT S8 = LLT::scalar(8);
  const LLT S16 = LLT::scalar(16);
  const LLT S32 = LLT::scalar(32);
  const LLT S64 = LLT::scalar(64);
  const LLT S128 = LLT::scalar(128);
  const LLT S256 = LLT::scalar(256);
  const LLT S512 = LLT::scalar(512);

  const LLT V2S16 = LLT::vector(2, 16);
  const LLT V4S16 = LLT::vector(4, 16);

  const LLT V2S32 = LLT::vector(2, 32);
  const LLT V3S32 = LLT::vector(3, 32);
  const LLT V4S32 = LLT::vector(4, 32);
  const LLT V5S32 = LLT::vector(5, 32);
  const LLT V6S32 = LLT::vector(6, 32);
  const LLT V7S32 = LLT::vector(7, 32);
  const LLT V8S32 = LLT::vector(8, 32);
  const LLT V9S32 = LLT::vector(9, 32);
  const LLT V10S32 = LLT::vector(10, 32);
  const LLT V11S32 = LLT::vector(11, 32);
  const LLT V12S32 = LLT::vector(12, 32);
  const LLT V13S32 = LLT::vector(13, 32);
  const LLT V14S32 = LLT::vector(14, 32);
  const LLT V15S32 = LLT::vector(15, 32);
  const LLT V16S32 = LLT::vector(16, 32);

  const LLT V2S64 = LLT::vector(2, 64);
  const LLT V3S64 = LLT::vector(3, 64);
  const LLT V4S64 = LLT::vector(4, 64);
  const LLT V5S64 = LLT::vector(5, 64);
  const LLT V6S64 = LLT::vector(6, 64);
  const LLT V7S64 = LLT::vector(7, 64);
  const LLT V8S64 = LLT::vector(8, 64);

  std::initializer_list<LLT> AllS32Vectors =
    {V2S32, V3S32, V4S32, V5S32, V6S32, V7S32, V8S32,
     V9S32, V10S32, V11S32, V12S32, V13S32, V14S32, V15S32, V16S32};
  std::initializer_list<LLT> AllS64Vectors =
    {V2S64, V3S64, V4S64, V5S64, V6S64, V7S64, V8S64};

  const LLT GlobalPtr = GetAddrSpacePtr(AMDGPUAS::GLOBAL_ADDRESS);
  const LLT ConstantPtr = GetAddrSpacePtr(AMDGPUAS::CONSTANT_ADDRESS);
  const LLT LocalPtr = GetAddrSpacePtr(AMDGPUAS::LOCAL_ADDRESS);
  const LLT FlatPtr = GetAddrSpacePtr(AMDGPUAS::FLAT_ADDRESS);
  const LLT PrivatePtr = GetAddrSpacePtr(AMDGPUAS::PRIVATE_ADDRESS);

  const LLT CodePtr = FlatPtr;

  const std::initializer_list<LLT> AddrSpaces64 = {
    GlobalPtr, ConstantPtr, FlatPtr
  };

  const std::initializer_list<LLT> AddrSpaces32 = {
    LocalPtr, PrivatePtr
  };

  const std::initializer_list<LLT> FPTypesBase = {
    S32, S64
  };

  const std::initializer_list<LLT> FPTypes16 = {
    S32, S64, S16
  };

  const std::initializer_list<LLT> FPTypesPK16 = {
    S32, S64, S16, V2S16
  };

  setAction({G_BRCOND, S1}, Legal);

  // TODO: All multiples of 32, vectors of pointers, all v2s16 pairs, more
  // elements for v3s16
  getActionDefinitionsBuilder(G_PHI)
    .legalFor({S32, S64, V2S16, V4S16, S1, S128, S256})
    .legalFor(AllS32Vectors)
    .legalFor(AllS64Vectors)
    .legalFor(AddrSpaces64)
    .legalFor(AddrSpaces32)
    .clampScalar(0, S32, S256)
    .widenScalarToNextPow2(0, 32)
    .clampMaxNumElements(0, S32, 16)
    .moreElementsIf(isSmallOddVector(0), oneMoreElement(0))
    .legalIf(isPointer(0));

  if (ST.has16BitInsts()) {
    getActionDefinitionsBuilder({G_ADD, G_SUB, G_MUL})
      .legalFor({S32, S16})
      .clampScalar(0, S16, S32)
      .scalarize(0);
  } else {
    getActionDefinitionsBuilder({G_ADD, G_SUB, G_MUL})
      .legalFor({S32})
      .clampScalar(0, S32, S32)
      .scalarize(0);
  }

  getActionDefinitionsBuilder({G_UMULH, G_SMULH})
    .legalFor({S32})
    .clampScalar(0, S32, S32)
    .scalarize(0);

  // Report legal for any types we can handle anywhere. For the cases only
  // legal on the SALU, RegBankSelect will be able to re-legalize.
  getActionDefinitionsBuilder({G_AND, G_OR, G_XOR})
    .legalFor({S32, S1, S64, V2S32, V2S16, V4S16})
    .clampScalar(0, S32, S64)
    .moreElementsIf(isSmallOddVector(0), oneMoreElement(0))
    .fewerElementsIf(vectorWiderThan(0, 32), fewerEltsToSize64Vector(0))
    .widenScalarToNextPow2(0)
    .scalarize(0);

  getActionDefinitionsBuilder({G_UADDO, G_SADDO, G_USUBO, G_SSUBO,
                               G_UADDE, G_SADDE, G_USUBE, G_SSUBE})
    .legalFor({{S32, S1}})
    .clampScalar(0, S32, S32);

  getActionDefinitionsBuilder(G_BITCAST)
    .legalForCartesianProduct({S32, V2S16})
    .legalForCartesianProduct({S64, V2S32, V4S16})
    .legalForCartesianProduct({V2S64, V4S32})
    // Don't worry about the size constraint.
    .legalIf(all(isPointer(0), isPointer(1)));
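
  // legalForCartesianProduct marks every (type0, type1) pair drawn from the
  // given set as legal, so the rules above make the same-sized casts
  // s64 <-> v2s32, s64 <-> v4s16, and v2s32 <-> v4s16 legal in one shot.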

  if (ST.has16BitInsts()) {
    getActionDefinitionsBuilder(G_FCONSTANT)
      .legalFor({S32, S64, S16})
      .clampScalar(0, S16, S64);
  } else {
    getActionDefinitionsBuilder(G_FCONSTANT)
      .legalFor({S32, S64})
      .clampScalar(0, S32, S64);
  }

  getActionDefinitionsBuilder(G_IMPLICIT_DEF)
    .legalFor({S1, S32, S64, V2S32, V4S32, V2S16, V4S16, GlobalPtr,
               ConstantPtr, LocalPtr, FlatPtr, PrivatePtr})
    .moreElementsIf(isSmallOddVector(0), oneMoreElement(0))
    .clampScalarOrElt(0, S32, S512)
    .legalIf(isMultiple32(0))
    .widenScalarToNextPow2(0, 32)
    .clampMaxNumElements(0, S32, 16);

  // FIXME: i1 operands to intrinsics should always be legal, but other i1
  // values may not be legal. We need to figure out how to distinguish
  // between these two scenarios.
  getActionDefinitionsBuilder(G_CONSTANT)
    .legalFor({S1, S32, S64, GlobalPtr,
               LocalPtr, ConstantPtr, PrivatePtr, FlatPtr})
    .clampScalar(0, S32, S64)
    .widenScalarToNextPow2(0)
    .legalIf(isPointer(0));

  setAction({G_FRAME_INDEX, PrivatePtr}, Legal);

  auto &FPOpActions = getActionDefinitionsBuilder(
    {G_FADD, G_FMUL, G_FNEG, G_FABS, G_FMA, G_FCANONICALIZE})
    .legalFor({S32, S64});

  if (ST.has16BitInsts()) {
    if (ST.hasVOP3PInsts())
      FPOpActions.legalFor({S16, V2S16});
    else
      FPOpActions.legalFor({S16});
  }

  auto &MinNumMaxNum = getActionDefinitionsBuilder({
      G_FMINNUM, G_FMAXNUM, G_FMINNUM_IEEE, G_FMAXNUM_IEEE});

  if (ST.hasVOP3PInsts()) {
    MinNumMaxNum.customFor(FPTypesPK16)
      .clampMaxNumElements(0, S16, 2)
      .clampScalar(0, S16, S64)
      .scalarize(0);
  } else if (ST.has16BitInsts()) {
    MinNumMaxNum.customFor(FPTypes16)
      .clampScalar(0, S16, S64)
      .scalarize(0);
  } else {
    MinNumMaxNum.customFor(FPTypesBase)
      .clampScalar(0, S32, S64)
      .scalarize(0);
  }
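
  // The custom action defers to legalizeMinNumMaxNum() below: with ieee_mode
  // enabled, plain G_FMINNUM/G_FMAXNUM must be expanded because the hardware
  // min/max instructions follow the IEEE-variant semantics in that mode;
  // with ieee_mode disabled the plain ops already match the instructions.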

  // TODO: Implement
  getActionDefinitionsBuilder({G_FMINIMUM, G_FMAXIMUM}).lower();

  if (ST.hasVOP3PInsts())
    FPOpActions.clampMaxNumElements(0, S16, 2);
  FPOpActions
    .scalarize(0)
    .clampScalar(0, ST.has16BitInsts() ? S16 : S32, S64);

  if (ST.has16BitInsts()) {
    getActionDefinitionsBuilder(G_FSQRT)
      .legalFor({S32, S64, S16})
      .scalarize(0)
      .clampScalar(0, S16, S64);
  } else {
    getActionDefinitionsBuilder(G_FSQRT)
      .legalFor({S32, S64})
      .scalarize(0)
      .clampScalar(0, S32, S64);
  }

  getActionDefinitionsBuilder(G_FPTRUNC)
    .legalFor({{S32, S64}, {S16, S32}})
    .scalarize(0);

  getActionDefinitionsBuilder(G_FPEXT)
    .legalFor({{S64, S32}, {S32, S16}})
    .lowerFor({{S64, S16}}) // FIXME: Implement
    .scalarize(0);

  // TODO: Verify V_BFI_B32 is generated from expanded bit ops.
  getActionDefinitionsBuilder(G_FCOPYSIGN).lower();

  getActionDefinitionsBuilder(G_FSUB)
    // Use actual fsub instruction
    .legalFor({S32})
    // Must use fadd + fneg
    .lowerFor({S64, S16, V2S16})
    .scalarize(0)
    .clampScalar(0, S32, S64);
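
  // A rough sketch of the standard fsub lowering referenced above:
  //   %d:_(s64) = G_FSUB %a, %b
  // becomes
  //   %n:_(s64) = G_FNEG %b
  //   %d:_(s64) = G_FADD %a, %n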

  getActionDefinitionsBuilder({G_SEXT, G_ZEXT, G_ANYEXT})
    .legalFor({{S64, S32}, {S32, S16}, {S64, S16},
               {S32, S1}, {S64, S1}, {S16, S1},
               // FIXME: Hack
               {S64, LLT::scalar(33)},
               {S32, S8}, {S128, S32}, {S128, S64}, {S32, LLT::scalar(24)}})
    .scalarize(0);

  getActionDefinitionsBuilder({G_SITOFP, G_UITOFP})
    .legalFor({{S32, S32}, {S64, S32}})
    .lowerFor({{S32, S64}})
    .customFor({{S64, S64}})
    .scalarize(0);

  getActionDefinitionsBuilder({G_FPTOSI, G_FPTOUI})
    .legalFor({{S32, S32}, {S32, S64}})
    .scalarize(0);

  getActionDefinitionsBuilder(G_INTRINSIC_ROUND)
    .legalFor({S32, S64})
    .scalarize(0);

  if (ST.getGeneration() >= AMDGPUSubtarget::SEA_ISLANDS) {
    getActionDefinitionsBuilder({G_INTRINSIC_TRUNC, G_FCEIL, G_FRINT})
      .legalFor({S32, S64})
      .clampScalar(0, S32, S64)
      .scalarize(0);
  } else {
    getActionDefinitionsBuilder({G_INTRINSIC_TRUNC, G_FCEIL, G_FRINT})
      .legalFor({S32})
      .customFor({S64})
      .clampScalar(0, S32, S64)
      .scalarize(0);
  }

  getActionDefinitionsBuilder(G_GEP)
    .legalForCartesianProduct(AddrSpaces64, {S64})
    .legalForCartesianProduct(AddrSpaces32, {S32})
    .scalarize(0);

  setAction({G_BLOCK_ADDR, CodePtr}, Legal);

  auto &CmpBuilder =
    getActionDefinitionsBuilder(G_ICMP)
    .legalForCartesianProduct(
      {S1}, {S32, S64, GlobalPtr, LocalPtr, ConstantPtr, PrivatePtr, FlatPtr})
    .legalFor({{S1, S32}, {S1, S64}});
  if (ST.has16BitInsts()) {
    CmpBuilder.legalFor({{S1, S16}});
  }

  CmpBuilder
    .widenScalarToNextPow2(1)
    .clampScalar(1, S32, S64)
    .scalarize(0)
    .legalIf(all(typeIs(0, S1), isPointer(1)));

  getActionDefinitionsBuilder(G_FCMP)
    .legalForCartesianProduct({S1}, ST.has16BitInsts() ? FPTypes16 : FPTypesBase)
    .widenScalarToNextPow2(1)
    .clampScalar(1, S32, S64)
    .scalarize(0);

  // FIXME: fexp, flog2, flog10 need to be custom lowered.
  getActionDefinitionsBuilder({G_FPOW, G_FEXP, G_FEXP2,
                               G_FLOG, G_FLOG2, G_FLOG10})
    .legalFor({S32})
    .scalarize(0);

  // The 64-bit versions produce 32-bit results, but only on the SALU.
  getActionDefinitionsBuilder({G_CTLZ, G_CTLZ_ZERO_UNDEF,
                               G_CTTZ, G_CTTZ_ZERO_UNDEF,
                               G_CTPOP})
    .legalFor({{S32, S32}, {S32, S64}})
    .clampScalar(0, S32, S32)
    .clampScalar(1, S32, S64)
    .scalarize(0)
    .widenScalarToNextPow2(0, 32)
    .widenScalarToNextPow2(1, 32);

  // TODO: Expand for > s32
  getActionDefinitionsBuilder(G_BSWAP)
    .legalFor({S32})
    .clampScalar(0, S32, S32)
    .scalarize(0);

  if (ST.has16BitInsts()) {
    if (ST.hasVOP3PInsts()) {
      getActionDefinitionsBuilder({G_SMIN, G_SMAX, G_UMIN, G_UMAX})
        .legalFor({S32, S16, V2S16})
        .moreElementsIf(isSmallOddVector(0), oneMoreElement(0))
        .clampMaxNumElements(0, S16, 2)
        .clampScalar(0, S16, S32)
        .widenScalarToNextPow2(0)
        .scalarize(0);
    } else {
      getActionDefinitionsBuilder({G_SMIN, G_SMAX, G_UMIN, G_UMAX})
        .legalFor({S32, S16})
        .widenScalarToNextPow2(0)
        .clampScalar(0, S16, S32)
        .scalarize(0);
    }
  } else {
    getActionDefinitionsBuilder({G_SMIN, G_SMAX, G_UMIN, G_UMAX})
      .legalFor({S32})
      .clampScalar(0, S32, S32)
      .widenScalarToNextPow2(0)
      .scalarize(0);
  }

  auto smallerThan = [](unsigned TypeIdx0, unsigned TypeIdx1) {
    return [=](const LegalityQuery &Query) {
      return Query.Types[TypeIdx0].getSizeInBits() <
             Query.Types[TypeIdx1].getSizeInBits();
    };
  };

  auto greaterThan = [](unsigned TypeIdx0, unsigned TypeIdx1) {
    return [=](const LegalityQuery &Query) {
      return Query.Types[TypeIdx0].getSizeInBits() >
             Query.Types[TypeIdx1].getSizeInBits();
    };
  };

  getActionDefinitionsBuilder(G_INTTOPTR)
    // List the common cases
    .legalForCartesianProduct(AddrSpaces64, {S64})
    .legalForCartesianProduct(AddrSpaces32, {S32})
    .scalarize(0)
    // Accept any address space as long as the size matches
    .legalIf(sameSize(0, 1))
    .widenScalarIf(smallerThan(1, 0),
      [](const LegalityQuery &Query) {
        return std::make_pair(1, LLT::scalar(Query.Types[0].getSizeInBits()));
      })
    .narrowScalarIf(greaterThan(1, 0),
      [](const LegalityQuery &Query) {
        return std::make_pair(1, LLT::scalar(Query.Types[0].getSizeInBits()));
      });

  getActionDefinitionsBuilder(G_PTRTOINT)
    // List the common cases
    .legalForCartesianProduct(AddrSpaces64, {S64})
    .legalForCartesianProduct(AddrSpaces32, {S32})
    .scalarize(0)
    // Accept any address space as long as the size matches
    .legalIf(sameSize(0, 1))
    .widenScalarIf(smallerThan(0, 1),
      [](const LegalityQuery &Query) {
        return std::make_pair(0, LLT::scalar(Query.Types[1].getSizeInBits()));
      })
    .narrowScalarIf(
      greaterThan(0, 1),
      [](const LegalityQuery &Query) {
        return std::make_pair(0, LLT::scalar(Query.Types[1].getSizeInBits()));
      });
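
  // Example of the effect: a G_INTTOPTR producing a 64-bit flat pointer from
  // an s32 first widens the integer operand to s64, and from an s128 first
  // narrows it to s64; G_PTRTOINT mutates its integer result the same way,
  // keyed off the pointer operand's size.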

  if (ST.hasFlatAddressSpace()) {
    getActionDefinitionsBuilder(G_ADDRSPACE_CAST)
      .scalarize(0)
      .custom();
  }

  getActionDefinitionsBuilder({G_LOAD, G_STORE})
    .narrowScalarIf([](const LegalityQuery &Query) {
        unsigned Size = Query.Types[0].getSizeInBits();
        unsigned MemSize = Query.MMODescrs[0].SizeInBits;
        return (Size > 32 && MemSize < Size);
      },
      [](const LegalityQuery &Query) {
        return std::make_pair(0, LLT::scalar(32));
      })
    .fewerElementsIf([=](const LegalityQuery &Query) {
        unsigned MemSize = Query.MMODescrs[0].SizeInBits;
        return (MemSize == 96) &&
               Query.Types[0].isVector() &&
               !ST.hasDwordx3LoadStores();
      },
      [=](const LegalityQuery &Query) {
        return std::make_pair(0, V2S32);
      })
    .legalIf([=](const LegalityQuery &Query) {
        const LLT &Ty0 = Query.Types[0];

        unsigned Size = Ty0.getSizeInBits();
        unsigned MemSize = Query.MMODescrs[0].SizeInBits;
        if (Size < 32 || (Size > 32 && MemSize < Size))
          return false;

        if (Ty0.isVector() && Size != MemSize)
          return false;

        // TODO: Decompose private loads into 4-byte components.
        // TODO: Illegal flat loads on SI
        switch (MemSize) {
        case 8:
        case 16:
          return Size == 32;
        case 32:
        case 64:
        case 128:
          return true;

        case 96:
          return ST.hasDwordx3LoadStores();

        case 256:
        case 512:
          // TODO: Possibly support loads of i256 and i512. This will require
          // adding i256 and i512 types to MVT in order to be able to use
          // TableGen.
          // TODO: Add support for other vector types; this will require
          // defining more value mappings for the new types.
          return Ty0.isVector() && (Ty0.getScalarType().getSizeInBits() == 32 ||
                                    Ty0.getScalarType().getSizeInBits() == 64);

        default:
          return false;
        }
      })
    .clampScalar(0, S32, S64);
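
  // Net effect of the rules above: 8- and 16-bit memory accesses are only
  // legal with a 32-bit register type (i.e. as extending loads / truncating
  // stores), 96-bit vector accesses require the dwordx3 instructions, and
  // 256- and 512-bit accesses must be vectors of 32- or 64-bit elements.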

  // FIXME: Handle alignment requirements.
  auto &ExtLoads = getActionDefinitionsBuilder({G_SEXTLOAD, G_ZEXTLOAD})
    .legalForTypesWithMemDesc({
        {S32, GlobalPtr, 8, 8},
        {S32, GlobalPtr, 16, 8},
        {S32, LocalPtr, 8, 8},
        {S32, LocalPtr, 16, 8},
        {S32, PrivatePtr, 8, 8},
        {S32, PrivatePtr, 16, 8}});
  if (ST.hasFlatAddressSpace()) {
    ExtLoads.legalForTypesWithMemDesc({{S32, FlatPtr, 8, 8},
                                       {S32, FlatPtr, 16, 8}});
  }

  ExtLoads.clampScalar(0, S32, S32)
          .widenScalarToNextPow2(0)
          .unsupportedIfMemSizeNotPow2()
          .lower();

  auto &Atomics = getActionDefinitionsBuilder(
    {G_ATOMICRMW_XCHG, G_ATOMICRMW_ADD, G_ATOMICRMW_SUB,
     G_ATOMICRMW_AND, G_ATOMICRMW_OR, G_ATOMICRMW_XOR,
     G_ATOMICRMW_MAX, G_ATOMICRMW_MIN, G_ATOMICRMW_UMAX,
     G_ATOMICRMW_UMIN, G_ATOMIC_CMPXCHG})
    .legalFor({{S32, GlobalPtr}, {S32, LocalPtr},
               {S64, GlobalPtr}, {S64, LocalPtr}});
  if (ST.hasFlatAddressSpace()) {
    Atomics.legalFor({{S32, FlatPtr}, {S64, FlatPtr}});
  }

  // TODO: Pointer types, any 32-bit or 64-bit vector
  getActionDefinitionsBuilder(G_SELECT)
    .legalForCartesianProduct({S32, S64, S16, V2S32, V2S16, V4S16,
                               GlobalPtr, LocalPtr, FlatPtr, PrivatePtr,
                               LLT::vector(2, LocalPtr),
                               LLT::vector(2, PrivatePtr)}, {S1})
    .clampScalar(0, S16, S64)
    .moreElementsIf(isSmallOddVector(0), oneMoreElement(0))
    .fewerElementsIf(numElementsNotEven(0), scalarize(0))
    .scalarize(1)
    .clampMaxNumElements(0, S32, 2)
    .clampMaxNumElements(0, LocalPtr, 2)
    .clampMaxNumElements(0, PrivatePtr, 2)
    .scalarize(0)
    .widenScalarToNextPow2(0)
    .legalIf(all(isPointer(0), typeIs(1, S1)));

  // TODO: Only the low 4/5/6 bits of the shift amount are observed, so we can
  // be more flexible with the shift amount type.
  auto &Shifts = getActionDefinitionsBuilder({G_SHL, G_LSHR, G_ASHR})
    .legalFor({{S32, S32}, {S64, S32}});
  if (ST.has16BitInsts()) {
    if (ST.hasVOP3PInsts()) {
      Shifts.legalFor({{S16, S32}, {S16, S16}, {V2S16, V2S16}})
            .clampMaxNumElements(0, S16, 2);
    } else
      Shifts.legalFor({{S16, S32}, {S16, S16}});

    Shifts.clampScalar(1, S16, S32);
    Shifts.clampScalar(0, S16, S64);
    Shifts.widenScalarToNextPow2(0, 16);
  } else {
    // Make sure we legalize the shift amount type first, as the general
    // expansion for the shifted type will produce much worse code if it hasn't
    // been truncated already.
    Shifts.clampScalar(1, S32, S32);
    Shifts.clampScalar(0, S32, S64);
    Shifts.widenScalarToNextPow2(0, 32);
  }
  Shifts.scalarize(0);

  for (unsigned Op : {G_EXTRACT_VECTOR_ELT, G_INSERT_VECTOR_ELT}) {
    unsigned VecTypeIdx = Op == G_EXTRACT_VECTOR_ELT ? 1 : 0;
    unsigned EltTypeIdx = Op == G_EXTRACT_VECTOR_ELT ? 0 : 1;
    unsigned IdxTypeIdx = 2;

    getActionDefinitionsBuilder(Op)
      .customIf([=](const LegalityQuery &Query) {
          const LLT EltTy = Query.Types[EltTypeIdx];
          const LLT VecTy = Query.Types[VecTypeIdx];
          const LLT IdxTy = Query.Types[IdxTypeIdx];
          return (EltTy.getSizeInBits() == 16 ||
                  EltTy.getSizeInBits() % 32 == 0) &&
                 VecTy.getSizeInBits() % 32 == 0 &&
                 VecTy.getSizeInBits() <= 512 &&
                 IdxTy.getSizeInBits() == 32;
        })
      .clampScalar(EltTypeIdx, S32, S64)
      .clampScalar(VecTypeIdx, S32, S64)
      .clampScalar(IdxTypeIdx, S32, S32);
  }

  getActionDefinitionsBuilder(G_EXTRACT_VECTOR_ELT)
    .unsupportedIf([=](const LegalityQuery &Query) {
        const LLT &EltTy = Query.Types[1].getElementType();
        return Query.Types[0] != EltTy;
      });

  for (unsigned Op : {G_EXTRACT, G_INSERT}) {
    unsigned BigTyIdx = Op == G_EXTRACT ? 1 : 0;
    unsigned LitTyIdx = Op == G_EXTRACT ? 0 : 1;

    // FIXME: Doesn't handle extract of illegal sizes.
    getActionDefinitionsBuilder(Op)
      .legalIf([=](const LegalityQuery &Query) {
          const LLT BigTy = Query.Types[BigTyIdx];
          const LLT LitTy = Query.Types[LitTyIdx];
          return (BigTy.getSizeInBits() % 32 == 0) &&
                 (LitTy.getSizeInBits() % 16 == 0);
        })
      .widenScalarIf(
        [=](const LegalityQuery &Query) {
          const LLT BigTy = Query.Types[BigTyIdx];
          return (BigTy.getScalarSizeInBits() < 16);
        },
        LegalizeMutations::widenScalarOrEltToNextPow2(BigTyIdx, 16))
      .widenScalarIf(
        [=](const LegalityQuery &Query) {
          const LLT LitTy = Query.Types[LitTyIdx];
          return (LitTy.getScalarSizeInBits() < 16);
        },
        LegalizeMutations::widenScalarOrEltToNextPow2(LitTyIdx, 16))
      .moreElementsIf(isSmallOddVector(BigTyIdx), oneMoreElement(BigTyIdx))
      .widenScalarToNextPow2(BigTyIdx, 32);
  }

  getActionDefinitionsBuilder(G_BUILD_VECTOR)
    .legalForCartesianProduct(AllS32Vectors, {S32})
    .legalForCartesianProduct(AllS64Vectors, {S64})
    .clampNumElements(0, V16S32, V16S32)
    .clampNumElements(0, V2S64, V8S64)
    .minScalarSameAs(1, 0)
    .legalIf(isRegisterType(0))
    .minScalarOrElt(0, S32);

  getActionDefinitionsBuilder(G_CONCAT_VECTORS)
    .legalIf(isRegisterType(0));

  // Merge/Unmerge
  for (unsigned Op : {G_MERGE_VALUES, G_UNMERGE_VALUES}) {
    unsigned BigTyIdx = Op == G_MERGE_VALUES ? 0 : 1;
    unsigned LitTyIdx = Op == G_MERGE_VALUES ? 1 : 0;

    auto notValidElt = [=](const LegalityQuery &Query, unsigned TypeIdx) {
      const LLT &Ty = Query.Types[TypeIdx];
      if (Ty.isVector()) {
        const LLT &EltTy = Ty.getElementType();
        if (EltTy.getSizeInBits() < 8 || EltTy.getSizeInBits() > 64)
          return true;
        if (!isPowerOf2_32(EltTy.getSizeInBits()))
          return true;
      }
      return false;
    };

    getActionDefinitionsBuilder(Op)
      .widenScalarToNextPow2(LitTyIdx, /*Min*/ 16)
      // Clamp the little scalar to s8-s256 and make it a power of 2. It's not
      // worth considering the multiples of 64 since 2*192 and 2*384 are not
      // valid.
      .clampScalar(LitTyIdx, S16, S256)
      .widenScalarToNextPow2(LitTyIdx, /*Min*/ 32)

      // Break up vectors with weird elements into scalars
      .fewerElementsIf(
        [=](const LegalityQuery &Query) { return notValidElt(Query, 0); },
        scalarize(0))
      .fewerElementsIf(
        [=](const LegalityQuery &Query) { return notValidElt(Query, 1); },
        scalarize(1))
      .clampScalar(BigTyIdx, S32, S512)
      .widenScalarIf(
        [=](const LegalityQuery &Query) {
          const LLT &Ty = Query.Types[BigTyIdx];
          return !isPowerOf2_32(Ty.getSizeInBits()) &&
                 Ty.getSizeInBits() % 16 != 0;
        },
        [=](const LegalityQuery &Query) {
          // Pick the next power of 2, or a multiple of 64 over 128.
          // Whichever is smaller.
          const LLT &Ty = Query.Types[BigTyIdx];
          unsigned NewSizeInBits = 1 << Log2_32_Ceil(Ty.getSizeInBits() + 1);
          if (NewSizeInBits >= 256) {
            unsigned RoundedTo = alignTo<64>(Ty.getSizeInBits() + 1);
            if (RoundedTo < NewSizeInBits)
              NewSizeInBits = RoundedTo;
          }
          return std::make_pair(BigTyIdx, LLT::scalar(NewSizeInBits));
        })
      .legalIf([=](const LegalityQuery &Query) {
          const LLT &BigTy = Query.Types[BigTyIdx];
          const LLT &LitTy = Query.Types[LitTyIdx];

          if (BigTy.isVector() && BigTy.getSizeInBits() < 32)
            return false;
          if (LitTy.isVector() && LitTy.getSizeInBits() < 32)
            return false;

          return BigTy.getSizeInBits() % 16 == 0 &&
                 LitTy.getSizeInBits() % 16 == 0 &&
                 BigTy.getSizeInBits() <= 512;
        })
      // Any vectors left are the wrong size. Scalarize them.
      .scalarize(0)
      .scalarize(1);
  }

  computeTables();
  verify(*ST.getInstrInfo());
}

bool AMDGPULegalizerInfo::legalizeCustom(MachineInstr &MI,
                                         MachineRegisterInfo &MRI,
                                         MachineIRBuilder &MIRBuilder,
                                         GISelChangeObserver &Observer) const {
  switch (MI.getOpcode()) {
  case TargetOpcode::G_ADDRSPACE_CAST:
    return legalizeAddrSpaceCast(MI, MRI, MIRBuilder);
  case TargetOpcode::G_FRINT:
    return legalizeFrint(MI, MRI, MIRBuilder);
  case TargetOpcode::G_FCEIL:
    return legalizeFceil(MI, MRI, MIRBuilder);
  case TargetOpcode::G_INTRINSIC_TRUNC:
    return legalizeIntrinsicTrunc(MI, MRI, MIRBuilder);
  case TargetOpcode::G_SITOFP:
    return legalizeITOFP(MI, MRI, MIRBuilder, true);
  case TargetOpcode::G_UITOFP:
    return legalizeITOFP(MI, MRI, MIRBuilder, false);
  case TargetOpcode::G_FMINNUM:
  case TargetOpcode::G_FMAXNUM:
  case TargetOpcode::G_FMINNUM_IEEE:
  case TargetOpcode::G_FMAXNUM_IEEE:
    return legalizeMinNumMaxNum(MI, MRI, MIRBuilder);
  case TargetOpcode::G_EXTRACT_VECTOR_ELT:
    return legalizeExtractVectorElt(MI, MRI, MIRBuilder);
  case TargetOpcode::G_INSERT_VECTOR_ELT:
    return true; // TODO
  default:
    return false;
  }

  llvm_unreachable("expected switch to return");
}

Register AMDGPULegalizerInfo::getSegmentAperture(
  unsigned AS,
  MachineRegisterInfo &MRI,
  MachineIRBuilder &MIRBuilder) const {
  MachineFunction &MF = MIRBuilder.getMF();
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const LLT S32 = LLT::scalar(32);

  if (ST.hasApertureRegs()) {
    // FIXME: Use inline constants (src_{shared, private}_base) instead of
    // getreg.
    unsigned Offset = AS == AMDGPUAS::LOCAL_ADDRESS ?
        AMDGPU::Hwreg::OFFSET_SRC_SHARED_BASE :
        AMDGPU::Hwreg::OFFSET_SRC_PRIVATE_BASE;
    unsigned WidthM1 = AS == AMDGPUAS::LOCAL_ADDRESS ?
        AMDGPU::Hwreg::WIDTH_M1_SRC_SHARED_BASE :
        AMDGPU::Hwreg::WIDTH_M1_SRC_PRIVATE_BASE;
    unsigned Encoding =
        AMDGPU::Hwreg::ID_MEM_BASES << AMDGPU::Hwreg::ID_SHIFT_ |
        Offset << AMDGPU::Hwreg::OFFSET_SHIFT_ |
        WidthM1 << AMDGPU::Hwreg::WIDTH_M1_SHIFT_;

    Register ApertureReg = MRI.createGenericVirtualRegister(S32);
    Register GetReg = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);

    MIRBuilder.buildInstr(AMDGPU::S_GETREG_B32)
      .addDef(GetReg)
      .addImm(Encoding);
    MRI.setType(GetReg, S32);

    auto ShiftAmt = MIRBuilder.buildConstant(S32, WidthM1 + 1);
    MIRBuilder.buildInstr(TargetOpcode::G_SHL)
      .addDef(ApertureReg)
      .addUse(GetReg)
      .addUse(ShiftAmt.getReg(0));

    return ApertureReg;
  }

  Register QueuePtr = MRI.createGenericVirtualRegister(
    LLT::pointer(AMDGPUAS::CONSTANT_ADDRESS, 64));

  // FIXME: Placeholder until we can track the input registers.
  MIRBuilder.buildConstant(QueuePtr, 0xdeadbeef);

  // Offset into amd_queue_t for group_segment_aperture_base_hi /
  // private_segment_aperture_base_hi.
  uint32_t StructOffset = (AS == AMDGPUAS::LOCAL_ADDRESS) ? 0x40 : 0x44;

  // FIXME: Don't use undef
  Value *V = UndefValue::get(PointerType::get(
    Type::getInt8Ty(MF.getFunction().getContext()),
    AMDGPUAS::CONSTANT_ADDRESS));

  MachinePointerInfo PtrInfo(V, StructOffset);
  MachineMemOperand *MMO = MF.getMachineMemOperand(
    PtrInfo,
    MachineMemOperand::MOLoad |
    MachineMemOperand::MODereferenceable |
    MachineMemOperand::MOInvariant,
    4,
    MinAlign(64, StructOffset));

  Register LoadResult = MRI.createGenericVirtualRegister(S32);
  Register LoadAddr;

  MIRBuilder.materializeGEP(LoadAddr, QueuePtr, LLT::scalar(64), StructOffset);
  MIRBuilder.buildLoad(LoadResult, LoadAddr, *MMO);
  return LoadResult;
}
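
// A non-trivial addrspacecast needs two ingredients: a null-pointer check
// (the segment null value must map to the flat null value and vice versa),
// and the 32-bit aperture that forms the high half of the 64-bit flat
// address. Casting flat -> local/private keeps only the low 32 bits of the
// pointer; local/private -> flat merges the 32-bit offset with the aperture
// returned by getSegmentAperture() above.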
bool AMDGPULegalizerInfo::legalizeAddrSpaceCast(
  MachineInstr &MI, MachineRegisterInfo &MRI,
  MachineIRBuilder &MIRBuilder) const {
  MachineFunction &MF = MIRBuilder.getMF();

  MIRBuilder.setInstr(MI);

  Register Dst = MI.getOperand(0).getReg();
  Register Src = MI.getOperand(1).getReg();

  LLT DstTy = MRI.getType(Dst);
  LLT SrcTy = MRI.getType(Src);
  unsigned DestAS = DstTy.getAddressSpace();
  unsigned SrcAS = SrcTy.getAddressSpace();

  // TODO: Avoid reloading from the queue ptr for each cast, or at least each
  // vector element.
  assert(!DstTy.isVector());

  const AMDGPUTargetMachine &TM
    = static_cast<const AMDGPUTargetMachine &>(MF.getTarget());

  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  if (ST.getTargetLowering()->isNoopAddrSpaceCast(SrcAS, DestAS)) {
    MI.setDesc(MIRBuilder.getTII().get(TargetOpcode::G_BITCAST));
    return true;
  }

  if (SrcAS == AMDGPUAS::FLAT_ADDRESS) {
    assert(DestAS == AMDGPUAS::LOCAL_ADDRESS ||
           DestAS == AMDGPUAS::PRIVATE_ADDRESS);
    unsigned NullVal = TM.getNullPointerValue(DestAS);

    auto SegmentNull = MIRBuilder.buildConstant(DstTy, NullVal);
    auto FlatNull = MIRBuilder.buildConstant(SrcTy, 0);

    Register PtrLo32 = MRI.createGenericVirtualRegister(DstTy);

    // Extract low 32-bits of the pointer.
    MIRBuilder.buildExtract(PtrLo32, Src, 0);

    Register CmpRes = MRI.createGenericVirtualRegister(LLT::scalar(1));
    MIRBuilder.buildICmp(CmpInst::ICMP_NE, CmpRes, Src, FlatNull.getReg(0));
    MIRBuilder.buildSelect(Dst, CmpRes, PtrLo32, SegmentNull.getReg(0));

    MI.eraseFromParent();
    return true;
  }

  assert(SrcAS == AMDGPUAS::LOCAL_ADDRESS ||
         SrcAS == AMDGPUAS::PRIVATE_ADDRESS);

  auto SegmentNull =
      MIRBuilder.buildConstant(SrcTy, TM.getNullPointerValue(SrcAS));
  auto FlatNull =
      MIRBuilder.buildConstant(DstTy, TM.getNullPointerValue(DestAS));

  Register ApertureReg = getSegmentAperture(DestAS, MRI, MIRBuilder);

  Register CmpRes = MRI.createGenericVirtualRegister(LLT::scalar(1));
  MIRBuilder.buildICmp(CmpInst::ICMP_NE, CmpRes, Src, SegmentNull.getReg(0));

  Register BuildPtr = MRI.createGenericVirtualRegister(DstTy);

  // Coerce the type of the low half of the result so we can use merge_values.
  Register SrcAsInt = MRI.createGenericVirtualRegister(LLT::scalar(32));
  MIRBuilder.buildInstr(TargetOpcode::G_PTRTOINT)
    .addDef(SrcAsInt)
    .addUse(Src);

  // TODO: Should we allow mismatched types but matching sizes in merges to
  // avoid the ptrtoint?
  MIRBuilder.buildMerge(BuildPtr, {SrcAsInt, ApertureReg});
  MIRBuilder.buildSelect(Dst, CmpRes, BuildPtr, FlatNull.getReg(0));

  MI.eraseFromParent();
  return true;
}

bool AMDGPULegalizerInfo::legalizeFrint(
  MachineInstr &MI, MachineRegisterInfo &MRI,
  MachineIRBuilder &MIRBuilder) const {
  MIRBuilder.setInstr(MI);

  Register Src = MI.getOperand(1).getReg();
  LLT Ty = MRI.getType(Src);
  assert(Ty.isScalar() && Ty.getSizeInBits() == 64);

  APFloat C1Val(APFloat::IEEEdouble(), "0x1.0p+52");
  APFloat C2Val(APFloat::IEEEdouble(), "0x1.fffffffffffffp+51");
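
  // A note on the constants (the standard add/subtract rounding trick):
  // adding a sign-matched 2^52 to a double of smaller magnitude pushes the
  // fractional bits out of the significand, so subtracting it again yields
  // the input rounded to an integer. C1Val is 2^52; C2Val (2^52 - 0.5) is
  // the largest double below 2^52, and anything of larger magnitude is
  // already integral and is passed through by the final select.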

  auto C1 = MIRBuilder.buildFConstant(Ty, C1Val);
  auto CopySign = MIRBuilder.buildFCopysign(Ty, C1, Src);

  // TODO: Should this propagate fast-math-flags?
  auto Tmp1 = MIRBuilder.buildFAdd(Ty, Src, CopySign);
  auto Tmp2 = MIRBuilder.buildFSub(Ty, Tmp1, CopySign);

  auto C2 = MIRBuilder.buildFConstant(Ty, C2Val);
  auto Fabs = MIRBuilder.buildFAbs(Ty, Src);

  auto Cond = MIRBuilder.buildFCmp(CmpInst::FCMP_OGT, LLT::scalar(1), Fabs, C2);
  MIRBuilder.buildSelect(MI.getOperand(0).getReg(), Cond, Src, Tmp2);
  return true;
}

bool AMDGPULegalizerInfo::legalizeFceil(
  MachineInstr &MI, MachineRegisterInfo &MRI,
  MachineIRBuilder &B) const {
  B.setInstr(MI);

  const LLT S1 = LLT::scalar(1);
  const LLT S64 = LLT::scalar(64);

  Register Src = MI.getOperand(1).getReg();
  assert(MRI.getType(Src) == S64);

  // result = trunc(src)
  // if (src > 0.0 && src != result)
  //   result += 1.0

  auto Trunc = B.buildInstr(TargetOpcode::G_INTRINSIC_TRUNC, {S64}, {Src});

  const auto Zero = B.buildFConstant(S64, 0.0);
  const auto One = B.buildFConstant(S64, 1.0);
  auto Lt0 = B.buildFCmp(CmpInst::FCMP_OGT, S1, Src, Zero);
  auto NeTrunc = B.buildFCmp(CmpInst::FCMP_ONE, S1, Src, Trunc);
  auto And = B.buildAnd(S1, Lt0, NeTrunc);
  auto Add = B.buildSelect(S64, And, One, Zero);

  // TODO: Should this propagate fast-math-flags?
  B.buildFAdd(MI.getOperand(0).getReg(), Trunc, Add);
  return true;
}
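
// Helper for the f64 trunc expansion below. An IEEE-754 double has one sign
// bit, 11 exponent bits, and 52 fraction bits, with the stored exponent
// biased by 1023; extractF64Exponent pulls the exponent field out of the
// high 32 bits with the amdgcn.ubfe bitfield extract and subtracts the bias.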
static MachineInstrBuilder extractF64Exponent(unsigned Hi,
                                              MachineIRBuilder &B) {
  const unsigned FractBits = 52;
  const unsigned ExpBits = 11;
  LLT S32 = LLT::scalar(32);

  auto Const0 = B.buildConstant(S32, FractBits - 32);
  auto Const1 = B.buildConstant(S32, ExpBits);

  auto ExpPart = B.buildIntrinsic(Intrinsic::amdgcn_ubfe, {S32}, false)
    .addUse(Const0.getReg(0))
    .addUse(Const1.getReg(0));

  return B.buildSub(S32, ExpPart, B.buildConstant(S32, 1023));
}

bool AMDGPULegalizerInfo::legalizeIntrinsicTrunc(
  MachineInstr &MI, MachineRegisterInfo &MRI,
  MachineIRBuilder &B) const {
  B.setInstr(MI);

  const LLT S1 = LLT::scalar(1);
  const LLT S32 = LLT::scalar(32);
  const LLT S64 = LLT::scalar(64);

  Register Src = MI.getOperand(1).getReg();
  assert(MRI.getType(Src) == S64);

  // TODO: Should this use extract since the low half is unused?
  auto Unmerge = B.buildUnmerge({S32, S32}, Src);
  Register Hi = Unmerge.getReg(1);

  // Extract the upper half, since this is where we will find the sign and
  // exponent.
  auto Exp = extractF64Exponent(Hi, B);

  const unsigned FractBits = 52;

  // Extract the sign bit.
  const auto SignBitMask = B.buildConstant(S32, UINT32_C(1) << 31);
  auto SignBit = B.buildAnd(S32, Hi, SignBitMask);

  const auto FractMask = B.buildConstant(S64, (UINT64_C(1) << FractBits) - 1);

  const auto Zero32 = B.buildConstant(S32, 0);

  // Extend back to 64-bits.
  auto SignBit64 = B.buildMerge(S64, {Zero32.getReg(0), SignBit.getReg(0)});

  auto Shr = B.buildAShr(S64, FractMask, Exp);
  auto Not = B.buildNot(S64, Shr);
  auto Tmp0 = B.buildAnd(S64, Src, Not);
  auto FiftyOne = B.buildConstant(S32, FractBits - 1);

  auto ExpLt0 = B.buildICmp(CmpInst::ICMP_SLT, S1, Exp, Zero32);
  auto ExpGt51 = B.buildICmp(CmpInst::ICMP_SGT, S1, Exp, FiftyOne);

  auto Tmp1 = B.buildSelect(S64, ExpLt0, SignBit64, Tmp0);
  B.buildSelect(MI.getOperand(0).getReg(), ExpGt51, Src, Tmp1);
  return true;
}

bool AMDGPULegalizerInfo::legalizeITOFP(
  MachineInstr &MI, MachineRegisterInfo &MRI,
  MachineIRBuilder &B, bool Signed) const {
  B.setInstr(MI);

  Register Dst = MI.getOperand(0).getReg();
  Register Src = MI.getOperand(1).getReg();

  const LLT S64 = LLT::scalar(64);
  const LLT S32 = LLT::scalar(32);

  assert(MRI.getType(Src) == S64 && MRI.getType(Dst) == S64);
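
  // Split the 64-bit integer into 32-bit halves and convert each half
  // separately; the result is cvt(Hi) * 2^32 + cvt(Lo), with the 2^32
  // scaling done by amdgcn.ldexp. Only the high half carries the sign, so
  // only it uses the signed conversion for G_SITOFP.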
  auto Unmerge = B.buildUnmerge({S32, S32}, Src);

  auto CvtHi = Signed ?
    B.buildSITOFP(S64, Unmerge.getReg(1)) :
    B.buildUITOFP(S64, Unmerge.getReg(1));

  auto CvtLo = B.buildUITOFP(S64, Unmerge.getReg(0));

  auto ThirtyTwo = B.buildConstant(S32, 32);
  auto LdExp = B.buildIntrinsic(Intrinsic::amdgcn_ldexp, {S64}, false)
    .addUse(CvtHi.getReg(0))
    .addUse(ThirtyTwo.getReg(0));

  // TODO: Should this propagate fast-math-flags?
  B.buildFAdd(Dst, LdExp, CvtLo);
  MI.eraseFromParent();
  return true;
}

bool AMDGPULegalizerInfo::legalizeMinNumMaxNum(
  MachineInstr &MI, MachineRegisterInfo &MRI,
  MachineIRBuilder &B) const {
  MachineFunction &MF = B.getMF();
  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

  const bool IsIEEEOp = MI.getOpcode() == AMDGPU::G_FMINNUM_IEEE ||
                        MI.getOpcode() == AMDGPU::G_FMAXNUM_IEEE;

  // With ieee_mode disabled, the instructions already have the correct
  // behavior for G_FMINNUM/G_FMAXNUM.
  if (!MFI->getMode().IEEE)
    return !IsIEEEOp;

  if (IsIEEEOp)
    return true;

  MachineIRBuilder HelperBuilder(MI);
  GISelObserverWrapper DummyObserver;
  LegalizerHelper Helper(MF, DummyObserver, HelperBuilder);
  HelperBuilder.setMBB(*MI.getParent());
  return Helper.lowerFMinNumMaxNum(MI) == LegalizerHelper::Legalized;
}

bool AMDGPULegalizerInfo::legalizeExtractVectorElt(
  MachineInstr &MI, MachineRegisterInfo &MRI,
  MachineIRBuilder &B) const {
  // TODO: Should move some of this into LegalizerHelper.

  // TODO: Promote dynamic indexing of s16 to s32
  // TODO: Dynamic s64 indexing is only legal for SGPR.
  Optional<int64_t> IdxVal = getConstantVRegVal(MI.getOperand(2).getReg(), MRI);
  if (!IdxVal) // Dynamic case will be selected to register indexing.
    return true;

  Register Dst = MI.getOperand(0).getReg();
  Register Vec = MI.getOperand(1).getReg();

  LLT VecTy = MRI.getType(Vec);
  LLT EltTy = VecTy.getElementType();
  assert(EltTy == MRI.getType(Dst));

  B.setInstr(MI);

  if (IdxVal.getValue() < VecTy.getNumElements())
    B.buildExtract(Dst, Vec, IdxVal.getValue() * EltTy.getSizeInBits());
  else
    B.buildUndef(Dst);

  MI.eraseFromParent();
  return true;
}

// Return the use branch instruction, otherwise null if the usage is invalid.
static MachineInstr *verifyCFIntrinsic(MachineInstr &MI,
                                       MachineRegisterInfo &MRI) {
  Register CondDef = MI.getOperand(0).getReg();
  if (!MRI.hasOneNonDBGUse(CondDef))
    return nullptr;

  MachineInstr &UseMI = *MRI.use_instr_nodbg_begin(CondDef);
  return UseMI.getParent() == MI.getParent() &&
         UseMI.getOpcode() == AMDGPU::G_BRCOND ? &UseMI : nullptr;
}

Register AMDGPULegalizerInfo::getLiveInRegister(MachineRegisterInfo &MRI,
                                                Register Reg, LLT Ty) const {
  Register LiveIn = MRI.getLiveInVirtReg(Reg);
  if (LiveIn)
    return LiveIn;

  Register NewReg = MRI.createGenericVirtualRegister(Ty);
  MRI.addLiveIn(Reg, NewReg);
  return NewReg;
}

bool AMDGPULegalizerInfo::loadInputValue(Register DstReg, MachineIRBuilder &B,
                                         const ArgDescriptor *Arg) const {
  if (!Arg->isRegister())
    return false; // TODO: Handle these

  assert(Arg->getRegister() != 0);
  assert(Arg->getRegister().isPhysical());

  MachineRegisterInfo &MRI = *B.getMRI();

  LLT Ty = MRI.getType(DstReg);
  Register LiveIn = getLiveInRegister(MRI, Arg->getRegister(), Ty);
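
  // Some inputs arrive packed into a single register and are described with
  // a mask. Illustrative values only: an ID stored in bits [19:10] would
  // have Mask = 0x3ff << 10, and is recovered below as (LiveIn >> 10) & 0x3ff.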
  if (Arg->isMasked()) {
    // TODO: Should we try to emit this once in the entry block?
    const LLT S32 = LLT::scalar(32);
    const unsigned Mask = Arg->getMask();
    const unsigned Shift = countTrailingZeros<unsigned>(Mask);

    auto ShiftAmt = B.buildConstant(S32, Shift);
    auto LShr = B.buildLShr(S32, LiveIn, ShiftAmt);
    B.buildAnd(DstReg, LShr, B.buildConstant(S32, Mask >> Shift));
  } else
    B.buildCopy(DstReg, LiveIn);

  // Insert the argument copy if it doesn't already exist.
  // FIXME: It seems EmitLiveInCopies isn't called anywhere?
  if (!MRI.getVRegDef(LiveIn)) {
    MachineBasicBlock &EntryMBB = B.getMF().front();
    EntryMBB.addLiveIn(Arg->getRegister());
    B.setInsertPt(EntryMBB, EntryMBB.begin());
    B.buildCopy(LiveIn, Arg->getRegister());
  }

  return true;
}

bool AMDGPULegalizerInfo::legalizePreloadedArgIntrin(
  MachineInstr &MI,
  MachineRegisterInfo &MRI,
  MachineIRBuilder &B,
  AMDGPUFunctionArgInfo::PreloadedValue ArgType) const {
  B.setInstr(MI);

  const SIMachineFunctionInfo *MFI = B.getMF().getInfo<SIMachineFunctionInfo>();

  const ArgDescriptor *Arg;
  const TargetRegisterClass *RC;
  std::tie(Arg, RC) = MFI->getPreloadedValue(ArgType);
  if (!Arg) {
    LLVM_DEBUG(dbgs() << "Required arg register missing\n");
    return false;
  }

  if (loadInputValue(MI.getOperand(0).getReg(), B, Arg)) {
    MI.eraseFromParent();
    return true;
  }

  return false;
}

bool AMDGPULegalizerInfo::legalizeImplicitArgPtr(MachineInstr &MI,
                                                 MachineRegisterInfo &MRI,
                                                 MachineIRBuilder &B) const {
  const SIMachineFunctionInfo *MFI = B.getMF().getInfo<SIMachineFunctionInfo>();
  if (!MFI->isEntryFunction()) {
    return legalizePreloadedArgIntrin(MI, MRI, B,
                                      AMDGPUFunctionArgInfo::IMPLICIT_ARG_PTR);
  }

  B.setInstr(MI);

  uint64_t Offset =
    ST.getTargetLowering()->getImplicitParameterOffset(
      B.getMF(), AMDGPUTargetLowering::FIRST_IMPLICIT);
  Register DstReg = MI.getOperand(0).getReg();
  LLT DstTy = MRI.getType(DstReg);
  LLT IdxTy = LLT::scalar(DstTy.getSizeInBits());

  const ArgDescriptor *Arg;
  const TargetRegisterClass *RC;
  std::tie(Arg, RC)
    = MFI->getPreloadedValue(AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR);
  if (!Arg)
    return false;

  Register KernargPtrReg = MRI.createGenericVirtualRegister(DstTy);
  if (!loadInputValue(KernargPtrReg, B, Arg))
    return false;

  B.buildGEP(DstReg, KernargPtrReg, B.buildConstant(IdxTy, Offset).getReg(0));
  MI.eraseFromParent();
  return true;
}

bool AMDGPULegalizerInfo::legalizeIntrinsic(MachineInstr &MI,
                                            MachineRegisterInfo &MRI,
                                            MachineIRBuilder &B) const {
  // Replace the use G_BRCOND with the exec-manipulating branch pseudos.
  switch (MI.getOperand(MI.getNumExplicitDefs()).getIntrinsicID()) {
  case Intrinsic::amdgcn_if: {
    if (MachineInstr *BrCond = verifyCFIntrinsic(MI, MRI)) {
      const SIRegisterInfo *TRI
        = static_cast<const SIRegisterInfo *>(MRI.getTargetRegisterInfo());

      B.setInstr(*BrCond);
      Register Def = MI.getOperand(1).getReg();
      Register Use = MI.getOperand(3).getReg();
      B.buildInstr(AMDGPU::SI_IF)
        .addDef(Def)
        .addUse(Use)
        .addMBB(BrCond->getOperand(1).getMBB());

      MRI.setRegClass(Def, TRI->getWaveMaskRegClass());
      MRI.setRegClass(Use, TRI->getWaveMaskRegClass());
      MI.eraseFromParent();
      BrCond->eraseFromParent();
      return true;
    }

    return false;
  }
  case Intrinsic::amdgcn_loop: {
    if (MachineInstr *BrCond = verifyCFIntrinsic(MI, MRI)) {
      const SIRegisterInfo *TRI
        = static_cast<const SIRegisterInfo *>(MRI.getTargetRegisterInfo());

      B.setInstr(*BrCond);
      Register Reg = MI.getOperand(2).getReg();
      B.buildInstr(AMDGPU::SI_LOOP)
        .addUse(Reg)
        .addMBB(BrCond->getOperand(1).getMBB());
      MI.eraseFromParent();
      BrCond->eraseFromParent();
      MRI.setRegClass(Reg, TRI->getWaveMaskRegClass());
      return true;
    }

    return false;
  }
  case Intrinsic::amdgcn_kernarg_segment_ptr:
    return legalizePreloadedArgIntrin(
      MI, MRI, B, AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR);
  case Intrinsic::amdgcn_implicitarg_ptr:
    return legalizeImplicitArgPtr(MI, MRI, B);
  case Intrinsic::amdgcn_workitem_id_x:
    return legalizePreloadedArgIntrin(MI, MRI, B,
                                      AMDGPUFunctionArgInfo::WORKITEM_ID_X);
  case Intrinsic::amdgcn_workitem_id_y:
    return legalizePreloadedArgIntrin(MI, MRI, B,
                                      AMDGPUFunctionArgInfo::WORKITEM_ID_Y);
  case Intrinsic::amdgcn_workitem_id_z:
    return legalizePreloadedArgIntrin(MI, MRI, B,
                                      AMDGPUFunctionArgInfo::WORKITEM_ID_Z);
  case Intrinsic::amdgcn_workgroup_id_x:
    return legalizePreloadedArgIntrin(MI, MRI, B,
                                      AMDGPUFunctionArgInfo::WORKGROUP_ID_X);
  case Intrinsic::amdgcn_workgroup_id_y:
    return legalizePreloadedArgIntrin(MI, MRI, B,
                                      AMDGPUFunctionArgInfo::WORKGROUP_ID_Y);
  case Intrinsic::amdgcn_workgroup_id_z:
    return legalizePreloadedArgIntrin(MI, MRI, B,
                                      AMDGPUFunctionArgInfo::WORKGROUP_ID_Z);
  case Intrinsic::amdgcn_dispatch_ptr:
    return legalizePreloadedArgIntrin(MI, MRI, B,
                                      AMDGPUFunctionArgInfo::DISPATCH_PTR);
  case Intrinsic::amdgcn_queue_ptr:
    return legalizePreloadedArgIntrin(MI, MRI, B,
                                      AMDGPUFunctionArgInfo::QUEUE_PTR);
  case Intrinsic::amdgcn_implicit_buffer_ptr:
    return legalizePreloadedArgIntrin(
      MI, MRI, B, AMDGPUFunctionArgInfo::IMPLICIT_BUFFER_PTR);
  case Intrinsic::amdgcn_dispatch_id:
    return legalizePreloadedArgIntrin(MI, MRI, B,
                                      AMDGPUFunctionArgInfo::DISPATCH_ID);
  default:
    return true;
  }

  return true;
}