//===- AMDGPULegalizerInfo.cpp -----------------------------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the targeting of the MachineLegalizer class for
/// AMDGPU.
/// \todo This should be generated by TableGen.
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPULegalizerInfo.h"
#include "AMDGPUTargetMachine.h"
#include "SIMachineFunctionInfo.h"

#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/Debug.h"

#define DEBUG_TYPE "amdgpu-legalinfo"

using namespace llvm;
using namespace LegalizeActions;
using namespace LegalizeMutations;
using namespace LegalityPredicates;


static LegalityPredicate isMultiple32(unsigned TypeIdx,
                                      unsigned MaxSize = 512) {
  return [=](const LegalityQuery &Query) {
    const LLT Ty = Query.Types[TypeIdx];
    const LLT EltTy = Ty.getScalarType();
    return Ty.getSizeInBits() <= MaxSize && EltTy.getSizeInBits() % 32 == 0;
  };
}

static LegalityPredicate isSmallOddVector(unsigned TypeIdx) {
  return [=](const LegalityQuery &Query) {
    const LLT Ty = Query.Types[TypeIdx];
    return Ty.isVector() &&
           Ty.getNumElements() % 2 != 0 &&
           Ty.getElementType().getSizeInBits() < 32;
  };
}

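// Pad a vector with an odd number of sub-32-bit elements to the next even
// element count, e.g. v3s16 becomes v4s16; paired with isSmallOddVector above.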
static LegalizeMutation oneMoreElement(unsigned TypeIdx) {
  return [=](const LegalityQuery &Query) {
    const LLT Ty = Query.Types[TypeIdx];
    const LLT EltTy = Ty.getElementType();
    return std::make_pair(TypeIdx, LLT::vector(Ty.getNumElements() + 1, EltTy));
  };
}

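// Split a wide vector into roughly 64-bit pieces, e.g. v4s32 (128 bits)
// becomes v2s32, and v3s32 (96 bits) also becomes v2s32, since Pieces = 2
// and NewNumElts = (3 + 1) / 2 = 2.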
static LegalizeMutation fewerEltsToSize64Vector(unsigned TypeIdx) {
  return [=](const LegalityQuery &Query) {
    const LLT Ty = Query.Types[TypeIdx];
    const LLT EltTy = Ty.getElementType();
    unsigned Size = Ty.getSizeInBits();
    unsigned Pieces = (Size + 63) / 64;
    unsigned NewNumElts = (Ty.getNumElements() + 1) / Pieces;
    return std::make_pair(TypeIdx, LLT::scalarOrVector(NewNumElts, EltTy));
  };
}

static LegalityPredicate vectorWiderThan(unsigned TypeIdx, unsigned Size) {
  return [=](const LegalityQuery &Query) {
    const LLT QueryTy = Query.Types[TypeIdx];
    return QueryTy.isVector() && QueryTy.getSizeInBits() > Size;
  };
}

static LegalityPredicate numElementsNotEven(unsigned TypeIdx) {
  return [=](const LegalityQuery &Query) {
    const LLT QueryTy = Query.Types[TypeIdx];
    return QueryTy.isVector() && QueryTy.getNumElements() % 2 != 0;
  };
}

AMDGPULegalizerInfo::AMDGPULegalizerInfo(const GCNSubtarget &ST,
                                         const GCNTargetMachine &TM) {
  using namespace TargetOpcode;

  auto GetAddrSpacePtr = [&TM](unsigned AS) {
    return LLT::pointer(AS, TM.getPointerSizeInBits(AS));
  };

  const LLT S1 = LLT::scalar(1);
  const LLT S8 = LLT::scalar(8);
  const LLT S16 = LLT::scalar(16);
  const LLT S32 = LLT::scalar(32);
  const LLT S64 = LLT::scalar(64);
  const LLT S128 = LLT::scalar(128);
  const LLT S256 = LLT::scalar(256);
  const LLT S512 = LLT::scalar(512);

  const LLT V2S16 = LLT::vector(2, 16);
  const LLT V4S16 = LLT::vector(4, 16);
  const LLT V8S16 = LLT::vector(8, 16);

  const LLT V2S32 = LLT::vector(2, 32);
  const LLT V3S32 = LLT::vector(3, 32);
  const LLT V4S32 = LLT::vector(4, 32);
  const LLT V5S32 = LLT::vector(5, 32);
  const LLT V6S32 = LLT::vector(6, 32);
  const LLT V7S32 = LLT::vector(7, 32);
  const LLT V8S32 = LLT::vector(8, 32);
  const LLT V9S32 = LLT::vector(9, 32);
  const LLT V10S32 = LLT::vector(10, 32);
  const LLT V11S32 = LLT::vector(11, 32);
  const LLT V12S32 = LLT::vector(12, 32);
  const LLT V13S32 = LLT::vector(13, 32);
  const LLT V14S32 = LLT::vector(14, 32);
  const LLT V15S32 = LLT::vector(15, 32);
  const LLT V16S32 = LLT::vector(16, 32);

  const LLT V2S64 = LLT::vector(2, 64);
  const LLT V3S64 = LLT::vector(3, 64);
  const LLT V4S64 = LLT::vector(4, 64);
  const LLT V5S64 = LLT::vector(5, 64);
  const LLT V6S64 = LLT::vector(6, 64);
  const LLT V7S64 = LLT::vector(7, 64);
  const LLT V8S64 = LLT::vector(8, 64);

  std::initializer_list<LLT> AllS32Vectors =
    {V2S32, V3S32, V4S32, V5S32, V6S32, V7S32, V8S32,
     V9S32, V10S32, V11S32, V12S32, V13S32, V14S32, V15S32, V16S32};
  std::initializer_list<LLT> AllS64Vectors =
    {V2S64, V3S64, V4S64, V5S64, V6S64, V7S64, V8S64};

  const LLT GlobalPtr = GetAddrSpacePtr(AMDGPUAS::GLOBAL_ADDRESS);
  const LLT ConstantPtr = GetAddrSpacePtr(AMDGPUAS::CONSTANT_ADDRESS);
  const LLT LocalPtr = GetAddrSpacePtr(AMDGPUAS::LOCAL_ADDRESS);
  const LLT FlatPtr = GetAddrSpacePtr(AMDGPUAS::FLAT_ADDRESS);
  const LLT PrivatePtr = GetAddrSpacePtr(AMDGPUAS::PRIVATE_ADDRESS);

  const LLT CodePtr = FlatPtr;

  const std::initializer_list<LLT> AddrSpaces64 = {
    GlobalPtr, ConstantPtr, FlatPtr
  };

  const std::initializer_list<LLT> AddrSpaces32 = {
    LocalPtr, PrivatePtr
  };

  const std::initializer_list<LLT> FPTypesBase = {
    S32, S64
  };

  const std::initializer_list<LLT> FPTypes16 = {
    S32, S64, S16
  };

  setAction({G_BRCOND, S1}, Legal);

  // TODO: All multiples of 32, vectors of pointers, all v2s16 pairs, more
  // elements for v3s16
  getActionDefinitionsBuilder(G_PHI)
    .legalFor({S32, S64, V2S16, V4S16, S1, S128, S256})
    .legalFor(AllS32Vectors)
    .legalFor(AllS64Vectors)
    .legalFor(AddrSpaces64)
    .legalFor(AddrSpaces32)
    .clampScalar(0, S32, S256)
    .widenScalarToNextPow2(0, 32)
    .clampMaxNumElements(0, S32, 16)
    .moreElementsIf(isSmallOddVector(0), oneMoreElement(0))
    .legalIf(isPointer(0));

  if (ST.has16BitInsts()) {
    getActionDefinitionsBuilder({G_ADD, G_SUB, G_MUL})
      .legalFor({S32, S16})
      .clampScalar(0, S16, S32)
      .scalarize(0);
  } else {
    getActionDefinitionsBuilder({G_ADD, G_SUB, G_MUL})
      .legalFor({S32})
      .clampScalar(0, S32, S32)
      .scalarize(0);
  }

  getActionDefinitionsBuilder({G_UMULH, G_SMULH})
    .legalFor({S32})
    .clampScalar(0, S32, S32)
    .scalarize(0);

  // Report legal for any types we can handle anywhere. For the cases only legal
  // on the SALU, RegBankSelect will be able to re-legalize.
  getActionDefinitionsBuilder({G_AND, G_OR, G_XOR})
    .legalFor({S32, S1, S64, V2S32, V2S16, V4S16})
    .clampScalar(0, S32, S64)
    .moreElementsIf(isSmallOddVector(0), oneMoreElement(0))
    .fewerElementsIf(vectorWiderThan(0, 32), fewerEltsToSize64Vector(0))
    .widenScalarToNextPow2(0)
    .scalarize(0);

  getActionDefinitionsBuilder({G_UADDO, G_SADDO, G_USUBO, G_SSUBO,
                               G_UADDE, G_SADDE, G_USUBE, G_SSUBE})
    .legalFor({{S32, S1}})
    .clampScalar(0, S32, S32);

  getActionDefinitionsBuilder(G_BITCAST)
    .legalForCartesianProduct({S32, V2S16})
    .legalForCartesianProduct({S64, V2S32, V4S16})
    .legalForCartesianProduct({V2S64, V4S32})
    // Don't worry about the size constraint.
    .legalIf(all(isPointer(0), isPointer(1)));

  if (ST.has16BitInsts()) {
    getActionDefinitionsBuilder(G_FCONSTANT)
      .legalFor({S32, S64, S16})
      .clampScalar(0, S16, S64);
  } else {
    getActionDefinitionsBuilder(G_FCONSTANT)
      .legalFor({S32, S64})
      .clampScalar(0, S32, S64);
  }

  getActionDefinitionsBuilder(G_IMPLICIT_DEF)
    .legalFor({S1, S32, S64, V2S32, V4S32, V2S16, V4S16, GlobalPtr,
               ConstantPtr, LocalPtr, FlatPtr, PrivatePtr})
    .moreElementsIf(isSmallOddVector(0), oneMoreElement(0))
    .clampScalarOrElt(0, S32, S512)
    .legalIf(isMultiple32(0))
    .widenScalarToNextPow2(0, 32)
    .clampMaxNumElements(0, S32, 16);


  // FIXME: i1 operands to intrinsics should always be legal, but other i1
  // values may not be legal. We need to figure out how to distinguish
  // between these two scenarios.
  getActionDefinitionsBuilder(G_CONSTANT)
    .legalFor({S1, S32, S64, GlobalPtr,
               LocalPtr, ConstantPtr, PrivatePtr, FlatPtr })
    .clampScalar(0, S32, S64)
    .widenScalarToNextPow2(0)
    .legalIf(isPointer(0));

  setAction({G_FRAME_INDEX, PrivatePtr}, Legal);

  auto &FPOpActions = getActionDefinitionsBuilder(
    { G_FADD, G_FMUL, G_FNEG, G_FABS, G_FMA, G_FCANONICALIZE})
    .legalFor({S32, S64});

  if (ST.has16BitInsts()) {
    if (ST.hasVOP3PInsts())
      FPOpActions.legalFor({S16, V2S16});
    else
      FPOpActions.legalFor({S16});
  }

  if (ST.hasVOP3PInsts())
    FPOpActions.clampMaxNumElements(0, S16, 2);
  FPOpActions
    .scalarize(0)
    .clampScalar(0, ST.has16BitInsts() ? S16 : S32, S64);

  if (ST.has16BitInsts()) {
    getActionDefinitionsBuilder(G_FSQRT)
      .legalFor({S32, S64, S16})
      .scalarize(0)
      .clampScalar(0, S16, S64);
  } else {
    getActionDefinitionsBuilder(G_FSQRT)
      .legalFor({S32, S64})
      .scalarize(0)
      .clampScalar(0, S32, S64);
  }

  getActionDefinitionsBuilder(G_FPTRUNC)
    .legalFor({{S32, S64}, {S16, S32}})
    .scalarize(0);

  getActionDefinitionsBuilder(G_FPEXT)
    .legalFor({{S64, S32}, {S32, S16}})
    .lowerFor({{S64, S16}}) // FIXME: Implement
    .scalarize(0);

  getActionDefinitionsBuilder(G_FCOPYSIGN)
    .legalForCartesianProduct({S16, S32, S64}, {S16, S32, S64})
    .scalarize(0);

  getActionDefinitionsBuilder(G_FSUB)
    // Use actual fsub instruction
    .legalFor({S32})
    // Must use fadd + fneg
    .lowerFor({S64, S16, V2S16})
    .scalarize(0)
    .clampScalar(0, S32, S64);

  getActionDefinitionsBuilder({G_SEXT, G_ZEXT, G_ANYEXT})
    .legalFor({{S64, S32}, {S32, S16}, {S64, S16},
               {S32, S1}, {S64, S1}, {S16, S1},
               // FIXME: Hack
               {S64, LLT::scalar(33)},
               {S32, S8}, {S128, S32}, {S128, S64}, {S32, LLT::scalar(24)}})
    .scalarize(0);

  getActionDefinitionsBuilder({G_SITOFP, G_UITOFP})
    .legalFor({{S32, S32}, {S64, S32}})
    .lowerFor({{S32, S64}})
    .customFor({{S64, S64}})
    .scalarize(0);

  getActionDefinitionsBuilder({G_FPTOSI, G_FPTOUI})
    .legalFor({{S32, S32}, {S32, S64}})
    .scalarize(0);

  getActionDefinitionsBuilder(G_INTRINSIC_ROUND)
    .legalFor({S32, S64})
    .scalarize(0);

  if (ST.getGeneration() >= AMDGPUSubtarget::SEA_ISLANDS) {
    getActionDefinitionsBuilder({G_INTRINSIC_TRUNC, G_FCEIL, G_FRINT})
      .legalFor({S32, S64})
      .clampScalar(0, S32, S64)
      .scalarize(0);
  } else {
    getActionDefinitionsBuilder({G_INTRINSIC_TRUNC, G_FCEIL, G_FRINT})
      .legalFor({S32})
      .customFor({S64})
      .clampScalar(0, S32, S64)
      .scalarize(0);
  }

  getActionDefinitionsBuilder(G_GEP)
    .legalForCartesianProduct(AddrSpaces64, {S64})
    .legalForCartesianProduct(AddrSpaces32, {S32})
    .scalarize(0);

  setAction({G_BLOCK_ADDR, CodePtr}, Legal);

  getActionDefinitionsBuilder(G_ICMP)
    .legalForCartesianProduct(
      {S1}, {S32, S64, GlobalPtr, LocalPtr, ConstantPtr, PrivatePtr, FlatPtr})
    .legalFor({{S1, S32}, {S1, S64}})
    .widenScalarToNextPow2(1)
    .clampScalar(1, S32, S64)
    .scalarize(0)
    .legalIf(all(typeIs(0, S1), isPointer(1)));

  getActionDefinitionsBuilder(G_FCMP)
    .legalForCartesianProduct({S1}, ST.has16BitInsts() ? FPTypes16 : FPTypesBase)
    .widenScalarToNextPow2(1)
    .clampScalar(1, S32, S64)
    .scalarize(0);

  // FIXME: fexp, flog2, flog10 need to be custom lowered.
  getActionDefinitionsBuilder({G_FPOW, G_FEXP, G_FEXP2,
                               G_FLOG, G_FLOG2, G_FLOG10})
    .legalFor({S32})
    .scalarize(0);

  // The 64-bit versions produce 32-bit results, but only on the SALU.
  getActionDefinitionsBuilder({G_CTLZ, G_CTLZ_ZERO_UNDEF,
                               G_CTTZ, G_CTTZ_ZERO_UNDEF,
                               G_CTPOP})
    .legalFor({{S32, S32}, {S32, S64}})
    .clampScalar(0, S32, S32)
    .clampScalar(1, S32, S64)
    .scalarize(0)
    .widenScalarToNextPow2(0, 32)
    .widenScalarToNextPow2(1, 32);

  // TODO: Expand for > s32
  getActionDefinitionsBuilder(G_BSWAP)
    .legalFor({S32})
    .clampScalar(0, S32, S32)
    .scalarize(0);

  if (ST.has16BitInsts()) {
    if (ST.hasVOP3PInsts()) {
      getActionDefinitionsBuilder({G_SMIN, G_SMAX, G_UMIN, G_UMAX})
        .legalFor({S32, S16, V2S16})
        .moreElementsIf(isSmallOddVector(0), oneMoreElement(0))
        .clampMaxNumElements(0, S16, 2)
        .clampScalar(0, S16, S32)
        .widenScalarToNextPow2(0)
        .scalarize(0);
    } else {
      getActionDefinitionsBuilder({G_SMIN, G_SMAX, G_UMIN, G_UMAX})
        .legalFor({S32, S16})
        .widenScalarToNextPow2(0)
        .clampScalar(0, S16, S32)
        .scalarize(0);
    }
  } else {
    getActionDefinitionsBuilder({G_SMIN, G_SMAX, G_UMIN, G_UMAX})
      .legalFor({S32})
      .clampScalar(0, S32, S32)
      .widenScalarToNextPow2(0)
      .scalarize(0);
  }

  auto smallerThan = [](unsigned TypeIdx0, unsigned TypeIdx1) {
    return [=](const LegalityQuery &Query) {
      return Query.Types[TypeIdx0].getSizeInBits() <
             Query.Types[TypeIdx1].getSizeInBits();
    };
  };

  auto greaterThan = [](unsigned TypeIdx0, unsigned TypeIdx1) {
    return [=](const LegalityQuery &Query) {
      return Query.Types[TypeIdx0].getSizeInBits() >
             Query.Types[TypeIdx1].getSizeInBits();
    };
  };

  getActionDefinitionsBuilder(G_INTTOPTR)
    // List the common cases
    .legalForCartesianProduct(AddrSpaces64, {S64})
    .legalForCartesianProduct(AddrSpaces32, {S32})
    .scalarize(0)
    // Accept any address space as long as the size matches
    .legalIf(sameSize(0, 1))
    .widenScalarIf(smallerThan(1, 0),
      [](const LegalityQuery &Query) {
        return std::make_pair(1, LLT::scalar(Query.Types[0].getSizeInBits()));
      })
    .narrowScalarIf(greaterThan(1, 0),
      [](const LegalityQuery &Query) {
        return std::make_pair(1, LLT::scalar(Query.Types[0].getSizeInBits()));
      });

  getActionDefinitionsBuilder(G_PTRTOINT)
    // List the common cases
    .legalForCartesianProduct(AddrSpaces64, {S64})
    .legalForCartesianProduct(AddrSpaces32, {S32})
    .scalarize(0)
    // Accept any address space as long as the size matches
    .legalIf(sameSize(0, 1))
    .widenScalarIf(smallerThan(0, 1),
      [](const LegalityQuery &Query) {
        return std::make_pair(0, LLT::scalar(Query.Types[1].getSizeInBits()));
      })
    .narrowScalarIf(
      greaterThan(0, 1),
      [](const LegalityQuery &Query) {
        return std::make_pair(0, LLT::scalar(Query.Types[1].getSizeInBits()));
      });

  if (ST.hasFlatAddressSpace()) {
    getActionDefinitionsBuilder(G_ADDRSPACE_CAST)
      .scalarize(0)
      .custom();
  }

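  // Loads and stores. Extending loads of types wider than 32 bits are first
  // narrowed to s32 pieces, and 96-bit vector accesses are broken down on
  // subtargets without dwordx3 load/store instructions.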
  getActionDefinitionsBuilder({G_LOAD, G_STORE})
    .narrowScalarIf([](const LegalityQuery &Query) {
        unsigned Size = Query.Types[0].getSizeInBits();
        unsigned MemSize = Query.MMODescrs[0].SizeInBits;
        return (Size > 32 && MemSize < Size);
      },
      [](const LegalityQuery &Query) {
        return std::make_pair(0, LLT::scalar(32));
      })
    .fewerElementsIf([=, &ST](const LegalityQuery &Query) {
        unsigned MemSize = Query.MMODescrs[0].SizeInBits;
        return (MemSize == 96) &&
               Query.Types[0].isVector() &&
               !ST.hasDwordx3LoadStores();
      },
      [=](const LegalityQuery &Query) {
        return std::make_pair(0, V2S32);
      })
    .legalIf([=, &ST](const LegalityQuery &Query) {
        const LLT &Ty0 = Query.Types[0];

        unsigned Size = Ty0.getSizeInBits();
        unsigned MemSize = Query.MMODescrs[0].SizeInBits;
        if (Size < 32 || (Size > 32 && MemSize < Size))
          return false;

        if (Ty0.isVector() && Size != MemSize)
          return false;

        // TODO: Decompose private loads into 4-byte components.
        // TODO: Illegal flat loads on SI
        switch (MemSize) {
        case 8:
        case 16:
          return Size == 32;
        case 32:
        case 64:
        case 128:
          return true;

        case 96:
          return ST.hasDwordx3LoadStores();

        case 256:
        case 512:
          // TODO: constant loads
        default:
          return false;
        }
      })
    .clampScalar(0, S32, S64);


  // FIXME: Handle alignment requirements.
  auto &ExtLoads = getActionDefinitionsBuilder({G_SEXTLOAD, G_ZEXTLOAD})
    .legalForTypesWithMemDesc({
        {S32, GlobalPtr, 8, 8},
        {S32, GlobalPtr, 16, 8},
        {S32, LocalPtr, 8, 8},
        {S32, LocalPtr, 16, 8},
        {S32, PrivatePtr, 8, 8},
        {S32, PrivatePtr, 16, 8}});
  if (ST.hasFlatAddressSpace()) {
    ExtLoads.legalForTypesWithMemDesc({{S32, FlatPtr, 8, 8},
                                       {S32, FlatPtr, 16, 8}});
  }

  ExtLoads.clampScalar(0, S32, S32)
          .widenScalarToNextPow2(0)
          .unsupportedIfMemSizeNotPow2()
          .lower();

  auto &Atomics = getActionDefinitionsBuilder(
    {G_ATOMICRMW_XCHG, G_ATOMICRMW_ADD, G_ATOMICRMW_SUB,
     G_ATOMICRMW_AND, G_ATOMICRMW_OR, G_ATOMICRMW_XOR,
     G_ATOMICRMW_MAX, G_ATOMICRMW_MIN, G_ATOMICRMW_UMAX,
     G_ATOMICRMW_UMIN, G_ATOMIC_CMPXCHG})
    .legalFor({{S32, GlobalPtr}, {S32, LocalPtr},
               {S64, GlobalPtr}, {S64, LocalPtr}});
  if (ST.hasFlatAddressSpace()) {
    Atomics.legalFor({{S32, FlatPtr}, {S64, FlatPtr}});
  }

  // TODO: Pointer types, any 32-bit or 64-bit vector
  getActionDefinitionsBuilder(G_SELECT)
    .legalForCartesianProduct({S32, S64, S16, V2S32, V2S16, V4S16,
                               GlobalPtr, LocalPtr, FlatPtr, PrivatePtr,
                               LLT::vector(2, LocalPtr), LLT::vector(2, PrivatePtr)}, {S1})
    .clampScalar(0, S16, S64)
    .moreElementsIf(isSmallOddVector(0), oneMoreElement(0))
    .fewerElementsIf(numElementsNotEven(0), scalarize(0))
    .scalarize(1)
    .clampMaxNumElements(0, S32, 2)
    .clampMaxNumElements(0, LocalPtr, 2)
    .clampMaxNumElements(0, PrivatePtr, 2)
    .scalarize(0)
    .widenScalarToNextPow2(0)
    .legalIf(all(isPointer(0), typeIs(1, S1)));

  // TODO: Only the low 4/5/6 bits of the shift amount are observed, so we can
  // be more flexible with the shift amount type.
  auto &Shifts = getActionDefinitionsBuilder({G_SHL, G_LSHR, G_ASHR})
    .legalFor({{S32, S32}, {S64, S32}});
  if (ST.has16BitInsts()) {
    if (ST.hasVOP3PInsts()) {
      Shifts.legalFor({{S16, S32}, {S16, S16}, {V2S16, V2S16}})
        .clampMaxNumElements(0, S16, 2);
    } else
      Shifts.legalFor({{S16, S32}, {S16, S16}});

    Shifts.clampScalar(1, S16, S32);
    Shifts.clampScalar(0, S16, S64);
    Shifts.widenScalarToNextPow2(0, 16);
  } else {
    // Make sure we legalize the shift amount type first, as the general
    // expansion for the shifted type will produce much worse code if it hasn't
    // been truncated already.
    Shifts.clampScalar(1, S32, S32);
    Shifts.clampScalar(0, S32, S64);
    Shifts.widenScalarToNextPow2(0, 32);
  }
  Shifts.scalarize(0);

  for (unsigned Op : {G_EXTRACT_VECTOR_ELT, G_INSERT_VECTOR_ELT}) {
    unsigned VecTypeIdx = Op == G_EXTRACT_VECTOR_ELT ? 1 : 0;
    unsigned EltTypeIdx = Op == G_EXTRACT_VECTOR_ELT ? 0 : 1;
    unsigned IdxTypeIdx = 2;

    getActionDefinitionsBuilder(Op)
      .legalIf([=](const LegalityQuery &Query) {
          const LLT &VecTy = Query.Types[VecTypeIdx];
          const LLT &IdxTy = Query.Types[IdxTypeIdx];
          return VecTy.getSizeInBits() % 32 == 0 &&
                 VecTy.getSizeInBits() <= 512 &&
                 IdxTy.getSizeInBits() == 32;
        })
      .clampScalar(EltTypeIdx, S32, S64)
      .clampScalar(VecTypeIdx, S32, S64)
      .clampScalar(IdxTypeIdx, S32, S32);
  }

  getActionDefinitionsBuilder(G_EXTRACT_VECTOR_ELT)
    .unsupportedIf([=](const LegalityQuery &Query) {
        const LLT &EltTy = Query.Types[1].getElementType();
        return Query.Types[0] != EltTy;
      });

  for (unsigned Op : {G_EXTRACT, G_INSERT}) {
    unsigned BigTyIdx = Op == G_EXTRACT ? 1 : 0;
    unsigned LitTyIdx = Op == G_EXTRACT ? 0 : 1;

    // FIXME: Doesn't handle extract of illegal sizes.
    getActionDefinitionsBuilder(Op)
      .legalIf([=](const LegalityQuery &Query) {
          const LLT BigTy = Query.Types[BigTyIdx];
          const LLT LitTy = Query.Types[LitTyIdx];
          return (BigTy.getSizeInBits() % 32 == 0) &&
                 (LitTy.getSizeInBits() % 16 == 0);
        })
      .widenScalarIf(
        [=](const LegalityQuery &Query) {
          const LLT BigTy = Query.Types[BigTyIdx];
          return (BigTy.getScalarSizeInBits() < 16);
        },
        LegalizeMutations::widenScalarOrEltToNextPow2(BigTyIdx, 16))
      .widenScalarIf(
        [=](const LegalityQuery &Query) {
          const LLT LitTy = Query.Types[LitTyIdx];
          return (LitTy.getScalarSizeInBits() < 16);
        },
        LegalizeMutations::widenScalarOrEltToNextPow2(LitTyIdx, 16))
      .moreElementsIf(isSmallOddVector(BigTyIdx), oneMoreElement(BigTyIdx))
      .widenScalarToNextPow2(BigTyIdx, 32);

  }

  // TODO: vectors of pointers
  getActionDefinitionsBuilder(G_BUILD_VECTOR)
    .legalForCartesianProduct(AllS32Vectors, {S32})
    .legalForCartesianProduct(AllS64Vectors, {S64})
    .clampNumElements(0, V16S32, V16S32)
    .clampNumElements(0, V2S64, V8S64)
    .minScalarSameAs(1, 0)
    // FIXME: Sort of a hack to make progress on other legalizations.
    .legalIf([=](const LegalityQuery &Query) {
      return Query.Types[0].getScalarSizeInBits() <= 32 ||
             Query.Types[0].getScalarSizeInBits() == 64;
    });

  // TODO: Support any combination of v2s32
  getActionDefinitionsBuilder(G_CONCAT_VECTORS)
    .legalFor({{V4S32, V2S32},
               {V8S32, V2S32},
               {V8S32, V4S32},
               {V4S64, V2S64},
               {V4S16, V2S16},
               {V8S16, V2S16},
               {V8S16, V4S16},
               {LLT::vector(4, LocalPtr), LLT::vector(2, LocalPtr)},
               {LLT::vector(4, PrivatePtr), LLT::vector(2, PrivatePtr)}});

  // Merge/Unmerge
  for (unsigned Op : {G_MERGE_VALUES, G_UNMERGE_VALUES}) {
    unsigned BigTyIdx = Op == G_MERGE_VALUES ? 0 : 1;
    unsigned LitTyIdx = Op == G_MERGE_VALUES ? 1 : 0;

    auto notValidElt = [=](const LegalityQuery &Query, unsigned TypeIdx) {
      const LLT &Ty = Query.Types[TypeIdx];
      if (Ty.isVector()) {
        const LLT &EltTy = Ty.getElementType();
        if (EltTy.getSizeInBits() < 8 || EltTy.getSizeInBits() > 64)
          return true;
        if (!isPowerOf2_32(EltTy.getSizeInBits()))
          return true;
      }
      return false;
    };

    getActionDefinitionsBuilder(Op)
      .widenScalarToNextPow2(LitTyIdx, /*Min*/ 16)
      // Clamp the little scalar to s8-s256 and make it a power of 2. It's not
      // worth considering the multiples of 64 since 2*192 and 2*384 are not
      // valid.
      .clampScalar(LitTyIdx, S16, S256)
      .widenScalarToNextPow2(LitTyIdx, /*Min*/ 32)

      // Break up vectors with weird elements into scalars
      .fewerElementsIf(
        [=](const LegalityQuery &Query) { return notValidElt(Query, 0); },
        scalarize(0))
      .fewerElementsIf(
        [=](const LegalityQuery &Query) { return notValidElt(Query, 1); },
        scalarize(1))
      .clampScalar(BigTyIdx, S32, S512)
      .widenScalarIf(
        [=](const LegalityQuery &Query) {
          const LLT &Ty = Query.Types[BigTyIdx];
          return !isPowerOf2_32(Ty.getSizeInBits()) &&
                 Ty.getSizeInBits() % 16 != 0;
        },
        [=](const LegalityQuery &Query) {
          // Pick the next power of 2, or a multiple of 64 over 128.
          // Whichever is smaller.
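          // (e.g. a hypothetical s65 widens to s128, while s260 widens to
          // s320, since 320 is the multiple of 64 smaller than s512.)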
          const LLT &Ty = Query.Types[BigTyIdx];
          unsigned NewSizeInBits = 1 << Log2_32_Ceil(Ty.getSizeInBits() + 1);
          if (NewSizeInBits >= 256) {
            unsigned RoundedTo = alignTo<64>(Ty.getSizeInBits() + 1);
            if (RoundedTo < NewSizeInBits)
              NewSizeInBits = RoundedTo;
          }
          return std::make_pair(BigTyIdx, LLT::scalar(NewSizeInBits));
        })
      .legalIf([=](const LegalityQuery &Query) {
          const LLT &BigTy = Query.Types[BigTyIdx];
          const LLT &LitTy = Query.Types[LitTyIdx];

          if (BigTy.isVector() && BigTy.getSizeInBits() < 32)
            return false;
          if (LitTy.isVector() && LitTy.getSizeInBits() < 32)
            return false;

          return BigTy.getSizeInBits() % 16 == 0 &&
                 LitTy.getSizeInBits() % 16 == 0 &&
                 BigTy.getSizeInBits() <= 512;
        })
      // Any vectors left are the wrong size. Scalarize them.
      .scalarize(0)
      .scalarize(1);
  }

  computeTables();
  verify(*ST.getInstrInfo());
}

bool AMDGPULegalizerInfo::legalizeCustom(MachineInstr &MI,
                                         MachineRegisterInfo &MRI,
                                         MachineIRBuilder &MIRBuilder,
                                         GISelChangeObserver &Observer) const {
  switch (MI.getOpcode()) {
  case TargetOpcode::G_ADDRSPACE_CAST:
    return legalizeAddrSpaceCast(MI, MRI, MIRBuilder);
  case TargetOpcode::G_FRINT:
    return legalizeFrint(MI, MRI, MIRBuilder);
  case TargetOpcode::G_FCEIL:
    return legalizeFceil(MI, MRI, MIRBuilder);
  case TargetOpcode::G_INTRINSIC_TRUNC:
    return legalizeIntrinsicTrunc(MI, MRI, MIRBuilder);
  case TargetOpcode::G_SITOFP:
    return legalizeITOFP(MI, MRI, MIRBuilder, true);
  case TargetOpcode::G_UITOFP:
    return legalizeITOFP(MI, MRI, MIRBuilder, false);
  default:
    return false;
  }

  llvm_unreachable("expected switch to return");
}

Register AMDGPULegalizerInfo::getSegmentAperture(
  unsigned AS,
  MachineRegisterInfo &MRI,
  MachineIRBuilder &MIRBuilder) const {
  MachineFunction &MF = MIRBuilder.getMF();
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const LLT S32 = LLT::scalar(32);

  if (ST.hasApertureRegs()) {
    // FIXME: Use inline constants (src_{shared, private}_base) instead of
    // getreg.
    unsigned Offset = AS == AMDGPUAS::LOCAL_ADDRESS ?
      AMDGPU::Hwreg::OFFSET_SRC_SHARED_BASE :
      AMDGPU::Hwreg::OFFSET_SRC_PRIVATE_BASE;
    unsigned WidthM1 = AS == AMDGPUAS::LOCAL_ADDRESS ?
      AMDGPU::Hwreg::WIDTH_M1_SRC_SHARED_BASE :
      AMDGPU::Hwreg::WIDTH_M1_SRC_PRIVATE_BASE;
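    // Pack the s_getreg_b32 immediate: hardware register ID in the low bits,
    // then the bit offset of the field, then its width minus one.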
    unsigned Encoding =
      AMDGPU::Hwreg::ID_MEM_BASES << AMDGPU::Hwreg::ID_SHIFT_ |
      Offset << AMDGPU::Hwreg::OFFSET_SHIFT_ |
      WidthM1 << AMDGPU::Hwreg::WIDTH_M1_SHIFT_;

    Register ApertureReg = MRI.createGenericVirtualRegister(S32);
    Register GetReg = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);

    MIRBuilder.buildInstr(AMDGPU::S_GETREG_B32)
      .addDef(GetReg)
      .addImm(Encoding);
    MRI.setType(GetReg, S32);

    auto ShiftAmt = MIRBuilder.buildConstant(S32, WidthM1 + 1);
    MIRBuilder.buildInstr(TargetOpcode::G_SHL)
      .addDef(ApertureReg)
      .addUse(GetReg)
      .addUse(ShiftAmt.getReg(0));

    return ApertureReg;
  }

  Register QueuePtr = MRI.createGenericVirtualRegister(
    LLT::pointer(AMDGPUAS::CONSTANT_ADDRESS, 64));

  // FIXME: Placeholder until we can track the input registers.
  MIRBuilder.buildConstant(QueuePtr, 0xdeadbeef);

  // Offset into amd_queue_t for group_segment_aperture_base_hi /
  // private_segment_aperture_base_hi.
  uint32_t StructOffset = (AS == AMDGPUAS::LOCAL_ADDRESS) ? 0x40 : 0x44;

  // FIXME: Don't use undef
  Value *V = UndefValue::get(PointerType::get(
    Type::getInt8Ty(MF.getFunction().getContext()),
    AMDGPUAS::CONSTANT_ADDRESS));

  MachinePointerInfo PtrInfo(V, StructOffset);
  MachineMemOperand *MMO = MF.getMachineMemOperand(
    PtrInfo,
    MachineMemOperand::MOLoad |
    MachineMemOperand::MODereferenceable |
    MachineMemOperand::MOInvariant,
    4,
    MinAlign(64, StructOffset));

  Register LoadResult = MRI.createGenericVirtualRegister(S32);
  Register LoadAddr;

  MIRBuilder.materializeGEP(LoadAddr, QueuePtr, LLT::scalar(64), StructOffset);
  MIRBuilder.buildLoad(LoadResult, LoadAddr, *MMO);
  return LoadResult;
}

bool AMDGPULegalizerInfo::legalizeAddrSpaceCast(
  MachineInstr &MI, MachineRegisterInfo &MRI,
  MachineIRBuilder &MIRBuilder) const {
  MachineFunction &MF = MIRBuilder.getMF();

  MIRBuilder.setInstr(MI);

  Register Dst = MI.getOperand(0).getReg();
  Register Src = MI.getOperand(1).getReg();

  LLT DstTy = MRI.getType(Dst);
  LLT SrcTy = MRI.getType(Src);
  unsigned DestAS = DstTy.getAddressSpace();
  unsigned SrcAS = SrcTy.getAddressSpace();

  // TODO: Avoid reloading from the queue ptr for each cast, or at least each
  // vector element.
  assert(!DstTy.isVector());

  const AMDGPUTargetMachine &TM
    = static_cast<const AMDGPUTargetMachine &>(MF.getTarget());

  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  if (ST.getTargetLowering()->isNoopAddrSpaceCast(SrcAS, DestAS)) {
    MI.setDesc(MIRBuilder.getTII().get(TargetOpcode::G_BITCAST));
    return true;
  }

  if (SrcAS == AMDGPUAS::FLAT_ADDRESS) {
    assert(DestAS == AMDGPUAS::LOCAL_ADDRESS ||
           DestAS == AMDGPUAS::PRIVATE_ADDRESS);
    unsigned NullVal = TM.getNullPointerValue(DestAS);

    auto SegmentNull = MIRBuilder.buildConstant(DstTy, NullVal);
    auto FlatNull = MIRBuilder.buildConstant(SrcTy, 0);

    Register PtrLo32 = MRI.createGenericVirtualRegister(DstTy);

    // Extract low 32-bits of the pointer.
    MIRBuilder.buildExtract(PtrLo32, Src, 0);

    Register CmpRes = MRI.createGenericVirtualRegister(LLT::scalar(1));
    MIRBuilder.buildICmp(CmpInst::ICMP_NE, CmpRes, Src, FlatNull.getReg(0));
    MIRBuilder.buildSelect(Dst, CmpRes, PtrLo32, SegmentNull.getReg(0));

    MI.eraseFromParent();
    return true;
  }

  assert(SrcAS == AMDGPUAS::LOCAL_ADDRESS ||
         SrcAS == AMDGPUAS::PRIVATE_ADDRESS);

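  // Build the flat pointer by placing the 32-bit segment offset in the low
  // half and the aperture base in the high half, selecting the flat null
  // value when the source is the segment null pointer.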
  auto SegmentNull =
    MIRBuilder.buildConstant(SrcTy, TM.getNullPointerValue(SrcAS));
  auto FlatNull =
    MIRBuilder.buildConstant(DstTy, TM.getNullPointerValue(DestAS));

  Register ApertureReg = getSegmentAperture(DestAS, MRI, MIRBuilder);

  Register CmpRes = MRI.createGenericVirtualRegister(LLT::scalar(1));
  MIRBuilder.buildICmp(CmpInst::ICMP_NE, CmpRes, Src, SegmentNull.getReg(0));

  Register BuildPtr = MRI.createGenericVirtualRegister(DstTy);

  // Coerce the type of the low half of the result so we can use merge_values.
  Register SrcAsInt = MRI.createGenericVirtualRegister(LLT::scalar(32));
  MIRBuilder.buildInstr(TargetOpcode::G_PTRTOINT)
    .addDef(SrcAsInt)
    .addUse(Src);

  // TODO: Should we allow mismatched types but matching sizes in merges to
  // avoid the ptrtoint?
  MIRBuilder.buildMerge(BuildPtr, {SrcAsInt, ApertureReg});
  MIRBuilder.buildSelect(Dst, CmpRes, BuildPtr, FlatNull.getReg(0));

  MI.eraseFromParent();
  return true;
}

bool AMDGPULegalizerInfo::legalizeFrint(
  MachineInstr &MI, MachineRegisterInfo &MRI,
  MachineIRBuilder &MIRBuilder) const {
  MIRBuilder.setInstr(MI);

  Register Src = MI.getOperand(1).getReg();
  LLT Ty = MRI.getType(Src);
  assert(Ty.isScalar() && Ty.getSizeInBits() == 64);

  APFloat C1Val(APFloat::IEEEdouble(), "0x1.0p+52");
  APFloat C2Val(APFloat::IEEEdouble(), "0x1.fffffffffffffp+51");

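  // Adding and subtracting the copysign-adjusted 2^52 rounds the value to an
  // integer: the add shifts any fractional bits out of the f64 significand.
  // Values whose magnitude exceeds 0x1.fffffffffffffp+51 are already
  // integers, so the final select returns the source unchanged.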
  auto C1 = MIRBuilder.buildFConstant(Ty, C1Val);
  auto CopySign = MIRBuilder.buildFCopysign(Ty, C1, Src);

  // TODO: Should this propagate fast-math-flags?
  auto Tmp1 = MIRBuilder.buildFAdd(Ty, Src, CopySign);
  auto Tmp2 = MIRBuilder.buildFSub(Ty, Tmp1, CopySign);

  auto C2 = MIRBuilder.buildFConstant(Ty, C2Val);
  auto Fabs = MIRBuilder.buildFAbs(Ty, Src);

  auto Cond = MIRBuilder.buildFCmp(CmpInst::FCMP_OGT, LLT::scalar(1), Fabs, C2);
  MIRBuilder.buildSelect(MI.getOperand(0).getReg(), Cond, Src, Tmp2);
  return true;
}

bool AMDGPULegalizerInfo::legalizeFceil(
  MachineInstr &MI, MachineRegisterInfo &MRI,
  MachineIRBuilder &B) const {
  B.setInstr(MI);

  const LLT S1 = LLT::scalar(1);
  const LLT S64 = LLT::scalar(64);

  Register Src = MI.getOperand(1).getReg();
  assert(MRI.getType(Src) == S64);

  // result = trunc(src)
  // if (src > 0.0 && src != result)
  //   result += 1.0

  auto Trunc = B.buildInstr(TargetOpcode::G_INTRINSIC_TRUNC, {S64}, {Src});

  const auto Zero = B.buildFConstant(S64, 0.0);
  const auto One = B.buildFConstant(S64, 1.0);
  auto Lt0 = B.buildFCmp(CmpInst::FCMP_OGT, S1, Src, Zero);
  auto NeTrunc = B.buildFCmp(CmpInst::FCMP_ONE, S1, Src, Trunc);
  auto And = B.buildAnd(S1, Lt0, NeTrunc);
  auto Add = B.buildSelect(S64, And, One, Zero);

  // TODO: Should this propagate fast-math-flags?
  B.buildFAdd(MI.getOperand(0).getReg(), Trunc, Add);
  return true;
}

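// Extract the biased exponent field of an f64 high word (11 bits starting at
// bit 20) with the amdgcn.ubfe intrinsic, then subtract the bias of 1023.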
static MachineInstrBuilder extractF64Exponent(unsigned Hi,
                                              MachineIRBuilder &B) {
  const unsigned FractBits = 52;
  const unsigned ExpBits = 11;
  LLT S32 = LLT::scalar(32);

  auto Const0 = B.buildConstant(S32, FractBits - 32);
  auto Const1 = B.buildConstant(S32, ExpBits);

  auto ExpPart = B.buildIntrinsic(Intrinsic::amdgcn_ubfe, {S32}, false)
    .addUse(Hi) // Bitfield source: the high 32 bits of the f64.
    .addUse(Const0.getReg(0))
    .addUse(Const1.getReg(0));

  return B.buildSub(S32, ExpPart, B.buildConstant(S32, 1023));
}

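// Truncate an f64 toward zero by masking off its fractional significand
// bits: for unbiased exponent Exp, the low 52 - Exp bits are fractional.
// A negative exponent truncates to a signed zero; an exponent above 51
// means the value is already an integer.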
bool AMDGPULegalizerInfo::legalizeIntrinsicTrunc(
  MachineInstr &MI, MachineRegisterInfo &MRI,
  MachineIRBuilder &B) const {
  B.setInstr(MI);

  const LLT S1 = LLT::scalar(1);
  const LLT S32 = LLT::scalar(32);
  const LLT S64 = LLT::scalar(64);

  Register Src = MI.getOperand(1).getReg();
  assert(MRI.getType(Src) == S64);

  // TODO: Should this use extract since the low half is unused?
  auto Unmerge = B.buildUnmerge({S32, S32}, Src);
  Register Hi = Unmerge.getReg(1);

  // Extract the upper half, since this is where we will find the sign and
  // exponent.
  auto Exp = extractF64Exponent(Hi, B);

  const unsigned FractBits = 52;

  // Extract the sign bit.
  const auto SignBitMask = B.buildConstant(S32, UINT32_C(1) << 31);
  auto SignBit = B.buildAnd(S32, Hi, SignBitMask);

  const auto FractMask = B.buildConstant(S64, (UINT64_C(1) << FractBits) - 1);

  const auto Zero32 = B.buildConstant(S32, 0);

  // Extend back to 64-bits.
  auto SignBit64 = B.buildMerge(S64, {Zero32.getReg(0), SignBit.getReg(0)});

  auto Shr = B.buildAShr(S64, FractMask, Exp);
  auto Not = B.buildNot(S64, Shr);
  auto Tmp0 = B.buildAnd(S64, Src, Not);
  auto FiftyOne = B.buildConstant(S32, FractBits - 1);

  auto ExpLt0 = B.buildICmp(CmpInst::ICMP_SLT, S1, Exp, Zero32);
  auto ExpGt51 = B.buildICmp(CmpInst::ICMP_SGT, S1, Exp, FiftyOne);

  auto Tmp1 = B.buildSelect(S64, ExpLt0, SignBit64, Tmp0);
  B.buildSelect(MI.getOperand(0).getReg(), ExpGt51, Src, Tmp1);
  return true;
}

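// Convert a 64-bit integer to f64 in two halves: convert the high word
// (signed or unsigned), scale it by 2^32 with amdgcn.ldexp, then add the
// unsigned conversion of the low word.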
bool AMDGPULegalizerInfo::legalizeITOFP(
  MachineInstr &MI, MachineRegisterInfo &MRI,
  MachineIRBuilder &B, bool Signed) const {
  B.setInstr(MI);

  Register Dst = MI.getOperand(0).getReg();
  Register Src = MI.getOperand(1).getReg();

  const LLT S64 = LLT::scalar(64);
  const LLT S32 = LLT::scalar(32);

  assert(MRI.getType(Src) == S64 && MRI.getType(Dst) == S64);

  auto Unmerge = B.buildUnmerge({S32, S32}, Src);

  auto CvtHi = Signed ?
    B.buildSITOFP(S64, Unmerge.getReg(1)) :
    B.buildUITOFP(S64, Unmerge.getReg(1));

  auto CvtLo = B.buildUITOFP(S64, Unmerge.getReg(0));

  auto ThirtyTwo = B.buildConstant(S32, 32);
  auto LdExp = B.buildIntrinsic(Intrinsic::amdgcn_ldexp, {S64}, false)
    .addUse(CvtHi.getReg(0))
    .addUse(ThirtyTwo.getReg(0));

  // TODO: Should this propagate fast-math-flags?
  B.buildFAdd(Dst, LdExp, CvtLo);
  MI.eraseFromParent();
  return true;
}

// Return the use branch instruction, otherwise null if the usage is invalid.
static MachineInstr *verifyCFIntrinsic(MachineInstr &MI,
                                       MachineRegisterInfo &MRI) {
  Register CondDef = MI.getOperand(0).getReg();
  if (!MRI.hasOneNonDBGUse(CondDef))
    return nullptr;

  MachineInstr &UseMI = *MRI.use_instr_nodbg_begin(CondDef);
  return UseMI.getParent() == MI.getParent() &&
         UseMI.getOpcode() == AMDGPU::G_BRCOND ? &UseMI : nullptr;
}

Register AMDGPULegalizerInfo::getLiveInRegister(MachineRegisterInfo &MRI,
                                                Register Reg, LLT Ty) const {
  Register LiveIn = MRI.getLiveInVirtReg(Reg);
  if (LiveIn)
    return LiveIn;

  Register NewReg = MRI.createGenericVirtualRegister(Ty);
  MRI.addLiveIn(Reg, NewReg);
  return NewReg;
}

bool AMDGPULegalizerInfo::loadInputValue(Register DstReg, MachineIRBuilder &B,
                                         const ArgDescriptor *Arg) const {
  if (!Arg->isRegister())
    return false; // TODO: Handle these

  assert(Arg->getRegister() != 0);
  assert(Arg->getRegister().isPhysical());

  MachineRegisterInfo &MRI = *B.getMRI();

  LLT Ty = MRI.getType(DstReg);
  Register LiveIn = getLiveInRegister(MRI, Arg->getRegister(), Ty);

  if (Arg->isMasked()) {
    // TODO: Should we try to emit this once in the entry block?
    const LLT S32 = LLT::scalar(32);
    const unsigned Mask = Arg->getMask();
    const unsigned Shift = countTrailingZeros<unsigned>(Mask);

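    // Extract the packed bitfield, e.g. a hypothetical mask of 0x3ff << 10
    // gives Shift = 10: shift the live-in right by 10, then AND with 0x3ff.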
    auto ShiftAmt = B.buildConstant(S32, Shift);
    auto LShr = B.buildLShr(S32, LiveIn, ShiftAmt);
    B.buildAnd(DstReg, LShr, B.buildConstant(S32, Mask >> Shift));
  } else
    B.buildCopy(DstReg, LiveIn);

  // Insert the argument copy if it doesn't already exist.
  // FIXME: It seems EmitLiveInCopies isn't called anywhere?
  if (!MRI.getVRegDef(LiveIn)) {
    MachineBasicBlock &EntryMBB = B.getMF().front();
    EntryMBB.addLiveIn(Arg->getRegister());
    B.setInsertPt(EntryMBB, EntryMBB.begin());
    B.buildCopy(LiveIn, Arg->getRegister());
  }

  return true;
}

bool AMDGPULegalizerInfo::legalizePreloadedArgIntrin(
  MachineInstr &MI,
  MachineRegisterInfo &MRI,
  MachineIRBuilder &B,
  AMDGPUFunctionArgInfo::PreloadedValue ArgType) const {
  B.setInstr(MI);

  const SIMachineFunctionInfo *MFI = B.getMF().getInfo<SIMachineFunctionInfo>();

  const ArgDescriptor *Arg;
  const TargetRegisterClass *RC;
  std::tie(Arg, RC) = MFI->getPreloadedValue(ArgType);
  if (!Arg) {
    LLVM_DEBUG(dbgs() << "Required arg register missing\n");
    return false;
  }

  if (loadInputValue(MI.getOperand(0).getReg(), B, Arg)) {
    MI.eraseFromParent();
    return true;
  }

  return false;
}

bool AMDGPULegalizerInfo::legalizeIntrinsic(MachineInstr &MI,
                                            MachineRegisterInfo &MRI,
                                            MachineIRBuilder &B) const {
  // Replace the use G_BRCOND with the exec manipulate and branch pseudos.
  switch (MI.getOperand(MI.getNumExplicitDefs()).getIntrinsicID()) {
  case Intrinsic::amdgcn_if: {
    if (MachineInstr *BrCond = verifyCFIntrinsic(MI, MRI)) {
      const SIRegisterInfo *TRI
        = static_cast<const SIRegisterInfo *>(MRI.getTargetRegisterInfo());

      B.setInstr(*BrCond);
      Register Def = MI.getOperand(1).getReg();
      Register Use = MI.getOperand(3).getReg();
      B.buildInstr(AMDGPU::SI_IF)
        .addDef(Def)
        .addUse(Use)
        .addMBB(BrCond->getOperand(1).getMBB());

      MRI.setRegClass(Def, TRI->getWaveMaskRegClass());
      MRI.setRegClass(Use, TRI->getWaveMaskRegClass());
      MI.eraseFromParent();
      BrCond->eraseFromParent();
      return true;
    }

    return false;
  }
  case Intrinsic::amdgcn_loop: {
    if (MachineInstr *BrCond = verifyCFIntrinsic(MI, MRI)) {
      const SIRegisterInfo *TRI
        = static_cast<const SIRegisterInfo *>(MRI.getTargetRegisterInfo());

      B.setInstr(*BrCond);
      Register Reg = MI.getOperand(2).getReg();
      B.buildInstr(AMDGPU::SI_LOOP)
        .addUse(Reg)
        .addMBB(BrCond->getOperand(1).getMBB());
      MI.eraseFromParent();
      BrCond->eraseFromParent();
      MRI.setRegClass(Reg, TRI->getWaveMaskRegClass());
      return true;
    }

    return false;
  }
  case Intrinsic::amdgcn_workitem_id_x:
    return legalizePreloadedArgIntrin(MI, MRI, B,
                                      AMDGPUFunctionArgInfo::WORKITEM_ID_X);
  case Intrinsic::amdgcn_workitem_id_y:
    return legalizePreloadedArgIntrin(MI, MRI, B,
                                      AMDGPUFunctionArgInfo::WORKITEM_ID_Y);
  case Intrinsic::amdgcn_workitem_id_z:
    return legalizePreloadedArgIntrin(MI, MRI, B,
                                      AMDGPUFunctionArgInfo::WORKITEM_ID_Z);
  case Intrinsic::amdgcn_workgroup_id_x:
    return legalizePreloadedArgIntrin(MI, MRI, B,
                                      AMDGPUFunctionArgInfo::WORKGROUP_ID_X);
  case Intrinsic::amdgcn_workgroup_id_y:
    return legalizePreloadedArgIntrin(MI, MRI, B,
                                      AMDGPUFunctionArgInfo::WORKGROUP_ID_Y);
  case Intrinsic::amdgcn_workgroup_id_z:
    return legalizePreloadedArgIntrin(MI, MRI, B,
                                      AMDGPUFunctionArgInfo::WORKGROUP_ID_Z);
  default:
    return true;
  }

  return true;
}