//===- AMDGPULegalizerInfo.cpp -----------------------------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the targeting of the MachineLegalizer class for
/// AMDGPU.
/// \todo This should be generated by TableGen.
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPULegalizerInfo.h"
#include "AMDGPUTargetMachine.h"
#include "SIMachineFunctionInfo.h"

#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/Debug.h"

#define DEBUG_TYPE "amdgpu-legalinfo"

using namespace llvm;
using namespace LegalizeActions;
using namespace LegalizeMutations;
using namespace LegalityPredicates;

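// True when the type fits in MaxSize bits and its scalar element size is a
// multiple of 32 bits.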
static LegalityPredicate isMultiple32(unsigned TypeIdx,
                                      unsigned MaxSize = 512) {
  return [=](const LegalityQuery &Query) {
    const LLT Ty = Query.Types[TypeIdx];
    const LLT EltTy = Ty.getScalarType();
    return Ty.getSizeInBits() <= MaxSize && EltTy.getSizeInBits() % 32 == 0;
  };
}

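// Matches vectors with an odd number of elements narrower than 32 bits
// (e.g. v3s16); the rules below typically pad such types with one extra
// element so they can be handled as whole 32-bit pieces.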
static LegalityPredicate isSmallOddVector(unsigned TypeIdx) {
  return [=](const LegalityQuery &Query) {
    const LLT Ty = Query.Types[TypeIdx];
    return Ty.isVector() &&
           Ty.getNumElements() % 2 != 0 &&
           Ty.getElementType().getSizeInBits() < 32;
  };
}

static LegalizeMutation oneMoreElement(unsigned TypeIdx) {
  return [=](const LegalityQuery &Query) {
    const LLT Ty = Query.Types[TypeIdx];
    const LLT EltTy = Ty.getElementType();
    return std::make_pair(TypeIdx, LLT::vector(Ty.getNumElements() + 1, EltTy));
  };
}

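// Mutation that reduces the element count of a wide vector so that each
// resulting piece is roughly 64 bits wide, keeping the element type.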
static LegalizeMutation fewerEltsToSize64Vector(unsigned TypeIdx) {
  return [=](const LegalityQuery &Query) {
    const LLT Ty = Query.Types[TypeIdx];
    const LLT EltTy = Ty.getElementType();
    unsigned Size = Ty.getSizeInBits();
    unsigned Pieces = (Size + 63) / 64;
    unsigned NewNumElts = (Ty.getNumElements() + 1) / Pieces;
    return std::make_pair(TypeIdx, LLT::scalarOrVector(NewNumElts, EltTy));
  };
}

static LegalityPredicate vectorWiderThan(unsigned TypeIdx, unsigned Size) {
  return [=](const LegalityQuery &Query) {
    const LLT QueryTy = Query.Types[TypeIdx];
    return QueryTy.isVector() && QueryTy.getSizeInBits() > Size;
  };
}

static LegalityPredicate numElementsNotEven(unsigned TypeIdx) {
  return [=](const LegalityQuery &Query) {
    const LLT QueryTy = Query.Types[TypeIdx];
    return QueryTy.isVector() && QueryTy.getNumElements() % 2 != 0;
  };
}

// Any combination of 32 or 64-bit elements up to 512 bits, and multiples of
// v2s16.
static LegalityPredicate isRegisterType(unsigned TypeIdx) {
  return [=](const LegalityQuery &Query) {
    const LLT Ty = Query.Types[TypeIdx];
    if (Ty.isVector()) {
      const int EltSize = Ty.getElementType().getSizeInBits();
      return EltSize == 32 || EltSize == 64 ||
             (EltSize == 16 && Ty.getNumElements() % 2 == 0) ||
             EltSize == 128 || EltSize == 256;
    }

    return Ty.getSizeInBits() % 32 == 0 && Ty.getSizeInBits() <= 512;
  };
}

AMDGPULegalizerInfo::AMDGPULegalizerInfo(const GCNSubtarget &ST_,
                                         const GCNTargetMachine &TM)
  : ST(ST_) {
  using namespace TargetOpcode;

  auto GetAddrSpacePtr = [&TM](unsigned AS) {
    return LLT::pointer(AS, TM.getPointerSizeInBits(AS));
  };

  const LLT S1 = LLT::scalar(1);
  const LLT S8 = LLT::scalar(8);
  const LLT S16 = LLT::scalar(16);
  const LLT S32 = LLT::scalar(32);
  const LLT S64 = LLT::scalar(64);
  const LLT S128 = LLT::scalar(128);
  const LLT S256 = LLT::scalar(256);
  const LLT S512 = LLT::scalar(512);

  const LLT V2S16 = LLT::vector(2, 16);
  const LLT V4S16 = LLT::vector(4, 16);

  const LLT V2S32 = LLT::vector(2, 32);
  const LLT V3S32 = LLT::vector(3, 32);
  const LLT V4S32 = LLT::vector(4, 32);
  const LLT V5S32 = LLT::vector(5, 32);
  const LLT V6S32 = LLT::vector(6, 32);
  const LLT V7S32 = LLT::vector(7, 32);
  const LLT V8S32 = LLT::vector(8, 32);
  const LLT V9S32 = LLT::vector(9, 32);
  const LLT V10S32 = LLT::vector(10, 32);
  const LLT V11S32 = LLT::vector(11, 32);
  const LLT V12S32 = LLT::vector(12, 32);
  const LLT V13S32 = LLT::vector(13, 32);
  const LLT V14S32 = LLT::vector(14, 32);
  const LLT V15S32 = LLT::vector(15, 32);
  const LLT V16S32 = LLT::vector(16, 32);

  const LLT V2S64 = LLT::vector(2, 64);
  const LLT V3S64 = LLT::vector(3, 64);
  const LLT V4S64 = LLT::vector(4, 64);
  const LLT V5S64 = LLT::vector(5, 64);
  const LLT V6S64 = LLT::vector(6, 64);
  const LLT V7S64 = LLT::vector(7, 64);
  const LLT V8S64 = LLT::vector(8, 64);

  std::initializer_list<LLT> AllS32Vectors =
    {V2S32, V3S32, V4S32, V5S32, V6S32, V7S32, V8S32,
     V9S32, V10S32, V11S32, V12S32, V13S32, V14S32, V15S32, V16S32};
  std::initializer_list<LLT> AllS64Vectors =
    {V2S64, V3S64, V4S64, V5S64, V6S64, V7S64, V8S64};

  const LLT GlobalPtr = GetAddrSpacePtr(AMDGPUAS::GLOBAL_ADDRESS);
  const LLT ConstantPtr = GetAddrSpacePtr(AMDGPUAS::CONSTANT_ADDRESS);
  const LLT LocalPtr = GetAddrSpacePtr(AMDGPUAS::LOCAL_ADDRESS);
  const LLT FlatPtr = GetAddrSpacePtr(AMDGPUAS::FLAT_ADDRESS);
  const LLT PrivatePtr = GetAddrSpacePtr(AMDGPUAS::PRIVATE_ADDRESS);

  const LLT CodePtr = FlatPtr;

  const std::initializer_list<LLT> AddrSpaces64 = {
    GlobalPtr, ConstantPtr, FlatPtr
  };

  const std::initializer_list<LLT> AddrSpaces32 = {
    LocalPtr, PrivatePtr
  };

  const std::initializer_list<LLT> FPTypesBase = {
    S32, S64
  };

  const std::initializer_list<LLT> FPTypes16 = {
    S32, S64, S16
  };

  setAction({G_BRCOND, S1}, Legal);

  // TODO: All multiples of 32, vectors of pointers, all v2s16 pairs, more
  // elements for v3s16
  getActionDefinitionsBuilder(G_PHI)
    .legalFor({S32, S64, V2S16, V4S16, S1, S128, S256})
    .legalFor(AllS32Vectors)
    .legalFor(AllS64Vectors)
    .legalFor(AddrSpaces64)
    .legalFor(AddrSpaces32)
    .clampScalar(0, S32, S256)
    .widenScalarToNextPow2(0, 32)
    .clampMaxNumElements(0, S32, 16)
    .moreElementsIf(isSmallOddVector(0), oneMoreElement(0))
    .legalIf(isPointer(0));

  if (ST.has16BitInsts()) {
    getActionDefinitionsBuilder({G_ADD, G_SUB, G_MUL})
      .legalFor({S32, S16})
      .clampScalar(0, S16, S32)
      .scalarize(0);
  } else {
    getActionDefinitionsBuilder({G_ADD, G_SUB, G_MUL})
      .legalFor({S32})
      .clampScalar(0, S32, S32)
      .scalarize(0);
  }

  getActionDefinitionsBuilder({G_UMULH, G_SMULH})
    .legalFor({S32})
    .clampScalar(0, S32, S32)
    .scalarize(0);

  // Report legal for any types we can handle anywhere. For the cases only legal
  // on the SALU, RegBankSelect will be able to re-legalize.
  getActionDefinitionsBuilder({G_AND, G_OR, G_XOR})
    .legalFor({S32, S1, S64, V2S32, V2S16, V4S16})
    .clampScalar(0, S32, S64)
    .moreElementsIf(isSmallOddVector(0), oneMoreElement(0))
    .fewerElementsIf(vectorWiderThan(0, 32), fewerEltsToSize64Vector(0))
    .widenScalarToNextPow2(0)
    .scalarize(0);

  getActionDefinitionsBuilder({G_UADDO, G_SADDO, G_USUBO, G_SSUBO,
                               G_UADDE, G_SADDE, G_USUBE, G_SSUBE})
    .legalFor({{S32, S1}})
    .clampScalar(0, S32, S32);

  getActionDefinitionsBuilder(G_BITCAST)
    .legalForCartesianProduct({S32, V2S16})
    .legalForCartesianProduct({S64, V2S32, V4S16})
    .legalForCartesianProduct({V2S64, V4S32})
    // Don't worry about the size constraint.
    .legalIf(all(isPointer(0), isPointer(1)));

  if (ST.has16BitInsts()) {
    getActionDefinitionsBuilder(G_FCONSTANT)
      .legalFor({S32, S64, S16})
      .clampScalar(0, S16, S64);
  } else {
    getActionDefinitionsBuilder(G_FCONSTANT)
      .legalFor({S32, S64})
      .clampScalar(0, S32, S64);
  }

  getActionDefinitionsBuilder(G_IMPLICIT_DEF)
    .legalFor({S1, S32, S64, V2S32, V4S32, V2S16, V4S16, GlobalPtr,
               ConstantPtr, LocalPtr, FlatPtr, PrivatePtr})
    .moreElementsIf(isSmallOddVector(0), oneMoreElement(0))
    .clampScalarOrElt(0, S32, S512)
    .legalIf(isMultiple32(0))
    .widenScalarToNextPow2(0, 32)
    .clampMaxNumElements(0, S32, 16);


  // FIXME: i1 operands to intrinsics should always be legal, but other i1
  // values may not be legal. We need to figure out how to distinguish
  // between these two scenarios.
  getActionDefinitionsBuilder(G_CONSTANT)
    .legalFor({S1, S32, S64, GlobalPtr,
               LocalPtr, ConstantPtr, PrivatePtr, FlatPtr })
    .clampScalar(0, S32, S64)
    .widenScalarToNextPow2(0)
    .legalIf(isPointer(0));

  setAction({G_FRAME_INDEX, PrivatePtr}, Legal);

  auto &FPOpActions = getActionDefinitionsBuilder(
    { G_FADD, G_FMUL, G_FNEG, G_FABS, G_FMA, G_FCANONICALIZE})
    .legalFor({S32, S64});

  if (ST.has16BitInsts()) {
    if (ST.hasVOP3PInsts())
      FPOpActions.legalFor({S16, V2S16});
    else
      FPOpActions.legalFor({S16});
  }

  if (ST.hasVOP3PInsts())
    FPOpActions.clampMaxNumElements(0, S16, 2);
  FPOpActions
    .scalarize(0)
    .clampScalar(0, ST.has16BitInsts() ? S16 : S32, S64);

  if (ST.has16BitInsts()) {
    getActionDefinitionsBuilder(G_FSQRT)
      .legalFor({S32, S64, S16})
      .scalarize(0)
      .clampScalar(0, S16, S64);
  } else {
    getActionDefinitionsBuilder(G_FSQRT)
      .legalFor({S32, S64})
      .scalarize(0)
      .clampScalar(0, S32, S64);
  }

  getActionDefinitionsBuilder(G_FPTRUNC)
    .legalFor({{S32, S64}, {S16, S32}})
    .scalarize(0);

  getActionDefinitionsBuilder(G_FPEXT)
    .legalFor({{S64, S32}, {S32, S16}})
    .lowerFor({{S64, S16}}) // FIXME: Implement
    .scalarize(0);

  getActionDefinitionsBuilder(G_FCOPYSIGN)
    .legalForCartesianProduct({S16, S32, S64}, {S16, S32, S64})
    .scalarize(0);

  getActionDefinitionsBuilder(G_FSUB)
    // Use actual fsub instruction
    .legalFor({S32})
    // Must use fadd + fneg
    .lowerFor({S64, S16, V2S16})
    .scalarize(0)
    .clampScalar(0, S32, S64);

  getActionDefinitionsBuilder({G_SEXT, G_ZEXT, G_ANYEXT})
    .legalFor({{S64, S32}, {S32, S16}, {S64, S16},
               {S32, S1}, {S64, S1}, {S16, S1},
               // FIXME: Hack
               {S64, LLT::scalar(33)},
               {S32, S8}, {S128, S32}, {S128, S64}, {S32, LLT::scalar(24)}})
    .scalarize(0);

  getActionDefinitionsBuilder({G_SITOFP, G_UITOFP})
    .legalFor({{S32, S32}, {S64, S32}})
    .lowerFor({{S32, S64}})
    .customFor({{S64, S64}})
    .scalarize(0);

  getActionDefinitionsBuilder({G_FPTOSI, G_FPTOUI})
    .legalFor({{S32, S32}, {S32, S64}})
    .scalarize(0);

  getActionDefinitionsBuilder(G_INTRINSIC_ROUND)
    .legalFor({S32, S64})
    .scalarize(0);

  if (ST.getGeneration() >= AMDGPUSubtarget::SEA_ISLANDS) {
    getActionDefinitionsBuilder({G_INTRINSIC_TRUNC, G_FCEIL, G_FRINT})
      .legalFor({S32, S64})
      .clampScalar(0, S32, S64)
      .scalarize(0);
  } else {
    getActionDefinitionsBuilder({G_INTRINSIC_TRUNC, G_FCEIL, G_FRINT})
      .legalFor({S32})
      .customFor({S64})
      .clampScalar(0, S32, S64)
      .scalarize(0);
  }

  getActionDefinitionsBuilder(G_GEP)
    .legalForCartesianProduct(AddrSpaces64, {S64})
    .legalForCartesianProduct(AddrSpaces32, {S32})
    .scalarize(0);

  setAction({G_BLOCK_ADDR, CodePtr}, Legal);

  auto &CmpBuilder =
    getActionDefinitionsBuilder(G_ICMP)
    .legalForCartesianProduct(
      {S1}, {S32, S64, GlobalPtr, LocalPtr, ConstantPtr, PrivatePtr, FlatPtr})
    .legalFor({{S1, S32}, {S1, S64}});
  if (ST.has16BitInsts()) {
    CmpBuilder.legalFor({{S1, S16}});
  }

  CmpBuilder
    .widenScalarToNextPow2(1)
    .clampScalar(1, S32, S64)
    .scalarize(0)
    .legalIf(all(typeIs(0, S1), isPointer(1)));

  getActionDefinitionsBuilder(G_FCMP)
    .legalForCartesianProduct({S1}, ST.has16BitInsts() ? FPTypes16 : FPTypesBase)
    .widenScalarToNextPow2(1)
    .clampScalar(1, S32, S64)
    .scalarize(0);

  // FIXME: fexp, flog2, flog10 need to be custom lowered.
  getActionDefinitionsBuilder({G_FPOW, G_FEXP, G_FEXP2,
                               G_FLOG, G_FLOG2, G_FLOG10})
    .legalFor({S32})
    .scalarize(0);

  // The 64-bit versions produce 32-bit results, but only on the SALU.
  getActionDefinitionsBuilder({G_CTLZ, G_CTLZ_ZERO_UNDEF,
                               G_CTTZ, G_CTTZ_ZERO_UNDEF,
                               G_CTPOP})
    .legalFor({{S32, S32}, {S32, S64}})
    .clampScalar(0, S32, S32)
    .clampScalar(1, S32, S64)
    .scalarize(0)
    .widenScalarToNextPow2(0, 32)
    .widenScalarToNextPow2(1, 32);

  // TODO: Expand for > s32
  getActionDefinitionsBuilder(G_BSWAP)
    .legalFor({S32})
    .clampScalar(0, S32, S32)
    .scalarize(0);

  if (ST.has16BitInsts()) {
    if (ST.hasVOP3PInsts()) {
      getActionDefinitionsBuilder({G_SMIN, G_SMAX, G_UMIN, G_UMAX})
        .legalFor({S32, S16, V2S16})
        .moreElementsIf(isSmallOddVector(0), oneMoreElement(0))
        .clampMaxNumElements(0, S16, 2)
        .clampScalar(0, S16, S32)
        .widenScalarToNextPow2(0)
        .scalarize(0);
    } else {
      getActionDefinitionsBuilder({G_SMIN, G_SMAX, G_UMIN, G_UMAX})
        .legalFor({S32, S16})
        .widenScalarToNextPow2(0)
        .clampScalar(0, S16, S32)
        .scalarize(0);
    }
  } else {
    getActionDefinitionsBuilder({G_SMIN, G_SMAX, G_UMIN, G_UMAX})
      .legalFor({S32})
      .clampScalar(0, S32, S32)
      .widenScalarToNextPow2(0)
      .scalarize(0);
  }

  auto smallerThan = [](unsigned TypeIdx0, unsigned TypeIdx1) {
    return [=](const LegalityQuery &Query) {
      return Query.Types[TypeIdx0].getSizeInBits() <
             Query.Types[TypeIdx1].getSizeInBits();
    };
  };

  auto greaterThan = [](unsigned TypeIdx0, unsigned TypeIdx1) {
    return [=](const LegalityQuery &Query) {
      return Query.Types[TypeIdx0].getSizeInBits() >
             Query.Types[TypeIdx1].getSizeInBits();
    };
  };

  getActionDefinitionsBuilder(G_INTTOPTR)
    // List the common cases
    .legalForCartesianProduct(AddrSpaces64, {S64})
    .legalForCartesianProduct(AddrSpaces32, {S32})
    .scalarize(0)
    // Accept any address space as long as the size matches
    .legalIf(sameSize(0, 1))
    .widenScalarIf(smallerThan(1, 0),
      [](const LegalityQuery &Query) {
        return std::make_pair(1, LLT::scalar(Query.Types[0].getSizeInBits()));
      })
    .narrowScalarIf(greaterThan(1, 0),
      [](const LegalityQuery &Query) {
        return std::make_pair(1, LLT::scalar(Query.Types[0].getSizeInBits()));
      });

  getActionDefinitionsBuilder(G_PTRTOINT)
    // List the common cases
    .legalForCartesianProduct(AddrSpaces64, {S64})
    .legalForCartesianProduct(AddrSpaces32, {S32})
    .scalarize(0)
    // Accept any address space as long as the size matches
    .legalIf(sameSize(0, 1))
    .widenScalarIf(smallerThan(0, 1),
      [](const LegalityQuery &Query) {
        return std::make_pair(0, LLT::scalar(Query.Types[1].getSizeInBits()));
      })
    .narrowScalarIf(
      greaterThan(0, 1),
      [](const LegalityQuery &Query) {
        return std::make_pair(0, LLT::scalar(Query.Types[1].getSizeInBits()));
      });

  if (ST.hasFlatAddressSpace()) {
    getActionDefinitionsBuilder(G_ADDRSPACE_CAST)
      .scalarize(0)
      .custom();
  }

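  // Load/store rules: results wider than 32 bits that load less memory than
  // their type size are narrowed to 32 bits, 96-bit vector accesses are split
  // in half on targets without dwordx3 load/stores, and the remaining cases
  // are legal when the register and memory sizes line up (8- and 16-bit
  // accesses only as 32-bit extending forms).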
  getActionDefinitionsBuilder({G_LOAD, G_STORE})
    .narrowScalarIf([](const LegalityQuery &Query) {
        unsigned Size = Query.Types[0].getSizeInBits();
        unsigned MemSize = Query.MMODescrs[0].SizeInBits;
        return (Size > 32 && MemSize < Size);
      },
      [](const LegalityQuery &Query) {
        return std::make_pair(0, LLT::scalar(32));
      })
    .fewerElementsIf([=](const LegalityQuery &Query) {
        unsigned MemSize = Query.MMODescrs[0].SizeInBits;
        return (MemSize == 96) &&
               Query.Types[0].isVector() &&
               !ST.hasDwordx3LoadStores();
      },
      [=](const LegalityQuery &Query) {
        return std::make_pair(0, V2S32);
      })
    .legalIf([=](const LegalityQuery &Query) {
        const LLT &Ty0 = Query.Types[0];

        unsigned Size = Ty0.getSizeInBits();
        unsigned MemSize = Query.MMODescrs[0].SizeInBits;
        if (Size < 32 || (Size > 32 && MemSize < Size))
          return false;

        if (Ty0.isVector() && Size != MemSize)
          return false;

        // TODO: Decompose private loads into 4-byte components.
        // TODO: Illegal flat loads on SI
        switch (MemSize) {
        case 8:
        case 16:
          return Size == 32;
        case 32:
        case 64:
        case 128:
          return true;

        case 96:
          return ST.hasDwordx3LoadStores();

        case 256:
        case 512:
          // TODO: constant loads
        default:
          return false;
        }
      })
    .clampScalar(0, S32, S64);


  // FIXME: Handle alignment requirements.
  auto &ExtLoads = getActionDefinitionsBuilder({G_SEXTLOAD, G_ZEXTLOAD})
                   .legalForTypesWithMemDesc({
                       {S32, GlobalPtr, 8, 8},
                       {S32, GlobalPtr, 16, 8},
                       {S32, LocalPtr, 8, 8},
                       {S32, LocalPtr, 16, 8},
                       {S32, PrivatePtr, 8, 8},
                       {S32, PrivatePtr, 16, 8}});
  if (ST.hasFlatAddressSpace()) {
    ExtLoads.legalForTypesWithMemDesc({{S32, FlatPtr, 8, 8},
                                       {S32, FlatPtr, 16, 8}});
  }

  ExtLoads.clampScalar(0, S32, S32)
          .widenScalarToNextPow2(0)
          .unsupportedIfMemSizeNotPow2()
          .lower();

  auto &Atomics = getActionDefinitionsBuilder(
    {G_ATOMICRMW_XCHG, G_ATOMICRMW_ADD, G_ATOMICRMW_SUB,
     G_ATOMICRMW_AND, G_ATOMICRMW_OR, G_ATOMICRMW_XOR,
     G_ATOMICRMW_MAX, G_ATOMICRMW_MIN, G_ATOMICRMW_UMAX,
     G_ATOMICRMW_UMIN, G_ATOMIC_CMPXCHG})
    .legalFor({{S32, GlobalPtr}, {S32, LocalPtr},
               {S64, GlobalPtr}, {S64, LocalPtr}});
  if (ST.hasFlatAddressSpace()) {
    Atomics.legalFor({{S32, FlatPtr}, {S64, FlatPtr}});
  }

  // TODO: Pointer types, any 32-bit or 64-bit vector
  getActionDefinitionsBuilder(G_SELECT)
    .legalForCartesianProduct({S32, S64, S16, V2S32, V2S16, V4S16,
          GlobalPtr, LocalPtr, FlatPtr, PrivatePtr,
          LLT::vector(2, LocalPtr), LLT::vector(2, PrivatePtr)}, {S1})
    .clampScalar(0, S16, S64)
    .moreElementsIf(isSmallOddVector(0), oneMoreElement(0))
    .fewerElementsIf(numElementsNotEven(0), scalarize(0))
    .scalarize(1)
    .clampMaxNumElements(0, S32, 2)
    .clampMaxNumElements(0, LocalPtr, 2)
    .clampMaxNumElements(0, PrivatePtr, 2)
    .scalarize(0)
    .widenScalarToNextPow2(0)
    .legalIf(all(isPointer(0), typeIs(1, S1)));

  // TODO: Only the low 4/5/6 bits of the shift amount are observed, so we can
  // be more flexible with the shift amount type.
  auto &Shifts = getActionDefinitionsBuilder({G_SHL, G_LSHR, G_ASHR})
    .legalFor({{S32, S32}, {S64, S32}});
  if (ST.has16BitInsts()) {
    if (ST.hasVOP3PInsts()) {
      Shifts.legalFor({{S16, S32}, {S16, S16}, {V2S16, V2S16}})
            .clampMaxNumElements(0, S16, 2);
    } else
      Shifts.legalFor({{S16, S32}, {S16, S16}});

    Shifts.clampScalar(1, S16, S32);
    Shifts.clampScalar(0, S16, S64);
    Shifts.widenScalarToNextPow2(0, 16);
  } else {
    // Make sure we legalize the shift amount type first, as the general
    // expansion for the shifted type will produce much worse code if it hasn't
    // been truncated already.
    Shifts.clampScalar(1, S32, S32);
    Shifts.clampScalar(0, S32, S64);
    Shifts.widenScalarToNextPow2(0, 32);
  }
  Shifts.scalarize(0);

  for (unsigned Op : {G_EXTRACT_VECTOR_ELT, G_INSERT_VECTOR_ELT}) {
    unsigned VecTypeIdx = Op == G_EXTRACT_VECTOR_ELT ? 1 : 0;
    unsigned EltTypeIdx = Op == G_EXTRACT_VECTOR_ELT ? 0 : 1;
    unsigned IdxTypeIdx = 2;

    getActionDefinitionsBuilder(Op)
      .legalIf([=](const LegalityQuery &Query) {
          const LLT &VecTy = Query.Types[VecTypeIdx];
          const LLT &IdxTy = Query.Types[IdxTypeIdx];
          return VecTy.getSizeInBits() % 32 == 0 &&
                 VecTy.getSizeInBits() <= 512 &&
                 IdxTy.getSizeInBits() == 32;
        })
      .clampScalar(EltTypeIdx, S32, S64)
      .clampScalar(VecTypeIdx, S32, S64)
      .clampScalar(IdxTypeIdx, S32, S32);
  }

  getActionDefinitionsBuilder(G_EXTRACT_VECTOR_ELT)
    .unsupportedIf([=](const LegalityQuery &Query) {
        const LLT &EltTy = Query.Types[1].getElementType();
        return Query.Types[0] != EltTy;
      });

  for (unsigned Op : {G_EXTRACT, G_INSERT}) {
    unsigned BigTyIdx = Op == G_EXTRACT ? 1 : 0;
    unsigned LitTyIdx = Op == G_EXTRACT ? 0 : 1;

    // FIXME: Doesn't handle extract of illegal sizes.
    getActionDefinitionsBuilder(Op)
      .legalIf([=](const LegalityQuery &Query) {
          const LLT BigTy = Query.Types[BigTyIdx];
          const LLT LitTy = Query.Types[LitTyIdx];
          return (BigTy.getSizeInBits() % 32 == 0) &&
                 (LitTy.getSizeInBits() % 16 == 0);
        })
      .widenScalarIf(
        [=](const LegalityQuery &Query) {
          const LLT BigTy = Query.Types[BigTyIdx];
          return (BigTy.getScalarSizeInBits() < 16);
        },
        LegalizeMutations::widenScalarOrEltToNextPow2(BigTyIdx, 16))
      .widenScalarIf(
        [=](const LegalityQuery &Query) {
          const LLT LitTy = Query.Types[LitTyIdx];
          return (LitTy.getScalarSizeInBits() < 16);
        },
        LegalizeMutations::widenScalarOrEltToNextPow2(LitTyIdx, 16))
      .moreElementsIf(isSmallOddVector(BigTyIdx), oneMoreElement(BigTyIdx))
      .widenScalarToNextPow2(BigTyIdx, 32);

  }

  getActionDefinitionsBuilder(G_BUILD_VECTOR)
    .legalForCartesianProduct(AllS32Vectors, {S32})
    .legalForCartesianProduct(AllS64Vectors, {S64})
    .clampNumElements(0, V16S32, V16S32)
    .clampNumElements(0, V2S64, V8S64)
    .minScalarSameAs(1, 0)
    .legalIf(isRegisterType(0))
    .minScalarOrElt(0, S32);

  getActionDefinitionsBuilder(G_CONCAT_VECTORS)
    .legalIf(isRegisterType(0));

  // Merge/Unmerge
  for (unsigned Op : {G_MERGE_VALUES, G_UNMERGE_VALUES}) {
    unsigned BigTyIdx = Op == G_MERGE_VALUES ? 0 : 1;
    unsigned LitTyIdx = Op == G_MERGE_VALUES ? 1 : 0;

    auto notValidElt = [=](const LegalityQuery &Query, unsigned TypeIdx) {
      const LLT &Ty = Query.Types[TypeIdx];
      if (Ty.isVector()) {
        const LLT &EltTy = Ty.getElementType();
        if (EltTy.getSizeInBits() < 8 || EltTy.getSizeInBits() > 64)
          return true;
        if (!isPowerOf2_32(EltTy.getSizeInBits()))
          return true;
      }
      return false;
    };

    getActionDefinitionsBuilder(Op)
      .widenScalarToNextPow2(LitTyIdx, /*Min*/ 16)
      // Clamp the little scalar to s8-s256 and make it a power of 2. It's not
      // worth considering the multiples of 64 since 2*192 and 2*384 are not
      // valid.
      .clampScalar(LitTyIdx, S16, S256)
      .widenScalarToNextPow2(LitTyIdx, /*Min*/ 32)

      // Break up vectors with weird elements into scalars
      .fewerElementsIf(
        [=](const LegalityQuery &Query) { return notValidElt(Query, 0); },
        scalarize(0))
      .fewerElementsIf(
        [=](const LegalityQuery &Query) { return notValidElt(Query, 1); },
        scalarize(1))
      .clampScalar(BigTyIdx, S32, S512)
      .widenScalarIf(
        [=](const LegalityQuery &Query) {
          const LLT &Ty = Query.Types[BigTyIdx];
          return !isPowerOf2_32(Ty.getSizeInBits()) &&
                 Ty.getSizeInBits() % 16 != 0;
        },
        [=](const LegalityQuery &Query) {
          // Pick the next power of 2, or a multiple of 64 over 128.
          // Whichever is smaller.
          const LLT &Ty = Query.Types[BigTyIdx];
          unsigned NewSizeInBits = 1 << Log2_32_Ceil(Ty.getSizeInBits() + 1);
          if (NewSizeInBits >= 256) {
            unsigned RoundedTo = alignTo<64>(Ty.getSizeInBits() + 1);
            if (RoundedTo < NewSizeInBits)
              NewSizeInBits = RoundedTo;
          }
          return std::make_pair(BigTyIdx, LLT::scalar(NewSizeInBits));
        })
      .legalIf([=](const LegalityQuery &Query) {
          const LLT &BigTy = Query.Types[BigTyIdx];
          const LLT &LitTy = Query.Types[LitTyIdx];

          if (BigTy.isVector() && BigTy.getSizeInBits() < 32)
            return false;
          if (LitTy.isVector() && LitTy.getSizeInBits() < 32)
            return false;

          return BigTy.getSizeInBits() % 16 == 0 &&
                 LitTy.getSizeInBits() % 16 == 0 &&
                 BigTy.getSizeInBits() <= 512;
        })
      // Any vectors left are the wrong size. Scalarize them.
      .scalarize(0)
      .scalarize(1);
  }

  computeTables();
  verify(*ST.getInstrInfo());
}

bool AMDGPULegalizerInfo::legalizeCustom(MachineInstr &MI,
                                         MachineRegisterInfo &MRI,
                                         MachineIRBuilder &MIRBuilder,
                                         GISelChangeObserver &Observer) const {
  switch (MI.getOpcode()) {
  case TargetOpcode::G_ADDRSPACE_CAST:
    return legalizeAddrSpaceCast(MI, MRI, MIRBuilder);
  case TargetOpcode::G_FRINT:
    return legalizeFrint(MI, MRI, MIRBuilder);
  case TargetOpcode::G_FCEIL:
    return legalizeFceil(MI, MRI, MIRBuilder);
  case TargetOpcode::G_INTRINSIC_TRUNC:
    return legalizeIntrinsicTrunc(MI, MRI, MIRBuilder);
  case TargetOpcode::G_SITOFP:
    return legalizeITOFP(MI, MRI, MIRBuilder, true);
  case TargetOpcode::G_UITOFP:
    return legalizeITOFP(MI, MRI, MIRBuilder, false);
  default:
    return false;
  }

  llvm_unreachable("expected switch to return");
}

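// Return a 32-bit value holding the aperture base of the given segment
// address space, either read from the hardware aperture registers via
// s_getreg or loaded from the queue pointer.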
Register AMDGPULegalizerInfo::getSegmentAperture(
  unsigned AS,
  MachineRegisterInfo &MRI,
  MachineIRBuilder &MIRBuilder) const {
  MachineFunction &MF = MIRBuilder.getMF();
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const LLT S32 = LLT::scalar(32);

  if (ST.hasApertureRegs()) {
    // FIXME: Use inline constants (src_{shared, private}_base) instead of
    // getreg.
    unsigned Offset = AS == AMDGPUAS::LOCAL_ADDRESS ?
        AMDGPU::Hwreg::OFFSET_SRC_SHARED_BASE :
        AMDGPU::Hwreg::OFFSET_SRC_PRIVATE_BASE;
    unsigned WidthM1 = AS == AMDGPUAS::LOCAL_ADDRESS ?
        AMDGPU::Hwreg::WIDTH_M1_SRC_SHARED_BASE :
        AMDGPU::Hwreg::WIDTH_M1_SRC_PRIVATE_BASE;
    unsigned Encoding =
        AMDGPU::Hwreg::ID_MEM_BASES << AMDGPU::Hwreg::ID_SHIFT_ |
        Offset << AMDGPU::Hwreg::OFFSET_SHIFT_ |
        WidthM1 << AMDGPU::Hwreg::WIDTH_M1_SHIFT_;

    Register ApertureReg = MRI.createGenericVirtualRegister(S32);
    Register GetReg = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);

    MIRBuilder.buildInstr(AMDGPU::S_GETREG_B32)
      .addDef(GetReg)
      .addImm(Encoding);
    MRI.setType(GetReg, S32);

    auto ShiftAmt = MIRBuilder.buildConstant(S32, WidthM1 + 1);
    MIRBuilder.buildInstr(TargetOpcode::G_SHL)
      .addDef(ApertureReg)
      .addUse(GetReg)
      .addUse(ShiftAmt.getReg(0));

    return ApertureReg;
  }

  Register QueuePtr = MRI.createGenericVirtualRegister(
    LLT::pointer(AMDGPUAS::CONSTANT_ADDRESS, 64));

  // FIXME: Placeholder until we can track the input registers.
  MIRBuilder.buildConstant(QueuePtr, 0xdeadbeef);

  // Offset into amd_queue_t for group_segment_aperture_base_hi /
  // private_segment_aperture_base_hi.
  uint32_t StructOffset = (AS == AMDGPUAS::LOCAL_ADDRESS) ? 0x40 : 0x44;

  // FIXME: Don't use undef
  Value *V = UndefValue::get(PointerType::get(
    Type::getInt8Ty(MF.getFunction().getContext()),
    AMDGPUAS::CONSTANT_ADDRESS));

  MachinePointerInfo PtrInfo(V, StructOffset);
  MachineMemOperand *MMO = MF.getMachineMemOperand(
    PtrInfo,
    MachineMemOperand::MOLoad |
    MachineMemOperand::MODereferenceable |
    MachineMemOperand::MOInvariant,
    4,
    MinAlign(64, StructOffset));

  Register LoadResult = MRI.createGenericVirtualRegister(S32);
  Register LoadAddr;

  MIRBuilder.materializeGEP(LoadAddr, QueuePtr, LLT::scalar(64), StructOffset);
  MIRBuilder.buildLoad(LoadResult, LoadAddr, *MMO);
  return LoadResult;
}

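// Lower G_ADDRSPACE_CAST. No-op casts become bitcasts; casts from flat to a
// 32-bit segment extract the low half of the pointer and select against the
// segment's null value, while casts from a segment to flat rebuild the 64-bit
// pointer from the 32-bit offset and the segment aperture, again guarded by a
// null-pointer check.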
bool AMDGPULegalizerInfo::legalizeAddrSpaceCast(
  MachineInstr &MI, MachineRegisterInfo &MRI,
  MachineIRBuilder &MIRBuilder) const {
  MachineFunction &MF = MIRBuilder.getMF();

  MIRBuilder.setInstr(MI);

  Register Dst = MI.getOperand(0).getReg();
  Register Src = MI.getOperand(1).getReg();

  LLT DstTy = MRI.getType(Dst);
  LLT SrcTy = MRI.getType(Src);
  unsigned DestAS = DstTy.getAddressSpace();
  unsigned SrcAS = SrcTy.getAddressSpace();

  // TODO: Avoid reloading from the queue ptr for each cast, or at least each
  // vector element.
  assert(!DstTy.isVector());

  const AMDGPUTargetMachine &TM
    = static_cast<const AMDGPUTargetMachine &>(MF.getTarget());

  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  if (ST.getTargetLowering()->isNoopAddrSpaceCast(SrcAS, DestAS)) {
    MI.setDesc(MIRBuilder.getTII().get(TargetOpcode::G_BITCAST));
    return true;
  }

  if (SrcAS == AMDGPUAS::FLAT_ADDRESS) {
    assert(DestAS == AMDGPUAS::LOCAL_ADDRESS ||
           DestAS == AMDGPUAS::PRIVATE_ADDRESS);
    unsigned NullVal = TM.getNullPointerValue(DestAS);

    auto SegmentNull = MIRBuilder.buildConstant(DstTy, NullVal);
    auto FlatNull = MIRBuilder.buildConstant(SrcTy, 0);

    Register PtrLo32 = MRI.createGenericVirtualRegister(DstTy);

    // Extract low 32-bits of the pointer.
    MIRBuilder.buildExtract(PtrLo32, Src, 0);

    Register CmpRes = MRI.createGenericVirtualRegister(LLT::scalar(1));
    MIRBuilder.buildICmp(CmpInst::ICMP_NE, CmpRes, Src, FlatNull.getReg(0));
    MIRBuilder.buildSelect(Dst, CmpRes, PtrLo32, SegmentNull.getReg(0));

    MI.eraseFromParent();
    return true;
  }

  assert(SrcAS == AMDGPUAS::LOCAL_ADDRESS ||
         SrcAS == AMDGPUAS::PRIVATE_ADDRESS);

  auto SegmentNull =
      MIRBuilder.buildConstant(SrcTy, TM.getNullPointerValue(SrcAS));
  auto FlatNull =
      MIRBuilder.buildConstant(DstTy, TM.getNullPointerValue(DestAS));

  Register ApertureReg = getSegmentAperture(DestAS, MRI, MIRBuilder);

  Register CmpRes = MRI.createGenericVirtualRegister(LLT::scalar(1));
  MIRBuilder.buildICmp(CmpInst::ICMP_NE, CmpRes, Src, SegmentNull.getReg(0));

  Register BuildPtr = MRI.createGenericVirtualRegister(DstTy);

  // Coerce the type of the low half of the result so we can use merge_values.
  Register SrcAsInt = MRI.createGenericVirtualRegister(LLT::scalar(32));
  MIRBuilder.buildInstr(TargetOpcode::G_PTRTOINT)
    .addDef(SrcAsInt)
    .addUse(Src);

  // TODO: Should we allow mismatched types but matching sizes in merges to
  // avoid the ptrtoint?
  MIRBuilder.buildMerge(BuildPtr, {SrcAsInt, ApertureReg});
  MIRBuilder.buildSelect(Dst, CmpRes, BuildPtr, FlatNull.getReg(0));

  MI.eraseFromParent();
  return true;
}

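// Expand 64-bit G_FRINT with the add-and-subtract-2^52 trick: adding and then
// subtracting a magic constant carrying the sign of the source rounds to the
// nearest integer, and the original value is kept when its magnitude is
// already too large to have a fractional part.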
bool AMDGPULegalizerInfo::legalizeFrint(
  MachineInstr &MI, MachineRegisterInfo &MRI,
  MachineIRBuilder &MIRBuilder) const {
  MIRBuilder.setInstr(MI);

  Register Src = MI.getOperand(1).getReg();
  LLT Ty = MRI.getType(Src);
  assert(Ty.isScalar() && Ty.getSizeInBits() == 64);

  APFloat C1Val(APFloat::IEEEdouble(), "0x1.0p+52");
  APFloat C2Val(APFloat::IEEEdouble(), "0x1.fffffffffffffp+51");

  auto C1 = MIRBuilder.buildFConstant(Ty, C1Val);
  auto CopySign = MIRBuilder.buildFCopysign(Ty, C1, Src);

  // TODO: Should this propagate fast-math-flags?
  auto Tmp1 = MIRBuilder.buildFAdd(Ty, Src, CopySign);
  auto Tmp2 = MIRBuilder.buildFSub(Ty, Tmp1, CopySign);

  auto C2 = MIRBuilder.buildFConstant(Ty, C2Val);
  auto Fabs = MIRBuilder.buildFAbs(Ty, Src);

  auto Cond = MIRBuilder.buildFCmp(CmpInst::FCMP_OGT, LLT::scalar(1), Fabs, C2);
  MIRBuilder.buildSelect(MI.getOperand(0).getReg(), Cond, Src, Tmp2);
  return true;
}

bool AMDGPULegalizerInfo::legalizeFceil(
  MachineInstr &MI, MachineRegisterInfo &MRI,
  MachineIRBuilder &B) const {
  B.setInstr(MI);

  const LLT S1 = LLT::scalar(1);
  const LLT S64 = LLT::scalar(64);

  Register Src = MI.getOperand(1).getReg();
  assert(MRI.getType(Src) == S64);

  // result = trunc(src)
  // if (src > 0.0 && src != result)
  //   result += 1.0

  auto Trunc = B.buildInstr(TargetOpcode::G_INTRINSIC_TRUNC, {S64}, {Src});

  const auto Zero = B.buildFConstant(S64, 0.0);
  const auto One = B.buildFConstant(S64, 1.0);
  auto Lt0 = B.buildFCmp(CmpInst::FCMP_OGT, S1, Src, Zero);
  auto NeTrunc = B.buildFCmp(CmpInst::FCMP_ONE, S1, Src, Trunc);
  auto And = B.buildAnd(S1, Lt0, NeTrunc);
  auto Add = B.buildSelect(S64, And, One, Zero);

  // TODO: Should this propagate fast-math-flags?
  B.buildFAdd(MI.getOperand(0).getReg(), Trunc, Add);
  return true;
}

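// Extract the exponent field from the high 32 bits of an f64 value and
// subtract the bias, yielding the unbiased exponent as an s32.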
static MachineInstrBuilder extractF64Exponent(unsigned Hi,
                                              MachineIRBuilder &B) {
  const unsigned FractBits = 52;
  const unsigned ExpBits = 11;
  LLT S32 = LLT::scalar(32);

  auto Const0 = B.buildConstant(S32, FractBits - 32);
  auto Const1 = B.buildConstant(S32, ExpBits);

  auto ExpPart = B.buildIntrinsic(Intrinsic::amdgcn_ubfe, {S32}, false)
    .addUse(Hi)
    .addUse(Const0.getReg(0))
    .addUse(Const1.getReg(0));

  return B.buildSub(S32, ExpPart, B.buildConstant(S32, 1023));
}

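// Expand 64-bit G_INTRINSIC_TRUNC by masking off the fractional bits selected
// by the exponent, with selects covering the no-integer-part (exponent < 0)
// and no-fractional-part (exponent > 51) cases.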
bool AMDGPULegalizerInfo::legalizeIntrinsicTrunc(
  MachineInstr &MI, MachineRegisterInfo &MRI,
  MachineIRBuilder &B) const {
  B.setInstr(MI);

  const LLT S1 = LLT::scalar(1);
  const LLT S32 = LLT::scalar(32);
  const LLT S64 = LLT::scalar(64);

  Register Src = MI.getOperand(1).getReg();
  assert(MRI.getType(Src) == S64);

  // TODO: Should this use extract since the low half is unused?
  auto Unmerge = B.buildUnmerge({S32, S32}, Src);
  Register Hi = Unmerge.getReg(1);

  // Extract the upper half, since this is where we will find the sign and
  // exponent.
  auto Exp = extractF64Exponent(Hi, B);

  const unsigned FractBits = 52;

  // Extract the sign bit.
  const auto SignBitMask = B.buildConstant(S32, UINT32_C(1) << 31);
  auto SignBit = B.buildAnd(S32, Hi, SignBitMask);

  const auto FractMask = B.buildConstant(S64, (UINT64_C(1) << FractBits) - 1);

  const auto Zero32 = B.buildConstant(S32, 0);

  // Extend back to 64-bits.
  auto SignBit64 = B.buildMerge(S64, {Zero32.getReg(0), SignBit.getReg(0)});

  auto Shr = B.buildAShr(S64, FractMask, Exp);
  auto Not = B.buildNot(S64, Shr);
  auto Tmp0 = B.buildAnd(S64, Src, Not);
  auto FiftyOne = B.buildConstant(S32, FractBits - 1);

  auto ExpLt0 = B.buildICmp(CmpInst::ICMP_SLT, S1, Exp, Zero32);
  auto ExpGt51 = B.buildICmp(CmpInst::ICMP_SGT, S1, Exp, FiftyOne);

  auto Tmp1 = B.buildSelect(S64, ExpLt0, SignBit64, Tmp0);
  B.buildSelect(MI.getOperand(0).getReg(), ExpGt51, Src, Tmp1);
  return true;
}

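// Expand 64-bit integer to f64 conversions: convert the two 32-bit halves
// separately (the high half signed or unsigned as requested) and combine them
// as ldexp(hi, 32) + lo.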
bool AMDGPULegalizerInfo::legalizeITOFP(
  MachineInstr &MI, MachineRegisterInfo &MRI,
  MachineIRBuilder &B, bool Signed) const {
  B.setInstr(MI);

  Register Dst = MI.getOperand(0).getReg();
  Register Src = MI.getOperand(1).getReg();

  const LLT S64 = LLT::scalar(64);
  const LLT S32 = LLT::scalar(32);

  assert(MRI.getType(Src) == S64 && MRI.getType(Dst) == S64);

  auto Unmerge = B.buildUnmerge({S32, S32}, Src);

  auto CvtHi = Signed ?
    B.buildSITOFP(S64, Unmerge.getReg(1)) :
    B.buildUITOFP(S64, Unmerge.getReg(1));

  auto CvtLo = B.buildUITOFP(S64, Unmerge.getReg(0));

  auto ThirtyTwo = B.buildConstant(S32, 32);
  auto LdExp = B.buildIntrinsic(Intrinsic::amdgcn_ldexp, {S64}, false)
    .addUse(CvtHi.getReg(0))
    .addUse(ThirtyTwo.getReg(0));

  // TODO: Should this propagate fast-math-flags?
  B.buildFAdd(Dst, LdExp, CvtLo);
  MI.eraseFromParent();
  return true;
}

// Return the use branch instruction, otherwise null if the usage is invalid.
static MachineInstr *verifyCFIntrinsic(MachineInstr &MI,
                                       MachineRegisterInfo &MRI) {
  Register CondDef = MI.getOperand(0).getReg();
  if (!MRI.hasOneNonDBGUse(CondDef))
    return nullptr;

  MachineInstr &UseMI = *MRI.use_instr_nodbg_begin(CondDef);
  return UseMI.getParent() == MI.getParent() &&
         UseMI.getOpcode() == AMDGPU::G_BRCOND ? &UseMI : nullptr;
}

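// Return the virtual register already associated with a live-in physical
// register, or create one of the given type and record it.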
Register AMDGPULegalizerInfo::getLiveInRegister(MachineRegisterInfo &MRI,
                                                Register Reg, LLT Ty) const {
  Register LiveIn = MRI.getLiveInVirtReg(Reg);
  if (LiveIn)
    return LiveIn;

  Register NewReg = MRI.createGenericVirtualRegister(Ty);
  MRI.addLiveIn(Reg, NewReg);
  return NewReg;
}

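// Copy a preloaded argument register into DstReg, unpacking masked arguments
// with a shift and mask, and insert the live-in copy in the entry block if it
// does not already exist.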
bool AMDGPULegalizerInfo::loadInputValue(Register DstReg, MachineIRBuilder &B,
                                         const ArgDescriptor *Arg) const {
  if (!Arg->isRegister())
    return false; // TODO: Handle these

  assert(Arg->getRegister() != 0);
  assert(Arg->getRegister().isPhysical());

  MachineRegisterInfo &MRI = *B.getMRI();

  LLT Ty = MRI.getType(DstReg);
  Register LiveIn = getLiveInRegister(MRI, Arg->getRegister(), Ty);

  if (Arg->isMasked()) {
    // TODO: Should we try to emit this once in the entry block?
    const LLT S32 = LLT::scalar(32);
    const unsigned Mask = Arg->getMask();
    const unsigned Shift = countTrailingZeros<unsigned>(Mask);

    auto ShiftAmt = B.buildConstant(S32, Shift);
    auto LShr = B.buildLShr(S32, LiveIn, ShiftAmt);
    B.buildAnd(DstReg, LShr, B.buildConstant(S32, Mask >> Shift));
  } else
    B.buildCopy(DstReg, LiveIn);

  // Insert the argument copy if it doesn't already exist.
  // FIXME: It seems EmitLiveInCopies isn't called anywhere?
  if (!MRI.getVRegDef(LiveIn)) {
    MachineBasicBlock &EntryMBB = B.getMF().front();
    EntryMBB.addLiveIn(Arg->getRegister());
    B.setInsertPt(EntryMBB, EntryMBB.begin());
    B.buildCopy(LiveIn, Arg->getRegister());
  }

  return true;
}

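// Legalize an intrinsic that simply reads a preloaded argument register: look
// the argument up in SIMachineFunctionInfo and copy it into the result.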
bool AMDGPULegalizerInfo::legalizePreloadedArgIntrin(
  MachineInstr &MI,
  MachineRegisterInfo &MRI,
  MachineIRBuilder &B,
  AMDGPUFunctionArgInfo::PreloadedValue ArgType) const {
  B.setInstr(MI);

  const SIMachineFunctionInfo *MFI = B.getMF().getInfo<SIMachineFunctionInfo>();

  const ArgDescriptor *Arg;
  const TargetRegisterClass *RC;
  std::tie(Arg, RC) = MFI->getPreloadedValue(ArgType);
  if (!Arg) {
    LLVM_DEBUG(dbgs() << "Required arg register missing\n");
    return false;
  }

  if (loadInputValue(MI.getOperand(0).getReg(), B, Arg)) {
    MI.eraseFromParent();
    return true;
  }

  return false;
}

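// In entry functions the implicit argument pointer is the kernarg segment
// pointer plus the implicit parameter offset; in other functions it is the
// preloaded IMPLICIT_ARG_PTR argument.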
bool AMDGPULegalizerInfo::legalizeImplicitArgPtr(MachineInstr &MI,
                                                 MachineRegisterInfo &MRI,
                                                 MachineIRBuilder &B) const {
  const SIMachineFunctionInfo *MFI = B.getMF().getInfo<SIMachineFunctionInfo>();
  if (!MFI->isEntryFunction()) {
    return legalizePreloadedArgIntrin(MI, MRI, B,
                                      AMDGPUFunctionArgInfo::IMPLICIT_ARG_PTR);
  }

  B.setInstr(MI);

  uint64_t Offset =
    ST.getTargetLowering()->getImplicitParameterOffset(
      B.getMF(), AMDGPUTargetLowering::FIRST_IMPLICIT);
  Register DstReg = MI.getOperand(0).getReg();
  LLT DstTy = MRI.getType(DstReg);
  LLT IdxTy = LLT::scalar(DstTy.getSizeInBits());

  const ArgDescriptor *Arg;
  const TargetRegisterClass *RC;
  std::tie(Arg, RC)
    = MFI->getPreloadedValue(AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR);
  if (!Arg)
    return false;

  Register KernargPtrReg = MRI.createGenericVirtualRegister(DstTy);
  if (!loadInputValue(KernargPtrReg, B, Arg))
    return false;

  B.buildGEP(DstReg, KernargPtrReg, B.buildConstant(IdxTy, Offset).getReg(0));
  MI.eraseFromParent();
  return true;
}

bool AMDGPULegalizerInfo::legalizeIntrinsic(MachineInstr &MI,
                                            MachineRegisterInfo &MRI,
                                            MachineIRBuilder &B) const {
  // Replace the use G_BRCOND with the exec manipulate and branch pseudos.
  switch (MI.getOperand(MI.getNumExplicitDefs()).getIntrinsicID()) {
  case Intrinsic::amdgcn_if: {
    if (MachineInstr *BrCond = verifyCFIntrinsic(MI, MRI)) {
      const SIRegisterInfo *TRI
        = static_cast<const SIRegisterInfo *>(MRI.getTargetRegisterInfo());

      B.setInstr(*BrCond);
      Register Def = MI.getOperand(1).getReg();
      Register Use = MI.getOperand(3).getReg();
      B.buildInstr(AMDGPU::SI_IF)
        .addDef(Def)
        .addUse(Use)
        .addMBB(BrCond->getOperand(1).getMBB());

      MRI.setRegClass(Def, TRI->getWaveMaskRegClass());
      MRI.setRegClass(Use, TRI->getWaveMaskRegClass());
      MI.eraseFromParent();
      BrCond->eraseFromParent();
      return true;
    }

    return false;
  }
  case Intrinsic::amdgcn_loop: {
    if (MachineInstr *BrCond = verifyCFIntrinsic(MI, MRI)) {
      const SIRegisterInfo *TRI
        = static_cast<const SIRegisterInfo *>(MRI.getTargetRegisterInfo());

      B.setInstr(*BrCond);
      Register Reg = MI.getOperand(2).getReg();
      B.buildInstr(AMDGPU::SI_LOOP)
        .addUse(Reg)
        .addMBB(BrCond->getOperand(1).getMBB());
      MI.eraseFromParent();
      BrCond->eraseFromParent();
      MRI.setRegClass(Reg, TRI->getWaveMaskRegClass());
      return true;
    }

    return false;
  }
  case Intrinsic::amdgcn_kernarg_segment_ptr:
    return legalizePreloadedArgIntrin(
      MI, MRI, B, AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR);
  case Intrinsic::amdgcn_implicitarg_ptr:
    return legalizeImplicitArgPtr(MI, MRI, B);
  case Intrinsic::amdgcn_workitem_id_x:
    return legalizePreloadedArgIntrin(MI, MRI, B,
                                      AMDGPUFunctionArgInfo::WORKITEM_ID_X);
  case Intrinsic::amdgcn_workitem_id_y:
    return legalizePreloadedArgIntrin(MI, MRI, B,
                                      AMDGPUFunctionArgInfo::WORKITEM_ID_Y);
  case Intrinsic::amdgcn_workitem_id_z:
    return legalizePreloadedArgIntrin(MI, MRI, B,
                                      AMDGPUFunctionArgInfo::WORKITEM_ID_Z);
  case Intrinsic::amdgcn_workgroup_id_x:
    return legalizePreloadedArgIntrin(MI, MRI, B,
                                      AMDGPUFunctionArgInfo::WORKGROUP_ID_X);
  case Intrinsic::amdgcn_workgroup_id_y:
    return legalizePreloadedArgIntrin(MI, MRI, B,
                                      AMDGPUFunctionArgInfo::WORKGROUP_ID_Y);
  case Intrinsic::amdgcn_workgroup_id_z:
    return legalizePreloadedArgIntrin(MI, MRI, B,
                                      AMDGPUFunctionArgInfo::WORKGROUP_ID_Z);
  case Intrinsic::amdgcn_dispatch_ptr:
    return legalizePreloadedArgIntrin(MI, MRI, B,
                                      AMDGPUFunctionArgInfo::DISPATCH_PTR);
  case Intrinsic::amdgcn_queue_ptr:
    return legalizePreloadedArgIntrin(MI, MRI, B,
                                      AMDGPUFunctionArgInfo::QUEUE_PTR);
  case Intrinsic::amdgcn_implicit_buffer_ptr:
    return legalizePreloadedArgIntrin(
      MI, MRI, B, AMDGPUFunctionArgInfo::IMPLICIT_BUFFER_PTR);
  case Intrinsic::amdgcn_dispatch_id:
    return legalizePreloadedArgIntrin(MI, MRI, B,
                                      AMDGPUFunctionArgInfo::DISPATCH_ID);
  default:
    return true;
  }

  return true;
}