//===- AMDGPULegalizerInfo.cpp -----------------------------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the targeting of the MachineLegalizer class for
/// AMDGPU.
/// \todo This should be generated by TableGen.
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPULegalizerInfo.h"
#include "AMDGPUTargetMachine.h"
#include "SIMachineFunctionInfo.h"
#include "llvm/CodeGen/GlobalISel/LegalizerHelper.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/Debug.h"

#define DEBUG_TYPE "amdgpu-legalinfo"

using namespace llvm;
using namespace LegalizeActions;
using namespace LegalizeMutations;
using namespace LegalityPredicates;

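// True if the type at TypeIdx is at most MaxSize bits and each scalar element
// is a whole multiple of 32 bits, so it maps cleanly onto 32-bit registers.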
static LegalityPredicate isMultiple32(unsigned TypeIdx,
                                      unsigned MaxSize = 512) {
  return [=](const LegalityQuery &Query) {
    const LLT Ty = Query.Types[TypeIdx];
    const LLT EltTy = Ty.getScalarType();
    return Ty.getSizeInBits() <= MaxSize && EltTy.getSizeInBits() % 32 == 0;
  };
}

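// True if the type at TypeIdx is a vector with an odd element count and
// sub-32-bit elements; such vectors get padded out with one extra element.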
static LegalityPredicate isSmallOddVector(unsigned TypeIdx) {
  return [=](const LegalityQuery &Query) {
    const LLT Ty = Query.Types[TypeIdx];
    return Ty.isVector() &&
           Ty.getNumElements() % 2 != 0 &&
           Ty.getElementType().getSizeInBits() < 32;
  };
}

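// Mutation that appends one element of the same element type, turning an odd
// element count into an even one.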
static LegalizeMutation oneMoreElement(unsigned TypeIdx) {
  return [=](const LegalityQuery &Query) {
    const LLT Ty = Query.Types[TypeIdx];
    const LLT EltTy = Ty.getElementType();
    return std::make_pair(TypeIdx, LLT::vector(Ty.getNumElements() + 1, EltTy));
  };
}

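// Mutation that reduces the element count so each resulting piece is roughly
// 64 bits wide.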
static LegalizeMutation fewerEltsToSize64Vector(unsigned TypeIdx) {
  return [=](const LegalityQuery &Query) {
    const LLT Ty = Query.Types[TypeIdx];
    const LLT EltTy = Ty.getElementType();
    unsigned Size = Ty.getSizeInBits();
    unsigned Pieces = (Size + 63) / 64;
    unsigned NewNumElts = (Ty.getNumElements() + 1) / Pieces;
    return std::make_pair(TypeIdx, LLT::scalarOrVector(NewNumElts, EltTy));
  };
}

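// True if the type at TypeIdx is a vector whose total size exceeds Size bits.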
static LegalityPredicate vectorWiderThan(unsigned TypeIdx, unsigned Size) {
  return [=](const LegalityQuery &Query) {
    const LLT QueryTy = Query.Types[TypeIdx];
    return QueryTy.isVector() && QueryTy.getSizeInBits() > Size;
  };
}

static LegalityPredicate numElementsNotEven(unsigned TypeIdx) {
  return [=](const LegalityQuery &Query) {
    const LLT QueryTy = Query.Types[TypeIdx];
    return QueryTy.isVector() && QueryTy.getNumElements() % 2 != 0;
  };
}

// Any combination of 32 or 64-bit elements up to 512 bits, and multiples of
// v2s16.
static LegalityPredicate isRegisterType(unsigned TypeIdx) {
  return [=](const LegalityQuery &Query) {
    const LLT Ty = Query.Types[TypeIdx];
    if (Ty.isVector()) {
      const int EltSize = Ty.getElementType().getSizeInBits();
      return EltSize == 32 || EltSize == 64 ||
             (EltSize == 16 && Ty.getNumElements() % 2 == 0) ||
             EltSize == 128 || EltSize == 256;
    }

    return Ty.getSizeInBits() % 32 == 0 && Ty.getSizeInBits() <= 512;
  };
}

static LegalityPredicate elementTypeIs(unsigned TypeIdx, LLT Type) {
  return [=](const LegalityQuery &Query) {
    return Query.Types[TypeIdx].getElementType() == Type;
  };
}

AMDGPULegalizerInfo::AMDGPULegalizerInfo(const GCNSubtarget &ST_,
                                         const GCNTargetMachine &TM)
  : ST(ST_) {
  using namespace TargetOpcode;

  auto GetAddrSpacePtr = [&TM](unsigned AS) {
    return LLT::pointer(AS, TM.getPointerSizeInBits(AS));
  };

  const LLT S1 = LLT::scalar(1);
  const LLT S8 = LLT::scalar(8);
  const LLT S16 = LLT::scalar(16);
  const LLT S32 = LLT::scalar(32);
  const LLT S64 = LLT::scalar(64);
  const LLT S128 = LLT::scalar(128);
  const LLT S256 = LLT::scalar(256);
  const LLT S512 = LLT::scalar(512);

  const LLT V2S16 = LLT::vector(2, 16);
  const LLT V4S16 = LLT::vector(4, 16);

  const LLT V2S32 = LLT::vector(2, 32);
  const LLT V3S32 = LLT::vector(3, 32);
  const LLT V4S32 = LLT::vector(4, 32);
  const LLT V5S32 = LLT::vector(5, 32);
  const LLT V6S32 = LLT::vector(6, 32);
  const LLT V7S32 = LLT::vector(7, 32);
  const LLT V8S32 = LLT::vector(8, 32);
  const LLT V9S32 = LLT::vector(9, 32);
  const LLT V10S32 = LLT::vector(10, 32);
  const LLT V11S32 = LLT::vector(11, 32);
  const LLT V12S32 = LLT::vector(12, 32);
  const LLT V13S32 = LLT::vector(13, 32);
  const LLT V14S32 = LLT::vector(14, 32);
  const LLT V15S32 = LLT::vector(15, 32);
  const LLT V16S32 = LLT::vector(16, 32);

  const LLT V2S64 = LLT::vector(2, 64);
  const LLT V3S64 = LLT::vector(3, 64);
  const LLT V4S64 = LLT::vector(4, 64);
  const LLT V5S64 = LLT::vector(5, 64);
  const LLT V6S64 = LLT::vector(6, 64);
  const LLT V7S64 = LLT::vector(7, 64);
  const LLT V8S64 = LLT::vector(8, 64);

  std::initializer_list<LLT> AllS32Vectors =
    {V2S32, V3S32, V4S32, V5S32, V6S32, V7S32, V8S32,
     V9S32, V10S32, V11S32, V12S32, V13S32, V14S32, V15S32, V16S32};
  std::initializer_list<LLT> AllS64Vectors =
    {V2S64, V3S64, V4S64, V5S64, V6S64, V7S64, V8S64};

  const LLT GlobalPtr = GetAddrSpacePtr(AMDGPUAS::GLOBAL_ADDRESS);
  const LLT ConstantPtr = GetAddrSpacePtr(AMDGPUAS::CONSTANT_ADDRESS);
  const LLT Constant32Ptr = GetAddrSpacePtr(AMDGPUAS::CONSTANT_ADDRESS_32BIT);
  const LLT LocalPtr = GetAddrSpacePtr(AMDGPUAS::LOCAL_ADDRESS);
  const LLT RegionPtr = GetAddrSpacePtr(AMDGPUAS::REGION_ADDRESS);
  const LLT FlatPtr = GetAddrSpacePtr(AMDGPUAS::FLAT_ADDRESS);
  const LLT PrivatePtr = GetAddrSpacePtr(AMDGPUAS::PRIVATE_ADDRESS);

  const LLT CodePtr = FlatPtr;

  const std::initializer_list<LLT> AddrSpaces64 = {
    GlobalPtr, ConstantPtr, FlatPtr
  };

  const std::initializer_list<LLT> AddrSpaces32 = {
    LocalPtr, PrivatePtr, Constant32Ptr, RegionPtr
  };

  const std::initializer_list<LLT> FPTypesBase = {
    S32, S64
  };

  const std::initializer_list<LLT> FPTypes16 = {
    S32, S64, S16
  };

  const std::initializer_list<LLT> FPTypesPK16 = {
    S32, S64, S16, V2S16
  };

  setAction({G_BRCOND, S1}, Legal);

  // TODO: All multiples of 32, vectors of pointers, all v2s16 pairs, more
  // elements for v3s16
  getActionDefinitionsBuilder(G_PHI)
    .legalFor({S32, S64, V2S16, V4S16, S1, S128, S256})
    .legalFor(AllS32Vectors)
    .legalFor(AllS64Vectors)
    .legalFor(AddrSpaces64)
    .legalFor(AddrSpaces32)
    .clampScalar(0, S32, S256)
    .widenScalarToNextPow2(0, 32)
    .clampMaxNumElements(0, S32, 16)
    .moreElementsIf(isSmallOddVector(0), oneMoreElement(0))
    .legalIf(isPointer(0));

  if (ST.has16BitInsts()) {
    getActionDefinitionsBuilder({G_ADD, G_SUB, G_MUL})
      .legalFor({S32, S16})
      .clampScalar(0, S16, S32)
      .scalarize(0);
  } else {
    getActionDefinitionsBuilder({G_ADD, G_SUB, G_MUL})
      .legalFor({S32})
      .clampScalar(0, S32, S32)
      .scalarize(0);
  }

  getActionDefinitionsBuilder({G_UMULH, G_SMULH})
    .legalFor({S32})
    .clampScalar(0, S32, S32)
    .scalarize(0);

  // Report legal for any types we can handle anywhere. For the cases only legal
  // on the SALU, RegBankSelect will be able to re-legalize.
  getActionDefinitionsBuilder({G_AND, G_OR, G_XOR})
    .legalFor({S32, S1, S64, V2S32, S16, V2S16, V4S16})
    .clampScalar(0, S32, S64)
    .moreElementsIf(isSmallOddVector(0), oneMoreElement(0))
    .fewerElementsIf(vectorWiderThan(0, 32), fewerEltsToSize64Vector(0))
    .widenScalarToNextPow2(0)
    .scalarize(0);

  getActionDefinitionsBuilder({G_UADDO, G_SADDO, G_USUBO, G_SSUBO,
                               G_UADDE, G_SADDE, G_USUBE, G_SSUBE})
    .legalFor({{S32, S1}})
    .clampScalar(0, S32, S32);

  getActionDefinitionsBuilder(G_BITCAST)
    .legalForCartesianProduct({S32, V2S16})
    .legalForCartesianProduct({S64, V2S32, V4S16})
    .legalForCartesianProduct({V2S64, V4S32})
    // Don't worry about the size constraint.
    .legalIf(all(isPointer(0), isPointer(1)));

  if (ST.has16BitInsts()) {
    getActionDefinitionsBuilder(G_FCONSTANT)
      .legalFor({S32, S64, S16})
      .clampScalar(0, S16, S64);
  } else {
    getActionDefinitionsBuilder(G_FCONSTANT)
      .legalFor({S32, S64})
      .clampScalar(0, S32, S64);
  }

  getActionDefinitionsBuilder(G_IMPLICIT_DEF)
    .legalFor({S1, S32, S64, V2S32, V4S32, V2S16, V4S16, GlobalPtr,
               ConstantPtr, LocalPtr, FlatPtr, PrivatePtr})
    .moreElementsIf(isSmallOddVector(0), oneMoreElement(0))
    .clampScalarOrElt(0, S32, S512)
    .legalIf(isMultiple32(0))
    .widenScalarToNextPow2(0, 32)
    .clampMaxNumElements(0, S32, 16);

  // FIXME: i1 operands to intrinsics should always be legal, but other i1
  // values may not be legal. We need to figure out how to distinguish
  // between these two scenarios.
  getActionDefinitionsBuilder(G_CONSTANT)
    .legalFor({S1, S32, S64, GlobalPtr,
               LocalPtr, ConstantPtr, PrivatePtr, FlatPtr })
    .clampScalar(0, S32, S64)
    .widenScalarToNextPow2(0)
    .legalIf(isPointer(0));

  setAction({G_FRAME_INDEX, PrivatePtr}, Legal);

  auto &FPOpActions = getActionDefinitionsBuilder(
    { G_FADD, G_FMUL, G_FNEG, G_FABS, G_FMA, G_FCANONICALIZE})
    .legalFor({S32, S64});

  if (ST.has16BitInsts()) {
    if (ST.hasVOP3PInsts())
      FPOpActions.legalFor({S16, V2S16});
    else
      FPOpActions.legalFor({S16});
  }

  auto &MinNumMaxNum = getActionDefinitionsBuilder({
      G_FMINNUM, G_FMAXNUM, G_FMINNUM_IEEE, G_FMAXNUM_IEEE});

  if (ST.hasVOP3PInsts()) {
    MinNumMaxNum.customFor(FPTypesPK16)
      .clampMaxNumElements(0, S16, 2)
      .clampScalar(0, S16, S64)
      .scalarize(0);
  } else if (ST.has16BitInsts()) {
    MinNumMaxNum.customFor(FPTypes16)
      .clampScalar(0, S16, S64)
      .scalarize(0);
  } else {
    MinNumMaxNum.customFor(FPTypesBase)
      .clampScalar(0, S32, S64)
      .scalarize(0);
  }

  // TODO: Implement
  getActionDefinitionsBuilder({G_FMINIMUM, G_FMAXIMUM}).lower();

  if (ST.hasVOP3PInsts())
    FPOpActions.clampMaxNumElements(0, S16, 2);
  FPOpActions
    .scalarize(0)
    .clampScalar(0, ST.has16BitInsts() ? S16 : S32, S64);

  if (ST.has16BitInsts()) {
    getActionDefinitionsBuilder(G_FSQRT)
      .legalFor({S32, S64, S16})
      .scalarize(0)
      .clampScalar(0, S16, S64);
  } else {
    getActionDefinitionsBuilder(G_FSQRT)
      .legalFor({S32, S64})
      .scalarize(0)
      .clampScalar(0, S32, S64);
  }

  getActionDefinitionsBuilder(G_FPTRUNC)
    .legalFor({{S32, S64}, {S16, S32}})
    .scalarize(0);

  getActionDefinitionsBuilder(G_FPEXT)
    .legalFor({{S64, S32}, {S32, S16}})
    .lowerFor({{S64, S16}}) // FIXME: Implement
    .scalarize(0);

  // TODO: Verify V_BFI_B32 is generated from expanded bit ops.
  getActionDefinitionsBuilder(G_FCOPYSIGN).lower();

  getActionDefinitionsBuilder(G_FSUB)
    // Use actual fsub instruction
    .legalFor({S32})
    // Must use fadd + fneg
    .lowerFor({S64, S16, V2S16})
    .scalarize(0)
    .clampScalar(0, S32, S64);

  getActionDefinitionsBuilder({G_SEXT, G_ZEXT, G_ANYEXT})
    .legalFor({{S64, S32}, {S32, S16}, {S64, S16},
               {S32, S1}, {S64, S1}, {S16, S1},
               // FIXME: Hack
               {S64, LLT::scalar(33)},
               {S32, S8}, {S128, S32}, {S128, S64}, {S32, LLT::scalar(24)}})
    .scalarize(0);

  getActionDefinitionsBuilder({G_SITOFP, G_UITOFP})
    .legalFor({{S32, S32}, {S64, S32}})
    .lowerFor({{S32, S64}})
    .customFor({{S64, S64}})
    .scalarize(0);

  getActionDefinitionsBuilder({G_FPTOSI, G_FPTOUI})
    .legalFor({{S32, S32}, {S32, S64}})
    .scalarize(0);

  getActionDefinitionsBuilder(G_INTRINSIC_ROUND)
    .legalFor({S32, S64})
    .scalarize(0);

  if (ST.getGeneration() >= AMDGPUSubtarget::SEA_ISLANDS) {
    getActionDefinitionsBuilder({G_INTRINSIC_TRUNC, G_FCEIL, G_FRINT})
      .legalFor({S32, S64})
      .clampScalar(0, S32, S64)
      .scalarize(0);
  } else {
    getActionDefinitionsBuilder({G_INTRINSIC_TRUNC, G_FCEIL, G_FRINT})
      .legalFor({S32})
      .customFor({S64})
      .clampScalar(0, S32, S64)
      .scalarize(0);
  }

  getActionDefinitionsBuilder(G_GEP)
    .legalForCartesianProduct(AddrSpaces64, {S64})
    .legalForCartesianProduct(AddrSpaces32, {S32})
    .scalarize(0);

  setAction({G_BLOCK_ADDR, CodePtr}, Legal);

  auto &CmpBuilder =
    getActionDefinitionsBuilder(G_ICMP)
    .legalForCartesianProduct(
      {S1}, {S32, S64, GlobalPtr, LocalPtr, ConstantPtr, PrivatePtr, FlatPtr})
    .legalFor({{S1, S32}, {S1, S64}});
  if (ST.has16BitInsts()) {
    CmpBuilder.legalFor({{S1, S16}});
  }

  CmpBuilder
    .widenScalarToNextPow2(1)
    .clampScalar(1, S32, S64)
    .scalarize(0)
    .legalIf(all(typeIs(0, S1), isPointer(1)));

  getActionDefinitionsBuilder(G_FCMP)
    .legalForCartesianProduct({S1}, ST.has16BitInsts() ? FPTypes16 : FPTypesBase)
    .widenScalarToNextPow2(1)
    .clampScalar(1, S32, S64)
    .scalarize(0);

  // FIXME: fexp, flog2, flog10 needs to be custom lowered.
  getActionDefinitionsBuilder({G_FPOW, G_FEXP, G_FEXP2,
                               G_FLOG, G_FLOG2, G_FLOG10})
    .legalFor({S32})
    .scalarize(0);

  // The 64-bit versions produce 32-bit results, but only on the SALU.
  getActionDefinitionsBuilder({G_CTLZ, G_CTLZ_ZERO_UNDEF,
                               G_CTTZ, G_CTTZ_ZERO_UNDEF,
                               G_CTPOP})
    .legalFor({{S32, S32}, {S32, S64}})
    .clampScalar(0, S32, S32)
    .clampScalar(1, S32, S64)
    .scalarize(0)
    .widenScalarToNextPow2(0, 32)
    .widenScalarToNextPow2(1, 32);

  // TODO: Expand for > s32
  getActionDefinitionsBuilder(G_BSWAP)
    .legalFor({S32})
    .clampScalar(0, S32, S32)
    .scalarize(0);

  if (ST.has16BitInsts()) {
    if (ST.hasVOP3PInsts()) {
      getActionDefinitionsBuilder({G_SMIN, G_SMAX, G_UMIN, G_UMAX})
        .legalFor({S32, S16, V2S16})
        .moreElementsIf(isSmallOddVector(0), oneMoreElement(0))
        .clampMaxNumElements(0, S16, 2)
        .clampScalar(0, S16, S32)
        .widenScalarToNextPow2(0)
        .scalarize(0);
    } else {
      getActionDefinitionsBuilder({G_SMIN, G_SMAX, G_UMIN, G_UMAX})
        .legalFor({S32, S16})
        .widenScalarToNextPow2(0)
        .clampScalar(0, S16, S32)
        .scalarize(0);
    }
  } else {
    getActionDefinitionsBuilder({G_SMIN, G_SMAX, G_UMIN, G_UMAX})
      .legalFor({S32})
      .clampScalar(0, S32, S32)
      .widenScalarToNextPow2(0)
      .scalarize(0);
  }

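  // Helpers comparing the bit widths of two type indices, used below to pick
  // whether the integer side of an inttoptr/ptrtoint needs to be widened or
  // narrowed to match the pointer size.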
  auto smallerThan = [](unsigned TypeIdx0, unsigned TypeIdx1) {
    return [=](const LegalityQuery &Query) {
      return Query.Types[TypeIdx0].getSizeInBits() <
             Query.Types[TypeIdx1].getSizeInBits();
    };
  };

  auto greaterThan = [](unsigned TypeIdx0, unsigned TypeIdx1) {
    return [=](const LegalityQuery &Query) {
      return Query.Types[TypeIdx0].getSizeInBits() >
             Query.Types[TypeIdx1].getSizeInBits();
    };
  };

  getActionDefinitionsBuilder(G_INTTOPTR)
    // List the common cases
    .legalForCartesianProduct(AddrSpaces64, {S64})
    .legalForCartesianProduct(AddrSpaces32, {S32})
    .scalarize(0)
    // Accept any address space as long as the size matches
    .legalIf(sameSize(0, 1))
    .widenScalarIf(smallerThan(1, 0),
      [](const LegalityQuery &Query) {
        return std::make_pair(1, LLT::scalar(Query.Types[0].getSizeInBits()));
      })
    .narrowScalarIf(greaterThan(1, 0),
      [](const LegalityQuery &Query) {
        return std::make_pair(1, LLT::scalar(Query.Types[0].getSizeInBits()));
      });

  getActionDefinitionsBuilder(G_PTRTOINT)
    // List the common cases
    .legalForCartesianProduct(AddrSpaces64, {S64})
    .legalForCartesianProduct(AddrSpaces32, {S32})
    .scalarize(0)
    // Accept any address space as long as the size matches
    .legalIf(sameSize(0, 1))
    .widenScalarIf(smallerThan(0, 1),
      [](const LegalityQuery &Query) {
        return std::make_pair(0, LLT::scalar(Query.Types[1].getSizeInBits()));
      })
    .narrowScalarIf(
      greaterThan(0, 1),
      [](const LegalityQuery &Query) {
        return std::make_pair(0, LLT::scalar(Query.Types[1].getSizeInBits()));
      });

  getActionDefinitionsBuilder(G_ADDRSPACE_CAST)
    .scalarize(0)
    .custom();

  // TODO: Should load to s16 be legal? Most loads extend to 32-bits, but we
  // handle some operations by just promoting the register during
  // selection. There are also d16 loads on GFX9+ which preserve the high bits.
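  // Results wider than their memory size are narrowed to a 32-bit load plus
  // extend, and 96-bit vector accesses are split to v2s32 pieces on
  // subtargets without dwordx3 load/store support.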
  getActionDefinitionsBuilder({G_LOAD, G_STORE})
    .narrowScalarIf([](const LegalityQuery &Query) {
        unsigned Size = Query.Types[0].getSizeInBits();
        unsigned MemSize = Query.MMODescrs[0].SizeInBits;
        return (Size > 32 && MemSize < Size);
      },
      [](const LegalityQuery &Query) {
        return std::make_pair(0, LLT::scalar(32));
      })
    .moreElementsIf(isSmallOddVector(0), oneMoreElement(0))
    .fewerElementsIf([=](const LegalityQuery &Query) {
        unsigned MemSize = Query.MMODescrs[0].SizeInBits;
        return (MemSize == 96) &&
               Query.Types[0].isVector() &&
               !ST.hasDwordx3LoadStores();
      },
      [=](const LegalityQuery &Query) {
        return std::make_pair(0, V2S32);
      })
    .legalIf([=](const LegalityQuery &Query) {
        const LLT &Ty0 = Query.Types[0];

        unsigned Size = Ty0.getSizeInBits();
        unsigned MemSize = Query.MMODescrs[0].SizeInBits;
        if (Size < 32 || (Size > 32 && MemSize < Size))
          return false;

        if (Ty0.isVector() && Size != MemSize)
          return false;

        // TODO: Decompose private loads into 4-byte components.
        // TODO: Illegal flat loads on SI
        switch (MemSize) {
        case 8:
        case 16:
          return Size == 32;
        case 32:
        case 64:
        case 128:
          return true;

        case 96:
          return ST.hasDwordx3LoadStores();

        case 256:
        case 512:
          // TODO: Possibly support loads of i256 and i512. This will require
          // adding i256 and i512 types to MVT in order to be able to use
          // TableGen.
          // TODO: Add support for other vector types, this will require
          // defining more value mappings for the new types.
          return Ty0.isVector() && (Ty0.getScalarType().getSizeInBits() == 32 ||
                                    Ty0.getScalarType().getSizeInBits() == 64);

        default:
          return false;
        }
      })
    .clampScalar(0, S32, S64);

  // FIXME: Handle alignment requirements.
  auto &ExtLoads = getActionDefinitionsBuilder({G_SEXTLOAD, G_ZEXTLOAD})
    .legalForTypesWithMemDesc({
        {S32, GlobalPtr, 8, 8},
        {S32, GlobalPtr, 16, 8},
        {S32, LocalPtr, 8, 8},
        {S32, LocalPtr, 16, 8},
        {S32, PrivatePtr, 8, 8},
        {S32, PrivatePtr, 16, 8}});
  if (ST.hasFlatAddressSpace()) {
    ExtLoads.legalForTypesWithMemDesc({{S32, FlatPtr, 8, 8},
                                       {S32, FlatPtr, 16, 8}});
  }

  ExtLoads.clampScalar(0, S32, S32)
          .widenScalarToNextPow2(0)
          .unsupportedIfMemSizeNotPow2()
          .lower();

  auto &Atomics = getActionDefinitionsBuilder(
    {G_ATOMICRMW_XCHG, G_ATOMICRMW_ADD, G_ATOMICRMW_SUB,
     G_ATOMICRMW_AND, G_ATOMICRMW_OR, G_ATOMICRMW_XOR,
     G_ATOMICRMW_MAX, G_ATOMICRMW_MIN, G_ATOMICRMW_UMAX,
     G_ATOMICRMW_UMIN, G_ATOMIC_CMPXCHG})
    .legalFor({{S32, GlobalPtr}, {S32, LocalPtr},
               {S64, GlobalPtr}, {S64, LocalPtr}});
  if (ST.hasFlatAddressSpace()) {
    Atomics.legalFor({{S32, FlatPtr}, {S64, FlatPtr}});
  }

  getActionDefinitionsBuilder(G_ATOMICRMW_FADD)
    .legalFor({{S32, LocalPtr}});

  // TODO: Pointer types, any 32-bit or 64-bit vector
  getActionDefinitionsBuilder(G_SELECT)
    .legalForCartesianProduct({S32, S64, S16, V2S32, V2S16, V4S16,
          GlobalPtr, LocalPtr, FlatPtr, PrivatePtr,
          LLT::vector(2, LocalPtr), LLT::vector(2, PrivatePtr)}, {S1})
    .clampScalar(0, S16, S64)
    .moreElementsIf(isSmallOddVector(0), oneMoreElement(0))
    .fewerElementsIf(numElementsNotEven(0), scalarize(0))
    .scalarize(1)
    .clampMaxNumElements(0, S32, 2)
    .clampMaxNumElements(0, LocalPtr, 2)
    .clampMaxNumElements(0, PrivatePtr, 2)
    .scalarize(0)
    .widenScalarToNextPow2(0)
    .legalIf(all(isPointer(0), typeIs(1, S1)));

  // TODO: Only the low 4/5/6 bits of the shift amount are observed, so we can
  // be more flexible with the shift amount type.
  auto &Shifts = getActionDefinitionsBuilder({G_SHL, G_LSHR, G_ASHR})
    .legalFor({{S32, S32}, {S64, S32}});
  if (ST.has16BitInsts()) {
    if (ST.hasVOP3PInsts()) {
      Shifts.legalFor({{S16, S32}, {S16, S16}, {V2S16, V2S16}})
            .clampMaxNumElements(0, S16, 2);
    } else
      Shifts.legalFor({{S16, S32}, {S16, S16}});

    Shifts.clampScalar(1, S16, S32);
    Shifts.clampScalar(0, S16, S64);
    Shifts.widenScalarToNextPow2(0, 16);
  } else {
    // Make sure we legalize the shift amount type first, as the general
    // expansion for the shifted type will produce much worse code if it hasn't
    // been truncated already.
    Shifts.clampScalar(1, S32, S32);
    Shifts.clampScalar(0, S32, S64);
    Shifts.widenScalarToNextPow2(0, 32);
  }
  Shifts.scalarize(0);

  for (unsigned Op : {G_EXTRACT_VECTOR_ELT, G_INSERT_VECTOR_ELT}) {
    unsigned VecTypeIdx = Op == G_EXTRACT_VECTOR_ELT ? 1 : 0;
    unsigned EltTypeIdx = Op == G_EXTRACT_VECTOR_ELT ? 0 : 1;
    unsigned IdxTypeIdx = 2;

    getActionDefinitionsBuilder(Op)
      .customIf([=](const LegalityQuery &Query) {
          const LLT EltTy = Query.Types[EltTypeIdx];
          const LLT VecTy = Query.Types[VecTypeIdx];
          const LLT IdxTy = Query.Types[IdxTypeIdx];
          return (EltTy.getSizeInBits() == 16 ||
                  EltTy.getSizeInBits() % 32 == 0) &&
                 VecTy.getSizeInBits() % 32 == 0 &&
                 VecTy.getSizeInBits() <= 512 &&
                 IdxTy.getSizeInBits() == 32;
        })
      .clampScalar(EltTypeIdx, S32, S64)
      .clampScalar(VecTypeIdx, S32, S64)
      .clampScalar(IdxTypeIdx, S32, S32);
  }

  getActionDefinitionsBuilder(G_EXTRACT_VECTOR_ELT)
    .unsupportedIf([=](const LegalityQuery &Query) {
        const LLT &EltTy = Query.Types[1].getElementType();
        return Query.Types[0] != EltTy;
      });

  for (unsigned Op : {G_EXTRACT, G_INSERT}) {
    unsigned BigTyIdx = Op == G_EXTRACT ? 1 : 0;
    unsigned LitTyIdx = Op == G_EXTRACT ? 0 : 1;

    // FIXME: Doesn't handle extract of illegal sizes.
    getActionDefinitionsBuilder(Op)
      .legalIf([=](const LegalityQuery &Query) {
          const LLT BigTy = Query.Types[BigTyIdx];
          const LLT LitTy = Query.Types[LitTyIdx];
          return (BigTy.getSizeInBits() % 32 == 0) &&
                 (LitTy.getSizeInBits() % 16 == 0);
        })
      .widenScalarIf(
        [=](const LegalityQuery &Query) {
          const LLT BigTy = Query.Types[BigTyIdx];
          return (BigTy.getScalarSizeInBits() < 16);
        },
        LegalizeMutations::widenScalarOrEltToNextPow2(BigTyIdx, 16))
      .widenScalarIf(
        [=](const LegalityQuery &Query) {
          const LLT LitTy = Query.Types[LitTyIdx];
          return (LitTy.getScalarSizeInBits() < 16);
        },
        LegalizeMutations::widenScalarOrEltToNextPow2(LitTyIdx, 16))
      .moreElementsIf(isSmallOddVector(BigTyIdx), oneMoreElement(BigTyIdx))
      .widenScalarToNextPow2(BigTyIdx, 32);
  }

  getActionDefinitionsBuilder(G_BUILD_VECTOR)
    .legalForCartesianProduct(AllS32Vectors, {S32})
    .legalForCartesianProduct(AllS64Vectors, {S64})
    .clampNumElements(0, V16S32, V16S32)
    .clampNumElements(0, V2S64, V8S64)
    .minScalarSameAs(1, 0)
    .legalIf(isRegisterType(0))
    .minScalarOrElt(0, S32);

  getActionDefinitionsBuilder(G_CONCAT_VECTORS)
    .legalIf(isRegisterType(0));

  // TODO: Don't fully scalarize v2s16 pieces
  getActionDefinitionsBuilder(G_SHUFFLE_VECTOR).lower();

  // Merge/Unmerge
  for (unsigned Op : {G_MERGE_VALUES, G_UNMERGE_VALUES}) {
    unsigned BigTyIdx = Op == G_MERGE_VALUES ? 0 : 1;
    unsigned LitTyIdx = Op == G_MERGE_VALUES ? 1 : 0;

    auto notValidElt = [=](const LegalityQuery &Query, unsigned TypeIdx) {
      const LLT &Ty = Query.Types[TypeIdx];
      if (Ty.isVector()) {
        const LLT &EltTy = Ty.getElementType();
        if (EltTy.getSizeInBits() < 8 || EltTy.getSizeInBits() > 64)
          return true;
        if (!isPowerOf2_32(EltTy.getSizeInBits()))
          return true;
      }
      return false;
    };

    getActionDefinitionsBuilder(Op)
      .widenScalarToNextPow2(LitTyIdx, /*Min*/ 16)
      // Clamp the little scalar to s8-s256 and make it a power of 2. It's not
      // worth considering the multiples of 64 since 2*192 and 2*384 are not
      // valid.
      .clampScalar(LitTyIdx, S16, S256)
      .widenScalarToNextPow2(LitTyIdx, /*Min*/ 32)
      .moreElementsIf(isSmallOddVector(BigTyIdx), oneMoreElement(BigTyIdx))
      .fewerElementsIf(all(typeIs(0, S16), vectorWiderThan(1, 32),
                           elementTypeIs(1, S16)),
                       changeTo(1, V2S16))
      // Break up vectors with weird elements into scalars
      .fewerElementsIf(
        [=](const LegalityQuery &Query) { return notValidElt(Query, 0); },
        scalarize(0))
      .fewerElementsIf(
        [=](const LegalityQuery &Query) { return notValidElt(Query, 1); },
        scalarize(1))
      .clampScalar(BigTyIdx, S32, S512)
      .lowerFor({{S16, V2S16}})
      .widenScalarIf(
        [=](const LegalityQuery &Query) {
          const LLT &Ty = Query.Types[BigTyIdx];
          return !isPowerOf2_32(Ty.getSizeInBits()) &&
                 Ty.getSizeInBits() % 16 != 0;
        },
        [=](const LegalityQuery &Query) {
          // Pick the next power of 2, or a multiple of 64 over 128.
          // Whichever is smaller.
          const LLT &Ty = Query.Types[BigTyIdx];
          unsigned NewSizeInBits = 1 << Log2_32_Ceil(Ty.getSizeInBits() + 1);
          if (NewSizeInBits >= 256) {
            unsigned RoundedTo = alignTo<64>(Ty.getSizeInBits() + 1);
            if (RoundedTo < NewSizeInBits)
              NewSizeInBits = RoundedTo;
          }
          return std::make_pair(BigTyIdx, LLT::scalar(NewSizeInBits));
        })
      .legalIf([=](const LegalityQuery &Query) {
          const LLT &BigTy = Query.Types[BigTyIdx];
          const LLT &LitTy = Query.Types[LitTyIdx];

          if (BigTy.isVector() && BigTy.getSizeInBits() < 32)
            return false;
          if (LitTy.isVector() && LitTy.getSizeInBits() < 32)
            return false;

          return BigTy.getSizeInBits() % 16 == 0 &&
                 LitTy.getSizeInBits() % 16 == 0 &&
                 BigTy.getSizeInBits() <= 512;
        })
      // Any vectors left are the wrong size. Scalarize them.
      .scalarize(0)
      .scalarize(1);
  }

  getActionDefinitionsBuilder(G_SEXT_INREG).lower();

  computeTables();
  verify(*ST.getInstrInfo());
}

bool AMDGPULegalizerInfo::legalizeCustom(MachineInstr &MI,
                                         MachineRegisterInfo &MRI,
                                         MachineIRBuilder &MIRBuilder,
                                         GISelChangeObserver &Observer) const {
  switch (MI.getOpcode()) {
  case TargetOpcode::G_ADDRSPACE_CAST:
    return legalizeAddrSpaceCast(MI, MRI, MIRBuilder);
  case TargetOpcode::G_FRINT:
    return legalizeFrint(MI, MRI, MIRBuilder);
  case TargetOpcode::G_FCEIL:
    return legalizeFceil(MI, MRI, MIRBuilder);
  case TargetOpcode::G_INTRINSIC_TRUNC:
    return legalizeIntrinsicTrunc(MI, MRI, MIRBuilder);
  case TargetOpcode::G_SITOFP:
    return legalizeITOFP(MI, MRI, MIRBuilder, true);
  case TargetOpcode::G_UITOFP:
    return legalizeITOFP(MI, MRI, MIRBuilder, false);
  case TargetOpcode::G_FMINNUM:
  case TargetOpcode::G_FMAXNUM:
  case TargetOpcode::G_FMINNUM_IEEE:
  case TargetOpcode::G_FMAXNUM_IEEE:
    return legalizeMinNumMaxNum(MI, MRI, MIRBuilder);
  case TargetOpcode::G_EXTRACT_VECTOR_ELT:
    return legalizeExtractVectorElt(MI, MRI, MIRBuilder);
  case TargetOpcode::G_INSERT_VECTOR_ELT:
    return legalizeInsertVectorElt(MI, MRI, MIRBuilder);
  default:
    return false;
  }

  llvm_unreachable("expected switch to return");
}

Register AMDGPULegalizerInfo::getSegmentAperture(
  unsigned AS,
  MachineRegisterInfo &MRI,
  MachineIRBuilder &MIRBuilder) const {
  MachineFunction &MF = MIRBuilder.getMF();
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const LLT S32 = LLT::scalar(32);

  if (ST.hasApertureRegs()) {
    // FIXME: Use inline constants (src_{shared, private}_base) instead of
    // getreg.
    unsigned Offset = AS == AMDGPUAS::LOCAL_ADDRESS ?
        AMDGPU::Hwreg::OFFSET_SRC_SHARED_BASE :
        AMDGPU::Hwreg::OFFSET_SRC_PRIVATE_BASE;
    unsigned WidthM1 = AS == AMDGPUAS::LOCAL_ADDRESS ?
        AMDGPU::Hwreg::WIDTH_M1_SRC_SHARED_BASE :
        AMDGPU::Hwreg::WIDTH_M1_SRC_PRIVATE_BASE;
    unsigned Encoding =
        AMDGPU::Hwreg::ID_MEM_BASES << AMDGPU::Hwreg::ID_SHIFT_ |
        Offset << AMDGPU::Hwreg::OFFSET_SHIFT_ |
        WidthM1 << AMDGPU::Hwreg::WIDTH_M1_SHIFT_;

    Register ApertureReg = MRI.createGenericVirtualRegister(S32);
    Register GetReg = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);

    MIRBuilder.buildInstr(AMDGPU::S_GETREG_B32)
      .addDef(GetReg)
      .addImm(Encoding);
    MRI.setType(GetReg, S32);

    auto ShiftAmt = MIRBuilder.buildConstant(S32, WidthM1 + 1);
    MIRBuilder.buildInstr(TargetOpcode::G_SHL)
      .addDef(ApertureReg)
      .addUse(GetReg)
      .addUse(ShiftAmt.getReg(0));

    return ApertureReg;
  }

  Register QueuePtr = MRI.createGenericVirtualRegister(
    LLT::pointer(AMDGPUAS::CONSTANT_ADDRESS, 64));

  // FIXME: Placeholder until we can track the input registers.
  MIRBuilder.buildConstant(QueuePtr, 0xdeadbeef);

  // Offset into amd_queue_t for group_segment_aperture_base_hi /
  // private_segment_aperture_base_hi.
  uint32_t StructOffset = (AS == AMDGPUAS::LOCAL_ADDRESS) ? 0x40 : 0x44;

  // FIXME: Don't use undef
  Value *V = UndefValue::get(PointerType::get(
    Type::getInt8Ty(MF.getFunction().getContext()),
    AMDGPUAS::CONSTANT_ADDRESS));

  MachinePointerInfo PtrInfo(V, StructOffset);
  MachineMemOperand *MMO = MF.getMachineMemOperand(
    PtrInfo,
    MachineMemOperand::MOLoad |
    MachineMemOperand::MODereferenceable |
    MachineMemOperand::MOInvariant,
    4,
    MinAlign(64, StructOffset));

  Register LoadResult = MRI.createGenericVirtualRegister(S32);
  Register LoadAddr;

  MIRBuilder.materializeGEP(LoadAddr, QueuePtr, LLT::scalar(64), StructOffset);
  MIRBuilder.buildLoad(LoadResult, LoadAddr, *MMO);
  return LoadResult;
}

bool AMDGPULegalizerInfo::legalizeAddrSpaceCast(
  MachineInstr &MI, MachineRegisterInfo &MRI,
  MachineIRBuilder &MIRBuilder) const {
  MachineFunction &MF = MIRBuilder.getMF();

  MIRBuilder.setInstr(MI);

  const LLT S32 = LLT::scalar(32);
  Register Dst = MI.getOperand(0).getReg();
  Register Src = MI.getOperand(1).getReg();

  LLT DstTy = MRI.getType(Dst);
  LLT SrcTy = MRI.getType(Src);
  unsigned DestAS = DstTy.getAddressSpace();
  unsigned SrcAS = SrcTy.getAddressSpace();

  // TODO: Avoid reloading from the queue ptr for each cast, or at least each
  // vector element.
  assert(!DstTy.isVector());

  const AMDGPUTargetMachine &TM
    = static_cast<const AMDGPUTargetMachine &>(MF.getTarget());

  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  if (ST.getTargetLowering()->isNoopAddrSpaceCast(SrcAS, DestAS)) {
    MI.setDesc(MIRBuilder.getTII().get(TargetOpcode::G_BITCAST));
    return true;
  }

  if (DestAS == AMDGPUAS::CONSTANT_ADDRESS_32BIT) {
    // Truncate.
    MIRBuilder.buildExtract(Dst, Src, 0);
    MI.eraseFromParent();
    return true;
  }

  if (SrcAS == AMDGPUAS::CONSTANT_ADDRESS_32BIT) {
    const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
    uint32_t AddrHiVal = Info->get32BitAddressHighBits();

    // FIXME: This is a bit ugly due to creating a merge of 2 pointers to
    // another. Merge operands are required to be the same type, but creating an
    // extra ptrtoint would be kind of pointless.
    auto HighAddr = MIRBuilder.buildConstant(
      LLT::pointer(AMDGPUAS::CONSTANT_ADDRESS_32BIT, 32), AddrHiVal);
    MIRBuilder.buildMerge(Dst, {Src, HighAddr.getReg(0)});
    MI.eraseFromParent();
    return true;
  }

  if (SrcAS == AMDGPUAS::FLAT_ADDRESS) {
    assert(DestAS == AMDGPUAS::LOCAL_ADDRESS ||
           DestAS == AMDGPUAS::PRIVATE_ADDRESS);
    unsigned NullVal = TM.getNullPointerValue(DestAS);

    auto SegmentNull = MIRBuilder.buildConstant(DstTy, NullVal);
    auto FlatNull = MIRBuilder.buildConstant(SrcTy, 0);

    Register PtrLo32 = MRI.createGenericVirtualRegister(DstTy);

    // Extract low 32-bits of the pointer.
    MIRBuilder.buildExtract(PtrLo32, Src, 0);

    Register CmpRes = MRI.createGenericVirtualRegister(LLT::scalar(1));
    MIRBuilder.buildICmp(CmpInst::ICMP_NE, CmpRes, Src, FlatNull.getReg(0));
    MIRBuilder.buildSelect(Dst, CmpRes, PtrLo32, SegmentNull.getReg(0));

    MI.eraseFromParent();
    return true;
  }

  if (SrcAS != AMDGPUAS::LOCAL_ADDRESS && SrcAS != AMDGPUAS::PRIVATE_ADDRESS)
    return false;

  if (!ST.hasFlatAddressSpace())
    return false;

  auto SegmentNull =
      MIRBuilder.buildConstant(SrcTy, TM.getNullPointerValue(SrcAS));
  auto FlatNull =
      MIRBuilder.buildConstant(DstTy, TM.getNullPointerValue(DestAS));

  Register ApertureReg = getSegmentAperture(DestAS, MRI, MIRBuilder);

  Register CmpRes = MRI.createGenericVirtualRegister(LLT::scalar(1));
  MIRBuilder.buildICmp(CmpInst::ICMP_NE, CmpRes, Src, SegmentNull.getReg(0));

  Register BuildPtr = MRI.createGenericVirtualRegister(DstTy);

  // Coerce the type of the low half of the result so we can use merge_values.
  Register SrcAsInt = MRI.createGenericVirtualRegister(S32);
  MIRBuilder.buildInstr(TargetOpcode::G_PTRTOINT)
    .addDef(SrcAsInt)
    .addUse(Src);

  // TODO: Should we allow mismatched types but matching sizes in merges to
  // avoid the ptrtoint?
  MIRBuilder.buildMerge(BuildPtr, {SrcAsInt, ApertureReg});
  MIRBuilder.buildSelect(Dst, CmpRes, BuildPtr, FlatNull.getReg(0));

  MI.eraseFromParent();
  return true;
}

bool AMDGPULegalizerInfo::legalizeFrint(
  MachineInstr &MI, MachineRegisterInfo &MRI,
  MachineIRBuilder &MIRBuilder) const {
  MIRBuilder.setInstr(MI);

  Register Src = MI.getOperand(1).getReg();
  LLT Ty = MRI.getType(Src);
  assert(Ty.isScalar() && Ty.getSizeInBits() == 64);

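  // Add and subtract a copysigned 2^52 so the fractional bits are rounded
  // away by the f64 representation itself; inputs with magnitude above C2
  // (just under 2^52) have no fractional bits and are returned unchanged.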
  APFloat C1Val(APFloat::IEEEdouble(), "0x1.0p+52");
  APFloat C2Val(APFloat::IEEEdouble(), "0x1.fffffffffffffp+51");

  auto C1 = MIRBuilder.buildFConstant(Ty, C1Val);
  auto CopySign = MIRBuilder.buildFCopysign(Ty, C1, Src);

  // TODO: Should this propagate fast-math-flags?
  auto Tmp1 = MIRBuilder.buildFAdd(Ty, Src, CopySign);
  auto Tmp2 = MIRBuilder.buildFSub(Ty, Tmp1, CopySign);

  auto C2 = MIRBuilder.buildFConstant(Ty, C2Val);
  auto Fabs = MIRBuilder.buildFAbs(Ty, Src);

  auto Cond = MIRBuilder.buildFCmp(CmpInst::FCMP_OGT, LLT::scalar(1), Fabs, C2);
  MIRBuilder.buildSelect(MI.getOperand(0).getReg(), Cond, Src, Tmp2);
  return true;
}

bool AMDGPULegalizerInfo::legalizeFceil(
  MachineInstr &MI, MachineRegisterInfo &MRI,
  MachineIRBuilder &B) const {
  B.setInstr(MI);

  const LLT S1 = LLT::scalar(1);
  const LLT S64 = LLT::scalar(64);

  Register Src = MI.getOperand(1).getReg();
  assert(MRI.getType(Src) == S64);

  // result = trunc(src)
  // if (src > 0.0 && src != result)
  //   result += 1.0

  auto Trunc = B.buildInstr(TargetOpcode::G_INTRINSIC_TRUNC, {S64}, {Src});

  const auto Zero = B.buildFConstant(S64, 0.0);
  const auto One = B.buildFConstant(S64, 1.0);
  auto Lt0 = B.buildFCmp(CmpInst::FCMP_OGT, S1, Src, Zero);
  auto NeTrunc = B.buildFCmp(CmpInst::FCMP_ONE, S1, Src, Trunc);
  auto And = B.buildAnd(S1, Lt0, NeTrunc);
  auto Add = B.buildSelect(S64, And, One, Zero);

  // TODO: Should this propagate fast-math-flags?
  B.buildFAdd(MI.getOperand(0).getReg(), Trunc, Add);
  return true;
}

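// Extract the 11-bit biased exponent from the high 32 bits of an f64 value
// with amdgcn.ubfe and subtract the bias (1023) to get the unbiased exponent.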
static MachineInstrBuilder extractF64Exponent(unsigned Hi,
                                              MachineIRBuilder &B) {
  const unsigned FractBits = 52;
  const unsigned ExpBits = 11;
  LLT S32 = LLT::scalar(32);

  auto Const0 = B.buildConstant(S32, FractBits - 32);
  auto Const1 = B.buildConstant(S32, ExpBits);

  auto ExpPart = B.buildIntrinsic(Intrinsic::amdgcn_ubfe, {S32}, false)
    .addUse(Hi) // Source to extract the bitfield from.
    .addUse(Const0.getReg(0))
    .addUse(Const1.getReg(0));

  return B.buildSub(S32, ExpPart, B.buildConstant(S32, 1023));
}

bool AMDGPULegalizerInfo::legalizeIntrinsicTrunc(
  MachineInstr &MI, MachineRegisterInfo &MRI,
  MachineIRBuilder &B) const {
  B.setInstr(MI);

  const LLT S1 = LLT::scalar(1);
  const LLT S32 = LLT::scalar(32);
  const LLT S64 = LLT::scalar(64);

  Register Src = MI.getOperand(1).getReg();
  assert(MRI.getType(Src) == S64);

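  // Clear the fractional bits that lie below the exponent: an exponent < 0
  // leaves only the sign bit, and an exponent > 51 means the value is already
  // integral and passes through unchanged.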
  // TODO: Should this use extract since the low half is unused?
  auto Unmerge = B.buildUnmerge({S32, S32}, Src);
  Register Hi = Unmerge.getReg(1);

  // Extract the upper half, since this is where we will find the sign and
  // exponent.
  auto Exp = extractF64Exponent(Hi, B);

  const unsigned FractBits = 52;

  // Extract the sign bit.
  const auto SignBitMask = B.buildConstant(S32, UINT32_C(1) << 31);
  auto SignBit = B.buildAnd(S32, Hi, SignBitMask);

  const auto FractMask = B.buildConstant(S64, (UINT64_C(1) << FractBits) - 1);

  const auto Zero32 = B.buildConstant(S32, 0);

  // Extend back to 64-bits.
  auto SignBit64 = B.buildMerge(S64, {Zero32.getReg(0), SignBit.getReg(0)});

  auto Shr = B.buildAShr(S64, FractMask, Exp);
  auto Not = B.buildNot(S64, Shr);
  auto Tmp0 = B.buildAnd(S64, Src, Not);
  auto FiftyOne = B.buildConstant(S32, FractBits - 1);

  auto ExpLt0 = B.buildICmp(CmpInst::ICMP_SLT, S1, Exp, Zero32);
  auto ExpGt51 = B.buildICmp(CmpInst::ICMP_SGT, S1, Exp, FiftyOne);

  auto Tmp1 = B.buildSelect(S64, ExpLt0, SignBit64, Tmp0);
  B.buildSelect(MI.getOperand(0).getReg(), ExpGt51, Src, Tmp1);
  return true;
}

bool AMDGPULegalizerInfo::legalizeITOFP(
  MachineInstr &MI, MachineRegisterInfo &MRI,
  MachineIRBuilder &B, bool Signed) const {
  B.setInstr(MI);

  Register Dst = MI.getOperand(0).getReg();
  Register Src = MI.getOperand(1).getReg();

  const LLT S64 = LLT::scalar(64);
  const LLT S32 = LLT::scalar(32);

  assert(MRI.getType(Src) == S64 && MRI.getType(Dst) == S64);

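  // Convert the two 32-bit halves separately: the high half (signed or
  // unsigned as requested) is scaled by 2^32 via amdgcn.ldexp, then the
  // always-unsigned low half is added in.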
1132 auto Unmerge = B.buildUnmerge({S32, S32}, Src);
1133
1134 auto CvtHi = Signed ?
1135 B.buildSITOFP(S64, Unmerge.getReg(1)) :
1136 B.buildUITOFP(S64, Unmerge.getReg(1));
1137
1138 auto CvtLo = B.buildUITOFP(S64, Unmerge.getReg(0));
1139
1140 auto ThirtyTwo = B.buildConstant(S32, 32);
1141 auto LdExp = B.buildIntrinsic(Intrinsic::amdgcn_ldexp, {S64}, false)
1142 .addUse(CvtHi.getReg(0))
1143 .addUse(ThirtyTwo.getReg(0));
1144
1145 // TODO: Should this propagate fast-math-flags?
1146 B.buildFAdd(Dst, LdExp, CvtLo);
1147 MI.eraseFromParent();
1148 return true;
1149}
Matt Arsenaulte15770a2019-07-01 18:40:23 +00001150
Matt Arsenault6ce1b4f2019-07-10 16:31:19 +00001151bool AMDGPULegalizerInfo::legalizeMinNumMaxNum(
1152 MachineInstr &MI, MachineRegisterInfo &MRI,
1153 MachineIRBuilder &B) const {
1154 MachineFunction &MF = B.getMF();
1155 const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
1156
1157 const bool IsIEEEOp = MI.getOpcode() == AMDGPU::G_FMINNUM_IEEE ||
1158 MI.getOpcode() == AMDGPU::G_FMAXNUM_IEEE;
1159
1160 // With ieee_mode disabled, the instructions have the correct behavior
1161 // already for G_FMINNUM/G_FMAXNUM
1162 if (!MFI->getMode().IEEE)
1163 return !IsIEEEOp;
1164
1165 if (IsIEEEOp)
1166 return true;
1167
1168 MachineIRBuilder HelperBuilder(MI);
1169 GISelObserverWrapper DummyObserver;
1170 LegalizerHelper Helper(MF, DummyObserver, HelperBuilder);
1171 HelperBuilder.setMBB(*MI.getParent());
1172 return Helper.lowerFMinNumMaxNum(MI) == LegalizerHelper::Legalized;
1173}
1174
bool AMDGPULegalizerInfo::legalizeExtractVectorElt(
  MachineInstr &MI, MachineRegisterInfo &MRI,
  MachineIRBuilder &B) const {
  // TODO: Should move some of this into LegalizerHelper.

  // TODO: Promote dynamic indexing of s16 to s32
  // TODO: Dynamic s64 indexing is only legal for SGPR.
  Optional<int64_t> IdxVal = getConstantVRegVal(MI.getOperand(2).getReg(), MRI);
  if (!IdxVal) // Dynamic case will be selected to register indexing.
    return true;

  Register Dst = MI.getOperand(0).getReg();
  Register Vec = MI.getOperand(1).getReg();

  LLT VecTy = MRI.getType(Vec);
  LLT EltTy = VecTy.getElementType();
  assert(EltTy == MRI.getType(Dst));

  B.setInstr(MI);

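  // A constant in-bounds index lowers to a fixed-offset G_EXTRACT; an
  // out-of-bounds constant index produces an undefined value, so fold it
  // straight to undef.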
  if (IdxVal.getValue() < VecTy.getNumElements())
    B.buildExtract(Dst, Vec, IdxVal.getValue() * EltTy.getSizeInBits());
  else
    B.buildUndef(Dst);

  MI.eraseFromParent();
  return true;
}

bool AMDGPULegalizerInfo::legalizeInsertVectorElt(
  MachineInstr &MI, MachineRegisterInfo &MRI,
  MachineIRBuilder &B) const {
  // TODO: Should move some of this into LegalizerHelper.

  // TODO: Promote dynamic indexing of s16 to s32
  // TODO: Dynamic s64 indexing is only legal for SGPR.
  Optional<int64_t> IdxVal = getConstantVRegVal(MI.getOperand(3).getReg(), MRI);
  if (!IdxVal) // Dynamic case will be selected to register indexing.
    return true;

  Register Dst = MI.getOperand(0).getReg();
  Register Vec = MI.getOperand(1).getReg();
  Register Ins = MI.getOperand(2).getReg();

  LLT VecTy = MRI.getType(Vec);
  LLT EltTy = VecTy.getElementType();
  assert(EltTy == MRI.getType(Ins));

  B.setInstr(MI);

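  // Likewise, a constant in-bounds index lowers to a fixed-offset G_INSERT,
  // and an out-of-bounds constant index leaves the whole result undefined.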
  if (IdxVal.getValue() < VecTy.getNumElements())
    B.buildInsert(Dst, Vec, Ins, IdxVal.getValue() * EltTy.getSizeInBits());
  else
    B.buildUndef(Dst);

  MI.eraseFromParent();
  return true;
}

// Return the use branch instruction, or null if the usage is invalid.
static MachineInstr *verifyCFIntrinsic(MachineInstr &MI,
                                       MachineRegisterInfo &MRI) {
  Register CondDef = MI.getOperand(0).getReg();
  if (!MRI.hasOneNonDBGUse(CondDef))
    return nullptr;

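  // The single use must be a G_BRCOND in the same block so the intrinsic and
  // branch can be rewritten together into one exec-manipulating pseudo.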
  MachineInstr &UseMI = *MRI.use_instr_nodbg_begin(CondDef);
  return UseMI.getParent() == MI.getParent() &&
         UseMI.getOpcode() == AMDGPU::G_BRCOND ? &UseMI : nullptr;
}

Register AMDGPULegalizerInfo::getLiveInRegister(MachineRegisterInfo &MRI,
                                                Register Reg, LLT Ty) const {
  Register LiveIn = MRI.getLiveInVirtReg(Reg);
  if (LiveIn)
    return LiveIn;

  Register NewReg = MRI.createGenericVirtualRegister(Ty);
  MRI.addLiveIn(Reg, NewReg);
  return NewReg;
}

bool AMDGPULegalizerInfo::loadInputValue(Register DstReg, MachineIRBuilder &B,
                                         const ArgDescriptor *Arg) const {
  if (!Arg->isRegister())
    return false; // TODO: Handle these

  assert(Arg->getRegister() != 0);
  assert(Arg->getRegister().isPhysical());

  MachineRegisterInfo &MRI = *B.getMRI();

  LLT Ty = MRI.getType(DstReg);
  Register LiveIn = getLiveInRegister(MRI, Arg->getRegister(), Ty);

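  // Some inputs arrive packed into one physical register (e.g. packed
  // workitem IDs); a masked descriptor identifies the bitfield to extract
  // with a shift and mask.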
  if (Arg->isMasked()) {
    // TODO: Should we try to emit this once in the entry block?
    const LLT S32 = LLT::scalar(32);
    const unsigned Mask = Arg->getMask();
    const unsigned Shift = countTrailingZeros<unsigned>(Mask);

    auto ShiftAmt = B.buildConstant(S32, Shift);
    auto LShr = B.buildLShr(S32, LiveIn, ShiftAmt);
    B.buildAnd(DstReg, LShr, B.buildConstant(S32, Mask >> Shift));
  } else
    B.buildCopy(DstReg, LiveIn);

  // Insert the argument copy if it doesn't already exist.
  // FIXME: It seems EmitLiveInCopies isn't called anywhere?
  if (!MRI.getVRegDef(LiveIn)) {
    MachineBasicBlock &EntryMBB = B.getMF().front();
    EntryMBB.addLiveIn(Arg->getRegister());
    B.setInsertPt(EntryMBB, EntryMBB.begin());
    B.buildCopy(LiveIn, Arg->getRegister());
  }

  return true;
}

bool AMDGPULegalizerInfo::legalizePreloadedArgIntrin(
  MachineInstr &MI,
  MachineRegisterInfo &MRI,
  MachineIRBuilder &B,
  AMDGPUFunctionArgInfo::PreloadedValue ArgType) const {
  B.setInstr(MI);

  const SIMachineFunctionInfo *MFI = B.getMF().getInfo<SIMachineFunctionInfo>();

  const ArgDescriptor *Arg;
  const TargetRegisterClass *RC;
  std::tie(Arg, RC) = MFI->getPreloadedValue(ArgType);
  if (!Arg) {
    LLVM_DEBUG(dbgs() << "Required arg register missing\n");
    return false;
  }

  if (loadInputValue(MI.getOperand(0).getReg(), B, Arg)) {
    MI.eraseFromParent();
    return true;
  }

  return false;
}

bool AMDGPULegalizerInfo::legalizeFDIVFast(MachineInstr &MI,
                                           MachineRegisterInfo &MRI,
                                           MachineIRBuilder &B) const {
  B.setInstr(MI);
  Register Res = MI.getOperand(0).getReg();
  Register LHS = MI.getOperand(2).getReg();
  Register RHS = MI.getOperand(3).getReg();
  uint16_t Flags = MI.getFlags();

  LLT S32 = LLT::scalar(32);
  LLT S1 = LLT::scalar(1);

  auto Abs = B.buildFAbs(S32, RHS, Flags);
  const APFloat C0Val(1.0f);

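  // 0x6f800000 is 0x1.0p+96f and 0x2f800000 is 0x1.0p-32f. When |RHS| is
  // large enough that its reciprocal would lose range, pre-scale the
  // denominator by 2^-32; multiplying the quotient by the same select result
  // below cancels the scaling, so Res is still LHS/RHS.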
  auto C0 = B.buildConstant(S32, 0x6f800000);
  auto C1 = B.buildConstant(S32, 0x2f800000);
  auto C2 = B.buildConstant(S32, FloatToBits(1.0f));

  auto CmpRes = B.buildFCmp(CmpInst::FCMP_OGT, S1, Abs, C0, Flags);
  auto Sel = B.buildSelect(S32, CmpRes, C1, C2, Flags);

  auto Mul0 = B.buildFMul(S32, RHS, Sel, Flags);

  auto RCP = B.buildIntrinsic(Intrinsic::amdgcn_rcp, {S32}, false)
    .addUse(Mul0.getReg(0))
    .setMIFlags(Flags);

  auto Mul1 = B.buildFMul(S32, LHS, RCP, Flags);

  B.buildFMul(Res, Sel, Mul1, Flags);

  MI.eraseFromParent();
  return true;
}

bool AMDGPULegalizerInfo::legalizeImplicitArgPtr(MachineInstr &MI,
                                                 MachineRegisterInfo &MRI,
                                                 MachineIRBuilder &B) const {
  const SIMachineFunctionInfo *MFI = B.getMF().getInfo<SIMachineFunctionInfo>();
  if (!MFI->isEntryFunction()) {
    return legalizePreloadedArgIntrin(MI, MRI, B,
                                      AMDGPUFunctionArgInfo::IMPLICIT_ARG_PTR);
  }

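  // In an entry function the implicit arguments sit at a fixed offset past
  // the explicit kernel arguments, so compute them as the kernarg segment
  // pointer plus that offset rather than using a separate preloaded register.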
  B.setInstr(MI);

  uint64_t Offset =
    ST.getTargetLowering()->getImplicitParameterOffset(
      B.getMF(), AMDGPUTargetLowering::FIRST_IMPLICIT);
  Register DstReg = MI.getOperand(0).getReg();
  LLT DstTy = MRI.getType(DstReg);
  LLT IdxTy = LLT::scalar(DstTy.getSizeInBits());

  const ArgDescriptor *Arg;
  const TargetRegisterClass *RC;
  std::tie(Arg, RC)
    = MFI->getPreloadedValue(AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR);
  if (!Arg)
    return false;

  Register KernargPtrReg = MRI.createGenericVirtualRegister(DstTy);
  if (!loadInputValue(KernargPtrReg, B, Arg))
    return false;

  B.buildGEP(DstReg, KernargPtrReg, B.buildConstant(IdxTy, Offset).getReg(0));
  MI.eraseFromParent();
  return true;
}

bool AMDGPULegalizerInfo::legalizeIntrinsic(MachineInstr &MI,
                                            MachineRegisterInfo &MRI,
                                            MachineIRBuilder &B) const {
  // For the control-flow intrinsics, replace the G_BRCOND that consumes the
  // condition with the exec-manipulating branch pseudos.
  switch (MI.getOperand(MI.getNumExplicitDefs()).getIntrinsicID()) {
  case Intrinsic::amdgcn_if: {
    if (MachineInstr *BrCond = verifyCFIntrinsic(MI, MRI)) {
      const SIRegisterInfo *TRI
        = static_cast<const SIRegisterInfo *>(MRI.getTargetRegisterInfo());

      B.setInstr(*BrCond);
      Register Def = MI.getOperand(1).getReg();
      Register Use = MI.getOperand(3).getReg();
      B.buildInstr(AMDGPU::SI_IF)
        .addDef(Def)
        .addUse(Use)
        .addMBB(BrCond->getOperand(1).getMBB());

      MRI.setRegClass(Def, TRI->getWaveMaskRegClass());
      MRI.setRegClass(Use, TRI->getWaveMaskRegClass());
      MI.eraseFromParent();
      BrCond->eraseFromParent();
      return true;
    }

    return false;
  }
  case Intrinsic::amdgcn_loop: {
    if (MachineInstr *BrCond = verifyCFIntrinsic(MI, MRI)) {
      const SIRegisterInfo *TRI
        = static_cast<const SIRegisterInfo *>(MRI.getTargetRegisterInfo());

      B.setInstr(*BrCond);
      Register Reg = MI.getOperand(2).getReg();
      B.buildInstr(AMDGPU::SI_LOOP)
        .addUse(Reg)
        .addMBB(BrCond->getOperand(1).getMBB());
      MI.eraseFromParent();
      BrCond->eraseFromParent();
      MRI.setRegClass(Reg, TRI->getWaveMaskRegClass());
      return true;
    }

    return false;
  }
  case Intrinsic::amdgcn_kernarg_segment_ptr:
    return legalizePreloadedArgIntrin(
      MI, MRI, B, AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR);
  case Intrinsic::amdgcn_implicitarg_ptr:
    return legalizeImplicitArgPtr(MI, MRI, B);
  case Intrinsic::amdgcn_workitem_id_x:
    return legalizePreloadedArgIntrin(MI, MRI, B,
                                      AMDGPUFunctionArgInfo::WORKITEM_ID_X);
  case Intrinsic::amdgcn_workitem_id_y:
    return legalizePreloadedArgIntrin(MI, MRI, B,
                                      AMDGPUFunctionArgInfo::WORKITEM_ID_Y);
  case Intrinsic::amdgcn_workitem_id_z:
    return legalizePreloadedArgIntrin(MI, MRI, B,
                                      AMDGPUFunctionArgInfo::WORKITEM_ID_Z);
  case Intrinsic::amdgcn_workgroup_id_x:
    return legalizePreloadedArgIntrin(MI, MRI, B,
                                      AMDGPUFunctionArgInfo::WORKGROUP_ID_X);
  case Intrinsic::amdgcn_workgroup_id_y:
    return legalizePreloadedArgIntrin(MI, MRI, B,
                                      AMDGPUFunctionArgInfo::WORKGROUP_ID_Y);
  case Intrinsic::amdgcn_workgroup_id_z:
    return legalizePreloadedArgIntrin(MI, MRI, B,
                                      AMDGPUFunctionArgInfo::WORKGROUP_ID_Z);
  case Intrinsic::amdgcn_dispatch_ptr:
    return legalizePreloadedArgIntrin(MI, MRI, B,
                                      AMDGPUFunctionArgInfo::DISPATCH_PTR);
  case Intrinsic::amdgcn_queue_ptr:
    return legalizePreloadedArgIntrin(MI, MRI, B,
                                      AMDGPUFunctionArgInfo::QUEUE_PTR);
  case Intrinsic::amdgcn_implicit_buffer_ptr:
    return legalizePreloadedArgIntrin(
      MI, MRI, B, AMDGPUFunctionArgInfo::IMPLICIT_BUFFER_PTR);
  case Intrinsic::amdgcn_dispatch_id:
    return legalizePreloadedArgIntrin(MI, MRI, B,
                                      AMDGPUFunctionArgInfo::DISPATCH_ID);
  case Intrinsic::amdgcn_fdiv_fast:
    return legalizeFDIVFast(MI, MRI, B);
  default:
    return true;
  }

  return true;
}