//===- AMDGPULegalizerInfo.cpp -----------------------------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the targeting of the MachineLegalizer class for
/// AMDGPU.
/// \todo This should be generated by TableGen.
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPULegalizerInfo.h"
#include "AMDGPUTargetMachine.h"
#include "SIMachineFunctionInfo.h"

#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/Debug.h"

using namespace llvm;
using namespace LegalizeActions;
using namespace LegalizeMutations;
using namespace LegalityPredicates;
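// True if the type's total size is at most MaxSize bits and its scalar (or
// vector element) type is a multiple of 32 bits wide: s32, s64, and v2s32
// qualify; s16 and v4s16 do not.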
static LegalityPredicate isMultiple32(unsigned TypeIdx,
                                      unsigned MaxSize = 512) {
  return [=](const LegalityQuery &Query) {
    const LLT Ty = Query.Types[TypeIdx];
    const LLT EltTy = Ty.getScalarType();
    return Ty.getSizeInBits() <= MaxSize && EltTy.getSizeInBits() % 32 == 0;
  };
}

AMDGPULegalizerInfo::AMDGPULegalizerInfo(const GCNSubtarget &ST,
                                         const GCNTargetMachine &TM) {
  using namespace TargetOpcode;

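  // Build an LLT pointer type for an address space at the width the target
  // reports for it: 64 bits for the global, constant, and flat spaces, 32
  // bits for local and private.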
  auto GetAddrSpacePtr = [&TM](unsigned AS) {
    return LLT::pointer(AS, TM.getPointerSizeInBits(AS));
  };

  const LLT S1 = LLT::scalar(1);
  const LLT S8 = LLT::scalar(8);
  const LLT S16 = LLT::scalar(16);
  const LLT S32 = LLT::scalar(32);
  const LLT S64 = LLT::scalar(64);
  const LLT S128 = LLT::scalar(128);
  const LLT S256 = LLT::scalar(256);
  const LLT S512 = LLT::scalar(512);

  const LLT V2S16 = LLT::vector(2, 16);
  const LLT V4S16 = LLT::vector(4, 16);
  const LLT V8S16 = LLT::vector(8, 16);

  const LLT V2S32 = LLT::vector(2, 32);
  const LLT V3S32 = LLT::vector(3, 32);
  const LLT V4S32 = LLT::vector(4, 32);
  const LLT V5S32 = LLT::vector(5, 32);
  const LLT V6S32 = LLT::vector(6, 32);
  const LLT V7S32 = LLT::vector(7, 32);
  const LLT V8S32 = LLT::vector(8, 32);
  const LLT V9S32 = LLT::vector(9, 32);
  const LLT V10S32 = LLT::vector(10, 32);
  const LLT V11S32 = LLT::vector(11, 32);
  const LLT V12S32 = LLT::vector(12, 32);
  const LLT V13S32 = LLT::vector(13, 32);
  const LLT V14S32 = LLT::vector(14, 32);
  const LLT V15S32 = LLT::vector(15, 32);
  const LLT V16S32 = LLT::vector(16, 32);

  const LLT V2S64 = LLT::vector(2, 64);
  const LLT V3S64 = LLT::vector(3, 64);
  const LLT V4S64 = LLT::vector(4, 64);
  const LLT V5S64 = LLT::vector(5, 64);
  const LLT V6S64 = LLT::vector(6, 64);
  const LLT V7S64 = LLT::vector(7, 64);
  const LLT V8S64 = LLT::vector(8, 64);

  std::initializer_list<LLT> AllS32Vectors =
    {V2S32, V3S32, V4S32, V5S32, V6S32, V7S32, V8S32,
     V9S32, V10S32, V11S32, V12S32, V13S32, V14S32, V15S32, V16S32};
  std::initializer_list<LLT> AllS64Vectors =
    {V2S64, V3S64, V4S64, V5S64, V6S64, V7S64, V8S64};

  const LLT GlobalPtr = GetAddrSpacePtr(AMDGPUAS::GLOBAL_ADDRESS);
  const LLT ConstantPtr = GetAddrSpacePtr(AMDGPUAS::CONSTANT_ADDRESS);
  const LLT LocalPtr = GetAddrSpacePtr(AMDGPUAS::LOCAL_ADDRESS);
  const LLT FlatPtr = GetAddrSpacePtr(AMDGPUAS::FLAT_ADDRESS);
  const LLT PrivatePtr = GetAddrSpacePtr(AMDGPUAS::PRIVATE_ADDRESS);

  const LLT CodePtr = FlatPtr;

  const LLT AddrSpaces[] = {
    GlobalPtr,
    ConstantPtr,
    LocalPtr,
    FlatPtr,
    PrivatePtr
  };

  setAction({G_BRCOND, S1}, Legal);

  getActionDefinitionsBuilder({G_ADD, G_SUB, G_MUL, G_UMULH, G_SMULH})
    .legalFor({S32})
    .clampScalar(0, S32, S32)
    .scalarize(0);

  // Report legal for any types we can handle anywhere. For the cases only
  // legal on the SALU, RegBankSelect will be able to re-legalize.
  getActionDefinitionsBuilder({G_AND, G_OR, G_XOR})
    .legalFor({S32, S1, S64, V2S32, V2S16, V4S16})
    .clampScalar(0, S32, S64)
    .scalarize(0);

  getActionDefinitionsBuilder({G_UADDO, G_SADDO, G_USUBO, G_SSUBO,
                               G_UADDE, G_SADDE, G_USUBE, G_SSUBE})
    .legalFor({{S32, S1}})
    .clampScalar(0, S32, S32);

  getActionDefinitionsBuilder(G_BITCAST)
    .legalForCartesianProduct({S32, V2S16})
    .legalForCartesianProduct({S64, V2S32, V4S16})
    .legalForCartesianProduct({V2S64, V4S32})
    // Don't worry about the size constraint.
    .legalIf(all(isPointer(0), isPointer(1)));

  getActionDefinitionsBuilder(G_FCONSTANT)
    .legalFor({S32, S64, S16});

  getActionDefinitionsBuilder(G_IMPLICIT_DEF)
    .legalFor({S1, S32, S64, V2S32, V4S32, V2S16, V4S16, GlobalPtr,
               ConstantPtr, LocalPtr, FlatPtr, PrivatePtr})
    .legalFor({LLT::vector(3, 16)}) // FIXME: Hack
    .clampScalarOrElt(0, S32, S512)
    .legalIf(isMultiple32(0));

  // FIXME: i1 operands to intrinsics should always be legal, but other i1
  // values may not be legal. We need to figure out how to distinguish
  // between these two scenarios.
  getActionDefinitionsBuilder(G_CONSTANT)
    .legalFor({S1, S32, S64, GlobalPtr,
               LocalPtr, ConstantPtr, PrivatePtr, FlatPtr})
    .clampScalar(0, S32, S64)
    .widenScalarToNextPow2(0)
    .legalIf(isPointer(0));

  setAction({G_FRAME_INDEX, PrivatePtr}, Legal);

  auto &FPOpActions = getActionDefinitionsBuilder(
    {G_FADD, G_FMUL, G_FNEG, G_FABS, G_FMA})
    .legalFor({S32, S64});

  if (ST.has16BitInsts()) {
    if (ST.hasVOP3PInsts())
      FPOpActions.legalFor({S16, V2S16});
    else
      FPOpActions.legalFor({S16});
  }

  if (ST.hasVOP3PInsts())
    FPOpActions.clampMaxNumElements(0, S16, 2);
  FPOpActions
    .scalarize(0)
    .clampScalar(0, ST.has16BitInsts() ? S16 : S32, S64);

  if (ST.has16BitInsts()) {
    getActionDefinitionsBuilder(G_FSQRT)
      .legalFor({S32, S64, S16})
      .scalarize(0)
      .clampScalar(0, S16, S64);
  } else {
    getActionDefinitionsBuilder(G_FSQRT)
      .legalFor({S32, S64})
      .scalarize(0)
      .clampScalar(0, S32, S64);
  }

  getActionDefinitionsBuilder(G_FPTRUNC)
    .legalFor({{S32, S64}, {S16, S32}})
    .scalarize(0);

  getActionDefinitionsBuilder(G_FPEXT)
    .legalFor({{S64, S32}, {S32, S16}})
    .lowerFor({{S64, S16}}) // FIXME: Implement
    .scalarize(0);

  getActionDefinitionsBuilder(G_FSUB)
    // Use actual fsub instruction
    .legalFor({S32})
    // Must use fadd + fneg
    .lowerFor({S64, S16, V2S16})
    .scalarize(0)
    .clampScalar(0, S32, S64);

  getActionDefinitionsBuilder({G_SEXT, G_ZEXT, G_ANYEXT})
    .legalFor({{S64, S32}, {S32, S16}, {S64, S16},
               {S32, S1}, {S64, S1}, {S16, S1},
               // FIXME: Hack
               {S32, S8}, {S128, S32}, {S128, S64}, {S32, LLT::scalar(24)}})
    .scalarize(0);

  getActionDefinitionsBuilder({G_SITOFP, G_UITOFP})
    .legalFor({{S32, S32}, {S64, S32}})
    .scalarize(0);

  getActionDefinitionsBuilder({G_FPTOSI, G_FPTOUI})
    .legalFor({{S32, S32}, {S32, S64}})
    .scalarize(0);

  getActionDefinitionsBuilder({G_INTRINSIC_TRUNC, G_INTRINSIC_ROUND})
    .legalFor({S32, S64})
    .scalarize(0);

  for (LLT PtrTy : AddrSpaces) {
    LLT IdxTy = LLT::scalar(PtrTy.getSizeInBits());
    setAction({G_GEP, PtrTy}, Legal);
    setAction({G_GEP, 1, IdxTy}, Legal);
  }

  // FIXME: When RegBankSelect inserts copies, it will only create new
  // registers with scalar types. This means we can end up with
  // G_LOAD/G_STORE/G_GEP instructions with scalar types for their pointer
  // operands. In assert builds, the instruction selector will assert if it
  // sees a generic instruction which isn't legal, so we need to tell it that
  // scalar types are legal for pointer operands.
  setAction({G_GEP, S64}, Legal);

  setAction({G_BLOCK_ADDR, CodePtr}, Legal);

  getActionDefinitionsBuilder(G_ICMP)
    .legalForCartesianProduct(
      {S1}, {S32, S64, GlobalPtr, LocalPtr, ConstantPtr, PrivatePtr, FlatPtr})
    .legalFor({{S1, S32}, {S1, S64}})
    .widenScalarToNextPow2(1)
    .clampScalar(1, S32, S64)
    .scalarize(0)
    .legalIf(all(typeIs(0, S1), isPointer(1)));

  getActionDefinitionsBuilder(G_FCMP)
    .legalFor({{S1, S32}, {S1, S64}})
    .widenScalarToNextPow2(1)
    .clampScalar(1, S32, S64)
    .scalarize(0);

  // FIXME: fexp, flog2, flog10 need to be custom lowered.
  getActionDefinitionsBuilder({G_FPOW, G_FEXP, G_FEXP2,
                               G_FLOG, G_FLOG2, G_FLOG10})
    .legalFor({S32})
    .scalarize(0);

  // The 64-bit versions produce 32-bit results, but only on the SALU.
  getActionDefinitionsBuilder({G_CTLZ, G_CTLZ_ZERO_UNDEF,
                               G_CTTZ, G_CTTZ_ZERO_UNDEF,
                               G_CTPOP})
    .legalFor({{S32, S32}, {S32, S64}})
    .clampScalar(0, S32, S32)
    .clampScalar(1, S32, S64);
  // TODO: Scalarize

  // TODO: Expand for > s32
  getActionDefinitionsBuilder(G_BSWAP)
    .legalFor({S32})
    .clampScalar(0, S32, S32)
    .scalarize(0);

  auto smallerThan = [](unsigned TypeIdx0, unsigned TypeIdx1) {
    return [=](const LegalityQuery &Query) {
      return Query.Types[TypeIdx0].getSizeInBits() <
             Query.Types[TypeIdx1].getSizeInBits();
    };
  };

  auto greaterThan = [](unsigned TypeIdx0, unsigned TypeIdx1) {
    return [=](const LegalityQuery &Query) {
      return Query.Types[TypeIdx0].getSizeInBits() >
             Query.Types[TypeIdx1].getSizeInBits();
    };
  };

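  // e.g. a G_INTTOPTR producing a 64-bit global pointer from an s32 widens
  // the integer to s64 first, while an s128 source is narrowed to s64.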
  getActionDefinitionsBuilder(G_INTTOPTR)
    // List the common cases
    .legalForCartesianProduct({GlobalPtr, ConstantPtr, FlatPtr}, {S64})
    .legalForCartesianProduct({LocalPtr, PrivatePtr}, {S32})
    .scalarize(0)
    // Accept any address space as long as the size matches
    .legalIf(sameSize(0, 1))
    .widenScalarIf(smallerThan(1, 0),
      [](const LegalityQuery &Query) {
        return std::make_pair(1, LLT::scalar(Query.Types[0].getSizeInBits()));
      })
    .narrowScalarIf(greaterThan(1, 0),
      [](const LegalityQuery &Query) {
        return std::make_pair(1, LLT::scalar(Query.Types[0].getSizeInBits()));
      });

  getActionDefinitionsBuilder(G_PTRTOINT)
    // List the common cases
    .legalForCartesianProduct({GlobalPtr, ConstantPtr, FlatPtr}, {S64})
    .legalForCartesianProduct({LocalPtr, PrivatePtr}, {S32})
    .scalarize(0)
    // Accept any address space as long as the size matches
    .legalIf(sameSize(0, 1))
    .widenScalarIf(smallerThan(0, 1),
      [](const LegalityQuery &Query) {
        return std::make_pair(0, LLT::scalar(Query.Types[1].getSizeInBits()));
      })
    .narrowScalarIf(
      greaterThan(0, 1),
      [](const LegalityQuery &Query) {
        return std::make_pair(0, LLT::scalar(Query.Types[1].getSizeInBits()));
      });

  if (ST.hasFlatAddressSpace()) {
    getActionDefinitionsBuilder(G_ADDRSPACE_CAST)
      .scalarize(0)
      .custom();
  }

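  // Loads and stores. Extending loads whose result is wider than both 32 bits
  // and the memory size are first narrowed to s32 pieces, and 96-bit vector
  // accesses are split toward v2s32 on targets without x3 load variants; the
  // legalIf below then accepts the memory sizes each address space handles
  // directly.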
  getActionDefinitionsBuilder({G_LOAD, G_STORE})
    .narrowScalarIf([](const LegalityQuery &Query) {
        unsigned Size = Query.Types[0].getSizeInBits();
        unsigned MemSize = Query.MMODescrs[0].SizeInBits;
        return (Size > 32 && MemSize < Size);
      },
      [](const LegalityQuery &Query) {
        return std::make_pair(0, LLT::scalar(32));
      })
    .fewerElementsIf([=, &ST](const LegalityQuery &Query) {
        unsigned MemSize = Query.MMODescrs[0].SizeInBits;
        return (MemSize == 96) &&
               Query.Types[0].isVector() &&
               ST.getGeneration() < AMDGPUSubtarget::SEA_ISLANDS;
      },
      [=](const LegalityQuery &Query) {
        return std::make_pair(0, V2S32);
      })
    .legalIf([=, &ST](const LegalityQuery &Query) {
        const LLT &Ty0 = Query.Types[0];

        unsigned Size = Ty0.getSizeInBits();
        unsigned MemSize = Query.MMODescrs[0].SizeInBits;
        if (Size < 32 || (Size > 32 && MemSize < Size))
          return false;

        if (Ty0.isVector() && Size != MemSize)
          return false;

        // TODO: Decompose private loads into 4-byte components.
        // TODO: Illegal flat loads on SI
        switch (MemSize) {
        case 8:
        case 16:
          return Size == 32;
        case 32:
        case 64:
        case 128:
          return true;

        case 96:
          // XXX hasLoadX3
          return (ST.getGeneration() >= AMDGPUSubtarget::SEA_ISLANDS);

        case 256:
        case 512:
          // TODO: constant loads
        default:
          return false;
        }
      })
    .clampScalar(0, S32, S64);

  auto &ExtLoads = getActionDefinitionsBuilder({G_SEXTLOAD, G_ZEXTLOAD})
    .legalForTypesWithMemSize({
      {S32, GlobalPtr, 8},
      {S32, GlobalPtr, 16},
      {S32, LocalPtr, 8},
      {S32, LocalPtr, 16},
      {S32, PrivatePtr, 8},
      {S32, PrivatePtr, 16}});
  if (ST.hasFlatAddressSpace()) {
    ExtLoads.legalForTypesWithMemSize({{S32, FlatPtr, 8},
                                       {S32, FlatPtr, 16}});
  }

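  // Anything not matched above is clamped to a 32-bit result and lowered;
  // the generic lowering, roughly, replaces the extending load with a plain
  // load followed by the matching extension. Non-power-of-2 memory sizes are
  // rejected outright.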
  ExtLoads.clampScalar(0, S32, S32)
          .widenScalarToNextPow2(0)
          .unsupportedIfMemSizeNotPow2()
          .lower();

  auto &Atomics = getActionDefinitionsBuilder(
    {G_ATOMICRMW_XCHG, G_ATOMICRMW_ADD, G_ATOMICRMW_SUB,
     G_ATOMICRMW_AND, G_ATOMICRMW_OR, G_ATOMICRMW_XOR,
     G_ATOMICRMW_MAX, G_ATOMICRMW_MIN, G_ATOMICRMW_UMAX,
     G_ATOMICRMW_UMIN, G_ATOMIC_CMPXCHG})
    .legalFor({{S32, GlobalPtr}, {S32, LocalPtr},
               {S64, GlobalPtr}, {S64, LocalPtr}});
  if (ST.hasFlatAddressSpace()) {
    Atomics.legalFor({{S32, FlatPtr}, {S64, FlatPtr}});
  }

  // TODO: Pointer types, any 32-bit or 64-bit vector
  getActionDefinitionsBuilder(G_SELECT)
    .legalForCartesianProduct({S32, S64, V2S32, V2S16, V4S16,
                               GlobalPtr, LocalPtr, FlatPtr, PrivatePtr,
                               LLT::vector(2, LocalPtr),
                               LLT::vector(2, PrivatePtr)}, {S1})
    .clampScalar(0, S32, S64)
    .fewerElementsIf(
      [=](const LegalityQuery &Query) {
        if (Query.Types[1].isVector())
          return true;

        LLT Ty = Query.Types[0];

        // FIXME: Hack until odd splits handled
        return Ty.isVector() &&
               (Ty.getScalarSizeInBits() > 32 || Ty.getNumElements() % 2 != 0);
      },
      scalarize(0))
    // FIXME: Handle 16-bit vectors better
    .fewerElementsIf(
      [=](const LegalityQuery &Query) {
        return Query.Types[0].isVector() &&
               Query.Types[0].getElementType().getSizeInBits() < 32;
      },
      scalarize(0))
    .scalarize(1)
    .clampMaxNumElements(0, S32, 2)
    .clampMaxNumElements(0, LocalPtr, 2)
    .clampMaxNumElements(0, PrivatePtr, 2)
    .legalIf(all(isPointer(0), typeIs(1, S1)));

  // TODO: Only the low 4/5/6 bits of the shift amount are observed, so we can
  // be more flexible with the shift amount type.
  auto &Shifts = getActionDefinitionsBuilder({G_SHL, G_LSHR, G_ASHR})
    .legalFor({{S32, S32}, {S64, S32}});
  if (ST.has16BitInsts()) {
    if (ST.hasVOP3PInsts()) {
      Shifts.legalFor({{S16, S32}, {S16, S16}, {V2S16, V2S16}})
            .clampMaxNumElements(0, S16, 2);
    } else
      Shifts.legalFor({{S16, S32}, {S16, S16}});

    Shifts.clampScalar(1, S16, S32);
    Shifts.clampScalar(0, S16, S64);
  } else {
    // Make sure we legalize the shift amount type first, as the general
    // expansion for the shifted type will produce much worse code if it hasn't
    // been truncated already.
    Shifts.clampScalar(1, S32, S32);
    Shifts.clampScalar(0, S32, S64);
  }
  Shifts.scalarize(0);

  for (unsigned Op : {G_EXTRACT_VECTOR_ELT, G_INSERT_VECTOR_ELT}) {
    unsigned VecTypeIdx = Op == G_EXTRACT_VECTOR_ELT ? 1 : 0;
    unsigned EltTypeIdx = Op == G_EXTRACT_VECTOR_ELT ? 0 : 1;
    unsigned IdxTypeIdx = 2;

    getActionDefinitionsBuilder(Op)
      .legalIf([=](const LegalityQuery &Query) {
          const LLT &VecTy = Query.Types[VecTypeIdx];
          const LLT &IdxTy = Query.Types[IdxTypeIdx];
          return VecTy.getSizeInBits() % 32 == 0 &&
                 VecTy.getSizeInBits() <= 512 &&
                 IdxTy.getSizeInBits() == 32;
        })
      .clampScalar(EltTypeIdx, S32, S64)
      .clampScalar(VecTypeIdx, S32, S64)
      .clampScalar(IdxTypeIdx, S32, S32);
  }

  getActionDefinitionsBuilder(G_EXTRACT_VECTOR_ELT)
    .unsupportedIf([=](const LegalityQuery &Query) {
        const LLT &EltTy = Query.Types[1].getElementType();
        return Query.Types[0] != EltTy;
      });

  // FIXME: Doesn't handle extract of illegal sizes.
  getActionDefinitionsBuilder({G_EXTRACT, G_INSERT})
    .legalIf([=](const LegalityQuery &Query) {
        const LLT &Ty0 = Query.Types[0];
        const LLT &Ty1 = Query.Types[1];
        return (Ty0.getSizeInBits() % 16 == 0) &&
               (Ty1.getSizeInBits() % 16 == 0);
      })
    .widenScalarIf(
      [=](const LegalityQuery &Query) {
        const LLT Ty1 = Query.Types[1];
        return (Ty1.getScalarSizeInBits() < 16);
      },
      LegalizeMutations::widenScalarOrEltToNextPow2(1, 16));

  // TODO: vectors of pointers
  getActionDefinitionsBuilder(G_BUILD_VECTOR)
    .legalForCartesianProduct(AllS32Vectors, {S32})
    .legalForCartesianProduct(AllS64Vectors, {S64})
    .clampNumElements(0, V16S32, V16S32)
    .clampNumElements(0, V2S64, V8S64)
    .minScalarSameAs(1, 0)
    // FIXME: Sort of a hack to make progress on other legalizations.
    .legalIf([=](const LegalityQuery &Query) {
      return Query.Types[0].getScalarSizeInBits() <= 32 ||
             Query.Types[0].getScalarSizeInBits() == 64;
    });

  // TODO: Support any combination of v2s32
  getActionDefinitionsBuilder(G_CONCAT_VECTORS)
    .legalFor({{V4S32, V2S32},
               {V8S32, V2S32},
               {V8S32, V4S32},
               {V4S64, V2S64},
               {V4S16, V2S16},
               {V8S16, V2S16},
               {V8S16, V4S16},
               {LLT::vector(4, LocalPtr), LLT::vector(2, LocalPtr)},
               {LLT::vector(4, PrivatePtr), LLT::vector(2, PrivatePtr)}});

  // Merge/Unmerge
  for (unsigned Op : {G_MERGE_VALUES, G_UNMERGE_VALUES}) {
    unsigned BigTyIdx = Op == G_MERGE_VALUES ? 0 : 1;
    unsigned LitTyIdx = Op == G_MERGE_VALUES ? 1 : 0;

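    // Vector elements smaller than s8, larger than s64, or with a
    // non-power-of-2 size (e.g. s24) cannot be merged/unmerged directly;
    // such vectors are scalarized by the fewerElementsIf rules below.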
    auto notValidElt = [=](const LegalityQuery &Query, unsigned TypeIdx) {
      const LLT &Ty = Query.Types[TypeIdx];
      if (Ty.isVector()) {
        const LLT &EltTy = Ty.getElementType();
        if (EltTy.getSizeInBits() < 8 || EltTy.getSizeInBits() > 64)
          return true;
        if (!isPowerOf2_32(EltTy.getSizeInBits()))
          return true;
      }
      return false;
    };

    getActionDefinitionsBuilder(Op)
      .widenScalarToNextPow2(LitTyIdx, /*Min*/ 16)
      // Clamp the little scalar to s8-s256 and make it a power of 2. It's not
      // worth considering the multiples of 64 since 2*192 and 2*384 are not
      // valid.
      .clampScalar(LitTyIdx, S16, S256)
      .widenScalarToNextPow2(LitTyIdx, /*Min*/ 32)

      // Break up vectors with weird elements into scalars
      .fewerElementsIf(
        [=](const LegalityQuery &Query) { return notValidElt(Query, 0); },
        scalarize(0))
      .fewerElementsIf(
        [=](const LegalityQuery &Query) { return notValidElt(Query, 1); },
        scalarize(1))
      .clampScalar(BigTyIdx, S32, S512)
      .widenScalarIf(
        [=](const LegalityQuery &Query) {
          const LLT &Ty = Query.Types[BigTyIdx];
          return !isPowerOf2_32(Ty.getSizeInBits()) &&
                 Ty.getSizeInBits() % 16 != 0;
        },
        [=](const LegalityQuery &Query) {
          // Pick the next power of 2, or a multiple of 64 over 128.
          // Whichever is smaller.
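          // e.g. s65 widens to the next power of 2, s128, while s300 rounds
          // up to the multiple of 64, s320, instead of going all the way to
          // s512.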
          const LLT &Ty = Query.Types[BigTyIdx];
          unsigned NewSizeInBits = 1 << Log2_32_Ceil(Ty.getSizeInBits() + 1);
          if (NewSizeInBits >= 256) {
            unsigned RoundedTo = alignTo<64>(Ty.getSizeInBits() + 1);
            if (RoundedTo < NewSizeInBits)
              NewSizeInBits = RoundedTo;
          }
          return std::make_pair(BigTyIdx, LLT::scalar(NewSizeInBits));
        })
      .legalIf([=](const LegalityQuery &Query) {
          const LLT &BigTy = Query.Types[BigTyIdx];
          const LLT &LitTy = Query.Types[LitTyIdx];

          if (BigTy.isVector() && BigTy.getSizeInBits() < 32)
            return false;
          if (LitTy.isVector() && LitTy.getSizeInBits() < 32)
            return false;

          return BigTy.getSizeInBits() % 16 == 0 &&
                 LitTy.getSizeInBits() % 16 == 0 &&
                 BigTy.getSizeInBits() <= 512;
        })
      // Any vectors left are the wrong size. Scalarize them.
      .scalarize(0)
      .scalarize(1);
  }

  computeTables();
  verify(*ST.getInstrInfo());
}

bool AMDGPULegalizerInfo::legalizeCustom(MachineInstr &MI,
                                         MachineRegisterInfo &MRI,
                                         MachineIRBuilder &MIRBuilder,
                                         GISelChangeObserver &Observer) const {
  switch (MI.getOpcode()) {
  case TargetOpcode::G_ADDRSPACE_CAST:
    return legalizeAddrSpaceCast(MI, MRI, MIRBuilder);
  default:
    return false;
  }

  llvm_unreachable("expected switch to return");
}

unsigned AMDGPULegalizerInfo::getSegmentAperture(
    unsigned AS,
    MachineRegisterInfo &MRI,
    MachineIRBuilder &MIRBuilder) const {
  MachineFunction &MF = MIRBuilder.getMF();
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const LLT S32 = LLT::scalar(32);

  if (ST.hasApertureRegs()) {
    // FIXME: Use inline constants (src_{shared, private}_base) instead of
    // getreg.
    unsigned Offset = AS == AMDGPUAS::LOCAL_ADDRESS ?
      AMDGPU::Hwreg::OFFSET_SRC_SHARED_BASE :
      AMDGPU::Hwreg::OFFSET_SRC_PRIVATE_BASE;
    unsigned WidthM1 = AS == AMDGPUAS::LOCAL_ADDRESS ?
      AMDGPU::Hwreg::WIDTH_M1_SRC_SHARED_BASE :
      AMDGPU::Hwreg::WIDTH_M1_SRC_PRIVATE_BASE;
    unsigned Encoding =
      AMDGPU::Hwreg::ID_MEM_BASES << AMDGPU::Hwreg::ID_SHIFT_ |
      Offset << AMDGPU::Hwreg::OFFSET_SHIFT_ |
      WidthM1 << AMDGPU::Hwreg::WIDTH_M1_SHIFT_;

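    // The hwreg read returns the aperture in the low bits of the field;
    // shifting left by the field width (WidthM1 + 1) is assumed to rebuild
    // the 32-bit aperture value, i.e. the high half of the 64-bit segment
    // base address.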
    unsigned ShiftAmt = MRI.createGenericVirtualRegister(S32);
    unsigned ApertureReg = MRI.createGenericVirtualRegister(S32);
    unsigned GetReg = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);

    MIRBuilder.buildInstr(AMDGPU::S_GETREG_B32)
      .addDef(GetReg)
      .addImm(Encoding);
    MRI.setType(GetReg, S32);

    MIRBuilder.buildConstant(ShiftAmt, WidthM1 + 1);
    MIRBuilder.buildInstr(TargetOpcode::G_SHL)
      .addDef(ApertureReg)
      .addUse(GetReg)
      .addUse(ShiftAmt);

    return ApertureReg;
  }

  unsigned QueuePtr = MRI.createGenericVirtualRegister(
    LLT::pointer(AMDGPUAS::CONSTANT_ADDRESS, 64));

  // FIXME: Placeholder until we can track the input registers.
  MIRBuilder.buildConstant(QueuePtr, 0xdeadbeef);

  // Offset into amd_queue_t for group_segment_aperture_base_hi /
  // private_segment_aperture_base_hi.
  uint32_t StructOffset = (AS == AMDGPUAS::LOCAL_ADDRESS) ? 0x40 : 0x44;

  // FIXME: Don't use undef
  Value *V = UndefValue::get(PointerType::get(
    Type::getInt8Ty(MF.getFunction().getContext()),
    AMDGPUAS::CONSTANT_ADDRESS));

  MachinePointerInfo PtrInfo(V, StructOffset);
  MachineMemOperand *MMO = MF.getMachineMemOperand(
    PtrInfo,
    MachineMemOperand::MOLoad |
    MachineMemOperand::MODereferenceable |
    MachineMemOperand::MOInvariant,
    4,
    MinAlign(64, StructOffset));

  unsigned LoadResult = MRI.createGenericVirtualRegister(S32);
  unsigned LoadAddr = AMDGPU::NoRegister;

  MIRBuilder.materializeGEP(LoadAddr, QueuePtr, LLT::scalar(64), StructOffset);
  MIRBuilder.buildLoad(LoadResult, LoadAddr, *MMO);
  return LoadResult;
}

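// Expand G_ADDRSPACECAST into a null-pointer check plus a pointer rebuild.
// A rough sketch of the MIR produced for a private (32-bit) to flat (64-bit)
// cast, with illustrative value names:
//   %cmp:_(s1) = G_ICMP intpred(ne), %src(p5), %segment_null
//   %lo:_(s32) = G_PTRTOINT %src(p5)
//   %ptr:_(p0) = G_MERGE_VALUES %lo(s32), %aperture(s32)
//   %dst:_(p0) = G_SELECT %cmp(s1), %ptr(p0), %flat_null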
bool AMDGPULegalizerInfo::legalizeAddrSpaceCast(
    MachineInstr &MI, MachineRegisterInfo &MRI,
    MachineIRBuilder &MIRBuilder) const {
  MachineFunction &MF = MIRBuilder.getMF();

  MIRBuilder.setInstr(MI);

  unsigned Dst = MI.getOperand(0).getReg();
  unsigned Src = MI.getOperand(1).getReg();

  LLT DstTy = MRI.getType(Dst);
  LLT SrcTy = MRI.getType(Src);
  unsigned DestAS = DstTy.getAddressSpace();
  unsigned SrcAS = SrcTy.getAddressSpace();

  // TODO: Avoid reloading from the queue ptr for each cast, or at least each
  // vector element.
  assert(!DstTy.isVector());

  const AMDGPUTargetMachine &TM
    = static_cast<const AMDGPUTargetMachine &>(MF.getTarget());

  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  if (ST.getTargetLowering()->isNoopAddrSpaceCast(SrcAS, DestAS)) {
    MI.setDesc(MIRBuilder.getTII().get(TargetOpcode::COPY));
    return true;
  }

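  // Flat -> local/private: the result is the low 32 bits of the source,
  // except that the flat null value must map to the segment null value.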
  if (SrcAS == AMDGPUAS::FLAT_ADDRESS) {
    assert(DestAS == AMDGPUAS::LOCAL_ADDRESS ||
           DestAS == AMDGPUAS::PRIVATE_ADDRESS);
    unsigned NullVal = TM.getNullPointerValue(DestAS);

    unsigned SegmentNullReg = MRI.createGenericVirtualRegister(DstTy);
    unsigned FlatNullReg = MRI.createGenericVirtualRegister(SrcTy);

    MIRBuilder.buildConstant(SegmentNullReg, NullVal);
    MIRBuilder.buildConstant(FlatNullReg, 0);

    unsigned PtrLo32 = MRI.createGenericVirtualRegister(DstTy);

    // Extract low 32-bits of the pointer.
    MIRBuilder.buildExtract(PtrLo32, Src, 0);

    unsigned CmpRes = MRI.createGenericVirtualRegister(LLT::scalar(1));
    MIRBuilder.buildICmp(CmpInst::ICMP_NE, CmpRes, Src, FlatNullReg);
    MIRBuilder.buildSelect(Dst, CmpRes, PtrLo32, SegmentNullReg);

    MI.eraseFromParent();
    return true;
  }

  assert(SrcAS == AMDGPUAS::LOCAL_ADDRESS ||
         SrcAS == AMDGPUAS::PRIVATE_ADDRESS);

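  // Local/private -> flat: rebuild a 64-bit pointer from the 32-bit segment
  // offset and the aperture base, mapping the segment null value to the flat
  // null value.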
  unsigned FlatNullReg = MRI.createGenericVirtualRegister(DstTy);
  unsigned SegmentNullReg = MRI.createGenericVirtualRegister(SrcTy);
  MIRBuilder.buildConstant(SegmentNullReg, TM.getNullPointerValue(SrcAS));
  MIRBuilder.buildConstant(FlatNullReg, TM.getNullPointerValue(DestAS));

  unsigned ApertureReg = getSegmentAperture(DestAS, MRI, MIRBuilder);

  unsigned CmpRes = MRI.createGenericVirtualRegister(LLT::scalar(1));
  MIRBuilder.buildICmp(CmpInst::ICMP_NE, CmpRes, Src, SegmentNullReg);

  unsigned BuildPtr = MRI.createGenericVirtualRegister(DstTy);

  // Coerce the type of the low half of the result so we can use merge_values.
  unsigned SrcAsInt = MRI.createGenericVirtualRegister(LLT::scalar(32));
  MIRBuilder.buildInstr(TargetOpcode::G_PTRTOINT)
    .addDef(SrcAsInt)
    .addUse(Src);

  // TODO: Should we allow mismatched types but matching sizes in merges to
  // avoid the ptrtoint?
  MIRBuilder.buildMerge(BuildPtr, {SrcAsInt, ApertureReg});
  MIRBuilder.buildSelect(Dst, CmpRes, BuildPtr, FlatNullReg);

  MI.eraseFromParent();
  return true;
}