//===- AMDGPULegalizerInfo.cpp -----------------------------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the targeting of the MachineLegalizer class for
/// AMDGPU.
/// \todo This should be generated by TableGen.
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPULegalizerInfo.h"
#include "AMDGPUTargetMachine.h"
#include "SIMachineFunctionInfo.h"

#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/Debug.h"

using namespace llvm;
using namespace LegalizeActions;
using namespace LegalizeMutations;
using namespace LegalityPredicates;

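// Returns true if the type at \p TypeIdx fits in \p MaxSize bits (512 by
// default) and each of its scalar elements is a whole multiple of 32 bits.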
static LegalityPredicate isMultiple32(unsigned TypeIdx,
                                      unsigned MaxSize = 512) {
  return [=](const LegalityQuery &Query) {
    const LLT Ty = Query.Types[TypeIdx];
    const LLT EltTy = Ty.getScalarType();
    return Ty.getSizeInBits() <= MaxSize && EltTy.getSizeInBits() % 32 == 0;
  };
}

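// Matches vectors with an odd element count and sub-32-bit elements (e.g.
// v3s16), which get padded with one extra element to reach a 32-bit multiple.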
static LegalityPredicate isSmallOddVector(unsigned TypeIdx) {
  return [=](const LegalityQuery &Query) {
    const LLT Ty = Query.Types[TypeIdx];
    return Ty.isVector() &&
           Ty.getNumElements() % 2 != 0 &&
           Ty.getElementType().getSizeInBits() < 32;
  };
}

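// Mutation that widens a vector by a single element, e.g. v3s16 -> v4s16.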
static LegalizeMutation oneMoreElement(unsigned TypeIdx) {
  return [=](const LegalityQuery &Query) {
    const LLT Ty = Query.Types[TypeIdx];
    const LLT EltTy = Ty.getElementType();
    return std::make_pair(TypeIdx, LLT::vector(Ty.getNumElements() + 1, EltTy));
  };
}

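// Mutation that splits a wide vector into pieces of at most 64 bits each. For
// example, v4s32 (128 bits) needs two pieces, so the new type is v2s32.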
static LegalizeMutation fewerEltsToSize64Vector(unsigned TypeIdx) {
  return [=](const LegalityQuery &Query) {
    const LLT Ty = Query.Types[TypeIdx];
    const LLT EltTy = Ty.getElementType();
    unsigned Size = Ty.getSizeInBits();
    unsigned Pieces = (Size + 63) / 64;
    unsigned NewNumElts = (Ty.getNumElements() + 1) / Pieces;
    return std::make_pair(TypeIdx, LLT::scalarOrVector(NewNumElts, EltTy));
  };
}

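// True if the type at \p TypeIdx is a vector wider than \p Size bits.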
static LegalityPredicate vectorWiderThan(unsigned TypeIdx, unsigned Size) {
  return [=](const LegalityQuery &Query) {
    const LLT QueryTy = Query.Types[TypeIdx];
    return QueryTy.isVector() && QueryTy.getSizeInBits() > Size;
  };
}

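// True if the type at \p TypeIdx is a vector with an odd number of elements.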
static LegalityPredicate numElementsNotEven(unsigned TypeIdx) {
  return [=](const LegalityQuery &Query) {
    const LLT QueryTy = Query.Types[TypeIdx];
    return QueryTy.isVector() && QueryTy.getNumElements() % 2 != 0;
  };
}

AMDGPULegalizerInfo::AMDGPULegalizerInfo(const GCNSubtarget &ST,
                                         const GCNTargetMachine &TM) {
  using namespace TargetOpcode;

  auto GetAddrSpacePtr = [&TM](unsigned AS) {
    return LLT::pointer(AS, TM.getPointerSizeInBits(AS));
  };

  const LLT S1 = LLT::scalar(1);
  const LLT S8 = LLT::scalar(8);
  const LLT S16 = LLT::scalar(16);
  const LLT S32 = LLT::scalar(32);
  const LLT S64 = LLT::scalar(64);
  const LLT S128 = LLT::scalar(128);
  const LLT S256 = LLT::scalar(256);
  const LLT S512 = LLT::scalar(512);

  const LLT V2S16 = LLT::vector(2, 16);
  const LLT V4S16 = LLT::vector(4, 16);
  const LLT V8S16 = LLT::vector(8, 16);

  const LLT V2S32 = LLT::vector(2, 32);
  const LLT V3S32 = LLT::vector(3, 32);
  const LLT V4S32 = LLT::vector(4, 32);
  const LLT V5S32 = LLT::vector(5, 32);
  const LLT V6S32 = LLT::vector(6, 32);
  const LLT V7S32 = LLT::vector(7, 32);
  const LLT V8S32 = LLT::vector(8, 32);
  const LLT V9S32 = LLT::vector(9, 32);
  const LLT V10S32 = LLT::vector(10, 32);
  const LLT V11S32 = LLT::vector(11, 32);
  const LLT V12S32 = LLT::vector(12, 32);
  const LLT V13S32 = LLT::vector(13, 32);
  const LLT V14S32 = LLT::vector(14, 32);
  const LLT V15S32 = LLT::vector(15, 32);
  const LLT V16S32 = LLT::vector(16, 32);

  const LLT V2S64 = LLT::vector(2, 64);
  const LLT V3S64 = LLT::vector(3, 64);
  const LLT V4S64 = LLT::vector(4, 64);
  const LLT V5S64 = LLT::vector(5, 64);
  const LLT V6S64 = LLT::vector(6, 64);
  const LLT V7S64 = LLT::vector(7, 64);
  const LLT V8S64 = LLT::vector(8, 64);

  std::initializer_list<LLT> AllS32Vectors =
    {V2S32, V3S32, V4S32, V5S32, V6S32, V7S32, V8S32,
     V9S32, V10S32, V11S32, V12S32, V13S32, V14S32, V15S32, V16S32};
  std::initializer_list<LLT> AllS64Vectors =
    {V2S64, V3S64, V4S64, V5S64, V6S64, V7S64, V8S64};

  const LLT GlobalPtr = GetAddrSpacePtr(AMDGPUAS::GLOBAL_ADDRESS);
  const LLT ConstantPtr = GetAddrSpacePtr(AMDGPUAS::CONSTANT_ADDRESS);
  const LLT LocalPtr = GetAddrSpacePtr(AMDGPUAS::LOCAL_ADDRESS);
  const LLT FlatPtr = GetAddrSpacePtr(AMDGPUAS::FLAT_ADDRESS);
  const LLT PrivatePtr = GetAddrSpacePtr(AMDGPUAS::PRIVATE_ADDRESS);

  const LLT CodePtr = FlatPtr;

  const std::initializer_list<LLT> AddrSpaces64 = {
    GlobalPtr, ConstantPtr, FlatPtr
  };

  const std::initializer_list<LLT> AddrSpaces32 = {
    LocalPtr, PrivatePtr
  };

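  // Note: within each rule set below, the rules are tried in the order they
  // are listed and the first matching rule decides the action, so the
  // legalFor cases are checked before the fallback clamps and mutations.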
  setAction({G_BRCOND, S1}, Legal);

  // TODO: All multiples of 32, vectors of pointers, all v2s16 pairs, more
  // elements for v3s16
  getActionDefinitionsBuilder(G_PHI)
    .legalFor({S32, S64, V2S16, V4S16, S1, S128, S256})
    .legalFor(AllS32Vectors)
    .legalFor(AllS64Vectors)
    .legalFor(AddrSpaces64)
    .legalFor(AddrSpaces32)
    .clampScalar(0, S32, S256)
    .widenScalarToNextPow2(0, 32)
    .clampMaxNumElements(0, S32, 16)
    .moreElementsIf(isSmallOddVector(0), oneMoreElement(0))
    .legalIf(isPointer(0));

  getActionDefinitionsBuilder({G_ADD, G_SUB, G_MUL, G_UMULH, G_SMULH})
    .legalFor({S32})
    .clampScalar(0, S32, S32)
    .scalarize(0);

  // Report legal for any types we can handle anywhere. For the cases only
  // legal on the SALU, RegBankSelect will be able to re-legalize.
  getActionDefinitionsBuilder({G_AND, G_OR, G_XOR})
    .legalFor({S32, S1, S64, V2S32, V2S16, V4S16})
    .clampScalar(0, S32, S64)
    .moreElementsIf(isSmallOddVector(0), oneMoreElement(0))
    .fewerElementsIf(vectorWiderThan(0, 32), fewerEltsToSize64Vector(0))
    .widenScalarToNextPow2(0)
    .scalarize(0);

  getActionDefinitionsBuilder({G_UADDO, G_SADDO, G_USUBO, G_SSUBO,
                               G_UADDE, G_SADDE, G_USUBE, G_SSUBE})
    .legalFor({{S32, S1}})
    .clampScalar(0, S32, S32);

  getActionDefinitionsBuilder(G_BITCAST)
    .legalForCartesianProduct({S32, V2S16})
    .legalForCartesianProduct({S64, V2S32, V4S16})
    .legalForCartesianProduct({V2S64, V4S32})
    // Don't worry about the size constraint.
    .legalIf(all(isPointer(0), isPointer(1)));

  if (ST.has16BitInsts()) {
    getActionDefinitionsBuilder(G_FCONSTANT)
      .legalFor({S32, S64, S16})
      .clampScalar(0, S16, S64);
  } else {
    getActionDefinitionsBuilder(G_FCONSTANT)
      .legalFor({S32, S64})
      .clampScalar(0, S32, S64);
  }

  getActionDefinitionsBuilder(G_IMPLICIT_DEF)
    .legalFor({S1, S32, S64, V2S32, V4S32, V2S16, V4S16, GlobalPtr,
               ConstantPtr, LocalPtr, FlatPtr, PrivatePtr})
    .moreElementsIf(isSmallOddVector(0), oneMoreElement(0))
    .clampScalarOrElt(0, S32, S512)
    .legalIf(isMultiple32(0))
    .widenScalarToNextPow2(0, 32)
    .clampMaxNumElements(0, S32, 16);

  // FIXME: i1 operands to intrinsics should always be legal, but other i1
  // values may not be legal. We need to figure out how to distinguish
  // between these two scenarios.
  getActionDefinitionsBuilder(G_CONSTANT)
    .legalFor({S1, S32, S64, GlobalPtr,
               LocalPtr, ConstantPtr, PrivatePtr, FlatPtr})
    .clampScalar(0, S32, S64)
    .widenScalarToNextPow2(0)
    .legalIf(isPointer(0));

  setAction({G_FRAME_INDEX, PrivatePtr}, Legal);

  auto &FPOpActions = getActionDefinitionsBuilder(
    {G_FADD, G_FMUL, G_FNEG, G_FABS, G_FMA, G_FCANONICALIZE})
    .legalFor({S32, S64});

  if (ST.has16BitInsts()) {
    if (ST.hasVOP3PInsts())
      FPOpActions.legalFor({S16, V2S16});
    else
      FPOpActions.legalFor({S16});
  }

  if (ST.hasVOP3PInsts())
    FPOpActions.clampMaxNumElements(0, S16, 2);
  FPOpActions
    .scalarize(0)
    .clampScalar(0, ST.has16BitInsts() ? S16 : S32, S64);

  if (ST.has16BitInsts()) {
    getActionDefinitionsBuilder(G_FSQRT)
      .legalFor({S32, S64, S16})
      .scalarize(0)
      .clampScalar(0, S16, S64);
  } else {
    getActionDefinitionsBuilder(G_FSQRT)
      .legalFor({S32, S64})
      .scalarize(0)
      .clampScalar(0, S32, S64);
  }

  getActionDefinitionsBuilder(G_FPTRUNC)
    .legalFor({{S32, S64}, {S16, S32}})
    .scalarize(0);

  getActionDefinitionsBuilder(G_FPEXT)
    .legalFor({{S64, S32}, {S32, S16}})
    .lowerFor({{S64, S16}}) // FIXME: Implement
    .scalarize(0);

  getActionDefinitionsBuilder(G_FCOPYSIGN)
    .legalForCartesianProduct({S16, S32, S64}, {S16, S32, S64})
    .scalarize(0);

  getActionDefinitionsBuilder(G_FSUB)
    // Use actual fsub instruction
    .legalFor({S32})
    // Must use fadd + fneg
    .lowerFor({S64, S16, V2S16})
    .scalarize(0)
    .clampScalar(0, S32, S64);

  getActionDefinitionsBuilder({G_SEXT, G_ZEXT, G_ANYEXT})
    .legalFor({{S64, S32}, {S32, S16}, {S64, S16},
               {S32, S1}, {S64, S1}, {S16, S1},
               // FIXME: Hack
               {S64, LLT::scalar(33)},
               {S32, S8}, {S128, S32}, {S128, S64}, {S32, LLT::scalar(24)}})
    .scalarize(0);

  getActionDefinitionsBuilder({G_SITOFP, G_UITOFP})
    .legalFor({{S32, S32}, {S64, S32}})
    .lowerFor({{S32, S64}})
    .customFor({{S64, S64}})
    .scalarize(0);

  getActionDefinitionsBuilder({G_FPTOSI, G_FPTOUI})
    .legalFor({{S32, S32}, {S32, S64}})
    .scalarize(0);

  getActionDefinitionsBuilder(G_INTRINSIC_ROUND)
    .legalFor({S32, S64})
    .scalarize(0);

  if (ST.getGeneration() >= AMDGPUSubtarget::SEA_ISLANDS) {
    getActionDefinitionsBuilder({G_INTRINSIC_TRUNC, G_FCEIL, G_FRINT})
      .legalFor({S32, S64})
      .clampScalar(0, S32, S64)
      .scalarize(0);
  } else {
    getActionDefinitionsBuilder({G_INTRINSIC_TRUNC, G_FCEIL, G_FRINT})
      .legalFor({S32})
      .customFor({S64})
      .clampScalar(0, S32, S64)
      .scalarize(0);
  }

  getActionDefinitionsBuilder(G_GEP)
    .legalForCartesianProduct(AddrSpaces64, {S64})
    .legalForCartesianProduct(AddrSpaces32, {S32})
    .scalarize(0);

  setAction({G_BLOCK_ADDR, CodePtr}, Legal);

  getActionDefinitionsBuilder(G_ICMP)
    .legalForCartesianProduct(
      {S1}, {S32, S64, GlobalPtr, LocalPtr, ConstantPtr, PrivatePtr, FlatPtr})
    .legalFor({{S1, S32}, {S1, S64}})
    .widenScalarToNextPow2(1)
    .clampScalar(1, S32, S64)
    .scalarize(0)
    .legalIf(all(typeIs(0, S1), isPointer(1)));

  getActionDefinitionsBuilder(G_FCMP)
    .legalFor({{S1, S32}, {S1, S64}})
    .widenScalarToNextPow2(1)
    .clampScalar(1, S32, S64)
    .scalarize(0);

  // FIXME: fexp, flog2, flog10 need to be custom lowered.
  getActionDefinitionsBuilder({G_FPOW, G_FEXP, G_FEXP2,
                               G_FLOG, G_FLOG2, G_FLOG10})
    .legalFor({S32})
    .scalarize(0);

  // The 64-bit versions produce 32-bit results, but only on the SALU.
  getActionDefinitionsBuilder({G_CTLZ, G_CTLZ_ZERO_UNDEF,
                               G_CTTZ, G_CTTZ_ZERO_UNDEF,
                               G_CTPOP})
    .legalFor({{S32, S32}, {S32, S64}})
    .clampScalar(0, S32, S32)
    .clampScalar(1, S32, S64)
    .scalarize(0)
    .widenScalarToNextPow2(0, 32)
    .widenScalarToNextPow2(1, 32);

  // TODO: Expand for > s32
  getActionDefinitionsBuilder(G_BSWAP)
    .legalFor({S32})
    .clampScalar(0, S32, S32)
    .scalarize(0);

  if (ST.has16BitInsts()) {
    if (ST.hasVOP3PInsts()) {
      getActionDefinitionsBuilder({G_SMIN, G_SMAX, G_UMIN, G_UMAX})
        .legalFor({S32, S16, V2S16})
        .moreElementsIf(isSmallOddVector(0), oneMoreElement(0))
        .clampMaxNumElements(0, S16, 2)
        .clampScalar(0, S16, S32)
        .widenScalarToNextPow2(0)
        .scalarize(0);
    } else {
      getActionDefinitionsBuilder({G_SMIN, G_SMAX, G_UMIN, G_UMAX})
        .legalFor({S32, S16})
        .widenScalarToNextPow2(0)
        .clampScalar(0, S16, S32)
        .scalarize(0);
    }
  } else {
    getActionDefinitionsBuilder({G_SMIN, G_SMAX, G_UMIN, G_UMAX})
      .legalFor({S32})
      .clampScalar(0, S32, S32)
      .widenScalarToNextPow2(0)
      .scalarize(0);
  }

  auto smallerThan = [](unsigned TypeIdx0, unsigned TypeIdx1) {
    return [=](const LegalityQuery &Query) {
      return Query.Types[TypeIdx0].getSizeInBits() <
             Query.Types[TypeIdx1].getSizeInBits();
    };
  };

  auto greaterThan = [](unsigned TypeIdx0, unsigned TypeIdx1) {
    return [=](const LegalityQuery &Query) {
      return Query.Types[TypeIdx0].getSizeInBits() >
             Query.Types[TypeIdx1].getSizeInBits();
    };
  };

  getActionDefinitionsBuilder(G_INTTOPTR)
    // List the common cases
    .legalForCartesianProduct(AddrSpaces64, {S64})
    .legalForCartesianProduct(AddrSpaces32, {S32})
    .scalarize(0)
    // Accept any address space as long as the size matches
    .legalIf(sameSize(0, 1))
    .widenScalarIf(smallerThan(1, 0),
      [](const LegalityQuery &Query) {
        return std::make_pair(1, LLT::scalar(Query.Types[0].getSizeInBits()));
      })
    .narrowScalarIf(greaterThan(1, 0),
      [](const LegalityQuery &Query) {
        return std::make_pair(1, LLT::scalar(Query.Types[0].getSizeInBits()));
      });

  getActionDefinitionsBuilder(G_PTRTOINT)
    // List the common cases
    .legalForCartesianProduct(AddrSpaces64, {S64})
    .legalForCartesianProduct(AddrSpaces32, {S32})
    .scalarize(0)
    // Accept any address space as long as the size matches
    .legalIf(sameSize(0, 1))
    .widenScalarIf(smallerThan(0, 1),
      [](const LegalityQuery &Query) {
        return std::make_pair(0, LLT::scalar(Query.Types[1].getSizeInBits()));
      })
    .narrowScalarIf(
      greaterThan(0, 1),
      [](const LegalityQuery &Query) {
        return std::make_pair(0, LLT::scalar(Query.Types[1].getSizeInBits()));
      });

  if (ST.hasFlatAddressSpace()) {
    getActionDefinitionsBuilder(G_ADDRSPACE_CAST)
      .scalarize(0)
      .custom();
  }

  getActionDefinitionsBuilder({G_LOAD, G_STORE})
    .narrowScalarIf([](const LegalityQuery &Query) {
        unsigned Size = Query.Types[0].getSizeInBits();
        unsigned MemSize = Query.MMODescrs[0].SizeInBits;
        return (Size > 32 && MemSize < Size);
      },
      [](const LegalityQuery &Query) {
        return std::make_pair(0, LLT::scalar(32));
      })
    .fewerElementsIf([=, &ST](const LegalityQuery &Query) {
        unsigned MemSize = Query.MMODescrs[0].SizeInBits;
        return (MemSize == 96) &&
               Query.Types[0].isVector() &&
               !ST.hasDwordx3LoadStores();
      },
      [=](const LegalityQuery &Query) {
        return std::make_pair(0, V2S32);
      })
    .legalIf([=, &ST](const LegalityQuery &Query) {
        const LLT &Ty0 = Query.Types[0];

        unsigned Size = Ty0.getSizeInBits();
        unsigned MemSize = Query.MMODescrs[0].SizeInBits;
        if (Size < 32 || (Size > 32 && MemSize < Size))
          return false;

        if (Ty0.isVector() && Size != MemSize)
          return false;

        // TODO: Decompose private loads into 4-byte components.
        // TODO: Illegal flat loads on SI
        switch (MemSize) {
        case 8:
        case 16:
          return Size == 32;
        case 32:
        case 64:
        case 128:
          return true;

        case 96:
          return ST.hasDwordx3LoadStores();

        case 256:
        case 512:
          // TODO: constant loads
        default:
          return false;
        }
      })
    .clampScalar(0, S32, S64);

  // FIXME: Handle alignment requirements.
  auto &ExtLoads = getActionDefinitionsBuilder({G_SEXTLOAD, G_ZEXTLOAD})
    .legalForTypesWithMemDesc({{S32, GlobalPtr, 8, 8},
                               {S32, GlobalPtr, 16, 8},
                               {S32, LocalPtr, 8, 8},
                               {S32, LocalPtr, 16, 8},
                               {S32, PrivatePtr, 8, 8},
                               {S32, PrivatePtr, 16, 8}});
  if (ST.hasFlatAddressSpace()) {
    ExtLoads.legalForTypesWithMemDesc({{S32, FlatPtr, 8, 8},
                                       {S32, FlatPtr, 16, 8}});
  }

  ExtLoads.clampScalar(0, S32, S32)
          .widenScalarToNextPow2(0)
          .unsupportedIfMemSizeNotPow2()
          .lower();

  auto &Atomics = getActionDefinitionsBuilder(
    {G_ATOMICRMW_XCHG, G_ATOMICRMW_ADD, G_ATOMICRMW_SUB,
     G_ATOMICRMW_AND, G_ATOMICRMW_OR, G_ATOMICRMW_XOR,
     G_ATOMICRMW_MAX, G_ATOMICRMW_MIN, G_ATOMICRMW_UMAX,
     G_ATOMICRMW_UMIN, G_ATOMIC_CMPXCHG})
    .legalFor({{S32, GlobalPtr}, {S32, LocalPtr},
               {S64, GlobalPtr}, {S64, LocalPtr}});
  if (ST.hasFlatAddressSpace()) {
    Atomics.legalFor({{S32, FlatPtr}, {S64, FlatPtr}});
  }

  // TODO: Pointer types, any 32-bit or 64-bit vector
  getActionDefinitionsBuilder(G_SELECT)
    .legalForCartesianProduct({S32, S64, V2S32, V2S16, V4S16,
                               GlobalPtr, LocalPtr, FlatPtr, PrivatePtr,
                               LLT::vector(2, LocalPtr),
                               LLT::vector(2, PrivatePtr)}, {S1})
    .clampScalar(0, S32, S64)
    .moreElementsIf(isSmallOddVector(0), oneMoreElement(0))
    .fewerElementsIf(numElementsNotEven(0), scalarize(0))
    .scalarize(1)
    .clampMaxNumElements(0, S32, 2)
    .clampMaxNumElements(0, LocalPtr, 2)
    .clampMaxNumElements(0, PrivatePtr, 2)
    .scalarize(0)
    .widenScalarToNextPow2(0)
    .legalIf(all(isPointer(0), typeIs(1, S1)));

  // TODO: Only the low 4/5/6 bits of the shift amount are observed, so we can
  // be more flexible with the shift amount type.
  auto &Shifts = getActionDefinitionsBuilder({G_SHL, G_LSHR, G_ASHR})
    .legalFor({{S32, S32}, {S64, S32}});
  if (ST.has16BitInsts()) {
    if (ST.hasVOP3PInsts()) {
      Shifts.legalFor({{S16, S32}, {S16, S16}, {V2S16, V2S16}})
            .clampMaxNumElements(0, S16, 2);
    } else
      Shifts.legalFor({{S16, S32}, {S16, S16}});

    Shifts.clampScalar(1, S16, S32);
    Shifts.clampScalar(0, S16, S64);
    Shifts.widenScalarToNextPow2(0, 16);
  } else {
    // Make sure we legalize the shift amount type first, as the general
    // expansion for the shifted type will produce much worse code if it hasn't
    // been truncated already.
    Shifts.clampScalar(1, S32, S32);
    Shifts.clampScalar(0, S32, S64);
    Shifts.widenScalarToNextPow2(0, 32);
  }
  Shifts.scalarize(0);

  for (unsigned Op : {G_EXTRACT_VECTOR_ELT, G_INSERT_VECTOR_ELT}) {
    unsigned VecTypeIdx = Op == G_EXTRACT_VECTOR_ELT ? 1 : 0;
    unsigned EltTypeIdx = Op == G_EXTRACT_VECTOR_ELT ? 0 : 1;
    unsigned IdxTypeIdx = 2;

    getActionDefinitionsBuilder(Op)
      .legalIf([=](const LegalityQuery &Query) {
          const LLT &VecTy = Query.Types[VecTypeIdx];
          const LLT &IdxTy = Query.Types[IdxTypeIdx];
          return VecTy.getSizeInBits() % 32 == 0 &&
                 VecTy.getSizeInBits() <= 512 &&
                 IdxTy.getSizeInBits() == 32;
        })
      .clampScalar(EltTypeIdx, S32, S64)
      .clampScalar(VecTypeIdx, S32, S64)
      .clampScalar(IdxTypeIdx, S32, S32);
  }

  getActionDefinitionsBuilder(G_EXTRACT_VECTOR_ELT)
    .unsupportedIf([=](const LegalityQuery &Query) {
        const LLT &EltTy = Query.Types[1].getElementType();
        return Query.Types[0] != EltTy;
      });

  for (unsigned Op : {G_EXTRACT, G_INSERT}) {
    unsigned BigTyIdx = Op == G_EXTRACT ? 1 : 0;
    unsigned LitTyIdx = Op == G_EXTRACT ? 0 : 1;

    // FIXME: Doesn't handle extract of illegal sizes.
    getActionDefinitionsBuilder(Op)
      .legalIf([=](const LegalityQuery &Query) {
          const LLT BigTy = Query.Types[BigTyIdx];
          const LLT LitTy = Query.Types[LitTyIdx];
          return (BigTy.getSizeInBits() % 32 == 0) &&
                 (LitTy.getSizeInBits() % 16 == 0);
        })
      .widenScalarIf(
        [=](const LegalityQuery &Query) {
          const LLT BigTy = Query.Types[BigTyIdx];
          return (BigTy.getScalarSizeInBits() < 16);
        },
        LegalizeMutations::widenScalarOrEltToNextPow2(BigTyIdx, 16))
      .widenScalarIf(
        [=](const LegalityQuery &Query) {
          const LLT LitTy = Query.Types[LitTyIdx];
          return (LitTy.getScalarSizeInBits() < 16);
        },
        LegalizeMutations::widenScalarOrEltToNextPow2(LitTyIdx, 16))
      .moreElementsIf(isSmallOddVector(BigTyIdx), oneMoreElement(BigTyIdx))
      .widenScalarToNextPow2(BigTyIdx, 32);
  }

  // TODO: vectors of pointers
  getActionDefinitionsBuilder(G_BUILD_VECTOR)
    .legalForCartesianProduct(AllS32Vectors, {S32})
    .legalForCartesianProduct(AllS64Vectors, {S64})
    .clampNumElements(0, V16S32, V16S32)
    .clampNumElements(0, V2S64, V8S64)
    .minScalarSameAs(1, 0)
    // FIXME: Sort of a hack to make progress on other legalizations.
    .legalIf([=](const LegalityQuery &Query) {
      return Query.Types[0].getScalarSizeInBits() <= 32 ||
             Query.Types[0].getScalarSizeInBits() == 64;
    });

  // TODO: Support any combination of v2s32
  getActionDefinitionsBuilder(G_CONCAT_VECTORS)
    .legalFor({{V4S32, V2S32},
               {V8S32, V2S32},
               {V8S32, V4S32},
               {V4S64, V2S64},
               {V4S16, V2S16},
               {V8S16, V2S16},
               {V8S16, V4S16},
               {LLT::vector(4, LocalPtr), LLT::vector(2, LocalPtr)},
               {LLT::vector(4, PrivatePtr), LLT::vector(2, PrivatePtr)}});

  // Merge/Unmerge
  for (unsigned Op : {G_MERGE_VALUES, G_UNMERGE_VALUES}) {
    unsigned BigTyIdx = Op == G_MERGE_VALUES ? 0 : 1;
    unsigned LitTyIdx = Op == G_MERGE_VALUES ? 1 : 0;

    auto notValidElt = [=](const LegalityQuery &Query, unsigned TypeIdx) {
      const LLT &Ty = Query.Types[TypeIdx];
      if (Ty.isVector()) {
        const LLT &EltTy = Ty.getElementType();
        if (EltTy.getSizeInBits() < 8 || EltTy.getSizeInBits() > 64)
          return true;
        if (!isPowerOf2_32(EltTy.getSizeInBits()))
          return true;
      }
      return false;
    };

    getActionDefinitionsBuilder(Op)
      .widenScalarToNextPow2(LitTyIdx, /*Min*/ 16)
      // Clamp the little scalar to s16-s256 and make it a power of 2. It's
      // not worth considering the multiples of 64 since 2*192 and 2*384 are
      // not valid.
      .clampScalar(LitTyIdx, S16, S256)
      .widenScalarToNextPow2(LitTyIdx, /*Min*/ 32)

      // Break up vectors with weird elements into scalars
      .fewerElementsIf(
        [=](const LegalityQuery &Query) { return notValidElt(Query, 0); },
        scalarize(0))
      .fewerElementsIf(
        [=](const LegalityQuery &Query) { return notValidElt(Query, 1); },
        scalarize(1))
      .clampScalar(BigTyIdx, S32, S512)
      .widenScalarIf(
        [=](const LegalityQuery &Query) {
          const LLT &Ty = Query.Types[BigTyIdx];
          return !isPowerOf2_32(Ty.getSizeInBits()) &&
                 Ty.getSizeInBits() % 16 != 0;
        },
        [=](const LegalityQuery &Query) {
          // Pick the next power of 2, or a multiple of 64 over 128.
          // Whichever is smaller.
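          // e.g. s65 widens to s128, but s258 widens to s320 (the next
          // multiple of 64) rather than all the way to s512.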
          const LLT &Ty = Query.Types[BigTyIdx];
          unsigned NewSizeInBits = 1 << Log2_32_Ceil(Ty.getSizeInBits() + 1);
          if (NewSizeInBits >= 256) {
            unsigned RoundedTo = alignTo<64>(Ty.getSizeInBits() + 1);
            if (RoundedTo < NewSizeInBits)
              NewSizeInBits = RoundedTo;
          }
          return std::make_pair(BigTyIdx, LLT::scalar(NewSizeInBits));
        })
      .legalIf([=](const LegalityQuery &Query) {
          const LLT &BigTy = Query.Types[BigTyIdx];
          const LLT &LitTy = Query.Types[LitTyIdx];

          if (BigTy.isVector() && BigTy.getSizeInBits() < 32)
            return false;
          if (LitTy.isVector() && LitTy.getSizeInBits() < 32)
            return false;

          return BigTy.getSizeInBits() % 16 == 0 &&
                 LitTy.getSizeInBits() % 16 == 0 &&
                 BigTy.getSizeInBits() <= 512;
        })
      // Any vectors left are the wrong size. Scalarize them.
      .scalarize(0)
      .scalarize(1);
  }

  computeTables();
  verify(*ST.getInstrInfo());
}

bool AMDGPULegalizerInfo::legalizeCustom(MachineInstr &MI,
                                         MachineRegisterInfo &MRI,
                                         MachineIRBuilder &MIRBuilder,
                                         GISelChangeObserver &Observer) const {
  switch (MI.getOpcode()) {
  case TargetOpcode::G_ADDRSPACE_CAST:
    return legalizeAddrSpaceCast(MI, MRI, MIRBuilder);
  case TargetOpcode::G_FRINT:
    return legalizeFrint(MI, MRI, MIRBuilder);
  case TargetOpcode::G_FCEIL:
    return legalizeFceil(MI, MRI, MIRBuilder);
  case TargetOpcode::G_INTRINSIC_TRUNC:
    return legalizeIntrinsicTrunc(MI, MRI, MIRBuilder);
  case TargetOpcode::G_SITOFP:
    return legalizeITOFP(MI, MRI, MIRBuilder, true);
  case TargetOpcode::G_UITOFP:
    return legalizeITOFP(MI, MRI, MIRBuilder, false);
  default:
    return false;
  }

  llvm_unreachable("expected switch to return");
}

unsigned AMDGPULegalizerInfo::getSegmentAperture(
  unsigned AS,
  MachineRegisterInfo &MRI,
  MachineIRBuilder &MIRBuilder) const {
  MachineFunction &MF = MIRBuilder.getMF();
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const LLT S32 = LLT::scalar(32);

  if (ST.hasApertureRegs()) {
    // FIXME: Use inline constants (src_{shared, private}_base) instead of
    // getreg.
    unsigned Offset = AS == AMDGPUAS::LOCAL_ADDRESS ?
        AMDGPU::Hwreg::OFFSET_SRC_SHARED_BASE :
        AMDGPU::Hwreg::OFFSET_SRC_PRIVATE_BASE;
    unsigned WidthM1 = AS == AMDGPUAS::LOCAL_ADDRESS ?
        AMDGPU::Hwreg::WIDTH_M1_SRC_SHARED_BASE :
        AMDGPU::Hwreg::WIDTH_M1_SRC_PRIVATE_BASE;
    unsigned Encoding =
        AMDGPU::Hwreg::ID_MEM_BASES << AMDGPU::Hwreg::ID_SHIFT_ |
        Offset << AMDGPU::Hwreg::OFFSET_SHIFT_ |
        WidthM1 << AMDGPU::Hwreg::WIDTH_M1_SHIFT_;

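    // The getreg immediate selects the MEM_BASES hwreg and the bit range
    // holding the aperture; the base is assumed to sit in the field's low
    // bits, so shifting left by the field width (WidthM1 + 1) reconstructs
    // the high half of the 64-bit aperture address.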
    unsigned ApertureReg = MRI.createGenericVirtualRegister(S32);
    unsigned GetReg = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);

    MIRBuilder.buildInstr(AMDGPU::S_GETREG_B32)
      .addDef(GetReg)
      .addImm(Encoding);
    MRI.setType(GetReg, S32);

    auto ShiftAmt = MIRBuilder.buildConstant(S32, WidthM1 + 1);
    MIRBuilder.buildInstr(TargetOpcode::G_SHL)
      .addDef(ApertureReg)
      .addUse(GetReg)
      .addUse(ShiftAmt.getReg(0));

    return ApertureReg;
  }

  unsigned QueuePtr = MRI.createGenericVirtualRegister(
    LLT::pointer(AMDGPUAS::CONSTANT_ADDRESS, 64));

  // FIXME: Placeholder until we can track the input registers.
  MIRBuilder.buildConstant(QueuePtr, 0xdeadbeef);

  // Offset into amd_queue_t for group_segment_aperture_base_hi /
  // private_segment_aperture_base_hi.
  uint32_t StructOffset = (AS == AMDGPUAS::LOCAL_ADDRESS) ? 0x40 : 0x44;

  // FIXME: Don't use undef
  Value *V = UndefValue::get(PointerType::get(
    Type::getInt8Ty(MF.getFunction().getContext()),
    AMDGPUAS::CONSTANT_ADDRESS));

  MachinePointerInfo PtrInfo(V, StructOffset);
  MachineMemOperand *MMO = MF.getMachineMemOperand(
    PtrInfo,
    MachineMemOperand::MOLoad |
    MachineMemOperand::MODereferenceable |
    MachineMemOperand::MOInvariant,
    4,
    MinAlign(64, StructOffset));

  Register LoadResult = MRI.createGenericVirtualRegister(S32);
  Register LoadAddr;

  MIRBuilder.materializeGEP(LoadAddr, QueuePtr, LLT::scalar(64), StructOffset);
  MIRBuilder.buildLoad(LoadResult, LoadAddr, *MMO);
  return LoadResult;
}

bool AMDGPULegalizerInfo::legalizeAddrSpaceCast(
  MachineInstr &MI, MachineRegisterInfo &MRI,
  MachineIRBuilder &MIRBuilder) const {
  MachineFunction &MF = MIRBuilder.getMF();

  MIRBuilder.setInstr(MI);

  Register Dst = MI.getOperand(0).getReg();
  Register Src = MI.getOperand(1).getReg();

  LLT DstTy = MRI.getType(Dst);
  LLT SrcTy = MRI.getType(Src);
  unsigned DestAS = DstTy.getAddressSpace();
  unsigned SrcAS = SrcTy.getAddressSpace();

  // TODO: Avoid reloading from the queue ptr for each cast, or at least each
  // vector element.
  assert(!DstTy.isVector());

  const AMDGPUTargetMachine &TM
    = static_cast<const AMDGPUTargetMachine &>(MF.getTarget());

  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  if (ST.getTargetLowering()->isNoopAddrSpaceCast(SrcAS, DestAS)) {
    MI.setDesc(MIRBuilder.getTII().get(TargetOpcode::G_BITCAST));
    return true;
  }

  if (SrcAS == AMDGPUAS::FLAT_ADDRESS) {
    assert(DestAS == AMDGPUAS::LOCAL_ADDRESS ||
           DestAS == AMDGPUAS::PRIVATE_ADDRESS);
    unsigned NullVal = TM.getNullPointerValue(DestAS);

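    // A null flat pointer must map to the segment's null value, so compare
    // against flat null and select the segment null rather than just
    // truncating.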
    auto SegmentNull = MIRBuilder.buildConstant(DstTy, NullVal);
    auto FlatNull = MIRBuilder.buildConstant(SrcTy, 0);

    unsigned PtrLo32 = MRI.createGenericVirtualRegister(DstTy);

    // Extract low 32-bits of the pointer.
    MIRBuilder.buildExtract(PtrLo32, Src, 0);

    unsigned CmpRes = MRI.createGenericVirtualRegister(LLT::scalar(1));
    MIRBuilder.buildICmp(CmpInst::ICMP_NE, CmpRes, Src, FlatNull.getReg(0));
    MIRBuilder.buildSelect(Dst, CmpRes, PtrLo32, SegmentNull.getReg(0));

    MI.eraseFromParent();
    return true;
  }

  assert(SrcAS == AMDGPUAS::LOCAL_ADDRESS ||
         SrcAS == AMDGPUAS::PRIVATE_ADDRESS);

  auto SegmentNull =
      MIRBuilder.buildConstant(SrcTy, TM.getNullPointerValue(SrcAS));
  auto FlatNull =
      MIRBuilder.buildConstant(DstTy, TM.getNullPointerValue(DestAS));

  unsigned ApertureReg = getSegmentAperture(DestAS, MRI, MIRBuilder);

  unsigned CmpRes = MRI.createGenericVirtualRegister(LLT::scalar(1));
  MIRBuilder.buildICmp(CmpInst::ICMP_NE, CmpRes, Src, SegmentNull.getReg(0));

  unsigned BuildPtr = MRI.createGenericVirtualRegister(DstTy);

  // Coerce the type of the low half of the result so we can use merge_values.
  unsigned SrcAsInt = MRI.createGenericVirtualRegister(LLT::scalar(32));
  MIRBuilder.buildInstr(TargetOpcode::G_PTRTOINT)
    .addDef(SrcAsInt)
    .addUse(Src);

  // TODO: Should we allow mismatched types but matching sizes in merges to
  // avoid the ptrtoint?
  MIRBuilder.buildMerge(BuildPtr, {SrcAsInt, ApertureReg});
  MIRBuilder.buildSelect(Dst, CmpRes, BuildPtr, FlatNull.getReg(0));

  MI.eraseFromParent();
  return true;
}

bool AMDGPULegalizerInfo::legalizeFrint(
  MachineInstr &MI, MachineRegisterInfo &MRI,
  MachineIRBuilder &MIRBuilder) const {
  MIRBuilder.setInstr(MI);

  unsigned Src = MI.getOperand(1).getReg();
  LLT Ty = MRI.getType(Src);
  assert(Ty.isScalar() && Ty.getSizeInBits() == 64);

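  // Adding and then subtracting 2^52 (copysigned to match Src) forces the FPU
  // to round away the fractional bits, since f64 has exactly 52 fraction
  // bits. C2 is the largest magnitude below 2^52; anything bigger is already
  // integral and is passed through unchanged by the select below.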
  APFloat C1Val(APFloat::IEEEdouble(), "0x1.0p+52");
  APFloat C2Val(APFloat::IEEEdouble(), "0x1.fffffffffffffp+51");

  auto C1 = MIRBuilder.buildFConstant(Ty, C1Val);
  auto CopySign = MIRBuilder.buildFCopysign(Ty, C1, Src);

  // TODO: Should this propagate fast-math-flags?
  auto Tmp1 = MIRBuilder.buildFAdd(Ty, Src, CopySign);
  auto Tmp2 = MIRBuilder.buildFSub(Ty, Tmp1, CopySign);

  auto C2 = MIRBuilder.buildFConstant(Ty, C2Val);
  auto Fabs = MIRBuilder.buildFAbs(Ty, Src);

  auto Cond = MIRBuilder.buildFCmp(CmpInst::FCMP_OGT, LLT::scalar(1), Fabs, C2);
  MIRBuilder.buildSelect(MI.getOperand(0).getReg(), Cond, Src, Tmp2);
  MI.eraseFromParent();
  return true;
}

bool AMDGPULegalizerInfo::legalizeFceil(
  MachineInstr &MI, MachineRegisterInfo &MRI,
  MachineIRBuilder &B) const {
  B.setInstr(MI);

  const LLT S1 = LLT::scalar(1);
  const LLT S64 = LLT::scalar(64);

  unsigned Src = MI.getOperand(1).getReg();
  assert(MRI.getType(Src) == S64);

  // result = trunc(src)
  // if (src > 0.0 && src != result)
  //   result += 1.0

  auto Trunc = B.buildInstr(TargetOpcode::G_INTRINSIC_TRUNC, {S64}, {Src});

  const auto Zero = B.buildFConstant(S64, 0.0);
  const auto One = B.buildFConstant(S64, 1.0);
  auto Gt0 = B.buildFCmp(CmpInst::FCMP_OGT, S1, Src, Zero);
  auto NeTrunc = B.buildFCmp(CmpInst::FCMP_ONE, S1, Src, Trunc);
  auto And = B.buildAnd(S1, Gt0, NeTrunc);
  auto Add = B.buildSelect(S64, And, One, Zero);

  // TODO: Should this propagate fast-math-flags?
  B.buildFAdd(MI.getOperand(0).getReg(), Trunc, Add);
  MI.eraseFromParent();
  return true;
}

static MachineInstrBuilder extractF64Exponent(unsigned Hi,
                                              MachineIRBuilder &B) {
  const unsigned FractBits = 52;
  const unsigned ExpBits = 11;
  LLT S32 = LLT::scalar(32);

  auto Const0 = B.buildConstant(S32, FractBits - 32);
  auto Const1 = B.buildConstant(S32, ExpBits);

  // ubfe takes (src, offset, width): extract the 11 exponent bits from the
  // high word and subtract the bias.
  auto ExpPart = B.buildIntrinsic(Intrinsic::amdgcn_ubfe, {S32}, false)
    .addUse(Hi)
    .addUse(Const0.getReg(0))
    .addUse(Const1.getReg(0));

  return B.buildSub(S32, ExpPart, B.buildConstant(S32, 1023));
}

bool AMDGPULegalizerInfo::legalizeIntrinsicTrunc(
  MachineInstr &MI, MachineRegisterInfo &MRI,
  MachineIRBuilder &B) const {
  B.setInstr(MI);

  const LLT S1 = LLT::scalar(1);
  const LLT S32 = LLT::scalar(32);
  const LLT S64 = LLT::scalar(64);

  unsigned Src = MI.getOperand(1).getReg();
  assert(MRI.getType(Src) == S64);

  // TODO: Should this use extract since the low half is unused?
  auto Unmerge = B.buildUnmerge({S32, S32}, Src);
  unsigned Hi = Unmerge.getReg(1);

  // Extract the upper half, since this is where we will find the sign and
  // exponent.
  auto Exp = extractF64Exponent(Hi, B);

  const unsigned FractBits = 52;

  // Extract the sign bit.
  const auto SignBitMask = B.buildConstant(S32, UINT32_C(1) << 31);
  auto SignBit = B.buildAnd(S32, Hi, SignBitMask);

  const auto FractMask = B.buildConstant(S64, (UINT64_C(1) << FractBits) - 1);

  const auto Zero32 = B.buildConstant(S32, 0);

  // Extend back to 64-bits.
  auto SignBit64 = B.buildMerge(S64, {Zero32.getReg(0), SignBit.getReg(0)});

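  // Shifting the 52-bit fraction mask right by the unbiased exponent leaves
  // set bits only below the binary point; clearing those bits in Src
  // truncates toward zero. The selects below handle |x| < 1 (the result is a
  // signed zero) and exponents past the fraction width (already integral).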
  auto Shr = B.buildAShr(S64, FractMask, Exp);
  auto Not = B.buildNot(S64, Shr);
  auto Tmp0 = B.buildAnd(S64, Src, Not);
  auto FiftyOne = B.buildConstant(S32, FractBits - 1);

  auto ExpLt0 = B.buildICmp(CmpInst::ICMP_SLT, S1, Exp, Zero32);
  auto ExpGt51 = B.buildICmp(CmpInst::ICMP_SGT, S1, Exp, FiftyOne);

  auto Tmp1 = B.buildSelect(S64, ExpLt0, SignBit64, Tmp0);
  B.buildSelect(MI.getOperand(0).getReg(), ExpGt51, Src, Tmp1);
  MI.eraseFromParent();
  return true;
}

bool AMDGPULegalizerInfo::legalizeITOFP(
  MachineInstr &MI, MachineRegisterInfo &MRI,
  MachineIRBuilder &B, bool Signed) const {
  B.setInstr(MI);

  unsigned Dst = MI.getOperand(0).getReg();
  unsigned Src = MI.getOperand(1).getReg();

  const LLT S64 = LLT::scalar(64);
  const LLT S32 = LLT::scalar(32);

  assert(MRI.getType(Src) == S64 && MRI.getType(Dst) == S64);

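  // Split the 64-bit integer into 32-bit halves and convert each separately:
  // fp(Src) = fp(Hi) * 2^32 + fp(Lo). Only the high half carries the sign,
  // so the low half is always converted unsigned; ldexp performs the 2^32
  // scaling.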
  auto Unmerge = B.buildUnmerge({S32, S32}, Src);

  auto CvtHi = Signed ?
    B.buildSITOFP(S64, Unmerge.getReg(1)) :
    B.buildUITOFP(S64, Unmerge.getReg(1));

  auto CvtLo = B.buildUITOFP(S64, Unmerge.getReg(0));

  auto ThirtyTwo = B.buildConstant(S32, 32);
  auto LdExp = B.buildIntrinsic(Intrinsic::amdgcn_ldexp, {S64}, false)
    .addUse(CvtHi.getReg(0))
    .addUse(ThirtyTwo.getReg(0));

  // TODO: Should this propagate fast-math-flags?
  B.buildFAdd(Dst, LdExp, CvtLo);
  MI.eraseFromParent();
  return true;
}