//===- AMDGPULegalizerInfo.cpp -----------------------------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the targeting of the MachineLegalizer class for
/// AMDGPU.
/// \todo This should be generated by TableGen.
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPULegalizerInfo.h"
#include "AMDGPUTargetMachine.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/Debug.h"

using namespace llvm;
using namespace LegalizeActions;
using namespace LegalizeMutations;
using namespace LegalityPredicates;

AMDGPULegalizerInfo::AMDGPULegalizerInfo(const GCNSubtarget &ST,
                                         const GCNTargetMachine &TM) {
  using namespace TargetOpcode;

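  // Helper to build an LLT pointer type for an address space, using the
  // pointer width the target machine reports for that space.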
  auto GetAddrSpacePtr = [&TM](unsigned AS) {
    return LLT::pointer(AS, TM.getPointerSizeInBits(AS));
  };

  const LLT S1 = LLT::scalar(1);
  const LLT S16 = LLT::scalar(16);
  const LLT S32 = LLT::scalar(32);
  const LLT S64 = LLT::scalar(64);
  const LLT S128 = LLT::scalar(128);
  const LLT S256 = LLT::scalar(256);
  const LLT S512 = LLT::scalar(512);

  const LLT V2S16 = LLT::vector(2, 16);
  const LLT V4S16 = LLT::vector(4, 16);
  const LLT V8S16 = LLT::vector(8, 16);

  const LLT V2S32 = LLT::vector(2, 32);
  const LLT V3S32 = LLT::vector(3, 32);
  const LLT V4S32 = LLT::vector(4, 32);
  const LLT V5S32 = LLT::vector(5, 32);
  const LLT V6S32 = LLT::vector(6, 32);
  const LLT V7S32 = LLT::vector(7, 32);
  const LLT V8S32 = LLT::vector(8, 32);
  const LLT V9S32 = LLT::vector(9, 32);
  const LLT V10S32 = LLT::vector(10, 32);
  const LLT V11S32 = LLT::vector(11, 32);
  const LLT V12S32 = LLT::vector(12, 32);
  const LLT V13S32 = LLT::vector(13, 32);
  const LLT V14S32 = LLT::vector(14, 32);
  const LLT V15S32 = LLT::vector(15, 32);
  const LLT V16S32 = LLT::vector(16, 32);

  const LLT V2S64 = LLT::vector(2, 64);
  const LLT V3S64 = LLT::vector(3, 64);
  const LLT V4S64 = LLT::vector(4, 64);
  const LLT V5S64 = LLT::vector(5, 64);
  const LLT V6S64 = LLT::vector(6, 64);
  const LLT V7S64 = LLT::vector(7, 64);
  const LLT V8S64 = LLT::vector(8, 64);

  std::initializer_list<LLT> AllS32Vectors =
    {V2S32, V3S32, V4S32, V5S32, V6S32, V7S32, V8S32,
     V9S32, V10S32, V11S32, V12S32, V13S32, V14S32, V15S32, V16S32};
  std::initializer_list<LLT> AllS64Vectors =
    {V2S64, V3S64, V4S64, V5S64, V6S64, V7S64, V8S64};

  const LLT GlobalPtr = GetAddrSpacePtr(AMDGPUAS::GLOBAL_ADDRESS);
  const LLT ConstantPtr = GetAddrSpacePtr(AMDGPUAS::CONSTANT_ADDRESS);
  const LLT LocalPtr = GetAddrSpacePtr(AMDGPUAS::LOCAL_ADDRESS);
  const LLT FlatPtr = GetAddrSpacePtr(AMDGPUAS::FLAT_ADDRESS);
  const LLT PrivatePtr = GetAddrSpacePtr(AMDGPUAS::PRIVATE_ADDRESS);

  const LLT CodePtr = FlatPtr;

  const LLT AddrSpaces[] = {
    GlobalPtr,
    ConstantPtr,
    LocalPtr,
    FlatPtr,
    PrivatePtr
  };

  setAction({G_BRCOND, S1}, Legal);

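  // Integer arithmetic is only declared legal at 32 bits; vector forms are
  // broken into scalars by scalarize(0), and wider scalars are not handled
  // here yet.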
  getActionDefinitionsBuilder({G_ADD, G_SUB, G_MUL, G_UMULH, G_SMULH})
    .legalFor({S32})
    .scalarize(0);

  // FIXME: 64-bit ones only legal for scalar
  getActionDefinitionsBuilder({G_AND, G_OR, G_XOR})
    .legalFor({S32, S1, S64, V2S32});

  getActionDefinitionsBuilder({G_UADDO, G_SADDO, G_USUBO, G_SSUBO,
                               G_UADDE, G_SADDE, G_USUBE, G_SSUBE})
    .legalFor({{S32, S1}});

  getActionDefinitionsBuilder(G_BITCAST)
    .legalForCartesianProduct({S32, V2S16})
    .legalForCartesianProduct({S64, V2S32, V4S16})
    .legalForCartesianProduct({V2S64, V4S32})
    // Don't worry about the size constraint.
    .legalIf(all(isPointer(0), isPointer(1)));

  getActionDefinitionsBuilder(G_FCONSTANT)
    .legalFor({S32, S64, S16});

  // G_IMPLICIT_DEF is a no-op so we can make it legal for any value type that
  // can fit in a register.
  // FIXME: We need to legalize several more operations before we can add
  // a test case for size > 512.
  getActionDefinitionsBuilder(G_IMPLICIT_DEF)
    .legalIf([=](const LegalityQuery &Query) {
      return Query.Types[0].getSizeInBits() <= 512;
    })
    .clampScalar(0, S1, S512);

  // FIXME: i1 operands to intrinsics should always be legal, but other i1
  // values may not be legal. We need to figure out how to distinguish
  // between these two scenarios.
  // FIXME: Pointer types
  getActionDefinitionsBuilder(G_CONSTANT)
    .legalFor({S1, S32, S64})
    .clampScalar(0, S32, S64)
    .widenScalarToNextPow2(0);

  setAction({G_FRAME_INDEX, PrivatePtr}, Legal);

  getActionDefinitionsBuilder({G_FADD, G_FMUL, G_FNEG, G_FABS, G_FMA})
    .legalFor({S32, S64})
    .scalarize(0)
    .clampScalar(0, S32, S64);

  getActionDefinitionsBuilder(G_FPTRUNC)
    .legalFor({{S32, S64}, {S16, S32}})
    .scalarize(0);

  getActionDefinitionsBuilder(G_FPEXT)
    .legalFor({{S64, S32}, {S32, S16}})
    .lowerFor({{S64, S16}}) // FIXME: Implement
    .scalarize(0);

  getActionDefinitionsBuilder(G_FSUB)
    // Use actual fsub instruction
    .legalFor({S32})
    // Must use fadd + fneg
    .lowerFor({S64, S16, V2S16})
    .scalarize(0)
    .clampScalar(0, S32, S64);

  getActionDefinitionsBuilder({G_SEXT, G_ZEXT, G_ANYEXT})
    .legalFor({{S64, S32}, {S32, S16}, {S64, S16},
               {S32, S1}, {S64, S1}, {S16, S1},
               // FIXME: Hack
               {S128, S32}})
    .scalarize(0);

  getActionDefinitionsBuilder({G_SITOFP, G_UITOFP})
    .legalFor({{S32, S32}, {S64, S32}})
    .scalarize(0);

  getActionDefinitionsBuilder({G_FPTOSI, G_FPTOUI})
    .legalFor({{S32, S32}, {S32, S64}})
    .scalarize(0);

  getActionDefinitionsBuilder({G_INTRINSIC_TRUNC, G_INTRINSIC_ROUND})
    .legalFor({S32, S64});

  for (LLT PtrTy : AddrSpaces) {
    LLT IdxTy = LLT::scalar(PtrTy.getSizeInBits());
    setAction({G_GEP, PtrTy}, Legal);
    setAction({G_GEP, 1, IdxTy}, Legal);
  }

  // FIXME: When RegBankSelect inserts copies, it will only create new
  // registers with scalar types. This means we can end up with
  // G_LOAD/G_STORE/G_GEP instructions with scalar types for their pointer
  // operands. In assert builds, the instruction selector will assert if it
  // sees a generic instruction which isn't legal, so we need to tell it that
  // scalar types are legal for pointer operands.
  setAction({G_GEP, S64}, Legal);

  setAction({G_BLOCK_ADDR, CodePtr}, Legal);

  getActionDefinitionsBuilder({G_ICMP, G_FCMP})
    .legalFor({{S1, S32}, {S1, S64}})
    .widenScalarToNextPow2(1)
    .clampScalar(1, S32, S64)
    .clampMaxNumElements(0, S1, 1)
    .clampMaxNumElements(1, S32, 1);

  // FIXME: fexp, flog2, flog10 need to be custom lowered.
  getActionDefinitionsBuilder({G_FPOW, G_FEXP, G_FEXP2,
                               G_FLOG, G_FLOG2, G_FLOG10})
    .legalFor({S32})
    .scalarize(0);

  setAction({G_CTLZ, S32}, Legal);
  setAction({G_CTLZ_ZERO_UNDEF, S32}, Legal);
  setAction({G_CTTZ, S32}, Legal);
  setAction({G_CTTZ_ZERO_UNDEF, S32}, Legal);
  setAction({G_BSWAP, S32}, Legal);
  setAction({G_CTPOP, S32}, Legal);

  getActionDefinitionsBuilder(G_INTTOPTR)
    .legalIf([](const LegalityQuery &Query) {
      return true;
    });

  getActionDefinitionsBuilder(G_PTRTOINT)
    .legalIf([](const LegalityQuery &Query) {
      return true;
    });

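  // Loads and stores are only accepted at the sizes handled by the switch
  // below; 96-bit accesses are allowed only from SEA_ISLANDS onwards (see the
  // hasLoadX3 note), and everything else still needs to be broken up.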
  getActionDefinitionsBuilder({G_LOAD, G_STORE})
    .legalIf([=, &ST](const LegalityQuery &Query) {
        const LLT &Ty0 = Query.Types[0];

        // TODO: Decompose private loads into 4-byte components.
        // TODO: Illegal flat loads on SI
        switch (Ty0.getSizeInBits()) {
        case 32:
        case 64:
        case 128:
          return true;

        case 96:
          // XXX hasLoadX3
          return (ST.getGeneration() >= AMDGPUSubtarget::SEA_ISLANDS);

        case 256:
        case 512:
          // TODO: constant loads
        default:
          return false;
        }
      });

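  // Sign- and zero-extending loads from 8- and 16-bit memory accesses into a
  // 32-bit result are selectable for the address spaces listed; anything else
  // is clamped to 32 bits, rejected for non-power-of-2 memory sizes, or
  // lowered.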
  auto &ExtLoads = getActionDefinitionsBuilder({G_SEXTLOAD, G_ZEXTLOAD})
    .legalForTypesWithMemSize({{S32, GlobalPtr, 8},
                               {S32, GlobalPtr, 16},
                               {S32, LocalPtr, 8},
                               {S32, LocalPtr, 16},
                               {S32, PrivatePtr, 8},
                               {S32, PrivatePtr, 16}});
  if (ST.hasFlatAddressSpace()) {
    ExtLoads.legalForTypesWithMemSize({{S32, FlatPtr, 8},
                                       {S32, FlatPtr, 16}});
  }

  ExtLoads.clampScalar(0, S32, S32)
          .widenScalarToNextPow2(0)
          .unsupportedIfMemSizeNotPow2()
          .lower();

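  // Read-modify-write atomics and cmpxchg are legal at 32 and 64 bits on the
  // global and local address spaces, plus flat when the subtarget has one.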
  auto &Atomics = getActionDefinitionsBuilder(
    {G_ATOMICRMW_XCHG, G_ATOMICRMW_ADD, G_ATOMICRMW_SUB,
     G_ATOMICRMW_AND, G_ATOMICRMW_OR, G_ATOMICRMW_XOR,
     G_ATOMICRMW_MAX, G_ATOMICRMW_MIN, G_ATOMICRMW_UMAX,
     G_ATOMICRMW_UMIN, G_ATOMIC_CMPXCHG})
    .legalFor({{S32, GlobalPtr}, {S32, LocalPtr},
               {S64, GlobalPtr}, {S64, LocalPtr}});
  if (ST.hasFlatAddressSpace()) {
    Atomics.legalFor({{S32, FlatPtr}, {S64, FlatPtr}});
  }

  // TODO: Pointer types, any 32-bit or 64-bit vector
  getActionDefinitionsBuilder(G_SELECT)
    .legalFor({{S32, S1}, {S64, S1}, {V2S32, S1}, {V2S16, S1}})
    .clampScalar(0, S32, S64)
    .scalarize(0);

  // TODO: Only the low 4/5/6 bits of the shift amount are observed, so we can
  // be more flexible with the shift amount type.
  auto &Shifts = getActionDefinitionsBuilder({G_SHL, G_LSHR, G_ASHR})
    .legalFor({{S32, S32}, {S64, S32}});
  if (ST.has16BitInsts())
    Shifts.legalFor({{S16, S32}, {S16, S16}});
  else
    Shifts.clampScalar(0, S32, S64);
  Shifts.clampScalar(1, S32, S32);

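  // Dynamic vector indexing. For G_EXTRACT_VECTOR_ELT the result is the
  // element and operand 1 is the vector; for G_INSERT_VECTOR_ELT the roles
  // are swapped, which is what the type-index selection below accounts for.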
  for (unsigned Op : {G_EXTRACT_VECTOR_ELT, G_INSERT_VECTOR_ELT}) {
    unsigned VecTypeIdx = Op == G_EXTRACT_VECTOR_ELT ? 1 : 0;
    unsigned EltTypeIdx = Op == G_EXTRACT_VECTOR_ELT ? 0 : 1;
    unsigned IdxTypeIdx = 2;

    getActionDefinitionsBuilder(Op)
      .legalIf([=](const LegalityQuery &Query) {
          const LLT &VecTy = Query.Types[VecTypeIdx];
          const LLT &IdxTy = Query.Types[IdxTypeIdx];
          return VecTy.getSizeInBits() % 32 == 0 &&
                 VecTy.getSizeInBits() <= 512 &&
                 IdxTy.getSizeInBits() == 32;
        })
      .clampScalar(EltTypeIdx, S32, S64)
      .clampScalar(VecTypeIdx, S32, S64)
      .clampScalar(IdxTypeIdx, S32, S32);
  }

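  // Extracts whose result type does not match the vector's element type are
  // marked unsupported here.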
  getActionDefinitionsBuilder(G_EXTRACT_VECTOR_ELT)
    .unsupportedIf([=](const LegalityQuery &Query) {
        const LLT &EltTy = Query.Types[1].getElementType();
        return Query.Types[0] != EltTy;
      });

  // FIXME: Doesn't handle extract of illegal sizes.
  getActionDefinitionsBuilder({G_EXTRACT, G_INSERT})
    .legalIf([=](const LegalityQuery &Query) {
        const LLT &Ty0 = Query.Types[0];
        const LLT &Ty1 = Query.Types[1];
        return (Ty0.getSizeInBits() % 32 == 0) &&
               (Ty1.getSizeInBits() % 32 == 0);
      });

  getActionDefinitionsBuilder(G_BUILD_VECTOR)
    .legalForCartesianProduct(AllS32Vectors, {S32})
    .legalForCartesianProduct(AllS64Vectors, {S64})
    .clampNumElements(0, V16S32, V16S32)
    .clampNumElements(0, V2S64, V8S64)
    .minScalarSameAs(1, 0)
    // FIXME: Sort of a hack to make progress on other legalizations.
    .legalIf([=](const LegalityQuery &Query) {
      return Query.Types[0].getScalarSizeInBits() < 32;
    });

  // TODO: Support any combination of v2s32
  getActionDefinitionsBuilder(G_CONCAT_VECTORS)
    .legalFor({{V4S32, V2S32},
               {V8S32, V2S32},
               {V8S32, V4S32},
               {V4S64, V2S64},
               {V4S16, V2S16},
               {V8S16, V2S16},
               {V8S16, V4S16}});

  // Merge/Unmerge
  for (unsigned Op : {G_MERGE_VALUES, G_UNMERGE_VALUES}) {
    unsigned BigTyIdx = Op == G_MERGE_VALUES ? 0 : 1;
    unsigned LitTyIdx = Op == G_MERGE_VALUES ? 1 : 0;

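    // Vectors whose element type is narrower than a byte, wider than 64 bits,
    // or not a power of 2 in size can't be merged/unmerged directly; such
    // operands are scalarized first (see the fewerElementsIf rules below).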
    auto notValidElt = [=](const LegalityQuery &Query, unsigned TypeIdx) {
      const LLT &Ty = Query.Types[TypeIdx];
      if (Ty.isVector()) {
        const LLT &EltTy = Ty.getElementType();
        if (EltTy.getSizeInBits() < 8 || EltTy.getSizeInBits() > 64)
          return true;
        if (!isPowerOf2_32(EltTy.getSizeInBits()))
          return true;
      }
      return false;
    };

    getActionDefinitionsBuilder(Op)
      // Break up vectors with weird elements into scalars
      .fewerElementsIf(
        [=](const LegalityQuery &Query) { return notValidElt(Query, 0); },
        scalarize(0))
      .fewerElementsIf(
        [=](const LegalityQuery &Query) { return notValidElt(Query, 1); },
        scalarize(1))
      .clampScalar(BigTyIdx, S32, S512)
      .widenScalarIf(
        [=](const LegalityQuery &Query) {
          const LLT &Ty = Query.Types[BigTyIdx];
          return !isPowerOf2_32(Ty.getSizeInBits()) &&
                 Ty.getSizeInBits() % 16 != 0;
        },
        [=](const LegalityQuery &Query) {
          // Pick the next power of 2, or a multiple of 64 over 128,
          // whichever is smaller.
          const LLT &Ty = Query.Types[BigTyIdx];
          unsigned NewSizeInBits = 1 << Log2_32_Ceil(Ty.getSizeInBits() + 1);
          if (NewSizeInBits >= 256) {
            unsigned RoundedTo = alignTo<64>(Ty.getSizeInBits() + 1);
            if (RoundedTo < NewSizeInBits)
              NewSizeInBits = RoundedTo;
          }
          return std::make_pair(BigTyIdx, LLT::scalar(NewSizeInBits));
        })
      .widenScalarToNextPow2(LitTyIdx, /*Min*/ 16)
      // Clamp the little scalar to s16-s256 and make it a power of 2. It's
      // not worth considering the multiples of 64 since 2*192 and 2*384 are
      // not valid.
      .clampScalar(LitTyIdx, S16, S256)
      .widenScalarToNextPow2(LitTyIdx, /*Min*/ 32)
      .legalIf([=](const LegalityQuery &Query) {
          const LLT &BigTy = Query.Types[BigTyIdx];
          const LLT &LitTy = Query.Types[LitTyIdx];

          if (BigTy.isVector() && BigTy.getSizeInBits() < 32)
            return false;
          if (LitTy.isVector() && LitTy.getSizeInBits() < 32)
            return false;

          return BigTy.getSizeInBits() % 16 == 0 &&
                 LitTy.getSizeInBits() % 16 == 0 &&
                 BigTy.getSizeInBits() <= 512;
        })
      // Any vectors left are the wrong size. Scalarize them.
      .scalarize(0)
      .scalarize(1);
  }

  computeTables();
  verify(*ST.getInstrInfo());
}