//===-- AMDGPUISelLowering.cpp - AMDGPU Common DAG lowering functions -----===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief This is the parent TargetLowering class for hardware code gen
/// targets.
//
//===----------------------------------------------------------------------===//

#include "AMDGPUISelLowering.h"
#include "AMDGPU.h"
#include "AMDGPUFrameLowering.h"
#include "AMDGPUIntrinsicInfo.h"
#include "AMDGPURegisterInfo.h"
#include "AMDGPUSubtarget.h"
#include "R600MachineFunctionInfo.h"
#include "SIMachineFunctionInfo.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "SIInstrInfo.h"
using namespace llvm;

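// Custom calling-convention handler for kernel arguments: reserve space for
// the value in the kernel argument segment and record the resulting byte
// offset as a custom memory location.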
static bool allocateKernArg(unsigned ValNo, MVT ValVT, MVT LocVT,
                            CCValAssign::LocInfo LocInfo,
                            ISD::ArgFlagsTy ArgFlags, CCState &State) {
  MachineFunction &MF = State.getMachineFunction();
  AMDGPUMachineFunction *MFI = MF.getInfo<AMDGPUMachineFunction>();

  uint64_t Offset = MFI->allocateKernArg(ValVT.getStoreSize(),
                                         ArgFlags.getOrigAlign());
  State.addLoc(CCValAssign::getCustomMem(ValNo, ValVT, Offset, LocVT, LocInfo));
  return true;
}

#include "AMDGPUGenCallingConv.inc"

// Find a larger type to do a load / store of a vector with.
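// For example, <2 x i8> (16 bits) maps to i16, and <4 x i16> (64 bits) maps
// to <2 x i32>.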
EVT AMDGPUTargetLowering::getEquivalentMemType(LLVMContext &Ctx, EVT VT) {
  unsigned StoreSize = VT.getStoreSizeInBits();
  if (StoreSize <= 32)
    return EVT::getIntegerVT(Ctx, StoreSize);

  assert(StoreSize % 32 == 0 && "Store size not a multiple of 32");
  return EVT::getVectorVT(Ctx, MVT::i32, StoreSize / 32);
}

AMDGPUTargetLowering::AMDGPUTargetLowering(const TargetMachine &TM,
                                           const AMDGPUSubtarget &STI)
    : TargetLowering(TM), Subtarget(&STI) {
  // Lower floating point store/load to integer store/load to reduce the number
  // of patterns in tablegen.
  setOperationAction(ISD::LOAD, MVT::f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::f32, MVT::i32);

  setOperationAction(ISD::LOAD, MVT::v2f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v2f32, MVT::v2i32);

  setOperationAction(ISD::LOAD, MVT::v4f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v4f32, MVT::v4i32);

  setOperationAction(ISD::LOAD, MVT::v8f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v8f32, MVT::v8i32);

  setOperationAction(ISD::LOAD, MVT::v16f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v16f32, MVT::v16i32);

  setOperationAction(ISD::LOAD, MVT::i64, Promote);
  AddPromotedToType(ISD::LOAD, MVT::i64, MVT::v2i32);

  setOperationAction(ISD::LOAD, MVT::v2i64, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v2i64, MVT::v4i32);

  setOperationAction(ISD::LOAD, MVT::f64, Promote);
  AddPromotedToType(ISD::LOAD, MVT::f64, MVT::v2i32);

  setOperationAction(ISD::LOAD, MVT::v2f64, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v2f64, MVT::v4i32);

  // There are no 64-bit extloads. These should be done as a 32-bit extload and
  // an extension to 64-bit.
  for (MVT VT : MVT::integer_valuetypes()) {
    setLoadExtAction(ISD::EXTLOAD, MVT::i64, VT, Expand);
    setLoadExtAction(ISD::SEXTLOAD, MVT::i64, VT, Expand);
    setLoadExtAction(ISD::ZEXTLOAD, MVT::i64, VT, Expand);
  }

  for (MVT VT : MVT::integer_valuetypes()) {
    if (VT == MVT::i64)
      continue;

    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i8, Legal);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i16, Legal);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i32, Expand);

    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i8, Legal);
    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i16, Legal);
    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i32, Expand);

    setLoadExtAction(ISD::EXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::i8, Legal);
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::i16, Legal);
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::i32, Expand);
  }

  for (MVT VT : MVT::integer_vector_valuetypes()) {
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i8, Expand);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i8, Expand);
    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::v2i8, Expand);
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::v4i8, Expand);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v4i8, Expand);
    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::v4i8, Expand);
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i16, Expand);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i16, Expand);
    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::v2i16, Expand);
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::v4i16, Expand);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v4i16, Expand);
    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::v4i16, Expand);
  }

  setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v2f32, MVT::v2f16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v4f32, MVT::v4f16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v8f32, MVT::v8f16, Expand);

  setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v2f64, MVT::v2f32, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v4f64, MVT::v4f32, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v8f64, MVT::v8f32, Expand);

  setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v2f64, MVT::v2f16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v4f64, MVT::v4f16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v8f64, MVT::v8f16, Expand);

  setOperationAction(ISD::STORE, MVT::f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::f32, MVT::i32);

  setOperationAction(ISD::STORE, MVT::v2f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::v2f32, MVT::v2i32);

  setOperationAction(ISD::STORE, MVT::v4f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::v4f32, MVT::v4i32);

  setOperationAction(ISD::STORE, MVT::v8f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::v8f32, MVT::v8i32);

  setOperationAction(ISD::STORE, MVT::v16f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::v16f32, MVT::v16i32);

  setOperationAction(ISD::STORE, MVT::i64, Promote);
  AddPromotedToType(ISD::STORE, MVT::i64, MVT::v2i32);

  setOperationAction(ISD::STORE, MVT::v2i64, Promote);
  AddPromotedToType(ISD::STORE, MVT::v2i64, MVT::v4i32);

  setOperationAction(ISD::STORE, MVT::f64, Promote);
  AddPromotedToType(ISD::STORE, MVT::f64, MVT::v2i32);

  setOperationAction(ISD::STORE, MVT::v2f64, Promote);
  AddPromotedToType(ISD::STORE, MVT::v2f64, MVT::v4i32);

  setTruncStoreAction(MVT::v2i32, MVT::v2i8, Custom);
  setTruncStoreAction(MVT::v2i32, MVT::v2i16, Custom);

  setTruncStoreAction(MVT::v4i32, MVT::v4i8, Custom);
  setTruncStoreAction(MVT::v4i32, MVT::v4i16, Expand);

  setTruncStoreAction(MVT::v8i32, MVT::v8i16, Expand);
  setTruncStoreAction(MVT::v16i32, MVT::v16i8, Expand);
  setTruncStoreAction(MVT::v16i32, MVT::v16i16, Expand);

  setTruncStoreAction(MVT::i64, MVT::i1, Expand);
  setTruncStoreAction(MVT::i64, MVT::i8, Expand);
  setTruncStoreAction(MVT::i64, MVT::i16, Expand);
  setTruncStoreAction(MVT::i64, MVT::i32, Expand);

  setTruncStoreAction(MVT::v2i64, MVT::v2i1, Expand);
  setTruncStoreAction(MVT::v2i64, MVT::v2i8, Expand);
  setTruncStoreAction(MVT::v2i64, MVT::v2i16, Expand);
  setTruncStoreAction(MVT::v2i64, MVT::v2i32, Expand);

  setTruncStoreAction(MVT::f32, MVT::f16, Expand);
  setTruncStoreAction(MVT::v2f32, MVT::v2f16, Expand);
  setTruncStoreAction(MVT::v4f32, MVT::v4f16, Expand);
  setTruncStoreAction(MVT::v8f32, MVT::v8f16, Expand);

  setTruncStoreAction(MVT::f64, MVT::f16, Expand);
  setTruncStoreAction(MVT::f64, MVT::f32, Expand);

  setTruncStoreAction(MVT::v2f64, MVT::v2f32, Expand);
  setTruncStoreAction(MVT::v2f64, MVT::v2f16, Expand);

  setTruncStoreAction(MVT::v4f64, MVT::v4f32, Expand);
  setTruncStoreAction(MVT::v4f64, MVT::v4f16, Expand);

  setTruncStoreAction(MVT::v8f64, MVT::v8f32, Expand);
  setTruncStoreAction(MVT::v8f64, MVT::v8f16, Expand);

  setOperationAction(ISD::Constant, MVT::i32, Legal);
  setOperationAction(ISD::Constant, MVT::i64, Legal);
  setOperationAction(ISD::ConstantFP, MVT::f32, Legal);
  setOperationAction(ISD::ConstantFP, MVT::f64, Legal);

  setOperationAction(ISD::BR_JT, MVT::Other, Expand);
  setOperationAction(ISD::BRIND, MVT::Other, Expand);

  // This is totally unsupported, just custom lower to produce an error.
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom);

  // We need to custom lower some of the intrinsics
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);

  // Library functions. These default to Expand, but we have instructions
  // for them.
  setOperationAction(ISD::FCEIL, MVT::f32, Legal);
  setOperationAction(ISD::FEXP2, MVT::f32, Legal);
  setOperationAction(ISD::FPOW, MVT::f32, Legal);
  setOperationAction(ISD::FLOG2, MVT::f32, Legal);
  setOperationAction(ISD::FABS, MVT::f32, Legal);
  setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
  setOperationAction(ISD::FRINT, MVT::f32, Legal);
  setOperationAction(ISD::FTRUNC, MVT::f32, Legal);
  setOperationAction(ISD::FMINNUM, MVT::f32, Legal);
  setOperationAction(ISD::FMAXNUM, MVT::f32, Legal);

  setOperationAction(ISD::FROUND, MVT::f32, Custom);
  setOperationAction(ISD::FROUND, MVT::f64, Custom);

  setOperationAction(ISD::FNEARBYINT, MVT::f32, Custom);
  setOperationAction(ISD::FNEARBYINT, MVT::f64, Custom);

  setOperationAction(ISD::FREM, MVT::f32, Custom);
  setOperationAction(ISD::FREM, MVT::f64, Custom);

  // v_mad_f32 does not support denormals according to some sources.
  if (!Subtarget->hasFP32Denormals())
    setOperationAction(ISD::FMAD, MVT::f32, Legal);

  // Expand to fneg + fadd.
  setOperationAction(ISD::FSUB, MVT::f64, Expand);

  setOperationAction(ISD::CONCAT_VECTORS, MVT::v4i32, Custom);
  setOperationAction(ISD::CONCAT_VECTORS, MVT::v4f32, Custom);
  setOperationAction(ISD::CONCAT_VECTORS, MVT::v8i32, Custom);
  setOperationAction(ISD::CONCAT_VECTORS, MVT::v8f32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v2f32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v2i32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v4f32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v4i32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v8f32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v8i32, Custom);

  if (Subtarget->getGeneration() < AMDGPUSubtarget::SEA_ISLANDS) {
    setOperationAction(ISD::FCEIL, MVT::f64, Custom);
    setOperationAction(ISD::FTRUNC, MVT::f64, Custom);
    setOperationAction(ISD::FRINT, MVT::f64, Custom);
    setOperationAction(ISD::FFLOOR, MVT::f64, Custom);
  }

  if (!Subtarget->hasBFI()) {
    // fcopysign can be done in a single instruction with BFI.
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
  }

  setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand);

  const MVT ScalarIntVTs[] = { MVT::i32, MVT::i64 };
  for (MVT VT : ScalarIntVTs) {
    // These should use [SU]DIVREM, so set them to expand
    setOperationAction(ISD::SDIV, VT, Expand);
    setOperationAction(ISD::UDIV, VT, Expand);
    setOperationAction(ISD::SREM, VT, Expand);
    setOperationAction(ISD::UREM, VT, Expand);

    // GPU does not have divrem function for signed or unsigned.
    setOperationAction(ISD::SDIVREM, VT, Custom);
    setOperationAction(ISD::UDIVREM, VT, Custom);

    // GPU does not have [S|U]MUL_LOHI functions as a single instruction.
    setOperationAction(ISD::SMUL_LOHI, VT, Expand);
    setOperationAction(ISD::UMUL_LOHI, VT, Expand);

    setOperationAction(ISD::BSWAP, VT, Expand);
    setOperationAction(ISD::CTTZ, VT, Expand);
    setOperationAction(ISD::CTLZ, VT, Expand);
  }

  if (!Subtarget->hasBCNT(32))
    setOperationAction(ISD::CTPOP, MVT::i32, Expand);

  if (!Subtarget->hasBCNT(64))
    setOperationAction(ISD::CTPOP, MVT::i64, Expand);

  // The hardware supports 32-bit ROTR, but not ROTL.
  setOperationAction(ISD::ROTL, MVT::i32, Expand);
  setOperationAction(ISD::ROTL, MVT::i64, Expand);
  setOperationAction(ISD::ROTR, MVT::i64, Expand);

  setOperationAction(ISD::MUL, MVT::i64, Expand);
  setOperationAction(ISD::MULHU, MVT::i64, Expand);
  setOperationAction(ISD::MULHS, MVT::i64, Expand);
  setOperationAction(ISD::UDIV, MVT::i32, Expand);
  setOperationAction(ISD::UREM, MVT::i32, Expand);
  setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
  setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
  setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
  setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::i64, Expand);

  setOperationAction(ISD::SMIN, MVT::i32, Legal);
  setOperationAction(ISD::UMIN, MVT::i32, Legal);
  setOperationAction(ISD::SMAX, MVT::i32, Legal);
  setOperationAction(ISD::UMAX, MVT::i32, Legal);

  if (Subtarget->hasFFBH())
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Custom);

  if (Subtarget->hasFFBL())
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Legal);

  setOperationAction(ISD::CTLZ, MVT::i64, Custom);
  setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Custom);

  // We only really have 32-bit BFE instructions (and 16-bit on VI).
  //
  // On SI+ there are 64-bit BFEs, but they are scalar only and there isn't any
  // effort to match them now. We want this to be false for i64 cases when the
  // extraction isn't restricted to the upper or lower half. Ideally we would
  // have some pass reduce 64-bit extracts to 32-bit if possible. Extracts that
  // span the midpoint are probably relatively rare, so don't worry about them
  // for now.
  if (Subtarget->hasBFE())
    setHasExtractBitsInsn(true);

  static const MVT::SimpleValueType VectorIntTypes[] = {
    MVT::v2i32, MVT::v4i32
  };

  for (MVT VT : VectorIntTypes) {
    // Expand the following operations for the current type by default.
    setOperationAction(ISD::ADD, VT, Expand);
    setOperationAction(ISD::AND, VT, Expand);
    setOperationAction(ISD::FP_TO_SINT, VT, Expand);
    setOperationAction(ISD::FP_TO_UINT, VT, Expand);
    setOperationAction(ISD::MUL, VT, Expand);
    setOperationAction(ISD::OR, VT, Expand);
    setOperationAction(ISD::SHL, VT, Expand);
    setOperationAction(ISD::SRA, VT, Expand);
    setOperationAction(ISD::SRL, VT, Expand);
    setOperationAction(ISD::ROTL, VT, Expand);
    setOperationAction(ISD::ROTR, VT, Expand);
    setOperationAction(ISD::SUB, VT, Expand);
    setOperationAction(ISD::SINT_TO_FP, VT, Expand);
    setOperationAction(ISD::UINT_TO_FP, VT, Expand);
    setOperationAction(ISD::SDIV, VT, Expand);
    setOperationAction(ISD::UDIV, VT, Expand);
    setOperationAction(ISD::SREM, VT, Expand);
    setOperationAction(ISD::UREM, VT, Expand);
    setOperationAction(ISD::SMUL_LOHI, VT, Expand);
    setOperationAction(ISD::UMUL_LOHI, VT, Expand);
    setOperationAction(ISD::SDIVREM, VT, Custom);
    setOperationAction(ISD::UDIVREM, VT, Expand);
    setOperationAction(ISD::ADDC, VT, Expand);
    setOperationAction(ISD::SUBC, VT, Expand);
    setOperationAction(ISD::ADDE, VT, Expand);
    setOperationAction(ISD::SUBE, VT, Expand);
    setOperationAction(ISD::SELECT, VT, Expand);
    setOperationAction(ISD::VSELECT, VT, Expand);
    setOperationAction(ISD::SELECT_CC, VT, Expand);
    setOperationAction(ISD::XOR, VT, Expand);
    setOperationAction(ISD::BSWAP, VT, Expand);
    setOperationAction(ISD::CTPOP, VT, Expand);
    setOperationAction(ISD::CTTZ, VT, Expand);
    setOperationAction(ISD::CTLZ, VT, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE, VT, Expand);
  }

  static const MVT::SimpleValueType FloatVectorTypes[] = {
    MVT::v2f32, MVT::v4f32
  };

  for (MVT VT : FloatVectorTypes) {
    setOperationAction(ISD::FABS, VT, Expand);
    setOperationAction(ISD::FMINNUM, VT, Expand);
    setOperationAction(ISD::FMAXNUM, VT, Expand);
    setOperationAction(ISD::FADD, VT, Expand);
    setOperationAction(ISD::FCEIL, VT, Expand);
    setOperationAction(ISD::FCOS, VT, Expand);
    setOperationAction(ISD::FDIV, VT, Expand);
    setOperationAction(ISD::FEXP2, VT, Expand);
    setOperationAction(ISD::FLOG2, VT, Expand);
    setOperationAction(ISD::FREM, VT, Expand);
    setOperationAction(ISD::FPOW, VT, Expand);
    setOperationAction(ISD::FFLOOR, VT, Expand);
    setOperationAction(ISD::FTRUNC, VT, Expand);
    setOperationAction(ISD::FMUL, VT, Expand);
    setOperationAction(ISD::FMA, VT, Expand);
    setOperationAction(ISD::FRINT, VT, Expand);
    setOperationAction(ISD::FNEARBYINT, VT, Expand);
    setOperationAction(ISD::FSQRT, VT, Expand);
    setOperationAction(ISD::FSIN, VT, Expand);
    setOperationAction(ISD::FSUB, VT, Expand);
    setOperationAction(ISD::FNEG, VT, Expand);
    setOperationAction(ISD::VSELECT, VT, Expand);
    setOperationAction(ISD::SELECT_CC, VT, Expand);
    setOperationAction(ISD::FCOPYSIGN, VT, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE, VT, Expand);
  }

  // This causes using an unrolled select operation rather than expansion with
  // bit operations. This is in general better, but the alternative using BFI
  // instructions may be better if the select sources are SGPRs.
  setOperationAction(ISD::SELECT, MVT::v2f32, Promote);
  AddPromotedToType(ISD::SELECT, MVT::v2f32, MVT::v2i32);

  setOperationAction(ISD::SELECT, MVT::v4f32, Promote);
  AddPromotedToType(ISD::SELECT, MVT::v4f32, MVT::v4i32);

  setBooleanContents(ZeroOrNegativeOneBooleanContent);
  setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);

  setSchedulingPreference(Sched::RegPressure);
  setJumpIsExpensive(true);

  // SI at least has hardware support for floating point exceptions, but no way
  // of using or handling them is implemented. They are also optional in OpenCL
  // (Section 7.3)
  setHasFloatingPointExceptions(Subtarget->hasFPExceptions());

  setSelectIsExpensive(false);
  PredictableSelectIsExpensive = false;

  // We want to find all load dependencies for long chains of stores to enable
  // merging into very wide vectors. The problem is with vectors with > 4
  // elements. MergeConsecutiveStores will attempt to merge these because x8/x16
  // vectors are a legal type, even though we have to split the loads
  // usually. When we can more precisely specify load legality per address
  // space, we should be able to make FindBetterChain/MergeConsecutiveStores
  // smarter so that they can figure out what to do in 2 iterations without all
  // N > 4 stores on the same chain.
  GatherAllAliasesMaxDepth = 16;

  // FIXME: Need to really handle these.
  MaxStoresPerMemcpy = 4096;
  MaxStoresPerMemmove = 4096;
  MaxStoresPerMemset = 4096;

  setTargetDAGCombine(ISD::BITCAST);
  setTargetDAGCombine(ISD::AND);
  setTargetDAGCombine(ISD::SHL);
  setTargetDAGCombine(ISD::SRA);
  setTargetDAGCombine(ISD::SRL);
  setTargetDAGCombine(ISD::MUL);
  setTargetDAGCombine(ISD::MULHU);
  setTargetDAGCombine(ISD::MULHS);
  setTargetDAGCombine(ISD::SELECT);
  setTargetDAGCombine(ISD::SELECT_CC);
  setTargetDAGCombine(ISD::STORE);
  setTargetDAGCombine(ISD::FADD);
  setTargetDAGCombine(ISD::FSUB);
}

//===----------------------------------------------------------------------===//
// Target Information
//===----------------------------------------------------------------------===//

MVT AMDGPUTargetLowering::getVectorIdxTy(const DataLayout &) const {
  return MVT::i32;
}

bool AMDGPUTargetLowering::isSelectSupported(SelectSupportKind SelType) const {
  return true;
}

// The backend supports 32 and 64 bit floating point immediates.
// FIXME: Why are we reporting vectors of FP immediates as legal?
bool AMDGPUTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
  EVT ScalarVT = VT.getScalarType();
  return (ScalarVT == MVT::f32 || ScalarVT == MVT::f64);
}

// We don't want to shrink f64 / f32 constants.
bool AMDGPUTargetLowering::ShouldShrinkFPConstant(EVT VT) const {
  EVT ScalarVT = VT.getScalarType();
  return (ScalarVT != MVT::f32 && ScalarVT != MVT::f64);
}

bool AMDGPUTargetLowering::shouldReduceLoadWidth(SDNode *N,
                                                 ISD::LoadExtType,
                                                 EVT NewVT) const {

  unsigned NewSize = NewVT.getStoreSizeInBits();

  // If we are reducing to a 32-bit load, this is always better.
  if (NewSize == 32)
    return true;

  EVT OldVT = N->getValueType(0);
  unsigned OldSize = OldVT.getStoreSizeInBits();

  // Don't produce extloads from sub 32-bit types. SI doesn't have scalar
  // extloads, so doing one requires using a buffer_load. In cases where we
  // still couldn't use a scalar load, using the wider load shouldn't really
  // hurt anything.

  // If the old size already had to be an extload, there's no harm in continuing
  // to reduce the width.
  return (OldSize < 32);
}

bool AMDGPUTargetLowering::isLoadBitCastBeneficial(EVT LoadTy,
                                                   EVT CastTy) const {

  assert(LoadTy.getSizeInBits() == CastTy.getSizeInBits());

  if (LoadTy.getScalarType() == MVT::i32)
    return false;

  unsigned LScalarSize = LoadTy.getScalarSizeInBits();
  unsigned CastScalarSize = CastTy.getScalarSizeInBits();

  return (LScalarSize < CastScalarSize) ||
         (CastScalarSize >= 32);
}

// SI+ has instructions for cttz / ctlz for 32-bit values. This is probably also
// profitable with the expansion for 64-bit since it's generally good to
// speculate things.
// FIXME: These should really have the size as a parameter.
bool AMDGPUTargetLowering::isCheapToSpeculateCttz() const {
  return true;
}

bool AMDGPUTargetLowering::isCheapToSpeculateCtlz() const {
  return true;
}

//===---------------------------------------------------------------------===//
// Target Properties
//===---------------------------------------------------------------------===//

bool AMDGPUTargetLowering::isFAbsFree(EVT VT) const {
  assert(VT.isFloatingPoint());
  return VT == MVT::f32 || VT == MVT::f64;
}

bool AMDGPUTargetLowering::isFNegFree(EVT VT) const {
  assert(VT.isFloatingPoint());
  return VT == MVT::f32 || VT == MVT::f64;
}

bool AMDGPUTargetLowering::storeOfVectorConstantIsCheap(EVT MemVT,
                                                        unsigned NumElem,
                                                        unsigned AS) const {
  return true;
}

bool AMDGPUTargetLowering::aggressivelyPreferBuildVectorSources(EVT VecVT) const {
  // There are few operations which truly have vector input operands. Any vector
  // operation is going to involve operations on each component, and a
  // build_vector will be a copy per element, so it always makes sense to use a
  // build_vector input in place of the extracted element to avoid a copy into a
  // super register.
  //
  // We should probably only do this if all users are extracts only, but this
  // should be the common case.
  return true;
}

bool AMDGPUTargetLowering::isTruncateFree(EVT Source, EVT Dest) const {
  // Truncate is just accessing a subregister.
  return Dest.bitsLT(Source) && (Dest.getSizeInBits() % 32 == 0);
}

bool AMDGPUTargetLowering::isTruncateFree(Type *Source, Type *Dest) const {
  // Truncate is just accessing a subregister.
  return Dest->getPrimitiveSizeInBits() < Source->getPrimitiveSizeInBits() &&
         (Dest->getPrimitiveSizeInBits() % 32 == 0);
}

bool AMDGPUTargetLowering::isZExtFree(Type *Src, Type *Dest) const {
  unsigned SrcSize = Src->getScalarSizeInBits();
  unsigned DestSize = Dest->getScalarSizeInBits();

  return SrcSize == 32 && DestSize == 64;
}

bool AMDGPUTargetLowering::isZExtFree(EVT Src, EVT Dest) const {
  // Any register load of a 64-bit value really requires 2 32-bit moves. For all
  // practical purposes, the extra mov 0 to load a 64-bit value is free. As used,
  // this will enable reducing 64-bit operations to 32-bit, which is always
  // good.
  return Src == MVT::i32 && Dest == MVT::i64;
}

bool AMDGPUTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
  return isZExtFree(Val.getValueType(), VT2);
}

bool AMDGPUTargetLowering::isNarrowingProfitable(EVT SrcVT, EVT DestVT) const {
  // There aren't really 64-bit registers, but pairs of 32-bit ones and only a
  // limited number of native 64-bit operations. Shrinking an operation to fit
  // in a single 32-bit register should always be helpful. As currently used,
  // this is much less general than the name suggests, and is only used in
  // places trying to reduce the sizes of loads. Shrinking loads to < 32-bits is
  // not profitable, and may actually be harmful.
  return SrcVT.getSizeInBits() > 32 && DestVT.getSizeInBits() == 32;
}

//===---------------------------------------------------------------------===//
// TargetLowering Callbacks
//===---------------------------------------------------------------------===//

void AMDGPUTargetLowering::AnalyzeFormalArguments(CCState &State,
                             const SmallVectorImpl<ISD::InputArg> &Ins) const {

  State.AnalyzeFormalArguments(Ins, CC_AMDGPU);
}

void AMDGPUTargetLowering::AnalyzeReturn(CCState &State,
                           const SmallVectorImpl<ISD::OutputArg> &Outs) const {

  State.AnalyzeReturn(Outs, RetCC_SI);
}

SDValue
AMDGPUTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
                                  bool isVarArg,
                                  const SmallVectorImpl<ISD::OutputArg> &Outs,
                                  const SmallVectorImpl<SDValue> &OutVals,
                                  const SDLoc &DL, SelectionDAG &DAG) const {
  return DAG.getNode(AMDGPUISD::ENDPGM, DL, MVT::Other, Chain);
}

//===---------------------------------------------------------------------===//
// Target specific lowering
//===---------------------------------------------------------------------===//

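// Calls are not supported: diagnose the attempted call and return undef values
// of the expected types so that lowering can continue.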
SDValue AMDGPUTargetLowering::LowerCall(CallLoweringInfo &CLI,
                                        SmallVectorImpl<SDValue> &InVals) const {
  SDValue Callee = CLI.Callee;
  SelectionDAG &DAG = CLI.DAG;

  const Function &Fn = *DAG.getMachineFunction().getFunction();

  StringRef FuncName("<unknown>");

  if (const ExternalSymbolSDNode *G = dyn_cast<ExternalSymbolSDNode>(Callee))
    FuncName = G->getSymbol();
  else if (const GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
    FuncName = G->getGlobal()->getName();

  DiagnosticInfoUnsupported NoCalls(
      Fn, "unsupported call to function " + FuncName, CLI.DL.getDebugLoc());
  DAG.getContext()->diagnose(NoCalls);

  for (unsigned I = 0, E = CLI.Ins.size(); I != E; ++I)
    InVals.push_back(DAG.getUNDEF(CLI.Ins[I].VT));

  return DAG.getEntryNode();
}

SDValue AMDGPUTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
                                                      SelectionDAG &DAG) const {
  const Function &Fn = *DAG.getMachineFunction().getFunction();

  DiagnosticInfoUnsupported NoDynamicAlloca(Fn, "unsupported dynamic alloca",
                                            SDLoc(Op).getDebugLoc());
  DAG.getContext()->diagnose(NoDynamicAlloca);
  auto Ops = {DAG.getConstant(0, SDLoc(), Op.getValueType()), Op.getOperand(0)};
  return DAG.getMergeValues(Ops, SDLoc());
}

SDValue AMDGPUTargetLowering::LowerOperation(SDValue Op,
                                             SelectionDAG &DAG) const {
  switch (Op.getOpcode()) {
  default:
    Op->dump(&DAG);
    llvm_unreachable("Custom lowering code for this "
                     "instruction is not implemented yet!");
    break;
  case ISD::SIGN_EXTEND_INREG: return LowerSIGN_EXTEND_INREG(Op, DAG);
  case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG);
  case ISD::EXTRACT_SUBVECTOR: return LowerEXTRACT_SUBVECTOR(Op, DAG);
  case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
  case ISD::UDIVREM: return LowerUDIVREM(Op, DAG);
  case ISD::SDIVREM: return LowerSDIVREM(Op, DAG);
  case ISD::FREM: return LowerFREM(Op, DAG);
  case ISD::FCEIL: return LowerFCEIL(Op, DAG);
  case ISD::FTRUNC: return LowerFTRUNC(Op, DAG);
  case ISD::FRINT: return LowerFRINT(Op, DAG);
  case ISD::FNEARBYINT: return LowerFNEARBYINT(Op, DAG);
  case ISD::FROUND: return LowerFROUND(Op, DAG);
  case ISD::FFLOOR: return LowerFFLOOR(Op, DAG);
  case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG);
  case ISD::UINT_TO_FP: return LowerUINT_TO_FP(Op, DAG);
  case ISD::FP_TO_SINT: return LowerFP_TO_SINT(Op, DAG);
  case ISD::FP_TO_UINT: return LowerFP_TO_UINT(Op, DAG);
  case ISD::CTLZ:
  case ISD::CTLZ_ZERO_UNDEF:
    return LowerCTLZ(Op, DAG);
  case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG);
  }
  return Op;
}

void AMDGPUTargetLowering::ReplaceNodeResults(SDNode *N,
                                              SmallVectorImpl<SDValue> &Results,
                                              SelectionDAG &DAG) const {
  switch (N->getOpcode()) {
  case ISD::SIGN_EXTEND_INREG:
    // Different parts of legalization seem to interpret which type of
    // sign_extend_inreg is the one to check for custom lowering. The extended
    // from type is what really matters, but some places check for custom
    // lowering of the result type. This results in trying to use
    // ReplaceNodeResults to sext_in_reg to an illegal type, so we'll just do
    // nothing here and let the illegal result integer be handled normally.
    return;
  default:
    return;
  }
}

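// A global only counts as having a defined initializer if it is a
// GlobalVariable whose initializer is something other than undef.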
static bool hasDefinedInitializer(const GlobalValue *GV) {
  const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV);
  if (!GVar || !GVar->hasInitializer())
    return false;

  return !isa<UndefValue>(GVar->getInitializer());
}

SDValue AMDGPUTargetLowering::LowerGlobalAddress(AMDGPUMachineFunction* MFI,
                                                 SDValue Op,
                                                 SelectionDAG &DAG) const {

  const DataLayout &DL = DAG.getDataLayout();
  GlobalAddressSDNode *G = cast<GlobalAddressSDNode>(Op);
  const GlobalValue *GV = G->getGlobal();

  switch (G->getAddressSpace()) {
  case AMDGPUAS::LOCAL_ADDRESS: {
    // XXX: What does the value of G->getOffset() mean?
    assert(G->getOffset() == 0 &&
           "Do not know what to do with a non-zero offset");

    // TODO: We could emit code to handle the initialization somewhere.
    if (hasDefinedInitializer(GV))
      break;

    unsigned Offset = MFI->allocateLDSGlobal(DL, *GV);
    return DAG.getConstant(Offset, SDLoc(Op), Op.getValueType());
  }
  }

  const Function &Fn = *DAG.getMachineFunction().getFunction();
  DiagnosticInfoUnsupported BadInit(
      Fn, "unsupported initializer for address space", SDLoc(Op).getDebugLoc());
  DAG.getContext()->diagnose(BadInit);
  return SDValue();
}

SDValue AMDGPUTargetLowering::LowerCONCAT_VECTORS(SDValue Op,
                                                  SelectionDAG &DAG) const {
  SmallVector<SDValue, 8> Args;

  for (const SDUse &U : Op->ops())
    DAG.ExtractVectorElements(U.get(), Args);

  return DAG.getBuildVector(Op.getValueType(), SDLoc(Op), Args);
}

SDValue AMDGPUTargetLowering::LowerEXTRACT_SUBVECTOR(SDValue Op,
                                                     SelectionDAG &DAG) const {

  SmallVector<SDValue, 8> Args;
  unsigned Start = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
  EVT VT = Op.getValueType();
  DAG.ExtractVectorElements(Op.getOperand(0), Args, Start,
                            VT.getVectorNumElements());

  return DAG.getBuildVector(Op.getValueType(), SDLoc(Op), Args);
}

SDValue AMDGPUTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
                                                      SelectionDAG &DAG) const {
  unsigned IntrinsicID = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  SDLoc DL(Op);
  EVT VT = Op.getValueType();

  switch (IntrinsicID) {
  default: return Op;
  case AMDGPUIntrinsic::AMDGPU_clamp: // Legacy name.
    return DAG.getNode(AMDGPUISD::CLAMP, DL, VT,
                       Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));

  case AMDGPUIntrinsic::AMDGPU_bfe_i32:
    return DAG.getNode(AMDGPUISD::BFE_I32, DL, VT,
                       Op.getOperand(1),
                       Op.getOperand(2),
                       Op.getOperand(3));

  case AMDGPUIntrinsic::AMDGPU_bfe_u32:
    return DAG.getNode(AMDGPUISD::BFE_U32, DL, VT,
                       Op.getOperand(1),
                       Op.getOperand(2),
                       Op.getOperand(3));
  }
}

/// \brief Generate Min/Max node
SDValue AMDGPUTargetLowering::CombineFMinMaxLegacy(const SDLoc &DL, EVT VT,
                                                   SDValue LHS, SDValue RHS,
                                                   SDValue True, SDValue False,
                                                   SDValue CC,
                                                   DAGCombinerInfo &DCI) const {
  if (Subtarget->getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS)
    return SDValue();

  if (!(LHS == True && RHS == False) && !(LHS == False && RHS == True))
    return SDValue();

  SelectionDAG &DAG = DCI.DAG;
  ISD::CondCode CCOpcode = cast<CondCodeSDNode>(CC)->get();
  switch (CCOpcode) {
  case ISD::SETOEQ:
  case ISD::SETONE:
  case ISD::SETUNE:
  case ISD::SETNE:
  case ISD::SETUEQ:
  case ISD::SETEQ:
  case ISD::SETFALSE:
  case ISD::SETFALSE2:
  case ISD::SETTRUE:
  case ISD::SETTRUE2:
  case ISD::SETUO:
  case ISD::SETO:
    break;
  case ISD::SETULE:
  case ISD::SETULT: {
    if (LHS == True)
      return DAG.getNode(AMDGPUISD::FMIN_LEGACY, DL, VT, RHS, LHS);
    return DAG.getNode(AMDGPUISD::FMAX_LEGACY, DL, VT, LHS, RHS);
  }
  case ISD::SETOLE:
  case ISD::SETOLT:
  case ISD::SETLE:
  case ISD::SETLT: {
    // Ordered. Assume ordered for undefined.

    // Only do this after legalization to avoid interfering with other combines
    // which might occur.
    if (DCI.getDAGCombineLevel() < AfterLegalizeDAG &&
        !DCI.isCalledByLegalizer())
      return SDValue();

    // We need to permute the operands to get the correct NaN behavior. The
    // selected operand is the second one based on the failing compare with NaN,
    // so permute it based on the compare type the hardware uses.
    if (LHS == True)
      return DAG.getNode(AMDGPUISD::FMIN_LEGACY, DL, VT, LHS, RHS);
    return DAG.getNode(AMDGPUISD::FMAX_LEGACY, DL, VT, RHS, LHS);
  }
  case ISD::SETUGE:
  case ISD::SETUGT: {
    if (LHS == True)
      return DAG.getNode(AMDGPUISD::FMAX_LEGACY, DL, VT, RHS, LHS);
    return DAG.getNode(AMDGPUISD::FMIN_LEGACY, DL, VT, LHS, RHS);
  }
  case ISD::SETGT:
  case ISD::SETGE:
  case ISD::SETOGE:
  case ISD::SETOGT: {
    if (DCI.getDAGCombineLevel() < AfterLegalizeDAG &&
        !DCI.isCalledByLegalizer())
      return SDValue();

    if (LHS == True)
      return DAG.getNode(AMDGPUISD::FMAX_LEGACY, DL, VT, LHS, RHS);
    return DAG.getNode(AMDGPUISD::FMIN_LEGACY, DL, VT, RHS, LHS);
  }
  case ISD::SETCC_INVALID:
    llvm_unreachable("Invalid setcc condcode!");
  }
  return SDValue();
}

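// Split a 64-bit value into its two 32-bit halves by bitcasting to v2i32;
// element 0 is the low half and element 1 the high half (AMDGPU is
// little-endian).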
std::pair<SDValue, SDValue>
AMDGPUTargetLowering::split64BitValue(SDValue Op, SelectionDAG &DAG) const {
  SDLoc SL(Op);

  SDValue Vec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Op);

  const SDValue Zero = DAG.getConstant(0, SL, MVT::i32);
  const SDValue One = DAG.getConstant(1, SL, MVT::i32);

  SDValue Lo = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Vec, Zero);
  SDValue Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Vec, One);

  return std::make_pair(Lo, Hi);
}

SDValue AMDGPUTargetLowering::getLoHalf64(SDValue Op, SelectionDAG &DAG) const {
  SDLoc SL(Op);

  SDValue Vec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Op);
  const SDValue Zero = DAG.getConstant(0, SL, MVT::i32);
  return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Vec, Zero);
}

SDValue AMDGPUTargetLowering::getHiHalf64(SDValue Op, SelectionDAG &DAG) const {
  SDLoc SL(Op);

  SDValue Vec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Op);
  const SDValue One = DAG.getConstant(1, SL, MVT::i32);
  return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Vec, One);
}

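// Split a wide vector load into two half-width loads, then recombine the
// values with CONCAT_VECTORS and the chains with a TokenFactor. Two-element
// vectors are scalarized instead of producing weird one-element vectors.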
SDValue AMDGPUTargetLowering::SplitVectorLoad(const SDValue Op,
                                              SelectionDAG &DAG) const {
  LoadSDNode *Load = cast<LoadSDNode>(Op);
  EVT VT = Op.getValueType();

  // If this is a 2 element vector, we really want to scalarize and not create
  // weird 1 element vectors.
  if (VT.getVectorNumElements() == 2)
    return scalarizeVectorLoad(Load, DAG);

  SDValue BasePtr = Load->getBasePtr();
  EVT PtrVT = BasePtr.getValueType();
  EVT MemVT = Load->getMemoryVT();
  SDLoc SL(Op);

  const MachinePointerInfo &SrcValue = Load->getMemOperand()->getPointerInfo();

  EVT LoVT, HiVT;
  EVT LoMemVT, HiMemVT;
  SDValue Lo, Hi;

  std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VT);
  std::tie(LoMemVT, HiMemVT) = DAG.GetSplitDestVTs(MemVT);
  std::tie(Lo, Hi) = DAG.SplitVector(Op, SL, LoVT, HiVT);

  unsigned Size = LoMemVT.getStoreSize();
  unsigned BaseAlign = Load->getAlignment();
  unsigned HiAlign = MinAlign(BaseAlign, Size);

  SDValue LoLoad = DAG.getExtLoad(Load->getExtensionType(), SL, LoVT,
                                  Load->getChain(), BasePtr, SrcValue, LoMemVT,
                                  BaseAlign, Load->getMemOperand()->getFlags());
  SDValue HiPtr = DAG.getNode(ISD::ADD, SL, PtrVT, BasePtr,
                              DAG.getConstant(Size, SL, PtrVT));
  SDValue HiLoad =
      DAG.getExtLoad(Load->getExtensionType(), SL, HiVT, Load->getChain(),
                     HiPtr, SrcValue.getWithOffset(LoMemVT.getStoreSize()),
                     HiMemVT, HiAlign, Load->getMemOperand()->getFlags());

  SDValue Ops[] = {
    DAG.getNode(ISD::CONCAT_VECTORS, SL, VT, LoLoad, HiLoad),
    DAG.getNode(ISD::TokenFactor, SL, MVT::Other,
                LoLoad.getValue(1), HiLoad.getValue(1))
  };

  return DAG.getMergeValues(Ops, SL);
}

// FIXME: This isn't doing anything for SI. This should be used in a target
// combine during type legalization.
SDValue AMDGPUTargetLowering::MergeVectorStore(const SDValue &Op,
                                               SelectionDAG &DAG) const {
  StoreSDNode *Store = cast<StoreSDNode>(Op);
  EVT MemVT = Store->getMemoryVT();
  unsigned MemBits = MemVT.getSizeInBits();

  // Byte stores are really expensive, so if possible, try to pack 32-bit vector
  // truncating store into an i32 store.
  // XXX: We could also optimize other vector bitwidths.
  if (!MemVT.isVector() || MemBits > 32) {
    return SDValue();
  }

  SDLoc DL(Op);
  SDValue Value = Store->getValue();
  EVT VT = Value.getValueType();
  EVT ElemVT = VT.getVectorElementType();
  SDValue Ptr = Store->getBasePtr();
  EVT MemEltVT = MemVT.getVectorElementType();
  unsigned MemEltBits = MemEltVT.getSizeInBits();
  unsigned MemNumElements = MemVT.getVectorNumElements();
  unsigned PackedSize = MemVT.getStoreSizeInBits();
  SDValue Mask = DAG.getConstant((1 << MemEltBits) - 1, DL, MVT::i32);

  assert(Value.getValueType().getScalarSizeInBits() >= 32);

  SDValue PackedValue;
  for (unsigned i = 0; i < MemNumElements; ++i) {
    SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ElemVT, Value,
                              DAG.getConstant(i, DL, MVT::i32));
    Elt = DAG.getZExtOrTrunc(Elt, DL, MVT::i32);
    Elt = DAG.getNode(ISD::AND, DL, MVT::i32, Elt, Mask); // getZeroExtendInReg

    SDValue Shift = DAG.getConstant(MemEltBits * i, DL, MVT::i32);
    Elt = DAG.getNode(ISD::SHL, DL, MVT::i32, Elt, Shift);

    if (i == 0) {
      PackedValue = Elt;
    } else {
      PackedValue = DAG.getNode(ISD::OR, DL, MVT::i32, PackedValue, Elt);
    }
  }

  if (PackedSize < 32) {
    EVT PackedVT = EVT::getIntegerVT(*DAG.getContext(), PackedSize);
    return DAG.getTruncStore(Store->getChain(), DL, PackedValue, Ptr,
                             Store->getMemOperand()->getPointerInfo(), PackedVT,
                             Store->getAlignment(),
                             Store->getMemOperand()->getFlags());
  }

  return DAG.getStore(Store->getChain(), DL, PackedValue, Ptr,
                      Store->getMemOperand()->getPointerInfo(),
                      Store->getAlignment(),
                      Store->getMemOperand()->getFlags());
}

SDValue AMDGPUTargetLowering::SplitVectorStore(SDValue Op,
                                               SelectionDAG &DAG) const {
  StoreSDNode *Store = cast<StoreSDNode>(Op);
  SDValue Val = Store->getValue();
  EVT VT = Val.getValueType();

  // If this is a 2 element vector, we really want to scalarize and not create
  // weird 1 element vectors.
  if (VT.getVectorNumElements() == 2)
    return scalarizeVectorStore(Store, DAG);

  EVT MemVT = Store->getMemoryVT();
  SDValue Chain = Store->getChain();
  SDValue BasePtr = Store->getBasePtr();
  SDLoc SL(Op);

  EVT LoVT, HiVT;
  EVT LoMemVT, HiMemVT;
  SDValue Lo, Hi;

  std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VT);
  std::tie(LoMemVT, HiMemVT) = DAG.GetSplitDestVTs(MemVT);
  std::tie(Lo, Hi) = DAG.SplitVector(Val, SL, LoVT, HiVT);

  EVT PtrVT = BasePtr.getValueType();
  SDValue HiPtr = DAG.getNode(ISD::ADD, SL, PtrVT, BasePtr,
                              DAG.getConstant(LoMemVT.getStoreSize(), SL,
                                              PtrVT));

  const MachinePointerInfo &SrcValue = Store->getMemOperand()->getPointerInfo();
  unsigned BaseAlign = Store->getAlignment();
  unsigned Size = LoMemVT.getStoreSize();
  unsigned HiAlign = MinAlign(BaseAlign, Size);

  SDValue LoStore =
      DAG.getTruncStore(Chain, SL, Lo, BasePtr, SrcValue, LoMemVT, BaseAlign,
                        Store->getMemOperand()->getFlags());
  SDValue HiStore =
      DAG.getTruncStore(Chain, SL, Hi, HiPtr, SrcValue.getWithOffset(Size),
                        HiMemVT, HiAlign, Store->getMemOperand()->getFlags());

  return DAG.getNode(ISD::TokenFactor, SL, MVT::Other, LoStore, HiStore);
}

Matt Arsenault0daeb632014-07-24 06:59:20 +00001086// This is a shortcut for integer division because we have fast i32<->f32
1087// conversions and fast f32 reciprocal instructions. The significand of an
Matt Arsenault81a70952016-05-21 01:53:33 +00001088// f32 is wide enough to represent up to a 24-bit signed integer exactly.
Matt Arsenault4e3d3832016-05-19 21:09:58 +00001089SDValue AMDGPUTargetLowering::LowerDIVREM24(SDValue Op, SelectionDAG &DAG,
1090 bool Sign) const {
Matt Arsenault1578aa72014-06-15 20:08:02 +00001091 SDLoc DL(Op);
Matt Arsenault0daeb632014-07-24 06:59:20 +00001092 EVT VT = Op.getValueType();
Matt Arsenault1578aa72014-06-15 20:08:02 +00001093 SDValue LHS = Op.getOperand(0);
1094 SDValue RHS = Op.getOperand(1);
Matt Arsenault0daeb632014-07-24 06:59:20 +00001095 MVT IntVT = MVT::i32;
1096 MVT FltVT = MVT::f32;
1097
Matt Arsenault81a70952016-05-21 01:53:33 +00001098 unsigned LHSSignBits = DAG.ComputeNumSignBits(LHS);
1099 if (LHSSignBits < 9)
1100 return SDValue();
1101
1102 unsigned RHSSignBits = DAG.ComputeNumSignBits(RHS);
1103 if (RHSSignBits < 9)
1104 return SDValue();
Jan Veselye5ca27d2014-08-12 17:31:20 +00001105
Matt Arsenault4e3d3832016-05-19 21:09:58 +00001106 unsigned BitSize = VT.getSizeInBits();
Matt Arsenault81a70952016-05-21 01:53:33 +00001107 unsigned SignBits = std::min(LHSSignBits, RHSSignBits);
1108 unsigned DivBits = BitSize - SignBits;
1109 if (Sign)
1110 ++DivBits;
1111
1112 ISD::NodeType ToFp = Sign ? ISD::SINT_TO_FP : ISD::UINT_TO_FP;
1113 ISD::NodeType ToInt = Sign ? ISD::FP_TO_SINT : ISD::FP_TO_UINT;
Matt Arsenault0daeb632014-07-24 06:59:20 +00001114
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00001115 SDValue jq = DAG.getConstant(1, DL, IntVT);
Matt Arsenault1578aa72014-06-15 20:08:02 +00001116
Matt Arsenault4e3d3832016-05-19 21:09:58 +00001117 if (Sign) {
Jan Veselye5ca27d2014-08-12 17:31:20 +00001118 // char|short jq = ia ^ ib;
1119 jq = DAG.getNode(ISD::XOR, DL, VT, LHS, RHS);
Matt Arsenault1578aa72014-06-15 20:08:02 +00001120
Jan Veselye5ca27d2014-08-12 17:31:20 +00001121 // jq = jq >> (bitsize - 2)
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00001122 jq = DAG.getNode(ISD::SRA, DL, VT, jq,
1123 DAG.getConstant(BitSize - 2, DL, VT));
Matt Arsenault1578aa72014-06-15 20:08:02 +00001124
Jan Veselye5ca27d2014-08-12 17:31:20 +00001125 // jq = jq | 0x1
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00001126 jq = DAG.getNode(ISD::OR, DL, VT, jq, DAG.getConstant(1, DL, VT));
Jan Veselye5ca27d2014-08-12 17:31:20 +00001127 }
Matt Arsenault1578aa72014-06-15 20:08:02 +00001128
1129 // int ia = (int)LHS;
Matt Arsenault4e3d3832016-05-19 21:09:58 +00001130 SDValue ia = LHS;
Matt Arsenault1578aa72014-06-15 20:08:02 +00001131
1132 // int ib = (int)RHS;
Matt Arsenault4e3d3832016-05-19 21:09:58 +00001133 SDValue ib = RHS;
Matt Arsenault1578aa72014-06-15 20:08:02 +00001134
1135 // float fa = (float)ia;
Jan Veselye5ca27d2014-08-12 17:31:20 +00001136 SDValue fa = DAG.getNode(ToFp, DL, FltVT, ia);
Matt Arsenault1578aa72014-06-15 20:08:02 +00001137
1138 // float fb = (float)ib;
Jan Veselye5ca27d2014-08-12 17:31:20 +00001139 SDValue fb = DAG.getNode(ToFp, DL, FltVT, ib);
Matt Arsenault1578aa72014-06-15 20:08:02 +00001140
Matt Arsenault0daeb632014-07-24 06:59:20 +00001141 SDValue fq = DAG.getNode(ISD::FMUL, DL, FltVT,
1142 fa, DAG.getNode(AMDGPUISD::RCP, DL, FltVT, fb));
Matt Arsenault1578aa72014-06-15 20:08:02 +00001143
1144 // fq = trunc(fq);
Matt Arsenault0daeb632014-07-24 06:59:20 +00001145 fq = DAG.getNode(ISD::FTRUNC, DL, FltVT, fq);
Matt Arsenault1578aa72014-06-15 20:08:02 +00001146
1147 // float fqneg = -fq;
Matt Arsenault0daeb632014-07-24 06:59:20 +00001148 SDValue fqneg = DAG.getNode(ISD::FNEG, DL, FltVT, fq);
Matt Arsenault1578aa72014-06-15 20:08:02 +00001149
1150 // float fr = mad(fqneg, fb, fa);
Matt Arsenault4e3d3832016-05-19 21:09:58 +00001151 SDValue fr = DAG.getNode(ISD::FMAD, DL, FltVT, fqneg, fb, fa);
Matt Arsenault1578aa72014-06-15 20:08:02 +00001152
1153 // int iq = (int)fq;
Jan Veselye5ca27d2014-08-12 17:31:20 +00001154 SDValue iq = DAG.getNode(ToInt, DL, IntVT, fq);
Matt Arsenault1578aa72014-06-15 20:08:02 +00001155
1156 // fr = fabs(fr);
Matt Arsenault0daeb632014-07-24 06:59:20 +00001157 fr = DAG.getNode(ISD::FABS, DL, FltVT, fr);
Matt Arsenault1578aa72014-06-15 20:08:02 +00001158
1159 // fb = fabs(fb);
Matt Arsenault0daeb632014-07-24 06:59:20 +00001160 fb = DAG.getNode(ISD::FABS, DL, FltVT, fb);
1161
Mehdi Amini44ede332015-07-09 02:09:04 +00001162 EVT SetCCVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);
Matt Arsenault1578aa72014-06-15 20:08:02 +00001163
1164 // int cv = fr >= fb;
Matt Arsenault0daeb632014-07-24 06:59:20 +00001165 SDValue cv = DAG.getSetCC(DL, SetCCVT, fr, fb, ISD::SETOGE);
1166
Matt Arsenault1578aa72014-06-15 20:08:02 +00001167 // jq = (cv ? jq : 0);
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00001168 jq = DAG.getNode(ISD::SELECT, DL, VT, cv, jq, DAG.getConstant(0, DL, VT));
Matt Arsenault0daeb632014-07-24 06:59:20 +00001169
Jan Veselye5ca27d2014-08-12 17:31:20 +00001170 // dst = iq + jq;
Jan Vesely4a33bc62014-08-12 17:31:17 +00001171 SDValue Div = DAG.getNode(ISD::ADD, DL, VT, iq, jq);
1172
Jan Veselye5ca27d2014-08-12 17:31:20 +00001173 // Rem needs compensation; it's easier to recompute it.
Jan Vesely4a33bc62014-08-12 17:31:17 +00001174 SDValue Rem = DAG.getNode(ISD::MUL, DL, VT, Div, RHS);
1175 Rem = DAG.getNode(ISD::SUB, DL, VT, LHS, Rem);
1176
Matt Arsenault81a70952016-05-21 01:53:33 +00001177 // Truncate to the number of bits this divide actually needs.
1178 if (Sign) {
1179 SDValue InRegSize
1180 = DAG.getValueType(EVT::getIntegerVT(*DAG.getContext(), DivBits));
1181 Div = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT, Div, InRegSize);
1182 Rem = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT, Rem, InRegSize);
1183 } else {
1184 SDValue TruncMask = DAG.getConstant((UINT64_C(1) << DivBits) - 1, DL, VT);
1185 Div = DAG.getNode(ISD::AND, DL, VT, Div, TruncMask);
1186 Rem = DAG.getNode(ISD::AND, DL, VT, Rem, TruncMask);
1187 }
1188
Matt Arsenault4e3d3832016-05-19 21:09:58 +00001189 return DAG.getMergeValues({ Div, Rem }, DL);
Matt Arsenault1578aa72014-06-15 20:08:02 +00001190}
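
// Editor's sketch (not part of the file): a scalar model of the unsigned
// DIVREM24 fast path above, assuming <cmath> and inputs that fit in 24 bits
// with b != 0. FMAD is approximated by an ordinary multiply-add; the
// correction term jq bumps the truncated quotient when the recomputed
// remainder is still at least b.
static void udivrem24Sketch(unsigned a, unsigned b, unsigned &q, unsigned &r) {
  float fa = (float)a;
  float fb = (float)b;
  float fq = truncf(fa * (1.0f / fb));   // RCP + FMUL + FTRUNC
  unsigned iq = (unsigned)fq;
  float fr = fabsf(fa - fq * fb);        // |mad(-fq, fb, fa)|
  unsigned jq = (fr >= fb) ? 1u : 0u;    // fb is already non-negative here
  q = iq + jq;
  r = a - q * b;                         // remainder is recomputed exactly
}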
1191
Tom Stellardbf69d762014-11-15 01:07:53 +00001192void AMDGPUTargetLowering::LowerUDIVREM64(SDValue Op,
1193 SelectionDAG &DAG,
1194 SmallVectorImpl<SDValue> &Results) const {
1195 assert(Op.getValueType() == MVT::i64);
1196
1197 SDLoc DL(Op);
1198 EVT VT = Op.getValueType();
1199 EVT HalfVT = VT.getHalfSizedIntegerVT(*DAG.getContext());
1200
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00001201 SDValue one = DAG.getConstant(1, DL, HalfVT);
1202 SDValue zero = DAG.getConstant(0, DL, HalfVT);
Tom Stellardbf69d762014-11-15 01:07:53 +00001203
1204 //HiLo split
1205 SDValue LHS = Op.getOperand(0);
1206 SDValue LHS_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, LHS, zero);
1207 SDValue LHS_Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, LHS, one);
1208
1209 SDValue RHS = Op.getOperand(1);
1210 SDValue RHS_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, RHS, zero);
1211 SDValue RHS_Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, RHS, one);
1212
Jan Vesely5f715d32015-01-22 23:42:43 +00001213 if (VT == MVT::i64 &&
1214 DAG.MaskedValueIsZero(RHS, APInt::getHighBitsSet(64, 32)) &&
1215 DAG.MaskedValueIsZero(LHS, APInt::getHighBitsSet(64, 32))) {
1216
1217 SDValue Res = DAG.getNode(ISD::UDIVREM, DL, DAG.getVTList(HalfVT, HalfVT),
1218 LHS_Lo, RHS_Lo);
1219
Ahmed Bougacha128f8732016-04-26 21:15:30 +00001220 SDValue DIV = DAG.getBuildVector(MVT::v2i32, DL, {Res.getValue(0), zero});
1221 SDValue REM = DAG.getBuildVector(MVT::v2i32, DL, {Res.getValue(1), zero});
Matt Arsenaultd275fca2016-03-01 05:06:05 +00001222
1223 Results.push_back(DAG.getNode(ISD::BITCAST, DL, MVT::i64, DIV));
1224 Results.push_back(DAG.getNode(ISD::BITCAST, DL, MVT::i64, REM));
Jan Vesely5f715d32015-01-22 23:42:43 +00001225 return;
1226 }
1227
Tom Stellardbf69d762014-11-15 01:07:53 +00001228 // Get Speculative values
1229 SDValue DIV_Part = DAG.getNode(ISD::UDIV, DL, HalfVT, LHS_Hi, RHS_Lo);
1230 SDValue REM_Part = DAG.getNode(ISD::UREM, DL, HalfVT, LHS_Hi, RHS_Lo);
1231
Tom Stellardbf69d762014-11-15 01:07:53 +00001232 SDValue REM_Lo = DAG.getSelectCC(DL, RHS_Hi, zero, REM_Part, LHS_Hi, ISD::SETEQ);
Ahmed Bougacha128f8732016-04-26 21:15:30 +00001233 SDValue REM = DAG.getBuildVector(MVT::v2i32, DL, {REM_Lo, zero});
Matt Arsenaultd275fca2016-03-01 05:06:05 +00001234 REM = DAG.getNode(ISD::BITCAST, DL, MVT::i64, REM);
Tom Stellardbf69d762014-11-15 01:07:53 +00001235
1236 SDValue DIV_Hi = DAG.getSelectCC(DL, RHS_Hi, zero, DIV_Part, zero, ISD::SETEQ);
1237 SDValue DIV_Lo = zero;
1238
1239 const unsigned halfBitWidth = HalfVT.getSizeInBits();
1240
1241 for (unsigned i = 0; i < halfBitWidth; ++i) {
Jan Veselyf7987ca2015-01-22 23:42:39 +00001242 const unsigned bitPos = halfBitWidth - i - 1;
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00001243 SDValue POS = DAG.getConstant(bitPos, DL, HalfVT);
Jan Veselyf7987ca2015-01-22 23:42:39 +00001244 // Get value of high bit
Jan Vesely811ef522015-04-12 23:45:01 +00001245 SDValue HBit = DAG.getNode(ISD::SRL, DL, HalfVT, LHS_Lo, POS);
1246 HBit = DAG.getNode(ISD::AND, DL, HalfVT, HBit, one);
Jan Veselyf7987ca2015-01-22 23:42:39 +00001247 HBit = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, HBit);
Tom Stellardbf69d762014-11-15 01:07:53 +00001248
Jan Veselyf7987ca2015-01-22 23:42:39 +00001249 // Shift
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00001250 REM = DAG.getNode(ISD::SHL, DL, VT, REM, DAG.getConstant(1, DL, VT));
Jan Veselyf7987ca2015-01-22 23:42:39 +00001251 // Add LHS high bit
1252 REM = DAG.getNode(ISD::OR, DL, VT, REM, HBit);
Tom Stellardbf69d762014-11-15 01:07:53 +00001253
Aaron Ballmanef0fe1e2016-03-30 21:30:00 +00001254 SDValue BIT = DAG.getConstant(1ULL << bitPos, DL, HalfVT);
Tom Stellard83171b32014-11-15 01:07:57 +00001255 SDValue realBIT = DAG.getSelectCC(DL, REM, RHS, BIT, zero, ISD::SETUGE);
Tom Stellardbf69d762014-11-15 01:07:53 +00001256
1257 DIV_Lo = DAG.getNode(ISD::OR, DL, HalfVT, DIV_Lo, realBIT);
1258
1259 // Update REM
Tom Stellardbf69d762014-11-15 01:07:53 +00001260 SDValue REM_sub = DAG.getNode(ISD::SUB, DL, VT, REM, RHS);
Tom Stellard83171b32014-11-15 01:07:57 +00001261 REM = DAG.getSelectCC(DL, REM, RHS, REM_sub, REM, ISD::SETUGE);
Tom Stellardbf69d762014-11-15 01:07:53 +00001262 }
1263
Ahmed Bougacha128f8732016-04-26 21:15:30 +00001264 SDValue DIV = DAG.getBuildVector(MVT::v2i32, DL, {DIV_Lo, DIV_Hi});
Matt Arsenaultd275fca2016-03-01 05:06:05 +00001265 DIV = DAG.getNode(ISD::BITCAST, DL, MVT::i64, DIV);
Tom Stellardbf69d762014-11-15 01:07:53 +00001266 Results.push_back(DIV);
1267 Results.push_back(REM);
1268}
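
// Editor's sketch (not part of the file): the same shift-and-subtract loop on
// plain 64-bit integers, assuming rhs != 0. The high half is divided
// speculatively (that result is only used when the divisor fits in 32 bits),
// then one bit of the low half is shifted into the remainder per iteration.
static void udivrem64Sketch(unsigned long long lhs, unsigned long long rhs,
                            unsigned long long &div, unsigned long long &rem) {
  unsigned lhsLo = (unsigned)lhs, lhsHi = (unsigned)(lhs >> 32);
  unsigned rhsLo = (unsigned)rhs, rhsHi = (unsigned)(rhs >> 32);
  unsigned divHi = (rhsHi == 0) ? lhsHi / rhsLo : 0;   // DIV_Hi select
  rem = (rhsHi == 0) ? lhsHi % rhsLo : lhsHi;          // REM_Lo select
  unsigned divLo = 0;
  for (int bitPos = 31; bitPos >= 0; --bitPos) {
    unsigned long long hbit = (lhsLo >> bitPos) & 1;   // next bit of LHS_Lo
    rem = (rem << 1) | hbit;                           // shift it in
    if (rem >= rhs) {                                  // SETUGE select
      divLo |= 1u << bitPos;
      rem -= rhs;
    }
  }
  div = ((unsigned long long)divHi << 32) | divLo;
}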
1269
Tom Stellard75aadc22012-12-11 21:25:42 +00001270SDValue AMDGPUTargetLowering::LowerUDIVREM(SDValue Op,
Matt Arsenault46013d92014-05-11 21:24:41 +00001271 SelectionDAG &DAG) const {
Andrew Trickef9de2a2013-05-25 02:42:55 +00001272 SDLoc DL(Op);
Tom Stellard75aadc22012-12-11 21:25:42 +00001273 EVT VT = Op.getValueType();
1274
Tom Stellardbf69d762014-11-15 01:07:53 +00001275 if (VT == MVT::i64) {
1276 SmallVector<SDValue, 2> Results;
1277 LowerUDIVREM64(Op, DAG, Results);
1278 return DAG.getMergeValues(Results, DL);
1279 }
1280
Matt Arsenault81a70952016-05-21 01:53:33 +00001281 if (VT == MVT::i32) {
1282 if (SDValue Res = LowerDIVREM24(Op, DAG, false))
1283 return Res;
1284 }
1285
Tom Stellard75aadc22012-12-11 21:25:42 +00001286 SDValue Num = Op.getOperand(0);
1287 SDValue Den = Op.getOperand(1);
1288
Tom Stellard75aadc22012-12-11 21:25:42 +00001289 // RCP = URECIP(Den) = 2^32 / Den + e
1290 // e is rounding error.
1291 SDValue RCP = DAG.getNode(AMDGPUISD::URECIP, DL, VT, Den);
1292
Tom Stellard4349b192014-09-22 15:35:30 +00001293 // RCP_LO = mul(RCP, Den)
1294 SDValue RCP_LO = DAG.getNode(ISD::MUL, DL, VT, RCP, Den);
Tom Stellard75aadc22012-12-11 21:25:42 +00001295
1296 // RCP_HI = mulhu(RCP, Den)
1297 SDValue RCP_HI = DAG.getNode(ISD::MULHU, DL, VT, RCP, Den);
1298
1299 // NEG_RCP_LO = -RCP_LO
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00001300 SDValue NEG_RCP_LO = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT),
Tom Stellard75aadc22012-12-11 21:25:42 +00001301 RCP_LO);
1302
1303 // ABS_RCP_LO = (RCP_HI == 0 ? NEG_RCP_LO : RCP_LO)
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00001304 SDValue ABS_RCP_LO = DAG.getSelectCC(DL, RCP_HI, DAG.getConstant(0, DL, VT),
Tom Stellard75aadc22012-12-11 21:25:42 +00001305 NEG_RCP_LO, RCP_LO,
1306 ISD::SETEQ);
1307 // Calculate the rounding error from the URECIP instruction
1308 // E = mulhu(ABS_RCP_LO, RCP)
1309 SDValue E = DAG.getNode(ISD::MULHU, DL, VT, ABS_RCP_LO, RCP);
1310
1311 // RCP_A_E = RCP + E
1312 SDValue RCP_A_E = DAG.getNode(ISD::ADD, DL, VT, RCP, E);
1313
1314 // RCP_S_E = RCP - E
1315 SDValue RCP_S_E = DAG.getNode(ISD::SUB, DL, VT, RCP, E);
1316
1317 // Tmp0 = (RCP_HI == 0 ? RCP_A_E : RCP_SUB_E)
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00001318 SDValue Tmp0 = DAG.getSelectCC(DL, RCP_HI, DAG.getConstant(0, DL, VT),
Tom Stellard75aadc22012-12-11 21:25:42 +00001319 RCP_A_E, RCP_S_E,
1320 ISD::SETEQ);
1321 // Quotient = mulhu(Tmp0, Num)
1322 SDValue Quotient = DAG.getNode(ISD::MULHU, DL, VT, Tmp0, Num);
1323
1324 // Num_S_Remainder = Quotient * Den
Tom Stellard4349b192014-09-22 15:35:30 +00001325 SDValue Num_S_Remainder = DAG.getNode(ISD::MUL, DL, VT, Quotient, Den);
Tom Stellard75aadc22012-12-11 21:25:42 +00001326
1327 // Remainder = Num - Num_S_Remainder
1328 SDValue Remainder = DAG.getNode(ISD::SUB, DL, VT, Num, Num_S_Remainder);
1329
1330 // Remainder_GE_Den = (Remainder >= Den ? -1 : 0)
1331 SDValue Remainder_GE_Den = DAG.getSelectCC(DL, Remainder, Den,
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00001332 DAG.getConstant(-1, DL, VT),
1333 DAG.getConstant(0, DL, VT),
Vincent Lejeune4f3751f2013-11-06 17:36:04 +00001334 ISD::SETUGE);
1335 // Remainder_GE_Zero = (Num >= Num_S_Remainder ? -1 : 0)
1336 SDValue Remainder_GE_Zero = DAG.getSelectCC(DL, Num,
1337 Num_S_Remainder,
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00001338 DAG.getConstant(-1, DL, VT),
1339 DAG.getConstant(0, DL, VT),
Vincent Lejeune4f3751f2013-11-06 17:36:04 +00001340 ISD::SETUGE);
Tom Stellard75aadc22012-12-11 21:25:42 +00001341 // Tmp1 = Remainder_GE_Den & Remainder_GE_Zero
1342 SDValue Tmp1 = DAG.getNode(ISD::AND, DL, VT, Remainder_GE_Den,
1343 Remainder_GE_Zero);
1344
1345 // Calculate Division result:
1346
1347 // Quotient_A_One = Quotient + 1
1348 SDValue Quotient_A_One = DAG.getNode(ISD::ADD, DL, VT, Quotient,
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00001349 DAG.getConstant(1, DL, VT));
Tom Stellard75aadc22012-12-11 21:25:42 +00001350
1351 // Quotient_S_One = Quotient - 1
1352 SDValue Quotient_S_One = DAG.getNode(ISD::SUB, DL, VT, Quotient,
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00001353 DAG.getConstant(1, DL, VT));
Tom Stellard75aadc22012-12-11 21:25:42 +00001354
1355 // Div = (Tmp1 == 0 ? Quotient : Quotient_A_One)
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00001356 SDValue Div = DAG.getSelectCC(DL, Tmp1, DAG.getConstant(0, DL, VT),
Tom Stellard75aadc22012-12-11 21:25:42 +00001357 Quotient, Quotient_A_One, ISD::SETEQ);
1358
1359 // Div = (Remainder_GE_Zero == 0 ? Quotient_S_One : Div)
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00001360 Div = DAG.getSelectCC(DL, Remainder_GE_Zero, DAG.getConstant(0, DL, VT),
Tom Stellard75aadc22012-12-11 21:25:42 +00001361 Quotient_S_One, Div, ISD::SETEQ);
1362
1363 // Calculate Rem result:
1364
1365 // Remainder_S_Den = Remainder - Den
1366 SDValue Remainder_S_Den = DAG.getNode(ISD::SUB, DL, VT, Remainder, Den);
1367
1368 // Remainder_A_Den = Remainder + Den
1369 SDValue Remainder_A_Den = DAG.getNode(ISD::ADD, DL, VT, Remainder, Den);
1370
1371 // Rem = (Tmp1 == 0 ? Remainder : Remainder_S_Den)
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00001372 SDValue Rem = DAG.getSelectCC(DL, Tmp1, DAG.getConstant(0, DL, VT),
Tom Stellard75aadc22012-12-11 21:25:42 +00001373 Remainder, Remainder_S_Den, ISD::SETEQ);
1374
1375 // Rem = (Remainder_GE_Zero == 0 ? Remainder_A_Den : Rem)
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00001376 Rem = DAG.getSelectCC(DL, Remainder_GE_Zero, DAG.getConstant(0, DL, VT),
Tom Stellard75aadc22012-12-11 21:25:42 +00001377 Remainder_A_Den, Rem, ISD::SETEQ);
Matt Arsenault7939acd2014-04-07 16:44:24 +00001378 SDValue Ops[2] = {
1379 Div,
1380 Rem
1381 };
Craig Topper64941d92014-04-27 19:20:57 +00001382 return DAG.getMergeValues(Ops, DL);
Tom Stellard75aadc22012-12-11 21:25:42 +00001383}
1384
Jan Vesely109efdf2014-06-22 21:43:00 +00001385SDValue AMDGPUTargetLowering::LowerSDIVREM(SDValue Op,
1386 SelectionDAG &DAG) const {
1387 SDLoc DL(Op);
1388 EVT VT = Op.getValueType();
1389
Jan Vesely109efdf2014-06-22 21:43:00 +00001390 SDValue LHS = Op.getOperand(0);
1391 SDValue RHS = Op.getOperand(1);
1392
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00001393 SDValue Zero = DAG.getConstant(0, DL, VT);
1394 SDValue NegOne = DAG.getConstant(-1, DL, VT);
Jan Vesely4a33bc62014-08-12 17:31:17 +00001395
Matt Arsenault81a70952016-05-21 01:53:33 +00001396 if (VT == MVT::i32) {
1397 if (SDValue Res = LowerDIVREM24(Op, DAG, true))
1398 return Res;
Jan Vesely5f715d32015-01-22 23:42:43 +00001399 }
Matt Arsenault81a70952016-05-21 01:53:33 +00001400
Jan Vesely5f715d32015-01-22 23:42:43 +00001401 if (VT == MVT::i64 &&
1402 DAG.ComputeNumSignBits(LHS) > 32 &&
1403 DAG.ComputeNumSignBits(RHS) > 32) {
1404 EVT HalfVT = VT.getHalfSizedIntegerVT(*DAG.getContext());
1405
1406 // Hi/Lo split
1407 SDValue LHS_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, LHS, Zero);
1408 SDValue RHS_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, RHS, Zero);
1409 SDValue DIVREM = DAG.getNode(ISD::SDIVREM, DL, DAG.getVTList(HalfVT, HalfVT),
1410 LHS_Lo, RHS_Lo);
1411 SDValue Res[2] = {
1412 DAG.getNode(ISD::SIGN_EXTEND, DL, VT, DIVREM.getValue(0)),
1413 DAG.getNode(ISD::SIGN_EXTEND, DL, VT, DIVREM.getValue(1))
1414 };
1415 return DAG.getMergeValues(Res, DL);
1416 }
1417
Jan Vesely109efdf2014-06-22 21:43:00 +00001418 SDValue LHSign = DAG.getSelectCC(DL, LHS, Zero, NegOne, Zero, ISD::SETLT);
1419 SDValue RHSign = DAG.getSelectCC(DL, RHS, Zero, NegOne, Zero, ISD::SETLT);
1420 SDValue DSign = DAG.getNode(ISD::XOR, DL, VT, LHSign, RHSign);
1421 SDValue RSign = LHSign; // Remainder sign is the same as LHS
1422
1423 LHS = DAG.getNode(ISD::ADD, DL, VT, LHS, LHSign);
1424 RHS = DAG.getNode(ISD::ADD, DL, VT, RHS, RHSign);
1425
1426 LHS = DAG.getNode(ISD::XOR, DL, VT, LHS, LHSign);
1427 RHS = DAG.getNode(ISD::XOR, DL, VT, RHS, RHSign);
1428
1429 SDValue Div = DAG.getNode(ISD::UDIVREM, DL, DAG.getVTList(VT, VT), LHS, RHS);
1430 SDValue Rem = Div.getValue(1);
1431
1432 Div = DAG.getNode(ISD::XOR, DL, VT, Div, DSign);
1433 Rem = DAG.getNode(ISD::XOR, DL, VT, Rem, RSign);
1434
1435 Div = DAG.getNode(ISD::SUB, DL, VT, Div, DSign);
1436 Rem = DAG.getNode(ISD::SUB, DL, VT, Rem, RSign);
1437
1438 SDValue Res[2] = {
1439 Div,
1440 Rem
1441 };
1442 return DAG.getMergeValues(Res, DL);
1443}
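
// Editor's sketch (not part of the file): the sign handling above on plain
// 32-bit integers, ignoring the INT_MIN / -1 overflow case. The sign words
// are 0 or -1, so (x + sign) ^ sign yields |x|, and the same xor/subtract
// pair restores the sign of each unsigned result (the remainder takes the
// sign of the dividend).
static void sdivrem32Sketch(int lhs, int rhs, int &div, int &rem) {
  unsigned lsign = lhs < 0 ? ~0u : 0u;
  unsigned rsign = rhs < 0 ? ~0u : 0u;
  unsigned dsign = lsign ^ rsign;                   // sign of the quotient
  unsigned ulhs = ((unsigned)lhs + lsign) ^ lsign;  // |lhs|
  unsigned urhs = ((unsigned)rhs + rsign) ^ rsign;  // |rhs|
  unsigned uq = ulhs / urhs;                        // ISD::UDIVREM
  unsigned ur = ulhs % urhs;
  div = (int)((uq ^ dsign) - dsign);
  rem = (int)((ur ^ lsign) - lsign);
}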
1444
Matt Arsenault16e31332014-09-10 21:44:27 +00001445// (frem x, y) -> (fsub x, (fmul (ftrunc (fdiv x, y)), y))
1446SDValue AMDGPUTargetLowering::LowerFREM(SDValue Op, SelectionDAG &DAG) const {
1447 SDLoc SL(Op);
1448 EVT VT = Op.getValueType();
1449 SDValue X = Op.getOperand(0);
1450 SDValue Y = Op.getOperand(1);
1451
Sanjay Patela2607012015-09-16 16:31:21 +00001452 // TODO: Should this propagate fast-math-flags?
1453
Matt Arsenault16e31332014-09-10 21:44:27 +00001454 SDValue Div = DAG.getNode(ISD::FDIV, SL, VT, X, Y);
1455 SDValue Floor = DAG.getNode(ISD::FTRUNC, SL, VT, Div);
1456 SDValue Mul = DAG.getNode(ISD::FMUL, SL, VT, Floor, Y);
1457
1458 return DAG.getNode(ISD::FSUB, SL, VT, X, Mul);
1459}
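
// Editor's sketch (not part of the file): the same expansion as scalar f32
// code, assuming <cmath> for truncf. Precision follows whatever the target's
// FDIV provides.
static float fremSketch(float x, float y) {
  return x - truncf(x / y) * y;
}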
1460
Matt Arsenault46010932014-06-18 17:05:30 +00001461SDValue AMDGPUTargetLowering::LowerFCEIL(SDValue Op, SelectionDAG &DAG) const {
1462 SDLoc SL(Op);
1463 SDValue Src = Op.getOperand(0);
1464
1465 // result = trunc(src)
1466 // if (src > 0.0 && src != result)
1467 // result += 1.0
1468
1469 SDValue Trunc = DAG.getNode(ISD::FTRUNC, SL, MVT::f64, Src);
1470
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00001471 const SDValue Zero = DAG.getConstantFP(0.0, SL, MVT::f64);
1472 const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f64);
Matt Arsenault46010932014-06-18 17:05:30 +00001473
Mehdi Amini44ede332015-07-09 02:09:04 +00001474 EVT SetCCVT =
1475 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::f64);
Matt Arsenault46010932014-06-18 17:05:30 +00001476
1477 SDValue Lt0 = DAG.getSetCC(SL, SetCCVT, Src, Zero, ISD::SETOGT);
1478 SDValue NeTrunc = DAG.getSetCC(SL, SetCCVT, Src, Trunc, ISD::SETONE);
1479 SDValue And = DAG.getNode(ISD::AND, SL, SetCCVT, Lt0, NeTrunc);
1480
1481 SDValue Add = DAG.getNode(ISD::SELECT, SL, MVT::f64, And, One, Zero);
Sanjay Patela2607012015-09-16 16:31:21 +00001482 // TODO: Should this propagate fast-math-flags?
Matt Arsenault46010932014-06-18 17:05:30 +00001483 return DAG.getNode(ISD::FADD, SL, MVT::f64, Trunc, Add);
1484}
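
// Editor's sketch (not part of the file): the f64 ceil expansion above as
// scalar code, assuming <cmath> for trunc. LowerFFLOOR later in this file is
// the mirror image (strictly negative inputs, adjust by -1.0).
static double ceilSketch(double x) {
  double t = trunc(x);
  // Add 1.0 only when x is strictly positive and had a fractional part.
  return (x > 0.0 && x != t) ? t + 1.0 : t;
}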
1485
Benjamin Kramerbdc49562016-06-12 15:39:02 +00001486static SDValue extractF64Exponent(SDValue Hi, const SDLoc &SL,
1487 SelectionDAG &DAG) {
Matt Arsenaultb0055482015-01-21 18:18:25 +00001488 const unsigned FractBits = 52;
1489 const unsigned ExpBits = 11;
1490
1491 SDValue ExpPart = DAG.getNode(AMDGPUISD::BFE_U32, SL, MVT::i32,
1492 Hi,
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00001493 DAG.getConstant(FractBits - 32, SL, MVT::i32),
1494 DAG.getConstant(ExpBits, SL, MVT::i32));
Matt Arsenaultb0055482015-01-21 18:18:25 +00001495 SDValue Exp = DAG.getNode(ISD::SUB, SL, MVT::i32, ExpPart,
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00001496 DAG.getConstant(1023, SL, MVT::i32));
Matt Arsenaultb0055482015-01-21 18:18:25 +00001497
1498 return Exp;
1499}
1500
Matt Arsenault46010932014-06-18 17:05:30 +00001501SDValue AMDGPUTargetLowering::LowerFTRUNC(SDValue Op, SelectionDAG &DAG) const {
1502 SDLoc SL(Op);
1503 SDValue Src = Op.getOperand(0);
1504
1505 assert(Op.getValueType() == MVT::f64);
1506
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00001507 const SDValue Zero = DAG.getConstant(0, SL, MVT::i32);
1508 const SDValue One = DAG.getConstant(1, SL, MVT::i32);
Matt Arsenault46010932014-06-18 17:05:30 +00001509
1510 SDValue VecSrc = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Src);
1511
1512 // Extract the upper half, since this is where we will find the sign and
1513 // exponent.
1514 SDValue Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, VecSrc, One);
1515
Matt Arsenaultb0055482015-01-21 18:18:25 +00001516 SDValue Exp = extractF64Exponent(Hi, SL, DAG);
Matt Arsenault46010932014-06-18 17:05:30 +00001517
Matt Arsenaultb0055482015-01-21 18:18:25 +00001518 const unsigned FractBits = 52;
Matt Arsenault46010932014-06-18 17:05:30 +00001519
1520 // Extract the sign bit.
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00001521 const SDValue SignBitMask = DAG.getConstant(UINT32_C(1) << 31, SL, MVT::i32);
Matt Arsenault46010932014-06-18 17:05:30 +00001522 SDValue SignBit = DAG.getNode(ISD::AND, SL, MVT::i32, Hi, SignBitMask);
1523
1524 // Extend back to 64 bits.
Ahmed Bougacha128f8732016-04-26 21:15:30 +00001525 SDValue SignBit64 = DAG.getBuildVector(MVT::v2i32, SL, {Zero, SignBit});
Matt Arsenault46010932014-06-18 17:05:30 +00001526 SignBit64 = DAG.getNode(ISD::BITCAST, SL, MVT::i64, SignBit64);
1527
1528 SDValue BcInt = DAG.getNode(ISD::BITCAST, SL, MVT::i64, Src);
Matt Arsenault2b0fa432014-06-18 22:11:03 +00001529 const SDValue FractMask
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00001530 = DAG.getConstant((UINT64_C(1) << FractBits) - 1, SL, MVT::i64);
Matt Arsenault46010932014-06-18 17:05:30 +00001531
1532 SDValue Shr = DAG.getNode(ISD::SRA, SL, MVT::i64, FractMask, Exp);
1533 SDValue Not = DAG.getNOT(SL, Shr, MVT::i64);
1534 SDValue Tmp0 = DAG.getNode(ISD::AND, SL, MVT::i64, BcInt, Not);
1535
Mehdi Amini44ede332015-07-09 02:09:04 +00001536 EVT SetCCVT =
1537 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::i32);
Matt Arsenault46010932014-06-18 17:05:30 +00001538
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00001539 const SDValue FiftyOne = DAG.getConstant(FractBits - 1, SL, MVT::i32);
Matt Arsenault46010932014-06-18 17:05:30 +00001540
1541 SDValue ExpLt0 = DAG.getSetCC(SL, SetCCVT, Exp, Zero, ISD::SETLT);
1542 SDValue ExpGt51 = DAG.getSetCC(SL, SetCCVT, Exp, FiftyOne, ISD::SETGT);
1543
1544 SDValue Tmp1 = DAG.getNode(ISD::SELECT, SL, MVT::i64, ExpLt0, SignBit64, Tmp0);
1545 SDValue Tmp2 = DAG.getNode(ISD::SELECT, SL, MVT::i64, ExpGt51, BcInt, Tmp1);
1546
1547 return DAG.getNode(ISD::BITCAST, SL, MVT::f64, Tmp2);
1548}
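
// Editor's sketch (not part of the file): the bit-level f64 trunc above,
// using memcpy (<cstring>) as the BITCAST. Fraction bits below the binary
// point are cleared; small magnitudes collapse to a signed zero, and large
// magnitudes (including inf/nan) pass through unchanged.
static double ftruncSketch(double x) {
  unsigned long long Bits;
  memcpy(&Bits, &x, 8);
  int Exp = (int)((Bits >> 52) & 0x7ff) - 1023;            // unbiased exponent
  if (Exp < 0)
    return (Bits & 0x8000000000000000ULL) ? -0.0 : 0.0;    // |x| < 1.0
  if (Exp > 51)
    return x;                                              // already integral
  unsigned long long FractMask = 0x000fffffffffffffULL >> Exp;
  Bits &= ~FractMask;                                      // drop the fraction
  memcpy(&x, &Bits, 8);
  return x;
}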
1549
Matt Arsenaulte8208ec2014-06-18 17:05:26 +00001550SDValue AMDGPUTargetLowering::LowerFRINT(SDValue Op, SelectionDAG &DAG) const {
1551 SDLoc SL(Op);
1552 SDValue Src = Op.getOperand(0);
1553
1554 assert(Op.getValueType() == MVT::f64);
1555
Matt Arsenaultd22626f2014-06-18 17:45:58 +00001556 APFloat C1Val(APFloat::IEEEdouble, "0x1.0p+52");
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00001557 SDValue C1 = DAG.getConstantFP(C1Val, SL, MVT::f64);
Matt Arsenaulte8208ec2014-06-18 17:05:26 +00001558 SDValue CopySign = DAG.getNode(ISD::FCOPYSIGN, SL, MVT::f64, C1, Src);
1559
Sanjay Patela2607012015-09-16 16:31:21 +00001560 // TODO: Should this propagate fast-math-flags?
1561
Matt Arsenaulte8208ec2014-06-18 17:05:26 +00001562 SDValue Tmp1 = DAG.getNode(ISD::FADD, SL, MVT::f64, Src, CopySign);
1563 SDValue Tmp2 = DAG.getNode(ISD::FSUB, SL, MVT::f64, Tmp1, CopySign);
1564
1565 SDValue Fabs = DAG.getNode(ISD::FABS, SL, MVT::f64, Src);
Matt Arsenaultd22626f2014-06-18 17:45:58 +00001566
1567 APFloat C2Val(APFloat::IEEEdouble, "0x1.fffffffffffffp+51");
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00001568 SDValue C2 = DAG.getConstantFP(C2Val, SL, MVT::f64);
Matt Arsenaulte8208ec2014-06-18 17:05:26 +00001569
Mehdi Amini44ede332015-07-09 02:09:04 +00001570 EVT SetCCVT =
1571 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::f64);
Matt Arsenaulte8208ec2014-06-18 17:05:26 +00001572 SDValue Cond = DAG.getSetCC(SL, SetCCVT, Fabs, C2, ISD::SETOGT);
1573
1574 return DAG.getSelect(SL, MVT::f64, Cond, Src, Tmp2);
1575}
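
// Editor's sketch (not part of the file): the 2^52 trick above, assuming
// <cmath>, the default round-to-nearest mode, and no fast-math reassociation
// of the add/subtract pair.
static double frintSketch(double x) {
  const double C1 = 4503599627370496.0;       // 0x1.0p+52
  const double C2 = 4503599627370495.5;       // 0x1.fffffffffffffp+51
  double Magic = copysign(C1, x);
  double Rounded = (x + Magic) - Magic;       // forces rounding to an integer
  return (fabs(x) > C2) ? x : Rounded;        // large values already integral
}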
1576
Matt Arsenault692bd5e2014-06-18 22:03:45 +00001577SDValue AMDGPUTargetLowering::LowerFNEARBYINT(SDValue Op, SelectionDAG &DAG) const {
1578 // FNEARBYINT and FRINT are the same, except in their handling of FP
1579 // exceptions. Those aren't really meaningful for us, and OpenCL only has
1580 // rint, so just treat them as equivalent.
1581 return DAG.getNode(ISD::FRINT, SDLoc(Op), Op.getValueType(), Op.getOperand(0));
1582}
1583
Matt Arsenaultb0055482015-01-21 18:18:25 +00001584// XXX - May require not supporting f32 denormals?
1585SDValue AMDGPUTargetLowering::LowerFROUND32(SDValue Op, SelectionDAG &DAG) const {
1586 SDLoc SL(Op);
1587 SDValue X = Op.getOperand(0);
1588
1589 SDValue T = DAG.getNode(ISD::FTRUNC, SL, MVT::f32, X);
1590
Sanjay Patela2607012015-09-16 16:31:21 +00001591 // TODO: Should this propagate fast-math-flags?
1592
Matt Arsenaultb0055482015-01-21 18:18:25 +00001593 SDValue Diff = DAG.getNode(ISD::FSUB, SL, MVT::f32, X, T);
1594
1595 SDValue AbsDiff = DAG.getNode(ISD::FABS, SL, MVT::f32, Diff);
1596
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00001597 const SDValue Zero = DAG.getConstantFP(0.0, SL, MVT::f32);
1598 const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f32);
1599 const SDValue Half = DAG.getConstantFP(0.5, SL, MVT::f32);
Matt Arsenaultb0055482015-01-21 18:18:25 +00001600
1601 SDValue SignOne = DAG.getNode(ISD::FCOPYSIGN, SL, MVT::f32, One, X);
1602
Mehdi Amini44ede332015-07-09 02:09:04 +00001603 EVT SetCCVT =
1604 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::f32);
Matt Arsenaultb0055482015-01-21 18:18:25 +00001605
1606 SDValue Cmp = DAG.getSetCC(SL, SetCCVT, AbsDiff, Half, ISD::SETOGE);
1607
1608 SDValue Sel = DAG.getNode(ISD::SELECT, SL, MVT::f32, Cmp, SignOne, Zero);
1609
1610 return DAG.getNode(ISD::FADD, SL, MVT::f32, T, Sel);
1611}
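
// Editor's sketch (not part of the file): the f32 round-half-away-from-zero
// expansion above, assuming <cmath>.
static float froundSketch(float x) {
  float T = truncf(x);
  float AbsDiff = fabsf(x - T);
  // Step one unit toward the sign of x when the dropped fraction is >= 0.5.
  return T + (AbsDiff >= 0.5f ? copysignf(1.0f, x) : 0.0f);
}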
1612
1613SDValue AMDGPUTargetLowering::LowerFROUND64(SDValue Op, SelectionDAG &DAG) const {
1614 SDLoc SL(Op);
1615 SDValue X = Op.getOperand(0);
1616
1617 SDValue L = DAG.getNode(ISD::BITCAST, SL, MVT::i64, X);
1618
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00001619 const SDValue Zero = DAG.getConstant(0, SL, MVT::i32);
1620 const SDValue One = DAG.getConstant(1, SL, MVT::i32);
1621 const SDValue NegOne = DAG.getConstant(-1, SL, MVT::i32);
1622 const SDValue FiftyOne = DAG.getConstant(51, SL, MVT::i32);
Mehdi Amini44ede332015-07-09 02:09:04 +00001623 EVT SetCCVT =
1624 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::i32);
Matt Arsenaultb0055482015-01-21 18:18:25 +00001625
1626 SDValue BC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, X);
1627
1628 SDValue Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, BC, One);
1629
1630 SDValue Exp = extractF64Exponent(Hi, SL, DAG);
1631
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00001632 const SDValue Mask = DAG.getConstant(INT64_C(0x000fffffffffffff), SL,
1633 MVT::i64);
Matt Arsenaultb0055482015-01-21 18:18:25 +00001634
1635 SDValue M = DAG.getNode(ISD::SRA, SL, MVT::i64, Mask, Exp);
1636 SDValue D = DAG.getNode(ISD::SRA, SL, MVT::i64,
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00001637 DAG.getConstant(INT64_C(0x0008000000000000), SL,
1638 MVT::i64),
Matt Arsenaultb0055482015-01-21 18:18:25 +00001639 Exp);
1640
1641 SDValue Tmp0 = DAG.getNode(ISD::AND, SL, MVT::i64, L, M);
1642 SDValue Tmp1 = DAG.getSetCC(SL, SetCCVT,
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00001643 DAG.getConstant(0, SL, MVT::i64), Tmp0,
Matt Arsenaultb0055482015-01-21 18:18:25 +00001644 ISD::SETNE);
1645
1646 SDValue Tmp2 = DAG.getNode(ISD::SELECT, SL, MVT::i64, Tmp1,
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00001647 D, DAG.getConstant(0, SL, MVT::i64));
Matt Arsenaultb0055482015-01-21 18:18:25 +00001648 SDValue K = DAG.getNode(ISD::ADD, SL, MVT::i64, L, Tmp2);
1649
1650 K = DAG.getNode(ISD::AND, SL, MVT::i64, K, DAG.getNOT(SL, M, MVT::i64));
1651 K = DAG.getNode(ISD::BITCAST, SL, MVT::f64, K);
1652
1653 SDValue ExpLt0 = DAG.getSetCC(SL, SetCCVT, Exp, Zero, ISD::SETLT);
1654 SDValue ExpGt51 = DAG.getSetCC(SL, SetCCVT, Exp, FiftyOne, ISD::SETGT);
1655 SDValue ExpEqNegOne = DAG.getSetCC(SL, SetCCVT, NegOne, Exp, ISD::SETEQ);
1656
1657 SDValue Mag = DAG.getNode(ISD::SELECT, SL, MVT::f64,
1658 ExpEqNegOne,
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00001659 DAG.getConstantFP(1.0, SL, MVT::f64),
1660 DAG.getConstantFP(0.0, SL, MVT::f64));
Matt Arsenaultb0055482015-01-21 18:18:25 +00001661
1662 SDValue S = DAG.getNode(ISD::FCOPYSIGN, SL, MVT::f64, Mag, X);
1663
1664 K = DAG.getNode(ISD::SELECT, SL, MVT::f64, ExpLt0, S, K);
1665 K = DAG.getNode(ISD::SELECT, SL, MVT::f64, ExpGt51, X, K);
1666
1667 return K;
1668}
1669
1670SDValue AMDGPUTargetLowering::LowerFROUND(SDValue Op, SelectionDAG &DAG) const {
1671 EVT VT = Op.getValueType();
1672
1673 if (VT == MVT::f32)
1674 return LowerFROUND32(Op, DAG);
1675
1676 if (VT == MVT::f64)
1677 return LowerFROUND64(Op, DAG);
1678
1679 llvm_unreachable("unhandled type");
1680}
1681
Matt Arsenault46010932014-06-18 17:05:30 +00001682SDValue AMDGPUTargetLowering::LowerFFLOOR(SDValue Op, SelectionDAG &DAG) const {
1683 SDLoc SL(Op);
1684 SDValue Src = Op.getOperand(0);
1685
1686 // result = trunc(src);
1687 // if (src < 0.0 && src != result)
1688 // result += -1.0.
1689
1690 SDValue Trunc = DAG.getNode(ISD::FTRUNC, SL, MVT::f64, Src);
1691
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00001692 const SDValue Zero = DAG.getConstantFP(0.0, SL, MVT::f64);
1693 const SDValue NegOne = DAG.getConstantFP(-1.0, SL, MVT::f64);
Matt Arsenault46010932014-06-18 17:05:30 +00001694
Mehdi Amini44ede332015-07-09 02:09:04 +00001695 EVT SetCCVT =
1696 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::f64);
Matt Arsenault46010932014-06-18 17:05:30 +00001697
1698 SDValue Lt0 = DAG.getSetCC(SL, SetCCVT, Src, Zero, ISD::SETOLT);
1699 SDValue NeTrunc = DAG.getSetCC(SL, SetCCVT, Src, Trunc, ISD::SETONE);
1700 SDValue And = DAG.getNode(ISD::AND, SL, SetCCVT, Lt0, NeTrunc);
1701
1702 SDValue Add = DAG.getNode(ISD::SELECT, SL, MVT::f64, And, NegOne, Zero);
Sanjay Patela2607012015-09-16 16:31:21 +00001703 // TODO: Should this propagate fast-math-flags?
Matt Arsenault46010932014-06-18 17:05:30 +00001704 return DAG.getNode(ISD::FADD, SL, MVT::f64, Trunc, Add);
1705}
1706
Matt Arsenaultf058d672016-01-11 16:50:29 +00001707SDValue AMDGPUTargetLowering::LowerCTLZ(SDValue Op, SelectionDAG &DAG) const {
1708 SDLoc SL(Op);
1709 SDValue Src = Op.getOperand(0);
Matt Arsenaultf058d672016-01-11 16:50:29 +00001710 bool ZeroUndef = Op.getOpcode() == ISD::CTLZ_ZERO_UNDEF;
Matt Arsenaultde5fbe92016-01-11 17:02:00 +00001711
1712 if (ZeroUndef && Src.getValueType() == MVT::i32)
1713 return DAG.getNode(AMDGPUISD::FFBH_U32, SL, MVT::i32, Src);
1714
Matt Arsenaultf058d672016-01-11 16:50:29 +00001715 SDValue Vec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Src);
1716
1717 const SDValue Zero = DAG.getConstant(0, SL, MVT::i32);
1718 const SDValue One = DAG.getConstant(1, SL, MVT::i32);
1719
1720 SDValue Lo = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Vec, Zero);
1721 SDValue Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Vec, One);
1722
1723 EVT SetCCVT = getSetCCResultType(DAG.getDataLayout(),
1724 *DAG.getContext(), MVT::i32);
1725
1726 SDValue Hi0 = DAG.getSetCC(SL, SetCCVT, Hi, Zero, ISD::SETEQ);
1727
1728 SDValue CtlzLo = DAG.getNode(ISD::CTLZ_ZERO_UNDEF, SL, MVT::i32, Lo);
1729 SDValue CtlzHi = DAG.getNode(ISD::CTLZ_ZERO_UNDEF, SL, MVT::i32, Hi);
1730
1731 const SDValue Bits32 = DAG.getConstant(32, SL, MVT::i32);
1732 SDValue Add = DAG.getNode(ISD::ADD, SL, MVT::i32, CtlzLo, Bits32);
1733
1734 // ctlz(x) = hi_32(x) == 0 ? ctlz(lo_32(x)) + 32 : ctlz(hi_32(x))
1735 SDValue NewCtlz = DAG.getNode(ISD::SELECT, SL, MVT::i32, Hi0, Add, CtlzHi);
1736
1737 if (!ZeroUndef) {
1738 // Test if the full 64-bit input is zero.
1739
1740 // FIXME: DAG combines turn what should be an s_and_b64 into a v_or_b32,
1741 // which we probably don't want.
1742 SDValue Lo0 = DAG.getSetCC(SL, SetCCVT, Lo, Zero, ISD::SETEQ);
1743 SDValue SrcIsZero = DAG.getNode(ISD::AND, SL, SetCCVT, Lo0, Hi0);
1744
1745 // TODO: If i64 setcc is half rate, it can result in 1 fewer instruction
1746 // with the same cycles, otherwise it is slower.
1747 // SDValue SrcIsZero = DAG.getSetCC(SL, SetCCVT, Src,
1748 // DAG.getConstant(0, SL, MVT::i64), ISD::SETEQ);
1749
1750 const SDValue Bits64 = DAG.getConstant(64, SL, MVT::i32);
1751
1752 // The instruction returns -1 for 0 input, but the defined intrinsic
1753 // behavior is to return the number of bits.
1754 NewCtlz = DAG.getNode(ISD::SELECT, SL, MVT::i32,
1755 SrcIsZero, Bits64, NewCtlz);
1756 }
1757
1758 return DAG.getNode(ISD::ZERO_EXTEND, SL, MVT::i64, NewCtlz);
1759}
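
// Editor's sketch (not part of the file): the 64-bit ctlz built from two
// 32-bit ones, using the GCC/Clang __builtin_clz as a stand-in for the
// FFBH/CTLZ_ZERO_UNDEF node (both are undefined for a zero input).
static unsigned ctlz64Sketch(unsigned long long x) {
  if (x == 0)
    return 64;                      // the non-zero-undef form defines this case
  unsigned Lo = (unsigned)x;
  unsigned Hi = (unsigned)(x >> 32);
  // ctlz(x) = hi_32(x) == 0 ? ctlz(lo_32(x)) + 32 : ctlz(hi_32(x))
  return (Hi == 0) ? __builtin_clz(Lo) + 32 : __builtin_clz(Hi);
}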
1760
Matt Arsenault5e0bdb82016-01-11 22:01:48 +00001761SDValue AMDGPUTargetLowering::LowerINT_TO_FP32(SDValue Op, SelectionDAG &DAG,
1762 bool Signed) const {
1763 // Unsigned
1764 // cul2f(ulong u)
1765 //{
1766 // uint lz = clz(u);
1767 // uint e = (u != 0) ? 127U + 63U - lz : 0;
1768 // u = (u << lz) & 0x7fffffffffffffffUL;
1769 // ulong t = u & 0xffffffffffUL;
1770 // uint v = (e << 23) | (uint)(u >> 40);
1771 // uint r = t > 0x8000000000UL ? 1U : (t == 0x8000000000UL ? v & 1U : 0U);
1772 // return as_float(v + r);
1773 //}
1774 // Signed
1775 // cl2f(long l)
1776 //{
1777 // long s = l >> 63;
1778 // float r = cul2f((l + s) ^ s);
1779 // return s ? -r : r;
1780 //}
1781
1782 SDLoc SL(Op);
1783 SDValue Src = Op.getOperand(0);
1784 SDValue L = Src;
1785
1786 SDValue S;
1787 if (Signed) {
1788 const SDValue SignBit = DAG.getConstant(63, SL, MVT::i64);
1789 S = DAG.getNode(ISD::SRA, SL, MVT::i64, L, SignBit);
1790
1791 SDValue LPlusS = DAG.getNode(ISD::ADD, SL, MVT::i64, L, S);
1792 L = DAG.getNode(ISD::XOR, SL, MVT::i64, LPlusS, S);
1793 }
1794
1795 EVT SetCCVT = getSetCCResultType(DAG.getDataLayout(),
1796 *DAG.getContext(), MVT::f32);
1797
1798
1799 SDValue ZeroI32 = DAG.getConstant(0, SL, MVT::i32);
1800 SDValue ZeroI64 = DAG.getConstant(0, SL, MVT::i64);
1801 SDValue LZ = DAG.getNode(ISD::CTLZ_ZERO_UNDEF, SL, MVT::i64, L);
1802 LZ = DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, LZ);
1803
1804 SDValue K = DAG.getConstant(127U + 63U, SL, MVT::i32);
1805 SDValue E = DAG.getSelect(SL, MVT::i32,
1806 DAG.getSetCC(SL, SetCCVT, L, ZeroI64, ISD::SETNE),
1807 DAG.getNode(ISD::SUB, SL, MVT::i32, K, LZ),
1808 ZeroI32);
1809
1810 SDValue U = DAG.getNode(ISD::AND, SL, MVT::i64,
1811 DAG.getNode(ISD::SHL, SL, MVT::i64, L, LZ),
1812 DAG.getConstant((-1ULL) >> 1, SL, MVT::i64));
1813
1814 SDValue T = DAG.getNode(ISD::AND, SL, MVT::i64, U,
1815 DAG.getConstant(0xffffffffffULL, SL, MVT::i64));
1816
1817 SDValue UShl = DAG.getNode(ISD::SRL, SL, MVT::i64,
1818 U, DAG.getConstant(40, SL, MVT::i64));
1819
1820 SDValue V = DAG.getNode(ISD::OR, SL, MVT::i32,
1821 DAG.getNode(ISD::SHL, SL, MVT::i32, E, DAG.getConstant(23, SL, MVT::i32)),
1822 DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, UShl));
1823
1824 SDValue C = DAG.getConstant(0x8000000000ULL, SL, MVT::i64);
1825 SDValue RCmp = DAG.getSetCC(SL, SetCCVT, T, C, ISD::SETUGT);
1826 SDValue TCmp = DAG.getSetCC(SL, SetCCVT, T, C, ISD::SETEQ);
1827
1828 SDValue One = DAG.getConstant(1, SL, MVT::i32);
1829
1830 SDValue VTrunc1 = DAG.getNode(ISD::AND, SL, MVT::i32, V, One);
1831
1832 SDValue R = DAG.getSelect(SL, MVT::i32,
1833 RCmp,
1834 One,
1835 DAG.getSelect(SL, MVT::i32, TCmp, VTrunc1, ZeroI32));
1836 R = DAG.getNode(ISD::ADD, SL, MVT::i32, V, R);
1837 R = DAG.getNode(ISD::BITCAST, SL, MVT::f32, R);
1838
1839 if (!Signed)
1840 return R;
1841
1842 SDValue RNeg = DAG.getNode(ISD::FNEG, SL, MVT::f32, R);
1843 return DAG.getSelect(SL, MVT::f32, DAG.getSExtOrTrunc(S, SL, SetCCVT), RNeg, R);
1844}
1845
Matt Arsenaultf7c95e32014-10-03 23:54:41 +00001846SDValue AMDGPUTargetLowering::LowerINT_TO_FP64(SDValue Op, SelectionDAG &DAG,
1847 bool Signed) const {
1848 SDLoc SL(Op);
1849 SDValue Src = Op.getOperand(0);
1850
1851 SDValue BC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Src);
1852
1853 SDValue Lo = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, BC,
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00001854 DAG.getConstant(0, SL, MVT::i32));
Matt Arsenaultf7c95e32014-10-03 23:54:41 +00001855 SDValue Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, BC,
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00001856 DAG.getConstant(1, SL, MVT::i32));
Matt Arsenaultf7c95e32014-10-03 23:54:41 +00001857
1858 SDValue CvtHi = DAG.getNode(Signed ? ISD::SINT_TO_FP : ISD::UINT_TO_FP,
1859 SL, MVT::f64, Hi);
1860
1861 SDValue CvtLo = DAG.getNode(ISD::UINT_TO_FP, SL, MVT::f64, Lo);
1862
1863 SDValue LdExp = DAG.getNode(AMDGPUISD::LDEXP, SL, MVT::f64, CvtHi,
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00001864 DAG.getConstant(32, SL, MVT::i32));
Sanjay Patela2607012015-09-16 16:31:21 +00001865 // TODO: Should this propagate fast-math-flags?
Matt Arsenaultf7c95e32014-10-03 23:54:41 +00001866 return DAG.getNode(ISD::FADD, SL, MVT::f64, LdExp, CvtLo);
1867}
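
// Editor's sketch (not part of the file): the unsigned variant above on plain
// integers, assuming <cmath> for ldexp. Both 32-bit halves convert exactly;
// only the final add rounds. The signed variant differs only in converting
// the high half as a signed value.
static double uint64ToDoubleSketch(unsigned long long x) {
  double Hi = (double)(unsigned)(x >> 32);   // exact
  double Lo = (double)(unsigned)x;           // exact
  return ldexp(Hi, 32) + Lo;                 // LDEXP + FADD
}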
1868
Tom Stellardc947d8c2013-10-30 17:22:05 +00001869SDValue AMDGPUTargetLowering::LowerUINT_TO_FP(SDValue Op,
1870 SelectionDAG &DAG) const {
Matt Arsenault5e0bdb82016-01-11 22:01:48 +00001871 assert(Op.getOperand(0).getValueType() == MVT::i64 &&
1872 "operation should be legal");
Tom Stellardc947d8c2013-10-30 17:22:05 +00001873
Matt Arsenaultf7c95e32014-10-03 23:54:41 +00001874 EVT DestVT = Op.getValueType();
Matt Arsenaultf7c95e32014-10-03 23:54:41 +00001875
Matt Arsenault5e0bdb82016-01-11 22:01:48 +00001876 if (DestVT == MVT::f32)
1877 return LowerINT_TO_FP32(Op, DAG, false);
Matt Arsenaultf7c95e32014-10-03 23:54:41 +00001878
Matt Arsenaultedc7dcb2016-07-28 00:32:05 +00001879 assert(DestVT == MVT::f64);
1880 return LowerINT_TO_FP64(Op, DAG, false);
Tom Stellardc947d8c2013-10-30 17:22:05 +00001881}
Tom Stellardfbab8272013-08-16 01:12:11 +00001882
Matt Arsenaultf7c95e32014-10-03 23:54:41 +00001883SDValue AMDGPUTargetLowering::LowerSINT_TO_FP(SDValue Op,
1884 SelectionDAG &DAG) const {
Matt Arsenault5e0bdb82016-01-11 22:01:48 +00001885 assert(Op.getOperand(0).getValueType() == MVT::i64 &&
1886 "operation should be legal");
1887
1888 EVT DestVT = Op.getValueType();
1889 if (DestVT == MVT::f32)
1890 return LowerINT_TO_FP32(Op, DAG, true);
1891
Matt Arsenaultedc7dcb2016-07-28 00:32:05 +00001892 assert(DestVT == MVT::f64);
1893 return LowerINT_TO_FP64(Op, DAG, true);
Matt Arsenaultf7c95e32014-10-03 23:54:41 +00001894}
1895
Matt Arsenaultc9961752014-10-03 23:54:56 +00001896SDValue AMDGPUTargetLowering::LowerFP64_TO_INT(SDValue Op, SelectionDAG &DAG,
1897 bool Signed) const {
1898 SDLoc SL(Op);
1899
1900 SDValue Src = Op.getOperand(0);
1901
1902 SDValue Trunc = DAG.getNode(ISD::FTRUNC, SL, MVT::f64, Src);
1903
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00001904 SDValue K0 = DAG.getConstantFP(BitsToDouble(UINT64_C(0x3df0000000000000)), SL,
1905 MVT::f64);
1906 SDValue K1 = DAG.getConstantFP(BitsToDouble(UINT64_C(0xc1f0000000000000)), SL,
1907 MVT::f64);
Sanjay Patela2607012015-09-16 16:31:21 +00001908 // TODO: Should this propagate fast-math-flags?
Matt Arsenaultc9961752014-10-03 23:54:56 +00001909 SDValue Mul = DAG.getNode(ISD::FMUL, SL, MVT::f64, Trunc, K0);
1910
1911 SDValue FloorMul = DAG.getNode(ISD::FFLOOR, SL, MVT::f64, Mul);
1912
1913
1914 SDValue Fma = DAG.getNode(ISD::FMA, SL, MVT::f64, FloorMul, K1, Trunc);
1915
1916 SDValue Hi = DAG.getNode(Signed ? ISD::FP_TO_SINT : ISD::FP_TO_UINT, SL,
1917 MVT::i32, FloorMul);
1918 SDValue Lo = DAG.getNode(ISD::FP_TO_UINT, SL, MVT::i32, Fma);
1919
Ahmed Bougacha128f8732016-04-26 21:15:30 +00001920 SDValue Result = DAG.getBuildVector(MVT::v2i32, SL, {Lo, Hi});
Matt Arsenaultc9961752014-10-03 23:54:56 +00001921
1922 return DAG.getNode(ISD::BITCAST, SL, MVT::i64, Result);
1923}
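
// Editor's sketch (not part of the file): the unsigned flavor of the path
// above, assuming <cmath> and an input that fits in 64 bits. K0 is 2^-32 and
// K1 is -2^32; the fma recovers the low 32 bits exactly.
static unsigned long long fp64ToUInt64Sketch(double x) {
  double T = trunc(x);
  double Hi = floor(T / 4294967296.0);           // floor(T * K0)
  double Lo = fma(Hi, -4294967296.0, T);         // fma(Hi, K1, T)
  return ((unsigned long long)(unsigned)Hi << 32) | (unsigned)Lo;
}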
1924
1925SDValue AMDGPUTargetLowering::LowerFP_TO_SINT(SDValue Op,
1926 SelectionDAG &DAG) const {
1927 SDValue Src = Op.getOperand(0);
1928
1929 if (Op.getValueType() == MVT::i64 && Src.getValueType() == MVT::f64)
1930 return LowerFP64_TO_INT(Op, DAG, true);
1931
1932 return SDValue();
1933}
1934
1935SDValue AMDGPUTargetLowering::LowerFP_TO_UINT(SDValue Op,
1936 SelectionDAG &DAG) const {
1937 SDValue Src = Op.getOperand(0);
1938
1939 if (Op.getValueType() == MVT::i64 && Src.getValueType() == MVT::f64)
1940 return LowerFP64_TO_INT(Op, DAG, false);
1941
1942 return SDValue();
1943}
1944
Matt Arsenaultfae02982014-03-17 18:58:11 +00001945SDValue AMDGPUTargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op,
1946 SelectionDAG &DAG) const {
1947 EVT ExtraVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
1948 MVT VT = Op.getSimpleValueType();
1949 MVT ScalarVT = VT.getScalarType();
1950
Matt Arsenaultedc7dcb2016-07-28 00:32:05 +00001951 assert(VT.isVector());
Matt Arsenaultfae02982014-03-17 18:58:11 +00001952
1953 SDValue Src = Op.getOperand(0);
Matt Arsenault5dbd5db2014-04-22 03:49:30 +00001954 SDLoc DL(Op);
Matt Arsenaultfae02982014-03-17 18:58:11 +00001955
Matt Arsenault5dbd5db2014-04-22 03:49:30 +00001956 // TODO: Don't scalarize on Evergreen?
1957 unsigned NElts = VT.getVectorNumElements();
1958 SmallVector<SDValue, 8> Args;
1959 DAG.ExtractVectorElements(Src, Args, 0, NElts);
Matt Arsenaultfae02982014-03-17 18:58:11 +00001960
Matt Arsenault5dbd5db2014-04-22 03:49:30 +00001961 SDValue VTOp = DAG.getValueType(ExtraVT.getScalarType());
1962 for (unsigned I = 0; I < NElts; ++I)
1963 Args[I] = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, ScalarVT, Args[I], VTOp);
Matt Arsenaultfae02982014-03-17 18:58:11 +00001964
Ahmed Bougacha128f8732016-04-26 21:15:30 +00001965 return DAG.getBuildVector(VT, DL, Args);
Matt Arsenaultfae02982014-03-17 18:58:11 +00001966}
1967
Tom Stellard75aadc22012-12-11 21:25:42 +00001968//===----------------------------------------------------------------------===//
Tom Stellard50122a52014-04-07 19:45:41 +00001969// Custom DAG optimizations
1970//===----------------------------------------------------------------------===//
1971
1972static bool isU24(SDValue Op, SelectionDAG &DAG) {
1973 APInt KnownZero, KnownOne;
1974 EVT VT = Op.getValueType();
Jay Foada0653a32014-05-14 21:14:37 +00001975 DAG.computeKnownBits(Op, KnownZero, KnownOne);
Tom Stellard50122a52014-04-07 19:45:41 +00001976
1977 return (VT.getSizeInBits() - KnownZero.countLeadingOnes()) <= 24;
1978}
1979
1980static bool isI24(SDValue Op, SelectionDAG &DAG) {
1981 EVT VT = Op.getValueType();
1982
1983 // In order for this to be a signed 24-bit value, bit 23 must
1984 // be a sign bit.
1985 return VT.getSizeInBits() >= 24 && // Types less than 24-bit should be treated
1986 // as unsigned 24-bit values.
1987 (VT.getSizeInBits() - DAG.ComputeNumSignBits(Op)) < 24;
1988}
1989
Matt Arsenault2712d4a2016-08-27 01:32:27 +00001990static bool simplifyI24(SDValue Op, TargetLowering::DAGCombinerInfo &DCI) {
Tom Stellard50122a52014-04-07 19:45:41 +00001991
1992 SelectionDAG &DAG = DCI.DAG;
1993 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
1994 EVT VT = Op.getValueType();
1995
1996 APInt Demanded = APInt::getLowBitsSet(VT.getSizeInBits(), 24);
1997 APInt KnownZero, KnownOne;
1998 TargetLowering::TargetLoweringOpt TLO(DAG, true, true);
Matt Arsenault2712d4a2016-08-27 01:32:27 +00001999 if (TLI.SimplifyDemandedBits(Op, Demanded, KnownZero, KnownOne, TLO)) {
Tom Stellard50122a52014-04-07 19:45:41 +00002000 DCI.CommitTargetLoweringOpt(TLO);
Matt Arsenault2712d4a2016-08-27 01:32:27 +00002001 return true;
2002 }
2003
2004 return false;
Tom Stellard50122a52014-04-07 19:45:41 +00002005}
2006
Matt Arsenault5565f65e2014-05-22 18:09:07 +00002007template <typename IntTy>
Benjamin Kramerbdc49562016-06-12 15:39:02 +00002008static SDValue constantFoldBFE(SelectionDAG &DAG, IntTy Src0, uint32_t Offset,
2009 uint32_t Width, const SDLoc &DL) {
Matt Arsenault5565f65e2014-05-22 18:09:07 +00002010 if (Width + Offset < 32) {
Matt Arsenault46cbc432014-09-19 00:42:06 +00002011 uint32_t Shl = static_cast<uint32_t>(Src0) << (32 - Offset - Width);
2012 IntTy Result = static_cast<IntTy>(Shl) >> (32 - Width);
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00002013 return DAG.getConstant(Result, DL, MVT::i32);
Matt Arsenault5565f65e2014-05-22 18:09:07 +00002014 }
2015
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00002016 return DAG.getConstant(Src0 >> Offset, DL, MVT::i32);
Matt Arsenault5565f65e2014-05-22 18:09:07 +00002017}
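
// Editor's note (not part of the file): as a worked example, constantFoldBFE
// with IntTy = int32_t, Src0 = 0x12345678, Offset = 8, Width = 8 computes
// Shl = 0x12345678 << 16 = 0x56780000 and then 0x56780000 >> 24 = 0x56,
// i.e. the 8-bit field starting at bit 8.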
2018
Matt Arsenault327bb5a2016-07-01 22:47:50 +00002019static bool hasVolatileUser(SDNode *Val) {
2020 for (SDNode *U : Val->uses()) {
2021 if (MemSDNode *M = dyn_cast<MemSDNode>(U)) {
2022 if (M->isVolatile())
2023 return true;
2024 }
2025 }
2026
2027 return false;
2028}
2029
Matt Arsenault8af47a02016-07-01 22:55:55 +00002030bool AMDGPUTargetLowering::shouldCombineMemoryType(EVT VT) const {
Matt Arsenault327bb5a2016-07-01 22:47:50 +00002031 // i32 vectors are the canonical memory type.
2032 if (VT.getScalarType() == MVT::i32 || isTypeLegal(VT))
2033 return false;
2034
Matt Arsenault327bb5a2016-07-01 22:47:50 +00002035 if (!VT.isByteSized())
2036 return false;
2037
2038 unsigned Size = VT.getStoreSize();
2039
2040 if ((Size == 1 || Size == 2 || Size == 4) && !VT.isVector())
2041 return false;
2042
2043 if (Size == 3 || (Size > 4 && (Size % 4 != 0)))
2044 return false;
2045
Matt Arsenaultca3976f2014-07-15 02:06:31 +00002046 return true;
2047}
2048
Matt Arsenault327bb5a2016-07-01 22:47:50 +00002049// Replace load of an illegal type with a store of a bitcast to a friendlier
2050// type.
2051SDValue AMDGPUTargetLowering::performLoadCombine(SDNode *N,
2052 DAGCombinerInfo &DCI) const {
2053 if (!DCI.isBeforeLegalize())
2054 return SDValue();
2055
2056 LoadSDNode *LN = cast<LoadSDNode>(N);
2057 if (LN->isVolatile() || !ISD::isNormalLoad(LN) || hasVolatileUser(LN))
2058 return SDValue();
2059
Matt Arsenault327bb5a2016-07-01 22:47:50 +00002060 SDLoc SL(N);
2061 SelectionDAG &DAG = DCI.DAG;
2062 EVT VT = LN->getMemoryVT();
Matt Arsenault8af47a02016-07-01 22:55:55 +00002063
2064 unsigned Size = VT.getStoreSize();
2065 unsigned Align = LN->getAlignment();
2066 if (Align < Size && isTypeLegal(VT)) {
2067 bool IsFast;
2068 unsigned AS = LN->getAddressSpace();
2069
2070 // Expand unaligned loads earlier than legalization. Due to visitation order
2071 // problems during legalization, the emitted instructions to pack and unpack
2072 // the bytes again are not eliminated in the case of an unaligned copy.
2073 if (!allowsMisalignedMemoryAccesses(VT, AS, Align, &IsFast)) {
2074 SDValue Ops[2];
2075 std::tie(Ops[0], Ops[1]) = expandUnalignedLoad(LN, DAG);
2076 return DAG.getMergeValues(Ops, SDLoc(N));
2077 }
2078
2079 if (!IsFast)
2080 return SDValue();
2081 }
2082
2083 if (!shouldCombineMemoryType(VT))
2084 return SDValue();
2085
Matt Arsenault327bb5a2016-07-01 22:47:50 +00002086 EVT NewVT = getEquivalentMemType(*DAG.getContext(), VT);
2087
2088 SDValue NewLoad
2089 = DAG.getLoad(NewVT, SL, LN->getChain(),
2090 LN->getBasePtr(), LN->getMemOperand());
2091
2092 SDValue BC = DAG.getNode(ISD::BITCAST, SL, VT, NewLoad);
2093 DCI.CombineTo(N, BC, NewLoad.getValue(1));
2094 return SDValue(N, 0);
2095}
2096
2097// Replace store of an illegal type with a store of a bitcast to a friendlier
2098// type.
Matt Arsenaultca3976f2014-07-15 02:06:31 +00002099SDValue AMDGPUTargetLowering::performStoreCombine(SDNode *N,
2100 DAGCombinerInfo &DCI) const {
2101 if (!DCI.isBeforeLegalize())
2102 return SDValue();
2103
2104 StoreSDNode *SN = cast<StoreSDNode>(N);
Matt Arsenault327bb5a2016-07-01 22:47:50 +00002105 if (SN->isVolatile() || !ISD::isNormalStore(SN))
Matt Arsenaultca3976f2014-07-15 02:06:31 +00002106 return SDValue();
2107
Matt Arsenault327bb5a2016-07-01 22:47:50 +00002108 EVT VT = SN->getMemoryVT();
Matt Arsenault8af47a02016-07-01 22:55:55 +00002109 unsigned Size = VT.getStoreSize();
Matt Arsenaultca3976f2014-07-15 02:06:31 +00002110
2111 SDLoc SL(N);
2112 SelectionDAG &DAG = DCI.DAG;
Matt Arsenault8af47a02016-07-01 22:55:55 +00002113 unsigned Align = SN->getAlignment();
2114 if (Align < Size && isTypeLegal(VT)) {
2115 bool IsFast;
2116 unsigned AS = SN->getAddressSpace();
2117
2118 // Expand unaligned stores earlier than legalization. Due to visitation
2119 // order problems during legalization, the emitted instructions to pack and
2120 // unpack the bytes again are not eliminated in the case of an unaligned
2121 // copy.
2122 if (!allowsMisalignedMemoryAccesses(VT, AS, Align, &IsFast))
2123 return expandUnalignedStore(SN, DAG);
2124
2125 if (!IsFast)
2126 return SDValue();
2127 }
2128
2129 if (!shouldCombineMemoryType(VT))
2130 return SDValue();
2131
Matt Arsenault327bb5a2016-07-01 22:47:50 +00002132 EVT NewVT = getEquivalentMemType(*DAG.getContext(), VT);
Matt Arsenault8af47a02016-07-01 22:55:55 +00002133 SDValue Val = SN->getValue();
2134
2135 //DCI.AddToWorklist(Val.getNode());
Matt Arsenaultca3976f2014-07-15 02:06:31 +00002136
Matt Arsenault327bb5a2016-07-01 22:47:50 +00002137 bool OtherUses = !Val.hasOneUse();
2138 SDValue CastVal = DAG.getNode(ISD::BITCAST, SL, NewVT, Val);
2139 if (OtherUses) {
2140 SDValue CastBack = DAG.getNode(ISD::BITCAST, SL, VT, CastVal);
2141 DAG.ReplaceAllUsesOfValueWith(Val, CastBack);
2142 }
Matt Arsenaultca3976f2014-07-15 02:06:31 +00002143
Matt Arsenault327bb5a2016-07-01 22:47:50 +00002144 return DAG.getStore(SN->getChain(), SL, CastVal,
Matt Arsenaultca3976f2014-07-15 02:06:31 +00002145 SN->getBasePtr(), SN->getMemOperand());
2146}
2147
Matt Arsenault6e3a4512016-01-18 22:01:13 +00002148// TODO: Should repeat for other bit ops.
2149SDValue AMDGPUTargetLowering::performAndCombine(SDNode *N,
2150 DAGCombinerInfo &DCI) const {
2151 if (N->getValueType(0) != MVT::i64)
2152 return SDValue();
2153
2154 // Break up 64-bit and of a constant into two 32-bit ands. This will typically
2155 // happen anyway for a VALU 64-bit and. This exposes other 32-bit integer
2156 // combine opportunities since most 64-bit operations are decomposed this way.
2157 // TODO: We won't want this for SALU especially if it is an inline immediate.
2158 const ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N->getOperand(1));
2159 if (!RHS)
2160 return SDValue();
2161
2162 uint64_t Val = RHS->getZExtValue();
2163 if (Lo_32(Val) != 0 && Hi_32(Val) != 0 && !RHS->hasOneUse()) {
2164 // If either half of the constant is 0, this is really a 32-bit and, so
2165 // split it. If we can re-use the full materialized constant, keep it.
2166 return SDValue();
2167 }
2168
2169 SDLoc SL(N);
2170 SelectionDAG &DAG = DCI.DAG;
2171
2172 SDValue Lo, Hi;
2173 std::tie(Lo, Hi) = split64BitValue(N->getOperand(0), DAG);
2174
2175 SDValue LoRHS = DAG.getConstant(Lo_32(Val), SL, MVT::i32);
2176 SDValue HiRHS = DAG.getConstant(Hi_32(Val), SL, MVT::i32);
2177
2178 SDValue LoAnd = DAG.getNode(ISD::AND, SL, MVT::i32, Lo, LoRHS);
2179 SDValue HiAnd = DAG.getNode(ISD::AND, SL, MVT::i32, Hi, HiRHS);
2180
Matt Arsenaultefa3fe12016-04-22 22:48:38 +00002181 // Re-visit the ands. It's possible we eliminated one of them and it could
2182 // simplify the vector.
2183 DCI.AddToWorklist(Lo.getNode());
2184 DCI.AddToWorklist(Hi.getNode());
2185
Ahmed Bougacha128f8732016-04-26 21:15:30 +00002186 SDValue Vec = DAG.getBuildVector(MVT::v2i32, SL, {LoAnd, HiAnd});
Matt Arsenault6e3a4512016-01-18 22:01:13 +00002187 return DAG.getNode(ISD::BITCAST, SL, MVT::i64, Vec);
2188}
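
// Editor's sketch (not part of the file): the scalar identity behind the
// 64-bit AND split above.
static unsigned long long and64Sketch(unsigned long long x, unsigned long long c) {
  unsigned Lo = (unsigned)x & (unsigned)c;                   // 32-bit and, low
  unsigned Hi = (unsigned)(x >> 32) & (unsigned)(c >> 32);   // 32-bit and, high
  return ((unsigned long long)Hi << 32) | Lo;                // rebuilt i64
}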
2189
Matt Arsenault24692112015-07-14 18:20:33 +00002190SDValue AMDGPUTargetLowering::performShlCombine(SDNode *N,
2191 DAGCombinerInfo &DCI) const {
2192 if (N->getValueType(0) != MVT::i64)
2193 return SDValue();
2194
Matt Arsenault3cbbc102016-01-18 21:55:14 +00002195 // i64 (shl x, C) -> (build_pair 0, (shl x, C -32))
Matt Arsenault24692112015-07-14 18:20:33 +00002196
Matt Arsenault3cbbc102016-01-18 21:55:14 +00002197 // On some subtargets, 64-bit shift is a quarter rate instruction. In the
2198 // common case, splitting this into a move and a 32-bit shift is faster and
2199 // the same code size.
Matt Arsenault24692112015-07-14 18:20:33 +00002200 const ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N->getOperand(1));
Matt Arsenault3cbbc102016-01-18 21:55:14 +00002201 if (!RHS)
2202 return SDValue();
2203
2204 unsigned RHSVal = RHS->getZExtValue();
2205 if (RHSVal < 32)
Matt Arsenault24692112015-07-14 18:20:33 +00002206 return SDValue();
2207
2208 SDValue LHS = N->getOperand(0);
2209
2210 SDLoc SL(N);
2211 SelectionDAG &DAG = DCI.DAG;
2212
Matt Arsenault3cbbc102016-01-18 21:55:14 +00002213 SDValue ShiftAmt = DAG.getConstant(RHSVal - 32, SL, MVT::i32);
2214
Matt Arsenault24692112015-07-14 18:20:33 +00002215 SDValue Lo = DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, LHS);
Matt Arsenault3cbbc102016-01-18 21:55:14 +00002216 SDValue NewShift = DAG.getNode(ISD::SHL, SL, MVT::i32, Lo, ShiftAmt);
Matt Arsenault24692112015-07-14 18:20:33 +00002217
2218 const SDValue Zero = DAG.getConstant(0, SL, MVT::i32);
Matt Arsenault80edab92016-01-18 21:43:36 +00002219
Ahmed Bougacha128f8732016-04-26 21:15:30 +00002220 SDValue Vec = DAG.getBuildVector(MVT::v2i32, SL, {Zero, NewShift});
Matt Arsenault3cbbc102016-01-18 21:55:14 +00002221 return DAG.getNode(ISD::BITCAST, SL, MVT::i64, Vec);
Matt Arsenault24692112015-07-14 18:20:33 +00002222}
2223
Matt Arsenault33e3ece2016-01-18 22:09:04 +00002224SDValue AMDGPUTargetLowering::performSraCombine(SDNode *N,
2225 DAGCombinerInfo &DCI) const {
2226 if (N->getValueType(0) != MVT::i64)
2227 return SDValue();
2228
2229 const ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N->getOperand(1));
2230 if (!RHS)
2231 return SDValue();
2232
2233 SelectionDAG &DAG = DCI.DAG;
2234 SDLoc SL(N);
2235 unsigned RHSVal = RHS->getZExtValue();
2236
2237 // (sra i64:x, 32) -> build_pair x, (sra hi_32(x), 31)
2238 if (RHSVal == 32) {
2239 SDValue Hi = getHiHalf64(N->getOperand(0), DAG);
2240 SDValue NewShift = DAG.getNode(ISD::SRA, SL, MVT::i32, Hi,
2241 DAG.getConstant(31, SL, MVT::i32));
2242
Ahmed Bougacha128f8732016-04-26 21:15:30 +00002243 SDValue BuildVec = DAG.getBuildVector(MVT::v2i32, SL, {Hi, NewShift});
Matt Arsenault33e3ece2016-01-18 22:09:04 +00002244 return DAG.getNode(ISD::BITCAST, SL, MVT::i64, BuildVec);
2245 }
2246
2247 // (sra i64:x, 63) -> build_pair (sra hi_32(x), 31), (sra hi_32(x), 31)
2248 if (RHSVal == 63) {
2249 SDValue Hi = getHiHalf64(N->getOperand(0), DAG);
2250 SDValue NewShift = DAG.getNode(ISD::SRA, SL, MVT::i32, Hi,
2251 DAG.getConstant(31, SL, MVT::i32));
Ahmed Bougacha128f8732016-04-26 21:15:30 +00002252 SDValue BuildVec = DAG.getBuildVector(MVT::v2i32, SL, {NewShift, NewShift});
Matt Arsenault33e3ece2016-01-18 22:09:04 +00002253 return DAG.getNode(ISD::BITCAST, SL, MVT::i64, BuildVec);
2254 }
2255
2256 return SDValue();
2257}
2258
Matt Arsenault80edab92016-01-18 21:43:36 +00002259SDValue AMDGPUTargetLowering::performSrlCombine(SDNode *N,
2260 DAGCombinerInfo &DCI) const {
2261 if (N->getValueType(0) != MVT::i64)
2262 return SDValue();
2263
2264 const ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N->getOperand(1));
2265 if (!RHS)
2266 return SDValue();
2267
2268 unsigned ShiftAmt = RHS->getZExtValue();
2269 if (ShiftAmt < 32)
2270 return SDValue();
2271
2272 // srl i64:x, C for C >= 32
2273 // =>
2274 // build_pair (srl hi_32(x), C - 32), 0
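  // e.g. srl i64:x, 41 => build_pair (srl hi_32(x), 9), 0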
2275
2276 SelectionDAG &DAG = DCI.DAG;
2277 SDLoc SL(N);
2278
2279 SDValue One = DAG.getConstant(1, SL, MVT::i32);
2280 SDValue Zero = DAG.getConstant(0, SL, MVT::i32);
2281
2282 SDValue VecOp = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, N->getOperand(0));
2283 SDValue Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32,
2284 VecOp, One);
2285
2286 SDValue NewConst = DAG.getConstant(ShiftAmt - 32, SL, MVT::i32);
2287 SDValue NewShift = DAG.getNode(ISD::SRL, SL, MVT::i32, Hi, NewConst);
2288
Ahmed Bougacha128f8732016-04-26 21:15:30 +00002289 SDValue BuildPair = DAG.getBuildVector(MVT::v2i32, SL, {NewShift, Zero});
Matt Arsenault80edab92016-01-18 21:43:36 +00002290
2291 return DAG.getNode(ISD::BITCAST, SL, MVT::i64, BuildPair);
2292}
2293
Matt Arsenault2712d4a2016-08-27 01:32:27 +00002294// We need to specifically handle i64 mul here to avoid unnecessary conversion
2295// instructions. If we only match on the legalized i64 mul expansion,
2296// SimplifyDemandedBits will be unable to remove them because there will be
2297// multiple uses due to the separate mul + mulh[su].
2298static SDValue getMul24(SelectionDAG &DAG, const SDLoc &SL,
2299 SDValue N0, SDValue N1, unsigned Size, bool Signed) {
2300 if (Size <= 32) {
2301 unsigned MulOpc = Signed ? AMDGPUISD::MUL_I24 : AMDGPUISD::MUL_U24;
2302 return DAG.getNode(MulOpc, SL, MVT::i32, N0, N1);
2303 }
2304
2305 // Because we want to eliminate extension instructions before the
2306 // operation, we need to create a single user here (i.e. not the separate
2307 // mul_lo + mul_hi) so that SimplifyDemandedBits will deal with it.
2308
2309 unsigned MulOpc = Signed ? AMDGPUISD::MUL_LOHI_I24 : AMDGPUISD::MUL_LOHI_U24;
2310
2311 SDValue Mul = DAG.getNode(MulOpc, SL,
2312 DAG.getVTList(MVT::i32, MVT::i32), N0, N1);
2313
2314 return DAG.getNode(ISD::BUILD_PAIR, SL, MVT::i64,
2315 Mul.getValue(0), Mul.getValue(1));
2316}
2317
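// Lower a scalar multiply to MUL_[IU]24 (or the paired 24-bit mul_lo / mul_hi
// for 64-bit results) when both operands are known to fit in 24 bits.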
Matt Arsenaultd0e0f0a2014-06-30 17:55:48 +00002318SDValue AMDGPUTargetLowering::performMulCombine(SDNode *N,
2319 DAGCombinerInfo &DCI) const {
2320 EVT VT = N->getValueType(0);
2321
Matt Arsenault2712d4a2016-08-27 01:32:27 +00002322 unsigned Size = VT.getSizeInBits();
2323 if (VT.isVector() || Size > 64)
Matt Arsenaultd0e0f0a2014-06-30 17:55:48 +00002324 return SDValue();
2325
2326 SelectionDAG &DAG = DCI.DAG;
2327 SDLoc DL(N);
2328
2329 SDValue N0 = N->getOperand(0);
2330 SDValue N1 = N->getOperand(1);
2331 SDValue Mul;
2332
2333 if (Subtarget->hasMulU24() && isU24(N0, DAG) && isU24(N1, DAG)) {
2334 N0 = DAG.getZExtOrTrunc(N0, DL, MVT::i32);
2335 N1 = DAG.getZExtOrTrunc(N1, DL, MVT::i32);
Matt Arsenault2712d4a2016-08-27 01:32:27 +00002336 Mul = getMul24(DAG, DL, N0, N1, Size, false);
Matt Arsenaultd0e0f0a2014-06-30 17:55:48 +00002337 } else if (Subtarget->hasMulI24() && isI24(N0, DAG) && isI24(N1, DAG)) {
2338 N0 = DAG.getSExtOrTrunc(N0, DL, MVT::i32);
2339 N1 = DAG.getSExtOrTrunc(N1, DL, MVT::i32);
Matt Arsenault2712d4a2016-08-27 01:32:27 +00002340 Mul = getMul24(DAG, DL, N0, N1, Size, true);
Matt Arsenaultd0e0f0a2014-06-30 17:55:48 +00002341 } else {
2342 return SDValue();
2343 }
2344
2345 // We need to use sext even for MUL_U24, because MUL_U24 is used
2346 // for signed multiply of 8 and 16-bit types.
2347 return DAG.getSExtOrTrunc(Mul, DL, VT);
2348}
2349
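// Fold a signed multiply-high to MULHI_I24 when both operands are known to
// fit in 24 bits.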
Matt Arsenault2712d4a2016-08-27 01:32:27 +00002350SDValue AMDGPUTargetLowering::performMulhsCombine(SDNode *N,
2351 DAGCombinerInfo &DCI) const {
2352 EVT VT = N->getValueType(0);
2353
2354 if (!Subtarget->hasMulI24() || VT.isVector())
2355 return SDValue();
2356
2357 SelectionDAG &DAG = DCI.DAG;
2358 SDLoc DL(N);
2359
2360 SDValue N0 = N->getOperand(0);
2361 SDValue N1 = N->getOperand(1);
2362
2363 if (!isI24(N0, DAG) || !isI24(N1, DAG))
2364 return SDValue();
2365
2366 N0 = DAG.getSExtOrTrunc(N0, DL, MVT::i32);
2367 N1 = DAG.getSExtOrTrunc(N1, DL, MVT::i32);
2368
2369 SDValue Mulhi = DAG.getNode(AMDGPUISD::MULHI_I24, DL, MVT::i32, N0, N1);
2370 DCI.AddToWorklist(Mulhi.getNode());
2371 return DAG.getSExtOrTrunc(Mulhi, DL, VT);
2372}
2373
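// Fold an unsigned multiply-high to MULHI_U24 when both operands are known to
// fit in 24 bits.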
2374SDValue AMDGPUTargetLowering::performMulhuCombine(SDNode *N,
2375 DAGCombinerInfo &DCI) const {
2376 EVT VT = N->getValueType(0);
2377
2378 if (!Subtarget->hasMulU24() || VT.isVector() || VT.getSizeInBits() > 32)
2379 return SDValue();
2380
2381 SelectionDAG &DAG = DCI.DAG;
2382 SDLoc DL(N);
2383
2384 SDValue N0 = N->getOperand(0);
2385 SDValue N1 = N->getOperand(1);
2386
2387 if (!isU24(N0, DAG) || !isU24(N1, DAG))
2388 return SDValue();
2389
2390 N0 = DAG.getZExtOrTrunc(N0, DL, MVT::i32);
2391 N1 = DAG.getZExtOrTrunc(N1, DL, MVT::i32);
2392
2393 SDValue Mulhi = DAG.getNode(AMDGPUISD::MULHI_U24, DL, MVT::i32, N0, N1);
2394 DCI.AddToWorklist(Mulhi.getNode());
2395 return DAG.getZExtOrTrunc(Mulhi, DL, VT);
2396}
2397
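// Split a MUL_LOHI_[IU]24 node into separate MUL_[IU]24 and MULHI_[IU]24 nodes
// once the demanded bits of its 24-bit operands have been simplified.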
2398SDValue AMDGPUTargetLowering::performMulLoHi24Combine(
2399 SDNode *N, DAGCombinerInfo &DCI) const {
2400 SelectionDAG &DAG = DCI.DAG;
2401
2402 SDValue N0 = N->getOperand(0);
2403 SDValue N1 = N->getOperand(1);
2404
2405 // Simplify demanded bits before splitting into multiple users.
2406 if (simplifyI24(N0, DCI) || simplifyI24(N1, DCI))
2407 return SDValue();
2408
2409 bool Signed = (N->getOpcode() == AMDGPUISD::MUL_LOHI_I24);
2410
2411 unsigned MulLoOpc = Signed ? AMDGPUISD::MUL_I24 : AMDGPUISD::MUL_U24;
2412 unsigned MulHiOpc = Signed ? AMDGPUISD::MULHI_I24 : AMDGPUISD::MULHI_U24;
2413
2414 SDLoc SL(N);
2415
2416 SDValue MulLo = DAG.getNode(MulLoOpc, SL, MVT::i32, N0, N1);
2417 SDValue MulHi = DAG.getNode(MulHiOpc, SL, MVT::i32, N0, N1);
2418 return DAG.getMergeValues({ MulLo, MulHi }, SL);
2419}
2420
Matt Arsenaultde5fbe92016-01-11 17:02:00 +00002421static bool isNegativeOne(SDValue Val) {
2422 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Val))
2423 return C->isAllOnesValue();
2424 return false;
2425}
2426
2427static bool isCtlzOpc(unsigned Opc) {
2428 return Opc == ISD::CTLZ || Opc == ISD::CTLZ_ZERO_UNDEF;
2429}
2430
Matt Arsenault5319b0a2016-01-11 17:02:06 +00002431// Get FFBH node if the incoming op may have been type legalized from a smaller
2432// type VT.
2433// Need to match pre-legalized type because the generic legalization inserts the
2434// add/sub between the select and compare.
Benjamin Kramerbdc49562016-06-12 15:39:02 +00002435static SDValue getFFBH_U32(const TargetLowering &TLI, SelectionDAG &DAG,
2436 const SDLoc &SL, SDValue Op) {
Matt Arsenault5319b0a2016-01-11 17:02:06 +00002437 EVT VT = Op.getValueType();
2438 EVT LegalVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT);
2439 if (LegalVT != MVT::i32)
2440 return SDValue();
2441
2442 if (VT != MVT::i32)
2443 Op = DAG.getNode(ISD::ZERO_EXTEND, SL, MVT::i32, Op);
2444
2445 SDValue FFBH = DAG.getNode(AMDGPUISD::FFBH_U32, SL, MVT::i32, Op);
2446 if (VT != MVT::i32)
2447 FFBH = DAG.getNode(ISD::TRUNCATE, SL, VT, FFBH);
2448
2449 return FFBH;
2450}
2451
Matt Arsenaultde5fbe92016-01-11 17:02:00 +00002452// The native instructions return -1 on 0 input. Optimize out a select that
2453// produces -1 on 0.
2454//
2455// TODO: If zero is not undef, we could also do this if the output is compared
2456// against the bitwidth.
2457//
2458// TODO: Should probably combine against FFBH_U32 instead of ctlz directly.
Benjamin Kramerbdc49562016-06-12 15:39:02 +00002459SDValue AMDGPUTargetLowering::performCtlzCombine(const SDLoc &SL, SDValue Cond,
2460 SDValue LHS, SDValue RHS,
Matt Arsenaultde5fbe92016-01-11 17:02:00 +00002461 DAGCombinerInfo &DCI) const {
2462 ConstantSDNode *CmpRhs = dyn_cast<ConstantSDNode>(Cond.getOperand(1));
2463 if (!CmpRhs || !CmpRhs->isNullValue())
2464 return SDValue();
2465
2466 SelectionDAG &DAG = DCI.DAG;
2467 ISD::CondCode CCOpcode = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
2468 SDValue CmpLHS = Cond.getOperand(0);
2469
2470 // select (setcc x, 0, eq), -1, (ctlz_zero_undef x) -> ffbh_u32 x
2471 if (CCOpcode == ISD::SETEQ &&
2472 isCtlzOpc(RHS.getOpcode()) &&
2473 RHS.getOperand(0) == CmpLHS &&
2474 isNegativeOne(LHS)) {
Matt Arsenault5319b0a2016-01-11 17:02:06 +00002475 return getFFBH_U32(*this, DAG, SL, CmpLHS);
Matt Arsenaultde5fbe92016-01-11 17:02:00 +00002476 }
2477
2478 // select (setcc x, 0, ne), (ctlz_zero_undef x), -1 -> ffbh_u32 x
2479 if (CCOpcode == ISD::SETNE &&
2480 isCtlzOpc(LHS.getOpcode()) &&
2481 LHS.getOperand(0) == CmpLHS &&
2482 isNegativeOne(RHS)) {
Matt Arsenault5319b0a2016-01-11 17:02:06 +00002483 return getFFBH_U32(*this, DAG, SL, CmpLHS);
Matt Arsenaultde5fbe92016-01-11 17:02:00 +00002484 }
2485
2486 return SDValue();
2487}
2488
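// Combine a select of a setcc: try the legacy fmin / fmax patterns for f32,
// then the ctlz / ffbh select pattern.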
2489SDValue AMDGPUTargetLowering::performSelectCombine(SDNode *N,
2490 DAGCombinerInfo &DCI) const {
2491 SDValue Cond = N->getOperand(0);
2492 if (Cond.getOpcode() != ISD::SETCC)
2493 return SDValue();
2494
2495 EVT VT = N->getValueType(0);
2496 SDValue LHS = Cond.getOperand(0);
2497 SDValue RHS = Cond.getOperand(1);
2498 SDValue CC = Cond.getOperand(2);
2499
2500 SDValue True = N->getOperand(1);
2501 SDValue False = N->getOperand(2);
2502
Matt Arsenault5b39b342016-01-28 20:53:48 +00002503 if (VT == MVT::f32 && Cond.hasOneUse()) {
2504 SDValue MinMax
2505 = CombineFMinMaxLegacy(SDLoc(N), VT, LHS, RHS, True, False, CC, DCI);
2506 // Revisit this node so we can catch min3/max3/med3 patterns.
2507 //DCI.AddToWorklist(MinMax.getNode());
2508 return MinMax;
2509 }
Matt Arsenaultde5fbe92016-01-11 17:02:00 +00002510
2511 // There's no reason to not do this if the condition has other uses.
Matt Arsenault5319b0a2016-01-11 17:02:06 +00002512 return performCtlzCombine(SDLoc(N), Cond, True, False, DCI);
Matt Arsenaultde5fbe92016-01-11 17:02:00 +00002513}
2514
Tom Stellard50122a52014-04-07 19:45:41 +00002515SDValue AMDGPUTargetLowering::PerformDAGCombine(SDNode *N,
Matt Arsenaultca3976f2014-07-15 02:06:31 +00002516 DAGCombinerInfo &DCI) const {
Tom Stellard50122a52014-04-07 19:45:41 +00002517 SelectionDAG &DAG = DCI.DAG;
2518 SDLoc DL(N);
2519
2520 switch(N->getOpcode()) {
Matt Arsenault24e33d12015-07-03 23:33:38 +00002521 default:
2522 break;
Matt Arsenault79003342016-04-14 21:58:07 +00002523 case ISD::BITCAST: {
2524 EVT DestVT = N->getValueType(0);
2525 if (DestVT.getSizeInBits() != 64 && !DestVT.isVector())
2526 break;
2527
2528 // Fold bitcasts of constants.
2529 //
2530 // v2i32 (bitcast i64:k) -> build_vector lo_32(k), hi_32(k)
2531 // TODO: Generalize and move to DAGCombiner
2532 SDValue Src = N->getOperand(0);
2533 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Src)) {
2534 assert(Src.getValueType() == MVT::i64);
2535 SDLoc SL(N);
2536 uint64_t CVal = C->getZExtValue();
2537 return DAG.getNode(ISD::BUILD_VECTOR, SL, DestVT,
2538 DAG.getConstant(Lo_32(CVal), SL, MVT::i32),
2539 DAG.getConstant(Hi_32(CVal), SL, MVT::i32));
2540 }
2541
2542 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Src)) {
2543 const APInt &Val = C->getValueAPF().bitcastToAPInt();
2544 SDLoc SL(N);
2545 uint64_t CVal = Val.getZExtValue();
2546 SDValue Vec = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32,
2547 DAG.getConstant(Lo_32(CVal), SL, MVT::i32),
2548 DAG.getConstant(Hi_32(CVal), SL, MVT::i32));
2549
2550 return DAG.getNode(ISD::BITCAST, SL, DestVT, Vec);
2551 }
2552
2553 break;
2554 }
Matt Arsenault24692112015-07-14 18:20:33 +00002555 case ISD::SHL: {
2556 if (DCI.getDAGCombineLevel() < AfterLegalizeDAG)
2557 break;
2558
2559 return performShlCombine(N, DCI);
2560 }
Matt Arsenault80edab92016-01-18 21:43:36 +00002561 case ISD::SRL: {
2562 if (DCI.getDAGCombineLevel() < AfterLegalizeDAG)
2563 break;
2564
2565 return performSrlCombine(N, DCI);
2566 }
Matt Arsenault33e3ece2016-01-18 22:09:04 +00002567 case ISD::SRA: {
2568 if (DCI.getDAGCombineLevel() < AfterLegalizeDAG)
2569 break;
2570
2571 return performSraCombine(N, DCI);
2572 }
Matt Arsenault6e3a4512016-01-18 22:01:13 +00002573 case ISD::AND: {
2574 if (DCI.getDAGCombineLevel() < AfterLegalizeDAG)
2575 break;
2576
2577 return performAndCombine(N, DCI);
2578 }
Matt Arsenault24e33d12015-07-03 23:33:38 +00002579 case ISD::MUL:
2580 return performMulCombine(N, DCI);
Matt Arsenault2712d4a2016-08-27 01:32:27 +00002581 case ISD::MULHS:
2582 return performMulhsCombine(N, DCI);
2583 case ISD::MULHU:
2584 return performMulhuCombine(N, DCI);
Matt Arsenault24e33d12015-07-03 23:33:38 +00002585 case AMDGPUISD::MUL_I24:
Matt Arsenault2712d4a2016-08-27 01:32:27 +00002586 case AMDGPUISD::MUL_U24:
2587 case AMDGPUISD::MULHI_I24:
2588 case AMDGPUISD::MULHI_U24: {
Matt Arsenault24e33d12015-07-03 23:33:38 +00002589 SDValue N0 = N->getOperand(0);
2590 SDValue N1 = N->getOperand(1);
2591 simplifyI24(N0, DCI);
2592 simplifyI24(N1, DCI);
2593 return SDValue();
2594 }
Matt Arsenault2712d4a2016-08-27 01:32:27 +00002595 case AMDGPUISD::MUL_LOHI_I24:
2596 case AMDGPUISD::MUL_LOHI_U24:
2597 return performMulLoHi24Combine(N, DCI);
Matt Arsenaultde5fbe92016-01-11 17:02:00 +00002598 case ISD::SELECT:
2599 return performSelectCombine(N, DCI);
Matt Arsenault5565f65e2014-05-22 18:09:07 +00002600 case AMDGPUISD::BFE_I32:
2601 case AMDGPUISD::BFE_U32: {
2602 assert(!N->getValueType(0).isVector() &&
2603 "Vector handling of BFE not implemented");
2604 ConstantSDNode *Width = dyn_cast<ConstantSDNode>(N->getOperand(2));
2605 if (!Width)
2606 break;
2607
2608 uint32_t WidthVal = Width->getZExtValue() & 0x1f;
2609 if (WidthVal == 0)
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00002610 return DAG.getConstant(0, DL, MVT::i32);
Matt Arsenault5565f65e2014-05-22 18:09:07 +00002611
2612 ConstantSDNode *Offset = dyn_cast<ConstantSDNode>(N->getOperand(1));
2613 if (!Offset)
2614 break;
2615
2616 SDValue BitsFrom = N->getOperand(0);
2617 uint32_t OffsetVal = Offset->getZExtValue() & 0x1f;
2618
2619 bool Signed = N->getOpcode() == AMDGPUISD::BFE_I32;
2620
2621 if (OffsetVal == 0) {
2622 // This is already sign / zero extended, so try to fold away extra BFEs.
2623 unsigned SignBits = Signed ? (32 - WidthVal + 1) : (32 - WidthVal);
2624
2625 unsigned OpSignBits = DAG.ComputeNumSignBits(BitsFrom);
2626 if (OpSignBits >= SignBits)
2627 return BitsFrom;
Matt Arsenault05e96f42014-05-22 18:09:12 +00002628
2629 EVT SmallVT = EVT::getIntegerVT(*DAG.getContext(), WidthVal);
2630 if (Signed) {
2631 // This is a sign_extend_inreg. Replace it to take advantage of existing
2632 // DAG Combines. If not eliminated, we will match back to BFE during
2633 // selection.
2634
2635 // TODO: The sext_inreg of extended types ends, although we could
2636 // handle them in a single BFE.
2637 return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i32, BitsFrom,
2638 DAG.getValueType(SmallVT));
2639 }
2640
2641 return DAG.getZeroExtendInReg(BitsFrom, DL, SmallVT);
Matt Arsenault5565f65e2014-05-22 18:09:07 +00002642 }
2643
Matt Arsenaultf1794202014-10-15 05:07:00 +00002644 if (ConstantSDNode *CVal = dyn_cast<ConstantSDNode>(BitsFrom)) {
Matt Arsenault5565f65e2014-05-22 18:09:07 +00002645 if (Signed) {
2646 return constantFoldBFE<int32_t>(DAG,
Matt Arsenault46cbc432014-09-19 00:42:06 +00002647 CVal->getSExtValue(),
Matt Arsenault5565f65e2014-05-22 18:09:07 +00002648 OffsetVal,
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00002649 WidthVal,
2650 DL);
Matt Arsenault5565f65e2014-05-22 18:09:07 +00002651 }
2652
2653 return constantFoldBFE<uint32_t>(DAG,
Matt Arsenault6462f942014-09-18 15:52:26 +00002654 CVal->getZExtValue(),
Matt Arsenault5565f65e2014-05-22 18:09:07 +00002655 OffsetVal,
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00002656 WidthVal,
2657 DL);
Matt Arsenault5565f65e2014-05-22 18:09:07 +00002658 }
2659
Matt Arsenault05e96f42014-05-22 18:09:12 +00002660 if ((OffsetVal + WidthVal) >= 32) {
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00002661 SDValue ShiftVal = DAG.getConstant(OffsetVal, DL, MVT::i32);
Matt Arsenault05e96f42014-05-22 18:09:12 +00002662 return DAG.getNode(Signed ? ISD::SRA : ISD::SRL, DL, MVT::i32,
2663 BitsFrom, ShiftVal);
2664 }
2665
Matt Arsenault7b68fdf2014-10-15 17:58:34 +00002666 if (BitsFrom.hasOneUse()) {
Matt Arsenault6de7af42014-10-15 23:37:42 +00002667 APInt Demanded = APInt::getBitsSet(32,
2668 OffsetVal,
2669 OffsetVal + WidthVal);
2670
Matt Arsenault7b68fdf2014-10-15 17:58:34 +00002671 APInt KnownZero, KnownOne;
2672 TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
2673 !DCI.isBeforeLegalizeOps());
2674 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2675 if (TLO.ShrinkDemandedConstant(BitsFrom, Demanded) ||
2676 TLI.SimplifyDemandedBits(BitsFrom, Demanded,
2677 KnownZero, KnownOne, TLO)) {
2678 DCI.CommitTargetLoweringOpt(TLO);
2679 }
Matt Arsenault5565f65e2014-05-22 18:09:07 +00002680 }
2681
2682 break;
2683 }
Matt Arsenault327bb5a2016-07-01 22:47:50 +00002684 case ISD::LOAD:
2685 return performLoadCombine(N, DCI);
Matt Arsenaultca3976f2014-07-15 02:06:31 +00002686 case ISD::STORE:
2687 return performStoreCombine(N, DCI);
Tom Stellard50122a52014-04-07 19:45:41 +00002688 }
2689 return SDValue();
2690}
2691
2692//===----------------------------------------------------------------------===//
Tom Stellard75aadc22012-12-11 21:25:42 +00002693// Helper functions
2694//===----------------------------------------------------------------------===//
2695
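// Rebuild the incoming argument list using the original IR argument types,
// undoing the splitting / promotion performed by the generic argument lowering.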
Tom Stellardaf775432013-10-23 00:44:32 +00002696void AMDGPUTargetLowering::getOriginalFunctionArgs(
2697 SelectionDAG &DAG,
2698 const Function *F,
2699 const SmallVectorImpl<ISD::InputArg> &Ins,
2700 SmallVectorImpl<ISD::InputArg> &OrigIns) const {
2701
2702 for (unsigned i = 0, e = Ins.size(); i < e; ++i) {
2703 if (Ins[i].ArgVT == Ins[i].VT) {
2704 OrigIns.push_back(Ins[i]);
2705 continue;
2706 }
2707
2708 EVT VT;
2709 if (Ins[i].ArgVT.isVector() && !Ins[i].VT.isVector()) {
2710 // Vector has been split into scalars.
2711 VT = Ins[i].ArgVT.getVectorElementType();
2712 } else if (Ins[i].VT.isVector() && Ins[i].ArgVT.isVector() &&
2713 Ins[i].ArgVT.getVectorElementType() !=
2714 Ins[i].VT.getVectorElementType()) {
2715 // Vector elements have been promoted
2716 VT = Ins[i].ArgVT;
2717 } else {
2718 // Vector has been split into smaller vectors.
2719 VT = Ins[i].VT;
2720 }
2721
2722 ISD::InputArg Arg(Ins[i].Flags, VT, VT, Ins[i].Used,
2723 Ins[i].OrigArgIndex, Ins[i].PartOffset);
2724 OrigIns.push_back(Arg);
2725 }
2726}
2727
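// Return a register node for the given physical register, adding it as a
// function live-in (or reusing the existing live-in virtual register).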
Tom Stellard75aadc22012-12-11 21:25:42 +00002728SDValue AMDGPUTargetLowering::CreateLiveInRegister(SelectionDAG &DAG,
2729 const TargetRegisterClass *RC,
2730 unsigned Reg, EVT VT) const {
2731 MachineFunction &MF = DAG.getMachineFunction();
2732 MachineRegisterInfo &MRI = MF.getRegInfo();
2733 unsigned VirtualRegister;
2734 if (!MRI.isLiveIn(Reg)) {
2735 VirtualRegister = MRI.createVirtualRegister(RC);
2736 MRI.addLiveIn(Reg, VirtualRegister);
2737 } else {
2738 VirtualRegister = MRI.getLiveInVirtReg(Reg);
2739 }
2740 return DAG.getRegister(VirtualRegister, VT);
2741}
2742
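// Return the byte offset of an implicit parameter, which is laid out
// immediately after the explicit kernel arguments.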
Tom Stellarddcb9f092015-07-09 21:20:37 +00002743uint32_t AMDGPUTargetLowering::getImplicitParameterOffset(
2744 const AMDGPUMachineFunction *MFI, const ImplicitParameter Param) const {
Matt Arsenault52ef4012016-07-26 16:45:58 +00002745 uint64_t ArgOffset = MFI->getABIArgOffset();
Tom Stellarddcb9f092015-07-09 21:20:37 +00002746 switch (Param) {
2747 case GRID_DIM:
2748 return ArgOffset;
2749 case GRID_OFFSET:
2750 return ArgOffset + 4;
2751 }
2752 llvm_unreachable("unexpected implicit parameter type");
2753}
2754
Tom Stellard75aadc22012-12-11 21:25:42 +00002755#define NODE_NAME_CASE(node) case AMDGPUISD::node: return #node;
2756
2757const char* AMDGPUTargetLowering::getTargetNodeName(unsigned Opcode) const {
Matthias Braund04893f2015-05-07 21:33:59 +00002758 switch ((AMDGPUISD::NodeType)Opcode) {
2759 case AMDGPUISD::FIRST_NUMBER: break;
Tom Stellard75aadc22012-12-11 21:25:42 +00002760 // AMDIL DAG nodes
Tom Stellard75aadc22012-12-11 21:25:42 +00002761 NODE_NAME_CASE(CALL);
2762 NODE_NAME_CASE(UMUL);
Tom Stellard75aadc22012-12-11 21:25:42 +00002763 NODE_NAME_CASE(BRANCH_COND);
2764
2765 // AMDGPU DAG nodes
Matt Arsenault9babdf42016-06-22 20:15:28 +00002766 NODE_NAME_CASE(ENDPGM)
2767 NODE_NAME_CASE(RETURN)
Tom Stellard75aadc22012-12-11 21:25:42 +00002768 NODE_NAME_CASE(DWORDADDR)
2769 NODE_NAME_CASE(FRACT)
Wei Ding07e03712016-07-28 16:42:13 +00002770 NODE_NAME_CASE(SETCC)
Matt Arsenault5d47d4a2014-06-12 21:15:44 +00002771 NODE_NAME_CASE(CLAMP)
Matthias Braund04893f2015-05-07 21:33:59 +00002772 NODE_NAME_CASE(COS_HW)
2773 NODE_NAME_CASE(SIN_HW)
Matt Arsenaultda59f3d2014-11-13 23:03:09 +00002774 NODE_NAME_CASE(FMAX_LEGACY)
Matt Arsenaultda59f3d2014-11-13 23:03:09 +00002775 NODE_NAME_CASE(FMIN_LEGACY)
Matt Arsenaultcc3c2b32014-11-14 20:08:52 +00002776 NODE_NAME_CASE(FMAX3)
2777 NODE_NAME_CASE(SMAX3)
2778 NODE_NAME_CASE(UMAX3)
2779 NODE_NAME_CASE(FMIN3)
2780 NODE_NAME_CASE(SMIN3)
2781 NODE_NAME_CASE(UMIN3)
Matt Arsenaultf639c322016-01-28 20:53:42 +00002782 NODE_NAME_CASE(FMED3)
2783 NODE_NAME_CASE(SMED3)
2784 NODE_NAME_CASE(UMED3)
Matt Arsenaulta0050b02014-06-19 01:19:19 +00002785 NODE_NAME_CASE(URECIP)
2786 NODE_NAME_CASE(DIV_SCALE)
2787 NODE_NAME_CASE(DIV_FMAS)
2788 NODE_NAME_CASE(DIV_FIXUP)
2789 NODE_NAME_CASE(TRIG_PREOP)
2790 NODE_NAME_CASE(RCP)
2791 NODE_NAME_CASE(RSQ)
Matt Arsenault32fc5272016-07-26 16:45:45 +00002792 NODE_NAME_CASE(RCP_LEGACY)
Matt Arsenault257d48d2014-06-24 22:13:39 +00002793 NODE_NAME_CASE(RSQ_LEGACY)
Matt Arsenault32fc5272016-07-26 16:45:45 +00002794 NODE_NAME_CASE(FMUL_LEGACY)
Matt Arsenault79963e82016-02-13 01:03:00 +00002795 NODE_NAME_CASE(RSQ_CLAMP)
Matt Arsenault2e7cc482014-08-15 17:30:25 +00002796 NODE_NAME_CASE(LDEXP)
Matt Arsenault4831ce52015-01-06 23:00:37 +00002797 NODE_NAME_CASE(FP_CLASS)
Matt Arsenaulta0050b02014-06-19 01:19:19 +00002798 NODE_NAME_CASE(DOT4)
Matthias Braund04893f2015-05-07 21:33:59 +00002799 NODE_NAME_CASE(CARRY)
2800 NODE_NAME_CASE(BORROW)
Matt Arsenaultfae02982014-03-17 18:58:11 +00002801 NODE_NAME_CASE(BFE_U32)
2802 NODE_NAME_CASE(BFE_I32)
Matt Arsenaultb3458362014-03-31 18:21:13 +00002803 NODE_NAME_CASE(BFI)
2804 NODE_NAME_CASE(BFM)
Matt Arsenaultde5fbe92016-01-11 17:02:00 +00002805 NODE_NAME_CASE(FFBH_U32)
Matt Arsenaultb51dcb92016-07-18 18:40:51 +00002806 NODE_NAME_CASE(FFBH_I32)
Tom Stellard50122a52014-04-07 19:45:41 +00002807 NODE_NAME_CASE(MUL_U24)
2808 NODE_NAME_CASE(MUL_I24)
Matt Arsenault2712d4a2016-08-27 01:32:27 +00002809 NODE_NAME_CASE(MULHI_U24)
2810 NODE_NAME_CASE(MULHI_I24)
2811 NODE_NAME_CASE(MUL_LOHI_U24)
2812 NODE_NAME_CASE(MUL_LOHI_I24)
Matt Arsenaulteb260202014-05-22 18:00:15 +00002813 NODE_NAME_CASE(MAD_U24)
2814 NODE_NAME_CASE(MAD_I24)
Matthias Braund04893f2015-05-07 21:33:59 +00002815 NODE_NAME_CASE(TEXTURE_FETCH)
Tom Stellard75aadc22012-12-11 21:25:42 +00002816 NODE_NAME_CASE(EXPORT)
Tom Stellardff62c352013-01-23 02:09:03 +00002817 NODE_NAME_CASE(CONST_ADDRESS)
Tom Stellardf3b2a1e2013-02-06 17:32:29 +00002818 NODE_NAME_CASE(REGISTER_LOAD)
2819 NODE_NAME_CASE(REGISTER_STORE)
Tom Stellard9fa17912013-08-14 23:24:45 +00002820 NODE_NAME_CASE(LOAD_INPUT)
2821 NODE_NAME_CASE(SAMPLE)
2822 NODE_NAME_CASE(SAMPLEB)
2823 NODE_NAME_CASE(SAMPLED)
2824 NODE_NAME_CASE(SAMPLEL)
Matt Arsenault364a6742014-06-11 17:50:44 +00002825 NODE_NAME_CASE(CVT_F32_UBYTE0)
2826 NODE_NAME_CASE(CVT_F32_UBYTE1)
2827 NODE_NAME_CASE(CVT_F32_UBYTE2)
2828 NODE_NAME_CASE(CVT_F32_UBYTE3)
Tom Stellard880a80a2014-06-17 16:53:14 +00002829 NODE_NAME_CASE(BUILD_VERTICAL_VECTOR)
Tom Stellard067c8152014-07-21 14:01:14 +00002830 NODE_NAME_CASE(CONST_DATA_PTR)
Tom Stellardbf3e6e52016-06-14 20:29:59 +00002831 NODE_NAME_CASE(PC_ADD_REL_OFFSET)
Matt Arsenault03006fd2016-07-19 16:27:56 +00002832 NODE_NAME_CASE(KILL)
Matthias Braund04893f2015-05-07 21:33:59 +00002833 case AMDGPUISD::FIRST_MEM_OPCODE_NUMBER: break;
Tom Stellardfc92e772015-05-12 14:18:14 +00002834 NODE_NAME_CASE(SENDMSG)
Tom Stellard2a9d9472015-05-12 15:00:46 +00002835 NODE_NAME_CASE(INTERP_MOV)
2836 NODE_NAME_CASE(INTERP_P1)
2837 NODE_NAME_CASE(INTERP_P2)
Tom Stellardd3ee8c12013-08-16 01:12:06 +00002838 NODE_NAME_CASE(STORE_MSKOR)
Matt Arsenaultdfaf4262016-04-25 19:27:09 +00002839 NODE_NAME_CASE(LOAD_CONSTANT)
Tom Stellardafcf12f2013-09-12 02:55:14 +00002840 NODE_NAME_CASE(TBUFFER_STORE_FORMAT)
Tom Stellard354a43c2016-04-01 18:27:37 +00002841 NODE_NAME_CASE(ATOMIC_CMP_SWAP)
Matt Arsenaulta9dbdca2016-04-12 14:05:04 +00002842 NODE_NAME_CASE(ATOMIC_INC)
2843 NODE_NAME_CASE(ATOMIC_DEC)
Matthias Braund04893f2015-05-07 21:33:59 +00002844 case AMDGPUISD::LAST_AMDGPU_ISD_NUMBER: break;
Tom Stellard75aadc22012-12-11 21:25:42 +00002845 }
Matthias Braund04893f2015-05-07 21:33:59 +00002846 return nullptr;
Tom Stellard75aadc22012-12-11 21:25:42 +00002847}
Matt Arsenault0c274fe2014-03-25 18:18:27 +00002848
Matt Arsenaulte93d06a2015-01-13 20:53:18 +00002849SDValue AMDGPUTargetLowering::getRsqrtEstimate(SDValue Operand,
2850 DAGCombinerInfo &DCI,
2851 unsigned &RefinementSteps,
2852 bool &UseOneConstNR) const {
2853 SelectionDAG &DAG = DCI.DAG;
2854 EVT VT = Operand.getValueType();
2855
2856 if (VT == MVT::f32) {
2857 RefinementSteps = 0;
2858 return DAG.getNode(AMDGPUISD::RSQ, SDLoc(Operand), VT, Operand);
2859 }
2860
2861 // TODO: There is also an f64 rsq instruction, but the documentation is less
2862 // clear on its precision.
2863
2864 return SDValue();
2865}
2866
Matt Arsenaultbf0db912015-01-13 20:53:23 +00002867SDValue AMDGPUTargetLowering::getRecipEstimate(SDValue Operand,
2868 DAGCombinerInfo &DCI,
2869 unsigned &RefinementSteps) const {
2870 SelectionDAG &DAG = DCI.DAG;
2871 EVT VT = Operand.getValueType();
2872
2873 if (VT == MVT::f32) {
2874 // Reciprocal, < 1 ulp error.
2875 //
2876 // This reciprocal approximation converges to < 0.5 ulp error with one
2877 // Newton-Raphson iteration performed with two fused multiply-adds (FMAs).
2878
2879 RefinementSteps = 0;
2880 return DAG.getNode(AMDGPUISD::RCP, SDLoc(Operand), VT, Operand);
2881 }
2882
2883 // TODO: There is also an f64 rcp instruction, but the documentation is less
2884 // clear on its precision.
2885
2886 return SDValue();
2887}
2888
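// Compute known bits for AMDGPU-specific nodes: CARRY / BORROW produce a 0 or 1
// result, and BFE_U32 clears all bits above the extracted width.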
Jay Foada0653a32014-05-14 21:14:37 +00002889void AMDGPUTargetLowering::computeKnownBitsForTargetNode(
Matt Arsenault0c274fe2014-03-25 18:18:27 +00002890 const SDValue Op,
2891 APInt &KnownZero,
2892 APInt &KnownOne,
2893 const SelectionDAG &DAG,
2894 unsigned Depth) const {
Matt Arsenault378bf9c2014-03-31 19:35:33 +00002895
Matt Arsenault0c274fe2014-03-25 18:18:27 +00002896 KnownZero = KnownOne = APInt(KnownOne.getBitWidth(), 0); // Don't know anything.
Matt Arsenaultaf6df9d2014-05-22 18:09:00 +00002897
2898 APInt KnownZero2;
2899 APInt KnownOne2;
Matt Arsenault378bf9c2014-03-31 19:35:33 +00002900 unsigned Opc = Op.getOpcode();
Matt Arsenaultaf6df9d2014-05-22 18:09:00 +00002901
Matt Arsenault378bf9c2014-03-31 19:35:33 +00002902 switch (Opc) {
Matt Arsenaultaf6df9d2014-05-22 18:09:00 +00002903 default:
2904 break;
Jan Vesely808fff52015-04-30 17:15:56 +00002905 case AMDGPUISD::CARRY:
2906 case AMDGPUISD::BORROW: {
2907 KnownZero = APInt::getHighBitsSet(32, 31);
2908 break;
2909 }
2910
Matt Arsenaultaf6df9d2014-05-22 18:09:00 +00002911 case AMDGPUISD::BFE_I32:
2912 case AMDGPUISD::BFE_U32: {
2913 ConstantSDNode *CWidth = dyn_cast<ConstantSDNode>(Op.getOperand(2));
2914 if (!CWidth)
2915 return;
2916
2917 unsigned BitWidth = 32;
2918 uint32_t Width = CWidth->getZExtValue() & 0x1f;
Matt Arsenaultaf6df9d2014-05-22 18:09:00 +00002919
Matt Arsenaulta3fe7c62014-10-16 20:07:40 +00002920 if (Opc == AMDGPUISD::BFE_U32)
Matt Arsenaultaf6df9d2014-05-22 18:09:00 +00002921 KnownZero = APInt::getHighBitsSet(BitWidth, BitWidth - Width);
2922
Matt Arsenault378bf9c2014-03-31 19:35:33 +00002923 break;
2924 }
Matt Arsenaultaf6df9d2014-05-22 18:09:00 +00002925 }
Matt Arsenault0c274fe2014-03-25 18:18:27 +00002926}
Matt Arsenaultbf8694d2014-05-22 18:09:03 +00002927
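// Report sign bits for AMDGPU-specific nodes: BFE results are sign / zero
// extended from the extracted width, and CARRY / BORROW produce a 0 or 1 result.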
2928unsigned AMDGPUTargetLowering::ComputeNumSignBitsForTargetNode(
2929 SDValue Op,
2930 const SelectionDAG &DAG,
2931 unsigned Depth) const {
2932 switch (Op.getOpcode()) {
2933 case AMDGPUISD::BFE_I32: {
2934 ConstantSDNode *Width = dyn_cast<ConstantSDNode>(Op.getOperand(2));
2935 if (!Width)
2936 return 1;
2937
2938 unsigned SignBits = 32 - Width->getZExtValue() + 1;
Artyom Skrobov314ee042015-11-25 19:41:11 +00002939 if (!isNullConstant(Op.getOperand(1)))
Matt Arsenaultbf8694d2014-05-22 18:09:03 +00002940 return SignBits;
2941
2942 // TODO: Could probably figure something out with non-0 offsets.
2943 unsigned Op0SignBits = DAG.ComputeNumSignBits(Op.getOperand(0), Depth + 1);
2944 return std::max(SignBits, Op0SignBits);
2945 }
2946
Matt Arsenault5565f65e2014-05-22 18:09:07 +00002947 case AMDGPUISD::BFE_U32: {
2948 ConstantSDNode *Width = dyn_cast<ConstantSDNode>(Op.getOperand(2));
2949 return Width ? 32 - (Width->getZExtValue() & 0x1f) : 1;
2950 }
2951
Jan Vesely808fff52015-04-30 17:15:56 +00002952 case AMDGPUISD::CARRY:
2953 case AMDGPUISD::BORROW:
2954 return 31;
2955
Matt Arsenaultbf8694d2014-05-22 18:09:03 +00002956 default:
2957 return 1;
2958 }
2959}