//===-- SIISelLowering.cpp - SI DAG Lowering Implementation ---------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief Custom DAG lowering for SI
//
//===----------------------------------------------------------------------===//

#ifdef _MSC_VER
// Provide M_PI.
#define _USE_MATH_DEFINES
#include <cmath>
#endif

#include "AMDGPU.h"
#include "AMDGPUIntrinsicInfo.h"
#include "AMDGPUSubtarget.h"
#include "SIISelLowering.h"
#include "SIInstrInfo.h"
#include "SIMachineFunctionInfo.h"
#include "SIRegisterInfo.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Function.h"

using namespace llvm;

static cl::opt<bool> EnableVGPRIndexMode(
  "amdgpu-vgpr-index-mode",
  cl::desc("Use GPR indexing mode instead of movrel for vector indexing"),
  cl::init(false));
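// Note: as a cl::opt this can be toggled on the llc command line, e.g.
// "llc -march=amdgcn -amdgpu-vgpr-index-mode ..." (illustrative invocation,
// not taken from this file).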
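// Find the lowest-numbered SGPR that has not yet been allocated by the
// calling-convention state. Used below to pick a system SGPR (e.g. the
// private segment wave byte offset for shaders) when no fixed register was
// preassigned.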
static unsigned findFirstFreeSGPR(CCState &CCInfo) {
  unsigned NumSGPRs = AMDGPU::SGPR_32RegClass.getNumRegs();
  for (unsigned Reg = 0; Reg < NumSGPRs; ++Reg) {
    if (!CCInfo.isAllocated(AMDGPU::SGPR0 + Reg)) {
      return AMDGPU::SGPR0 + Reg;
    }
  }
  llvm_unreachable("Cannot allocate sgpr");
}

SITargetLowering::SITargetLowering(const TargetMachine &TM,
                                   const SISubtarget &STI)
    : AMDGPUTargetLowering(TM, STI) {
  addRegisterClass(MVT::i1, &AMDGPU::VReg_1RegClass);
  addRegisterClass(MVT::i64, &AMDGPU::SReg_64RegClass);

  addRegisterClass(MVT::i32, &AMDGPU::SReg_32RegClass);
  addRegisterClass(MVT::f32, &AMDGPU::VGPR_32RegClass);

  addRegisterClass(MVT::f64, &AMDGPU::VReg_64RegClass);
  addRegisterClass(MVT::v2i32, &AMDGPU::SReg_64RegClass);
  addRegisterClass(MVT::v2f32, &AMDGPU::VReg_64RegClass);

  addRegisterClass(MVT::v2i64, &AMDGPU::SReg_128RegClass);
  addRegisterClass(MVT::v2f64, &AMDGPU::SReg_128RegClass);

  addRegisterClass(MVT::v4i32, &AMDGPU::SReg_128RegClass);
  addRegisterClass(MVT::v4f32, &AMDGPU::VReg_128RegClass);

  addRegisterClass(MVT::v8i32, &AMDGPU::SReg_256RegClass);
  addRegisterClass(MVT::v8f32, &AMDGPU::VReg_256RegClass);

  addRegisterClass(MVT::v16i32, &AMDGPU::SReg_512RegClass);
  addRegisterClass(MVT::v16f32, &AMDGPU::VReg_512RegClass);

  if (Subtarget->has16BitInsts()) {
    addRegisterClass(MVT::i16, &AMDGPU::SReg_32RegClass);
    addRegisterClass(MVT::f16, &AMDGPU::SReg_32RegClass);
  }

  computeRegisterProperties(STI.getRegisterInfo());

  // We need to custom lower vector loads and stores from local memory.
  setOperationAction(ISD::LOAD, MVT::v2i32, Custom);
  setOperationAction(ISD::LOAD, MVT::v4i32, Custom);
  setOperationAction(ISD::LOAD, MVT::v8i32, Custom);
  setOperationAction(ISD::LOAD, MVT::v16i32, Custom);
  setOperationAction(ISD::LOAD, MVT::i1, Custom);

  setOperationAction(ISD::STORE, MVT::v2i32, Custom);
  setOperationAction(ISD::STORE, MVT::v4i32, Custom);
  setOperationAction(ISD::STORE, MVT::v8i32, Custom);
  setOperationAction(ISD::STORE, MVT::v16i32, Custom);
  setOperationAction(ISD::STORE, MVT::i1, Custom);

  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
  setOperationAction(ISD::ConstantPool, MVT::v2i64, Expand);

  setOperationAction(ISD::SELECT, MVT::i1, Promote);
  setOperationAction(ISD::SELECT, MVT::i64, Custom);
  setOperationAction(ISD::SELECT, MVT::f64, Promote);
  AddPromotedToType(ISD::SELECT, MVT::f64, MVT::i64);

  setOperationAction(ISD::SELECT_CC, MVT::f32, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i32, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i64, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i1, Expand);

  setOperationAction(ISD::SETCC, MVT::i1, Promote);
  setOperationAction(ISD::SETCC, MVT::v2i1, Expand);
  setOperationAction(ISD::SETCC, MVT::v4i1, Expand);

  setOperationAction(ISD::TRUNCATE, MVT::v2i32, Expand);
  setOperationAction(ISD::FP_ROUND, MVT::v2f32, Expand);

  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i1, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i1, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i8, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i8, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i16, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i16, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::Other, Custom);

  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::f32, Custom);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::v4f32, Custom);
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);

  setOperationAction(ISD::BRCOND, MVT::Other, Custom);
  setOperationAction(ISD::BR_CC, MVT::i1, Expand);
  setOperationAction(ISD::BR_CC, MVT::i32, Expand);
  setOperationAction(ISD::BR_CC, MVT::i64, Expand);
  setOperationAction(ISD::BR_CC, MVT::f32, Expand);
  setOperationAction(ISD::BR_CC, MVT::f64, Expand);

  // We only support LOAD/STORE and vector manipulation ops for vectors
  // with > 4 elements.
  for (MVT VT : {MVT::v8i32, MVT::v8f32, MVT::v16i32, MVT::v16f32,
                 MVT::v2i64, MVT::v2f64}) {
    for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op) {
      switch (Op) {
      case ISD::LOAD:
      case ISD::STORE:
      case ISD::BUILD_VECTOR:
      case ISD::BITCAST:
      case ISD::EXTRACT_VECTOR_ELT:
      case ISD::INSERT_VECTOR_ELT:
      case ISD::INSERT_SUBVECTOR:
      case ISD::EXTRACT_SUBVECTOR:
      case ISD::SCALAR_TO_VECTOR:
        break;
      case ISD::CONCAT_VECTORS:
        setOperationAction(Op, VT, Custom);
        break;
      default:
        setOperationAction(Op, VT, Expand);
        break;
      }
    }
  }

  // TODO: For dynamic 64-bit vector inserts/extracts, we should emit a pseudo
  // that is expanded to avoid having two separate loops in case the index is
  // a VGPR.

  // Most operations are naturally 32-bit vector operations. We only support
  // load and store of i64 vectors, so promote v2i64 vector operations to v4i32.
  for (MVT Vec64 : { MVT::v2i64, MVT::v2f64 }) {
    setOperationAction(ISD::BUILD_VECTOR, Vec64, Promote);
    AddPromotedToType(ISD::BUILD_VECTOR, Vec64, MVT::v4i32);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT, Vec64, Promote);
    AddPromotedToType(ISD::EXTRACT_VECTOR_ELT, Vec64, MVT::v4i32);

    setOperationAction(ISD::INSERT_VECTOR_ELT, Vec64, Promote);
    AddPromotedToType(ISD::INSERT_VECTOR_ELT, Vec64, MVT::v4i32);

    setOperationAction(ISD::SCALAR_TO_VECTOR, Vec64, Promote);
    AddPromotedToType(ISD::SCALAR_TO_VECTOR, Vec64, MVT::v4i32);
  }

  setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8i32, Expand);
  setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8f32, Expand);
  setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i32, Expand);
  setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16f32, Expand);

  // BUFFER/FLAT_ATOMIC_CMP_SWAP on GCN GPUs needs input marshalling and
  // output demarshalling.
  setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i64, Custom);

  // We can't return success/failure, only the old value; let LLVM add the
  // comparison.
  setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i32, Expand);
  setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i64, Expand);

  if (getSubtarget()->hasFlatAddressSpace()) {
    setOperationAction(ISD::ADDRSPACECAST, MVT::i32, Custom);
    setOperationAction(ISD::ADDRSPACECAST, MVT::i64, Custom);
  }

  setOperationAction(ISD::BSWAP, MVT::i32, Legal);
  setOperationAction(ISD::BITREVERSE, MVT::i32, Legal);

  // This is s_memtime on SI, and s_memrealtime on VI.
  setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Legal);
  setOperationAction(ISD::TRAP, MVT::Other, Custom);

  setOperationAction(ISD::FMINNUM, MVT::f64, Legal);
  setOperationAction(ISD::FMAXNUM, MVT::f64, Legal);

  if (Subtarget->getGeneration() >= SISubtarget::SEA_ISLANDS) {
    setOperationAction(ISD::FTRUNC, MVT::f64, Legal);
    setOperationAction(ISD::FCEIL, MVT::f64, Legal);
    setOperationAction(ISD::FRINT, MVT::f64, Legal);
  }

  setOperationAction(ISD::FFLOOR, MVT::f64, Legal);

  setOperationAction(ISD::FSIN, MVT::f32, Custom);
  setOperationAction(ISD::FCOS, MVT::f32, Custom);
  setOperationAction(ISD::FDIV, MVT::f32, Custom);
  setOperationAction(ISD::FDIV, MVT::f64, Custom);

  if (Subtarget->has16BitInsts()) {
    setOperationAction(ISD::Constant, MVT::i16, Legal);

    setOperationAction(ISD::SMIN, MVT::i16, Legal);
    setOperationAction(ISD::SMAX, MVT::i16, Legal);

    setOperationAction(ISD::UMIN, MVT::i16, Legal);
    setOperationAction(ISD::UMAX, MVT::i16, Legal);

    setOperationAction(ISD::SETCC, MVT::i16, Promote);
    AddPromotedToType(ISD::SETCC, MVT::i16, MVT::i32);

    setOperationAction(ISD::SIGN_EXTEND, MVT::i16, Promote);
    AddPromotedToType(ISD::SIGN_EXTEND, MVT::i16, MVT::i32);

    setOperationAction(ISD::ROTR, MVT::i16, Promote);
    setOperationAction(ISD::ROTL, MVT::i16, Promote);

    setOperationAction(ISD::SDIV, MVT::i16, Promote);
    setOperationAction(ISD::UDIV, MVT::i16, Promote);
    setOperationAction(ISD::SREM, MVT::i16, Promote);
    setOperationAction(ISD::UREM, MVT::i16, Promote);

    setOperationAction(ISD::BSWAP, MVT::i16, Promote);
    setOperationAction(ISD::BITREVERSE, MVT::i16, Promote);

    setOperationAction(ISD::CTTZ, MVT::i16, Promote);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i16, Promote);
    setOperationAction(ISD::CTLZ, MVT::i16, Promote);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i16, Promote);

    setOperationAction(ISD::SELECT_CC, MVT::i16, Expand);

    setOperationAction(ISD::BR_CC, MVT::i16, Expand);

    setOperationAction(ISD::LOAD, MVT::i16, Custom);

    setTruncStoreAction(MVT::i64, MVT::i16, Expand);

    setOperationAction(ISD::FP16_TO_FP, MVT::i16, Promote);
    AddPromotedToType(ISD::FP16_TO_FP, MVT::i16, MVT::i32);
    setOperationAction(ISD::FP_TO_FP16, MVT::i16, Promote);
    AddPromotedToType(ISD::FP_TO_FP16, MVT::i16, MVT::i32);

    setOperationAction(ISD::FP_TO_SINT, MVT::i16, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::i16, Custom);
    setOperationAction(ISD::SINT_TO_FP, MVT::i16, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::i16, Custom);

    // F16 - Constant Actions.
    setOperationAction(ISD::ConstantFP, MVT::f16, Custom);

    // F16 - Load/Store Actions.
    setOperationAction(ISD::LOAD, MVT::f16, Promote);
    AddPromotedToType(ISD::LOAD, MVT::f16, MVT::i16);
    setOperationAction(ISD::STORE, MVT::f16, Promote);
    AddPromotedToType(ISD::STORE, MVT::f16, MVT::i16);

    // F16 - VOP1 Actions.
    setOperationAction(ISD::FCOS, MVT::f16, Promote);
    setOperationAction(ISD::FSIN, MVT::f16, Promote);

    // F16 - VOP2 Actions.
    setOperationAction(ISD::FMAXNUM, MVT::f16, Legal);
    setOperationAction(ISD::FMINNUM, MVT::f16, Legal);
    setOperationAction(ISD::FDIV, MVT::f16, Promote);

    // F16 - VOP3 Actions.
    setOperationAction(ISD::FMA, MVT::f16, Legal);
    if (!Subtarget->hasFP16Denormals())
      setOperationAction(ISD::FMAD, MVT::f16, Legal);
  }

  setTargetDAGCombine(ISD::FADD);
  setTargetDAGCombine(ISD::FSUB);
  setTargetDAGCombine(ISD::FMINNUM);
  setTargetDAGCombine(ISD::FMAXNUM);
  setTargetDAGCombine(ISD::SMIN);
  setTargetDAGCombine(ISD::SMAX);
  setTargetDAGCombine(ISD::UMIN);
  setTargetDAGCombine(ISD::UMAX);
  setTargetDAGCombine(ISD::SETCC);
  setTargetDAGCombine(ISD::AND);
  setTargetDAGCombine(ISD::OR);
  setTargetDAGCombine(ISD::XOR);
  setTargetDAGCombine(ISD::SINT_TO_FP);
  setTargetDAGCombine(ISD::UINT_TO_FP);
  setTargetDAGCombine(ISD::FCANONICALIZE);

  // All memory operations. Some folding on the pointer operand is done to help
  // match the constant offsets in the addressing modes.
  setTargetDAGCombine(ISD::LOAD);
  setTargetDAGCombine(ISD::STORE);
  setTargetDAGCombine(ISD::ATOMIC_LOAD);
  setTargetDAGCombine(ISD::ATOMIC_STORE);
  setTargetDAGCombine(ISD::ATOMIC_CMP_SWAP);
  setTargetDAGCombine(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS);
  setTargetDAGCombine(ISD::ATOMIC_SWAP);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_ADD);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_SUB);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_AND);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_OR);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_XOR);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_NAND);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_MIN);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_MAX);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_UMIN);
  setTargetDAGCombine(ISD::ATOMIC_LOAD_UMAX);

  setSchedulingPreference(Sched::RegPressure);
}

const SISubtarget *SITargetLowering::getSubtarget() const {
  return static_cast<const SISubtarget *>(Subtarget);
}

//===----------------------------------------------------------------------===//
// TargetLowering queries
//===----------------------------------------------------------------------===//

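// Describe target intrinsics that touch memory so the SelectionDAG builder
// can attach a MachineMemOperand to them. amdgcn.atomic.inc/dec both read
// and write the memory at their pointer operand.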
bool SITargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
                                          const CallInst &CI,
                                          unsigned IntrID) const {
  switch (IntrID) {
  case Intrinsic::amdgcn_atomic_inc:
  case Intrinsic::amdgcn_atomic_dec:
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = MVT::getVT(CI.getType());
    Info.ptrVal = CI.getOperand(0);
    Info.align = 0;
    Info.vol = false;
    Info.readMem = true;
    Info.writeMem = true;
    return true;
  default:
    return false;
  }
}

bool SITargetLowering::isShuffleMaskLegal(const SmallVectorImpl<int> &,
                                          EVT) const {
  // SI has some legal vector types, but no legal vector operations. Say no
  // shuffles are legal in order to prefer scalarizing some vector operations.
  return false;
}

bool SITargetLowering::isLegalFlatAddressingMode(const AddrMode &AM) const {
  // Flat instructions do not have offsets, and only have the register
  // address.
  return AM.BaseOffs == 0 && (AM.Scale == 0 || AM.Scale == 1);
}

bool SITargetLowering::isLegalMUBUFAddressingMode(const AddrMode &AM) const {
  // MUBUF / MTBUF instructions have a 12-bit unsigned byte offset, and
  // additionally can do r + r + i with addr64. 32-bit has more addressing
  // mode options. Depending on the resource constant, it can also do
  // (i64 r0) + (i32 r1) * (i14 i).
  //
  // Private arrays end up using a scratch buffer most of the time, so also
  // assume those use MUBUF instructions. Scratch loads / stores are currently
  // implemented as mubuf instructions with the offen bit set, so they are
  // slightly different from the normal addr64 case.
  if (!isUInt<12>(AM.BaseOffs))
    return false;

  // FIXME: Since we can split the immediate into soffset and immediate
  // offset, would it make sense to allow any immediate?

  switch (AM.Scale) {
  case 0: // r + i or just i, depending on HasBaseReg.
    return true;
  case 1:
    return true; // We have r + r or r + i.
  case 2:
    if (AM.HasBaseReg) {
      // Reject 2 * r + r.
      return false;
    }

    // Allow 2 * r as r + r, and 2 * r + i as r + r + i.
    return true;
  default: // Don't allow n * r.
    return false;
  }
}

bool SITargetLowering::isLegalAddressingMode(const DataLayout &DL,
                                             const AddrMode &AM, Type *Ty,
                                             unsigned AS) const {
  // No global is ever allowed as a base.
  if (AM.BaseGV)
    return false;

  switch (AS) {
  case AMDGPUAS::GLOBAL_ADDRESS: {
    if (Subtarget->getGeneration() >= SISubtarget::VOLCANIC_ISLANDS) {
      // Assume we will use FLAT for all global memory accesses on VI.
      // FIXME: This assumption is currently wrong. On VI we still use
      // MUBUF instructions for the r + i addressing mode. As currently
      // implemented, the MUBUF instructions only work on buffers < 4GB.
      // It may be possible to support > 4GB buffers with MUBUF instructions,
      // by setting the stride value in the resource descriptor which would
      // increase the size limit to (stride * 4GB). However, this is risky,
      // because it has never been validated.
      return isLegalFlatAddressingMode(AM);
    }

    return isLegalMUBUFAddressingMode(AM);
  }
  case AMDGPUAS::CONSTANT_ADDRESS: {
    // If the offset isn't a multiple of 4, it probably isn't going to be
    // correctly aligned.
    // FIXME: Can we get the real alignment here?
    if (AM.BaseOffs % 4 != 0)
      return isLegalMUBUFAddressingMode(AM);

    // There are no SMRD extloads, so if we have to do a small type access we
    // will use a MUBUF load.
    // FIXME?: We also need to do this if unaligned, but we don't know the
    // alignment here.
    if (DL.getTypeStoreSize(Ty) < 4)
      return isLegalMUBUFAddressingMode(AM);

    if (Subtarget->getGeneration() == SISubtarget::SOUTHERN_ISLANDS) {
      // SMRD instructions have an 8-bit, dword offset on SI.
      if (!isUInt<8>(AM.BaseOffs / 4))
        return false;
    } else if (Subtarget->getGeneration() == SISubtarget::SEA_ISLANDS) {
      // On CI+, this can also be a 32-bit literal constant offset. If it fits
      // in 8-bits, it can use a smaller encoding.
      if (!isUInt<32>(AM.BaseOffs / 4))
        return false;
    } else if (Subtarget->getGeneration() == SISubtarget::VOLCANIC_ISLANDS) {
      // On VI, these use the SMEM format and the offset is a 20-bit byte
      // offset.
      if (!isUInt<20>(AM.BaseOffs))
        return false;
    } else
      llvm_unreachable("unhandled generation");

    if (AM.Scale == 0) // r + i or just i, depending on HasBaseReg.
      return true;

    if (AM.Scale == 1 && AM.HasBaseReg)
      return true;

    return false;
  }

  case AMDGPUAS::PRIVATE_ADDRESS:
    return isLegalMUBUFAddressingMode(AM);

  case AMDGPUAS::LOCAL_ADDRESS:
  case AMDGPUAS::REGION_ADDRESS: {
    // Basic, single offset DS instructions allow a 16-bit unsigned immediate
    // field.
    // XXX - If doing a 4-byte aligned 8-byte type access, we effectively have
    // an 8-bit dword offset but we don't know the alignment here.
    if (!isUInt<16>(AM.BaseOffs))
      return false;

    if (AM.Scale == 0) // r + i or just i, depending on HasBaseReg.
      return true;

    if (AM.Scale == 1 && AM.HasBaseReg)
      return true;

    return false;
  }
  case AMDGPUAS::FLAT_ADDRESS:
  case AMDGPUAS::UNKNOWN_ADDRESS_SPACE:
    // For an unknown address space, this usually means that this is for some
    // reason being used for pure arithmetic, and not based on some addressing
    // computation. We don't have instructions that compute pointers with any
    // addressing modes, so treat them as having no offset like flat
    // instructions.
    return isLegalFlatAddressingMode(AM);

  default:
    llvm_unreachable("unhandled address space");
  }
}

bool SITargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
                                                      unsigned AddrSpace,
                                                      unsigned Align,
                                                      bool *IsFast) const {
  if (IsFast)
    *IsFast = false;

  // TODO: I think v3i32 should allow unaligned accesses on CI with DS_READ_B96,
  // which isn't a simple VT.
  // Until MVT is extended to handle this, simply check for the size and
  // rely on the condition below: allow accesses if the size is a multiple of 4.
  if (VT == MVT::Other || (VT != MVT::Other && VT.getSizeInBits() > 1024 &&
                           VT.getStoreSize() > 16)) {
    return false;
  }

  if (AddrSpace == AMDGPUAS::LOCAL_ADDRESS ||
      AddrSpace == AMDGPUAS::REGION_ADDRESS) {
    // ds_read/write_b64 require 8-byte alignment, but we can do a 4 byte
    // aligned, 8 byte access in a single operation using ds_read2/write2_b32
    // with adjacent offsets.
    bool AlignedBy4 = (Align % 4 == 0);
    if (IsFast)
      *IsFast = AlignedBy4;

    return AlignedBy4;
  }

  // FIXME: We have to be conservative here and assume that flat operations
  // will access scratch. If we had access to the IR function, then we
  // could determine if any private memory was used in the function.
  if (!Subtarget->hasUnalignedScratchAccess() &&
      (AddrSpace == AMDGPUAS::PRIVATE_ADDRESS ||
       AddrSpace == AMDGPUAS::FLAT_ADDRESS)) {
    return false;
  }

  if (Subtarget->hasUnalignedBufferAccess()) {
    // If we have a uniform constant load, it still requires using a slow
    // buffer instruction if unaligned.
    if (IsFast) {
      *IsFast = (AddrSpace == AMDGPUAS::CONSTANT_ADDRESS) ?
        (Align % 4 == 0) : true;
    }

    return true;
  }

  // Smaller than dword values must be aligned.
  if (VT.bitsLT(MVT::i32))
    return false;

  // 8.1.6 - For Dword or larger reads or writes, the two LSBs of the
  // byte-address are ignored, thus forcing Dword alignment.
  // This applies to private, global, and constant memory.
  if (IsFast)
    *IsFast = true;

  return VT.bitsGT(MVT::i32) && Align % 4 == 0;
}

EVT SITargetLowering::getOptimalMemOpType(uint64_t Size, unsigned DstAlign,
                                          unsigned SrcAlign, bool IsMemset,
                                          bool ZeroMemset,
                                          bool MemcpyStrSrc,
                                          MachineFunction &MF) const {
  // FIXME: Should account for address space here.

  // The default fallback uses the private pointer size as a guess for a type
  // to use. Make sure we switch these to 64-bit accesses.

  if (Size >= 16 && DstAlign >= 4) // XXX: Should only do for global
    return MVT::v4i32;

  if (Size >= 8 && DstAlign >= 4)
    return MVT::v2i32;

  // Use the default.
  return MVT::Other;
}

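// Global, flat and constant address spaces are all reachable through flat
// 64-bit addressing, so a cast between any two of them never needs to change
// the pointer value (see isNoopAddrSpaceCast below).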
static bool isFlatGlobalAddrSpace(unsigned AS) {
  return AS == AMDGPUAS::GLOBAL_ADDRESS ||
         AS == AMDGPUAS::FLAT_ADDRESS ||
         AS == AMDGPUAS::CONSTANT_ADDRESS;
}

bool SITargetLowering::isNoopAddrSpaceCast(unsigned SrcAS,
                                           unsigned DestAS) const {
  return isFlatGlobalAddrSpace(SrcAS) && isFlatGlobalAddrSpace(DestAS);
}

bool SITargetLowering::isMemOpUniform(const SDNode *N) const {
  const MemSDNode *MemNode = cast<MemSDNode>(N);
  const Value *Ptr = MemNode->getMemOperand()->getValue();

  // UndefValue means this is a load of a kernel input. These are uniform.
  // Sometimes LDS instructions have constant pointers.
  // If Ptr is null, then that means this mem operand contains a
  // PseudoSourceValue like GOT.
  if (!Ptr || isa<UndefValue>(Ptr) || isa<Argument>(Ptr) ||
      isa<Constant>(Ptr) || isa<GlobalValue>(Ptr))
    return true;

  const Instruction *I = dyn_cast<Instruction>(Ptr);
  return I && I->getMetadata("amdgpu.uniform");
}

TargetLoweringBase::LegalizeTypeAction
SITargetLowering::getPreferredVectorAction(EVT VT) const {
  if (VT.getVectorNumElements() != 1 && VT.getScalarType().bitsLE(MVT::i16))
    return TypeSplitVector;

  return TargetLoweringBase::getPreferredVectorAction(VT);
}

bool SITargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
                                                         Type *Ty) const {
  // FIXME: Could be smarter if called for vector constants.
  return true;
}

bool SITargetLowering::isTypeDesirableForOp(unsigned Op, EVT VT) const {
  // i16 is not desirable unless it is a load or a store.
  if (VT == MVT::i16 && Op != ISD::LOAD && Op != ISD::STORE)
    return false;

  // SimplifySetCC uses this function to determine whether or not it should
  // create setcc with i1 operands. We don't have instructions for i1 setcc.
  if (VT == MVT::i1 && Op == ISD::SETCC)
    return false;

  return TargetLowering::isTypeDesirableForOp(Op, VT);
}

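// Build a pointer to a kernel argument: copy the preloaded kernarg segment
// pointer out of its live-in SGPR and add the argument's byte offset.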
SDValue SITargetLowering::LowerParameterPtr(SelectionDAG &DAG,
                                            const SDLoc &SL, SDValue Chain,
                                            unsigned Offset) const {
  const DataLayout &DL = DAG.getDataLayout();
  MachineFunction &MF = DAG.getMachineFunction();
  const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();
  unsigned InputPtrReg =
    TRI->getPreloadedValue(MF, SIRegisterInfo::KERNARG_SEGMENT_PTR);

  MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
  MVT PtrVT = getPointerTy(DL, AMDGPUAS::CONSTANT_ADDRESS);
  SDValue BasePtr = DAG.getCopyFromReg(Chain, SL,
                                       MRI.getLiveInVirtReg(InputPtrReg), PtrVT);
  return DAG.getNode(ISD::ADD, SL, PtrVT, BasePtr,
                     DAG.getConstant(Offset, SL, PtrVT));
}
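// Load a kernel argument of memory type MemVT from the kernarg segment at the
// given offset, then extend (sign, zero, or fp) the loaded value to VT. The
// result is merged with the load's output chain.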
SDValue SITargetLowering::LowerParameter(SelectionDAG &DAG, EVT VT, EVT MemVT,
                                         const SDLoc &SL, SDValue Chain,
                                         unsigned Offset, bool Signed) const {
  const DataLayout &DL = DAG.getDataLayout();
  Type *Ty = MemVT.getTypeForEVT(*DAG.getContext());
  PointerType *PtrTy = PointerType::get(Ty, AMDGPUAS::CONSTANT_ADDRESS);
  MachinePointerInfo PtrInfo(UndefValue::get(PtrTy));

  unsigned Align = DL.getABITypeAlignment(Ty);

  SDValue Ptr = LowerParameterPtr(DAG, SL, Chain, Offset);
  SDValue Load = DAG.getLoad(MemVT, SL, Chain, Ptr, PtrInfo, Align,
                             MachineMemOperand::MONonTemporal |
                             MachineMemOperand::MODereferenceable |
                             MachineMemOperand::MOInvariant);

  SDValue Val;
  if (MemVT.isFloatingPoint())
    Val = getFPExtOrFPTrunc(DAG, Load, SL, VT);
  else if (Signed)
    Val = DAG.getSExtOrTrunc(Load, SL, VT);
  else
    Val = DAG.getZExtOrTrunc(Load, SL, VT);

  SDValue Ops[] = {
    Val,
    Load.getValue(1)
  };

  return DAG.getMergeValues(Ops, SL);
}

SDValue SITargetLowering::LowerFormalArguments(
    SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();

  MachineFunction &MF = DAG.getMachineFunction();
  FunctionType *FType = MF.getFunction()->getFunctionType();
  SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
  const SISubtarget &ST = MF.getSubtarget<SISubtarget>();

  if (Subtarget->isAmdHsaOS() && AMDGPU::isShader(CallConv)) {
    const Function *Fn = MF.getFunction();
    DiagnosticInfoUnsupported NoGraphicsHSA(
        *Fn, "unsupported non-compute shaders with HSA", DL.getDebugLoc());
    DAG.getContext()->diagnose(NoGraphicsHSA);
    return DAG.getEntryNode();
  }

  // Create stack objects that are used for emitting the debugger prologue if
  // the "amdgpu-debugger-emit-prologue" attribute was specified.
  if (ST.debuggerEmitPrologue())
    createDebuggerPrologueStackObjects(MF);

  SmallVector<ISD::InputArg, 16> Splits;
  BitVector Skipped(Ins.size());

  for (unsigned i = 0, e = Ins.size(), PSInputNum = 0; i != e; ++i) {
    const ISD::InputArg &Arg = Ins[i];

    // First check if it's a PS input addr.
    if (CallConv == CallingConv::AMDGPU_PS && !Arg.Flags.isInReg() &&
        !Arg.Flags.isByVal() && PSInputNum <= 15) {
      if (!Arg.Used && !Info->isPSInputAllocated(PSInputNum)) {
        // We can safely skip PS inputs.
        Skipped.set(i);
        ++PSInputNum;
        continue;
      }

      Info->markPSInputAllocated(PSInputNum);
      if (Arg.Used)
        Info->PSInputEna |= 1 << PSInputNum;

      ++PSInputNum;
    }

    if (AMDGPU::isShader(CallConv)) {
      // Second, split vertices into their elements.
      if (Arg.VT.isVector()) {
        ISD::InputArg NewArg = Arg;
        NewArg.Flags.setSplit();
        NewArg.VT = Arg.VT.getVectorElementType();

        // We REALLY want the ORIGINAL number of vertex elements here, e.g. a
        // three or five element vertex only needs three or five registers,
        // NOT four or eight.
        Type *ParamType = FType->getParamType(Arg.getOrigArgIndex());
        unsigned NumElements = ParamType->getVectorNumElements();

        for (unsigned j = 0; j != NumElements; ++j) {
          Splits.push_back(NewArg);
          NewArg.PartOffset += NewArg.VT.getStoreSize();
        }
      } else {
        Splits.push_back(Arg);
      }
    }
  }

  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
                 *DAG.getContext());

  // At least one interpolation mode must be enabled or else the GPU will hang.
  //
  // Check PSInputAddr instead of PSInputEna. The idea is that if the user set
  // PSInputAddr, the user wants to enable some bits after the compilation
  // based on run-time states. Since we can't know what the final PSInputEna
  // will look like, we shouldn't do anything here and the user should take
  // responsibility for the correct programming.
  //
  // Otherwise, the following restrictions apply:
  // - At least one of PERSP_* (0xF) or LINEAR_* (0x70) must be enabled.
  // - If POS_W_FLOAT (11) is enabled, at least one of PERSP_* must be
  //   enabled too.
  if (CallConv == CallingConv::AMDGPU_PS &&
      ((Info->getPSInputAddr() & 0x7F) == 0 ||
       ((Info->getPSInputAddr() & 0xF) == 0 && Info->isPSInputAllocated(11)))) {
    CCInfo.AllocateReg(AMDGPU::VGPR0);
    CCInfo.AllocateReg(AMDGPU::VGPR1);
    Info->markPSInputAllocated(0);
    Info->PSInputEna |= 1;
  }

  if (!AMDGPU::isShader(CallConv)) {
    assert(Info->hasWorkGroupIDX() && Info->hasWorkItemIDX());
  } else {
    assert(!Info->hasPrivateSegmentBuffer() && !Info->hasDispatchPtr() &&
           !Info->hasKernargSegmentPtr() && !Info->hasFlatScratchInit() &&
           !Info->hasWorkGroupIDX() && !Info->hasWorkGroupIDY() &&
           !Info->hasWorkGroupIDZ() && !Info->hasWorkGroupInfo() &&
           !Info->hasWorkItemIDX() && !Info->hasWorkItemIDY() &&
           !Info->hasWorkItemIDZ());
  }

  // FIXME: How should these inputs interact with inreg / custom SGPR inputs?
  if (Info->hasPrivateSegmentBuffer()) {
    unsigned PrivateSegmentBufferReg = Info->addPrivateSegmentBuffer(*TRI);
    MF.addLiveIn(PrivateSegmentBufferReg, &AMDGPU::SReg_128RegClass);
    CCInfo.AllocateReg(PrivateSegmentBufferReg);
  }

  if (Info->hasDispatchPtr()) {
    unsigned DispatchPtrReg = Info->addDispatchPtr(*TRI);
    MF.addLiveIn(DispatchPtrReg, &AMDGPU::SReg_64RegClass);
    CCInfo.AllocateReg(DispatchPtrReg);
  }

  if (Info->hasQueuePtr()) {
    unsigned QueuePtrReg = Info->addQueuePtr(*TRI);
    MF.addLiveIn(QueuePtrReg, &AMDGPU::SReg_64RegClass);
    CCInfo.AllocateReg(QueuePtrReg);
  }

  if (Info->hasKernargSegmentPtr()) {
    unsigned InputPtrReg = Info->addKernargSegmentPtr(*TRI);
    MF.addLiveIn(InputPtrReg, &AMDGPU::SReg_64RegClass);
    CCInfo.AllocateReg(InputPtrReg);
  }

  if (Info->hasDispatchID()) {
    unsigned DispatchIDReg = Info->addDispatchID(*TRI);
    MF.addLiveIn(DispatchIDReg, &AMDGPU::SReg_64RegClass);
    CCInfo.AllocateReg(DispatchIDReg);
  }

  if (Info->hasFlatScratchInit()) {
    unsigned FlatScratchInitReg = Info->addFlatScratchInit(*TRI);
    MF.addLiveIn(FlatScratchInitReg, &AMDGPU::SReg_64RegClass);
    CCInfo.AllocateReg(FlatScratchInitReg);
  }

  if (!AMDGPU::isShader(CallConv))
    analyzeFormalArgumentsCompute(CCInfo, Ins);
  else
    AnalyzeFormalArguments(CCInfo, Splits);

  SmallVector<SDValue, 16> Chains;

  for (unsigned i = 0, e = Ins.size(), ArgIdx = 0; i != e; ++i) {
    const ISD::InputArg &Arg = Ins[i];
    if (Skipped[i]) {
      InVals.push_back(DAG.getUNDEF(Arg.VT));
      continue;
    }

    CCValAssign &VA = ArgLocs[ArgIdx++];
    MVT VT = VA.getLocVT();

    if (VA.isMemLoc()) {
      VT = Ins[i].VT;
      EVT MemVT = VA.getLocVT();
      const unsigned Offset = Subtarget->getExplicitKernelArgOffset() +
                              VA.getLocMemOffset();
      // The first 36 bytes of the input buffer contain information about
      // thread group and global sizes.
      SDValue Arg = LowerParameter(DAG, VT, MemVT, DL, Chain,
                                   Offset, Ins[i].Flags.isSExt());
      Chains.push_back(Arg.getValue(1));

      auto *ParamTy =
        dyn_cast<PointerType>(FType->getParamType(Ins[i].getOrigArgIndex()));
      if (Subtarget->getGeneration() == SISubtarget::SOUTHERN_ISLANDS &&
          ParamTy && ParamTy->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS) {
        // On SI local pointers are just offsets into LDS, so they are always
        // less than 16-bits. On CI and newer they could potentially be
        // real pointers, so we can't guarantee their size.
        Arg = DAG.getNode(ISD::AssertZext, DL, Arg.getValueType(), Arg,
                          DAG.getValueType(MVT::i16));
      }

      InVals.push_back(Arg);
      Info->setABIArgOffset(Offset + MemVT.getStoreSize());
      continue;
    }
    assert(VA.isRegLoc() && "Parameter must be in a register!");

    unsigned Reg = VA.getLocReg();

    if (VT == MVT::i64) {
      // For now assume it is a pointer.
      Reg = TRI->getMatchingSuperReg(Reg, AMDGPU::sub0,
                                     &AMDGPU::SReg_64RegClass);
      Reg = MF.addLiveIn(Reg, &AMDGPU::SReg_64RegClass);
      SDValue Copy = DAG.getCopyFromReg(Chain, DL, Reg, VT);
      InVals.push_back(Copy);
      continue;
    }

    const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg, VT);

    Reg = MF.addLiveIn(Reg, RC);
    SDValue Val = DAG.getCopyFromReg(Chain, DL, Reg, VT);

    if (Arg.VT.isVector()) {
      // Build a vector from the registers.
      Type *ParamType = FType->getParamType(Arg.getOrigArgIndex());
      unsigned NumElements = ParamType->getVectorNumElements();

      SmallVector<SDValue, 4> Regs;
      Regs.push_back(Val);
      for (unsigned j = 1; j != NumElements; ++j) {
        Reg = ArgLocs[ArgIdx++].getLocReg();
        Reg = MF.addLiveIn(Reg, RC);

        SDValue Copy = DAG.getCopyFromReg(Chain, DL, Reg, VT);
        Regs.push_back(Copy);
      }

      // Fill up the missing vector elements.
      NumElements = Arg.VT.getVectorNumElements() - NumElements;
      Regs.append(NumElements, DAG.getUNDEF(VT));

      InVals.push_back(DAG.getBuildVector(Arg.VT, DL, Regs));
      continue;
    }

    InVals.push_back(Val);
  }

  // TODO: Add GridWorkGroupCount user SGPRs when used. For now with HSA we
  // read these from the dispatch pointer.

  // Start adding system SGPRs.
  if (Info->hasWorkGroupIDX()) {
    unsigned Reg = Info->addWorkGroupIDX();
    MF.addLiveIn(Reg, &AMDGPU::SReg_32RegClass);
    CCInfo.AllocateReg(Reg);
  }

  if (Info->hasWorkGroupIDY()) {
    unsigned Reg = Info->addWorkGroupIDY();
    MF.addLiveIn(Reg, &AMDGPU::SReg_32RegClass);
    CCInfo.AllocateReg(Reg);
  }

  if (Info->hasWorkGroupIDZ()) {
    unsigned Reg = Info->addWorkGroupIDZ();
    MF.addLiveIn(Reg, &AMDGPU::SReg_32RegClass);
    CCInfo.AllocateReg(Reg);
  }

  if (Info->hasWorkGroupInfo()) {
    unsigned Reg = Info->addWorkGroupInfo();
    MF.addLiveIn(Reg, &AMDGPU::SReg_32RegClass);
    CCInfo.AllocateReg(Reg);
  }

  if (Info->hasPrivateSegmentWaveByteOffset()) {
    // Scratch wave offset passed in system SGPR.
    unsigned PrivateSegmentWaveByteOffsetReg;

    if (AMDGPU::isShader(CallConv)) {
      PrivateSegmentWaveByteOffsetReg = findFirstFreeSGPR(CCInfo);
      Info->setPrivateSegmentWaveByteOffset(PrivateSegmentWaveByteOffsetReg);
    } else
      PrivateSegmentWaveByteOffsetReg = Info->addPrivateSegmentWaveByteOffset();

    MF.addLiveIn(PrivateSegmentWaveByteOffsetReg, &AMDGPU::SGPR_32RegClass);
    CCInfo.AllocateReg(PrivateSegmentWaveByteOffsetReg);
  }

  // Now that we've figured out where the scratch register inputs are, see if
  // we should reserve the arguments and use them directly.
  bool HasStackObjects = MF.getFrameInfo().hasStackObjects();
  // Record that we know we have non-spill stack objects so we don't need to
  // check all stack objects later.
  if (HasStackObjects)
    Info->setHasNonSpillStackObjects(true);

  // Everything live out of a block is spilled with fast regalloc, so it's
  // almost certain that spilling will be required.
  if (getTargetMachine().getOptLevel() == CodeGenOpt::None)
    HasStackObjects = true;

  if (ST.isAmdCodeObjectV2()) {
    if (HasStackObjects) {
      // If we have stack objects, we unquestionably need the private buffer
      // resource. For the Code Object V2 ABI, this will be the first 4 user
      // SGPR inputs. We can reserve those and use them directly.
      unsigned PrivateSegmentBufferReg = TRI->getPreloadedValue(
        MF, SIRegisterInfo::PRIVATE_SEGMENT_BUFFER);
      Info->setScratchRSrcReg(PrivateSegmentBufferReg);

      unsigned PrivateSegmentWaveByteOffsetReg = TRI->getPreloadedValue(
        MF, SIRegisterInfo::PRIVATE_SEGMENT_WAVE_BYTE_OFFSET);
      Info->setScratchWaveOffsetReg(PrivateSegmentWaveByteOffsetReg);
    } else {
      unsigned ReservedBufferReg
        = TRI->reservedPrivateSegmentBufferReg(MF);
      unsigned ReservedOffsetReg
        = TRI->reservedPrivateSegmentWaveByteOffsetReg(MF);

      // We tentatively reserve the last registers (skipping the last two
      // which may contain VCC). After register allocation, we'll replace
      // these with the ones immediately after those which were really
      // allocated. In the prologue, copies will be inserted from the argument
      // to these reserved registers.
      Info->setScratchRSrcReg(ReservedBufferReg);
      Info->setScratchWaveOffsetReg(ReservedOffsetReg);
    }
  } else {
    unsigned ReservedBufferReg = TRI->reservedPrivateSegmentBufferReg(MF);

    // Without HSA, relocations are used for the scratch pointer and the
    // buffer resource setup is always inserted in the prologue. Scratch wave
    // offset is still in an input SGPR.
    Info->setScratchRSrcReg(ReservedBufferReg);

    if (HasStackObjects) {
      unsigned ScratchWaveOffsetReg = TRI->getPreloadedValue(
        MF, SIRegisterInfo::PRIVATE_SEGMENT_WAVE_BYTE_OFFSET);
      Info->setScratchWaveOffsetReg(ScratchWaveOffsetReg);
    } else {
      unsigned ReservedOffsetReg
        = TRI->reservedPrivateSegmentWaveByteOffsetReg(MF);
      Info->setScratchWaveOffsetReg(ReservedOffsetReg);
    }
  }

  if (Info->hasWorkItemIDX()) {
    unsigned Reg = TRI->getPreloadedValue(MF, SIRegisterInfo::WORKITEM_ID_X);
    MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass);
    CCInfo.AllocateReg(Reg);
  }

  if (Info->hasWorkItemIDY()) {
    unsigned Reg = TRI->getPreloadedValue(MF, SIRegisterInfo::WORKITEM_ID_Y);
    MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass);
    CCInfo.AllocateReg(Reg);
  }

  if (Info->hasWorkItemIDZ()) {
    unsigned Reg = TRI->getPreloadedValue(MF, SIRegisterInfo::WORKITEM_ID_Z);
    MF.addLiveIn(Reg, &AMDGPU::VGPR_32RegClass);
    CCInfo.AllocateReg(Reg);
  }

  if (Chains.empty())
    return Chain;

  return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains);
}

SDValue
SITargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
                              bool isVarArg,
                              const SmallVectorImpl<ISD::OutputArg> &Outs,
                              const SmallVectorImpl<SDValue> &OutVals,
                              const SDLoc &DL, SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();

  if (!AMDGPU::isShader(CallConv))
    return AMDGPUTargetLowering::LowerReturn(Chain, CallConv, isVarArg, Outs,
                                             OutVals, DL, DAG);

  Info->setIfReturnsVoid(Outs.size() == 0);

  SmallVector<ISD::OutputArg, 48> Splits;
  SmallVector<SDValue, 48> SplitVals;

  // Split vectors into their elements.
  for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
    const ISD::OutputArg &Out = Outs[i];

    if (Out.VT.isVector()) {
      MVT VT = Out.VT.getVectorElementType();
      ISD::OutputArg NewOut = Out;
      NewOut.Flags.setSplit();
      NewOut.VT = VT;

      // We want the original number of vector elements here, e.g.
      // three or five, not four or eight.
      unsigned NumElements = Out.ArgVT.getVectorNumElements();

      for (unsigned j = 0; j != NumElements; ++j) {
        SDValue Elem = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, OutVals[i],
                                   DAG.getConstant(j, DL, MVT::i32));
        SplitVals.push_back(Elem);
        Splits.push_back(NewOut);
        NewOut.PartOffset += NewOut.VT.getStoreSize();
      }
    } else {
      SplitVals.push_back(OutVals[i]);
      Splits.push_back(Out);
    }
  }

  // CCValAssign - represent the assignment of the return value to a location.
  SmallVector<CCValAssign, 48> RVLocs;

  // CCState - Info about the registers and stack slots.
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
                 *DAG.getContext());

  // Analyze outgoing return values.
  AnalyzeReturn(CCInfo, Splits);

  SDValue Flag;
  SmallVector<SDValue, 48> RetOps;
  RetOps.push_back(Chain); // Operand #0 = Chain (updated below)

  // Copy the result values into the output registers.
  for (unsigned i = 0, realRVLocIdx = 0;
       i != RVLocs.size();
       ++i, ++realRVLocIdx) {
    CCValAssign &VA = RVLocs[i];
    assert(VA.isRegLoc() && "Can only return in registers!");

    SDValue Arg = SplitVals[realRVLocIdx];

    // Copied from other backends.
    switch (VA.getLocInfo()) {
    default: llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full:
      break;
    case CCValAssign::BCvt:
      Arg = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), Arg);
      break;
    }

    Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Arg, Flag);
    Flag = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
  }

  // Update chain and glue.
  RetOps[0] = Chain;
  if (Flag.getNode())
    RetOps.push_back(Flag);

  unsigned Opc = Info->returnsVoid() ? AMDGPUISD::ENDPGM : AMDGPUISD::RETURN;
  return DAG.getNode(Opc, DL, MVT::Other, RetOps);
}

unsigned SITargetLowering::getRegisterByName(const char* RegName, EVT VT,
                                             SelectionDAG &DAG) const {
  unsigned Reg = StringSwitch<unsigned>(RegName)
    .Case("m0", AMDGPU::M0)
    .Case("exec", AMDGPU::EXEC)
    .Case("exec_lo", AMDGPU::EXEC_LO)
    .Case("exec_hi", AMDGPU::EXEC_HI)
    .Case("flat_scratch", AMDGPU::FLAT_SCR)
    .Case("flat_scratch_lo", AMDGPU::FLAT_SCR_LO)
    .Case("flat_scratch_hi", AMDGPU::FLAT_SCR_HI)
    .Default(AMDGPU::NoRegister);

  if (Reg == AMDGPU::NoRegister) {
    report_fatal_error(Twine("invalid register name \""
                             + StringRef(RegName) + "\"."));
  }

  if (Subtarget->getGeneration() == SISubtarget::SOUTHERN_ISLANDS &&
      Subtarget->getRegisterInfo()->regsOverlap(Reg, AMDGPU::FLAT_SCR)) {
    report_fatal_error(Twine("invalid register \""
                             + StringRef(RegName) + "\" for subtarget."));
  }

  switch (Reg) {
  case AMDGPU::M0:
  case AMDGPU::EXEC_LO:
  case AMDGPU::EXEC_HI:
  case AMDGPU::FLAT_SCR_LO:
  case AMDGPU::FLAT_SCR_HI:
    if (VT.getSizeInBits() == 32)
      return Reg;
    break;
  case AMDGPU::EXEC:
  case AMDGPU::FLAT_SCR:
    if (VT.getSizeInBits() == 64)
      return Reg;
    break;
  default:
    llvm_unreachable("missing register type checking");
  }

  report_fatal_error(Twine("invalid type for register \""
                           + StringRef(RegName) + "\"."));
}

// If kill is not the last instruction, split the block so kill is always a
// proper terminator.
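//
// A rough sketch of the transform (illustrative only; block names and the
// exact operands are not the real MIR):
//
//   bb:                          bb:
//     ...                          ...
//     SI_KILL %src          =>     SI_KILL_TERMINATOR %src
//     %x = ...                   bb.split:
//     S_BRANCH bb2                 %x = ...
//                                  S_BRANCH bb2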
MachineBasicBlock *SITargetLowering::splitKillBlock(MachineInstr &MI,
                                                    MachineBasicBlock *BB) const {
  const SIInstrInfo *TII = getSubtarget()->getInstrInfo();

  MachineBasicBlock::iterator SplitPoint(&MI);
  ++SplitPoint;

  if (SplitPoint == BB->end()) {
    // Don't bother with a new block.
    MI.setDesc(TII->get(AMDGPU::SI_KILL_TERMINATOR));
    return BB;
  }

  MachineFunction *MF = BB->getParent();
  MachineBasicBlock *SplitBB
    = MF->CreateMachineBasicBlock(BB->getBasicBlock());

  MF->insert(++MachineFunction::iterator(BB), SplitBB);
  SplitBB->splice(SplitBB->begin(), BB, SplitPoint, BB->end());

  SplitBB->transferSuccessorsAndUpdatePHIs(BB);
  BB->addSuccessor(SplitBB);

  MI.setDesc(TII->get(AMDGPU::SI_KILL_TERMINATOR));
  return SplitBB;
}

// Do a v_movrels_b32 or v_movreld_b32 for each unique value of \p IdxReg in the
// wavefront. If the value is uniform and just happens to be in a VGPR, this
// will only do one iteration. In the worst case, this will loop 64 times.
//
// TODO: Just use v_readlane_b32 if we know the VGPR has a uniform value.
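//
// For reference, a simplified sketch of the loop body this emits (PHIs and
// exact operands omitted; register names are illustrative, not real MIR):
//
//   loop:
//     %cur  = V_READFIRSTLANE_B32 %idx    ; pick the index from one lane
//     %cond = V_CMP_EQ_U32 %cur, %idx     ; mask of lanes with that index
//     m0    = %cur (+ Offset)             ; or S_SET_GPR_IDX_IDX
//     %save = S_AND_SAVEEXEC_B64 %cond    ; limit exec to the matching lanes
//     ...                                 ; the indexed move lands here
//     exec  = S_XOR_B64 exec, %save       ; retire the lanes just serviced
//     S_CBRANCH_EXECNZ loop               ; repeat while any lane remains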
static MachineBasicBlock::iterator emitLoadM0FromVGPRLoop(
  const SIInstrInfo *TII,
  MachineRegisterInfo &MRI,
  MachineBasicBlock &OrigBB,
  MachineBasicBlock &LoopBB,
  const DebugLoc &DL,
  const MachineOperand &IdxReg,
  unsigned InitReg,
  unsigned ResultReg,
  unsigned PhiReg,
  unsigned InitSaveExecReg,
  int Offset,
  bool UseGPRIdxMode) {
  MachineBasicBlock::iterator I = LoopBB.begin();

  unsigned PhiExec = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
  unsigned NewExec = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
  unsigned CurrentIdxReg = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
  unsigned CondReg = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);

  BuildMI(LoopBB, I, DL, TII->get(TargetOpcode::PHI), PhiReg)
    .addReg(InitReg)
    .addMBB(&OrigBB)
    .addReg(ResultReg)
    .addMBB(&LoopBB);

  BuildMI(LoopBB, I, DL, TII->get(TargetOpcode::PHI), PhiExec)
    .addReg(InitSaveExecReg)
    .addMBB(&OrigBB)
    .addReg(NewExec)
    .addMBB(&LoopBB);

  // Read the next variant <- also loop target.
  BuildMI(LoopBB, I, DL, TII->get(AMDGPU::V_READFIRSTLANE_B32), CurrentIdxReg)
    .addReg(IdxReg.getReg(), getUndefRegState(IdxReg.isUndef()));

  // Compare the just-read index value against all lanes' index values.
  BuildMI(LoopBB, I, DL, TII->get(AMDGPU::V_CMP_EQ_U32_e64), CondReg)
    .addReg(CurrentIdxReg)
    .addReg(IdxReg.getReg(), 0, IdxReg.getSubReg());

  if (UseGPRIdxMode) {
    // Use the SGPR index directly, folding in the constant offset if needed.
    unsigned NewIdxReg;
    if (Offset == 0) {
      NewIdxReg = CurrentIdxReg;
    } else {
      NewIdxReg = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
      BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_ADD_I32), NewIdxReg)
        .addReg(CurrentIdxReg, RegState::Kill)
        .addImm(Offset);
    }

    MachineInstr *SetIdx =
      BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_IDX))
      .addReg(NewIdxReg, RegState::Kill);
    SetIdx->getOperand(2).setIsUndef();
  } else {
    // Move the index into M0.
    if (Offset == 0) {
      BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
        .addReg(CurrentIdxReg, RegState::Kill);
    } else {
      BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_ADD_I32), AMDGPU::M0)
        .addReg(CurrentIdxReg, RegState::Kill)
        .addImm(Offset);
    }
  }

  // Update EXEC, saving the original EXEC value to NewExec.
  BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_AND_SAVEEXEC_B64), NewExec)
    .addReg(CondReg, RegState::Kill);

  MRI.setSimpleHint(NewExec, CondReg);

  // Update EXEC, switch all done bits to 0 and all todo bits to 1.
  MachineInstr *InsertPt =
    BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_XOR_B64), AMDGPU::EXEC)
    .addReg(AMDGPU::EXEC)
    .addReg(NewExec);

  // XXX - s_xor_b64 sets scc to 1 if the result is nonzero, so can we use
  // s_cbranch_scc0?

  // Loop back to V_READFIRSTLANE_B32 if there are still variants to cover.
  BuildMI(LoopBB, I, DL, TII->get(AMDGPU::S_CBRANCH_EXECNZ))
    .addMBB(&LoopBB);

  return InsertPt->getIterator();
}

// This has slightly sub-optimal regalloc when the source vector is killed by
// the read. The register allocator does not understand that the kill is
// per-workitem, so the vector is kept alive for the whole loop and we end up
// not re-using a subregister from it, using one more VGPR than necessary.
// That extra VGPR was saved when this was expanded after register allocation.
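//
// The rough block structure this creates (names illustrative):
//
//   MBB:                          ; original block, now ends here
//     %save = S_MOV_B64 exec
//   LoopBB:                       ; waterfall loop, branches to itself
//     ...
//   RemainderBB:                  ; rest of the original block
//     exec = S_MOV_B64 %save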
static MachineBasicBlock::iterator loadM0FromVGPR(const SIInstrInfo *TII,
                                                  MachineBasicBlock &MBB,
                                                  MachineInstr &MI,
                                                  unsigned InitResultReg,
                                                  unsigned PhiReg,
                                                  int Offset,
                                                  bool UseGPRIdxMode) {
  MachineFunction *MF = MBB.getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  const DebugLoc &DL = MI.getDebugLoc();
  MachineBasicBlock::iterator I(&MI);

  unsigned DstReg = MI.getOperand(0).getReg();
  unsigned SaveExec = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
  unsigned TmpExec = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);

  BuildMI(MBB, I, DL, TII->get(TargetOpcode::IMPLICIT_DEF), TmpExec);

  // Save the EXEC mask.
  BuildMI(MBB, I, DL, TII->get(AMDGPU::S_MOV_B64), SaveExec)
    .addReg(AMDGPU::EXEC);

  // To insert the loop we need to split the block. Move everything after this
  // point to a new block, and insert a new empty block between the two.
  MachineBasicBlock *LoopBB = MF->CreateMachineBasicBlock();
  MachineBasicBlock *RemainderBB = MF->CreateMachineBasicBlock();
  MachineFunction::iterator MBBI(MBB);
  ++MBBI;

  MF->insert(MBBI, LoopBB);
  MF->insert(MBBI, RemainderBB);

  LoopBB->addSuccessor(LoopBB);
  LoopBB->addSuccessor(RemainderBB);

  // Move the rest of the block into a new block.
  RemainderBB->transferSuccessorsAndUpdatePHIs(&MBB);
  RemainderBB->splice(RemainderBB->begin(), &MBB, I, MBB.end());

  MBB.addSuccessor(LoopBB);

  const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx);

  auto InsPt = emitLoadM0FromVGPRLoop(TII, MRI, MBB, *LoopBB, DL, *Idx,
                                      InitResultReg, DstReg, PhiReg, TmpExec,
                                      Offset, UseGPRIdxMode);

  MachineBasicBlock::iterator First = RemainderBB->begin();
  BuildMI(*RemainderBB, First, DL, TII->get(AMDGPU::S_MOV_B64), AMDGPU::EXEC)
    .addReg(SaveExec);

  return InsPt;
}

// Returns the subreg index to access and the remaining (dynamic) offset.
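// For example, for a 128-bit (4 x 32-bit) register class, a constant offset
// of 2 yields (AMDGPU::sub2, 0), while an out-of-bounds offset of 5 is left
// to the dynamic index and yields (AMDGPU::sub0, 5).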
static std::pair<unsigned, int>
computeIndirectRegAndOffset(const SIRegisterInfo &TRI,
                            const TargetRegisterClass *SuperRC,
                            unsigned VecReg,
                            int Offset) {
  int NumElts = SuperRC->getSize() / 4;

  // Skip out of bounds offsets, or else we would end up using an undefined
  // register.
  if (Offset >= NumElts || Offset < 0)
    return std::make_pair(AMDGPU::sub0, Offset);

  return std::make_pair(AMDGPU::sub0 + Offset, 0);
}

// Return true if the index is an SGPR and was set.
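//
// In GPR index mode this emits S_SET_GPR_IDX_ON (adding any constant offset
// into a scratch SGPR first); otherwise it materializes the index in m0.
// Roughly, for a nonzero offset in the m0 case:
//
//   m0 = S_ADD_I32 %sgpr_idx, Offset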
static bool setM0ToIndexFromSGPR(const SIInstrInfo *TII,
                                 MachineRegisterInfo &MRI,
                                 MachineInstr &MI,
                                 int Offset,
                                 bool UseGPRIdxMode,
                                 bool IsIndirectSrc) {
  MachineBasicBlock *MBB = MI.getParent();
  const DebugLoc &DL = MI.getDebugLoc();
  MachineBasicBlock::iterator I(&MI);

  const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx);
  const TargetRegisterClass *IdxRC = MRI.getRegClass(Idx->getReg());

  assert(Idx->getReg() != AMDGPU::NoRegister);

  if (!TII->getRegisterInfo().isSGPRClass(IdxRC))
    return false;

  if (UseGPRIdxMode) {
    unsigned IdxMode = IsIndirectSrc ?
      VGPRIndexMode::SRC0_ENABLE : VGPRIndexMode::DST_ENABLE;
    if (Offset == 0) {
      MachineInstr *SetOn =
        BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_ON))
        .addOperand(*Idx)
        .addImm(IdxMode);

      SetOn->getOperand(3).setIsUndef();
    } else {
      unsigned Tmp = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
      BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_ADD_I32), Tmp)
        .addOperand(*Idx)
        .addImm(Offset);
      MachineInstr *SetOn =
        BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_ON))
        .addReg(Tmp, RegState::Kill)
        .addImm(IdxMode);

      SetOn->getOperand(3).setIsUndef();
    }

    return true;
  }

  if (Offset == 0) {
    BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
      .addOperand(*Idx);
  } else {
    BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_ADD_I32), AMDGPU::M0)
      .addOperand(*Idx)
      .addImm(Offset);
  }

  return true;
}

// Control flow needs to be inserted if indexing with a VGPR.
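//
// If the index is already in an SGPR, a single indexed move suffices, roughly
// (movrel flavor shown; GPR index mode instead uses V_MOV_B32 bracketed by
// S_SET_GPR_IDX_ON/OFF):
//
//   m0   = %sgpr_idx (+ Offset)
//   %dst = V_MOVRELS_B32 %vec.sub0
//
// Otherwise the waterfall loop above is wrapped around the move.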
static MachineBasicBlock *emitIndirectSrc(MachineInstr &MI,
                                          MachineBasicBlock &MBB,
                                          const SISubtarget &ST) {
  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo &TRI = TII->getRegisterInfo();
  MachineFunction *MF = MBB.getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();

  unsigned Dst = MI.getOperand(0).getReg();
  unsigned SrcReg = TII->getNamedOperand(MI, AMDGPU::OpName::src)->getReg();
  int Offset = TII->getNamedOperand(MI, AMDGPU::OpName::offset)->getImm();

  const TargetRegisterClass *VecRC = MRI.getRegClass(SrcReg);

  unsigned SubReg;
  std::tie(SubReg, Offset)
    = computeIndirectRegAndOffset(TRI, VecRC, SrcReg, Offset);

  bool UseGPRIdxMode = ST.hasVGPRIndexMode() && EnableVGPRIndexMode;

  if (setM0ToIndexFromSGPR(TII, MRI, MI, Offset, UseGPRIdxMode, true)) {
    MachineBasicBlock::iterator I(&MI);
    const DebugLoc &DL = MI.getDebugLoc();

    if (UseGPRIdxMode) {
      // TODO: Look at the uses to avoid the copy. This may require
      // rescheduling to avoid interfering with other uses, so probably
      // requires a new optimization pass.
      BuildMI(MBB, I, DL, TII->get(AMDGPU::V_MOV_B32_e32), Dst)
        .addReg(SrcReg, RegState::Undef, SubReg)
        .addReg(SrcReg, RegState::Implicit)
        .addReg(AMDGPU::M0, RegState::Implicit);
      BuildMI(MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_OFF));
    } else {
      BuildMI(MBB, I, DL, TII->get(AMDGPU::V_MOVRELS_B32_e32), Dst)
        .addReg(SrcReg, RegState::Undef, SubReg)
        .addReg(SrcReg, RegState::Implicit);
    }

    MI.eraseFromParent();

    return &MBB;
  }

  const DebugLoc &DL = MI.getDebugLoc();
  MachineBasicBlock::iterator I(&MI);

  unsigned PhiReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
  unsigned InitReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);

  BuildMI(MBB, I, DL, TII->get(TargetOpcode::IMPLICIT_DEF), InitReg);

  if (UseGPRIdxMode) {
    MachineInstr *SetOn = BuildMI(MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_ON))
      .addImm(0) // Reset inside loop.
      .addImm(VGPRIndexMode::SRC0_ENABLE);
    SetOn->getOperand(3).setIsUndef();

    // Disable again after the loop.
    BuildMI(MBB, std::next(I), DL, TII->get(AMDGPU::S_SET_GPR_IDX_OFF));
  }

  auto InsPt = loadM0FromVGPR(TII, MBB, MI, InitReg, PhiReg,
                              Offset, UseGPRIdxMode);
  MachineBasicBlock *LoopBB = InsPt->getParent();

  if (UseGPRIdxMode) {
    BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::V_MOV_B32_e32), Dst)
      .addReg(SrcReg, RegState::Undef, SubReg)
      .addReg(SrcReg, RegState::Implicit)
      .addReg(AMDGPU::M0, RegState::Implicit);
  } else {
    BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::V_MOVRELS_B32_e32), Dst)
      .addReg(SrcReg, RegState::Undef, SubReg)
      .addReg(SrcReg, RegState::Implicit);
  }

  MI.eraseFromParent();

  return LoopBB;
}

static unsigned getMOVRELDPseudo(const TargetRegisterClass *VecRC) {
  switch (VecRC->getSize()) {
  case 4:
    return AMDGPU::V_MOVRELD_B32_V1;
  case 8:
    return AMDGPU::V_MOVRELD_B32_V2;
  case 16:
    return AMDGPU::V_MOVRELD_B32_V4;
  case 32:
    return AMDGPU::V_MOVRELD_B32_V8;
  case 64:
    return AMDGPU::V_MOVRELD_B32_V16;
  default:
    llvm_unreachable("unsupported size for MOVRELD pseudos");
  }
}

static MachineBasicBlock *emitIndirectDst(MachineInstr &MI,
                                          MachineBasicBlock &MBB,
                                          const SISubtarget &ST) {
  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo &TRI = TII->getRegisterInfo();
  MachineFunction *MF = MBB.getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();

  unsigned Dst = MI.getOperand(0).getReg();
  const MachineOperand *SrcVec = TII->getNamedOperand(MI, AMDGPU::OpName::src);
  const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx);
  const MachineOperand *Val = TII->getNamedOperand(MI, AMDGPU::OpName::val);
  int Offset = TII->getNamedOperand(MI, AMDGPU::OpName::offset)->getImm();
  const TargetRegisterClass *VecRC = MRI.getRegClass(SrcVec->getReg());

  // This can be an immediate, but will be folded later.
  assert(Val->getReg());

  unsigned SubReg;
  std::tie(SubReg, Offset) = computeIndirectRegAndOffset(TRI, VecRC,
                                                         SrcVec->getReg(),
                                                         Offset);
  bool UseGPRIdxMode = ST.hasVGPRIndexMode() && EnableVGPRIndexMode;

  if (Idx->getReg() == AMDGPU::NoRegister) {
    MachineBasicBlock::iterator I(&MI);
    const DebugLoc &DL = MI.getDebugLoc();

    assert(Offset == 0);

    BuildMI(MBB, I, DL, TII->get(TargetOpcode::INSERT_SUBREG), Dst)
      .addOperand(*SrcVec)
      .addOperand(*Val)
      .addImm(SubReg);

    MI.eraseFromParent();
    return &MBB;
  }

  if (setM0ToIndexFromSGPR(TII, MRI, MI, Offset, UseGPRIdxMode, false)) {
    MachineBasicBlock::iterator I(&MI);
    const DebugLoc &DL = MI.getDebugLoc();

    if (UseGPRIdxMode) {
      BuildMI(MBB, I, DL, TII->get(AMDGPU::V_MOV_B32_indirect))
        .addReg(SrcVec->getReg(), RegState::Undef, SubReg) // vdst
        .addOperand(*Val)
        .addReg(Dst, RegState::ImplicitDefine)
        .addReg(SrcVec->getReg(), RegState::Implicit)
        .addReg(AMDGPU::M0, RegState::Implicit);

      BuildMI(MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_OFF));
    } else {
      const MCInstrDesc &MovRelDesc = TII->get(getMOVRELDPseudo(VecRC));

      BuildMI(MBB, I, DL, MovRelDesc)
        .addReg(Dst, RegState::Define)
        .addReg(SrcVec->getReg())
        .addOperand(*Val)
        .addImm(SubReg - AMDGPU::sub0);
    }

    MI.eraseFromParent();
    return &MBB;
  }

  if (Val->isReg())
    MRI.clearKillFlags(Val->getReg());

  const DebugLoc &DL = MI.getDebugLoc();

  if (UseGPRIdxMode) {
    MachineBasicBlock::iterator I(&MI);

    MachineInstr *SetOn = BuildMI(MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_ON))
      .addImm(0) // Reset inside loop.
      .addImm(VGPRIndexMode::DST_ENABLE);
    SetOn->getOperand(3).setIsUndef();

    // Disable again after the loop.
    BuildMI(MBB, std::next(I), DL, TII->get(AMDGPU::S_SET_GPR_IDX_OFF));
  }

  unsigned PhiReg = MRI.createVirtualRegister(VecRC);

  auto InsPt = loadM0FromVGPR(TII, MBB, MI, SrcVec->getReg(), PhiReg,
                              Offset, UseGPRIdxMode);
  MachineBasicBlock *LoopBB = InsPt->getParent();

  if (UseGPRIdxMode) {
    BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::V_MOV_B32_indirect))
      .addReg(PhiReg, RegState::Undef, SubReg) // vdst
      .addOperand(*Val) // src0
      .addReg(Dst, RegState::ImplicitDefine)
      .addReg(PhiReg, RegState::Implicit)
      .addReg(AMDGPU::M0, RegState::Implicit);
  } else {
    const MCInstrDesc &MovRelDesc = TII->get(getMOVRELDPseudo(VecRC));

    BuildMI(*LoopBB, InsPt, DL, MovRelDesc)
      .addReg(Dst, RegState::Define)
      .addReg(PhiReg)
      .addOperand(*Val)
      .addImm(SubReg - AMDGPU::sub0);
  }

  MI.eraseFromParent();

  return LoopBB;
}

MachineBasicBlock *SITargetLowering::EmitInstrWithCustomInserter(
  MachineInstr &MI, MachineBasicBlock *BB) const {
  switch (MI.getOpcode()) {
  case AMDGPU::SI_INIT_M0: {
    const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
    BuildMI(*BB, MI.getIterator(), MI.getDebugLoc(),
            TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
      .addOperand(MI.getOperand(0));
    MI.eraseFromParent();
    return BB;
  }
  case AMDGPU::GET_GROUPSTATICSIZE: {
    const SIInstrInfo *TII = getSubtarget()->getInstrInfo();

    MachineFunction *MF = BB->getParent();
    SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
    DebugLoc DL = MI.getDebugLoc();
    BuildMI(*BB, MI, DL, TII->get(AMDGPU::S_MOV_B32))
      .addOperand(MI.getOperand(0))
      .addImm(MFI->getLDSSize());
    MI.eraseFromParent();
    return BB;
  }
  case AMDGPU::SI_INDIRECT_SRC_V1:
  case AMDGPU::SI_INDIRECT_SRC_V2:
  case AMDGPU::SI_INDIRECT_SRC_V4:
  case AMDGPU::SI_INDIRECT_SRC_V8:
  case AMDGPU::SI_INDIRECT_SRC_V16:
    return emitIndirectSrc(MI, *BB, *getSubtarget());
  case AMDGPU::SI_INDIRECT_DST_V1:
  case AMDGPU::SI_INDIRECT_DST_V2:
  case AMDGPU::SI_INDIRECT_DST_V4:
  case AMDGPU::SI_INDIRECT_DST_V8:
  case AMDGPU::SI_INDIRECT_DST_V16:
    return emitIndirectDst(MI, *BB, *getSubtarget());
  case AMDGPU::SI_KILL:
    return splitKillBlock(MI, BB);
  case AMDGPU::V_CNDMASK_B64_PSEUDO: {
    MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
    const SIInstrInfo *TII = getSubtarget()->getInstrInfo();

    unsigned Dst = MI.getOperand(0).getReg();
    unsigned Src0 = MI.getOperand(1).getReg();
    unsigned Src1 = MI.getOperand(2).getReg();
    const DebugLoc &DL = MI.getDebugLoc();
    unsigned SrcCond = MI.getOperand(3).getReg();

    unsigned DstLo = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
    unsigned DstHi = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);

    BuildMI(*BB, MI, DL, TII->get(AMDGPU::V_CNDMASK_B32_e64), DstLo)
      .addReg(Src0, 0, AMDGPU::sub0)
      .addReg(Src1, 0, AMDGPU::sub0)
      .addReg(SrcCond);
    BuildMI(*BB, MI, DL, TII->get(AMDGPU::V_CNDMASK_B32_e64), DstHi)
      .addReg(Src0, 0, AMDGPU::sub1)
      .addReg(Src1, 0, AMDGPU::sub1)
      .addReg(SrcCond);

    BuildMI(*BB, MI, DL, TII->get(AMDGPU::REG_SEQUENCE), Dst)
      .addReg(DstLo)
      .addImm(AMDGPU::sub0)
      .addReg(DstHi)
      .addImm(AMDGPU::sub1);
    MI.eraseFromParent();
    return BB;
  }
  default:
    return AMDGPUTargetLowering::EmitInstrWithCustomInserter(MI, BB);
  }
}

bool SITargetLowering::enableAggressiveFMAFusion(EVT VT) const {
  // This currently forces unfolding various combinations of fsub into fma with
  // free fneg'd operands. As long as we have fast FMA (controlled by
  // isFMAFasterThanFMulAndFAdd), we should perform these.

  // When fma is quarter rate, for f64 where add / sub are at best half rate,
  // most of these combines appear to be cycle neutral but save on instruction
  // count / code size.
  return true;
}

EVT SITargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &Ctx,
                                         EVT VT) const {
  if (!VT.isVector())
    return MVT::i1;

  return EVT::getVectorVT(Ctx, MVT::i1, VT.getVectorNumElements());
}

MVT SITargetLowering::getScalarShiftAmountTy(const DataLayout &, EVT) const {
  return MVT::i32;
}

// Answering this is somewhat tricky and depends on the specific device, since
// different devices have different rates for fma and the other f64 operations.
//
// v_fma_f64 and v_mul_f64 always take the same number of cycles as each other
// regardless of which device (although the number of cycles differs between
// devices), so it is always profitable for f64.
//
// v_fma_f32 takes 4 or 16 cycles depending on the device, so it is profitable
// only on full rate devices. Normally, we should prefer selecting v_mad_f32,
// which we can always do even without fused FP ops since it returns the same
// result as the separate operations and since it is always full rate.
// Therefore, we lie and report that fma is not faster for f32. v_mad_f32,
// however, does not support denormals, so we do report fma as faster if we
// have a fast fma device and denormals are required.
//
bool SITargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const {
  VT = VT.getScalarType();

  if (!VT.isSimple())
    return false;

  switch (VT.getSimpleVT().SimpleTy) {
  case MVT::f32:
    // This is as fast on some subtargets. However, we always have full rate
    // f32 mad available, which returns the same result as the separate
    // operations and which we should prefer over fma. We can't use mad if we
    // want to support denormals, so only report fma as faster in that case.
    return Subtarget->hasFP32Denormals() && Subtarget->hasFastFMAF32();
  case MVT::f64:
    return true;
  default:
    break;
  }

  return false;
}

//===----------------------------------------------------------------------===//
// Custom DAG Lowering Operations
//===----------------------------------------------------------------------===//

SDValue SITargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
  switch (Op.getOpcode()) {
  default: return AMDGPUTargetLowering::LowerOperation(Op, DAG);
  case ISD::BRCOND: return LowerBRCOND(Op, DAG);
  case ISD::LOAD: {
    SDValue Result = LowerLOAD(Op, DAG);
    assert((!Result.getNode() ||
            Result.getNode()->getNumValues() == 2) &&
           "Load should return a value and a chain");
    return Result;
  }

  case ISD::FSIN:
  case ISD::FCOS:
    return LowerTrig(Op, DAG);
  case ISD::SELECT: return LowerSELECT(Op, DAG);
  case ISD::FDIV: return LowerFDIV(Op, DAG);
  case ISD::ATOMIC_CMP_SWAP: return LowerATOMIC_CMP_SWAP(Op, DAG);
  case ISD::STORE: return LowerSTORE(Op, DAG);
  case ISD::GlobalAddress: {
    MachineFunction &MF = DAG.getMachineFunction();
    SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
    return LowerGlobalAddress(MFI, Op, DAG);
  }
  case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
  case ISD::INTRINSIC_W_CHAIN: return LowerINTRINSIC_W_CHAIN(Op, DAG);
  case ISD::INTRINSIC_VOID: return LowerINTRINSIC_VOID(Op, DAG);
  case ISD::ADDRSPACECAST: return lowerADDRSPACECAST(Op, DAG);
  case ISD::TRAP: return lowerTRAP(Op, DAG);

  case ISD::ConstantFP:
    return lowerConstantFP(Op, DAG);
  case ISD::FP_TO_SINT:
  case ISD::FP_TO_UINT:
    return lowerFpToInt(Op, DAG);
  case ISD::SINT_TO_FP:
  case ISD::UINT_TO_FP:
    return lowerIntToFp(Op, DAG);
  }
  return SDValue();
}
1838
Tom Stellardf8794352012-12-19 22:10:31 +00001839/// \brief Helper function for LowerBRCOND
1840static SDNode *findUser(SDValue Value, unsigned Opcode) {
Tom Stellard75aadc22012-12-11 21:25:42 +00001841
Tom Stellardf8794352012-12-19 22:10:31 +00001842 SDNode *Parent = Value.getNode();
1843 for (SDNode::use_iterator I = Parent->use_begin(), E = Parent->use_end();
1844 I != E; ++I) {
1845
1846 if (I.getUse().get() != Value)
1847 continue;
1848
1849 if (I->getOpcode() == Opcode)
1850 return *I;
1851 }
Craig Topper062a2ba2014-04-25 05:30:21 +00001852 return nullptr;
Tom Stellardf8794352012-12-19 22:10:31 +00001853}

bool SITargetLowering::isCFIntrinsic(const SDNode *Intr) const {
  if (Intr->getOpcode() == ISD::INTRINSIC_W_CHAIN) {
    switch (cast<ConstantSDNode>(Intr->getOperand(1))->getZExtValue()) {
    case AMDGPUIntrinsic::amdgcn_if:
    case AMDGPUIntrinsic::amdgcn_else:
    case AMDGPUIntrinsic::amdgcn_end_cf:
    case AMDGPUIntrinsic::amdgcn_loop:
      return true;
    default:
      return false;
    }
  }

  if (Intr->getOpcode() == ISD::INTRINSIC_WO_CHAIN) {
    switch (cast<ConstantSDNode>(Intr->getOperand(0))->getZExtValue()) {
    case AMDGPUIntrinsic::amdgcn_break:
    case AMDGPUIntrinsic::amdgcn_if_break:
    case AMDGPUIntrinsic::amdgcn_else_break:
      return true;
    default:
      return false;
    }
  }

  return false;
}

void SITargetLowering::createDebuggerPrologueStackObjects(
    MachineFunction &MF) const {
  // Create stack objects that are used for emitting debugger prologue.
  //
  // Debugger prologue writes work group IDs and work item IDs to scratch
  // memory at a fixed location in the following format:
  //   offset 0:  work group ID x
  //   offset 4:  work group ID y
  //   offset 8:  work group ID z
  //   offset 16: work item ID x
  //   offset 20: work item ID y
  //   offset 24: work item ID z
  SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
  int ObjectIdx = 0;

  // For each dimension:
  for (unsigned i = 0; i < 3; ++i) {
    // Create fixed stack object for work group ID.
    ObjectIdx = MF.getFrameInfo().CreateFixedObject(4, i * 4, true);
    Info->setDebuggerWorkGroupIDStackObjectIndex(i, ObjectIdx);
    // Create fixed stack object for work item ID.
    ObjectIdx = MF.getFrameInfo().CreateFixedObject(4, i * 4 + 16, true);
    Info->setDebuggerWorkItemIDStackObjectIndex(i, ObjectIdx);
  }
}

bool SITargetLowering::shouldEmitFixup(const GlobalValue *GV) const {
  const Triple &TT = getTargetMachine().getTargetTriple();
  return GV->getType()->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS &&
         AMDGPU::shouldEmitConstantsToTextSection(TT);
}

bool SITargetLowering::shouldEmitGOTReloc(const GlobalValue *GV) const {
  return (GV->getType()->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS ||
          GV->getType()->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS) &&
         !shouldEmitFixup(GV) &&
         !getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV);
}

bool SITargetLowering::shouldEmitPCReloc(const GlobalValue *GV) const {
  return !shouldEmitFixup(GV) && !shouldEmitGOTReloc(GV);
}

/// This transforms the control flow intrinsics to get the branch destination
/// as the last parameter, and also switches the branch target with BR if the
/// need arises.
SDValue SITargetLowering::LowerBRCOND(SDValue BRCOND,
                                      SelectionDAG &DAG) const {
  SDLoc DL(BRCOND);

  SDNode *Intr = BRCOND.getOperand(1).getNode();
  SDValue Target = BRCOND.getOperand(2);
  SDNode *BR = nullptr;
  SDNode *SetCC = nullptr;

  if (Intr->getOpcode() == ISD::SETCC) {
    // As long as we negate the condition everything is fine.
    SetCC = Intr;
    Intr = SetCC->getOperand(0).getNode();
  } else {
    // Get the target from BR if we don't negate the condition.
    BR = findUser(BRCOND, ISD::BR);
    Target = BR->getOperand(1);
  }

  // FIXME: This changes the types of the intrinsics instead of introducing new
  // nodes with the correct types.
  // e.g. llvm.amdgcn.loop

  // e.g.: i1,ch = llvm.amdgcn.loop t0, TargetConstant:i32<6271>, t3
  // =>     t9: ch = llvm.amdgcn.loop t0, TargetConstant:i32<6271>, t3, BasicBlock:ch<bb1 0x7fee5286d088>

  if (!isCFIntrinsic(Intr)) {
    // This is a uniform branch so we don't need to legalize.
    return BRCOND;
  }

  bool HaveChain = Intr->getOpcode() == ISD::INTRINSIC_VOID ||
                   Intr->getOpcode() == ISD::INTRINSIC_W_CHAIN;

  assert(!SetCC ||
        (SetCC->getConstantOperandVal(1) == 1 &&
         cast<CondCodeSDNode>(SetCC->getOperand(2).getNode())->get() ==
                              ISD::SETNE));

  // Operands of the new intrinsic call.
  SmallVector<SDValue, 4> Ops;
  if (HaveChain)
    Ops.push_back(BRCOND.getOperand(0));

  Ops.append(Intr->op_begin() + (HaveChain ? 1 : 0), Intr->op_end());
  Ops.push_back(Target);

  ArrayRef<EVT> Res(Intr->value_begin() + 1, Intr->value_end());

  // Build the new intrinsic call.
  SDNode *Result = DAG.getNode(
    Res.size() > 1 ? ISD::INTRINSIC_W_CHAIN : ISD::INTRINSIC_VOID, DL,
    DAG.getVTList(Res), Ops).getNode();

  if (!HaveChain) {
    SDValue Ops[] = {
      SDValue(Result, 0),
      BRCOND.getOperand(0)
    };

    Result = DAG.getMergeValues(Ops, DL).getNode();
  }

  if (BR) {
    // Give the branch instruction our target.
    SDValue Ops[] = {
      BR->getOperand(0),
      BRCOND.getOperand(2)
    };
    SDValue NewBR = DAG.getNode(ISD::BR, DL, BR->getVTList(), Ops);
    DAG.ReplaceAllUsesWith(BR, NewBR.getNode());
    BR = NewBR.getNode();
  }

  SDValue Chain = SDValue(Result, Result->getNumValues() - 1);

  // Copy the intrinsic results to registers.
  for (unsigned i = 1, e = Intr->getNumValues() - 1; i != e; ++i) {
    SDNode *CopyToReg = findUser(SDValue(Intr, i), ISD::CopyToReg);
    if (!CopyToReg)
      continue;

    Chain = DAG.getCopyToReg(
      Chain, DL,
      CopyToReg->getOperand(1),
      SDValue(Result, i - 1),
      SDValue());

    DAG.ReplaceAllUsesWith(SDValue(CopyToReg, 0), CopyToReg->getOperand(0));
  }

  // Remove the old intrinsic from the chain.
  DAG.ReplaceAllUsesOfValueWith(
    SDValue(Intr, Intr->getNumValues() - 1),
    Intr->getOperand(0));

  return Chain;
}

SDValue SITargetLowering::getFPExtOrFPTrunc(SelectionDAG &DAG,
                                            SDValue Op,
                                            const SDLoc &DL,
                                            EVT VT) const {
  if (Op.getValueType().bitsLE(VT))
    return DAG.getNode(ISD::FP_EXTEND, DL, VT, Op);

  // Narrow with FP_ROUND; the 0 flag means the value is not known to already
  // fit in the narrower type. (ISD::FTRUNC is the same-type round-to-integral
  // operation, not a precision change.)
  return DAG.getNode(ISD::FP_ROUND, DL, VT, Op, DAG.getIntPtrConstant(0, DL));
}

SDValue SITargetLowering::lowerConstantFP(SDValue Op, SelectionDAG &DAG) const {
  if (ConstantFPSDNode *FP = dyn_cast<ConstantFPSDNode>(Op)) {
    return DAG.getConstant(FP->getValueAPF().bitcastToAPInt().getZExtValue(),
                           SDLoc(Op), MVT::i32);
  }

  return SDValue();
}

SDValue SITargetLowering::lowerFpToInt(SDValue Op, SelectionDAG &DAG) const {
  EVT DstVT = Op.getValueType();
  EVT SrcVT = Op.getOperand(0).getValueType();
  if (DstVT == MVT::i64) {
    return Op.getOpcode() == ISD::FP_TO_SINT ?
      AMDGPUTargetLowering::LowerFP_TO_SINT(Op, DAG) :
      AMDGPUTargetLowering::LowerFP_TO_UINT(Op, DAG);
  }

  if (SrcVT == MVT::f16)
    return Op;

  SDLoc DL(Op);
  SDValue OrigSrc = Op.getOperand(0);
  SDValue FPRoundFlag = DAG.getIntPtrConstant(0, DL);
  SDValue FPRoundSrc =
    DAG.getNode(ISD::FP_ROUND, DL, MVT::f16, OrigSrc, FPRoundFlag);

  return DAG.getNode(Op.getOpcode(), DL, DstVT, FPRoundSrc);
}

SDValue SITargetLowering::lowerIntToFp(SDValue Op, SelectionDAG &DAG) const {
  EVT DstVT = Op.getValueType();
  EVT SrcVT = Op.getOperand(0).getValueType();
  if (SrcVT == MVT::i64) {
    return Op.getOpcode() == ISD::SINT_TO_FP ?
      AMDGPUTargetLowering::LowerSINT_TO_FP(Op, DAG) :
      AMDGPUTargetLowering::LowerUINT_TO_FP(Op, DAG);
  }

  if (DstVT == MVT::f16)
    return Op;

  SDLoc DL(Op);
  SDValue OrigSrc = Op.getOperand(0);
  SDValue SExtOrZExtOrTruncSrc = Op.getOpcode() == ISD::SINT_TO_FP ?
    DAG.getSExtOrTrunc(OrigSrc, DL, MVT::i32) :
    DAG.getZExtOrTrunc(OrigSrc, DL, MVT::i32);

  return DAG.getNode(Op.getOpcode(), DL, DstVT, SExtOrZExtOrTruncSrc);
}

SDValue SITargetLowering::getSegmentAperture(unsigned AS,
                                             SelectionDAG &DAG) const {
  SDLoc SL;
  MachineFunction &MF = DAG.getMachineFunction();
  SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
  unsigned UserSGPR = Info->getQueuePtrUserSGPR();
  assert(UserSGPR != AMDGPU::NoRegister);

  SDValue QueuePtr = CreateLiveInRegister(
    DAG, &AMDGPU::SReg_64RegClass, UserSGPR, MVT::i64);

  // Offset into amd_queue_t for group_segment_aperture_base_hi /
  // private_segment_aperture_base_hi.
  uint32_t StructOffset = (AS == AMDGPUAS::LOCAL_ADDRESS) ? 0x40 : 0x44;

  SDValue Ptr = DAG.getNode(ISD::ADD, SL, MVT::i64, QueuePtr,
                            DAG.getConstant(StructOffset, SL, MVT::i64));

  // TODO: Use custom target PseudoSourceValue.
  // TODO: We should use the value from the IR intrinsic call, but it might not
  // be available and how do we get it?
  Value *V = UndefValue::get(PointerType::get(Type::getInt8Ty(*DAG.getContext()),
                                              AMDGPUAS::CONSTANT_ADDRESS));

  MachinePointerInfo PtrInfo(V, StructOffset);
  return DAG.getLoad(MVT::i32, SL, QueuePtr.getValue(1), Ptr, PtrInfo,
                     MinAlign(64, StructOffset),
                     MachineMemOperand::MODereferenceable |
                         MachineMemOperand::MOInvariant);
}

SDValue SITargetLowering::lowerADDRSPACECAST(SDValue Op,
                                             SelectionDAG &DAG) const {
  SDLoc SL(Op);
  const AddrSpaceCastSDNode *ASC = cast<AddrSpaceCastSDNode>(Op);

  SDValue Src = ASC->getOperand(0);

  // FIXME: Really support non-0 null pointers.
  SDValue SegmentNullPtr = DAG.getConstant(-1, SL, MVT::i32);
  SDValue FlatNullPtr = DAG.getConstant(0, SL, MVT::i64);

  // flat -> local/private
  if (ASC->getSrcAddressSpace() == AMDGPUAS::FLAT_ADDRESS) {
    if (ASC->getDestAddressSpace() == AMDGPUAS::LOCAL_ADDRESS ||
        ASC->getDestAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS) {
      SDValue NonNull = DAG.getSetCC(SL, MVT::i1, Src, FlatNullPtr, ISD::SETNE);
      SDValue Ptr = DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, Src);

      return DAG.getNode(ISD::SELECT, SL, MVT::i32,
                         NonNull, Ptr, SegmentNullPtr);
    }
  }

  // local/private -> flat
  if (ASC->getDestAddressSpace() == AMDGPUAS::FLAT_ADDRESS) {
    if (ASC->getSrcAddressSpace() == AMDGPUAS::LOCAL_ADDRESS ||
        ASC->getSrcAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS) {
      SDValue NonNull
        = DAG.getSetCC(SL, MVT::i1, Src, SegmentNullPtr, ISD::SETNE);

      SDValue Aperture = getSegmentAperture(ASC->getSrcAddressSpace(), DAG);
      SDValue CvtPtr
        = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32, Src, Aperture);

      return DAG.getNode(ISD::SELECT, SL, MVT::i64, NonNull,
                         DAG.getNode(ISD::BITCAST, SL, MVT::i64, CvtPtr),
                         FlatNullPtr);
    }
  }

  // global <-> flat are no-ops and never emitted.

  const MachineFunction &MF = DAG.getMachineFunction();
  DiagnosticInfoUnsupported InvalidAddrSpaceCast(
    *MF.getFunction(), "invalid addrspacecast", SL.getDebugLoc());
  DAG.getContext()->diagnose(InvalidAddrSpaceCast);

  return DAG.getUNDEF(ASC->getValueType(0));
}

bool
SITargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
  // We can fold offsets for anything that doesn't require a GOT relocation.
  return (GA->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS ||
          GA->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS) &&
         !shouldEmitGOTReloc(GA->getGlobal());
}

static SDValue buildPCRelGlobalAddress(SelectionDAG &DAG, const GlobalValue *GV,
                                       SDLoc DL, unsigned Offset, EVT PtrVT,
                                       unsigned GAFlags = SIInstrInfo::MO_NONE) {
  // In order to support pc-relative addressing, the PC_ADD_REL_OFFSET SDNode
  // is lowered to the following code sequence:
  //
  // For constant address space:
  //   s_getpc_b64 s[0:1]
  //   s_add_u32 s0, s0, $symbol
  //   s_addc_u32 s1, s1, 0
  //
  //   s_getpc_b64 returns the address of the s_add_u32 instruction and then
  //   a fixup or relocation is emitted to replace $symbol with a literal
  //   constant, which is a pc-relative offset from the encoding of the $symbol
  //   operand to the global variable.
  //
  // For global address space:
  //   s_getpc_b64 s[0:1]
  //   s_add_u32 s0, s0, $symbol@{gotpc}rel32@lo
  //   s_addc_u32 s1, s1, $symbol@{gotpc}rel32@hi
  //
  //   s_getpc_b64 returns the address of the s_add_u32 instruction and then
  //   fixups or relocations are emitted to replace $symbol@*@lo and
  //   $symbol@*@hi with the lower 32 bits and higher 32 bits of a literal
  //   constant, which is a 64-bit pc-relative offset from the encoding of the
  //   $symbol operand to the global variable.
  //
  // What we want here is an offset from the value returned by s_getpc
  // (which is the address of the s_add_u32 instruction) to the global
  // variable, but since the encoding of $symbol starts 4 bytes after the start
  // of the s_add_u32 instruction, we end up with an offset that is 4 bytes too
  // small. This requires us to add 4 to the global variable offset in order to
  // compute the correct address.
  SDValue PtrLo = DAG.getTargetGlobalAddress(GV, DL, MVT::i32, Offset + 4,
                                             GAFlags);
  SDValue PtrHi = DAG.getTargetGlobalAddress(GV, DL, MVT::i32, Offset + 4,
                                             GAFlags == SIInstrInfo::MO_NONE ?
                                             GAFlags : GAFlags + 1);
  return DAG.getNode(AMDGPUISD::PC_ADD_REL_OFFSET, DL, PtrVT, PtrLo, PtrHi);
}

SDValue SITargetLowering::LowerGlobalAddress(AMDGPUMachineFunction *MFI,
                                             SDValue Op,
                                             SelectionDAG &DAG) const {
  GlobalAddressSDNode *GSD = cast<GlobalAddressSDNode>(Op);

  if (GSD->getAddressSpace() != AMDGPUAS::CONSTANT_ADDRESS &&
      GSD->getAddressSpace() != AMDGPUAS::GLOBAL_ADDRESS)
    return AMDGPUTargetLowering::LowerGlobalAddress(MFI, Op, DAG);

  SDLoc DL(GSD);
  const GlobalValue *GV = GSD->getGlobal();
  EVT PtrVT = Op.getValueType();

  if (shouldEmitFixup(GV))
    return buildPCRelGlobalAddress(DAG, GV, DL, GSD->getOffset(), PtrVT);
  else if (shouldEmitPCReloc(GV))
    return buildPCRelGlobalAddress(DAG, GV, DL, GSD->getOffset(), PtrVT,
                                   SIInstrInfo::MO_REL32);

  SDValue GOTAddr = buildPCRelGlobalAddress(DAG, GV, DL, 0, PtrVT,
                                            SIInstrInfo::MO_GOTPCREL32);

  Type *Ty = PtrVT.getTypeForEVT(*DAG.getContext());
  PointerType *PtrTy = PointerType::get(Ty, AMDGPUAS::CONSTANT_ADDRESS);
  const DataLayout &DataLayout = DAG.getDataLayout();
  unsigned Align = DataLayout.getABITypeAlignment(PtrTy);
  // FIXME: Use a PseudoSourceValue once those can be assigned an address space.
  MachinePointerInfo PtrInfo(UndefValue::get(PtrTy));

  return DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), GOTAddr, PtrInfo, Align,
                     MachineMemOperand::MODereferenceable |
                         MachineMemOperand::MOInvariant);
}

SDValue SITargetLowering::lowerTRAP(SDValue Op,
                                    SelectionDAG &DAG) const {
  const MachineFunction &MF = DAG.getMachineFunction();
  DiagnosticInfoUnsupported NoTrap(*MF.getFunction(),
                                   "trap handler not supported",
                                   Op.getDebugLoc(),
                                   DS_Warning);
  DAG.getContext()->diagnose(NoTrap);

  // Emit s_endpgm.

  // FIXME: This should really be selected to s_trap, but that requires
  // setting up the trap handler for it to do anything.
  return DAG.getNode(AMDGPUISD::ENDPGM, SDLoc(Op), MVT::Other,
                     Op.getOperand(0));
}

SDValue SITargetLowering::copyToM0(SelectionDAG &DAG, SDValue Chain,
                                   const SDLoc &DL, SDValue V) const {
  // We can't use S_MOV_B32 directly, because there is no way to specify m0 as
  // the destination register.
  //
  // We can't use CopyToReg, because MachineCSE won't combine COPY instructions,
  // so we will end up with redundant moves to m0.
  //
  // We use a pseudo to ensure we emit s_mov_b32 with m0 as the direct result.

  // A Null SDValue creates a glue result.
  SDNode *M0 = DAG.getMachineNode(AMDGPU::SI_INIT_M0, DL, MVT::Other, MVT::Glue,
                                  V, Chain);
  return SDValue(M0, 0);
}

SDValue SITargetLowering::lowerImplicitZextParam(SelectionDAG &DAG,
                                                 SDValue Op,
                                                 MVT VT,
                                                 unsigned Offset) const {
  SDLoc SL(Op);
  SDValue Param = LowerParameter(DAG, MVT::i32, MVT::i32, SL,
                                 DAG.getEntryNode(), Offset, false);
  // The local size values will have the hi 16-bits as zero.
  return DAG.getNode(ISD::AssertZext, SL, MVT::i32, Param,
                     DAG.getValueType(VT));
}

static SDValue emitNonHSAIntrinsicError(SelectionDAG &DAG, SDLoc DL, EVT VT) {
  DiagnosticInfoUnsupported BadIntrin(*DAG.getMachineFunction().getFunction(),
                                      "non-hsa intrinsic with hsa target",
                                      DL.getDebugLoc());
  DAG.getContext()->diagnose(BadIntrin);
  return DAG.getUNDEF(VT);
}

static SDValue emitRemovedIntrinsicError(SelectionDAG &DAG, SDLoc DL, EVT VT) {
  DiagnosticInfoUnsupported BadIntrin(*DAG.getMachineFunction().getFunction(),
                                      "intrinsic not supported on subtarget",
                                      DL.getDebugLoc());
  DAG.getContext()->diagnose(BadIntrin);
  return DAG.getUNDEF(VT);
}

SDValue SITargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
                                                  SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  auto MFI = MF.getInfo<SIMachineFunctionInfo>();
  const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();

  EVT VT = Op.getValueType();
  SDLoc DL(Op);
  unsigned IntrinsicID = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();

  // TODO: Should this propagate fast-math-flags?

  switch (IntrinsicID) {
  case Intrinsic::amdgcn_dispatch_ptr:
  case Intrinsic::amdgcn_queue_ptr: {
    if (!Subtarget->isAmdCodeObjectV2()) {
      DiagnosticInfoUnsupported BadIntrin(
        *MF.getFunction(), "unsupported hsa intrinsic without hsa target",
        DL.getDebugLoc());
      DAG.getContext()->diagnose(BadIntrin);
      return DAG.getUNDEF(VT);
    }

    auto Reg = IntrinsicID == Intrinsic::amdgcn_dispatch_ptr ?
      SIRegisterInfo::DISPATCH_PTR : SIRegisterInfo::QUEUE_PTR;
    return CreateLiveInRegister(DAG, &AMDGPU::SReg_64RegClass,
                                TRI->getPreloadedValue(MF, Reg), VT);
  }
  case Intrinsic::amdgcn_implicitarg_ptr: {
    unsigned Offset = getImplicitParameterOffset(MFI, FIRST_IMPLICIT);
    return LowerParameterPtr(DAG, DL, DAG.getEntryNode(), Offset);
  }
  case Intrinsic::amdgcn_kernarg_segment_ptr: {
    unsigned Reg
      = TRI->getPreloadedValue(MF, SIRegisterInfo::KERNARG_SEGMENT_PTR);
    return CreateLiveInRegister(DAG, &AMDGPU::SReg_64RegClass, Reg, VT);
  }
  case Intrinsic::amdgcn_dispatch_id: {
    unsigned Reg = TRI->getPreloadedValue(MF, SIRegisterInfo::DISPATCH_ID);
    return CreateLiveInRegister(DAG, &AMDGPU::SReg_64RegClass, Reg, VT);
  }
  case Intrinsic::amdgcn_rcp:
    return DAG.getNode(AMDGPUISD::RCP, DL, VT, Op.getOperand(1));
  case Intrinsic::amdgcn_rsq:
  case AMDGPUIntrinsic::AMDGPU_rsq: // Legacy name
    return DAG.getNode(AMDGPUISD::RSQ, DL, VT, Op.getOperand(1));
  case Intrinsic::amdgcn_rsq_legacy: {
    if (Subtarget->getGeneration() >= SISubtarget::VOLCANIC_ISLANDS)
      return emitRemovedIntrinsicError(DAG, DL, VT);

    return DAG.getNode(AMDGPUISD::RSQ_LEGACY, DL, VT, Op.getOperand(1));
  }
  case Intrinsic::amdgcn_rcp_legacy: {
    if (Subtarget->getGeneration() >= SISubtarget::VOLCANIC_ISLANDS)
      return emitRemovedIntrinsicError(DAG, DL, VT);
    return DAG.getNode(AMDGPUISD::RCP_LEGACY, DL, VT, Op.getOperand(1));
  }
  case Intrinsic::amdgcn_rsq_clamp: {
    if (Subtarget->getGeneration() < SISubtarget::VOLCANIC_ISLANDS)
      return DAG.getNode(AMDGPUISD::RSQ_CLAMP, DL, VT, Op.getOperand(1));

    Type *Type = VT.getTypeForEVT(*DAG.getContext());
    APFloat Max = APFloat::getLargest(Type->getFltSemantics());
    APFloat Min = APFloat::getLargest(Type->getFltSemantics(), true);

    SDValue Rsq = DAG.getNode(AMDGPUISD::RSQ, DL, VT, Op.getOperand(1));
    SDValue Tmp = DAG.getNode(ISD::FMINNUM, DL, VT, Rsq,
                              DAG.getConstantFP(Max, DL, VT));
    return DAG.getNode(ISD::FMAXNUM, DL, VT, Tmp,
                       DAG.getConstantFP(Min, DL, VT));
  }
  case Intrinsic::r600_read_ngroups_x:
    if (Subtarget->isAmdHsaOS())
      return emitNonHSAIntrinsicError(DAG, DL, VT);

    return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
                          SI::KernelInputOffsets::NGROUPS_X, false);
  case Intrinsic::r600_read_ngroups_y:
    if (Subtarget->isAmdHsaOS())
      return emitNonHSAIntrinsicError(DAG, DL, VT);

    return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
                          SI::KernelInputOffsets::NGROUPS_Y, false);
  case Intrinsic::r600_read_ngroups_z:
    if (Subtarget->isAmdHsaOS())
      return emitNonHSAIntrinsicError(DAG, DL, VT);

    return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
                          SI::KernelInputOffsets::NGROUPS_Z, false);
  case Intrinsic::r600_read_global_size_x:
    if (Subtarget->isAmdHsaOS())
Matt Arsenaulte0132462016-01-30 05:19:45 +00002403 if (Subtarget->isAmdHsaOS())
Matt Arsenaultb6d8c372016-06-20 18:33:56 +00002404 return emitNonHSAIntrinsicError(DAG, DL, VT);
Matt Arsenaulte0132462016-01-30 05:19:45 +00002405
Tom Stellardec2e43c2014-09-22 15:35:29 +00002406 return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
2407 SI::KernelInputOffsets::GLOBAL_SIZE_X, false);
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00002408 case Intrinsic::r600_read_global_size_y:
Matt Arsenaulte0132462016-01-30 05:19:45 +00002409 if (Subtarget->isAmdHsaOS())
Matt Arsenaultb6d8c372016-06-20 18:33:56 +00002410 return emitNonHSAIntrinsicError(DAG, DL, VT);
Matt Arsenaulte0132462016-01-30 05:19:45 +00002411
Tom Stellardec2e43c2014-09-22 15:35:29 +00002412 return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
2413 SI::KernelInputOffsets::GLOBAL_SIZE_Y, false);
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00002414 case Intrinsic::r600_read_global_size_z:
Matt Arsenaulte0132462016-01-30 05:19:45 +00002415 if (Subtarget->isAmdHsaOS())
Matt Arsenaultb6d8c372016-06-20 18:33:56 +00002416 return emitNonHSAIntrinsicError(DAG, DL, VT);
Matt Arsenaulte0132462016-01-30 05:19:45 +00002417
Tom Stellardec2e43c2014-09-22 15:35:29 +00002418 return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
2419 SI::KernelInputOffsets::GLOBAL_SIZE_Z, false);
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00002420 case Intrinsic::r600_read_local_size_x:
Matt Arsenaulte0132462016-01-30 05:19:45 +00002421 if (Subtarget->isAmdHsaOS())
Matt Arsenaultb6d8c372016-06-20 18:33:56 +00002422 return emitNonHSAIntrinsicError(DAG, DL, VT);
Matt Arsenaulte0132462016-01-30 05:19:45 +00002423
Matt Arsenaultff6da2f2015-11-30 21:15:45 +00002424 return lowerImplicitZextParam(DAG, Op, MVT::i16,
2425 SI::KernelInputOffsets::LOCAL_SIZE_X);
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00002426 case Intrinsic::r600_read_local_size_y:
Matt Arsenaulte0132462016-01-30 05:19:45 +00002427 if (Subtarget->isAmdHsaOS())
Matt Arsenaultb6d8c372016-06-20 18:33:56 +00002428 return emitNonHSAIntrinsicError(DAG, DL, VT);
Matt Arsenaulte0132462016-01-30 05:19:45 +00002429
Matt Arsenaultff6da2f2015-11-30 21:15:45 +00002430 return lowerImplicitZextParam(DAG, Op, MVT::i16,
2431 SI::KernelInputOffsets::LOCAL_SIZE_Y);
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00002432 case Intrinsic::r600_read_local_size_z:
Matt Arsenaulte0132462016-01-30 05:19:45 +00002433 if (Subtarget->isAmdHsaOS())
Matt Arsenaultb6d8c372016-06-20 18:33:56 +00002434 return emitNonHSAIntrinsicError(DAG, DL, VT);
Matt Arsenaulte0132462016-01-30 05:19:45 +00002435
Matt Arsenaultff6da2f2015-11-30 21:15:45 +00002436 return lowerImplicitZextParam(DAG, Op, MVT::i16,
2437 SI::KernelInputOffsets::LOCAL_SIZE_Z);
Matt Arsenault43976df2016-01-30 04:25:19 +00002438 case Intrinsic::amdgcn_workgroup_id_x:
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00002439 case Intrinsic::r600_read_tgid_x:
2440 return CreateLiveInRegister(DAG, &AMDGPU::SReg_32RegClass,
Matt Arsenaultac234b62015-11-30 21:15:57 +00002441 TRI->getPreloadedValue(MF, SIRegisterInfo::WORKGROUP_ID_X), VT);
Matt Arsenault43976df2016-01-30 04:25:19 +00002442 case Intrinsic::amdgcn_workgroup_id_y:
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00002443 case Intrinsic::r600_read_tgid_y:
2444 return CreateLiveInRegister(DAG, &AMDGPU::SReg_32RegClass,
Matt Arsenaultac234b62015-11-30 21:15:57 +00002445 TRI->getPreloadedValue(MF, SIRegisterInfo::WORKGROUP_ID_Y), VT);
Matt Arsenault43976df2016-01-30 04:25:19 +00002446 case Intrinsic::amdgcn_workgroup_id_z:
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00002447 case Intrinsic::r600_read_tgid_z:
2448 return CreateLiveInRegister(DAG, &AMDGPU::SReg_32RegClass,
Matt Arsenaultac234b62015-11-30 21:15:57 +00002449 TRI->getPreloadedValue(MF, SIRegisterInfo::WORKGROUP_ID_Z), VT);
Matt Arsenault43976df2016-01-30 04:25:19 +00002450 case Intrinsic::amdgcn_workitem_id_x:
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00002451 case Intrinsic::r600_read_tidig_x:
Tom Stellard45c0b3a2015-01-07 20:59:25 +00002452 return CreateLiveInRegister(DAG, &AMDGPU::VGPR_32RegClass,
Matt Arsenaultac234b62015-11-30 21:15:57 +00002453 TRI->getPreloadedValue(MF, SIRegisterInfo::WORKITEM_ID_X), VT);
Matt Arsenault43976df2016-01-30 04:25:19 +00002454 case Intrinsic::amdgcn_workitem_id_y:
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00002455 case Intrinsic::r600_read_tidig_y:
Tom Stellard45c0b3a2015-01-07 20:59:25 +00002456 return CreateLiveInRegister(DAG, &AMDGPU::VGPR_32RegClass,
Matt Arsenaultac234b62015-11-30 21:15:57 +00002457 TRI->getPreloadedValue(MF, SIRegisterInfo::WORKITEM_ID_Y), VT);
Matt Arsenault43976df2016-01-30 04:25:19 +00002458 case Intrinsic::amdgcn_workitem_id_z:
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00002459 case Intrinsic::r600_read_tidig_z:
Tom Stellard45c0b3a2015-01-07 20:59:25 +00002460 return CreateLiveInRegister(DAG, &AMDGPU::VGPR_32RegClass,
Matt Arsenaultac234b62015-11-30 21:15:57 +00002461 TRI->getPreloadedValue(MF, SIRegisterInfo::WORKITEM_ID_Z), VT);
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00002462 case AMDGPUIntrinsic::SI_load_const: {
2463 SDValue Ops[] = {
2464 Op.getOperand(1),
2465 Op.getOperand(2)
2466 };
2467
2468 MachineMemOperand *MMO = MF.getMachineMemOperand(
Justin Lebaradbf09e2016-09-11 01:38:58 +00002469 MachinePointerInfo(),
2470 MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable |
2471 MachineMemOperand::MOInvariant,
2472 VT.getStoreSize(), 4);
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00002473 return DAG.getMemIntrinsicNode(AMDGPUISD::LOAD_CONSTANT, DL,
2474 Op->getVTList(), Ops, VT, MMO);
2475 }
Matt Arsenaulta1fe17c2016-07-19 23:16:53 +00002476 case AMDGPUIntrinsic::amdgcn_fdiv_fast: {
2477 return lowerFDIV_FAST(Op, DAG);
2478 }
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00002479 case AMDGPUIntrinsic::SI_vs_load_input:
2480 return DAG.getNode(AMDGPUISD::LOAD_INPUT, DL, VT,
2481 Op.getOperand(1),
2482 Op.getOperand(2),
2483 Op.getOperand(3));
Marek Olsak43650e42015-03-24 13:40:08 +00002484
Tom Stellard2a9d9472015-05-12 15:00:46 +00002485 case AMDGPUIntrinsic::SI_fs_constant: {
2486 SDValue M0 = copyToM0(DAG, DAG.getEntryNode(), DL, Op.getOperand(3));
2487 SDValue Glue = M0.getValue(1);
2488 return DAG.getNode(AMDGPUISD::INTERP_MOV, DL, MVT::f32,
2489 DAG.getConstant(2, DL, MVT::i32), // P0
2490 Op.getOperand(1), Op.getOperand(2), Glue);
2491 }
Marek Olsak6f6d3182015-10-29 15:29:09 +00002492 case AMDGPUIntrinsic::SI_packf16:
2493 if (Op.getOperand(1).isUndef() && Op.getOperand(2).isUndef())
2494 return DAG.getUNDEF(MVT::i32);
2495 return Op;
Tom Stellard2a9d9472015-05-12 15:00:46 +00002496 case AMDGPUIntrinsic::SI_fs_interp: {
2497 SDValue IJ = Op.getOperand(4);
2498 SDValue I = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, IJ,
2499 DAG.getConstant(0, DL, MVT::i32));
2500 SDValue J = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, IJ,
2501 DAG.getConstant(1, DL, MVT::i32));
2502 SDValue M0 = copyToM0(DAG, DAG.getEntryNode(), DL, Op.getOperand(3));
2503 SDValue Glue = M0.getValue(1);
2504 SDValue P1 = DAG.getNode(AMDGPUISD::INTERP_P1, DL,
2505 DAG.getVTList(MVT::f32, MVT::Glue),
2506 I, Op.getOperand(1), Op.getOperand(2), Glue);
2507 Glue = SDValue(P1.getNode(), 1);
2508 return DAG.getNode(AMDGPUISD::INTERP_P2, DL, MVT::f32, P1, J,
2509 Op.getOperand(1), Op.getOperand(2), Glue);
2510 }
Tom Stellardad7d03d2015-12-15 17:02:49 +00002511 case Intrinsic::amdgcn_interp_p1: {
2512 SDValue M0 = copyToM0(DAG, DAG.getEntryNode(), DL, Op.getOperand(4));
2513 SDValue Glue = M0.getValue(1);
2514 return DAG.getNode(AMDGPUISD::INTERP_P1, DL, MVT::f32, Op.getOperand(1),
2515 Op.getOperand(2), Op.getOperand(3), Glue);
2516 }
2517 case Intrinsic::amdgcn_interp_p2: {
2518 SDValue M0 = copyToM0(DAG, DAG.getEntryNode(), DL, Op.getOperand(5));
2519 SDValue Glue = SDValue(M0.getNode(), 1);
2520 return DAG.getNode(AMDGPUISD::INTERP_P2, DL, MVT::f32, Op.getOperand(1),
2521 Op.getOperand(2), Op.getOperand(3), Op.getOperand(4),
2522 Glue);
2523 }
Matt Arsenaultce56a0e2016-02-13 01:19:56 +00002524 case Intrinsic::amdgcn_sin:
2525 return DAG.getNode(AMDGPUISD::SIN_HW, DL, VT, Op.getOperand(1));
2526
2527 case Intrinsic::amdgcn_cos:
2528 return DAG.getNode(AMDGPUISD::COS_HW, DL, VT, Op.getOperand(1));
2529
2530 case Intrinsic::amdgcn_log_clamp: {
Matt Arsenault43e92fe2016-06-24 06:30:11 +00002531 if (Subtarget->getGeneration() < SISubtarget::VOLCANIC_ISLANDS)
Matt Arsenaultce56a0e2016-02-13 01:19:56 +00002532 return SDValue();
2533
2534 DiagnosticInfoUnsupported BadIntrin(
2535 *MF.getFunction(), "intrinsic not supported on subtarget",
2536 DL.getDebugLoc());
2537 DAG.getContext()->diagnose(BadIntrin);
2538 return DAG.getUNDEF(VT);
2539 }
Matt Arsenaultf75257a2016-01-23 05:32:20 +00002540 case Intrinsic::amdgcn_ldexp:
2541 return DAG.getNode(AMDGPUISD::LDEXP, DL, VT,
2542 Op.getOperand(1), Op.getOperand(2));
Matt Arsenault74015162016-05-28 00:19:52 +00002543
2544 case Intrinsic::amdgcn_fract:
2545 return DAG.getNode(AMDGPUISD::FRACT, DL, VT, Op.getOperand(1));
2546
Matt Arsenaultf75257a2016-01-23 05:32:20 +00002547 case Intrinsic::amdgcn_class:
2548 return DAG.getNode(AMDGPUISD::FP_CLASS, DL, VT,
2549 Op.getOperand(1), Op.getOperand(2));
2550 case Intrinsic::amdgcn_div_fmas:
2551 return DAG.getNode(AMDGPUISD::DIV_FMAS, DL, VT,
2552 Op.getOperand(1), Op.getOperand(2), Op.getOperand(3),
2553 Op.getOperand(4));
2554
2555 case Intrinsic::amdgcn_div_fixup:
2556 return DAG.getNode(AMDGPUISD::DIV_FIXUP, DL, VT,
2557 Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
2558
2559 case Intrinsic::amdgcn_trig_preop:
2560 return DAG.getNode(AMDGPUISD::TRIG_PREOP, DL, VT,
2561 Op.getOperand(1), Op.getOperand(2));
2562 case Intrinsic::amdgcn_div_scale: {
2563    // The 3rd parameter is required to be a constant.
2564 const ConstantSDNode *Param = dyn_cast<ConstantSDNode>(Op.getOperand(3));
2565 if (!Param)
2566 return DAG.getUNDEF(VT);
2567
2568    // Translate to the operands expected by the machine instruction. The
2569    // first operand must match either the numerator or denominator operand.
2570 SDValue Numerator = Op.getOperand(1);
2571 SDValue Denominator = Op.getOperand(2);
2572
2573    // Note this order is the opposite of the machine instruction's operands,
2574    // which are s0.f = Quotient, s1.f = Denominator, s2.f = Numerator. The
2575 // intrinsic has the numerator as the first operand to match a normal
2576 // division operation.
2577
2578 SDValue Src0 = Param->isAllOnesValue() ? Numerator : Denominator;
2579
2580 return DAG.getNode(AMDGPUISD::DIV_SCALE, DL, Op->getVTList(), Src0,
2581 Denominator, Numerator);
2582 }
Wei Ding07e03712016-07-28 16:42:13 +00002583 case Intrinsic::amdgcn_icmp: {
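    // The third operand is an ICmpInst predicate encoded as an immediate. The
    // comparison runs in every active lane, and the AMDGPUISD::SETCC built
    // below produces the combined wave-wide lane mask.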
2584    const auto *CD = dyn_cast<ConstantSDNode>(Op.getOperand(3));
        // Bail out if the condition code operand is not a constant.
        if (!CD)
          return DAG.getUNDEF(VT);
2585    int CondCode = CD->getSExtValue();
2586
2587 if (CondCode < ICmpInst::Predicate::FIRST_ICMP_PREDICATE ||
NAKAMURA Takumi59a20642016-08-22 00:58:04 +00002588 CondCode >= ICmpInst::Predicate::BAD_ICMP_PREDICATE)
Wei Ding07e03712016-07-28 16:42:13 +00002589 return DAG.getUNDEF(VT);
2590
NAKAMURA Takumi59a20642016-08-22 00:58:04 +00002591 ICmpInst::Predicate IcInput = static_cast<ICmpInst::Predicate>(CondCode);
Wei Ding07e03712016-07-28 16:42:13 +00002592 ISD::CondCode CCOpcode = getICmpCondCode(IcInput);
2593 return DAG.getNode(AMDGPUISD::SETCC, DL, VT, Op.getOperand(1),
2594 Op.getOperand(2), DAG.getCondCode(CCOpcode));
2595 }
2596 case Intrinsic::amdgcn_fcmp: {
2597    const auto *CD = dyn_cast<ConstantSDNode>(Op.getOperand(3));
        // Bail out if the condition code operand is not a constant.
        if (!CD)
          return DAG.getUNDEF(VT);
2598    int CondCode = CD->getSExtValue();
2599
2600 if (CondCode <= FCmpInst::Predicate::FCMP_FALSE ||
NAKAMURA Takumi59a20642016-08-22 00:58:04 +00002601 CondCode >= FCmpInst::Predicate::FCMP_TRUE)
Wei Ding07e03712016-07-28 16:42:13 +00002602 return DAG.getUNDEF(VT);
2603
NAKAMURA Takumi59a20642016-08-22 00:58:04 +00002604    FCmpInst::Predicate FcInput = static_cast<FCmpInst::Predicate>(CondCode);
Wei Ding07e03712016-07-28 16:42:13 +00002605    ISD::CondCode CCOpcode = getFCmpCondCode(FcInput);
2606 return DAG.getNode(AMDGPUISD::SETCC, DL, VT, Op.getOperand(1),
2607 Op.getOperand(2), DAG.getCondCode(CCOpcode));
2608 }
Matt Arsenault32fc5272016-07-26 16:45:45 +00002609 case Intrinsic::amdgcn_fmul_legacy:
2610 return DAG.getNode(AMDGPUISD::FMUL_LEGACY, DL, VT,
2611 Op.getOperand(1), Op.getOperand(2));
Matt Arsenaultc96e1de2016-07-18 18:35:05 +00002612 case Intrinsic::amdgcn_sffbh:
2613 case AMDGPUIntrinsic::AMDGPU_flbit_i32: // Legacy name.
2614 return DAG.getNode(AMDGPUISD::FFBH_I32, DL, VT, Op.getOperand(1));
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00002615 default:
2616 return AMDGPUTargetLowering::LowerOperation(Op, DAG);
2617 }
2618}
2619
Matt Arsenaulta9dbdca2016-04-12 14:05:04 +00002620SDValue SITargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
2621 SelectionDAG &DAG) const {
2622 unsigned IntrID = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
2623 switch (IntrID) {
2624 case Intrinsic::amdgcn_atomic_inc:
2625 case Intrinsic::amdgcn_atomic_dec: {
2626 MemSDNode *M = cast<MemSDNode>(Op);
2627 unsigned Opc = (IntrID == Intrinsic::amdgcn_atomic_inc) ?
2628 AMDGPUISD::ATOMIC_INC : AMDGPUISD::ATOMIC_DEC;
2629 SDValue Ops[] = {
2630 M->getOperand(0), // Chain
2631 M->getOperand(2), // Ptr
2632 M->getOperand(3) // Value
2633 };
2634
2635 return DAG.getMemIntrinsicNode(Opc, SDLoc(Op), M->getVTList(), Ops,
2636 M->getMemoryVT(), M->getMemOperand());
2637 }
2638 default:
2639 return SDValue();
2640 }
2641}
2642
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00002643SDValue SITargetLowering::LowerINTRINSIC_VOID(SDValue Op,
2644 SelectionDAG &DAG) const {
2645 MachineFunction &MF = DAG.getMachineFunction();
Tom Stellardfc92e772015-05-12 14:18:14 +00002646 SDLoc DL(Op);
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00002647 SDValue Chain = Op.getOperand(0);
2648 unsigned IntrinsicID = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
2649
2650 switch (IntrinsicID) {
Tom Stellardfc92e772015-05-12 14:18:14 +00002651 case AMDGPUIntrinsic::SI_sendmsg: {
2652 Chain = copyToM0(DAG, Chain, DL, Op.getOperand(3));
2653 SDValue Glue = Chain.getValue(1);
2654 return DAG.getNode(AMDGPUISD::SENDMSG, DL, MVT::Other, Chain,
2655 Op.getOperand(2), Glue);
2656 }
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00002657 case AMDGPUIntrinsic::SI_tbuffer_store: {
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00002658 SDValue Ops[] = {
2659 Chain,
2660 Op.getOperand(2),
2661 Op.getOperand(3),
2662 Op.getOperand(4),
2663 Op.getOperand(5),
2664 Op.getOperand(6),
2665 Op.getOperand(7),
2666 Op.getOperand(8),
2667 Op.getOperand(9),
2668 Op.getOperand(10),
2669 Op.getOperand(11),
2670 Op.getOperand(12),
2671 Op.getOperand(13),
2672 Op.getOperand(14)
2673 };
2674
2675 EVT VT = Op.getOperand(3).getValueType();
2676
2677 MachineMemOperand *MMO = MF.getMachineMemOperand(
2678 MachinePointerInfo(),
2679 MachineMemOperand::MOStore,
2680 VT.getStoreSize(), 4);
2681 return DAG.getMemIntrinsicNode(AMDGPUISD::TBUFFER_STORE_FORMAT, DL,
2682 Op->getVTList(), Ops, VT, MMO);
2683 }
Matt Arsenault00568682016-07-13 06:04:22 +00002684 case AMDGPUIntrinsic::AMDGPU_kill: {
Matt Arsenault03006fd2016-07-19 16:27:56 +00002685 SDValue Src = Op.getOperand(2);
2686 if (const ConstantFPSDNode *K = dyn_cast<ConstantFPSDNode>(Src)) {
Matt Arsenault00568682016-07-13 06:04:22 +00002687 if (!K->isNegative())
2688 return Chain;
Matt Arsenault03006fd2016-07-19 16:27:56 +00002689
2690 SDValue NegOne = DAG.getTargetConstant(FloatToBits(-1.0f), DL, MVT::i32);
2691 return DAG.getNode(AMDGPUISD::KILL, DL, MVT::Other, Chain, NegOne);
Matt Arsenault00568682016-07-13 06:04:22 +00002692 }
2693
Matt Arsenault03006fd2016-07-19 16:27:56 +00002694 SDValue Cast = DAG.getNode(ISD::BITCAST, DL, MVT::i32, Src);
2695 return DAG.getNode(AMDGPUISD::KILL, DL, MVT::Other, Chain, Cast);
Matt Arsenault00568682016-07-13 06:04:22 +00002696 }
Matt Arsenaulta5789bb2014-07-26 06:23:37 +00002697 default:
2698 return SDValue();
2699 }
2700}
2701
Tom Stellard81d871d2013-11-13 23:36:50 +00002702SDValue SITargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
2703 SDLoc DL(Op);
2704 LoadSDNode *Load = cast<LoadSDNode>(Op);
Matt Arsenault6dfda962016-02-10 18:21:39 +00002705 ISD::LoadExtType ExtType = Load->getExtensionType();
Matt Arsenaulta1436412016-02-10 18:21:45 +00002706 EVT MemVT = Load->getMemoryVT();
Matt Arsenault6dfda962016-02-10 18:21:39 +00002707
Matt Arsenaulta1436412016-02-10 18:21:45 +00002708 if (ExtType == ISD::NON_EXTLOAD && MemVT.getSizeInBits() < 32) {
Matt Arsenault6dfda962016-02-10 18:21:39 +00002709 // FIXME: Copied from PPC
2710    // First, load into 32 bits, then truncate to the in-memory type (i1 or i16).
2711
2712 SDValue Chain = Load->getChain();
2713 SDValue BasePtr = Load->getBasePtr();
2714 MachineMemOperand *MMO = Load->getMemOperand();
2715
Tom Stellard115a6152016-11-10 16:02:37 +00002716 EVT RealMemVT = (MemVT == MVT::i1) ? MVT::i8 : MVT::i16;
2717
Matt Arsenault6dfda962016-02-10 18:21:39 +00002718 SDValue NewLD = DAG.getExtLoad(ISD::EXTLOAD, DL, MVT::i32, Chain,
Tom Stellard115a6152016-11-10 16:02:37 +00002719 BasePtr, RealMemVT, MMO);
Matt Arsenault6dfda962016-02-10 18:21:39 +00002720
2721 SDValue Ops[] = {
Matt Arsenaulta1436412016-02-10 18:21:45 +00002722 DAG.getNode(ISD::TRUNCATE, DL, MemVT, NewLD),
Matt Arsenault6dfda962016-02-10 18:21:39 +00002723 NewLD.getValue(1)
2724 };
2725
2726 return DAG.getMergeValues(Ops, DL);
2727 }
Tom Stellard81d871d2013-11-13 23:36:50 +00002728
Matt Arsenaulta1436412016-02-10 18:21:45 +00002729 if (!MemVT.isVector())
2730 return SDValue();
Matt Arsenault4d801cd2015-11-24 12:05:03 +00002731
Matt Arsenaulta1436412016-02-10 18:21:45 +00002732 assert(Op.getValueType().getVectorElementType() == MVT::i32 &&
2733 "Custom lowering for non-i32 vectors hasn't been implemented.");
Matt Arsenault4d801cd2015-11-24 12:05:03 +00002734
Matt Arsenaultbcdfee72016-05-02 20:13:51 +00002735 unsigned AS = Load->getAddressSpace();
2736 if (!allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), MemVT,
2737 AS, Load->getAlignment())) {
2738 SDValue Ops[2];
2739 std::tie(Ops[0], Ops[1]) = expandUnalignedLoad(Load, DAG);
2740 return DAG.getMergeValues(Ops, DL);
2741 }
2742
Tom Stellardf8e6eaf2016-10-26 14:38:47 +00002743 MachineFunction &MF = DAG.getMachineFunction();
2744 SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
2745  // If there is a possibility that a flat instruction accesses scratch memory
2746 // then we need to use the same legalization rules we use for private.
2747 if (AS == AMDGPUAS::FLAT_ADDRESS)
2748 AS = MFI->hasFlatScratchInit() ?
2749 AMDGPUAS::PRIVATE_ADDRESS : AMDGPUAS::GLOBAL_ADDRESS;
2750
Matt Arsenaultbcdfee72016-05-02 20:13:51 +00002751 unsigned NumElements = MemVT.getVectorNumElements();
2752 switch (AS) {
Matt Arsenaulta1436412016-02-10 18:21:45 +00002753 case AMDGPUAS::CONSTANT_ADDRESS:
2754 if (isMemOpUniform(Load))
2755 return SDValue();
2756 // Non-uniform loads will be selected to MUBUF instructions, so they
2757    // have the same legalization requirements as global and private
2758 // loads.
2759 //
Justin Bognerb03fd122016-08-17 05:10:15 +00002760 LLVM_FALLTHROUGH;
Matt Arsenaulta1436412016-02-10 18:21:45 +00002761 case AMDGPUAS::GLOBAL_ADDRESS:
Matt Arsenaultf2ddbf02016-02-13 04:18:53 +00002762 case AMDGPUAS::FLAT_ADDRESS:
2763 if (NumElements > 4)
Matt Arsenaulta1436412016-02-10 18:21:45 +00002764 return SplitVectorLoad(Op, DAG);
2765 // v4 loads are supported for private and global memory.
2766 return SDValue();
Matt Arsenaultf2ddbf02016-02-13 04:18:53 +00002767 case AMDGPUAS::PRIVATE_ADDRESS: {
2768 // Depending on the setting of the private_element_size field in the
2769 // resource descriptor, we can only make private accesses up to a certain
2770 // size.
2771 switch (Subtarget->getMaxPrivateElementSize()) {
2772 case 4:
Matt Arsenault9c499c32016-04-14 23:31:26 +00002773 return scalarizeVectorLoad(Load, DAG);
Matt Arsenaultf2ddbf02016-02-13 04:18:53 +00002774 case 8:
2775 if (NumElements > 2)
2776 return SplitVectorLoad(Op, DAG);
2777 return SDValue();
2778 case 16:
2779 // Same as global/flat
2780 if (NumElements > 4)
2781 return SplitVectorLoad(Op, DAG);
2782 return SDValue();
2783 default:
2784 llvm_unreachable("unsupported private_element_size");
2785 }
2786 }
Matt Arsenaultbcdfee72016-05-02 20:13:51 +00002787 case AMDGPUAS::LOCAL_ADDRESS: {
2788 if (NumElements > 2)
2789 return SplitVectorLoad(Op, DAG);
2790
2791 if (NumElements == 2)
2792 return SDValue();
2793
Matt Arsenaulta1436412016-02-10 18:21:45 +00002794    // If properly aligned, splitting might let us use ds_read_b64.
2795 return SplitVectorLoad(Op, DAG);
Matt Arsenaultbcdfee72016-05-02 20:13:51 +00002796 }
Matt Arsenaulta1436412016-02-10 18:21:45 +00002797 default:
2798 return SDValue();
Tom Stellarde9373602014-01-22 19:24:14 +00002799 }
Tom Stellard81d871d2013-11-13 23:36:50 +00002800}
2801
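// Split a 64-bit select into two 32-bit selects on the halves, roughly:
//   (i64 (select c, a, b)) ->
//   (bitcast (v2i32 build_vector (select c, lo_32(a), lo_32(b)),
//                                (select c, hi_32(a), hi_32(b))))
// so each half can be handled as an ordinary 32-bit select.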
Tom Stellard0ec134f2014-02-04 17:18:40 +00002802SDValue SITargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
2803 if (Op.getValueType() != MVT::i64)
2804 return SDValue();
2805
2806 SDLoc DL(Op);
2807 SDValue Cond = Op.getOperand(0);
Tom Stellard0ec134f2014-02-04 17:18:40 +00002808
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00002809 SDValue Zero = DAG.getConstant(0, DL, MVT::i32);
2810 SDValue One = DAG.getConstant(1, DL, MVT::i32);
Tom Stellard0ec134f2014-02-04 17:18:40 +00002811
Tom Stellard7ea3d6d2014-03-31 14:01:55 +00002812 SDValue LHS = DAG.getNode(ISD::BITCAST, DL, MVT::v2i32, Op.getOperand(1));
2813 SDValue RHS = DAG.getNode(ISD::BITCAST, DL, MVT::v2i32, Op.getOperand(2));
2814
2815 SDValue Lo0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, LHS, Zero);
2816 SDValue Lo1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, RHS, Zero);
Tom Stellard0ec134f2014-02-04 17:18:40 +00002817
2818 SDValue Lo = DAG.getSelect(DL, MVT::i32, Cond, Lo0, Lo1);
2819
Tom Stellard7ea3d6d2014-03-31 14:01:55 +00002820 SDValue Hi0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, LHS, One);
2821 SDValue Hi1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, RHS, One);
Tom Stellard0ec134f2014-02-04 17:18:40 +00002822
2823 SDValue Hi = DAG.getSelect(DL, MVT::i32, Cond, Hi0, Hi1);
2824
Ahmed Bougacha128f8732016-04-26 21:15:30 +00002825 SDValue Res = DAG.getBuildVector(MVT::v2i32, DL, {Lo, Hi});
Tom Stellard7ea3d6d2014-03-31 14:01:55 +00002826 return DAG.getNode(ISD::BITCAST, DL, MVT::i64, Res);
Tom Stellard0ec134f2014-02-04 17:18:40 +00002827}
2828
Matt Arsenault22ca3f82014-07-15 23:50:10 +00002829// Catch division cases where we can use shortcuts with rcp and rsq
2830// instructions.
Matt Arsenaulta1fe17c2016-07-19 23:16:53 +00002831SDValue SITargetLowering::lowerFastUnsafeFDIV(SDValue Op,
2832 SelectionDAG &DAG) const {
Matt Arsenaulte9fa3b82014-07-15 20:18:31 +00002833 SDLoc SL(Op);
2834 SDValue LHS = Op.getOperand(0);
2835 SDValue RHS = Op.getOperand(1);
2836 EVT VT = Op.getValueType();
Matt Arsenault22ca3f82014-07-15 23:50:10 +00002837 bool Unsafe = DAG.getTarget().Options.UnsafeFPMath;
Matt Arsenaulte9fa3b82014-07-15 20:18:31 +00002838
2839 if (const ConstantFPSDNode *CLHS = dyn_cast<ConstantFPSDNode>(LHS)) {
Matt Arsenault979902b2016-08-02 22:25:04 +00002840 if ((Unsafe || (VT == MVT::f32 && !Subtarget->hasFP32Denormals()))) {
Matt Arsenaulte9fa3b82014-07-15 20:18:31 +00002841
Matt Arsenault979902b2016-08-02 22:25:04 +00002842 if (CLHS->isExactlyValue(1.0)) {
2843 // v_rcp_f32 and v_rsq_f32 do not support denormals, and according to
2844      // the CI documentation, have a worst case error of 1 ulp.
2845 // OpenCL requires <= 2.5 ulp for 1.0 / x, so it should always be OK to
2846 // use it as long as we aren't trying to use denormals.
Matt Arsenaulte9fa3b82014-07-15 20:18:31 +00002847
Matt Arsenault979902b2016-08-02 22:25:04 +00002848 // 1.0 / sqrt(x) -> rsq(x)
2849 //
2850 // XXX - Is UnsafeFPMath sufficient to do this for f64? The maximum ULP
2851 // error seems really high at 2^29 ULP.
2852 if (RHS.getOpcode() == ISD::FSQRT)
2853 return DAG.getNode(AMDGPUISD::RSQ, SL, VT, RHS.getOperand(0));
2854
2855 // 1.0 / x -> rcp(x)
2856 return DAG.getNode(AMDGPUISD::RCP, SL, VT, RHS);
2857 }
2858
2859 // Same as for 1.0, but expand the sign out of the constant.
2860 if (CLHS->isExactlyValue(-1.0)) {
2861 // -1.0 / x -> rcp (fneg x)
2862 SDValue FNegRHS = DAG.getNode(ISD::FNEG, SL, VT, RHS);
2863 return DAG.getNode(AMDGPUISD::RCP, SL, VT, FNegRHS);
2864 }
Matt Arsenaulte9fa3b82014-07-15 20:18:31 +00002865 }
2866 }
2867
Wei Dinged0f97f2016-06-09 19:17:15 +00002868 const SDNodeFlags *Flags = Op->getFlags();
2869
2870 if (Unsafe || Flags->hasAllowReciprocal()) {
Matt Arsenault22ca3f82014-07-15 23:50:10 +00002871 // Turn into multiply by the reciprocal.
2872 // x / y -> x * (1.0 / y)
Sanjay Patela2607012015-09-16 16:31:21 +00002873 SDNodeFlags Flags;
2874 Flags.setUnsafeAlgebra(true);
Matt Arsenault22ca3f82014-07-15 23:50:10 +00002875 SDValue Recip = DAG.getNode(AMDGPUISD::RCP, SL, VT, RHS);
Sanjay Patela2607012015-09-16 16:31:21 +00002876 return DAG.getNode(ISD::FMUL, SL, VT, LHS, Recip, &Flags);
Matt Arsenault22ca3f82014-07-15 23:50:10 +00002877 }
2878
2879 return SDValue();
Matt Arsenaulte9fa3b82014-07-15 20:18:31 +00002880}
2881
Matt Arsenaulta1fe17c2016-07-19 23:16:53 +00002882// Faster 2.5 ULP division that does not support denormals.
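//
// A rough sketch of the scaling trick used below: 0x6f800000 is 2.0^+96 and
// 0x2f800000 is 2.0^-32. For very large |rhs| the reciprocal would land in
// the f32 denormal range (which v_rcp_f32 flushes to zero), so the
// denominator is pre-scaled down and the quotient is multiplied by the same
// factor to compensate:
//
//   s = |rhs| > 0x1p+96f ? 0x1p-32f : 1.0f;
//   r = v_rcp_f32(rhs * s);
//   lhs / rhs ~= s * (lhs * r)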
2883SDValue SITargetLowering::lowerFDIV_FAST(SDValue Op, SelectionDAG &DAG) const {
2884 SDLoc SL(Op);
2885 SDValue LHS = Op.getOperand(1);
2886 SDValue RHS = Op.getOperand(2);
2887
2888 SDValue r1 = DAG.getNode(ISD::FABS, SL, MVT::f32, RHS);
2889
2890 const APFloat K0Val(BitsToFloat(0x6f800000));
2891 const SDValue K0 = DAG.getConstantFP(K0Val, SL, MVT::f32);
2892
2893 const APFloat K1Val(BitsToFloat(0x2f800000));
2894 const SDValue K1 = DAG.getConstantFP(K1Val, SL, MVT::f32);
2895
2896 const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f32);
2897
2898 EVT SetCCVT =
2899 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::f32);
2900
2901 SDValue r2 = DAG.getSetCC(SL, SetCCVT, r1, K0, ISD::SETOGT);
2902
2903 SDValue r3 = DAG.getNode(ISD::SELECT, SL, MVT::f32, r2, K1, One);
2904
2905 // TODO: Should this propagate fast-math-flags?
2906 r1 = DAG.getNode(ISD::FMUL, SL, MVT::f32, RHS, r3);
2907
2908 // rcp does not support denormals.
2909 SDValue r0 = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f32, r1);
2910
2911 SDValue Mul = DAG.getNode(ISD::FMUL, SL, MVT::f32, LHS, r0);
2912
2913 return DAG.getNode(ISD::FMUL, SL, MVT::f32, r3, Mul);
2914}
2915
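// Full f32 division via the DIV_SCALE / DIV_FMAS / DIV_FIXUP sequence.
// Roughly (a sketch of the node sequence built below, not exact ISA
// semantics): DIV_SCALE moves the operands out of the denormal range, one
// Newton-Raphson step refines the approximate reciprocal, two more FMA steps
// refine the quotient, and DIV_FMAS / DIV_FIXUP apply the final correction
// and undo the scaling:
//
//   r0 = rcp(d)               // d = scaled denominator
//   e0 = fma(-d, r0, 1.0)
//   r1 = fma(e0, r0, r0)      // refined reciprocal
//   q0 = n * r1               // n = scaled numerator
//   e1 = fma(-d, q0, n)       // residual
//   q1 = fma(e1, r1, q0)
//   e2 = fma(-d, q1, n)
//   result = div_fixup(div_fmas(e2, r1, q1, scale), rhs, lhs)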
Matt Arsenaulte9fa3b82014-07-15 20:18:31 +00002916SDValue SITargetLowering::LowerFDIV32(SDValue Op, SelectionDAG &DAG) const {
Matt Arsenaulta1fe17c2016-07-19 23:16:53 +00002917 if (SDValue FastLowered = lowerFastUnsafeFDIV(Op, DAG))
Eric Christopher538d09d02016-06-07 20:27:12 +00002918 return FastLowered;
Matt Arsenault22ca3f82014-07-15 23:50:10 +00002919
Matt Arsenaulte9fa3b82014-07-15 20:18:31 +00002920 SDLoc SL(Op);
2921 SDValue LHS = Op.getOperand(0);
2922 SDValue RHS = Op.getOperand(1);
2923
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00002924 const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f32);
Matt Arsenault37fefd62016-06-10 02:18:02 +00002925
Wei Dinged0f97f2016-06-09 19:17:15 +00002926 SDVTList ScaleVT = DAG.getVTList(MVT::f32, MVT::i1);
Matt Arsenault37fefd62016-06-10 02:18:02 +00002927
Wei Dinged0f97f2016-06-09 19:17:15 +00002928 SDValue DenominatorScaled = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT, RHS, RHS, LHS);
2929 SDValue NumeratorScaled = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT, LHS, RHS, LHS);
Matt Arsenault37fefd62016-06-10 02:18:02 +00002930
Matt Arsenaultdfec5ce2016-07-09 07:48:11 +00002931 // Denominator is scaled to not be denormal, so using rcp is ok.
Wei Dinged0f97f2016-06-09 19:17:15 +00002932 SDValue ApproxRcp = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f32, DenominatorScaled);
Matt Arsenault37fefd62016-06-10 02:18:02 +00002933
Wei Dinged0f97f2016-06-09 19:17:15 +00002934 SDValue NegDivScale0 = DAG.getNode(ISD::FNEG, SL, MVT::f32, DenominatorScaled);
Matt Arsenault37fefd62016-06-10 02:18:02 +00002935
Wei Dinged0f97f2016-06-09 19:17:15 +00002936 SDValue Fma0 = DAG.getNode(ISD::FMA, SL, MVT::f32, NegDivScale0, ApproxRcp, One);
2937 SDValue Fma1 = DAG.getNode(ISD::FMA, SL, MVT::f32, Fma0, ApproxRcp, ApproxRcp);
Matt Arsenault37fefd62016-06-10 02:18:02 +00002938
Wei Dinged0f97f2016-06-09 19:17:15 +00002939 SDValue Mul = DAG.getNode(ISD::FMUL, SL, MVT::f32, NumeratorScaled, Fma1);
Matt Arsenault37fefd62016-06-10 02:18:02 +00002940
Wei Dinged0f97f2016-06-09 19:17:15 +00002941 SDValue Fma2 = DAG.getNode(ISD::FMA, SL, MVT::f32, NegDivScale0, Mul, NumeratorScaled);
2942 SDValue Fma3 = DAG.getNode(ISD::FMA, SL, MVT::f32, Fma2, Fma1, Mul);
2943 SDValue Fma4 = DAG.getNode(ISD::FMA, SL, MVT::f32, NegDivScale0, Fma3, NumeratorScaled);
Matt Arsenault37fefd62016-06-10 02:18:02 +00002944
Wei Dinged0f97f2016-06-09 19:17:15 +00002945 SDValue Scale = NumeratorScaled.getValue(1);
2946 SDValue Fmas = DAG.getNode(AMDGPUISD::DIV_FMAS, SL, MVT::f32, Fma4, Fma1, Fma3, Scale);
Matt Arsenault37fefd62016-06-10 02:18:02 +00002947
Wei Dinged0f97f2016-06-09 19:17:15 +00002948 return DAG.getNode(AMDGPUISD::DIV_FIXUP, SL, MVT::f32, Fmas, RHS, LHS);
Matt Arsenaulte9fa3b82014-07-15 20:18:31 +00002949}
2950
2951SDValue SITargetLowering::LowerFDIV64(SDValue Op, SelectionDAG &DAG) const {
Matt Arsenault0bbcd8b2015-02-14 04:30:08 +00002952 if (DAG.getTarget().Options.UnsafeFPMath)
Matt Arsenaulta1fe17c2016-07-19 23:16:53 +00002953 return lowerFastUnsafeFDIV(Op, DAG);
Matt Arsenault0bbcd8b2015-02-14 04:30:08 +00002954
2955 SDLoc SL(Op);
2956 SDValue X = Op.getOperand(0);
2957 SDValue Y = Op.getOperand(1);
2958
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00002959 const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f64);
Matt Arsenault0bbcd8b2015-02-14 04:30:08 +00002960
2961 SDVTList ScaleVT = DAG.getVTList(MVT::f64, MVT::i1);
2962
2963 SDValue DivScale0 = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT, Y, Y, X);
2964
2965 SDValue NegDivScale0 = DAG.getNode(ISD::FNEG, SL, MVT::f64, DivScale0);
2966
2967 SDValue Rcp = DAG.getNode(AMDGPUISD::RCP, SL, MVT::f64, DivScale0);
2968
2969 SDValue Fma0 = DAG.getNode(ISD::FMA, SL, MVT::f64, NegDivScale0, Rcp, One);
2970
2971 SDValue Fma1 = DAG.getNode(ISD::FMA, SL, MVT::f64, Rcp, Fma0, Rcp);
2972
2973 SDValue Fma2 = DAG.getNode(ISD::FMA, SL, MVT::f64, NegDivScale0, Fma1, One);
2974
2975 SDValue DivScale1 = DAG.getNode(AMDGPUISD::DIV_SCALE, SL, ScaleVT, X, Y, X);
2976
2977 SDValue Fma3 = DAG.getNode(ISD::FMA, SL, MVT::f64, Fma1, Fma2, Fma1);
2978 SDValue Mul = DAG.getNode(ISD::FMUL, SL, MVT::f64, DivScale1, Fma3);
2979
2980 SDValue Fma4 = DAG.getNode(ISD::FMA, SL, MVT::f64,
2981 NegDivScale0, Mul, DivScale1);
2982
2983 SDValue Scale;
2984
Matt Arsenault43e92fe2016-06-24 06:30:11 +00002985 if (Subtarget->getGeneration() == SISubtarget::SOUTHERN_ISLANDS) {
Matt Arsenault0bbcd8b2015-02-14 04:30:08 +00002986 // Workaround a hardware bug on SI where the condition output from div_scale
2987 // is not usable.
2988
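    // A sketch of the emulation: DIV_SCALE only changes the exponent of its
    // result, so comparing the high 32 bits of each scaled value against the
    // corresponding original operand detects whether that operand was
    // rescaled; the XOR of the two tests stands in for the missing condition
    // output.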
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00002989 const SDValue Hi = DAG.getConstant(1, SL, MVT::i32);
Matt Arsenault0bbcd8b2015-02-14 04:30:08 +00002990
2991    // Figure out which scale to use for div_fmas.
2992 SDValue NumBC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, X);
2993 SDValue DenBC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Y);
2994 SDValue Scale0BC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, DivScale0);
2995 SDValue Scale1BC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, DivScale1);
2996
2997 SDValue NumHi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, NumBC, Hi);
2998 SDValue DenHi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, DenBC, Hi);
2999
3000 SDValue Scale0Hi
3001 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Scale0BC, Hi);
3002 SDValue Scale1Hi
3003 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Scale1BC, Hi);
3004
3005 SDValue CmpDen = DAG.getSetCC(SL, MVT::i1, DenHi, Scale0Hi, ISD::SETEQ);
3006 SDValue CmpNum = DAG.getSetCC(SL, MVT::i1, NumHi, Scale1Hi, ISD::SETEQ);
3007 Scale = DAG.getNode(ISD::XOR, SL, MVT::i1, CmpNum, CmpDen);
3008 } else {
3009 Scale = DivScale1.getValue(1);
3010 }
3011
3012 SDValue Fmas = DAG.getNode(AMDGPUISD::DIV_FMAS, SL, MVT::f64,
3013 Fma4, Fma3, Mul, Scale);
3014
3015 return DAG.getNode(AMDGPUISD::DIV_FIXUP, SL, MVT::f64, Fmas, Y, X);
Matt Arsenaulte9fa3b82014-07-15 20:18:31 +00003016}
3017
3018SDValue SITargetLowering::LowerFDIV(SDValue Op, SelectionDAG &DAG) const {
3019 EVT VT = Op.getValueType();
3020
3021 if (VT == MVT::f32)
3022 return LowerFDIV32(Op, DAG);
3023
3024 if (VT == MVT::f64)
3025 return LowerFDIV64(Op, DAG);
3026
3027 llvm_unreachable("Unexpected type for fdiv");
3028}
3029
Tom Stellard81d871d2013-11-13 23:36:50 +00003030SDValue SITargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
3031 SDLoc DL(Op);
3032 StoreSDNode *Store = cast<StoreSDNode>(Op);
3033 EVT VT = Store->getMemoryVT();
3034
Matt Arsenault95245662016-02-11 05:32:46 +00003035 if (VT == MVT::i1) {
3036 return DAG.getTruncStore(Store->getChain(), DL,
3037 DAG.getSExtOrTrunc(Store->getValue(), DL, MVT::i32),
3038 Store->getBasePtr(), MVT::i1, Store->getMemOperand());
Tom Stellardb02094e2014-07-21 15:45:01 +00003039 }
3040
Matt Arsenaultbcdfee72016-05-02 20:13:51 +00003041 assert(VT.isVector() &&
3042 Store->getValue().getValueType().getScalarType() == MVT::i32);
3043
3044 unsigned AS = Store->getAddressSpace();
3045 if (!allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), VT,
3046 AS, Store->getAlignment())) {
3047 return expandUnalignedStore(Store, DAG);
3048 }
Tom Stellard81d871d2013-11-13 23:36:50 +00003049
Tom Stellardf8e6eaf2016-10-26 14:38:47 +00003050 MachineFunction &MF = DAG.getMachineFunction();
3051 SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
3052  // If there is a possibility that a flat instruction accesses scratch memory
3053 // then we need to use the same legalization rules we use for private.
3054 if (AS == AMDGPUAS::FLAT_ADDRESS)
3055 AS = MFI->hasFlatScratchInit() ?
3056 AMDGPUAS::PRIVATE_ADDRESS : AMDGPUAS::GLOBAL_ADDRESS;
3057
Matt Arsenaultf2ddbf02016-02-13 04:18:53 +00003058 unsigned NumElements = VT.getVectorNumElements();
Matt Arsenaultbcdfee72016-05-02 20:13:51 +00003059 switch (AS) {
Matt Arsenaultf2ddbf02016-02-13 04:18:53 +00003060 case AMDGPUAS::GLOBAL_ADDRESS:
3061 case AMDGPUAS::FLAT_ADDRESS:
3062 if (NumElements > 4)
3063 return SplitVectorStore(Op, DAG);
3064 return SDValue();
3065 case AMDGPUAS::PRIVATE_ADDRESS: {
3066 switch (Subtarget->getMaxPrivateElementSize()) {
3067 case 4:
Matt Arsenault9c499c32016-04-14 23:31:26 +00003068 return scalarizeVectorStore(Store, DAG);
Matt Arsenaultf2ddbf02016-02-13 04:18:53 +00003069 case 8:
3070 if (NumElements > 2)
3071 return SplitVectorStore(Op, DAG);
3072 return SDValue();
3073 case 16:
3074 if (NumElements > 4)
3075 return SplitVectorStore(Op, DAG);
3076 return SDValue();
3077 default:
3078 llvm_unreachable("unsupported private_element_size");
3079 }
3080 }
Matt Arsenaultbcdfee72016-05-02 20:13:51 +00003081 case AMDGPUAS::LOCAL_ADDRESS: {
3082 if (NumElements > 2)
3083 return SplitVectorStore(Op, DAG);
3084
3085 if (NumElements == 2)
3086 return Op;
3087
Matt Arsenault95245662016-02-11 05:32:46 +00003088    // If properly aligned, splitting might let us use ds_write_b64.
3089 return SplitVectorStore(Op, DAG);
Matt Arsenaultbcdfee72016-05-02 20:13:51 +00003090 }
Matt Arsenaultf2ddbf02016-02-13 04:18:53 +00003091 default:
3092 llvm_unreachable("unhandled address space");
Matt Arsenault95245662016-02-11 05:32:46 +00003093 }
Tom Stellard81d871d2013-11-13 23:36:50 +00003094}
3095
Matt Arsenaultad14ce82014-07-19 18:44:39 +00003096SDValue SITargetLowering::LowerTrig(SDValue Op, SelectionDAG &DAG) const {
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00003097 SDLoc DL(Op);
Matt Arsenaultad14ce82014-07-19 18:44:39 +00003098 EVT VT = Op.getValueType();
3099 SDValue Arg = Op.getOperand(0);
Sanjay Patela2607012015-09-16 16:31:21 +00003100 // TODO: Should this propagate fast-math-flags?
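  // The hardware SIN/COS instructions take their input as a fraction of a
  // full period rather than in radians, so scale by 1/(2*pi) and take the
  // fractional part first, i.e. sin(x) becomes sin_hw(fract(x * 0.5/PI)).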
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00003101 SDValue FractPart = DAG.getNode(AMDGPUISD::FRACT, DL, VT,
3102 DAG.getNode(ISD::FMUL, DL, VT, Arg,
3103 DAG.getConstantFP(0.5/M_PI, DL,
3104 VT)));
Matt Arsenaultad14ce82014-07-19 18:44:39 +00003105
3106 switch (Op.getOpcode()) {
3107 case ISD::FCOS:
3108 return DAG.getNode(AMDGPUISD::COS_HW, SDLoc(Op), VT, FractPart);
3109 case ISD::FSIN:
3110 return DAG.getNode(AMDGPUISD::SIN_HW, SDLoc(Op), VT, FractPart);
3111 default:
3112 llvm_unreachable("Wrong trig opcode");
3113 }
3114}
3115
Tom Stellard354a43c2016-04-01 18:27:37 +00003116SDValue SITargetLowering::LowerATOMIC_CMP_SWAP(SDValue Op, SelectionDAG &DAG) const {
3117 AtomicSDNode *AtomicNode = cast<AtomicSDNode>(Op);
3118 assert(AtomicNode->isCompareAndSwap());
3119 unsigned AS = AtomicNode->getAddressSpace();
3120
3121 // No custom lowering required for local address space
3122 if (!isFlatGlobalAddrSpace(AS))
3123 return Op;
3124
3125  // Non-local address spaces require custom lowering for atomic compare
3126  // and swap; the cmp and swap values are packed into a v2i32 (v2i64 for _X2).
3127 SDLoc DL(Op);
3128 SDValue ChainIn = Op.getOperand(0);
3129 SDValue Addr = Op.getOperand(1);
3130 SDValue Old = Op.getOperand(2);
3131 SDValue New = Op.getOperand(3);
3132 EVT VT = Op.getValueType();
3133 MVT SimpleVT = VT.getSimpleVT();
3134 MVT VecType = MVT::getVectorVT(SimpleVT, 2);
3135
Ahmed Bougacha128f8732016-04-26 21:15:30 +00003136 SDValue NewOld = DAG.getBuildVector(VecType, DL, {New, Old});
Tom Stellard354a43c2016-04-01 18:27:37 +00003137 SDValue Ops[] = { ChainIn, Addr, NewOld };
Matt Arsenault88701812016-06-09 23:42:48 +00003138
3139 return DAG.getMemIntrinsicNode(AMDGPUISD::ATOMIC_CMP_SWAP, DL, Op->getVTList(),
3140 Ops, VT, AtomicNode->getMemOperand());
Tom Stellard354a43c2016-04-01 18:27:37 +00003141}
3142
Tom Stellard75aadc22012-12-11 21:25:42 +00003143//===----------------------------------------------------------------------===//
3144// Custom DAG optimizations
3145//===----------------------------------------------------------------------===//
3146
Matt Arsenault364a6742014-06-11 17:50:44 +00003147SDValue SITargetLowering::performUCharToFloatCombine(SDNode *N,
Matt Arsenaulte6986632015-01-14 01:35:22 +00003148 DAGCombinerInfo &DCI) const {
Matt Arsenault364a6742014-06-11 17:50:44 +00003149 EVT VT = N->getValueType(0);
3150 EVT ScalarVT = VT.getScalarType();
3151 if (ScalarVT != MVT::f32)
3152 return SDValue();
3153
3154 SelectionDAG &DAG = DCI.DAG;
3155 SDLoc DL(N);
3156
3157 SDValue Src = N->getOperand(0);
3158 EVT SrcVT = Src.getValueType();
3159
3160 // TODO: We could try to match extracting the higher bytes, which would be
3161 // easier if i8 vectors weren't promoted to i32 vectors, particularly after
3162 // types are legalized. v4i8 -> v4f32 is probably the only case to worry
3163 // about in practice.
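  // For example, once the mask proves the upper 24 bits are zero:
  //   (f32 (uint_to_fp (and i32:x, 0xff))) -> (CVT_F32_UBYTE0 x)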
3164 if (DCI.isAfterLegalizeVectorOps() && SrcVT == MVT::i32) {
3165 if (DAG.MaskedValueIsZero(Src, APInt::getHighBitsSet(32, 24))) {
3166 SDValue Cvt = DAG.getNode(AMDGPUISD::CVT_F32_UBYTE0, DL, VT, Src);
3167 DCI.AddToWorklist(Cvt.getNode());
3168 return Cvt;
3169 }
3170 }
3171
Matt Arsenault364a6742014-06-11 17:50:44 +00003172 return SDValue();
3173}
3174
Eric Christopher6c5b5112015-03-11 18:43:21 +00003175/// \brief Return true if the given offset Size in bytes can be folded into
3176/// the immediate offsets of a memory instruction for the given address space.
3177static bool canFoldOffset(unsigned OffsetSize, unsigned AS,
Matt Arsenault43e92fe2016-06-24 06:30:11 +00003178 const SISubtarget &STI) {
Eric Christopher6c5b5112015-03-11 18:43:21 +00003179 switch (AS) {
3180 case AMDGPUAS::GLOBAL_ADDRESS: {
3181    // MUBUF instructions have a 12-bit offset in bytes.
3182 return isUInt<12>(OffsetSize);
3183 }
3184 case AMDGPUAS::CONSTANT_ADDRESS: {
3185 // SMRD instructions have an 8-bit offset in dwords on SI and
3186 // a 20-bit offset in bytes on VI.
Matt Arsenault43e92fe2016-06-24 06:30:11 +00003187 if (STI.getGeneration() >= SISubtarget::VOLCANIC_ISLANDS)
Eric Christopher6c5b5112015-03-11 18:43:21 +00003188 return isUInt<20>(OffsetSize);
3189 else
3190 return (OffsetSize % 4 == 0) && isUInt<8>(OffsetSize / 4);
3191 }
3192 case AMDGPUAS::LOCAL_ADDRESS:
3193 case AMDGPUAS::REGION_ADDRESS: {
3194 // The single offset versions have a 16-bit offset in bytes.
3195 return isUInt<16>(OffsetSize);
3196 }
3197 case AMDGPUAS::PRIVATE_ADDRESS:
3198 // Indirect register addressing does not use any offsets.
3199 default:
3200    return false;
3201 }
3202}
3203
Matt Arsenaultb2baffa2014-08-15 17:49:05 +00003204// (shl (add x, c1), c2) -> add (shl x, c2), (shl c1, c2)
3205
3206// This is a variant of
3207// (mul (add x, c1), c2) -> add (mul x, c2), (mul c1, c2),
3208//
3209// The normal DAG combiner will do this, but only if the add has one use since
3210// that would increase the number of instructions.
3211//
3212// This prevents us from seeing a constant offset that can be folded into a
3213// memory instruction's addressing mode. If we know the resulting add offset of
3214// a pointer can be folded into an addressing offset, we can replace the pointer
3215// operand with the add of new constant offset. This eliminates one of the uses,
3216// and may allow the remaining use to also be simplified.
3217//
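// For example, with a 16-bit DS offset field:
//   (shl (add x, 16), 2) -> (add (shl x, 2), 64)
// where the +64 can then be folded into the ds_read/ds_write offset operand.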
3218SDValue SITargetLowering::performSHLPtrCombine(SDNode *N,
3219 unsigned AddrSpace,
3220 DAGCombinerInfo &DCI) const {
3221 SDValue N0 = N->getOperand(0);
3222 SDValue N1 = N->getOperand(1);
3223
3224 if (N0.getOpcode() != ISD::ADD)
3225 return SDValue();
3226
3227 const ConstantSDNode *CN1 = dyn_cast<ConstantSDNode>(N1);
3228 if (!CN1)
3229 return SDValue();
3230
3231 const ConstantSDNode *CAdd = dyn_cast<ConstantSDNode>(N0.getOperand(1));
3232 if (!CAdd)
3233 return SDValue();
3234
Matt Arsenaultb2baffa2014-08-15 17:49:05 +00003235 // If the resulting offset is too large, we can't fold it into the addressing
3236 // mode offset.
3237 APInt Offset = CAdd->getAPIntValue() << CN1->getAPIntValue();
Matt Arsenault43e92fe2016-06-24 06:30:11 +00003238 if (!canFoldOffset(Offset.getZExtValue(), AddrSpace, *getSubtarget()))
Matt Arsenaultb2baffa2014-08-15 17:49:05 +00003239 return SDValue();
3240
3241 SelectionDAG &DAG = DCI.DAG;
3242 SDLoc SL(N);
3243 EVT VT = N->getValueType(0);
3244
3245 SDValue ShlX = DAG.getNode(ISD::SHL, SL, VT, N0.getOperand(0), N1);
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00003246 SDValue COffset = DAG.getConstant(Offset, SL, MVT::i32);
Matt Arsenaultb2baffa2014-08-15 17:49:05 +00003247
3248 return DAG.getNode(ISD::ADD, SL, VT, ShlX, COffset);
3249}
3250
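// Returns true if applying Opc to a 32-bit half with this constant folds
// away completely: the half either becomes a constant or is left unchanged
// (and with 0/-1, or with -1/0, xor with 0), so splitting the 64-bit
// operation exposes a free simplification.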
Matt Arsenaultfa5f7672016-09-14 15:19:03 +00003251static bool bitOpWithConstantIsReducible(unsigned Opc, uint32_t Val) {
3252 return (Opc == ISD::AND && (Val == 0 || Val == 0xffffffff)) ||
3253 (Opc == ISD::OR && (Val == 0xffffffff || Val == 0)) ||
3254 (Opc == ISD::XOR && Val == 0);
3255}
3256
3257// Break up 64-bit bit operation of a constant into two 32-bit and/or/xor. This
3258// will typically happen anyway for a VALU 64-bit and. This exposes other 32-bit
3259// integer combine opportunities since most 64-bit operations are decomposed
3260// this way. TODO: We won't want this for SALU especially if it is an inline
3261// immediate.
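// For example, conceptually (before the per-half operations fold away):
//   (and i64:x, 0x00000000ffffffff) ->
//   (i64 (build_pair (and lo_32(x), 0xffffffff), (and hi_32(x), 0)))
// which simplifies to zero-extending the low half.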
3262SDValue SITargetLowering::splitBinaryBitConstantOp(
3263 DAGCombinerInfo &DCI,
3264 const SDLoc &SL,
3265 unsigned Opc, SDValue LHS,
3266 const ConstantSDNode *CRHS) const {
3267 uint64_t Val = CRHS->getZExtValue();
3268 uint32_t ValLo = Lo_32(Val);
3269 uint32_t ValHi = Hi_32(Val);
3270 const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
3271
3272 if ((bitOpWithConstantIsReducible(Opc, ValLo) ||
3273 bitOpWithConstantIsReducible(Opc, ValHi)) ||
3274 (CRHS->hasOneUse() && !TII->isInlineConstant(CRHS->getAPIntValue()))) {
3275 // If we need to materialize a 64-bit immediate, it will be split up later
3276 // anyway. Avoid creating the harder to understand 64-bit immediate
3277 // materialization.
3278 return splitBinaryBitConstantOpImpl(DCI, SL, Opc, LHS, ValLo, ValHi);
3279 }
3280
3281 return SDValue();
3282}
3283
Matt Arsenaultd0101a22015-01-06 23:00:46 +00003284SDValue SITargetLowering::performAndCombine(SDNode *N,
3285 DAGCombinerInfo &DCI) const {
3286 if (DCI.isBeforeLegalize())
3287 return SDValue();
3288
3289 SelectionDAG &DAG = DCI.DAG;
Matt Arsenaultfa5f7672016-09-14 15:19:03 +00003290 EVT VT = N->getValueType(0);
Matt Arsenaultd0101a22015-01-06 23:00:46 +00003291 SDValue LHS = N->getOperand(0);
3292 SDValue RHS = N->getOperand(1);
3293
3295 if (VT == MVT::i64) {
3296 const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(RHS);
3297 if (CRHS) {
3298 if (SDValue Split
3299 = splitBinaryBitConstantOp(DCI, SDLoc(N), ISD::AND, LHS, CRHS))
3300 return Split;
3301 }
3302 }
3303
3304 // (and (fcmp ord x, x), (fcmp une (fabs x), inf)) ->
3305 // fp_class x, ~(s_nan | q_nan | n_infinity | p_infinity)
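  // "x is ordered with itself and |x| != +inf" is exactly "x is neither NaN
  // nor an infinity", which one fp_class test with the mask built below can
  // answer.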
3306 if (LHS.getOpcode() == ISD::SETCC && RHS.getOpcode() == ISD::SETCC) {
Matt Arsenaultd0101a22015-01-06 23:00:46 +00003307 ISD::CondCode LCC = cast<CondCodeSDNode>(LHS.getOperand(2))->get();
3308 ISD::CondCode RCC = cast<CondCodeSDNode>(RHS.getOperand(2))->get();
3309
3310 SDValue X = LHS.getOperand(0);
3311 SDValue Y = RHS.getOperand(0);
3312 if (Y.getOpcode() != ISD::FABS || Y.getOperand(0) != X)
3313 return SDValue();
3314
3315 if (LCC == ISD::SETO) {
3316 if (X != LHS.getOperand(1))
3317 return SDValue();
3318
3319 if (RCC == ISD::SETUNE) {
3320 const ConstantFPSDNode *C1 = dyn_cast<ConstantFPSDNode>(RHS.getOperand(1));
3321 if (!C1 || !C1->isInfinity() || C1->isNegative())
3322 return SDValue();
3323
3324 const uint32_t Mask = SIInstrFlags::N_NORMAL |
3325 SIInstrFlags::N_SUBNORMAL |
3326 SIInstrFlags::N_ZERO |
3327 SIInstrFlags::P_ZERO |
3328 SIInstrFlags::P_SUBNORMAL |
3329 SIInstrFlags::P_NORMAL;
3330
3331 static_assert(((~(SIInstrFlags::S_NAN |
3332 SIInstrFlags::Q_NAN |
3333 SIInstrFlags::N_INFINITY |
3334 SIInstrFlags::P_INFINITY)) & 0x3ff) == Mask,
3335 "mask not equal");
3336
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00003337 SDLoc DL(N);
3338 return DAG.getNode(AMDGPUISD::FP_CLASS, DL, MVT::i1,
3339 X, DAG.getConstant(Mask, DL, MVT::i32));
Matt Arsenaultd0101a22015-01-06 23:00:46 +00003340 }
3341 }
3342 }
3343
3344 return SDValue();
3345}
3346
Matt Arsenaultf2290332015-01-06 23:00:39 +00003347SDValue SITargetLowering::performOrCombine(SDNode *N,
3348 DAGCombinerInfo &DCI) const {
3349 SelectionDAG &DAG = DCI.DAG;
3350 SDValue LHS = N->getOperand(0);
3351 SDValue RHS = N->getOperand(1);
3352
Matt Arsenault3b082382016-04-12 18:24:38 +00003353 EVT VT = N->getValueType(0);
Matt Arsenaultfa5f7672016-09-14 15:19:03 +00003354 if (VT == MVT::i1) {
3355 // or (fp_class x, c1), (fp_class x, c2) -> fp_class x, (c1 | c2)
3356 if (LHS.getOpcode() == AMDGPUISD::FP_CLASS &&
3357 RHS.getOpcode() == AMDGPUISD::FP_CLASS) {
3358 SDValue Src = LHS.getOperand(0);
3359 if (Src != RHS.getOperand(0))
3360 return SDValue();
Matt Arsenault3b082382016-04-12 18:24:38 +00003361
Matt Arsenaultfa5f7672016-09-14 15:19:03 +00003362 const ConstantSDNode *CLHS = dyn_cast<ConstantSDNode>(LHS.getOperand(1));
3363 const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(RHS.getOperand(1));
3364 if (!CLHS || !CRHS)
3365 return SDValue();
Matt Arsenault3b082382016-04-12 18:24:38 +00003366
Matt Arsenaultfa5f7672016-09-14 15:19:03 +00003367 // Only 10 bits are used.
3368 static const uint32_t MaxMask = 0x3ff;
Matt Arsenault3b082382016-04-12 18:24:38 +00003369
Matt Arsenaultfa5f7672016-09-14 15:19:03 +00003370 uint32_t NewMask = (CLHS->getZExtValue() | CRHS->getZExtValue()) & MaxMask;
3371 SDLoc DL(N);
3372 return DAG.getNode(AMDGPUISD::FP_CLASS, DL, MVT::i1,
3373 Src, DAG.getConstant(NewMask, DL, MVT::i32));
3374 }
Matt Arsenault3b082382016-04-12 18:24:38 +00003375
Matt Arsenaultfa5f7672016-09-14 15:19:03 +00003376 return SDValue();
3377 }
3378
3379 if (VT != MVT::i64)
3380 return SDValue();
3381
3382 // TODO: This could be a generic combine with a predicate for extracting the
3383 // high half of an integer being free.
3384
3385 // (or i64:x, (zero_extend i32:y)) ->
3386 // i64 (bitcast (v2i32 build_vector (or i32:y, lo_32(x)), hi_32(x)))
3387 if (LHS.getOpcode() == ISD::ZERO_EXTEND &&
3388 RHS.getOpcode() != ISD::ZERO_EXTEND)
3389 std::swap(LHS, RHS);
3390
3391 if (RHS.getOpcode() == ISD::ZERO_EXTEND) {
3392 SDValue ExtSrc = RHS.getOperand(0);
3393 EVT SrcVT = ExtSrc.getValueType();
3394 if (SrcVT == MVT::i32) {
3395 SDLoc SL(N);
3396 SDValue LowLHS, HiBits;
3397 std::tie(LowLHS, HiBits) = split64BitValue(LHS, DAG);
3398 SDValue LowOr = DAG.getNode(ISD::OR, SL, MVT::i32, LowLHS, ExtSrc);
3399
3400 DCI.AddToWorklist(LowOr.getNode());
3401 DCI.AddToWorklist(HiBits.getNode());
3402
3403 SDValue Vec = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32,
3404 LowOr, HiBits);
3405 return DAG.getNode(ISD::BITCAST, SL, MVT::i64, Vec);
Matt Arsenault3b082382016-04-12 18:24:38 +00003406 }
3407 }
3408
Matt Arsenaultfa5f7672016-09-14 15:19:03 +00003409 const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(N->getOperand(1));
3410 if (CRHS) {
3411 if (SDValue Split
3412 = splitBinaryBitConstantOp(DCI, SDLoc(N), ISD::OR, LHS, CRHS))
3413 return Split;
3414 }
Matt Arsenaultf2290332015-01-06 23:00:39 +00003415
Matt Arsenaultfa5f7672016-09-14 15:19:03 +00003416 return SDValue();
3417}
Matt Arsenaultf2290332015-01-06 23:00:39 +00003418
Matt Arsenaultfa5f7672016-09-14 15:19:03 +00003419SDValue SITargetLowering::performXorCombine(SDNode *N,
3420 DAGCombinerInfo &DCI) const {
3421 EVT VT = N->getValueType(0);
3422 if (VT != MVT::i64)
3423 return SDValue();
Matt Arsenaultf2290332015-01-06 23:00:39 +00003424
Matt Arsenaultfa5f7672016-09-14 15:19:03 +00003425 SDValue LHS = N->getOperand(0);
3426 SDValue RHS = N->getOperand(1);
3427
3428 const ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(RHS);
3429 if (CRHS) {
3430 if (SDValue Split
3431 = splitBinaryBitConstantOp(DCI, SDLoc(N), ISD::XOR, LHS, CRHS))
3432 return Split;
Matt Arsenaultf2290332015-01-06 23:00:39 +00003433 }
3434
3435 return SDValue();
3436}
3437
3438SDValue SITargetLowering::performClassCombine(SDNode *N,
3439 DAGCombinerInfo &DCI) const {
3440 SelectionDAG &DAG = DCI.DAG;
3441 SDValue Mask = N->getOperand(1);
3442
3443 // fp_class x, 0 -> false
3444 if (const ConstantSDNode *CMask = dyn_cast<ConstantSDNode>(Mask)) {
3445 if (CMask->isNullValue())
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00003446 return DAG.getConstant(0, SDLoc(N), MVT::i1);
Matt Arsenaultf2290332015-01-06 23:00:39 +00003447 }
3448
Matt Arsenaultb6d8c372016-06-20 18:33:56 +00003449 if (N->getOperand(0).isUndef())
3450 return DAG.getUNDEF(MVT::i1);
3451
Matt Arsenaultf2290332015-01-06 23:00:39 +00003452 return SDValue();
3453}
3454
Matt Arsenault9cd90712016-04-14 01:42:16 +00003455// Constant fold canonicalize.
3456SDValue SITargetLowering::performFCanonicalizeCombine(
3457 SDNode *N,
3458 DAGCombinerInfo &DCI) const {
3459 ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N->getOperand(0));
3460 if (!CFP)
3461 return SDValue();
3462
3463 SelectionDAG &DAG = DCI.DAG;
3464 const APFloat &C = CFP->getValueAPF();
3465
3466 // Flush denormals to 0 if not enabled.
3467 if (C.isDenormal()) {
3468 EVT VT = N->getValueType(0);
3469 if (VT == MVT::f32 && !Subtarget->hasFP32Denormals())
3470 return DAG.getConstantFP(0.0, SDLoc(N), VT);
3471
3472 if (VT == MVT::f64 && !Subtarget->hasFP64Denormals())
3473 return DAG.getConstantFP(0.0, SDLoc(N), VT);
3474 }
3475
3476 if (C.isNaN()) {
3477 EVT VT = N->getValueType(0);
3478 APFloat CanonicalQNaN = APFloat::getQNaN(C.getSemantics());
3479 if (C.isSignaling()) {
3480 // Quiet a signaling NaN.
3481 return DAG.getConstantFP(CanonicalQNaN, SDLoc(N), VT);
3482 }
3483
3484 // Make sure it is the canonical NaN bitpattern.
3485 //
3486 // TODO: Can we use -1 as the canonical NaN value since it's an inline
3487 // immediate?
3488 if (C.bitcastToAPInt() != CanonicalQNaN.bitcastToAPInt())
3489 return DAG.getConstantFP(CanonicalQNaN, SDLoc(N), VT);
3490 }
3491
3492 return SDValue(CFP, 0);
3493}
3494
Matt Arsenaultcc3c2b32014-11-14 20:08:52 +00003495static unsigned minMaxOpcToMin3Max3Opc(unsigned Opc) {
3496 switch (Opc) {
3497 case ISD::FMAXNUM:
3498 return AMDGPUISD::FMAX3;
Matt Arsenault5881f4e2015-06-09 00:52:37 +00003499 case ISD::SMAX:
Matt Arsenaultcc3c2b32014-11-14 20:08:52 +00003500 return AMDGPUISD::SMAX3;
Matt Arsenault5881f4e2015-06-09 00:52:37 +00003501 case ISD::UMAX:
Matt Arsenaultcc3c2b32014-11-14 20:08:52 +00003502 return AMDGPUISD::UMAX3;
3503 case ISD::FMINNUM:
3504 return AMDGPUISD::FMIN3;
Matt Arsenault5881f4e2015-06-09 00:52:37 +00003505 case ISD::SMIN:
Matt Arsenaultcc3c2b32014-11-14 20:08:52 +00003506 return AMDGPUISD::SMIN3;
Matt Arsenault5881f4e2015-06-09 00:52:37 +00003507 case ISD::UMIN:
Matt Arsenaultcc3c2b32014-11-14 20:08:52 +00003508 return AMDGPUISD::UMIN3;
3509 default:
3510 llvm_unreachable("Not a min/max opcode");
3511 }
3512}
3513
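// min(max(x, K0), K1) with constants K0 < K1 clamps x to [K0, K1], which is
// the median of the three values, so the med3 instructions can compute it
// directly; e.g. min(max(x, 2), 7) -> smed3(x, 2, 7).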
Benjamin Kramerbdc49562016-06-12 15:39:02 +00003514static SDValue performIntMed3ImmCombine(SelectionDAG &DAG, const SDLoc &SL,
3515 SDValue Op0, SDValue Op1, bool Signed) {
Matt Arsenaultf639c322016-01-28 20:53:42 +00003516 ConstantSDNode *K1 = dyn_cast<ConstantSDNode>(Op1);
3517 if (!K1)
3518 return SDValue();
3519
3520 ConstantSDNode *K0 = dyn_cast<ConstantSDNode>(Op0.getOperand(1));
3521 if (!K0)
3522 return SDValue();
3523
Matt Arsenaultf639c322016-01-28 20:53:42 +00003524 if (Signed) {
3525 if (K0->getAPIntValue().sge(K1->getAPIntValue()))
3526 return SDValue();
3527 } else {
3528 if (K0->getAPIntValue().uge(K1->getAPIntValue()))
3529 return SDValue();
3530 }
3531
3532 EVT VT = K0->getValueType(0);
Tom Stellard115a6152016-11-10 16:02:37 +00003533
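  // med3 is only available for 32-bit operands; i16 inputs are extended to
  // i32, combined there, and the result truncated back to i16.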
3534 MVT NVT = MVT::i32;
3535 unsigned ExtOp = Signed ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
3536
3537 SDValue Tmp1, Tmp2, Tmp3;
3538 Tmp1 = DAG.getNode(ExtOp, SL, NVT, Op0->getOperand(0));
3539 Tmp2 = DAG.getNode(ExtOp, SL, NVT, Op0->getOperand(1));
3540 Tmp3 = DAG.getNode(ExtOp, SL, NVT, Op1);
3541
3542 if (VT == MVT::i16) {
3543 Tmp1 = DAG.getNode(Signed ? AMDGPUISD::SMED3 : AMDGPUISD::UMED3, SL, NVT,
3544 Tmp1, Tmp2, Tmp3);
3545
3546 return DAG.getNode(ISD::TRUNCATE, SL, VT, Tmp1);
3547 }
3548 return DAG.getNode(Signed ? AMDGPUISD::SMED3 : AMDGPUISD::UMED3, SL, VT,
3549 Op0.getOperand(0), SDValue(K0, 0), SDValue(K1, 0));
Matt Arsenaultf639c322016-01-28 20:53:42 +00003550}
3551
3552static bool isKnownNeverSNan(SelectionDAG &DAG, SDValue Op) {
3553 if (!DAG.getTargetLoweringInfo().hasFloatingPointExceptions())
3554 return true;
3555
3556 return DAG.isKnownNeverNaN(Op);
3557}
3558
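// performFPMed3ImmCombine folds the FP clamp idiom into fmed3, e.g.
// (illustrative) fminnum(fmaxnum(x, 0.0), 1.0) --> fmed3(x, 0.0, 1.0).
// This is only valid when x cannot be a signaling NaN (checked below).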
Benjamin Kramerbdc49562016-06-12 15:39:02 +00003559static SDValue performFPMed3ImmCombine(SelectionDAG &DAG, const SDLoc &SL,
3560 SDValue Op0, SDValue Op1) {
Matt Arsenaultf639c322016-01-28 20:53:42 +00003561 ConstantFPSDNode *K1 = dyn_cast<ConstantFPSDNode>(Op1);
3562 if (!K1)
3563 return SDValue();
3564
3565 ConstantFPSDNode *K0 = dyn_cast<ConstantFPSDNode>(Op0.getOperand(1));
3566 if (!K0)
3567 return SDValue();
3568
3569 // Require K0 <= K1 (ordered compare; NaN inputs should have folded away by now).
3570 APFloat::cmpResult Cmp = K0->getValueAPF().compare(K1->getValueAPF());
3571 if (Cmp == APFloat::cmpGreaterThan)
3572 return SDValue();
3573
3574 // This isn't safe with signaling NaNs because in IEEE mode, min/max on a
3575 // signaling NaN gives a quiet NaN. The quiet NaN input to the min would then
3576 // give the other result, which is different from med3 with a NaN input.
3577 SDValue Var = Op0.getOperand(0);
3578 if (!isKnownNeverSNan(DAG, Var))
3579 return SDValue();
3580
3581 return DAG.getNode(AMDGPUISD::FMED3, SL, K0->getValueType(0),
3582 Var, SDValue(K0, 0), SDValue(K1, 0));
3583}
3584
3585SDValue SITargetLowering::performMinMaxCombine(SDNode *N,
3586 DAGCombinerInfo &DCI) const {
Matt Arsenaultcc3c2b32014-11-14 20:08:52 +00003587 SelectionDAG &DAG = DCI.DAG;
3588
3589 unsigned Opc = N->getOpcode();
3590 SDValue Op0 = N->getOperand(0);
3591 SDValue Op1 = N->getOperand(1);
3592
3593 // Only do this if the inner op has one use since this will just increase
3594 // register pressure for no benefit.
3595
Matt Arsenault5b39b342016-01-28 20:53:48 +00003596 if (Opc != AMDGPUISD::FMIN_LEGACY && Opc != AMDGPUISD::FMAX_LEGACY) {
3597 // max(max(a, b), c) -> max3(a, b, c)
3598 // min(min(a, b), c) -> min3(a, b, c)
3599 if (Op0.getOpcode() == Opc && Op0.hasOneUse()) {
3600 SDLoc DL(N);
3601 return DAG.getNode(minMaxOpcToMin3Max3Opc(Opc),
3602 DL,
3603 N->getValueType(0),
3604 Op0.getOperand(0),
3605 Op0.getOperand(1),
3606 Op1);
3607 }
Matt Arsenaultcc3c2b32014-11-14 20:08:52 +00003608
Matt Arsenault5b39b342016-01-28 20:53:48 +00003609 // Try commuted.
3610 // max(a, max(b, c)) -> max3(a, b, c)
3611 // min(a, min(b, c)) -> min3(a, b, c)
3612 if (Op1.getOpcode() == Opc && Op1.hasOneUse()) {
3613 SDLoc DL(N);
3614 return DAG.getNode(minMaxOpcToMin3Max3Opc(Opc),
3615 DL,
3616 N->getValueType(0),
3617 Op0,
3618 Op1.getOperand(0),
3619 Op1.getOperand(1));
3620 }
Matt Arsenaultcc3c2b32014-11-14 20:08:52 +00003621 }
3622
Matt Arsenaultf639c322016-01-28 20:53:42 +00003623 // min(max(x, K0), K1), K0 < K1 -> med3(x, K0, K1)
3624 if (Opc == ISD::SMIN && Op0.getOpcode() == ISD::SMAX && Op0.hasOneUse()) {
3625 if (SDValue Med3 = performIntMed3ImmCombine(DAG, SDLoc(N), Op0, Op1, true))
3626 return Med3;
3627 }
3628
3629 if (Opc == ISD::UMIN && Op0.getOpcode() == ISD::UMAX && Op0.hasOneUse()) {
3630 if (SDValue Med3 = performIntMed3ImmCombine(DAG, SDLoc(N), Op0, Op1, false))
3631 return Med3;
3632 }
3633
3634 // fminnum(fmaxnum(x, K0), K1), K0 < K1 && !is_snan(x) -> fmed3(x, K0, K1)
Matt Arsenault5b39b342016-01-28 20:53:48 +00003635 if (((Opc == ISD::FMINNUM && Op0.getOpcode() == ISD::FMAXNUM) ||
3636 (Opc == AMDGPUISD::FMIN_LEGACY &&
3637 Op0.getOpcode() == AMDGPUISD::FMAX_LEGACY)) &&
Matt Arsenaultf639c322016-01-28 20:53:42 +00003638 N->getValueType(0) == MVT::f32 && Op0.hasOneUse()) {
3639 if (SDValue Res = performFPMed3ImmCombine(DAG, SDLoc(N), Op0, Op1))
3640 return Res;
3641 }
3642
Matt Arsenaultcc3c2b32014-11-14 20:08:52 +00003643 return SDValue();
3644}
3645
Matt Arsenault6f6233d2015-01-06 23:00:41 +00003646SDValue SITargetLowering::performSetCCCombine(SDNode *N,
3647 DAGCombinerInfo &DCI) const {
3648 SelectionDAG &DAG = DCI.DAG;
3649 SDLoc SL(N);
3650
3651 SDValue LHS = N->getOperand(0);
3652 SDValue RHS = N->getOperand(1);
3653 EVT VT = LHS.getValueType();
3654
Konstantin Zhuravlyovf86e4b72016-11-13 07:01:11 +00003655 if (VT != MVT::f32 && VT != MVT::f64 && (!Subtarget->has16BitInsts() ||
3656 VT != MVT::f16))
Matt Arsenault6f6233d2015-01-06 23:00:41 +00003657 return SDValue();
3658
3659 // Match isinf pattern
3660 // (fcmp oeq (fabs x), inf) -> (fp_class x, (p_infinity | n_infinity))
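 // This turns e.g. a libm-style isinf(x) into a single v_cmp_class
 // instruction; assuming the usual class-bit encoding (P_INFINITY = 0x200,
 // N_INFINITY = 0x4), the combined mask built below is 0x204.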
3661 ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
3662 if (CC == ISD::SETOEQ && LHS.getOpcode() == ISD::FABS) {
3663 const ConstantFPSDNode *CRHS = dyn_cast<ConstantFPSDNode>(RHS);
3664 if (!CRHS)
3665 return SDValue();
3666
3667 const APFloat &APF = CRHS->getValueAPF();
3668 if (APF.isInfinity() && !APF.isNegative()) {
3669 unsigned Mask = SIInstrFlags::P_INFINITY | SIInstrFlags::N_INFINITY;
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00003670 return DAG.getNode(AMDGPUISD::FP_CLASS, SL, MVT::i1, LHS.getOperand(0),
3671 DAG.getConstant(Mask, SL, MVT::i32));
Matt Arsenault6f6233d2015-01-06 23:00:41 +00003672 }
3673 }
3674
3675 return SDValue();
3676}
3677
Tom Stellard75aadc22012-12-11 21:25:42 +00003678SDValue SITargetLowering::PerformDAGCombine(SDNode *N,
3679 DAGCombinerInfo &DCI) const {
3680 SelectionDAG &DAG = DCI.DAG;
Andrew Trickef9de2a2013-05-25 02:42:55 +00003681 SDLoc DL(N);
Tom Stellard75aadc22012-12-11 21:25:42 +00003682
3683 switch (N->getOpcode()) {
Matt Arsenault22b4c252014-12-21 16:48:42 +00003684 default:
3685 return AMDGPUTargetLowering::PerformDAGCombine(N, DCI);
Matt Arsenault6f6233d2015-01-06 23:00:41 +00003686 case ISD::SETCC:
3687 return performSetCCCombine(N, DCI);
Matt Arsenault5b39b342016-01-28 20:53:48 +00003688 case ISD::FMAXNUM:
Matt Arsenaultcc3c2b32014-11-14 20:08:52 +00003689 case ISD::FMINNUM:
Matt Arsenault5881f4e2015-06-09 00:52:37 +00003690 case ISD::SMAX:
3691 case ISD::SMIN:
3692 case ISD::UMAX:
Matt Arsenault5b39b342016-01-28 20:53:48 +00003693 case ISD::UMIN:
3694 case AMDGPUISD::FMIN_LEGACY:
3695 case AMDGPUISD::FMAX_LEGACY: {
Matt Arsenaultcc3c2b32014-11-14 20:08:52 +00003696 if (DCI.getDAGCombineLevel() >= AfterLegalizeDAG &&
Tom Stellard7c840bc2015-03-16 15:53:55 +00003697 N->getValueType(0) != MVT::f64 &&
Matt Arsenaultcc3c2b32014-11-14 20:08:52 +00003698 getTargetMachine().getOptLevel() > CodeGenOpt::None)
Matt Arsenaultf639c322016-01-28 20:53:42 +00003699 return performMinMaxCombine(N, DCI);
Matt Arsenaultcc3c2b32014-11-14 20:08:52 +00003700 break;
3701 }
Matt Arsenault364a6742014-06-11 17:50:44 +00003702
3703 case AMDGPUISD::CVT_F32_UBYTE0:
3704 case AMDGPUISD::CVT_F32_UBYTE1:
3705 case AMDGPUISD::CVT_F32_UBYTE2:
3706 case AMDGPUISD::CVT_F32_UBYTE3: {
3707 unsigned Offset = N->getOpcode() - AMDGPUISD::CVT_F32_UBYTE0;
Konstantin Zhuravlyovfda33ea2016-10-21 22:10:03 +00003708
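    // Src is kept for the SimplifyDemandedBits fallback below, while Srl
    // peels off any zero_extend/srl wrappers to look for a whole-byte shift.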
Matt Arsenault364a6742014-06-11 17:50:44 +00003709 SDValue Src = N->getOperand(0);
Konstantin Zhuravlyovfda33ea2016-10-21 22:10:03 +00003710 SDValue Srl = N->getOperand(0);
3711 if (Srl.getOpcode() == ISD::ZERO_EXTEND)
3712 Srl = Srl.getOperand(0);
Matt Arsenaulta949dc62016-05-09 16:29:50 +00003713
Matt Arsenault327bb5a2016-07-01 22:47:50 +00003714 // TODO: Handle (or x, (srl y, 8)) pattern when known bits are zero.
Konstantin Zhuravlyovfda33ea2016-10-21 22:10:03 +00003715 if (Srl.getOpcode() == ISD::SRL) {
Matt Arsenaulta949dc62016-05-09 16:29:50 +00003716 // cvt_f32_ubyte0 (srl x, 16) -> cvt_f32_ubyte2 x
3717 // cvt_f32_ubyte1 (srl x, 16) -> cvt_f32_ubyte3 x
3718 // cvt_f32_ubyte0 (srl x, 8) -> cvt_f32_ubyte1 x
3719
Konstantin Zhuravlyovfda33ea2016-10-21 22:10:03 +00003720 if (const ConstantSDNode *C =
3721 dyn_cast<ConstantSDNode>(Srl.getOperand(1))) {
3722 Srl = DAG.getZExtOrTrunc(Srl.getOperand(0), SDLoc(Srl.getOperand(0)),
3723 EVT(MVT::i32));
3724
Matt Arsenaulta949dc62016-05-09 16:29:50 +00003725 unsigned SrcOffset = C->getZExtValue() + 8 * Offset;
3726 if (SrcOffset < 32 && SrcOffset % 8 == 0) {
3727 return DAG.getNode(AMDGPUISD::CVT_F32_UBYTE0 + SrcOffset / 8, DL,
Konstantin Zhuravlyovfda33ea2016-10-21 22:10:03 +00003728 MVT::f32, Srl);
Matt Arsenaulta949dc62016-05-09 16:29:50 +00003729 }
3730 }
3731 }
3732
Matt Arsenault364a6742014-06-11 17:50:44 +00003733 APInt Demanded = APInt::getBitsSet(32, 8 * Offset, 8 * Offset + 8);
3734
3735 APInt KnownZero, KnownOne;
3736 TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
3737 !DCI.isBeforeLegalizeOps());
3738 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3739 if (TLO.ShrinkDemandedConstant(Src, Demanded) ||
3740 TLI.SimplifyDemandedBits(Src, Demanded, KnownZero, KnownOne, TLO)) {
3741 DCI.CommitTargetLoweringOpt(TLO);
3742 }
3743
3744 break;
3745 }
Konstantin Zhuravlyovfda33ea2016-10-21 22:10:03 +00003746 case ISD::SINT_TO_FP:
Matt Arsenault364a6742014-06-11 17:50:44 +00003747 case ISD::UINT_TO_FP: {
3748 return performUCharToFloatCombine(N, DCI);
Matt Arsenaultde5fbe92016-01-11 17:02:00 +00003749 }
Matt Arsenault02cb0ff2014-09-29 14:59:34 +00003750 case ISD::FADD: {
3751 if (DCI.getDAGCombineLevel() < AfterLegalizeDAG)
3752 break;
3753
3754 EVT VT = N->getValueType(0);
3755 if (VT != MVT::f32)
3756 break;
3757
Matt Arsenault8d630032015-02-20 22:10:41 +00003758 // Only do this if we are not trying to support denormals. v_mad_f32 does
3759 // not support denormals ever.
3760 if (Subtarget->hasFP32Denormals())
3761 break;
3762
Matt Arsenault02cb0ff2014-09-29 14:59:34 +00003763 SDValue LHS = N->getOperand(0);
3764 SDValue RHS = N->getOperand(1);
3765
3766 // These should really be instruction patterns, but writing patterns with
3767 // source modifiers is a pain.
3768
3769 // fadd (fadd (a, a), b) -> mad 2.0, a, b
3770 if (LHS.getOpcode() == ISD::FADD) {
3771 SDValue A = LHS.getOperand(0);
3772 if (A == LHS.getOperand(1)) {
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00003773 const SDValue Two = DAG.getConstantFP(2.0, DL, MVT::f32);
Matt Arsenault8d630032015-02-20 22:10:41 +00003774 return DAG.getNode(ISD::FMAD, DL, VT, Two, A, RHS);
Matt Arsenault02cb0ff2014-09-29 14:59:34 +00003775 }
3776 }
3777
3778 // fadd (b, fadd (a, a)) -> mad 2.0, a, b
3779 if (RHS.getOpcode() == ISD::FADD) {
3780 SDValue A = RHS.getOperand(0);
3781 if (A == RHS.getOperand(1)) {
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00003782 const SDValue Two = DAG.getConstantFP(2.0, DL, MVT::f32);
Matt Arsenault8d630032015-02-20 22:10:41 +00003783 return DAG.getNode(ISD::FMAD, DL, VT, Two, A, LHS);
Matt Arsenault02cb0ff2014-09-29 14:59:34 +00003784 }
3785 }
3786
Matt Arsenault8d630032015-02-20 22:10:41 +00003787 return SDValue();
Matt Arsenault02cb0ff2014-09-29 14:59:34 +00003788 }
Matt Arsenault8675db12014-08-29 16:01:14 +00003789 case ISD::FSUB: {
3790 if (DCI.getDAGCombineLevel() < AfterLegalizeDAG)
3791 break;
3792
3793 EVT VT = N->getValueType(0);
3794
3795 // Try to get the fneg to fold into the source modifier. This undoes generic
3796 // DAG combines and folds them into the mad.
Matt Arsenault8d630032015-02-20 22:10:41 +00003797 //
3798 // Only do this if we are not trying to support denormals. v_mad_f32 does
3799 // not support denormals ever.
Konstantin Zhuravlyovf86e4b72016-11-13 07:01:11 +00003800 if (VT == MVT::f32 && !Subtarget->hasFP32Denormals()) {
Matt Arsenault8675db12014-08-29 16:01:14 +00003801 SDValue LHS = N->getOperand(0);
3802 SDValue RHS = N->getOperand(1);
Matt Arsenault3d4233f2014-09-29 14:59:38 +00003803 if (LHS.getOpcode() == ISD::FADD) {
3804 // (fsub (fadd a, a), c) -> mad 2.0, a, (fneg c)
3805
3806 SDValue A = LHS.getOperand(0);
3807 if (A == LHS.getOperand(1)) {
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00003808 const SDValue Two = DAG.getConstantFP(2.0, DL, MVT::f32);
Matt Arsenault3d4233f2014-09-29 14:59:38 +00003809 SDValue NegRHS = DAG.getNode(ISD::FNEG, DL, VT, RHS);
3810
Matt Arsenault8d630032015-02-20 22:10:41 +00003811 return DAG.getNode(ISD::FMAD, DL, VT, Two, A, NegRHS);
Matt Arsenault3d4233f2014-09-29 14:59:38 +00003812 }
3813 }
3814
3815 if (RHS.getOpcode() == ISD::FADD) {
3816 // (fsub c, (fadd a, a)) -> mad -2.0, a, c
3817
3818 SDValue A = RHS.getOperand(0);
3819 if (A == RHS.getOperand(1)) {
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00003820 const SDValue NegTwo = DAG.getConstantFP(-2.0, DL, MVT::f32);
Matt Arsenault8d630032015-02-20 22:10:41 +00003821 return DAG.getNode(ISD::FMAD, DL, VT, NegTwo, A, LHS);
Matt Arsenault3d4233f2014-09-29 14:59:38 +00003822 }
3823 }
Matt Arsenault8d630032015-02-20 22:10:41 +00003824
3825 return SDValue();
Matt Arsenault8675db12014-08-29 16:01:14 +00003826 }
3827
3828 break;
3829 }
Matt Arsenaultb2baffa2014-08-15 17:49:05 +00003830 case ISD::LOAD:
3831 case ISD::STORE:
3832 case ISD::ATOMIC_LOAD:
3833 case ISD::ATOMIC_STORE:
3834 case ISD::ATOMIC_CMP_SWAP:
3835 case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
3836 case ISD::ATOMIC_SWAP:
3837 case ISD::ATOMIC_LOAD_ADD:
3838 case ISD::ATOMIC_LOAD_SUB:
3839 case ISD::ATOMIC_LOAD_AND:
3840 case ISD::ATOMIC_LOAD_OR:
3841 case ISD::ATOMIC_LOAD_XOR:
3842 case ISD::ATOMIC_LOAD_NAND:
3843 case ISD::ATOMIC_LOAD_MIN:
3844 case ISD::ATOMIC_LOAD_MAX:
3845 case ISD::ATOMIC_LOAD_UMIN:
Matt Arsenaulta9dbdca2016-04-12 14:05:04 +00003846 case ISD::ATOMIC_LOAD_UMAX:
3847 case AMDGPUISD::ATOMIC_INC:
3848 case AMDGPUISD::ATOMIC_DEC: { // TODO: Target mem intrinsics.
Matt Arsenaultb2baffa2014-08-15 17:49:05 +00003849 if (DCI.isBeforeLegalize())
3850 break;
Matt Arsenault5565f65e2014-05-22 18:09:07 +00003851
Matt Arsenaultb2baffa2014-08-15 17:49:05 +00003852 MemSDNode *MemNode = cast<MemSDNode>(N);
3853 SDValue Ptr = MemNode->getBasePtr();
3854
3855 // TODO: We could also do this for multiplies.
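    // e.g. (load (shl (add x, 16), 2)) -> (load (add (shl x, 2), 64)),
    // exposing the constant 64 so it can later fold into an immediate offset.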
3856 unsigned AS = MemNode->getAddressSpace();
3857 if (Ptr.getOpcode() == ISD::SHL && AS != AMDGPUAS::PRIVATE_ADDRESS) {
3858 SDValue NewPtr = performSHLPtrCombine(Ptr.getNode(), AS, DCI);
3859 if (NewPtr) {
Benjamin Kramer6cd780f2015-02-17 15:29:18 +00003860 SmallVector<SDValue, 8> NewOps(MemNode->op_begin(), MemNode->op_end());
Matt Arsenaultb2baffa2014-08-15 17:49:05 +00003861
3862 NewOps[N->getOpcode() == ISD::STORE ? 2 : 1] = NewPtr;
3863 return SDValue(DAG.UpdateNodeOperands(MemNode, NewOps), 0);
3864 }
3865 }
3866 break;
3867 }
Matt Arsenaultd0101a22015-01-06 23:00:46 +00003868 case ISD::AND:
3869 return performAndCombine(N, DCI);
Matt Arsenaultf2290332015-01-06 23:00:39 +00003870 case ISD::OR:
3871 return performOrCombine(N, DCI);
Matt Arsenaultfa5f7672016-09-14 15:19:03 +00003872 case ISD::XOR:
3873 return performXorCombine(N, DCI);
Matt Arsenaultf2290332015-01-06 23:00:39 +00003874 case AMDGPUISD::FP_CLASS:
3875 return performClassCombine(N, DCI);
Matt Arsenault9cd90712016-04-14 01:42:16 +00003876 case ISD::FCANONICALIZE:
3877 return performFCanonicalizeCombine(N, DCI);
Matt Arsenaultb6d8c372016-06-20 18:33:56 +00003878 case AMDGPUISD::FRACT:
3879 case AMDGPUISD::RCP:
3880 case AMDGPUISD::RSQ:
Matt Arsenault32fc5272016-07-26 16:45:45 +00003881 case AMDGPUISD::RCP_LEGACY:
Matt Arsenaultb6d8c372016-06-20 18:33:56 +00003882 case AMDGPUISD::RSQ_LEGACY:
3883 case AMDGPUISD::RSQ_CLAMP:
3884 case AMDGPUISD::LDEXP: {
3885 SDValue Src = N->getOperand(0);
3886 if (Src.isUndef())
3887 return Src;
3888 break;
3889 }
Matt Arsenaultb2baffa2014-08-15 17:49:05 +00003890 }
Matt Arsenault5565f65e2014-05-22 18:09:07 +00003891 return AMDGPUTargetLowering::PerformDAGCombine(N, DCI);
Tom Stellard75aadc22012-12-11 21:25:42 +00003892}
Christian Konigd910b7d2013-02-26 17:52:16 +00003893
Christian Konig8e06e2a2013-04-10 08:39:08 +00003894/// \brief Helper function for adjustWritemask
Benjamin Kramer635e3682013-05-23 15:43:05 +00003895static unsigned SubIdx2Lane(unsigned Idx) {
Christian Konig8e06e2a2013-04-10 08:39:08 +00003896 switch (Idx) {
3897 default: return 0;
3898 case AMDGPU::sub0: return 0;
3899 case AMDGPU::sub1: return 1;
3900 case AMDGPU::sub2: return 2;
3901 case AMDGPU::sub3: return 3;
3902 }
3903}
3904
3905/// \brief Adjust the writemask of MIMG instructions
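/// If, for example, only the X and Z components of a load with dmask 0xf are
/// ever extracted, the dmask shrinks to 0b0101 and the two users are remapped
/// onto sub0 and sub1 of the narrower result.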
3906void SITargetLowering::adjustWritemask(MachineSDNode *&Node,
3907 SelectionDAG &DAG) const {
3908 SDNode *Users[4] = { };
Tom Stellard54774e52013-10-23 02:53:47 +00003909 unsigned Lane = 0;
Nikolay Haustov2f684f12016-02-26 09:51:05 +00003910 unsigned DmaskIdx = (Node->getNumOperands() - Node->getNumValues() == 9) ? 2 : 3;
3911 unsigned OldDmask = Node->getConstantOperandVal(DmaskIdx);
Tom Stellard54774e52013-10-23 02:53:47 +00003912 unsigned NewDmask = 0;
Christian Konig8e06e2a2013-04-10 08:39:08 +00003913
3914 // Try to figure out the used register components
3915 for (SDNode::use_iterator I = Node->use_begin(), E = Node->use_end();
3916 I != E; ++I) {
3917
3918 // Abort if we can't understand the usage
3919 if (!I->isMachineOpcode() ||
3920 I->getMachineOpcode() != TargetOpcode::EXTRACT_SUBREG)
3921 return;
3922
Tom Stellard54774e52013-10-23 02:53:47 +00003923 // Lane means which subreg of %VGPRa_VGPRb_VGPRc_VGPRd is used.
3924 // Note that subregs are packed, i.e. Lane==0 is the first bit set
3925 // in OldDmask, so it can be any of X,Y,Z,W; Lane==1 is the second bit
3926 // set, etc.
Christian Konig8b1ed282013-04-10 08:39:16 +00003927 Lane = SubIdx2Lane(I->getConstantOperandVal(1));
Christian Konig8e06e2a2013-04-10 08:39:08 +00003928
Tom Stellard54774e52013-10-23 02:53:47 +00003929 // Set which texture component corresponds to the lane.
3930 unsigned Comp;
3931 for (unsigned i = 0, Dmask = OldDmask; i <= Lane; i++) {
3932 assert(Dmask);
Tom Stellard03a5c082013-10-23 03:50:25 +00003933 Comp = countTrailingZeros(Dmask);
Tom Stellard54774e52013-10-23 02:53:47 +00003934 Dmask &= ~(1 << Comp);
3935 }
3936
Christian Konig8e06e2a2013-04-10 08:39:08 +00003937 // Abort if we have more than one user per component
3938 if (Users[Lane])
3939 return;
3940
3941 Users[Lane] = *I;
Tom Stellard54774e52013-10-23 02:53:47 +00003942 NewDmask |= 1 << Comp;
Christian Konig8e06e2a2013-04-10 08:39:08 +00003943 }
3944
Tom Stellard54774e52013-10-23 02:53:47 +00003945 // Abort if there's no change
3946 if (NewDmask == OldDmask)
Christian Konig8e06e2a2013-04-10 08:39:08 +00003947 return;
3948
3949 // Adjust the writemask in the node
3950 std::vector<SDValue> Ops;
Nikolay Haustov2f684f12016-02-26 09:51:05 +00003951 Ops.insert(Ops.end(), Node->op_begin(), Node->op_begin() + DmaskIdx);
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00003952 Ops.push_back(DAG.getTargetConstant(NewDmask, SDLoc(Node), MVT::i32));
Nikolay Haustov2f684f12016-02-26 09:51:05 +00003953 Ops.insert(Ops.end(), Node->op_begin() + DmaskIdx + 1, Node->op_end());
Craig Topper8c0b4d02014-04-28 05:57:50 +00003954 Node = (MachineSDNode*)DAG.UpdateNodeOperands(Node, Ops);
Christian Konig8e06e2a2013-04-10 08:39:08 +00003955
Christian Konig8b1ed282013-04-10 08:39:16 +00003956 // If we only got one lane, replace it with a copy
Tom Stellard54774e52013-10-23 02:53:47 +00003957 // (if NewDmask has only one bit set...)
3958 if (NewDmask && (NewDmask & (NewDmask-1)) == 0) {
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00003959 SDValue RC = DAG.getTargetConstant(AMDGPU::VGPR_32RegClassID, SDLoc(),
3960 MVT::i32);
Christian Konig8b1ed282013-04-10 08:39:16 +00003961 SDNode *Copy = DAG.getMachineNode(TargetOpcode::COPY_TO_REGCLASS,
Andrew Trickef9de2a2013-05-25 02:42:55 +00003962 SDLoc(), Users[Lane]->getValueType(0),
Christian Konig8b1ed282013-04-10 08:39:16 +00003963 SDValue(Node, 0), RC);
3964 DAG.ReplaceAllUsesWith(Users[Lane], Copy);
3965 return;
3966 }
3967
Christian Konig8e06e2a2013-04-10 08:39:08 +00003968 // Update the users of the node with the new indices
3969 for (unsigned i = 0, Idx = AMDGPU::sub0; i < 4; ++i) {
3970
3971 SDNode *User = Users[i];
3972 if (!User)
3973 continue;
3974
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00003975 SDValue Op = DAG.getTargetConstant(Idx, SDLoc(User), MVT::i32);
Christian Konig8e06e2a2013-04-10 08:39:08 +00003976 DAG.UpdateNodeOperands(User, User->getOperand(0), Op);
3977
3978 switch (Idx) {
3979 default: break;
3980 case AMDGPU::sub0: Idx = AMDGPU::sub1; break;
3981 case AMDGPU::sub1: Idx = AMDGPU::sub2; break;
3982 case AMDGPU::sub2: Idx = AMDGPU::sub3; break;
3983 }
3984 }
3985}
3986
Tom Stellardc98ee202015-07-16 19:40:07 +00003987static bool isFrameIndexOp(SDValue Op) {
3988 if (Op.getOpcode() == ISD::AssertZext)
3989 Op = Op.getOperand(0);
3990
3991 return isa<FrameIndexSDNode>(Op);
3992}
3993
Tom Stellard3457a842014-10-09 19:06:00 +00003994/// \brief Legalize target independent instructions (e.g. INSERT_SUBREG)
3995/// with frame index operands.
3996/// LLVM assumes that inputs to these instructions are registers.
3997void SITargetLowering::legalizeTargetIndependentNode(SDNode *Node,
3998 SelectionDAG &DAG) const {
Tom Stellard8dd392e2014-10-09 18:09:15 +00003999
4000 SmallVector<SDValue, 8> Ops;
Tom Stellard3457a842014-10-09 19:06:00 +00004001 for (unsigned i = 0; i < Node->getNumOperands(); ++i) {
Tom Stellardc98ee202015-07-16 19:40:07 +00004002 if (!isFrameIndexOp(Node->getOperand(i))) {
Tom Stellard3457a842014-10-09 19:06:00 +00004003 Ops.push_back(Node->getOperand(i));
Tom Stellard8dd392e2014-10-09 18:09:15 +00004004 continue;
4005 }
4006
Tom Stellard3457a842014-10-09 19:06:00 +00004007 SDLoc DL(Node);
Tom Stellard8dd392e2014-10-09 18:09:15 +00004008 Ops.push_back(SDValue(DAG.getMachineNode(AMDGPU::S_MOV_B32, DL,
Tom Stellard3457a842014-10-09 19:06:00 +00004009 Node->getOperand(i).getValueType(),
4010 Node->getOperand(i)), 0));
Tom Stellard8dd392e2014-10-09 18:09:15 +00004011 }
4012
Tom Stellard3457a842014-10-09 19:06:00 +00004013 DAG.UpdateNodeOperands(Node, Ops);
Tom Stellard8dd392e2014-10-09 18:09:15 +00004014}
4015
Matt Arsenault08d84942014-06-03 23:06:13 +00004016/// \brief Fold the instructions after selecting them.
Christian Konig8e06e2a2013-04-10 08:39:08 +00004017SDNode *SITargetLowering::PostISelFolding(MachineSDNode *Node,
4018 SelectionDAG &DAG) const {
Matt Arsenault43e92fe2016-06-24 06:30:11 +00004019 const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
Nicolai Haehnlef2c64db2016-02-18 16:44:18 +00004020 unsigned Opcode = Node->getMachineOpcode();
Christian Konig8e06e2a2013-04-10 08:39:08 +00004021
Nicolai Haehnlec06bfa12016-07-11 21:59:43 +00004022 if (TII->isMIMG(Opcode) && !TII->get(Opcode).mayStore() &&
4023 !TII->isGather4(Opcode))
Christian Konig8e06e2a2013-04-10 08:39:08 +00004024 adjustWritemask(Node, DAG);
4025
Nicolai Haehnlef2c64db2016-02-18 16:44:18 +00004026 if (Opcode == AMDGPU::INSERT_SUBREG ||
4027 Opcode == AMDGPU::REG_SEQUENCE) {
Tom Stellard8dd392e2014-10-09 18:09:15 +00004028 legalizeTargetIndependentNode(Node, DAG);
4029 return Node;
4030 }
Tom Stellard654d6692015-01-08 15:08:17 +00004031 return Node;
Christian Konig8e06e2a2013-04-10 08:39:08 +00004032}
Christian Konig8b1ed282013-04-10 08:39:16 +00004033
4034/// \brief Assign the register class depending on the number of
4035/// bits set in the writemask
Duncan P. N. Exon Smithe4f5e4f2016-06-30 22:52:52 +00004036void SITargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
Christian Konig8b1ed282013-04-10 08:39:16 +00004037 SDNode *Node) const {
Matt Arsenault43e92fe2016-06-24 06:30:11 +00004038 const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
Matt Arsenault7ac9c4a2014-09-08 15:07:31 +00004039
Duncan P. N. Exon Smithe4f5e4f2016-06-30 22:52:52 +00004040 MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
Matt Arsenault6005fcb2015-10-21 21:51:02 +00004041
Duncan P. N. Exon Smithe4f5e4f2016-06-30 22:52:52 +00004042 if (TII->isVOP3(MI.getOpcode())) {
Matt Arsenault6005fcb2015-10-21 21:51:02 +00004043 // Make sure constant bus requirements are respected.
Duncan P. N. Exon Smithe4f5e4f2016-06-30 22:52:52 +00004044 TII->legalizeOperandsVOP3(MRI, MI);
Matt Arsenault6005fcb2015-10-21 21:51:02 +00004045 return;
4046 }
Matt Arsenaultcb0ac3d2014-09-26 17:54:59 +00004047
Duncan P. N. Exon Smithe4f5e4f2016-06-30 22:52:52 +00004048 if (TII->isMIMG(MI)) {
4049 unsigned VReg = MI.getOperand(0).getReg();
4050 unsigned DmaskIdx = MI.getNumOperands() == 12 ? 3 : 4;
4051 unsigned Writemask = MI.getOperand(DmaskIdx).getImm();
Matt Arsenault7ac9c4a2014-09-08 15:07:31 +00004052 unsigned BitsSet = 0;
4053 for (unsigned i = 0; i < 4; ++i)
4054 BitsSet += Writemask & (1 << i) ? 1 : 0;
4055
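    // e.g. a dmask of 0b0101 returns two components, so the destination can
    // be shrunk from VReg_128 down to VReg_64.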
4056 const TargetRegisterClass *RC;
4057 switch (BitsSet) {
4058 default: return;
Tom Stellard45c0b3a2015-01-07 20:59:25 +00004059 case 1: RC = &AMDGPU::VGPR_32RegClass; break;
Matt Arsenault7ac9c4a2014-09-08 15:07:31 +00004060 case 2: RC = &AMDGPU::VReg_64RegClass; break;
4061 case 3: RC = &AMDGPU::VReg_96RegClass; break;
4062 }
4063
Duncan P. N. Exon Smithe4f5e4f2016-06-30 22:52:52 +00004064 unsigned NewOpcode = TII->getMaskedMIMGOp(MI.getOpcode(), BitsSet);
4065 MI.setDesc(TII->get(NewOpcode));
Matt Arsenault7ac9c4a2014-09-08 15:07:31 +00004066 MRI.setRegClass(VReg, RC);
Christian Konig8b1ed282013-04-10 08:39:16 +00004067 return;
Christian Konig8b1ed282013-04-10 08:39:16 +00004068 }
4069
Matt Arsenault7ac9c4a2014-09-08 15:07:31 +00004070 // Replace unused atomics with the no return version.
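  // e.g. a buffer atomic add whose returned value is never read can switch
  // to the no-return encoding, dropping the glc bit and the tied destination.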
Duncan P. N. Exon Smithe4f5e4f2016-06-30 22:52:52 +00004071 int NoRetAtomicOp = AMDGPU::getAtomicNoRetOp(MI.getOpcode());
Matt Arsenault7ac9c4a2014-09-08 15:07:31 +00004072 if (NoRetAtomicOp != -1) {
4073 if (!Node->hasAnyUseOfValue(0)) {
Duncan P. N. Exon Smithe4f5e4f2016-06-30 22:52:52 +00004074 MI.setDesc(TII->get(NoRetAtomicOp));
4075 MI.RemoveOperand(0);
Tom Stellard354a43c2016-04-01 18:27:37 +00004076 return;
Matt Arsenault7ac9c4a2014-09-08 15:07:31 +00004077 }
4078
Tom Stellard354a43c2016-04-01 18:27:37 +00004079 // For mubuf_atomic_cmpswap, we need to have tablegen use an extract_subreg
4080 // instruction, because the return type of these instructions is a vec2 of
4081 // the memory type, so it can be tied to the input operand.
4082 // This means these instructions always have a use, so we need to add a
4083 // special case to check if the atomic has only one extract_subreg use,
4084 // which itself has no uses.
4085 if ((Node->hasNUsesOfValue(1, 0) &&
Nicolai Haehnle750082d2016-04-15 14:42:36 +00004086 Node->use_begin()->isMachineOpcode() &&
Tom Stellard354a43c2016-04-01 18:27:37 +00004087 Node->use_begin()->getMachineOpcode() == AMDGPU::EXTRACT_SUBREG &&
4088 !Node->use_begin()->hasAnyUseOfValue(0))) {
Duncan P. N. Exon Smithe4f5e4f2016-06-30 22:52:52 +00004089 unsigned Def = MI.getOperand(0).getReg();
Tom Stellard354a43c2016-04-01 18:27:37 +00004090
4091 // Change this into a noret atomic.
Duncan P. N. Exon Smithe4f5e4f2016-06-30 22:52:52 +00004092 MI.setDesc(TII->get(NoRetAtomicOp));
4093 MI.RemoveOperand(0);
Tom Stellard354a43c2016-04-01 18:27:37 +00004094
4095 // If we only remove the def operand from the atomic instruction, the
4096 // extract_subreg will be left with a use of a vreg without a def.
4097 // So we need to insert an implicit_def to avoid machine verifier
4098 // errors.
Duncan P. N. Exon Smithe4f5e4f2016-06-30 22:52:52 +00004099 BuildMI(*MI.getParent(), MI, MI.getDebugLoc(),
Tom Stellard354a43c2016-04-01 18:27:37 +00004100 TII->get(AMDGPU::IMPLICIT_DEF), Def);
4101 }
Matt Arsenault7ac9c4a2014-09-08 15:07:31 +00004102 return;
4103 }
Christian Konig8b1ed282013-04-10 08:39:16 +00004104}
Tom Stellard0518ff82013-06-03 17:39:58 +00004105
Benjamin Kramerbdc49562016-06-12 15:39:02 +00004106static SDValue buildSMovImm32(SelectionDAG &DAG, const SDLoc &DL,
4107 uint64_t Val) {
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00004108 SDValue K = DAG.getTargetConstant(Val, DL, MVT::i32);
Matt Arsenault485defe2014-11-05 19:01:17 +00004109 return SDValue(DAG.getMachineNode(AMDGPU::S_MOV_B32, DL, MVT::i32, K), 0);
4110}
4111
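// Wrap a 64-bit flat pointer into a MUBUF addr64 resource descriptor: dwords
// 0-1 hold the pointer, dword 2 is zero, and dword 3 holds the high half of
// the default resource data format.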
4112MachineSDNode *SITargetLowering::wrapAddr64Rsrc(SelectionDAG &DAG,
Benjamin Kramerbdc49562016-06-12 15:39:02 +00004113 const SDLoc &DL,
Matt Arsenault485defe2014-11-05 19:01:17 +00004114 SDValue Ptr) const {
Matt Arsenault43e92fe2016-06-24 06:30:11 +00004115 const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
Matt Arsenault485defe2014-11-05 19:01:17 +00004116
Matt Arsenault2d6fdb82015-09-25 17:08:42 +00004117 // Build the half of the subregister with the constants before building the
4118 // full 128-bit register. If we are building multiple resource descriptors,
4119 // this will allow CSEing of the 2-component register.
4120 const SDValue Ops0[] = {
4121 DAG.getTargetConstant(AMDGPU::SGPR_64RegClassID, DL, MVT::i32),
4122 buildSMovImm32(DAG, DL, 0),
4123 DAG.getTargetConstant(AMDGPU::sub0, DL, MVT::i32),
4124 buildSMovImm32(DAG, DL, TII->getDefaultRsrcDataFormat() >> 32),
4125 DAG.getTargetConstant(AMDGPU::sub1, DL, MVT::i32)
4126 };
Matt Arsenault485defe2014-11-05 19:01:17 +00004127
Matt Arsenault2d6fdb82015-09-25 17:08:42 +00004128 SDValue SubRegHi = SDValue(DAG.getMachineNode(AMDGPU::REG_SEQUENCE, DL,
4129 MVT::v2i32, Ops0), 0);
Matt Arsenault485defe2014-11-05 19:01:17 +00004130
Matt Arsenault2d6fdb82015-09-25 17:08:42 +00004131 // Combine the constants and the pointer.
4132 const SDValue Ops1[] = {
4133 DAG.getTargetConstant(AMDGPU::SReg_128RegClassID, DL, MVT::i32),
4134 Ptr,
4135 DAG.getTargetConstant(AMDGPU::sub0_sub1, DL, MVT::i32),
4136 SubRegHi,
4137 DAG.getTargetConstant(AMDGPU::sub2_sub3, DL, MVT::i32)
4138 };
Matt Arsenault485defe2014-11-05 19:01:17 +00004139
Matt Arsenault2d6fdb82015-09-25 17:08:42 +00004140 return DAG.getMachineNode(AMDGPU::REG_SEQUENCE, DL, MVT::v4i32, Ops1);
Matt Arsenault485defe2014-11-05 19:01:17 +00004141}
4142
Matt Arsenaultf3cd4512014-11-05 19:01:19 +00004143/// \brief Return a resource descriptor with the 'Add TID' bit enabled
Benjamin Kramerdf005cb2015-08-08 18:27:36 +00004144/// The TID (Thread ID) is multiplied by the stride value (bits [61:48]
4145/// of the resource descriptor) to create an offset, which is added to
4146/// the resource pointer.
Benjamin Kramerbdc49562016-06-12 15:39:02 +00004147MachineSDNode *SITargetLowering::buildRSRC(SelectionDAG &DAG, const SDLoc &DL,
4148 SDValue Ptr, uint32_t RsrcDword1,
Matt Arsenaultf3cd4512014-11-05 19:01:19 +00004149 uint64_t RsrcDword2And3) const {
4150 SDValue PtrLo = DAG.getTargetExtractSubreg(AMDGPU::sub0, DL, MVT::i32, Ptr);
4151 SDValue PtrHi = DAG.getTargetExtractSubreg(AMDGPU::sub1, DL, MVT::i32, Ptr);
4152 if (RsrcDword1) {
4153 PtrHi = SDValue(DAG.getMachineNode(AMDGPU::S_OR_B32, DL, MVT::i32, PtrHi,
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00004154 DAG.getConstant(RsrcDword1, DL, MVT::i32)),
4155 0);
Matt Arsenaultf3cd4512014-11-05 19:01:19 +00004156 }
4157
4158 SDValue DataLo = buildSMovImm32(DAG, DL,
4159 RsrcDword2And3 & UINT64_C(0xFFFFFFFF));
4160 SDValue DataHi = buildSMovImm32(DAG, DL, RsrcDword2And3 >> 32);
4161
4162 const SDValue Ops[] = {
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00004163 DAG.getTargetConstant(AMDGPU::SReg_128RegClassID, DL, MVT::i32),
Matt Arsenaultf3cd4512014-11-05 19:01:19 +00004164 PtrLo,
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00004165 DAG.getTargetConstant(AMDGPU::sub0, DL, MVT::i32),
Matt Arsenaultf3cd4512014-11-05 19:01:19 +00004166 PtrHi,
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00004167 DAG.getTargetConstant(AMDGPU::sub1, DL, MVT::i32),
Matt Arsenaultf3cd4512014-11-05 19:01:19 +00004168 DataLo,
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00004169 DAG.getTargetConstant(AMDGPU::sub2, DL, MVT::i32),
Matt Arsenaultf3cd4512014-11-05 19:01:19 +00004170 DataHi,
Sergey Dmitrouk842a51b2015-04-28 14:05:47 +00004171 DAG.getTargetConstant(AMDGPU::sub3, DL, MVT::i32)
Matt Arsenaultf3cd4512014-11-05 19:01:19 +00004172 };
4173
4174 return DAG.getMachineNode(AMDGPU::REG_SEQUENCE, DL, MVT::v4i32, Ops);
4175}
4176
Tom Stellard94593ee2013-06-03 17:40:18 +00004177SDValue SITargetLowering::CreateLiveInRegister(SelectionDAG &DAG,
4178 const TargetRegisterClass *RC,
4179 unsigned Reg, EVT VT) const {
4180 SDValue VReg = AMDGPUTargetLowering::CreateLiveInRegister(DAG, RC, Reg, VT);
4181
4182 return DAG.getCopyFromReg(DAG.getEntryNode(), SDLoc(DAG.getEntryNode()),
4183 cast<RegisterSDNode>(VReg)->getReg(), VT);
4184}
Tom Stellardd7e6f132015-04-08 01:09:26 +00004185
4186//===----------------------------------------------------------------------===//
4187// SI Inline Assembly Support
4188//===----------------------------------------------------------------------===//
4189
4190std::pair<unsigned, const TargetRegisterClass *>
4191SITargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
Benjamin Kramer9bfb6272015-07-05 19:29:18 +00004192 StringRef Constraint,
Tom Stellardd7e6f132015-04-08 01:09:26 +00004193 MVT VT) const {
Tom Stellardb3c3bda2015-12-10 02:12:53 +00004194
4195 if (Constraint.size() == 1) {
4196 switch (Constraint[0]) {
4197 case 's':
4198 case 'r':
4199 switch (VT.getSizeInBits()) {
4200 default:
4201 return std::make_pair(0U, nullptr);
4202 case 32:
Matt Arsenaulta609e2d2016-08-30 20:50:08 +00004203 return std::make_pair(0U, &AMDGPU::SReg_32RegClass);
Tom Stellardb3c3bda2015-12-10 02:12:53 +00004204 case 64:
4205 return std::make_pair(0U, &AMDGPU::SGPR_64RegClass);
4206 case 128:
4207 return std::make_pair(0U, &AMDGPU::SReg_128RegClass);
4208 case 256:
4209 return std::make_pair(0U, &AMDGPU::SReg_256RegClass);
4210 }
4211
4212 case 'v':
4213 switch (VT.getSizeInBits()) {
4214 default:
4215 return std::make_pair(0U, nullptr);
4216 case 32:
4217 return std::make_pair(0U, &AMDGPU::VGPR_32RegClass);
4218 case 64:
4219 return std::make_pair(0U, &AMDGPU::VReg_64RegClass);
4220 case 96:
4221 return std::make_pair(0U, &AMDGPU::VReg_96RegClass);
4222 case 128:
4223 return std::make_pair(0U, &AMDGPU::VReg_128RegClass);
4224 case 256:
4225 return std::make_pair(0U, &AMDGPU::VReg_256RegClass);
4226 case 512:
4227 return std::make_pair(0U, &AMDGPU::VReg_512RegClass);
4228 }
Tom Stellardd7e6f132015-04-08 01:09:26 +00004229 }
4230 }
4231
4232 if (Constraint.size() > 1) {
4233 const TargetRegisterClass *RC = nullptr;
4234 if (Constraint[1] == 'v') {
4235 RC = &AMDGPU::VGPR_32RegClass;
4236 } else if (Constraint[1] == 's') {
4237 RC = &AMDGPU::SGPR_32RegClass;
4238 }
4239
4240 if (RC) {
Matt Arsenault0b554ed2015-06-23 02:05:55 +00004241 uint32_t Idx;
4242 bool Failed = Constraint.substr(2).getAsInteger(10, Idx);
4243 if (!Failed && Idx < RC->getNumRegs())
Tom Stellardd7e6f132015-04-08 01:09:26 +00004244 return std::make_pair(RC->getRegister(Idx), RC);
4245 }
4246 }
4247 return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
4248}
Tom Stellardb3c3bda2015-12-10 02:12:53 +00004249
4250SITargetLowering::ConstraintType
4251SITargetLowering::getConstraintType(StringRef Constraint) const {
4252 if (Constraint.size() == 1) {
4253 switch (Constraint[0]) {
4254 default: break;
4255 case 's':
4256 case 'v':
4257 return C_RegisterClass;
4258 }
4259 }
4260 return TargetLowering::getConstraintType(Constraint);
4261}