//===-- AMDILISelDAGToDAG.cpp - A dag to dag inst selector for AMDIL ------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//==-----------------------------------------------------------------------===//
//
/// \file
/// \brief Defines an instruction selector for the AMDGPU target.
//
//===----------------------------------------------------------------------===//
#include "AMDGPUInstrInfo.h"
#include "AMDGPUISelLowering.h" // For AMDGPUISD
#include "AMDGPURegisterInfo.h"
#include "AMDGPUSubtarget.h"
#include "R600InstrInfo.h"
#include "SIDefines.h"
#include "SIISelLowering.h"
#include "SIMachineFunctionInfo.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/IR/Function.h"

using namespace llvm;

//===----------------------------------------------------------------------===//
// Instruction Selector Implementation
//===----------------------------------------------------------------------===//

namespace {
/// AMDGPU specific code to select AMDGPU machine instructions for
/// SelectionDAG operations.
class AMDGPUDAGToDAGISel : public SelectionDAGISel {
  // Subtarget - Keep a pointer to the AMDGPU Subtarget around so that we can
  // make the right decision when generating code for different targets.
  const AMDGPUSubtarget &Subtarget;
public:
  AMDGPUDAGToDAGISel(TargetMachine &TM);
  virtual ~AMDGPUDAGToDAGISel();

  SDNode *Select(SDNode *N) override;
  const char *getPassName() const override;
  void PostprocessISelDAG() override;

private:
  bool isInlineImmediate(SDNode *N) const;
  inline SDValue getSmallIPtrImm(unsigned Imm);
  bool FoldOperand(SDValue &Src, SDValue &Sel, SDValue &Neg, SDValue &Abs,
                   const R600InstrInfo *TII);
  bool FoldOperands(unsigned, const R600InstrInfo *, std::vector<SDValue> &);
  bool FoldDotOperands(unsigned, const R600InstrInfo *, std::vector<SDValue> &);

  // Complex pattern selectors
  bool SelectADDRParam(SDValue Addr, SDValue& R1, SDValue& R2);
  bool SelectADDR(SDValue N, SDValue &R1, SDValue &R2);
  bool SelectADDR64(SDValue N, SDValue &R1, SDValue &R2);

  static bool checkType(const Value *ptr, unsigned int addrspace);
  static bool checkPrivateAddress(const MachineMemOperand *Op);

  static bool isGlobalStore(const StoreSDNode *N);
  static bool isPrivateStore(const StoreSDNode *N);
  static bool isLocalStore(const StoreSDNode *N);
  static bool isRegionStore(const StoreSDNode *N);

  bool isCPLoad(const LoadSDNode *N) const;
  bool isConstantLoad(const LoadSDNode *N, int cbID) const;
  bool isGlobalLoad(const LoadSDNode *N) const;
  bool isParamLoad(const LoadSDNode *N) const;
  bool isPrivateLoad(const LoadSDNode *N) const;
  bool isLocalLoad(const LoadSDNode *N) const;
  bool isRegionLoad(const LoadSDNode *N) const;

  /// \returns True if the current basic block being selected is at control
  /// flow depth 0.  Meaning that the current block dominates the
  /// exit block.
  bool isCFDepth0() const;

  const TargetRegisterClass *getOperandRegClass(SDNode *N, unsigned OpNo) const;
  bool SelectGlobalValueConstantOffset(SDValue Addr, SDValue& IntPtr);
  bool SelectGlobalValueVariableOffset(SDValue Addr, SDValue &BaseReg,
                                       SDValue& Offset);
  bool SelectADDRVTX_READ(SDValue Addr, SDValue &Base, SDValue &Offset);
  bool SelectADDRIndirect(SDValue Addr, SDValue &Base, SDValue &Offset);
  bool isDSOffsetLegal(const SDValue &Base, unsigned Offset,
                       unsigned OffsetBits) const;
  bool SelectDS1Addr1Offset(SDValue Ptr, SDValue &Base, SDValue &Offset) const;
  void SelectMUBUF(SDValue Addr, SDValue &SRsrc, SDValue &VAddr,
                   SDValue &SOffset, SDValue &Offset, SDValue &Offen,
                   SDValue &Idxen, SDValue &Addr64, SDValue &GLC, SDValue &SLC,
                   SDValue &TFE) const;
  bool SelectMUBUFAddr64(SDValue Addr, SDValue &SRsrc, SDValue &VAddr,
                         SDValue &Offset) const;
  bool SelectMUBUFScratch(SDValue Addr, SDValue &RSrc, SDValue &VAddr,
                          SDValue &SOffset, SDValue &ImmOffset) const;
  bool SelectMUBUFOffset(SDValue Addr, SDValue &SRsrc, SDValue &SOffset,
                         SDValue &Offset, SDValue &GLC, SDValue &SLC,
                         SDValue &TFE) const;
  bool SelectVOP3Mods(SDValue In, SDValue &Src, SDValue &SrcMods) const;
  bool SelectVOP3Mods0(SDValue In, SDValue &Src, SDValue &SrcMods,
                       SDValue &Clamp, SDValue &Omod) const;

  SDNode *SelectADD_SUB_I64(SDNode *N);
  SDNode *SelectDIV_SCALE(SDNode *N);

  // Include the pieces autogenerated from the target description.
#include "AMDGPUGenDAGISel.inc"
};
} // end anonymous namespace

/// \brief This pass converts a legalized DAG into an AMDGPU-specific
/// DAG, ready for instruction scheduling.
FunctionPass *llvm::createAMDGPUISelDag(TargetMachine &TM) {
  return new AMDGPUDAGToDAGISel(TM);
}

AMDGPUDAGToDAGISel::AMDGPUDAGToDAGISel(TargetMachine &TM)
  : SelectionDAGISel(TM), Subtarget(TM.getSubtarget<AMDGPUSubtarget>()) {
}

AMDGPUDAGToDAGISel::~AMDGPUDAGToDAGISel() {
}

bool AMDGPUDAGToDAGISel::isInlineImmediate(SDNode *N) const {
  const SITargetLowering *TL
      = static_cast<const SITargetLowering *>(getTargetLowering());
  return TL->analyzeImmediate(N) == 0;
}

/// \brief Determine the register class for \p OpNo
/// \returns The register class of the virtual register that will be used for
/// the given operand number \p OpNo or NULL if the register class cannot be
/// determined.
const TargetRegisterClass *AMDGPUDAGToDAGISel::getOperandRegClass(SDNode *N,
                                                          unsigned OpNo) const {
  if (!N->isMachineOpcode())
    return nullptr;

  switch (N->getMachineOpcode()) {
  default: {
    const MCInstrDesc &Desc =
        TM.getSubtargetImpl()->getInstrInfo()->get(N->getMachineOpcode());
    unsigned OpIdx = Desc.getNumDefs() + OpNo;
    if (OpIdx >= Desc.getNumOperands())
      return nullptr;
    int RegClass = Desc.OpInfo[OpIdx].RegClass;
    if (RegClass == -1)
      return nullptr;

    return TM.getSubtargetImpl()->getRegisterInfo()->getRegClass(RegClass);
  }
  case AMDGPU::REG_SEQUENCE: {
    unsigned RCID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
    const TargetRegisterClass *SuperRC =
        TM.getSubtargetImpl()->getRegisterInfo()->getRegClass(RCID);

    SDValue SubRegOp = N->getOperand(OpNo + 1);
    unsigned SubRegIdx = cast<ConstantSDNode>(SubRegOp)->getZExtValue();
    return TM.getSubtargetImpl()->getRegisterInfo()->getSubClassWithSubReg(
        SuperRC, SubRegIdx);
  }
  }
}

SDValue AMDGPUDAGToDAGISel::getSmallIPtrImm(unsigned int Imm) {
  return CurDAG->getTargetConstant(Imm, MVT::i32);
}

bool AMDGPUDAGToDAGISel::SelectADDRParam(
  SDValue Addr, SDValue& R1, SDValue& R2) {

  if (Addr.getOpcode() == ISD::FrameIndex) {
    if (FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
      R1 = CurDAG->getTargetFrameIndex(FIN->getIndex(), MVT::i32);
      R2 = CurDAG->getTargetConstant(0, MVT::i32);
    } else {
      R1 = Addr;
      R2 = CurDAG->getTargetConstant(0, MVT::i32);
    }
  } else if (Addr.getOpcode() == ISD::ADD) {
    R1 = Addr.getOperand(0);
    R2 = Addr.getOperand(1);
  } else {
    R1 = Addr;
    R2 = CurDAG->getTargetConstant(0, MVT::i32);
  }
  return true;
}

bool AMDGPUDAGToDAGISel::SelectADDR(SDValue Addr, SDValue& R1, SDValue& R2) {
  if (Addr.getOpcode() == ISD::TargetExternalSymbol ||
      Addr.getOpcode() == ISD::TargetGlobalAddress) {
    return false;
  }
  return SelectADDRParam(Addr, R1, R2);
}

bool AMDGPUDAGToDAGISel::SelectADDR64(SDValue Addr, SDValue& R1, SDValue& R2) {
  if (Addr.getOpcode() == ISD::TargetExternalSymbol ||
      Addr.getOpcode() == ISD::TargetGlobalAddress) {
    return false;
  }

  if (Addr.getOpcode() == ISD::FrameIndex) {
    if (FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
      R1 = CurDAG->getTargetFrameIndex(FIN->getIndex(), MVT::i64);
      R2 = CurDAG->getTargetConstant(0, MVT::i64);
    } else {
      R1 = Addr;
      R2 = CurDAG->getTargetConstant(0, MVT::i64);
    }
  } else if (Addr.getOpcode() == ISD::ADD) {
    R1 = Addr.getOperand(0);
    R2 = Addr.getOperand(1);
  } else {
    R1 = Addr;
    R2 = CurDAG->getTargetConstant(0, MVT::i64);
  }
  return true;
}

SDNode *AMDGPUDAGToDAGISel::Select(SDNode *N) {
  unsigned int Opc = N->getOpcode();
  if (N->isMachineOpcode()) {
    N->setNodeId(-1);
    return nullptr;   // Already selected.
  }

  const AMDGPUSubtarget &ST = TM.getSubtarget<AMDGPUSubtarget>();
  switch (Opc) {
  default: break;
  // We are selecting i64 ADD here instead of custom lower it during
  // DAG legalization, so we can fold some i64 ADDs used for address
  // calculation into the LOAD and STORE instructions.
  case ISD::ADD:
  case ISD::SUB: {
    if (N->getValueType(0) != MVT::i64 ||
        ST.getGeneration() < AMDGPUSubtarget::SOUTHERN_ISLANDS)
      break;

    return SelectADD_SUB_I64(N);
  }
  case ISD::SCALAR_TO_VECTOR:
  case AMDGPUISD::BUILD_VERTICAL_VECTOR:
  case ISD::BUILD_VECTOR: {
    unsigned RegClassID;
    const AMDGPURegisterInfo *TRI = static_cast<const AMDGPURegisterInfo *>(
        TM.getSubtargetImpl()->getRegisterInfo());
    const SIRegisterInfo *SIRI = static_cast<const SIRegisterInfo *>(
        TM.getSubtargetImpl()->getRegisterInfo());
    EVT VT = N->getValueType(0);
    unsigned NumVectorElts = VT.getVectorNumElements();
    EVT EltVT = VT.getVectorElementType();
    assert(EltVT.bitsEq(MVT::i32));
    if (ST.getGeneration() >= AMDGPUSubtarget::SOUTHERN_ISLANDS) {
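      // On SI+, default to VGPR register classes, but switch to SGPR classes
      // if any machine-instruction use of this vector expects an SGPR operand.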
      bool UseVReg = true;
      for (SDNode::use_iterator U = N->use_begin(), E = SDNode::use_end();
                                                    U != E; ++U) {
        if (!U->isMachineOpcode()) {
          continue;
        }
        const TargetRegisterClass *RC = getOperandRegClass(*U, U.getOperandNo());
        if (!RC) {
          continue;
        }
        if (SIRI->isSGPRClass(RC)) {
          UseVReg = false;
        }
      }
      switch(NumVectorElts) {
      case 1: RegClassID = UseVReg ? AMDGPU::VReg_32RegClassID :
                                     AMDGPU::SReg_32RegClassID;
        break;
      case 2: RegClassID = UseVReg ? AMDGPU::VReg_64RegClassID :
                                     AMDGPU::SReg_64RegClassID;
        break;
      case 4: RegClassID = UseVReg ? AMDGPU::VReg_128RegClassID :
                                     AMDGPU::SReg_128RegClassID;
        break;
      case 8: RegClassID = UseVReg ? AMDGPU::VReg_256RegClassID :
                                     AMDGPU::SReg_256RegClassID;
        break;
      case 16: RegClassID = UseVReg ? AMDGPU::VReg_512RegClassID :
                                      AMDGPU::SReg_512RegClassID;
        break;
      default: llvm_unreachable("Do not know how to lower this BUILD_VECTOR");
      }
    } else {
      // BUILD_VECTOR was lowered into an IMPLICIT_DEF + 4 INSERT_SUBREG
      // that adds a 128-bit reg copy when going through TwoAddressInstructions
      // pass. We want to avoid 128-bit copies as much as possible because they
      // can't be bundled by our scheduler.
      switch(NumVectorElts) {
      case 2: RegClassID = AMDGPU::R600_Reg64RegClassID; break;
      case 4:
        if (Opc == AMDGPUISD::BUILD_VERTICAL_VECTOR)
          RegClassID = AMDGPU::R600_Reg128VerticalRegClassID;
        else
          RegClassID = AMDGPU::R600_Reg128RegClassID;
        break;
      default: llvm_unreachable("Do not know how to lower this BUILD_VECTOR");
      }
    }

    SDValue RegClass = CurDAG->getTargetConstant(RegClassID, MVT::i32);

    if (NumVectorElts == 1) {
      return CurDAG->SelectNodeTo(N, AMDGPU::COPY_TO_REGCLASS, EltVT,
                                  N->getOperand(0), RegClass);
    }

    assert(NumVectorElts <= 16 && "Vectors with more than 16 elements not "
                                  "supported yet");
    // 16 = Max Num Vector Elements
    // 2 = 2 REG_SEQUENCE operands per element (value, subreg index)
    // 1 = Vector Register Class
    SmallVector<SDValue, 16 * 2 + 1> RegSeqArgs(NumVectorElts * 2 + 1);

    RegSeqArgs[0] = CurDAG->getTargetConstant(RegClassID, MVT::i32);
    bool IsRegSeq = true;
    unsigned NOps = N->getNumOperands();
    for (unsigned i = 0; i < NOps; i++) {
      // XXX: Why is this here?
      if (dyn_cast<RegisterSDNode>(N->getOperand(i))) {
        IsRegSeq = false;
        break;
      }
      RegSeqArgs[1 + (2 * i)] = N->getOperand(i);
      RegSeqArgs[1 + (2 * i) + 1] =
          CurDAG->getTargetConstant(TRI->getSubRegFromChannel(i), MVT::i32);
    }

    if (NOps != NumVectorElts) {
      // Fill in the missing undef elements if this was a scalar_to_vector.
      assert(Opc == ISD::SCALAR_TO_VECTOR && NOps < NumVectorElts);

      MachineSDNode *ImpDef = CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF,
                                                     SDLoc(N), EltVT);
      for (unsigned i = NOps; i < NumVectorElts; ++i) {
        RegSeqArgs[1 + (2 * i)] = SDValue(ImpDef, 0);
        RegSeqArgs[1 + (2 * i) + 1] =
            CurDAG->getTargetConstant(TRI->getSubRegFromChannel(i), MVT::i32);
      }
    }

    if (!IsRegSeq)
      break;
    return CurDAG->SelectNodeTo(N, AMDGPU::REG_SEQUENCE, N->getVTList(),
                                RegSeqArgs);
  }
  case ISD::BUILD_PAIR: {
    SDValue RC, SubReg0, SubReg1;
    if (ST.getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS) {
      break;
    }
    if (N->getValueType(0) == MVT::i128) {
      RC = CurDAG->getTargetConstant(AMDGPU::SReg_128RegClassID, MVT::i32);
      SubReg0 = CurDAG->getTargetConstant(AMDGPU::sub0_sub1, MVT::i32);
      SubReg1 = CurDAG->getTargetConstant(AMDGPU::sub2_sub3, MVT::i32);
    } else if (N->getValueType(0) == MVT::i64) {
      RC = CurDAG->getTargetConstant(AMDGPU::SReg_64RegClassID, MVT::i32);
      SubReg0 = CurDAG->getTargetConstant(AMDGPU::sub0, MVT::i32);
      SubReg1 = CurDAG->getTargetConstant(AMDGPU::sub1, MVT::i32);
    } else {
      llvm_unreachable("Unhandled value type for BUILD_PAIR");
    }
    const SDValue Ops[] = { RC, N->getOperand(0), SubReg0,
                            N->getOperand(1), SubReg1 };
    return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE,
                                  SDLoc(N), N->getValueType(0), Ops);
  }

  case ISD::Constant:
  case ISD::ConstantFP: {
    const AMDGPUSubtarget &ST = TM.getSubtarget<AMDGPUSubtarget>();
    if (ST.getGeneration() < AMDGPUSubtarget::SOUTHERN_ISLANDS ||
        N->getValueType(0).getSizeInBits() != 64 || isInlineImmediate(N))
      break;

    uint64_t Imm;
    if (ConstantFPSDNode *FP = dyn_cast<ConstantFPSDNode>(N))
      Imm = FP->getValueAPF().bitcastToAPInt().getZExtValue();
    else {
      ConstantSDNode *C = cast<ConstantSDNode>(N);
      Imm = C->getZExtValue();
    }

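    // Materialize the 64-bit immediate as two 32-bit S_MOV_B32 halves joined
    // with a REG_SEQUENCE.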
    SDNode *Lo = CurDAG->getMachineNode(AMDGPU::S_MOV_B32, SDLoc(N), MVT::i32,
                                CurDAG->getConstant(Imm & 0xFFFFFFFF, MVT::i32));
    SDNode *Hi = CurDAG->getMachineNode(AMDGPU::S_MOV_B32, SDLoc(N), MVT::i32,
                                        CurDAG->getConstant(Imm >> 32, MVT::i32));
    const SDValue Ops[] = {
      CurDAG->getTargetConstant(AMDGPU::SReg_64RegClassID, MVT::i32),
      SDValue(Lo, 0), CurDAG->getTargetConstant(AMDGPU::sub0, MVT::i32),
      SDValue(Hi, 0), CurDAG->getTargetConstant(AMDGPU::sub1, MVT::i32)
    };

    return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, SDLoc(N),
                                  N->getValueType(0), Ops);
  }

  case AMDGPUISD::REGISTER_LOAD: {
    if (ST.getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS)
      break;
    SDValue Addr, Offset;

    SelectADDRIndirect(N->getOperand(1), Addr, Offset);
    const SDValue Ops[] = {
      Addr,
      Offset,
      CurDAG->getTargetConstant(0, MVT::i32),
      N->getOperand(0),
    };
    return CurDAG->getMachineNode(AMDGPU::SI_RegisterLoad, SDLoc(N),
                                  CurDAG->getVTList(MVT::i32, MVT::i64, MVT::Other),
                                  Ops);
  }
  case AMDGPUISD::REGISTER_STORE: {
    if (ST.getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS)
      break;
    SDValue Addr, Offset;
    SelectADDRIndirect(N->getOperand(2), Addr, Offset);
    const SDValue Ops[] = {
      N->getOperand(1),
      Addr,
      Offset,
      CurDAG->getTargetConstant(0, MVT::i32),
      N->getOperand(0),
    };
    return CurDAG->getMachineNode(AMDGPU::SI_RegisterStorePseudo, SDLoc(N),
                                  CurDAG->getVTList(MVT::Other),
                                  Ops);
  }

  case AMDGPUISD::BFE_I32:
  case AMDGPUISD::BFE_U32: {
    if (ST.getGeneration() < AMDGPUSubtarget::SOUTHERN_ISLANDS)
      break;

    // There is a scalar version available, but unlike the vector version which
    // has a separate operand for the offset and width, the scalar version packs
    // the width and offset into a single operand. Try to move to the scalar
    // version if the offsets are constant, so that we can try to keep extended
    // loads of kernel arguments in SGPRs.

    // TODO: Technically we could try to pattern match scalar bitshifts of
    // dynamic values, but it's probably not useful.
    ConstantSDNode *Offset = dyn_cast<ConstantSDNode>(N->getOperand(1));
    if (!Offset)
      break;

    ConstantSDNode *Width = dyn_cast<ConstantSDNode>(N->getOperand(2));
    if (!Width)
      break;

    bool Signed = Opc == AMDGPUISD::BFE_I32;

    // Transformation function, pack the offset and width of a BFE into
    // the format expected by the S_BFE_I32 / S_BFE_U32. In the second
    // source, bits [5:0] contain the offset and bits [22:16] the width.

    uint32_t OffsetVal = Offset->getZExtValue();
    uint32_t WidthVal = Width->getZExtValue();

    uint32_t PackedVal = OffsetVal | WidthVal << 16;
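    // For example, offset = 8 and width = 16 pack to 0x00100008.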

    SDValue PackedOffsetWidth = CurDAG->getTargetConstant(PackedVal, MVT::i32);
    return CurDAG->getMachineNode(Signed ? AMDGPU::S_BFE_I32 : AMDGPU::S_BFE_U32,
                                  SDLoc(N),
                                  MVT::i32,
                                  N->getOperand(0),
                                  PackedOffsetWidth);

  }
  case AMDGPUISD::DIV_SCALE: {
    return SelectDIV_SCALE(N);
  }
  }
  return SelectCode(N);
}

bool AMDGPUDAGToDAGISel::checkType(const Value *Ptr, unsigned AS) {
  assert(AS != 0 && "Use checkPrivateAddress instead.");
  if (!Ptr)
    return false;

  return Ptr->getType()->getPointerAddressSpace() == AS;
}

bool AMDGPUDAGToDAGISel::checkPrivateAddress(const MachineMemOperand *Op) {
  if (Op->getPseudoValue())
    return true;

  if (PointerType *PT = dyn_cast<PointerType>(Op->getValue()->getType()))
    return PT->getAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS;

  return false;
}

bool AMDGPUDAGToDAGISel::isGlobalStore(const StoreSDNode *N) {
  return checkType(N->getMemOperand()->getValue(), AMDGPUAS::GLOBAL_ADDRESS);
}

bool AMDGPUDAGToDAGISel::isPrivateStore(const StoreSDNode *N) {
  const Value *MemVal = N->getMemOperand()->getValue();
  return (!checkType(MemVal, AMDGPUAS::LOCAL_ADDRESS) &&
          !checkType(MemVal, AMDGPUAS::GLOBAL_ADDRESS) &&
          !checkType(MemVal, AMDGPUAS::REGION_ADDRESS));
}

bool AMDGPUDAGToDAGISel::isLocalStore(const StoreSDNode *N) {
  return checkType(N->getMemOperand()->getValue(), AMDGPUAS::LOCAL_ADDRESS);
}

bool AMDGPUDAGToDAGISel::isRegionStore(const StoreSDNode *N) {
  return checkType(N->getMemOperand()->getValue(), AMDGPUAS::REGION_ADDRESS);
}

bool AMDGPUDAGToDAGISel::isConstantLoad(const LoadSDNode *N, int CbId) const {
  const Value *MemVal = N->getMemOperand()->getValue();
  if (CbId == -1)
    return checkType(MemVal, AMDGPUAS::CONSTANT_ADDRESS);

  return checkType(MemVal, AMDGPUAS::CONSTANT_BUFFER_0 + CbId);
}

bool AMDGPUDAGToDAGISel::isGlobalLoad(const LoadSDNode *N) const {
  if (N->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS) {
    const AMDGPUSubtarget &ST = TM.getSubtarget<AMDGPUSubtarget>();
    if (ST.getGeneration() < AMDGPUSubtarget::SOUTHERN_ISLANDS ||
        N->getMemoryVT().bitsLT(MVT::i32)) {
      return true;
    }
  }
  return checkType(N->getMemOperand()->getValue(), AMDGPUAS::GLOBAL_ADDRESS);
}

bool AMDGPUDAGToDAGISel::isParamLoad(const LoadSDNode *N) const {
  return checkType(N->getMemOperand()->getValue(), AMDGPUAS::PARAM_I_ADDRESS);
}

bool AMDGPUDAGToDAGISel::isLocalLoad(const LoadSDNode *N) const {
  return checkType(N->getMemOperand()->getValue(), AMDGPUAS::LOCAL_ADDRESS);
}

bool AMDGPUDAGToDAGISel::isRegionLoad(const LoadSDNode *N) const {
  return checkType(N->getMemOperand()->getValue(), AMDGPUAS::REGION_ADDRESS);
}

bool AMDGPUDAGToDAGISel::isCPLoad(const LoadSDNode *N) const {
  MachineMemOperand *MMO = N->getMemOperand();
  if (checkPrivateAddress(N->getMemOperand())) {
    if (MMO) {
      const PseudoSourceValue *PSV = MMO->getPseudoValue();
      if (PSV && PSV == PseudoSourceValue::getConstantPool()) {
        return true;
      }
    }
  }
  return false;
}

bool AMDGPUDAGToDAGISel::isPrivateLoad(const LoadSDNode *N) const {
  if (checkPrivateAddress(N->getMemOperand())) {
    // Check to make sure we are not a constant pool load or a constant load
    // that is marked as a private load
    if (isCPLoad(N) || isConstantLoad(N, -1)) {
      return false;
    }
  }

  const Value *MemVal = N->getMemOperand()->getValue();
  if (!checkType(MemVal, AMDGPUAS::LOCAL_ADDRESS) &&
      !checkType(MemVal, AMDGPUAS::GLOBAL_ADDRESS) &&
      !checkType(MemVal, AMDGPUAS::REGION_ADDRESS) &&
      !checkType(MemVal, AMDGPUAS::CONSTANT_ADDRESS) &&
      !checkType(MemVal, AMDGPUAS::PARAM_D_ADDRESS) &&
      !checkType(MemVal, AMDGPUAS::PARAM_I_ADDRESS)) {
    return true;
  }
  return false;
}

bool AMDGPUDAGToDAGISel::isCFDepth0() const {
  // FIXME: Figure out a way to use DominatorTree analysis here.
  const BasicBlock *CurBlock = FuncInfo->MBB->getBasicBlock();
  const Function *Fn = FuncInfo->Fn;
  return &Fn->front() == CurBlock || &Fn->back() == CurBlock;
}

const char *AMDGPUDAGToDAGISel::getPassName() const {
  return "AMDGPU DAG->DAG Pattern Instruction Selection";
}

#ifdef DEBUGTMP
#undef INT64_C
#endif
#undef DEBUGTMP

//===----------------------------------------------------------------------===//
// Complex Patterns
//===----------------------------------------------------------------------===//

bool AMDGPUDAGToDAGISel::SelectGlobalValueConstantOffset(SDValue Addr,
                                                         SDValue& IntPtr) {
  if (ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(Addr)) {
    IntPtr = CurDAG->getIntPtrConstant(Cst->getZExtValue() / 4, true);
    return true;
  }
  return false;
}

bool AMDGPUDAGToDAGISel::SelectGlobalValueVariableOffset(SDValue Addr,
    SDValue& BaseReg, SDValue &Offset) {
  if (!isa<ConstantSDNode>(Addr)) {
    BaseReg = Addr;
    Offset = CurDAG->getIntPtrConstant(0, true);
    return true;
  }
  return false;
}

bool AMDGPUDAGToDAGISel::SelectADDRVTX_READ(SDValue Addr, SDValue &Base,
                                            SDValue &Offset) {
  ConstantSDNode *IMMOffset;

  if (Addr.getOpcode() == ISD::ADD
      && (IMMOffset = dyn_cast<ConstantSDNode>(Addr.getOperand(1)))
      && isInt<16>(IMMOffset->getZExtValue())) {

    Base = Addr.getOperand(0);
    Offset = CurDAG->getTargetConstant(IMMOffset->getZExtValue(), MVT::i32);
    return true;
  // If the pointer address is constant, we can move it to the offset field.
  } else if ((IMMOffset = dyn_cast<ConstantSDNode>(Addr))
             && isInt<16>(IMMOffset->getZExtValue())) {
    Base = CurDAG->getCopyFromReg(CurDAG->getEntryNode(),
                                  SDLoc(CurDAG->getEntryNode()),
                                  AMDGPU::ZERO, MVT::i32);
    Offset = CurDAG->getTargetConstant(IMMOffset->getZExtValue(), MVT::i32);
    return true;
  }

  // Default case, no offset
  Base = Addr;
  Offset = CurDAG->getTargetConstant(0, MVT::i32);
  return true;
}

bool AMDGPUDAGToDAGISel::SelectADDRIndirect(SDValue Addr, SDValue &Base,
                                            SDValue &Offset) {
  ConstantSDNode *C;

  if ((C = dyn_cast<ConstantSDNode>(Addr))) {
    Base = CurDAG->getRegister(AMDGPU::INDIRECT_BASE_ADDR, MVT::i32);
    Offset = CurDAG->getTargetConstant(C->getZExtValue(), MVT::i32);
  } else if ((Addr.getOpcode() == ISD::ADD || Addr.getOpcode() == ISD::OR) &&
             (C = dyn_cast<ConstantSDNode>(Addr.getOperand(1)))) {
    Base = Addr.getOperand(0);
    Offset = CurDAG->getTargetConstant(C->getZExtValue(), MVT::i32);
  } else {
    Base = Addr;
    Offset = CurDAG->getTargetConstant(0, MVT::i32);
  }

  return true;
}

SDNode *AMDGPUDAGToDAGISel::SelectADD_SUB_I64(SDNode *N) {
  SDLoc DL(N);
  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);

  bool IsAdd = (N->getOpcode() == ISD::ADD);

  SDValue Sub0 = CurDAG->getTargetConstant(AMDGPU::sub0, MVT::i32);
  SDValue Sub1 = CurDAG->getTargetConstant(AMDGPU::sub1, MVT::i32);

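  // Split both 64-bit operands into 32-bit halves; the low halves are combined
  // first and the resulting carry/borrow feeds the high-half instruction.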
  SDNode *Lo0 = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
                                       DL, MVT::i32, LHS, Sub0);
  SDNode *Hi0 = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
                                       DL, MVT::i32, LHS, Sub1);

  SDNode *Lo1 = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
                                       DL, MVT::i32, RHS, Sub0);
  SDNode *Hi1 = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
                                       DL, MVT::i32, RHS, Sub1);

  SDVTList VTList = CurDAG->getVTList(MVT::i32, MVT::Glue);
  SDValue AddLoArgs[] = { SDValue(Lo0, 0), SDValue(Lo1, 0) };

  unsigned Opc = IsAdd ? AMDGPU::S_ADD_I32 : AMDGPU::S_SUB_I32;
  unsigned CarryOpc = IsAdd ? AMDGPU::S_ADDC_U32 : AMDGPU::S_SUBB_U32;

  if (!isCFDepth0()) {
    Opc = IsAdd ? AMDGPU::V_ADD_I32_e32 : AMDGPU::V_SUB_I32_e32;
    CarryOpc = IsAdd ? AMDGPU::V_ADDC_U32_e32 : AMDGPU::V_SUBB_U32_e32;
  }

  SDNode *AddLo = CurDAG->getMachineNode(Opc, DL, VTList, AddLoArgs);
  SDValue Carry(AddLo, 1);
  SDNode *AddHi
    = CurDAG->getMachineNode(CarryOpc, DL, MVT::i32,
                             SDValue(Hi0, 0), SDValue(Hi1, 0), Carry);

  SDValue Args[5] = {
    CurDAG->getTargetConstant(AMDGPU::SReg_64RegClassID, MVT::i32),
    SDValue(AddLo,0),
    Sub0,
    SDValue(AddHi,0),
    Sub1,
  };
  return CurDAG->SelectNodeTo(N, AMDGPU::REG_SEQUENCE, MVT::i64, Args);
}

SDNode *AMDGPUDAGToDAGISel::SelectDIV_SCALE(SDNode *N) {
  SDLoc SL(N);
  EVT VT = N->getValueType(0);

  assert(VT == MVT::f32 || VT == MVT::f64);

  unsigned Opc
    = (VT == MVT::f64) ? AMDGPU::V_DIV_SCALE_F64 : AMDGPU::V_DIV_SCALE_F32;

  const SDValue Zero = CurDAG->getTargetConstant(0, MVT::i32);

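  // The three data sources come straight from the node; the trailing zeros
  // fill the instruction's remaining VOP3 operand slots, which this selector
  // leaves cleared.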
  SDValue Ops[] = {
    N->getOperand(0),
    N->getOperand(1),
    N->getOperand(2),
    Zero,
    Zero,
    Zero,
    Zero
  };

  return CurDAG->SelectNodeTo(N, Opc, VT, MVT::i1, Ops);
}

bool AMDGPUDAGToDAGISel::isDSOffsetLegal(const SDValue &Base, unsigned Offset,
                                         unsigned OffsetBits) const {
  const AMDGPUSubtarget &ST = TM.getSubtarget<AMDGPUSubtarget>();
  if ((OffsetBits == 16 && !isUInt<16>(Offset)) ||
      (OffsetBits == 8 && !isUInt<8>(Offset)))
    return false;

  if (ST.getGeneration() >= AMDGPUSubtarget::SEA_ISLANDS)
    return true;

  // On Southern Islands, instructions with a negative base value and an offset
  // don't seem to work.
  return CurDAG->SignBitIsZero(Base);
}

bool AMDGPUDAGToDAGISel::SelectDS1Addr1Offset(SDValue Addr, SDValue &Base,
                                              SDValue &Offset) const {
  if (CurDAG->isBaseWithConstantOffset(Addr)) {
    SDValue N0 = Addr.getOperand(0);
    SDValue N1 = Addr.getOperand(1);
    ConstantSDNode *C1 = cast<ConstantSDNode>(N1);
    if (isDSOffsetLegal(N0, C1->getSExtValue(), 16)) {
      // (add n0, c0)
      Base = N0;
      Offset = N1;
      return true;
    }
  }

  // default case
  Base = Addr;
  Offset = CurDAG->getTargetConstant(0, MVT::i16);
  return true;
}

static SDValue wrapAddr64Rsrc(SelectionDAG *DAG, SDLoc DL, SDValue Ptr) {
  return SDValue(DAG->getMachineNode(AMDGPU::SI_ADDR64_RSRC, DL, MVT::v4i32,
                                     Ptr), 0);
}

static bool isLegalMUBUFImmOffset(const ConstantSDNode *Imm) {
  return isUInt<12>(Imm->getZExtValue());
}

void AMDGPUDAGToDAGISel::SelectMUBUF(SDValue Addr, SDValue &Ptr,
                                     SDValue &VAddr, SDValue &SOffset,
                                     SDValue &Offset, SDValue &Offen,
                                     SDValue &Idxen, SDValue &Addr64,
                                     SDValue &GLC, SDValue &SLC,
                                     SDValue &TFE) const {
  SDLoc DL(Addr);

  GLC = CurDAG->getTargetConstant(0, MVT::i1);
  SLC = CurDAG->getTargetConstant(0, MVT::i1);
  TFE = CurDAG->getTargetConstant(0, MVT::i1);

  Idxen = CurDAG->getTargetConstant(0, MVT::i1);
  Offen = CurDAG->getTargetConstant(0, MVT::i1);
  Addr64 = CurDAG->getTargetConstant(0, MVT::i1);
  SOffset = CurDAG->getTargetConstant(0, MVT::i32);

810
Tom Stellardb02c2682014-06-24 23:33:07 +0000811 if (CurDAG->isBaseWithConstantOffset(Addr)) {
812 SDValue N0 = Addr.getOperand(0);
813 SDValue N1 = Addr.getOperand(1);
814 ConstantSDNode *C1 = cast<ConstantSDNode>(N1);
815
Tom Stellardb02094e2014-07-21 15:45:01 +0000816 if (isLegalMUBUFImmOffset(C1)) {
Tom Stellardb02c2682014-06-24 23:33:07 +0000817
818 if (N0.getOpcode() == ISD::ADD) {
Tom Stellard155bbb72014-08-11 22:18:17 +0000819 // (add (add N2, N3), C1) -> addr64
Tom Stellardb02c2682014-06-24 23:33:07 +0000820 SDValue N2 = N0.getOperand(0);
821 SDValue N3 = N0.getOperand(1);
Tom Stellard155bbb72014-08-11 22:18:17 +0000822 Addr64 = CurDAG->getTargetConstant(1, MVT::i1);
823 Ptr = N2;
824 VAddr = N3;
825 Offset = CurDAG->getTargetConstant(C1->getZExtValue(), MVT::i16);
826 return;
Tom Stellardb02c2682014-06-24 23:33:07 +0000827 }
828
Tom Stellard155bbb72014-08-11 22:18:17 +0000829 // (add N0, C1) -> offset
830 VAddr = CurDAG->getTargetConstant(0, MVT::i32);
831 Ptr = N0;
832 Offset = CurDAG->getTargetConstant(C1->getZExtValue(), MVT::i16);
833 return;
Tom Stellardb02c2682014-06-24 23:33:07 +0000834 }
835 }
836 if (Addr.getOpcode() == ISD::ADD) {
Tom Stellard155bbb72014-08-11 22:18:17 +0000837 // (add N0, N1) -> addr64
Tom Stellardb02c2682014-06-24 23:33:07 +0000838 SDValue N0 = Addr.getOperand(0);
839 SDValue N1 = Addr.getOperand(1);
Tom Stellard155bbb72014-08-11 22:18:17 +0000840 Addr64 = CurDAG->getTargetConstant(1, MVT::i1);
841 Ptr = N0;
842 VAddr = N1;
843 Offset = CurDAG->getTargetConstant(0, MVT::i16);
844 return;
Tom Stellardb02c2682014-06-24 23:33:07 +0000845 }
846
Tom Stellard155bbb72014-08-11 22:18:17 +0000847 // default case -> offset
848 VAddr = CurDAG->getTargetConstant(0, MVT::i32);
849 Ptr = Addr;
850 Offset = CurDAG->getTargetConstant(0, MVT::i16);
851
852}
853
854bool AMDGPUDAGToDAGISel::SelectMUBUFAddr64(SDValue Addr, SDValue &SRsrc,
855 SDValue &VAddr,
856 SDValue &Offset) const {
857 SDValue Ptr, SOffset, Offen, Idxen, Addr64, GLC, SLC, TFE;
858
859 SelectMUBUF(Addr, Ptr, VAddr, SOffset, Offset, Offen, Idxen, Addr64,
860 GLC, SLC, TFE);
861
862 ConstantSDNode *C = cast<ConstantSDNode>(Addr64);
863 if (C->getSExtValue()) {
864 SDLoc DL(Addr);
865 SRsrc = wrapAddr64Rsrc(CurDAG, DL, Ptr);
866 return true;
867 }
868 return false;
869}
870
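/// Build a 128-bit buffer resource descriptor: dwords 0-1 hold the base
/// pointer (with \p RsrcDword1 OR'd into the high pointer dword) and
/// \p RsrcDword2And3 supplies dwords 2-3.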
static SDValue buildRSRC(SelectionDAG *DAG, SDLoc DL, SDValue Ptr,
                         uint32_t RsrcDword1, uint64_t RsrcDword2And3) {

  SDValue PtrLo = DAG->getTargetExtractSubreg(AMDGPU::sub0, DL, MVT::i32, Ptr);
  SDValue PtrHi = DAG->getTargetExtractSubreg(AMDGPU::sub1, DL, MVT::i32, Ptr);
  if (RsrcDword1)
    PtrHi = SDValue(DAG->getMachineNode(AMDGPU::S_OR_B32, DL, MVT::i32, PtrHi,
                                        DAG->getConstant(RsrcDword1, MVT::i32)), 0);

  SDValue DataLo = DAG->getTargetConstant(
      RsrcDword2And3 & APInt::getAllOnesValue(32).getZExtValue(), MVT::i32);
  SDValue DataHi = DAG->getTargetConstant(RsrcDword2And3 >> 32, MVT::i32);

  const SDValue Ops[] = { PtrLo, PtrHi, DataLo, DataHi };
  return SDValue(DAG->getMachineNode(AMDGPU::SI_BUFFER_RSRC, DL,
                                     MVT::v4i32, Ops), 0);
}

/// \brief Return a resource descriptor with the 'Add TID' bit enabled
///        The TID (Thread ID) is multiplied by the stride value (bits [61:48]
///        of the resource descriptor) to create an offset, which is added to
///        the resource pointer.
static SDValue buildScratchRSRC(SelectionDAG *DAG, SDLoc DL, SDValue Ptr) {

  uint64_t Rsrc = AMDGPU::RSRC_DATA_FORMAT | AMDGPU::RSRC_TID_ENABLE |
                  0xffffffff; // Size

  return buildRSRC(DAG, DL, Ptr, 0, Rsrc);
}

bool AMDGPUDAGToDAGISel::SelectMUBUFScratch(SDValue Addr, SDValue &Rsrc,
                                            SDValue &VAddr, SDValue &SOffset,
                                            SDValue &ImmOffset) const {

  SDLoc DL(Addr);
  MachineFunction &MF = CurDAG->getMachineFunction();
  const SIRegisterInfo *TRI =
      static_cast<const SIRegisterInfo *>(MF.getSubtarget().getRegisterInfo());
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const SITargetLowering& Lowering =
      *static_cast<const SITargetLowering*>(getTargetLowering());

  unsigned ScratchPtrReg =
      TRI->getPreloadedValue(MF, SIRegisterInfo::SCRATCH_PTR);
  unsigned ScratchOffsetReg =
      TRI->getPreloadedValue(MF, SIRegisterInfo::SCRATCH_WAVE_OFFSET);
  Lowering.CreateLiveInRegister(*CurDAG, &AMDGPU::SReg_32RegClass,
                                ScratchOffsetReg, MVT::i32);

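  // Build the scratch resource descriptor from the preloaded scratch pointer;
  // the preloaded per-wave scratch offset register becomes SOffset.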
  Rsrc = buildScratchRSRC(CurDAG, DL,
      CurDAG->getCopyFromReg(CurDAG->getEntryNode(), DL,
                             MRI.getLiveInVirtReg(ScratchPtrReg), MVT::i64));
  SOffset = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), DL,
      MRI.getLiveInVirtReg(ScratchOffsetReg), MVT::i32);

  // (add n0, c1)
  if (CurDAG->isBaseWithConstantOffset(Addr)) {
    SDValue N1 = Addr.getOperand(1);
    ConstantSDNode *C1 = cast<ConstantSDNode>(N1);

    if (isLegalMUBUFImmOffset(C1)) {
      VAddr = Addr.getOperand(0);
      ImmOffset = CurDAG->getTargetConstant(C1->getZExtValue(), MVT::i16);
      return true;
    }
  }

  // (add FI, n0)
  if ((Addr.getOpcode() == ISD::ADD || Addr.getOpcode() == ISD::OR) &&
      isa<FrameIndexSDNode>(Addr.getOperand(0))) {
    VAddr = Addr.getOperand(1);
    ImmOffset = Addr.getOperand(0);
    return true;
  }

  // (FI)
  if (isa<FrameIndexSDNode>(Addr)) {
    VAddr = SDValue(CurDAG->getMachineNode(AMDGPU::V_MOV_B32_e32, DL, MVT::i32,
                                           CurDAG->getConstant(0, MVT::i32)), 0);
    ImmOffset = Addr;
    return true;
  }

  // (node)
  VAddr = Addr;
  ImmOffset = CurDAG->getTargetConstant(0, MVT::i16);
  return true;
}

bool AMDGPUDAGToDAGISel::SelectMUBUFOffset(SDValue Addr, SDValue &SRsrc,
                                           SDValue &SOffset, SDValue &Offset,
                                           SDValue &GLC, SDValue &SLC,
                                           SDValue &TFE) const {
  SDValue Ptr, VAddr, Offen, Idxen, Addr64;

  SelectMUBUF(Addr, Ptr, VAddr, SOffset, Offset, Offen, Idxen, Addr64,
              GLC, SLC, TFE);

Tom Stellard155bbb72014-08-11 22:18:17 +0000969 if (!cast<ConstantSDNode>(Offen)->getSExtValue() &&
970 !cast<ConstantSDNode>(Idxen)->getSExtValue() &&
971 !cast<ConstantSDNode>(Addr64)->getSExtValue()) {
972 uint64_t Rsrc = AMDGPU::RSRC_DATA_FORMAT |
973 APInt::getAllOnesValue(32).getZExtValue(); // Size
974 SDLoc DL(Addr);
975 SRsrc = buildRSRC(CurDAG, DL, Ptr, 0, Rsrc);
976 return true;
977 }
978 return false;
Tom Stellardb02094e2014-07-21 15:45:01 +0000979}
980
Tom Stellardb4a313a2014-08-01 00:32:39 +0000981bool AMDGPUDAGToDAGISel::SelectVOP3Mods(SDValue In, SDValue &Src,
982 SDValue &SrcMods) const {
983
984 unsigned Mods = 0;
985
986 Src = In;
987
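  // Peel FNEG/FABS off the source and record them as source-modifier bits
  // instead.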
  if (Src.getOpcode() == ISD::FNEG) {
    Mods |= SISrcMods::NEG;
    Src = Src.getOperand(0);
  }

  if (Src.getOpcode() == ISD::FABS) {
    Mods |= SISrcMods::ABS;
    Src = Src.getOperand(0);
  }

  SrcMods = CurDAG->getTargetConstant(Mods, MVT::i32);

  return true;
}

bool AMDGPUDAGToDAGISel::SelectVOP3Mods0(SDValue In, SDValue &Src,
                                         SDValue &SrcMods, SDValue &Clamp,
                                         SDValue &Omod) const {
  // FIXME: Handle Clamp and Omod
  Clamp = CurDAG->getTargetConstant(0, MVT::i32);
  Omod = CurDAG->getTargetConstant(0, MVT::i32);

  return SelectVOP3Mods(In, Src, SrcMods);
}

void AMDGPUDAGToDAGISel::PostprocessISelDAG() {
  const AMDGPUTargetLowering& Lowering =
      *static_cast<const AMDGPUTargetLowering*>(getTargetLowering());
  bool IsModified = false;
  do {
    IsModified = false;
    // Go over all selected nodes and try to fold them a bit more
    for (SelectionDAG::allnodes_iterator I = CurDAG->allnodes_begin(),
         E = CurDAG->allnodes_end(); I != E; ++I) {

      SDNode *Node = I;

      MachineSDNode *MachineNode = dyn_cast<MachineSDNode>(I);
      if (!MachineNode)
        continue;

      SDNode *ResNode = Lowering.PostISelFolding(MachineNode, *CurDAG);
      if (ResNode != Node) {
        ReplaceUses(Node, ResNode);
        IsModified = true;
      }
    }
    CurDAG->RemoveDeadNodes();
  } while (IsModified);
}