blob: 090fd1da920cfc0a9b4203f44bf88f06b112ed11 [file] [log] [blame]
Tom Stellard75aadc22012-12-11 21:25:42 +00001//===-- AMDILISelDAGToDAG.cpp - A dag to dag inst selector for AMDIL ------===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//==-----------------------------------------------------------------------===//
9//
10/// \file
11/// \brief Defines an instruction selector for the AMDGPU target.
12//
13//===----------------------------------------------------------------------===//
14#include "AMDGPUInstrInfo.h"
15#include "AMDGPUISelLowering.h" // For AMDGPUISD
16#include "AMDGPURegisterInfo.h"
Tom Stellard2e59a452014-06-13 01:32:00 +000017#include "AMDGPUSubtarget.h"
Tom Stellard75aadc22012-12-11 21:25:42 +000018#include "R600InstrInfo.h"
Tom Stellardb02094e2014-07-21 15:45:01 +000019#include "SIDefines.h"
Christian Konigf82901a2013-02-26 17:52:23 +000020#include "SIISelLowering.h"
Tom Stellardb02094e2014-07-21 15:45:01 +000021#include "SIMachineFunctionInfo.h"
Tom Stellard58ac7442014-04-29 23:12:48 +000022#include "llvm/CodeGen/FunctionLoweringInfo.h"
Tom Stellard75aadc22012-12-11 21:25:42 +000023#include "llvm/CodeGen/PseudoSourceValue.h"
Tom Stellardb02094e2014-07-21 15:45:01 +000024#include "llvm/CodeGen/MachineFrameInfo.h"
25#include "llvm/CodeGen/MachineRegisterInfo.h"
Benjamin Kramerd78bb462013-05-23 17:10:37 +000026#include "llvm/CodeGen/SelectionDAG.h"
Tom Stellard75aadc22012-12-11 21:25:42 +000027#include "llvm/CodeGen/SelectionDAGISel.h"
Tom Stellard58ac7442014-04-29 23:12:48 +000028#include "llvm/IR/Function.h"
Tom Stellard75aadc22012-12-11 21:25:42 +000029
30using namespace llvm;
31
32//===----------------------------------------------------------------------===//
33// Instruction Selector Implementation
34//===----------------------------------------------------------------------===//
35
namespace {
/// AMDGPU specific code to select AMDGPU machine instructions for
/// SelectionDAG operations.
class AMDGPUDAGToDAGISel : public SelectionDAGISel {
  // Subtarget - Keep a pointer to the AMDGPU Subtarget around so that we can
  // make the right decision when generating code for different targets.
  const AMDGPUSubtarget &Subtarget;
public:
  AMDGPUDAGToDAGISel(TargetMachine &TM);
  virtual ~AMDGPUDAGToDAGISel();

  SDNode *Select(SDNode *N) override;
  const char *getPassName() const override;
  void PostprocessISelDAG() override;

private:
  /// \returns True if \p N is a 64-bit constant the SI hardware can encode
  /// as an inline immediate (analyzeImmediate reports 0).
  bool isInlineImmediate(SDNode *N) const;
  inline SDValue getSmallIPtrImm(unsigned Imm);
  // R600 source-operand folding helpers (select/neg/abs modifiers).
  bool FoldOperand(SDValue &Src, SDValue &Sel, SDValue &Neg, SDValue &Abs,
                   const R600InstrInfo *TII);
  bool FoldOperands(unsigned, const R600InstrInfo *, std::vector<SDValue> &);
  bool FoldDotOperands(unsigned, const R600InstrInfo *, std::vector<SDValue> &);

  // Complex pattern selectors
  bool SelectADDRParam(SDValue Addr, SDValue& R1, SDValue& R2);
  bool SelectADDR(SDValue N, SDValue &R1, SDValue &R2);
  bool SelectADDR64(SDValue N, SDValue &R1, SDValue &R2);

  // Address-space predicates.  checkType tests a pointer's address space;
  // address space 0 (private) must go through checkPrivateAddress instead.
  static bool checkType(const Value *ptr, unsigned int addrspace);
  static bool checkPrivateAddress(const MachineMemOperand *Op);

  static bool isGlobalStore(const StoreSDNode *N);
  static bool isPrivateStore(const StoreSDNode *N);
  static bool isLocalStore(const StoreSDNode *N);
  static bool isRegionStore(const StoreSDNode *N);

  bool isCPLoad(const LoadSDNode *N) const;
  bool isConstantLoad(const LoadSDNode *N, int cbID) const;
  bool isGlobalLoad(const LoadSDNode *N) const;
  bool isParamLoad(const LoadSDNode *N) const;
  bool isPrivateLoad(const LoadSDNode *N) const;
  bool isLocalLoad(const LoadSDNode *N) const;
  bool isRegionLoad(const LoadSDNode *N) const;

  /// \returns True if the current basic block being selected is at control
  /// flow depth 0, meaning that the current block dominates the exit block.
  bool isCFDepth0() const;

  const TargetRegisterClass *getOperandRegClass(SDNode *N, unsigned OpNo) const;
  bool SelectGlobalValueConstantOffset(SDValue Addr, SDValue& IntPtr);
  bool SelectGlobalValueVariableOffset(SDValue Addr, SDValue &BaseReg,
                                       SDValue& Offset);
  bool SelectADDRVTX_READ(SDValue Addr, SDValue &Base, SDValue &Offset);
  bool SelectADDRIndirect(SDValue Addr, SDValue &Base, SDValue &Offset);
  // MUBUF (buffer memory) addressing-mode selectors for SI.
  bool SelectMUBUFAddr64(SDValue Addr, SDValue &Ptr, SDValue &Offset,
                         SDValue &ImmOffset) const;
  bool SelectMUBUFScratch(SDValue Addr, SDValue &RSrc, SDValue &VAddr,
                          SDValue &SOffset, SDValue &ImmOffset) const;
  bool SelectMUBUFAddr32(SDValue Addr, SDValue &SRsrc, SDValue &VAddr,
                         SDValue &SOffset, SDValue &Offset, SDValue &Offen,
                         SDValue &Idxen, SDValue &GLC, SDValue &SLC,
                         SDValue &TFE) const;
  // VOP3 source-modifier (neg/abs, clamp, omod) selectors.
  bool SelectVOP3Mods(SDValue In, SDValue &Src, SDValue &SrcMods) const;
  bool SelectVOP3Mods0(SDValue In, SDValue &Src, SDValue &SrcMods,
                       SDValue &Clamp, SDValue &Omod) const;

  SDNode *SelectADD_SUB_I64(SDNode *N);
  SDNode *SelectDIV_SCALE(SDNode *N);

  // Include the pieces autogenerated from the target description.
#include "AMDGPUGenDAGISel.inc"
};
} // end anonymous namespace
110
/// \brief This pass converts a legalized DAG into a AMDGPU-specific
/// DAG, ready for instruction scheduling.
FunctionPass *llvm::createAMDGPUISelDag(TargetMachine &TM) {
  return new AMDGPUDAGToDAGISel(TM);
}
116
// Cache the AMDGPU subtarget so selection helpers can query the target
// generation without going back through the TargetMachine.
AMDGPUDAGToDAGISel::AMDGPUDAGToDAGISel(TargetMachine &TM)
  : SelectionDAGISel(TM), Subtarget(TM.getSubtarget<AMDGPUSubtarget>()) {
}
120
// Out-of-line destructor; the class owns no resources of its own.
AMDGPUDAGToDAGISel::~AMDGPUDAGToDAGISel() {
}
123
Tom Stellard7ed0b522014-04-03 20:19:27 +0000124bool AMDGPUDAGToDAGISel::isInlineImmediate(SDNode *N) const {
125 const SITargetLowering *TL
126 = static_cast<const SITargetLowering *>(getTargetLowering());
127 return TL->analyzeImmediate(N) == 0;
128}
129
Tom Stellarddf94dc32013-08-14 23:24:24 +0000130/// \brief Determine the register class for \p OpNo
131/// \returns The register class of the virtual register that will be used for
132/// the given operand number \OpNo or NULL if the register class cannot be
133/// determined.
134const TargetRegisterClass *AMDGPUDAGToDAGISel::getOperandRegClass(SDNode *N,
135 unsigned OpNo) const {
Matt Arsenault209a7b92014-04-18 07:40:20 +0000136 if (!N->isMachineOpcode())
137 return nullptr;
138
Tom Stellarddf94dc32013-08-14 23:24:24 +0000139 switch (N->getMachineOpcode()) {
140 default: {
141 const MCInstrDesc &Desc = TM.getInstrInfo()->get(N->getMachineOpcode());
Alexey Samsonov3186eb32013-08-15 07:11:34 +0000142 unsigned OpIdx = Desc.getNumDefs() + OpNo;
143 if (OpIdx >= Desc.getNumOperands())
Matt Arsenault209a7b92014-04-18 07:40:20 +0000144 return nullptr;
Alexey Samsonov3186eb32013-08-15 07:11:34 +0000145 int RegClass = Desc.OpInfo[OpIdx].RegClass;
Matt Arsenault209a7b92014-04-18 07:40:20 +0000146 if (RegClass == -1)
147 return nullptr;
148
Tom Stellarddf94dc32013-08-14 23:24:24 +0000149 return TM.getRegisterInfo()->getRegClass(RegClass);
150 }
151 case AMDGPU::REG_SEQUENCE: {
Matt Arsenault209a7b92014-04-18 07:40:20 +0000152 unsigned RCID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
153 const TargetRegisterClass *SuperRC = TM.getRegisterInfo()->getRegClass(RCID);
154
155 SDValue SubRegOp = N->getOperand(OpNo + 1);
156 unsigned SubRegIdx = cast<ConstantSDNode>(SubRegOp)->getZExtValue();
Tom Stellarddf94dc32013-08-14 23:24:24 +0000157 return TM.getRegisterInfo()->getSubClassWithSubReg(SuperRC, SubRegIdx);
158 }
159 }
160}
161
// Wrap \p Imm as a 32-bit target constant (small pointer-sized immediate).
SDValue AMDGPUDAGToDAGISel::getSmallIPtrImm(unsigned int Imm) {
  return CurDAG->getTargetConstant(Imm, MVT::i32);
}
165
166bool AMDGPUDAGToDAGISel::SelectADDRParam(
Matt Arsenault209a7b92014-04-18 07:40:20 +0000167 SDValue Addr, SDValue& R1, SDValue& R2) {
Tom Stellard75aadc22012-12-11 21:25:42 +0000168
169 if (Addr.getOpcode() == ISD::FrameIndex) {
170 if (FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
171 R1 = CurDAG->getTargetFrameIndex(FIN->getIndex(), MVT::i32);
172 R2 = CurDAG->getTargetConstant(0, MVT::i32);
173 } else {
174 R1 = Addr;
175 R2 = CurDAG->getTargetConstant(0, MVT::i32);
176 }
177 } else if (Addr.getOpcode() == ISD::ADD) {
178 R1 = Addr.getOperand(0);
179 R2 = Addr.getOperand(1);
180 } else {
181 R1 = Addr;
182 R2 = CurDAG->getTargetConstant(0, MVT::i32);
183 }
184 return true;
185}
186
187bool AMDGPUDAGToDAGISel::SelectADDR(SDValue Addr, SDValue& R1, SDValue& R2) {
188 if (Addr.getOpcode() == ISD::TargetExternalSymbol ||
189 Addr.getOpcode() == ISD::TargetGlobalAddress) {
190 return false;
191 }
192 return SelectADDRParam(Addr, R1, R2);
193}
194
195
196bool AMDGPUDAGToDAGISel::SelectADDR64(SDValue Addr, SDValue& R1, SDValue& R2) {
197 if (Addr.getOpcode() == ISD::TargetExternalSymbol ||
198 Addr.getOpcode() == ISD::TargetGlobalAddress) {
199 return false;
200 }
201
202 if (Addr.getOpcode() == ISD::FrameIndex) {
203 if (FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
204 R1 = CurDAG->getTargetFrameIndex(FIN->getIndex(), MVT::i64);
205 R2 = CurDAG->getTargetConstant(0, MVT::i64);
206 } else {
207 R1 = Addr;
208 R2 = CurDAG->getTargetConstant(0, MVT::i64);
209 }
210 } else if (Addr.getOpcode() == ISD::ADD) {
211 R1 = Addr.getOperand(0);
212 R2 = Addr.getOperand(1);
213 } else {
214 R1 = Addr;
215 R2 = CurDAG->getTargetConstant(0, MVT::i64);
216 }
217 return true;
218}
219
220SDNode *AMDGPUDAGToDAGISel::Select(SDNode *N) {
221 unsigned int Opc = N->getOpcode();
222 if (N->isMachineOpcode()) {
Tim Northover31d093c2013-09-22 08:21:56 +0000223 N->setNodeId(-1);
Matt Arsenault209a7b92014-04-18 07:40:20 +0000224 return nullptr; // Already selected.
Tom Stellard75aadc22012-12-11 21:25:42 +0000225 }
Matt Arsenault78b86702014-04-18 05:19:26 +0000226
227 const AMDGPUSubtarget &ST = TM.getSubtarget<AMDGPUSubtarget>();
Tom Stellard75aadc22012-12-11 21:25:42 +0000228 switch (Opc) {
229 default: break;
Tom Stellard1f15bff2014-02-25 21:36:18 +0000230 // We are selecting i64 ADD here instead of custom lower it during
231 // DAG legalization, so we can fold some i64 ADDs used for address
232 // calculation into the LOAD and STORE instructions.
Matt Arsenaultb8b51532014-06-23 18:00:38 +0000233 case ISD::ADD:
234 case ISD::SUB: {
Tom Stellard1f15bff2014-02-25 21:36:18 +0000235 if (N->getValueType(0) != MVT::i64 ||
236 ST.getGeneration() < AMDGPUSubtarget::SOUTHERN_ISLANDS)
237 break;
238
Matt Arsenaultb8b51532014-06-23 18:00:38 +0000239 return SelectADD_SUB_I64(N);
Tom Stellard1f15bff2014-02-25 21:36:18 +0000240 }
Matt Arsenault064c2062014-06-11 17:40:32 +0000241 case ISD::SCALAR_TO_VECTOR:
Tom Stellard880a80a2014-06-17 16:53:14 +0000242 case AMDGPUISD::BUILD_VERTICAL_VECTOR:
Vincent Lejeune3b6f20e2013-03-05 15:04:49 +0000243 case ISD::BUILD_VECTOR: {
Tom Stellard8e5da412013-08-14 23:24:32 +0000244 unsigned RegClassID;
Tom Stellard8e5da412013-08-14 23:24:32 +0000245 const AMDGPURegisterInfo *TRI =
246 static_cast<const AMDGPURegisterInfo*>(TM.getRegisterInfo());
247 const SIRegisterInfo *SIRI =
248 static_cast<const SIRegisterInfo*>(TM.getRegisterInfo());
249 EVT VT = N->getValueType(0);
250 unsigned NumVectorElts = VT.getVectorNumElements();
Matt Arsenault064c2062014-06-11 17:40:32 +0000251 EVT EltVT = VT.getVectorElementType();
252 assert(EltVT.bitsEq(MVT::i32));
Tom Stellard8e5da412013-08-14 23:24:32 +0000253 if (ST.getGeneration() >= AMDGPUSubtarget::SOUTHERN_ISLANDS) {
254 bool UseVReg = true;
255 for (SDNode::use_iterator U = N->use_begin(), E = SDNode::use_end();
256 U != E; ++U) {
257 if (!U->isMachineOpcode()) {
258 continue;
259 }
260 const TargetRegisterClass *RC = getOperandRegClass(*U, U.getOperandNo());
261 if (!RC) {
262 continue;
263 }
264 if (SIRI->isSGPRClass(RC)) {
265 UseVReg = false;
266 }
267 }
268 switch(NumVectorElts) {
269 case 1: RegClassID = UseVReg ? AMDGPU::VReg_32RegClassID :
270 AMDGPU::SReg_32RegClassID;
271 break;
272 case 2: RegClassID = UseVReg ? AMDGPU::VReg_64RegClassID :
273 AMDGPU::SReg_64RegClassID;
274 break;
275 case 4: RegClassID = UseVReg ? AMDGPU::VReg_128RegClassID :
276 AMDGPU::SReg_128RegClassID;
277 break;
278 case 8: RegClassID = UseVReg ? AMDGPU::VReg_256RegClassID :
279 AMDGPU::SReg_256RegClassID;
280 break;
281 case 16: RegClassID = UseVReg ? AMDGPU::VReg_512RegClassID :
282 AMDGPU::SReg_512RegClassID;
283 break;
Benjamin Kramerbda73ff2013-08-31 21:20:04 +0000284 default: llvm_unreachable("Do not know how to lower this BUILD_VECTOR");
Tom Stellard8e5da412013-08-14 23:24:32 +0000285 }
286 } else {
287 // BUILD_VECTOR was lowered into an IMPLICIT_DEF + 4 INSERT_SUBREG
288 // that adds a 128 bits reg copy when going through TwoAddressInstructions
289 // pass. We want to avoid 128 bits copies as much as possible because they
290 // can't be bundled by our scheduler.
291 switch(NumVectorElts) {
292 case 2: RegClassID = AMDGPU::R600_Reg64RegClassID; break;
Tom Stellard880a80a2014-06-17 16:53:14 +0000293 case 4:
294 if (Opc == AMDGPUISD::BUILD_VERTICAL_VECTOR)
295 RegClassID = AMDGPU::R600_Reg128VerticalRegClassID;
296 else
297 RegClassID = AMDGPU::R600_Reg128RegClassID;
298 break;
Tom Stellard8e5da412013-08-14 23:24:32 +0000299 default: llvm_unreachable("Do not know how to lower this BUILD_VECTOR");
300 }
Vincent Lejeune3b6f20e2013-03-05 15:04:49 +0000301 }
Tom Stellard0344cdf2013-08-01 15:23:42 +0000302
Tom Stellard8e5da412013-08-14 23:24:32 +0000303 SDValue RegClass = CurDAG->getTargetConstant(RegClassID, MVT::i32);
304
305 if (NumVectorElts == 1) {
Matt Arsenault064c2062014-06-11 17:40:32 +0000306 return CurDAG->SelectNodeTo(N, AMDGPU::COPY_TO_REGCLASS, EltVT,
Tom Stellard8e5da412013-08-14 23:24:32 +0000307 N->getOperand(0), RegClass);
Tom Stellard0344cdf2013-08-01 15:23:42 +0000308 }
Tom Stellard8e5da412013-08-14 23:24:32 +0000309
310 assert(NumVectorElts <= 16 && "Vectors with more than 16 elements not "
311 "supported yet");
312 // 16 = Max Num Vector Elements
313 // 2 = 2 REG_SEQUENCE operands per element (value, subreg index)
314 // 1 = Vector Register Class
Matt Arsenault064c2062014-06-11 17:40:32 +0000315 SmallVector<SDValue, 16 * 2 + 1> RegSeqArgs(NumVectorElts * 2 + 1);
Tom Stellard8e5da412013-08-14 23:24:32 +0000316
317 RegSeqArgs[0] = CurDAG->getTargetConstant(RegClassID, MVT::i32);
Vincent Lejeune3b6f20e2013-03-05 15:04:49 +0000318 bool IsRegSeq = true;
Matt Arsenault064c2062014-06-11 17:40:32 +0000319 unsigned NOps = N->getNumOperands();
320 for (unsigned i = 0; i < NOps; i++) {
Tom Stellard8e5da412013-08-14 23:24:32 +0000321 // XXX: Why is this here?
Vincent Lejeune3b6f20e2013-03-05 15:04:49 +0000322 if (dyn_cast<RegisterSDNode>(N->getOperand(i))) {
323 IsRegSeq = false;
324 break;
325 }
Tom Stellard8e5da412013-08-14 23:24:32 +0000326 RegSeqArgs[1 + (2 * i)] = N->getOperand(i);
327 RegSeqArgs[1 + (2 * i) + 1] =
328 CurDAG->getTargetConstant(TRI->getSubRegFromChannel(i), MVT::i32);
Vincent Lejeune3b6f20e2013-03-05 15:04:49 +0000329 }
Matt Arsenault064c2062014-06-11 17:40:32 +0000330
331 if (NOps != NumVectorElts) {
332 // Fill in the missing undef elements if this was a scalar_to_vector.
333 assert(Opc == ISD::SCALAR_TO_VECTOR && NOps < NumVectorElts);
334
335 MachineSDNode *ImpDef = CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF,
336 SDLoc(N), EltVT);
337 for (unsigned i = NOps; i < NumVectorElts; ++i) {
338 RegSeqArgs[1 + (2 * i)] = SDValue(ImpDef, 0);
339 RegSeqArgs[1 + (2 * i) + 1] =
340 CurDAG->getTargetConstant(TRI->getSubRegFromChannel(i), MVT::i32);
341 }
342 }
343
Vincent Lejeune3b6f20e2013-03-05 15:04:49 +0000344 if (!IsRegSeq)
345 break;
346 return CurDAG->SelectNodeTo(N, AMDGPU::REG_SEQUENCE, N->getVTList(),
Craig Topper481fb282014-04-27 19:21:11 +0000347 RegSeqArgs);
Vincent Lejeune3b6f20e2013-03-05 15:04:49 +0000348 }
Tom Stellard754f80f2013-04-05 23:31:51 +0000349 case ISD::BUILD_PAIR: {
350 SDValue RC, SubReg0, SubReg1;
Tom Stellarda6c6e1b2013-06-07 20:37:48 +0000351 if (ST.getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS) {
Tom Stellard754f80f2013-04-05 23:31:51 +0000352 break;
353 }
354 if (N->getValueType(0) == MVT::i128) {
355 RC = CurDAG->getTargetConstant(AMDGPU::SReg_128RegClassID, MVT::i32);
356 SubReg0 = CurDAG->getTargetConstant(AMDGPU::sub0_sub1, MVT::i32);
357 SubReg1 = CurDAG->getTargetConstant(AMDGPU::sub2_sub3, MVT::i32);
358 } else if (N->getValueType(0) == MVT::i64) {
Tom Stellard1aa6cb42014-04-18 00:36:21 +0000359 RC = CurDAG->getTargetConstant(AMDGPU::SReg_64RegClassID, MVT::i32);
Tom Stellard754f80f2013-04-05 23:31:51 +0000360 SubReg0 = CurDAG->getTargetConstant(AMDGPU::sub0, MVT::i32);
361 SubReg1 = CurDAG->getTargetConstant(AMDGPU::sub1, MVT::i32);
362 } else {
363 llvm_unreachable("Unhandled value type for BUILD_PAIR");
364 }
365 const SDValue Ops[] = { RC, N->getOperand(0), SubReg0,
366 N->getOperand(1), SubReg1 };
367 return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE,
Andrew Trickef9de2a2013-05-25 02:42:55 +0000368 SDLoc(N), N->getValueType(0), Ops);
Tom Stellard754f80f2013-04-05 23:31:51 +0000369 }
Tom Stellard7ed0b522014-04-03 20:19:27 +0000370
371 case ISD::Constant:
372 case ISD::ConstantFP: {
373 const AMDGPUSubtarget &ST = TM.getSubtarget<AMDGPUSubtarget>();
374 if (ST.getGeneration() < AMDGPUSubtarget::SOUTHERN_ISLANDS ||
375 N->getValueType(0).getSizeInBits() != 64 || isInlineImmediate(N))
376 break;
377
378 uint64_t Imm;
379 if (ConstantFPSDNode *FP = dyn_cast<ConstantFPSDNode>(N))
380 Imm = FP->getValueAPF().bitcastToAPInt().getZExtValue();
381 else {
Tom Stellard3cbe0142014-04-07 19:31:13 +0000382 ConstantSDNode *C = cast<ConstantSDNode>(N);
Tom Stellard7ed0b522014-04-03 20:19:27 +0000383 Imm = C->getZExtValue();
384 }
385
386 SDNode *Lo = CurDAG->getMachineNode(AMDGPU::S_MOV_B32, SDLoc(N), MVT::i32,
387 CurDAG->getConstant(Imm & 0xFFFFFFFF, MVT::i32));
388 SDNode *Hi = CurDAG->getMachineNode(AMDGPU::S_MOV_B32, SDLoc(N), MVT::i32,
389 CurDAG->getConstant(Imm >> 32, MVT::i32));
390 const SDValue Ops[] = {
391 CurDAG->getTargetConstant(AMDGPU::SReg_64RegClassID, MVT::i32),
392 SDValue(Lo, 0), CurDAG->getTargetConstant(AMDGPU::sub0, MVT::i32),
393 SDValue(Hi, 0), CurDAG->getTargetConstant(AMDGPU::sub1, MVT::i32)
394 };
395
396 return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, SDLoc(N),
397 N->getValueType(0), Ops);
398 }
399
Tom Stellard81d871d2013-11-13 23:36:50 +0000400 case AMDGPUISD::REGISTER_LOAD: {
Tom Stellard81d871d2013-11-13 23:36:50 +0000401 if (ST.getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS)
402 break;
403 SDValue Addr, Offset;
404
405 SelectADDRIndirect(N->getOperand(1), Addr, Offset);
406 const SDValue Ops[] = {
407 Addr,
408 Offset,
409 CurDAG->getTargetConstant(0, MVT::i32),
410 N->getOperand(0),
411 };
412 return CurDAG->getMachineNode(AMDGPU::SI_RegisterLoad, SDLoc(N),
413 CurDAG->getVTList(MVT::i32, MVT::i64, MVT::Other),
414 Ops);
415 }
416 case AMDGPUISD::REGISTER_STORE: {
Tom Stellard81d871d2013-11-13 23:36:50 +0000417 if (ST.getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS)
418 break;
419 SDValue Addr, Offset;
420 SelectADDRIndirect(N->getOperand(2), Addr, Offset);
421 const SDValue Ops[] = {
422 N->getOperand(1),
423 Addr,
424 Offset,
425 CurDAG->getTargetConstant(0, MVT::i32),
426 N->getOperand(0),
427 };
428 return CurDAG->getMachineNode(AMDGPU::SI_RegisterStorePseudo, SDLoc(N),
429 CurDAG->getVTList(MVT::Other),
430 Ops);
431 }
Matt Arsenault78b86702014-04-18 05:19:26 +0000432
433 case AMDGPUISD::BFE_I32:
434 case AMDGPUISD::BFE_U32: {
435 if (ST.getGeneration() < AMDGPUSubtarget::SOUTHERN_ISLANDS)
436 break;
437
438 // There is a scalar version available, but unlike the vector version which
439 // has a separate operand for the offset and width, the scalar version packs
440 // the width and offset into a single operand. Try to move to the scalar
441 // version if the offsets are constant, so that we can try to keep extended
442 // loads of kernel arguments in SGPRs.
443
444 // TODO: Technically we could try to pattern match scalar bitshifts of
445 // dynamic values, but it's probably not useful.
446 ConstantSDNode *Offset = dyn_cast<ConstantSDNode>(N->getOperand(1));
447 if (!Offset)
448 break;
449
450 ConstantSDNode *Width = dyn_cast<ConstantSDNode>(N->getOperand(2));
451 if (!Width)
452 break;
453
454 bool Signed = Opc == AMDGPUISD::BFE_I32;
455
456 // Transformation function, pack the offset and width of a BFE into
457 // the format expected by the S_BFE_I32 / S_BFE_U32. In the second
458 // source, bits [5:0] contain the offset and bits [22:16] the width.
459
460 uint32_t OffsetVal = Offset->getZExtValue();
461 uint32_t WidthVal = Width->getZExtValue();
462
463 uint32_t PackedVal = OffsetVal | WidthVal << 16;
464
465 SDValue PackedOffsetWidth = CurDAG->getTargetConstant(PackedVal, MVT::i32);
466 return CurDAG->getMachineNode(Signed ? AMDGPU::S_BFE_I32 : AMDGPU::S_BFE_U32,
467 SDLoc(N),
468 MVT::i32,
469 N->getOperand(0),
470 PackedOffsetWidth);
471
472 }
Matt Arsenaultf2b0aeb2014-06-23 18:28:28 +0000473 case AMDGPUISD::DIV_SCALE: {
474 return SelectDIV_SCALE(N);
475 }
Tom Stellard75aadc22012-12-11 21:25:42 +0000476 }
Vincent Lejeune0167a312013-09-12 23:45:00 +0000477 return SelectCode(N);
Tom Stellard365366f2013-01-23 02:09:06 +0000478}
479
Tom Stellard75aadc22012-12-11 21:25:42 +0000480
Matt Arsenault209a7b92014-04-18 07:40:20 +0000481bool AMDGPUDAGToDAGISel::checkType(const Value *Ptr, unsigned AS) {
482 assert(AS != 0 && "Use checkPrivateAddress instead.");
483 if (!Ptr)
Tom Stellard75aadc22012-12-11 21:25:42 +0000484 return false;
Matt Arsenault209a7b92014-04-18 07:40:20 +0000485
486 return Ptr->getType()->getPointerAddressSpace() == AS;
Tom Stellard75aadc22012-12-11 21:25:42 +0000487}
488
Nick Lewyckyaad475b2014-04-15 07:22:52 +0000489bool AMDGPUDAGToDAGISel::checkPrivateAddress(const MachineMemOperand *Op) {
Matt Arsenault209a7b92014-04-18 07:40:20 +0000490 if (Op->getPseudoValue())
491 return true;
492
493 if (PointerType *PT = dyn_cast<PointerType>(Op->getValue()->getType()))
494 return PT->getAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS;
495
496 return false;
Nick Lewyckyaad475b2014-04-15 07:22:52 +0000497}
498
// A store is "global" when its pointer operand is in the global address space.
bool AMDGPUDAGToDAGISel::isGlobalStore(const StoreSDNode *N) {
  return checkType(N->getMemOperand()->getValue(), AMDGPUAS::GLOBAL_ADDRESS);
}
502
503bool AMDGPUDAGToDAGISel::isPrivateStore(const StoreSDNode *N) {
Matt Arsenault209a7b92014-04-18 07:40:20 +0000504 const Value *MemVal = N->getMemOperand()->getValue();
505 return (!checkType(MemVal, AMDGPUAS::LOCAL_ADDRESS) &&
506 !checkType(MemVal, AMDGPUAS::GLOBAL_ADDRESS) &&
507 !checkType(MemVal, AMDGPUAS::REGION_ADDRESS));
Tom Stellard75aadc22012-12-11 21:25:42 +0000508}
509
// A store is "local" (LDS) when its pointer is in the local address space.
bool AMDGPUDAGToDAGISel::isLocalStore(const StoreSDNode *N) {
  return checkType(N->getMemOperand()->getValue(), AMDGPUAS::LOCAL_ADDRESS);
}
513
// A store is a "region" (GDS) store when its pointer is in the region
// address space.
bool AMDGPUDAGToDAGISel::isRegionStore(const StoreSDNode *N) {
  return checkType(N->getMemOperand()->getValue(), AMDGPUAS::REGION_ADDRESS);
}
517
Tom Stellard1e803092013-07-23 01:48:18 +0000518bool AMDGPUDAGToDAGISel::isConstantLoad(const LoadSDNode *N, int CbId) const {
Matt Arsenault209a7b92014-04-18 07:40:20 +0000519 const Value *MemVal = N->getMemOperand()->getValue();
520 if (CbId == -1)
521 return checkType(MemVal, AMDGPUAS::CONSTANT_ADDRESS);
522
523 return checkType(MemVal, AMDGPUAS::CONSTANT_BUFFER_0 + CbId);
Tom Stellard75aadc22012-12-11 21:25:42 +0000524}
525
Matt Arsenault2aabb062013-06-18 23:37:58 +0000526bool AMDGPUDAGToDAGISel::isGlobalLoad(const LoadSDNode *N) const {
Tom Stellard8cb0e472013-07-23 23:54:56 +0000527 if (N->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS) {
528 const AMDGPUSubtarget &ST = TM.getSubtarget<AMDGPUSubtarget>();
529 if (ST.getGeneration() < AMDGPUSubtarget::SOUTHERN_ISLANDS ||
530 N->getMemoryVT().bitsLT(MVT::i32)) {
531 return true;
532 }
533 }
Nick Lewyckyaad475b2014-04-15 07:22:52 +0000534 return checkType(N->getMemOperand()->getValue(), AMDGPUAS::GLOBAL_ADDRESS);
Tom Stellard75aadc22012-12-11 21:25:42 +0000535}
536
// A "param" load reads from the kernel-parameter (PARAM_I) address space.
bool AMDGPUDAGToDAGISel::isParamLoad(const LoadSDNode *N) const {
  return checkType(N->getMemOperand()->getValue(), AMDGPUAS::PARAM_I_ADDRESS);
}
540
// A load is "local" (LDS) when its pointer is in the local address space.
bool AMDGPUDAGToDAGISel::isLocalLoad(const  LoadSDNode *N) const {
  return checkType(N->getMemOperand()->getValue(), AMDGPUAS::LOCAL_ADDRESS);
}
544
// A load is a "region" (GDS) load when its pointer is in the region
// address space.
bool AMDGPUDAGToDAGISel::isRegionLoad(const LoadSDNode *N) const {
  return checkType(N->getMemOperand()->getValue(), AMDGPUAS::REGION_ADDRESS);
}
548
Matt Arsenault2aabb062013-06-18 23:37:58 +0000549bool AMDGPUDAGToDAGISel::isCPLoad(const LoadSDNode *N) const {
Tom Stellard75aadc22012-12-11 21:25:42 +0000550 MachineMemOperand *MMO = N->getMemOperand();
Nick Lewyckyaad475b2014-04-15 07:22:52 +0000551 if (checkPrivateAddress(N->getMemOperand())) {
Tom Stellard75aadc22012-12-11 21:25:42 +0000552 if (MMO) {
Nick Lewyckyaad475b2014-04-15 07:22:52 +0000553 const PseudoSourceValue *PSV = MMO->getPseudoValue();
Tom Stellard75aadc22012-12-11 21:25:42 +0000554 if (PSV && PSV == PseudoSourceValue::getConstantPool()) {
555 return true;
556 }
557 }
558 }
559 return false;
560}
561
Matt Arsenault2aabb062013-06-18 23:37:58 +0000562bool AMDGPUDAGToDAGISel::isPrivateLoad(const LoadSDNode *N) const {
Nick Lewyckyaad475b2014-04-15 07:22:52 +0000563 if (checkPrivateAddress(N->getMemOperand())) {
Tom Stellard75aadc22012-12-11 21:25:42 +0000564 // Check to make sure we are not a constant pool load or a constant load
565 // that is marked as a private load
566 if (isCPLoad(N) || isConstantLoad(N, -1)) {
567 return false;
568 }
569 }
Matt Arsenault209a7b92014-04-18 07:40:20 +0000570
571 const Value *MemVal = N->getMemOperand()->getValue();
572 if (!checkType(MemVal, AMDGPUAS::LOCAL_ADDRESS) &&
573 !checkType(MemVal, AMDGPUAS::GLOBAL_ADDRESS) &&
574 !checkType(MemVal, AMDGPUAS::REGION_ADDRESS) &&
575 !checkType(MemVal, AMDGPUAS::CONSTANT_ADDRESS) &&
576 !checkType(MemVal, AMDGPUAS::PARAM_D_ADDRESS) &&
577 !checkType(MemVal, AMDGPUAS::PARAM_I_ADDRESS)){
Tom Stellard75aadc22012-12-11 21:25:42 +0000578 return true;
579 }
580 return false;
581}
582
// \returns True if the block currently being selected is at control-flow
// depth 0 (i.e. it dominates the exit block).
bool AMDGPUDAGToDAGISel::isCFDepth0() const {
  // FIXME: Figure out a way to use DominatorTree analysis here.
  // Heuristic: only the function's first and last blocks are treated as
  // depth 0.  NOTE(review): this is an approximation of "dominates the
  // exit block" — see the FIXME above.
  const BasicBlock *CurBlock = FuncInfo->MBB->getBasicBlock();
  const Function *Fn = FuncInfo->Fn;
  return &Fn->front() == CurBlock || &Fn->back() == CurBlock;
}
589
590
// Human-readable pass name shown by -debug-pass and similar tooling.
const char *AMDGPUDAGToDAGISel::getPassName() const {
  return "AMDGPU DAG->DAG Pattern Instruction Selection";
}
594
595#ifdef DEBUGTMP
596#undef INT64_C
597#endif
598#undef DEBUGTMP
599
Tom Stellard41fc7852013-07-23 01:48:42 +0000600//===----------------------------------------------------------------------===//
601// Complex Patterns
602//===----------------------------------------------------------------------===//
Tom Stellard75aadc22012-12-11 21:25:42 +0000603
Tom Stellard365366f2013-01-23 02:09:06 +0000604bool AMDGPUDAGToDAGISel::SelectGlobalValueConstantOffset(SDValue Addr,
Matt Arsenault209a7b92014-04-18 07:40:20 +0000605 SDValue& IntPtr) {
Tom Stellard365366f2013-01-23 02:09:06 +0000606 if (ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(Addr)) {
607 IntPtr = CurDAG->getIntPtrConstant(Cst->getZExtValue() / 4, true);
608 return true;
609 }
610 return false;
611}
612
613bool AMDGPUDAGToDAGISel::SelectGlobalValueVariableOffset(SDValue Addr,
614 SDValue& BaseReg, SDValue &Offset) {
Matt Arsenault209a7b92014-04-18 07:40:20 +0000615 if (!isa<ConstantSDNode>(Addr)) {
Tom Stellard365366f2013-01-23 02:09:06 +0000616 BaseReg = Addr;
617 Offset = CurDAG->getIntPtrConstant(0, true);
618 return true;
619 }
620 return false;
621}
622
Tom Stellard75aadc22012-12-11 21:25:42 +0000623bool AMDGPUDAGToDAGISel::SelectADDRVTX_READ(SDValue Addr, SDValue &Base,
624 SDValue &Offset) {
Matt Arsenault209a7b92014-04-18 07:40:20 +0000625 ConstantSDNode *IMMOffset;
Tom Stellard75aadc22012-12-11 21:25:42 +0000626
627 if (Addr.getOpcode() == ISD::ADD
628 && (IMMOffset = dyn_cast<ConstantSDNode>(Addr.getOperand(1)))
629 && isInt<16>(IMMOffset->getZExtValue())) {
630
631 Base = Addr.getOperand(0);
632 Offset = CurDAG->getTargetConstant(IMMOffset->getZExtValue(), MVT::i32);
633 return true;
634 // If the pointer address is constant, we can move it to the offset field.
635 } else if ((IMMOffset = dyn_cast<ConstantSDNode>(Addr))
636 && isInt<16>(IMMOffset->getZExtValue())) {
637 Base = CurDAG->getCopyFromReg(CurDAG->getEntryNode(),
Andrew Trickef9de2a2013-05-25 02:42:55 +0000638 SDLoc(CurDAG->getEntryNode()),
Tom Stellard75aadc22012-12-11 21:25:42 +0000639 AMDGPU::ZERO, MVT::i32);
640 Offset = CurDAG->getTargetConstant(IMMOffset->getZExtValue(), MVT::i32);
641 return true;
642 }
643
644 // Default case, no offset
645 Base = Addr;
646 Offset = CurDAG->getTargetConstant(0, MVT::i32);
647 return true;
648}
649
Tom Stellardf3b2a1e2013-02-06 17:32:29 +0000650bool AMDGPUDAGToDAGISel::SelectADDRIndirect(SDValue Addr, SDValue &Base,
651 SDValue &Offset) {
652 ConstantSDNode *C;
653
654 if ((C = dyn_cast<ConstantSDNode>(Addr))) {
655 Base = CurDAG->getRegister(AMDGPU::INDIRECT_BASE_ADDR, MVT::i32);
656 Offset = CurDAG->getTargetConstant(C->getZExtValue(), MVT::i32);
657 } else if ((Addr.getOpcode() == ISD::ADD || Addr.getOpcode() == ISD::OR) &&
658 (C = dyn_cast<ConstantSDNode>(Addr.getOperand(1)))) {
659 Base = Addr.getOperand(0);
660 Offset = CurDAG->getTargetConstant(C->getZExtValue(), MVT::i32);
661 } else {
662 Base = Addr;
663 Offset = CurDAG->getTargetConstant(0, MVT::i32);
664 }
665
666 return true;
667}
Christian Konigd910b7d2013-02-26 17:52:16 +0000668
// Manually select a 64-bit add/sub as a low/high pair of 32-bit operations
// chained through glue (the carry), then reassemble the i64 result with a
// REG_SEQUENCE.
SDNode *AMDGPUDAGToDAGISel::SelectADD_SUB_I64(SDNode *N) {
  SDLoc DL(N);
  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);

  bool IsAdd = (N->getOpcode() == ISD::ADD);

  SDValue Sub0 = CurDAG->getTargetConstant(AMDGPU::sub0, MVT::i32);
  SDValue Sub1 = CurDAG->getTargetConstant(AMDGPU::sub1, MVT::i32);

  // Split both operands into their low (sub0) and high (sub1) halves.
  SDNode *Lo0 = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
                                       DL, MVT::i32, LHS, Sub0);
  SDNode *Hi0 = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
                                       DL, MVT::i32, LHS, Sub1);

  SDNode *Lo1 = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
                                       DL, MVT::i32, RHS, Sub0);
  SDNode *Hi1 = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
                                       DL, MVT::i32, RHS, Sub1);

  // The low-half op produces (result, glue); the glue carries the borrow/
  // carry into the high-half op.
  SDVTList VTList = CurDAG->getVTList(MVT::i32, MVT::Glue);
  SDValue AddLoArgs[] = { SDValue(Lo0, 0), SDValue(Lo1, 0) };


  unsigned Opc = IsAdd ? AMDGPU::S_ADD_I32 : AMDGPU::S_SUB_I32;
  unsigned CarryOpc = IsAdd ? AMDGPU::S_ADDC_U32 : AMDGPU::S_SUBB_U32;

  // Outside control-flow depth 0, switch from the scalar (S_*) forms to the
  // vector (V_*) forms.  NOTE(review): presumably because the scalar carry
  // flag is unreliable under divergent control flow — confirm.
  if (!isCFDepth0()) {
    Opc = IsAdd ? AMDGPU::V_ADD_I32_e32 : AMDGPU::V_SUB_I32_e32;
    CarryOpc = IsAdd ? AMDGPU::V_ADDC_U32_e32 : AMDGPU::V_SUBB_U32_e32;
  }

  SDNode *AddLo = CurDAG->getMachineNode( Opc, DL, VTList, AddLoArgs);
  SDValue Carry(AddLo, 1);
  SDNode *AddHi
    = CurDAG->getMachineNode(CarryOpc, DL, MVT::i32,
                             SDValue(Hi0, 0), SDValue(Hi1, 0), Carry);

  // Recombine the two 32-bit halves into a single i64 value.
  SDValue Args[5] = {
    CurDAG->getTargetConstant(AMDGPU::SReg_64RegClassID, MVT::i32),
    SDValue(AddLo,0),
    Sub0,
    SDValue(AddHi,0),
    Sub1,
  };
  return CurDAG->SelectNodeTo(N, AMDGPU::REG_SEQUENCE, MVT::i64, Args);
}
716
Matt Arsenaultf2b0aeb2014-06-23 18:28:28 +0000717SDNode *AMDGPUDAGToDAGISel::SelectDIV_SCALE(SDNode *N) {
718 SDLoc SL(N);
719 EVT VT = N->getValueType(0);
720
721 assert(VT == MVT::f32 || VT == MVT::f64);
722
723 unsigned Opc
724 = (VT == MVT::f64) ? AMDGPU::V_DIV_SCALE_F64 : AMDGPU::V_DIV_SCALE_F32;
725
726 const SDValue Zero = CurDAG->getTargetConstant(0, MVT::i32);
727
728 SDValue Ops[] = {
729 N->getOperand(0),
730 N->getOperand(1),
731 N->getOperand(2),
732 Zero,
733 Zero,
734 Zero,
735 Zero
736 };
737
738 return CurDAG->SelectNodeTo(N, Opc, VT, MVT::i1, Ops);
739}
740
Tom Stellardb02c2682014-06-24 23:33:07 +0000741static SDValue wrapAddr64Rsrc(SelectionDAG *DAG, SDLoc DL, SDValue Ptr) {
742 return SDValue(DAG->getMachineNode(AMDGPU::SI_ADDR64_RSRC, DL, MVT::v4i32,
743 Ptr), 0);
744}
745
Tom Stellardb02094e2014-07-21 15:45:01 +0000746static bool isLegalMUBUFImmOffset(const ConstantSDNode *Imm) {
747 return isUInt<12>(Imm->getZExtValue());
748}
749
Tom Stellardb02c2682014-06-24 23:33:07 +0000750bool AMDGPUDAGToDAGISel::SelectMUBUFAddr64(SDValue Addr, SDValue &Ptr,
751 SDValue &Offset,
752 SDValue &ImmOffset) const {
753 SDLoc DL(Addr);
754
755 if (CurDAG->isBaseWithConstantOffset(Addr)) {
756 SDValue N0 = Addr.getOperand(0);
757 SDValue N1 = Addr.getOperand(1);
758 ConstantSDNode *C1 = cast<ConstantSDNode>(N1);
759
Tom Stellardb02094e2014-07-21 15:45:01 +0000760 if (isLegalMUBUFImmOffset(C1)) {
Tom Stellardb02c2682014-06-24 23:33:07 +0000761
762 if (N0.getOpcode() == ISD::ADD) {
763 // (add (add N2, N3), C1)
764 SDValue N2 = N0.getOperand(0);
765 SDValue N3 = N0.getOperand(1);
766 Ptr = wrapAddr64Rsrc(CurDAG, DL, N2);
767 Offset = N3;
768 ImmOffset = CurDAG->getTargetConstant(C1->getZExtValue(), MVT::i16);
769 return true;
770 }
771
772 // (add N0, C1)
773 Ptr = wrapAddr64Rsrc(CurDAG, DL, CurDAG->getTargetConstant(0, MVT::i64));;
774 Offset = N0;
775 ImmOffset = CurDAG->getTargetConstant(C1->getZExtValue(), MVT::i16);
776 return true;
777 }
778 }
779 if (Addr.getOpcode() == ISD::ADD) {
780 // (add N0, N1)
781 SDValue N0 = Addr.getOperand(0);
782 SDValue N1 = Addr.getOperand(1);
783 Ptr = wrapAddr64Rsrc(CurDAG, DL, N0);
784 Offset = N1;
785 ImmOffset = CurDAG->getTargetConstant(0, MVT::i16);
786 return true;
787 }
788
789 // default case
790 Ptr = wrapAddr64Rsrc(CurDAG, DL, CurDAG->getConstant(0, MVT::i64));
791 Offset = Addr;
792 ImmOffset = CurDAG->getTargetConstant(0, MVT::i16);
793 return true;
794}
795
Tom Stellardb02094e2014-07-21 15:45:01 +0000796/// \brief Return a resource descriptor with the 'Add TID' bit enabled
797/// The TID (Thread ID) is multipled by the stride value (bits [61:48]
798/// of the resource descriptor) to create an offset, which is added to the
799/// resource ponter.
800static SDValue buildScratchRSRC(SelectionDAG *DAG, SDLoc DL, SDValue Ptr) {
801
802 uint64_t Rsrc = AMDGPU::RSRC_DATA_FORMAT | AMDGPU::RSRC_TID_ENABLE |
803 0xffffffff;
804
805 SDValue PtrLo = DAG->getTargetExtractSubreg(AMDGPU::sub0, DL, MVT::i32, Ptr);
806 SDValue PtrHi = DAG->getTargetExtractSubreg(AMDGPU::sub1, DL, MVT::i32, Ptr);
807 SDValue DataLo = DAG->getTargetConstant(
808 Rsrc & APInt::getAllOnesValue(32).getZExtValue(), MVT::i32);
809 SDValue DataHi = DAG->getTargetConstant(Rsrc >> 32, MVT::i32);
810
811 const SDValue Ops[] = { PtrLo, PtrHi, DataLo, DataHi };
812 return SDValue(DAG->getMachineNode(AMDGPU::SI_BUFFER_RSRC, DL,
813 MVT::v4i32, Ops), 0);
814}
815
816bool AMDGPUDAGToDAGISel::SelectMUBUFScratch(SDValue Addr, SDValue &Rsrc,
817 SDValue &VAddr, SDValue &SOffset,
818 SDValue &ImmOffset) const {
819
820 SDLoc DL(Addr);
821 MachineFunction &MF = CurDAG->getMachineFunction();
822 const SIRegisterInfo *TRI = static_cast<const SIRegisterInfo*>(MF.getTarget().getRegisterInfo());
823 MachineRegisterInfo &MRI = MF.getRegInfo();
824
825
826 unsigned ScratchPtrReg =
827 TRI->getPreloadedValue(MF, SIRegisterInfo::SCRATCH_PTR);
828 unsigned ScratchOffsetReg =
829 TRI->getPreloadedValue(MF, SIRegisterInfo::SCRATCH_WAVE_OFFSET);
830
831 Rsrc = buildScratchRSRC(CurDAG, DL, CurDAG->getCopyFromReg(CurDAG->getEntryNode(), DL, MRI.getLiveInVirtReg(ScratchPtrReg), MVT::i64));
832 SOffset = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), DL,
833 MRI.getLiveInVirtReg(ScratchOffsetReg), MVT::i32);
834
835 // (add n0, c1)
836 if (CurDAG->isBaseWithConstantOffset(Addr)) {
837 SDValue N1 = Addr.getOperand(1);
838 ConstantSDNode *C1 = cast<ConstantSDNode>(N1);
839
840 if (isLegalMUBUFImmOffset(C1)) {
841 VAddr = Addr.getOperand(0);
842 ImmOffset = CurDAG->getTargetConstant(C1->getZExtValue(), MVT::i16);
843 return true;
844 }
845 }
846
847 // (add FI, n0)
848 if ((Addr.getOpcode() == ISD::ADD || Addr.getOpcode() == ISD::OR) &&
849 isa<FrameIndexSDNode>(Addr.getOperand(0))) {
850 VAddr = Addr.getOperand(1);
851 ImmOffset = Addr.getOperand(0);
852 return true;
853 }
854
855 // (FI)
856 if (isa<FrameIndexSDNode>(Addr)) {
857 VAddr = SDValue(CurDAG->getMachineNode(AMDGPU::V_MOV_B32_e32, DL, MVT::i32,
858 CurDAG->getConstant(0, MVT::i32)), 0);
859 ImmOffset = Addr;
860 return true;
861 }
862
863 // (node)
864 VAddr = Addr;
865 ImmOffset = CurDAG->getTargetConstant(0, MVT::i16);
866 return true;
867}
868
869bool AMDGPUDAGToDAGISel::SelectMUBUFAddr32(SDValue Addr, SDValue &SRsrc,
870 SDValue &VAddr, SDValue &SOffset,
871 SDValue &Offset, SDValue &Offen,
872 SDValue &Idxen, SDValue &GLC,
873 SDValue &SLC, SDValue &TFE) const {
874
875 GLC = CurDAG->getTargetConstant(0, MVT::i1);
876 SLC = CurDAG->getTargetConstant(0, MVT::i1);
877 TFE = CurDAG->getTargetConstant(0, MVT::i1);
878
879 Idxen = CurDAG->getTargetConstant(0, MVT::i1);
880 Offen = CurDAG->getTargetConstant(1, MVT::i1);
881
882 return SelectMUBUFScratch(Addr, SRsrc, VAddr, SOffset, Offset);
883}
884
Tom Stellardb4a313a2014-08-01 00:32:39 +0000885bool AMDGPUDAGToDAGISel::SelectVOP3Mods(SDValue In, SDValue &Src,
886 SDValue &SrcMods) const {
887
888 unsigned Mods = 0;
889
890 Src = In;
891
892 if (Src.getOpcode() == ISD::FNEG) {
893 Mods |= SISrcMods::NEG;
894 Src = Src.getOperand(0);
895 }
896
897 if (Src.getOpcode() == ISD::FABS) {
898 Mods |= SISrcMods::ABS;
899 Src = Src.getOperand(0);
900 }
901
902 SrcMods = CurDAG->getTargetConstant(Mods, MVT::i32);
903
904 return true;
905}
906
907bool AMDGPUDAGToDAGISel::SelectVOP3Mods0(SDValue In, SDValue &Src,
908 SDValue &SrcMods, SDValue &Clamp,
909 SDValue &Omod) const {
910 // FIXME: Handle Clamp and Omod
911 Clamp = CurDAG->getTargetConstant(0, MVT::i32);
912 Omod = CurDAG->getTargetConstant(0, MVT::i32);
913
914 return SelectVOP3Mods(In, Src, SrcMods);
915}
916
Christian Konigd910b7d2013-02-26 17:52:16 +0000917void AMDGPUDAGToDAGISel::PostprocessISelDAG() {
Bill Wendlinga3cd3502013-06-19 21:36:55 +0000918 const AMDGPUTargetLowering& Lowering =
Matt Arsenault209a7b92014-04-18 07:40:20 +0000919 *static_cast<const AMDGPUTargetLowering*>(getTargetLowering());
Vincent Lejeuneab3baf82013-09-12 23:44:44 +0000920 bool IsModified = false;
921 do {
922 IsModified = false;
923 // Go over all selected nodes and try to fold them a bit more
924 for (SelectionDAG::allnodes_iterator I = CurDAG->allnodes_begin(),
925 E = CurDAG->allnodes_end(); I != E; ++I) {
Christian Konigd910b7d2013-02-26 17:52:16 +0000926
Vincent Lejeuneab3baf82013-09-12 23:44:44 +0000927 SDNode *Node = I;
Tom Stellard2183b702013-06-03 17:39:46 +0000928
Vincent Lejeuneab3baf82013-09-12 23:44:44 +0000929 MachineSDNode *MachineNode = dyn_cast<MachineSDNode>(I);
930 if (!MachineNode)
931 continue;
Christian Konigd910b7d2013-02-26 17:52:16 +0000932
Vincent Lejeuneab3baf82013-09-12 23:44:44 +0000933 SDNode *ResNode = Lowering.PostISelFolding(MachineNode, *CurDAG);
934 if (ResNode != Node) {
935 ReplaceUses(Node, ResNode);
936 IsModified = true;
937 }
Tom Stellard2183b702013-06-03 17:39:46 +0000938 }
Vincent Lejeuneab3baf82013-09-12 23:44:44 +0000939 CurDAG->RemoveDeadNodes();
940 } while (IsModified);
Christian Konigd910b7d2013-02-26 17:52:16 +0000941}