//===-- AMDILISelDAGToDAG.cpp - A dag to dag inst selector for AMDIL ------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//==-----------------------------------------------------------------------===//
//
/// \file
/// \brief Defines an instruction selector for the AMDGPU target.
//
//===----------------------------------------------------------------------===//
#include "AMDGPUInstrInfo.h"
#include "AMDGPUISelLowering.h" // For AMDGPUISD
#include "AMDGPURegisterInfo.h"
#include "AMDGPUSubtarget.h"
#include "R600InstrInfo.h"
#include "SIDefines.h"
#include "SIISelLowering.h"
#include "SIMachineFunctionInfo.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/IR/Function.h"

using namespace llvm;

//===----------------------------------------------------------------------===//
// Instruction Selector Implementation
//===----------------------------------------------------------------------===//

namespace {
/// AMDGPU specific code to select AMDGPU machine instructions for
/// SelectionDAG operations.
class AMDGPUDAGToDAGISel : public SelectionDAGISel {
  // Subtarget - Keep a pointer to the AMDGPU Subtarget around so that we can
  // make the right decision when generating code for different targets.
  const AMDGPUSubtarget &Subtarget;
public:
  AMDGPUDAGToDAGISel(TargetMachine &TM);
  virtual ~AMDGPUDAGToDAGISel();

  SDNode *Select(SDNode *N) override;
  const char *getPassName() const override;
  void PostprocessISelDAG() override;

private:
  bool isInlineImmediate(SDNode *N) const;
  inline SDValue getSmallIPtrImm(unsigned Imm);
  bool FoldOperand(SDValue &Src, SDValue &Sel, SDValue &Neg, SDValue &Abs,
                   const R600InstrInfo *TII);
  bool FoldOperands(unsigned, const R600InstrInfo *, std::vector<SDValue> &);
  bool FoldDotOperands(unsigned, const R600InstrInfo *, std::vector<SDValue> &);

  // Complex pattern selectors
  bool SelectADDRParam(SDValue Addr, SDValue& R1, SDValue& R2);
  bool SelectADDR(SDValue N, SDValue &R1, SDValue &R2);
  bool SelectADDR64(SDValue N, SDValue &R1, SDValue &R2);

  static bool checkType(const Value *ptr, unsigned int addrspace);
  static bool checkPrivateAddress(const MachineMemOperand *Op);

  static bool isGlobalStore(const StoreSDNode *N);
  static bool isPrivateStore(const StoreSDNode *N);
  static bool isLocalStore(const StoreSDNode *N);
  static bool isRegionStore(const StoreSDNode *N);

  bool isCPLoad(const LoadSDNode *N) const;
  bool isConstantLoad(const LoadSDNode *N, int cbID) const;
  bool isGlobalLoad(const LoadSDNode *N) const;
  bool isParamLoad(const LoadSDNode *N) const;
  bool isPrivateLoad(const LoadSDNode *N) const;
  bool isLocalLoad(const LoadSDNode *N) const;
  bool isRegionLoad(const LoadSDNode *N) const;

  /// \returns True if the current basic block being selected is at control
  /// flow depth 0, meaning that the current block dominates the exit block.
  bool isCFDepth0() const;

  const TargetRegisterClass *getOperandRegClass(SDNode *N, unsigned OpNo) const;
  bool SelectGlobalValueConstantOffset(SDValue Addr, SDValue& IntPtr);
  bool SelectGlobalValueVariableOffset(SDValue Addr, SDValue &BaseReg,
                                       SDValue &Offset);
  bool SelectADDRVTX_READ(SDValue Addr, SDValue &Base, SDValue &Offset);
  bool SelectADDRIndirect(SDValue Addr, SDValue &Base, SDValue &Offset);
  bool SelectMUBUFAddr64(SDValue Addr, SDValue &Ptr, SDValue &Offset,
                         SDValue &ImmOffset) const;
  bool SelectMUBUFScratch(SDValue Addr, SDValue &RSrc, SDValue &VAddr,
                          SDValue &SOffset, SDValue &ImmOffset) const;
  bool SelectMUBUFAddr32(SDValue Addr, SDValue &SRsrc, SDValue &VAddr,
                         SDValue &SOffset, SDValue &Offset, SDValue &Offen,
                         SDValue &Idxen, SDValue &GLC, SDValue &SLC,
                         SDValue &TFE) const;
  bool SelectVOP3Mods(SDValue In, SDValue &Src, SDValue &SrcMods) const;
  bool SelectVOP3Mods0(SDValue In, SDValue &Src, SDValue &SrcMods,
                       SDValue &Clamp, SDValue &Omod) const;

  SDNode *SelectADD_SUB_I64(SDNode *N);
  SDNode *SelectDIV_SCALE(SDNode *N);

  // Include the pieces autogenerated from the target description.
#include "AMDGPUGenDAGISel.inc"
};
} // end anonymous namespace

/// \brief This pass converts a legalized DAG into an AMDGPU-specific
/// DAG, ready for instruction scheduling.
FunctionPass *llvm::createAMDGPUISelDag(TargetMachine &TM) {
  return new AMDGPUDAGToDAGISel(TM);
}

AMDGPUDAGToDAGISel::AMDGPUDAGToDAGISel(TargetMachine &TM)
  : SelectionDAGISel(TM), Subtarget(TM.getSubtarget<AMDGPUSubtarget>()) {
}

AMDGPUDAGToDAGISel::~AMDGPUDAGToDAGISel() {
}

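// Used by the 64-bit Constant/ConstantFP case in Select(): when
// SITargetLowering::analyzeImmediate classifies the node as an inline
// immediate (result 0), the node is left for the generated matcher instead of
// being split into two S_MOV_B32s.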
bool AMDGPUDAGToDAGISel::isInlineImmediate(SDNode *N) const {
  const SITargetLowering *TL
      = static_cast<const SITargetLowering *>(getTargetLowering());
  return TL->analyzeImmediate(N) == 0;
}

/// \brief Determine the register class for \p OpNo
/// \returns The register class of the virtual register that will be used for
/// the given operand number \p OpNo, or nullptr if the register class cannot
/// be determined.
const TargetRegisterClass *AMDGPUDAGToDAGISel::getOperandRegClass(SDNode *N,
                                                          unsigned OpNo) const {
  if (!N->isMachineOpcode())
    return nullptr;

  switch (N->getMachineOpcode()) {
  default: {
    const MCInstrDesc &Desc =
        TM.getSubtargetImpl()->getInstrInfo()->get(N->getMachineOpcode());
    unsigned OpIdx = Desc.getNumDefs() + OpNo;
    if (OpIdx >= Desc.getNumOperands())
      return nullptr;
    int RegClass = Desc.OpInfo[OpIdx].RegClass;
    if (RegClass == -1)
      return nullptr;

    return TM.getSubtargetImpl()->getRegisterInfo()->getRegClass(RegClass);
  }
  case AMDGPU::REG_SEQUENCE: {
    unsigned RCID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
    const TargetRegisterClass *SuperRC =
        TM.getSubtargetImpl()->getRegisterInfo()->getRegClass(RCID);

    SDValue SubRegOp = N->getOperand(OpNo + 1);
    unsigned SubRegIdx = cast<ConstantSDNode>(SubRegOp)->getZExtValue();
    return TM.getSubtargetImpl()->getRegisterInfo()->getSubClassWithSubReg(
        SuperRC, SubRegIdx);
  }
  }
}

SDValue AMDGPUDAGToDAGISel::getSmallIPtrImm(unsigned int Imm) {
  return CurDAG->getTargetConstant(Imm, MVT::i32);
}

bool AMDGPUDAGToDAGISel::SelectADDRParam(
  SDValue Addr, SDValue& R1, SDValue& R2) {

  if (Addr.getOpcode() == ISD::FrameIndex) {
    if (FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
      R1 = CurDAG->getTargetFrameIndex(FIN->getIndex(), MVT::i32);
      R2 = CurDAG->getTargetConstant(0, MVT::i32);
    } else {
      R1 = Addr;
      R2 = CurDAG->getTargetConstant(0, MVT::i32);
    }
  } else if (Addr.getOpcode() == ISD::ADD) {
    R1 = Addr.getOperand(0);
    R2 = Addr.getOperand(1);
  } else {
    R1 = Addr;
    R2 = CurDAG->getTargetConstant(0, MVT::i32);
  }
  return true;
}

bool AMDGPUDAGToDAGISel::SelectADDR(SDValue Addr, SDValue& R1, SDValue& R2) {
  if (Addr.getOpcode() == ISD::TargetExternalSymbol ||
      Addr.getOpcode() == ISD::TargetGlobalAddress) {
    return false;
  }
  return SelectADDRParam(Addr, R1, R2);
}


bool AMDGPUDAGToDAGISel::SelectADDR64(SDValue Addr, SDValue& R1, SDValue& R2) {
  if (Addr.getOpcode() == ISD::TargetExternalSymbol ||
      Addr.getOpcode() == ISD::TargetGlobalAddress) {
    return false;
  }

  if (Addr.getOpcode() == ISD::FrameIndex) {
    if (FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
      R1 = CurDAG->getTargetFrameIndex(FIN->getIndex(), MVT::i64);
      R2 = CurDAG->getTargetConstant(0, MVT::i64);
    } else {
      R1 = Addr;
      R2 = CurDAG->getTargetConstant(0, MVT::i64);
    }
  } else if (Addr.getOpcode() == ISD::ADD) {
    R1 = Addr.getOperand(0);
    R2 = Addr.getOperand(1);
  } else {
    R1 = Addr;
    R2 = CurDAG->getTargetConstant(0, MVT::i64);
  }
  return true;
}

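// Main custom-selection hook. The cases below handle nodes that need
// hand-written selection (64-bit add/sub, vector build nodes, 64-bit
// immediates, indirect register access, BFE, DIV_SCALE); everything else
// falls through to the TableGen-generated matcher via SelectCode().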
SDNode *AMDGPUDAGToDAGISel::Select(SDNode *N) {
  unsigned int Opc = N->getOpcode();
  if (N->isMachineOpcode()) {
    N->setNodeId(-1);
    return nullptr;   // Already selected.
  }

  const AMDGPUSubtarget &ST = TM.getSubtarget<AMDGPUSubtarget>();
  switch (Opc) {
  default: break;
  // We are selecting i64 ADD here instead of custom lowering it during
  // DAG legalization, so we can fold some i64 ADDs used for address
  // calculation into the LOAD and STORE instructions.
  case ISD::ADD:
  case ISD::SUB: {
    if (N->getValueType(0) != MVT::i64 ||
        ST.getGeneration() < AMDGPUSubtarget::SOUTHERN_ISLANDS)
      break;

    return SelectADD_SUB_I64(N);
  }
  case ISD::SCALAR_TO_VECTOR:
  case AMDGPUISD::BUILD_VERTICAL_VECTOR:
  case ISD::BUILD_VECTOR: {
    unsigned RegClassID;
    const AMDGPURegisterInfo *TRI = static_cast<const AMDGPURegisterInfo *>(
        TM.getSubtargetImpl()->getRegisterInfo());
    const SIRegisterInfo *SIRI = static_cast<const SIRegisterInfo *>(
        TM.getSubtargetImpl()->getRegisterInfo());
    EVT VT = N->getValueType(0);
    unsigned NumVectorElts = VT.getVectorNumElements();
    EVT EltVT = VT.getVectorElementType();
    assert(EltVT.bitsEq(MVT::i32));
    if (ST.getGeneration() >= AMDGPUSubtarget::SOUTHERN_ISLANDS) {
      bool UseVReg = true;
      for (SDNode::use_iterator U = N->use_begin(), E = SDNode::use_end();
           U != E; ++U) {
        if (!U->isMachineOpcode()) {
          continue;
        }
        const TargetRegisterClass *RC = getOperandRegClass(*U, U.getOperandNo());
        if (!RC) {
          continue;
        }
        if (SIRI->isSGPRClass(RC)) {
          UseVReg = false;
        }
      }
      switch(NumVectorElts) {
      case 1: RegClassID = UseVReg ? AMDGPU::VReg_32RegClassID :
                                     AMDGPU::SReg_32RegClassID;
        break;
      case 2: RegClassID = UseVReg ? AMDGPU::VReg_64RegClassID :
                                     AMDGPU::SReg_64RegClassID;
        break;
      case 4: RegClassID = UseVReg ? AMDGPU::VReg_128RegClassID :
                                     AMDGPU::SReg_128RegClassID;
        break;
      case 8: RegClassID = UseVReg ? AMDGPU::VReg_256RegClassID :
                                     AMDGPU::SReg_256RegClassID;
        break;
      case 16: RegClassID = UseVReg ? AMDGPU::VReg_512RegClassID :
                                      AMDGPU::SReg_512RegClassID;
        break;
      default: llvm_unreachable("Do not know how to lower this BUILD_VECTOR");
      }
    } else {
      // BUILD_VECTOR was lowered into an IMPLICIT_DEF + 4 INSERT_SUBREG
      // that adds a 128-bit register copy when going through the
      // TwoAddressInstructions pass. We want to avoid 128-bit copies as much
      // as possible because they can't be bundled by our scheduler.
      switch(NumVectorElts) {
      case 2: RegClassID = AMDGPU::R600_Reg64RegClassID; break;
      case 4:
        if (Opc == AMDGPUISD::BUILD_VERTICAL_VECTOR)
          RegClassID = AMDGPU::R600_Reg128VerticalRegClassID;
        else
          RegClassID = AMDGPU::R600_Reg128RegClassID;
        break;
      default: llvm_unreachable("Do not know how to lower this BUILD_VECTOR");
      }
    }

    SDValue RegClass = CurDAG->getTargetConstant(RegClassID, MVT::i32);

    if (NumVectorElts == 1) {
      return CurDAG->SelectNodeTo(N, AMDGPU::COPY_TO_REGCLASS, EltVT,
                                  N->getOperand(0), RegClass);
    }

    assert(NumVectorElts <= 16 && "Vectors with more than 16 elements not "
                                  "supported yet");
    // 16 = Max Num Vector Elements
    // 2 = 2 REG_SEQUENCE operands per element (value, subreg index)
    // 1 = Vector Register Class
    SmallVector<SDValue, 16 * 2 + 1> RegSeqArgs(NumVectorElts * 2 + 1);

    RegSeqArgs[0] = CurDAG->getTargetConstant(RegClassID, MVT::i32);
    bool IsRegSeq = true;
    unsigned NOps = N->getNumOperands();
    for (unsigned i = 0; i < NOps; i++) {
      // XXX: Why is this here?
      if (dyn_cast<RegisterSDNode>(N->getOperand(i))) {
        IsRegSeq = false;
        break;
      }
      RegSeqArgs[1 + (2 * i)] = N->getOperand(i);
      RegSeqArgs[1 + (2 * i) + 1] =
          CurDAG->getTargetConstant(TRI->getSubRegFromChannel(i), MVT::i32);
    }

    if (NOps != NumVectorElts) {
      // Fill in the missing undef elements if this was a scalar_to_vector.
      assert(Opc == ISD::SCALAR_TO_VECTOR && NOps < NumVectorElts);

      MachineSDNode *ImpDef = CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF,
                                                     SDLoc(N), EltVT);
      for (unsigned i = NOps; i < NumVectorElts; ++i) {
        RegSeqArgs[1 + (2 * i)] = SDValue(ImpDef, 0);
        RegSeqArgs[1 + (2 * i) + 1] =
            CurDAG->getTargetConstant(TRI->getSubRegFromChannel(i), MVT::i32);
      }
    }

    if (!IsRegSeq)
      break;
    return CurDAG->SelectNodeTo(N, AMDGPU::REG_SEQUENCE, N->getVTList(),
                                RegSeqArgs);
  }
  case ISD::BUILD_PAIR: {
    SDValue RC, SubReg0, SubReg1;
    if (ST.getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS) {
      break;
    }
    if (N->getValueType(0) == MVT::i128) {
      RC = CurDAG->getTargetConstant(AMDGPU::SReg_128RegClassID, MVT::i32);
      SubReg0 = CurDAG->getTargetConstant(AMDGPU::sub0_sub1, MVT::i32);
      SubReg1 = CurDAG->getTargetConstant(AMDGPU::sub2_sub3, MVT::i32);
    } else if (N->getValueType(0) == MVT::i64) {
      RC = CurDAG->getTargetConstant(AMDGPU::SReg_64RegClassID, MVT::i32);
      SubReg0 = CurDAG->getTargetConstant(AMDGPU::sub0, MVT::i32);
      SubReg1 = CurDAG->getTargetConstant(AMDGPU::sub1, MVT::i32);
    } else {
      llvm_unreachable("Unhandled value type for BUILD_PAIR");
    }
    const SDValue Ops[] = { RC, N->getOperand(0), SubReg0,
                            N->getOperand(1), SubReg1 };
    return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE,
                                  SDLoc(N), N->getValueType(0), Ops);
  }

  case ISD::Constant:
  case ISD::ConstantFP: {
    const AMDGPUSubtarget &ST = TM.getSubtarget<AMDGPUSubtarget>();
    if (ST.getGeneration() < AMDGPUSubtarget::SOUTHERN_ISLANDS ||
        N->getValueType(0).getSizeInBits() != 64 || isInlineImmediate(N))
      break;

    uint64_t Imm;
    if (ConstantFPSDNode *FP = dyn_cast<ConstantFPSDNode>(N))
      Imm = FP->getValueAPF().bitcastToAPInt().getZExtValue();
    else {
      ConstantSDNode *C = cast<ConstantSDNode>(N);
      Imm = C->getZExtValue();
    }

    SDNode *Lo = CurDAG->getMachineNode(AMDGPU::S_MOV_B32, SDLoc(N), MVT::i32,
                                CurDAG->getConstant(Imm & 0xFFFFFFFF, MVT::i32));
    SDNode *Hi = CurDAG->getMachineNode(AMDGPU::S_MOV_B32, SDLoc(N), MVT::i32,
                                        CurDAG->getConstant(Imm >> 32, MVT::i32));
    const SDValue Ops[] = {
      CurDAG->getTargetConstant(AMDGPU::SReg_64RegClassID, MVT::i32),
      SDValue(Lo, 0), CurDAG->getTargetConstant(AMDGPU::sub0, MVT::i32),
      SDValue(Hi, 0), CurDAG->getTargetConstant(AMDGPU::sub1, MVT::i32)
    };

    return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, SDLoc(N),
                                  N->getValueType(0), Ops);
  }

  case AMDGPUISD::REGISTER_LOAD: {
    if (ST.getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS)
      break;
    SDValue Addr, Offset;

    SelectADDRIndirect(N->getOperand(1), Addr, Offset);
    const SDValue Ops[] = {
      Addr,
      Offset,
      CurDAG->getTargetConstant(0, MVT::i32),
      N->getOperand(0),
    };
    return CurDAG->getMachineNode(AMDGPU::SI_RegisterLoad, SDLoc(N),
                                  CurDAG->getVTList(MVT::i32, MVT::i64, MVT::Other),
                                  Ops);
  }
  case AMDGPUISD::REGISTER_STORE: {
    if (ST.getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS)
      break;
    SDValue Addr, Offset;
    SelectADDRIndirect(N->getOperand(2), Addr, Offset);
    const SDValue Ops[] = {
      N->getOperand(1),
      Addr,
      Offset,
      CurDAG->getTargetConstant(0, MVT::i32),
      N->getOperand(0),
    };
    return CurDAG->getMachineNode(AMDGPU::SI_RegisterStorePseudo, SDLoc(N),
                                  CurDAG->getVTList(MVT::Other),
                                  Ops);
  }

  case AMDGPUISD::BFE_I32:
  case AMDGPUISD::BFE_U32: {
    if (ST.getGeneration() < AMDGPUSubtarget::SOUTHERN_ISLANDS)
      break;

    // There is a scalar version available, but unlike the vector version which
    // has a separate operand for the offset and width, the scalar version packs
    // the width and offset into a single operand. Try to move to the scalar
    // version if the offsets are constant, so that we can try to keep extended
    // loads of kernel arguments in SGPRs.

    // TODO: Technically we could try to pattern match scalar bitshifts of
    // dynamic values, but it's probably not useful.
    ConstantSDNode *Offset = dyn_cast<ConstantSDNode>(N->getOperand(1));
    if (!Offset)
      break;

    ConstantSDNode *Width = dyn_cast<ConstantSDNode>(N->getOperand(2));
    if (!Width)
      break;

    bool Signed = Opc == AMDGPUISD::BFE_I32;

    // Transformation function, pack the offset and width of a BFE into
    // the format expected by the S_BFE_I32 / S_BFE_U32. In the second
    // source, bits [5:0] contain the offset and bits [22:16] the width.

    uint32_t OffsetVal = Offset->getZExtValue();
    uint32_t WidthVal = Width->getZExtValue();

    uint32_t PackedVal = OffsetVal | WidthVal << 16;

    SDValue PackedOffsetWidth = CurDAG->getTargetConstant(PackedVal, MVT::i32);
    return CurDAG->getMachineNode(Signed ? AMDGPU::S_BFE_I32 : AMDGPU::S_BFE_U32,
                                  SDLoc(N),
                                  MVT::i32,
                                  N->getOperand(0),
                                  PackedOffsetWidth);

  }
  case AMDGPUISD::DIV_SCALE: {
    return SelectDIV_SCALE(N);
  }
  }
  return SelectCode(N);
}


bool AMDGPUDAGToDAGISel::checkType(const Value *Ptr, unsigned AS) {
  assert(AS != 0 && "Use checkPrivateAddress instead.");
  if (!Ptr)
    return false;

  return Ptr->getType()->getPointerAddressSpace() == AS;
}

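// Private (scratch) accesses either carry a PseudoSourceValue (no IR value at
// all) or point into AMDGPUAS::PRIVATE_ADDRESS; both forms are accepted here.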
bool AMDGPUDAGToDAGISel::checkPrivateAddress(const MachineMemOperand *Op) {
  if (Op->getPseudoValue())
    return true;

  if (PointerType *PT = dyn_cast<PointerType>(Op->getValue()->getType()))
    return PT->getAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS;

  return false;
}

bool AMDGPUDAGToDAGISel::isGlobalStore(const StoreSDNode *N) {
  return checkType(N->getMemOperand()->getValue(), AMDGPUAS::GLOBAL_ADDRESS);
}

bool AMDGPUDAGToDAGISel::isPrivateStore(const StoreSDNode *N) {
  const Value *MemVal = N->getMemOperand()->getValue();
  return (!checkType(MemVal, AMDGPUAS::LOCAL_ADDRESS) &&
          !checkType(MemVal, AMDGPUAS::GLOBAL_ADDRESS) &&
          !checkType(MemVal, AMDGPUAS::REGION_ADDRESS));
}

bool AMDGPUDAGToDAGISel::isLocalStore(const StoreSDNode *N) {
  return checkType(N->getMemOperand()->getValue(), AMDGPUAS::LOCAL_ADDRESS);
}

bool AMDGPUDAGToDAGISel::isRegionStore(const StoreSDNode *N) {
  return checkType(N->getMemOperand()->getValue(), AMDGPUAS::REGION_ADDRESS);
}

bool AMDGPUDAGToDAGISel::isConstantLoad(const LoadSDNode *N, int CbId) const {
  const Value *MemVal = N->getMemOperand()->getValue();
  if (CbId == -1)
    return checkType(MemVal, AMDGPUAS::CONSTANT_ADDRESS);

  return checkType(MemVal, AMDGPUAS::CONSTANT_BUFFER_0 + CbId);
}

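// A load from the constant address space is treated as a global load when the
// subtarget predates Southern Islands or the access is narrower than 32 bits;
// otherwise only loads whose pointer is in the global address space match.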
bool AMDGPUDAGToDAGISel::isGlobalLoad(const LoadSDNode *N) const {
  if (N->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS) {
    const AMDGPUSubtarget &ST = TM.getSubtarget<AMDGPUSubtarget>();
    if (ST.getGeneration() < AMDGPUSubtarget::SOUTHERN_ISLANDS ||
        N->getMemoryVT().bitsLT(MVT::i32)) {
      return true;
    }
  }
  return checkType(N->getMemOperand()->getValue(), AMDGPUAS::GLOBAL_ADDRESS);
}

bool AMDGPUDAGToDAGISel::isParamLoad(const LoadSDNode *N) const {
  return checkType(N->getMemOperand()->getValue(), AMDGPUAS::PARAM_I_ADDRESS);
}

bool AMDGPUDAGToDAGISel::isLocalLoad(const LoadSDNode *N) const {
  return checkType(N->getMemOperand()->getValue(), AMDGPUAS::LOCAL_ADDRESS);
}

bool AMDGPUDAGToDAGISel::isRegionLoad(const LoadSDNode *N) const {
  return checkType(N->getMemOperand()->getValue(), AMDGPUAS::REGION_ADDRESS);
}

bool AMDGPUDAGToDAGISel::isCPLoad(const LoadSDNode *N) const {
  MachineMemOperand *MMO = N->getMemOperand();
  if (checkPrivateAddress(N->getMemOperand())) {
    if (MMO) {
      const PseudoSourceValue *PSV = MMO->getPseudoValue();
      if (PSV && PSV == PseudoSourceValue::getConstantPool()) {
        return true;
      }
    }
  }
  return false;
}

bool AMDGPUDAGToDAGISel::isPrivateLoad(const LoadSDNode *N) const {
  if (checkPrivateAddress(N->getMemOperand())) {
    // Check to make sure we are not a constant pool load or a constant load
    // that is marked as a private load.
    if (isCPLoad(N) || isConstantLoad(N, -1)) {
      return false;
    }
  }

  const Value *MemVal = N->getMemOperand()->getValue();
  if (!checkType(MemVal, AMDGPUAS::LOCAL_ADDRESS) &&
      !checkType(MemVal, AMDGPUAS::GLOBAL_ADDRESS) &&
      !checkType(MemVal, AMDGPUAS::REGION_ADDRESS) &&
      !checkType(MemVal, AMDGPUAS::CONSTANT_ADDRESS) &&
      !checkType(MemVal, AMDGPUAS::PARAM_D_ADDRESS) &&
      !checkType(MemVal, AMDGPUAS::PARAM_I_ADDRESS)) {
    return true;
  }
  return false;
}

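// Crude approximation of "control-flow depth 0": treat only the function's
// entry block and its final block as dominating the exit. SelectADD_SUB_I64
// uses this to choose between the scalar and vector add/sub-with-carry forms.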
bool AMDGPUDAGToDAGISel::isCFDepth0() const {
  // FIXME: Figure out a way to use DominatorTree analysis here.
  const BasicBlock *CurBlock = FuncInfo->MBB->getBasicBlock();
  const Function *Fn = FuncInfo->Fn;
  return &Fn->front() == CurBlock || &Fn->back() == CurBlock;
}


const char *AMDGPUDAGToDAGISel::getPassName() const {
  return "AMDGPU DAG->DAG Pattern Instruction Selection";
}

#ifdef DEBUGTMP
#undef INT64_C
#endif
#undef DEBUGTMP

//===----------------------------------------------------------------------===//
// Complex Patterns
//===----------------------------------------------------------------------===//

bool AMDGPUDAGToDAGISel::SelectGlobalValueConstantOffset(SDValue Addr,
                                                         SDValue& IntPtr) {
  if (ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(Addr)) {
    IntPtr = CurDAG->getIntPtrConstant(Cst->getZExtValue() / 4, true);
    return true;
  }
  return false;
}

bool AMDGPUDAGToDAGISel::SelectGlobalValueVariableOffset(SDValue Addr,
                                                         SDValue& BaseReg,
                                                         SDValue &Offset) {
  if (!isa<ConstantSDNode>(Addr)) {
    BaseReg = Addr;
    Offset = CurDAG->getIntPtrConstant(0, true);
    return true;
  }
  return false;
}

bool AMDGPUDAGToDAGISel::SelectADDRVTX_READ(SDValue Addr, SDValue &Base,
                                            SDValue &Offset) {
  ConstantSDNode *IMMOffset;

  if (Addr.getOpcode() == ISD::ADD
      && (IMMOffset = dyn_cast<ConstantSDNode>(Addr.getOperand(1)))
      && isInt<16>(IMMOffset->getZExtValue())) {

    Base = Addr.getOperand(0);
    Offset = CurDAG->getTargetConstant(IMMOffset->getZExtValue(), MVT::i32);
    return true;
  // If the pointer address is constant, we can move it to the offset field.
  } else if ((IMMOffset = dyn_cast<ConstantSDNode>(Addr))
             && isInt<16>(IMMOffset->getZExtValue())) {
    Base = CurDAG->getCopyFromReg(CurDAG->getEntryNode(),
                                  SDLoc(CurDAG->getEntryNode()),
                                  AMDGPU::ZERO, MVT::i32);
    Offset = CurDAG->getTargetConstant(IMMOffset->getZExtValue(), MVT::i32);
    return true;
  }

  // Default case, no offset
  Base = Addr;
  Offset = CurDAG->getTargetConstant(0, MVT::i32);
  return true;
}

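// Used by the indirect register read/write pseudos: accepts a bare constant
// (an offset from INDIRECT_BASE_ADDR), a (base + constant) or (base | constant)
// expression, or any other value with a zero offset.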
bool AMDGPUDAGToDAGISel::SelectADDRIndirect(SDValue Addr, SDValue &Base,
                                            SDValue &Offset) {
  ConstantSDNode *C;

  if ((C = dyn_cast<ConstantSDNode>(Addr))) {
    Base = CurDAG->getRegister(AMDGPU::INDIRECT_BASE_ADDR, MVT::i32);
    Offset = CurDAG->getTargetConstant(C->getZExtValue(), MVT::i32);
  } else if ((Addr.getOpcode() == ISD::ADD || Addr.getOpcode() == ISD::OR) &&
             (C = dyn_cast<ConstantSDNode>(Addr.getOperand(1)))) {
    Base = Addr.getOperand(0);
    Offset = CurDAG->getTargetConstant(C->getZExtValue(), MVT::i32);
  } else {
    Base = Addr;
    Offset = CurDAG->getTargetConstant(0, MVT::i32);
  }

  return true;
}

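// Expand a 64-bit integer add/sub into 32-bit halves: extract sub0/sub1 of
// both operands, emit a low add/sub that produces a carry (glue), feed the
// carry into the high add/sub-with-carry, and recombine the results with a
// REG_SEQUENCE. Scalar opcodes are used at CF depth 0, VALU opcodes otherwise.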
SDNode *AMDGPUDAGToDAGISel::SelectADD_SUB_I64(SDNode *N) {
  SDLoc DL(N);
  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);

  bool IsAdd = (N->getOpcode() == ISD::ADD);

  SDValue Sub0 = CurDAG->getTargetConstant(AMDGPU::sub0, MVT::i32);
  SDValue Sub1 = CurDAG->getTargetConstant(AMDGPU::sub1, MVT::i32);

  SDNode *Lo0 = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
                                       DL, MVT::i32, LHS, Sub0);
  SDNode *Hi0 = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
                                       DL, MVT::i32, LHS, Sub1);

  SDNode *Lo1 = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
                                       DL, MVT::i32, RHS, Sub0);
  SDNode *Hi1 = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
                                       DL, MVT::i32, RHS, Sub1);

  SDVTList VTList = CurDAG->getVTList(MVT::i32, MVT::Glue);
  SDValue AddLoArgs[] = { SDValue(Lo0, 0), SDValue(Lo1, 0) };

  unsigned Opc = IsAdd ? AMDGPU::S_ADD_I32 : AMDGPU::S_SUB_I32;
  unsigned CarryOpc = IsAdd ? AMDGPU::S_ADDC_U32 : AMDGPU::S_SUBB_U32;

  if (!isCFDepth0()) {
    Opc = IsAdd ? AMDGPU::V_ADD_I32_e32 : AMDGPU::V_SUB_I32_e32;
    CarryOpc = IsAdd ? AMDGPU::V_ADDC_U32_e32 : AMDGPU::V_SUBB_U32_e32;
  }

  SDNode *AddLo = CurDAG->getMachineNode(Opc, DL, VTList, AddLoArgs);
  SDValue Carry(AddLo, 1);
  SDNode *AddHi
    = CurDAG->getMachineNode(CarryOpc, DL, MVT::i32,
                             SDValue(Hi0, 0), SDValue(Hi1, 0), Carry);

  SDValue Args[5] = {
    CurDAG->getTargetConstant(AMDGPU::SReg_64RegClassID, MVT::i32),
    SDValue(AddLo, 0),
    Sub0,
    SDValue(AddHi, 0),
    Sub1,
  };
  return CurDAG->SelectNodeTo(N, AMDGPU::REG_SEQUENCE, MVT::i64, Args);
}

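// The AMDGPUISD::DIV_SCALE node only carries the three source operands; the
// trailing zero operands below pad out the remaining (modifier-style) operands
// of V_DIV_SCALE_F32/F64.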
SDNode *AMDGPUDAGToDAGISel::SelectDIV_SCALE(SDNode *N) {
  SDLoc SL(N);
  EVT VT = N->getValueType(0);

  assert(VT == MVT::f32 || VT == MVT::f64);

  unsigned Opc
    = (VT == MVT::f64) ? AMDGPU::V_DIV_SCALE_F64 : AMDGPU::V_DIV_SCALE_F32;

  const SDValue Zero = CurDAG->getTargetConstant(0, MVT::i32);

  SDValue Ops[] = {
    N->getOperand(0),
    N->getOperand(1),
    N->getOperand(2),
    Zero,
    Zero,
    Zero,
    Zero
  };

  return CurDAG->SelectNodeTo(N, Opc, VT, MVT::i1, Ops);
}

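// Wrap a 64-bit pointer in the SI_ADDR64_RSRC pseudo, which produces the
// v4i32 buffer resource descriptor used by the MUBUF addr64 addressing mode.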
static SDValue wrapAddr64Rsrc(SelectionDAG *DAG, SDLoc DL, SDValue Ptr) {
  return SDValue(DAG->getMachineNode(AMDGPU::SI_ADDR64_RSRC, DL, MVT::v4i32,
                                     Ptr), 0);
}

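// The MUBUF forms selected here encode the immediate offset in an unsigned
// 12-bit field, hence the isUInt<12> check.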
static bool isLegalMUBUFImmOffset(const ConstantSDNode *Imm) {
  return isUInt<12>(Imm->getZExtValue());
}

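// Split an address into the pieces of the MUBUF addr64 form: a resource
// descriptor wrapping the 64-bit pointer, a variable offset, and a small
// immediate offset. The patterns handled are (add (add N2, N3), C1),
// (add N0, C1), (add N0, N1), and a lone pointer.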
bool AMDGPUDAGToDAGISel::SelectMUBUFAddr64(SDValue Addr, SDValue &Ptr,
                                           SDValue &Offset,
                                           SDValue &ImmOffset) const {
  SDLoc DL(Addr);

  if (CurDAG->isBaseWithConstantOffset(Addr)) {
    SDValue N0 = Addr.getOperand(0);
    SDValue N1 = Addr.getOperand(1);
    ConstantSDNode *C1 = cast<ConstantSDNode>(N1);

    if (isLegalMUBUFImmOffset(C1)) {

      if (N0.getOpcode() == ISD::ADD) {
        // (add (add N2, N3), C1)
        SDValue N2 = N0.getOperand(0);
        SDValue N3 = N0.getOperand(1);
        Ptr = wrapAddr64Rsrc(CurDAG, DL, N2);
        Offset = N3;
        ImmOffset = CurDAG->getTargetConstant(C1->getZExtValue(), MVT::i16);
        return true;
      }

      // (add N0, C1)
      Ptr = wrapAddr64Rsrc(CurDAG, DL, CurDAG->getTargetConstant(0, MVT::i64));
      Offset = N0;
      ImmOffset = CurDAG->getTargetConstant(C1->getZExtValue(), MVT::i16);
      return true;
    }
  }
  if (Addr.getOpcode() == ISD::ADD) {
    // (add N0, N1)
    SDValue N0 = Addr.getOperand(0);
    SDValue N1 = Addr.getOperand(1);
    Ptr = wrapAddr64Rsrc(CurDAG, DL, N0);
    Offset = N1;
    ImmOffset = CurDAG->getTargetConstant(0, MVT::i16);
    return true;
  }

  // default case
  Ptr = wrapAddr64Rsrc(CurDAG, DL, CurDAG->getConstant(0, MVT::i64));
  Offset = Addr;
  ImmOffset = CurDAG->getTargetConstant(0, MVT::i16);
  return true;
}

/// \brief Return a resource descriptor with the 'Add TID' bit enabled.
/// The TID (Thread ID) is multiplied by the stride value (bits [61:48]
/// of the resource descriptor) to create an offset, which is added to the
/// resource pointer.
static SDValue buildScratchRSRC(SelectionDAG *DAG, SDLoc DL, SDValue Ptr) {

  uint64_t Rsrc = AMDGPU::RSRC_DATA_FORMAT | AMDGPU::RSRC_TID_ENABLE |
                  0xffffffff;

  SDValue PtrLo = DAG->getTargetExtractSubreg(AMDGPU::sub0, DL, MVT::i32, Ptr);
  SDValue PtrHi = DAG->getTargetExtractSubreg(AMDGPU::sub1, DL, MVT::i32, Ptr);
  SDValue DataLo = DAG->getTargetConstant(
      Rsrc & APInt::getAllOnesValue(32).getZExtValue(), MVT::i32);
  SDValue DataHi = DAG->getTargetConstant(Rsrc >> 32, MVT::i32);

  const SDValue Ops[] = { PtrLo, PtrHi, DataLo, DataHi };
  return SDValue(DAG->getMachineNode(AMDGPU::SI_BUFFER_RSRC, DL,
                                     MVT::v4i32, Ops), 0);
}

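// Scratch (private) accesses: the resource descriptor is built from the
// preloaded scratch pointer, SOffset comes from the preloaded scratch wave
// offset, and the per-lane address is split into a VAddr plus a 12-bit
// immediate offset where possible (constant offsets, frame indices, or a
// plain address).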
bool AMDGPUDAGToDAGISel::SelectMUBUFScratch(SDValue Addr, SDValue &Rsrc,
                                            SDValue &VAddr, SDValue &SOffset,
                                            SDValue &ImmOffset) const {

  SDLoc DL(Addr);
  MachineFunction &MF = CurDAG->getMachineFunction();
  const SIRegisterInfo *TRI = static_cast<const SIRegisterInfo *>(
      MF.getTarget().getSubtargetImpl()->getRegisterInfo());
  MachineRegisterInfo &MRI = MF.getRegInfo();


  unsigned ScratchPtrReg =
      TRI->getPreloadedValue(MF, SIRegisterInfo::SCRATCH_PTR);
  unsigned ScratchOffsetReg =
      TRI->getPreloadedValue(MF, SIRegisterInfo::SCRATCH_WAVE_OFFSET);

  Rsrc = buildScratchRSRC(CurDAG, DL,
                          CurDAG->getCopyFromReg(CurDAG->getEntryNode(), DL,
                              MRI.getLiveInVirtReg(ScratchPtrReg), MVT::i64));
  SOffset = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), DL,
      MRI.getLiveInVirtReg(ScratchOffsetReg), MVT::i32);

  // (add n0, c1)
  if (CurDAG->isBaseWithConstantOffset(Addr)) {
    SDValue N1 = Addr.getOperand(1);
    ConstantSDNode *C1 = cast<ConstantSDNode>(N1);

    if (isLegalMUBUFImmOffset(C1)) {
      VAddr = Addr.getOperand(0);
      ImmOffset = CurDAG->getTargetConstant(C1->getZExtValue(), MVT::i16);
      return true;
    }
  }

  // (add FI, n0)
  if ((Addr.getOpcode() == ISD::ADD || Addr.getOpcode() == ISD::OR) &&
      isa<FrameIndexSDNode>(Addr.getOperand(0))) {
    VAddr = Addr.getOperand(1);
    ImmOffset = Addr.getOperand(0);
    return true;
  }

  // (FI)
  if (isa<FrameIndexSDNode>(Addr)) {
    VAddr = SDValue(CurDAG->getMachineNode(AMDGPU::V_MOV_B32_e32, DL, MVT::i32,
                                           CurDAG->getConstant(0, MVT::i32)), 0);
    ImmOffset = Addr;
    return true;
  }

  // (node)
  VAddr = Addr;
  ImmOffset = CurDAG->getTargetConstant(0, MVT::i16);
  return true;
}

bool AMDGPUDAGToDAGISel::SelectMUBUFAddr32(SDValue Addr, SDValue &SRsrc,
                                           SDValue &VAddr, SDValue &SOffset,
                                           SDValue &Offset, SDValue &Offen,
                                           SDValue &Idxen, SDValue &GLC,
                                           SDValue &SLC, SDValue &TFE) const {

  GLC = CurDAG->getTargetConstant(0, MVT::i1);
  SLC = CurDAG->getTargetConstant(0, MVT::i1);
  TFE = CurDAG->getTargetConstant(0, MVT::i1);

  Idxen = CurDAG->getTargetConstant(0, MVT::i1);
  Offen = CurDAG->getTargetConstant(1, MVT::i1);

  return SelectMUBUFScratch(Addr, SRsrc, VAddr, SOffset, Offset);
}

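// Fold fneg/fabs wrappers on a VOP3 source into the NEG/ABS bits of the
// source-modifier immediate instead of selecting separate instructions.
// SelectVOP3Mods0 additionally returns zeroed clamp/omod operands.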
bool AMDGPUDAGToDAGISel::SelectVOP3Mods(SDValue In, SDValue &Src,
                                        SDValue &SrcMods) const {

  unsigned Mods = 0;

  Src = In;

  if (Src.getOpcode() == ISD::FNEG) {
    Mods |= SISrcMods::NEG;
    Src = Src.getOperand(0);
  }

  if (Src.getOpcode() == ISD::FABS) {
    Mods |= SISrcMods::ABS;
    Src = Src.getOperand(0);
  }

  SrcMods = CurDAG->getTargetConstant(Mods, MVT::i32);

  return true;
}

bool AMDGPUDAGToDAGISel::SelectVOP3Mods0(SDValue In, SDValue &Src,
                                         SDValue &SrcMods, SDValue &Clamp,
                                         SDValue &Omod) const {
  // FIXME: Handle Clamp and Omod
  Clamp = CurDAG->getTargetConstant(0, MVT::i32);
  Omod = CurDAG->getTargetConstant(0, MVT::i32);

  return SelectVOP3Mods(In, Src, SrcMods);
}

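// After selection, repeatedly run AMDGPUTargetLowering::PostISelFolding over
// every machine node in the DAG, replacing the nodes it can fold and pruning
// dead nodes, until a full pass makes no further changes.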
void AMDGPUDAGToDAGISel::PostprocessISelDAG() {
  const AMDGPUTargetLowering& Lowering =
      *static_cast<const AMDGPUTargetLowering*>(getTargetLowering());
  bool IsModified = false;
  do {
    IsModified = false;
    // Go over all selected nodes and try to fold them a bit more
    for (SelectionDAG::allnodes_iterator I = CurDAG->allnodes_begin(),
         E = CurDAG->allnodes_end(); I != E; ++I) {

      SDNode *Node = I;

      MachineSDNode *MachineNode = dyn_cast<MachineSDNode>(I);
      if (!MachineNode)
        continue;

      SDNode *ResNode = Lowering.PostISelFolding(MachineNode, *CurDAG);
      if (ResNode != Node) {
        ReplaceUses(Node, ResNode);
        IsModified = true;
      }
    }
    CurDAG->RemoveDeadNodes();
  } while (IsModified);
}