//===-- AMDILISelDAGToDAG.cpp - A dag to dag inst selector for AMDIL ------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//==-----------------------------------------------------------------------===//
//
/// \file
/// \brief Defines an instruction selector for the AMDGPU target.
//
//===----------------------------------------------------------------------===//
#include "AMDGPUInstrInfo.h"
#include "AMDGPUISelLowering.h" // For AMDGPUISD
#include "AMDGPURegisterInfo.h"
#include "AMDGPUSubtarget.h"
#include "R600InstrInfo.h"
#include "SIISelLowering.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/IR/Function.h"

using namespace llvm;

//===----------------------------------------------------------------------===//
// Instruction Selector Implementation
//===----------------------------------------------------------------------===//

namespace {
/// AMDGPU specific code to select AMDGPU machine instructions for
/// SelectionDAG operations.
class AMDGPUDAGToDAGISel : public SelectionDAGISel {
  // Subtarget - Keep a pointer to the AMDGPU Subtarget around so that we can
  // make the right decision when generating code for different targets.
  const AMDGPUSubtarget &Subtarget;
public:
  AMDGPUDAGToDAGISel(TargetMachine &TM);
  virtual ~AMDGPUDAGToDAGISel();

  SDNode *Select(SDNode *N) override;
  const char *getPassName() const override;
  void PostprocessISelDAG() override;

private:
  bool isInlineImmediate(SDNode *N) const;
  inline SDValue getSmallIPtrImm(unsigned Imm);
  bool FoldOperand(SDValue &Src, SDValue &Sel, SDValue &Neg, SDValue &Abs,
                   const R600InstrInfo *TII);
  bool FoldOperands(unsigned, const R600InstrInfo *, std::vector<SDValue> &);
  bool FoldDotOperands(unsigned, const R600InstrInfo *, std::vector<SDValue> &);

  // Complex pattern selectors
  bool SelectADDRParam(SDValue Addr, SDValue& R1, SDValue& R2);
  bool SelectADDR(SDValue N, SDValue &R1, SDValue &R2);
  bool SelectADDR64(SDValue N, SDValue &R1, SDValue &R2);

  static bool checkType(const Value *ptr, unsigned int addrspace);
  static bool checkPrivateAddress(const MachineMemOperand *Op);

  static bool isGlobalStore(const StoreSDNode *N);
  static bool isPrivateStore(const StoreSDNode *N);
  static bool isLocalStore(const StoreSDNode *N);
  static bool isRegionStore(const StoreSDNode *N);

  bool isCPLoad(const LoadSDNode *N) const;
  bool isConstantLoad(const LoadSDNode *N, int cbID) const;
  bool isGlobalLoad(const LoadSDNode *N) const;
  bool isParamLoad(const LoadSDNode *N) const;
  bool isPrivateLoad(const LoadSDNode *N) const;
  bool isLocalLoad(const LoadSDNode *N) const;
  bool isRegionLoad(const LoadSDNode *N) const;

  /// \returns True if the current basic block being selected is at control
  /// flow depth 0, meaning that the current block dominates the
  /// exit block.
  bool isCFDepth0() const;

  const TargetRegisterClass *getOperandRegClass(SDNode *N, unsigned OpNo) const;
  bool SelectGlobalValueConstantOffset(SDValue Addr, SDValue& IntPtr);
  bool SelectGlobalValueVariableOffset(SDValue Addr, SDValue &BaseReg,
                                       SDValue& Offset);
  bool SelectADDRVTX_READ(SDValue Addr, SDValue &Base, SDValue &Offset);
  bool SelectADDRIndirect(SDValue Addr, SDValue &Base, SDValue &Offset);

  // Include the pieces autogenerated from the target description.
#include "AMDGPUGenDAGISel.inc"
};
} // end anonymous namespace

/// \brief This pass converts a legalized DAG into an AMDGPU-specific
/// DAG, ready for instruction scheduling.
FunctionPass *llvm::createAMDGPUISelDag(TargetMachine &TM) {
  return new AMDGPUDAGToDAGISel(TM);
}

AMDGPUDAGToDAGISel::AMDGPUDAGToDAGISel(TargetMachine &TM)
  : SelectionDAGISel(TM), Subtarget(TM.getSubtarget<AMDGPUSubtarget>()) {
}

AMDGPUDAGToDAGISel::~AMDGPUDAGToDAGISel() {
}

bool AMDGPUDAGToDAGISel::isInlineImmediate(SDNode *N) const {
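  // SITargetLowering::analyzeImmediate returns 0 when the value can be encoded
  // as an inline immediate (no literal constant is required).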
  const SITargetLowering *TL
      = static_cast<const SITargetLowering *>(getTargetLowering());
  return TL->analyzeImmediate(N) == 0;
}

/// \brief Determine the register class for \p OpNo
/// \returns The register class of the virtual register that will be used for
/// the given operand number \p OpNo or NULL if the register class cannot be
/// determined.
const TargetRegisterClass *AMDGPUDAGToDAGISel::getOperandRegClass(SDNode *N,
                                                          unsigned OpNo) const {
  if (!N->isMachineOpcode())
    return nullptr;

  switch (N->getMachineOpcode()) {
  default: {
    const MCInstrDesc &Desc = TM.getInstrInfo()->get(N->getMachineOpcode());
    unsigned OpIdx = Desc.getNumDefs() + OpNo;
    if (OpIdx >= Desc.getNumOperands())
      return nullptr;
    int RegClass = Desc.OpInfo[OpIdx].RegClass;
    if (RegClass == -1)
      return nullptr;

    return TM.getRegisterInfo()->getRegClass(RegClass);
  }
  case AMDGPU::REG_SEQUENCE: {
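    // A REG_SEQUENCE carries its register class ID as operand 0, followed by
    // (value, sub-register index) pairs, so the operand's class is the
    // super-class constrained to the corresponding sub-register.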
    unsigned RCID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
    const TargetRegisterClass *SuperRC = TM.getRegisterInfo()->getRegClass(RCID);

    SDValue SubRegOp = N->getOperand(OpNo + 1);
    unsigned SubRegIdx = cast<ConstantSDNode>(SubRegOp)->getZExtValue();
    return TM.getRegisterInfo()->getSubClassWithSubReg(SuperRC, SubRegIdx);
  }
  }
}

SDValue AMDGPUDAGToDAGISel::getSmallIPtrImm(unsigned int Imm) {
  return CurDAG->getTargetConstant(Imm, MVT::i32);
}

bool AMDGPUDAGToDAGISel::SelectADDRParam(
  SDValue Addr, SDValue& R1, SDValue& R2) {

  if (Addr.getOpcode() == ISD::FrameIndex) {
    if (FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
      R1 = CurDAG->getTargetFrameIndex(FIN->getIndex(), MVT::i32);
      R2 = CurDAG->getTargetConstant(0, MVT::i32);
    } else {
      R1 = Addr;
      R2 = CurDAG->getTargetConstant(0, MVT::i32);
    }
  } else if (Addr.getOpcode() == ISD::ADD) {
    R1 = Addr.getOperand(0);
    R2 = Addr.getOperand(1);
  } else {
    R1 = Addr;
    R2 = CurDAG->getTargetConstant(0, MVT::i32);
  }
  return true;
}

bool AMDGPUDAGToDAGISel::SelectADDR(SDValue Addr, SDValue& R1, SDValue& R2) {
  if (Addr.getOpcode() == ISD::TargetExternalSymbol ||
      Addr.getOpcode() == ISD::TargetGlobalAddress) {
    return false;
  }
  return SelectADDRParam(Addr, R1, R2);
}


bool AMDGPUDAGToDAGISel::SelectADDR64(SDValue Addr, SDValue& R1, SDValue& R2) {
  if (Addr.getOpcode() == ISD::TargetExternalSymbol ||
      Addr.getOpcode() == ISD::TargetGlobalAddress) {
    return false;
  }

  if (Addr.getOpcode() == ISD::FrameIndex) {
    if (FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
      R1 = CurDAG->getTargetFrameIndex(FIN->getIndex(), MVT::i64);
      R2 = CurDAG->getTargetConstant(0, MVT::i64);
    } else {
      R1 = Addr;
      R2 = CurDAG->getTargetConstant(0, MVT::i64);
    }
  } else if (Addr.getOpcode() == ISD::ADD) {
    R1 = Addr.getOperand(0);
    R2 = Addr.getOperand(1);
  } else {
    R1 = Addr;
    R2 = CurDAG->getTargetConstant(0, MVT::i64);
  }
  return true;
}

SDNode *AMDGPUDAGToDAGISel::Select(SDNode *N) {
  unsigned int Opc = N->getOpcode();
  if (N->isMachineOpcode()) {
    N->setNodeId(-1);
    return nullptr;   // Already selected.
  }

  const AMDGPUSubtarget &ST = TM.getSubtarget<AMDGPUSubtarget>();
  switch (Opc) {
  default: break;
  // We are selecting i64 ADD here instead of custom lowering it during
  // DAG legalization, so we can fold some i64 ADDs used for address
  // calculation into the LOAD and STORE instructions.
  case ISD::ADD: {
    if (N->getValueType(0) != MVT::i64 ||
        ST.getGeneration() < AMDGPUSubtarget::SOUTHERN_ISLANDS)
      break;

    SDLoc DL(N);
    SDValue LHS = N->getOperand(0);
    SDValue RHS = N->getOperand(1);

    SDValue Sub0 = CurDAG->getTargetConstant(AMDGPU::sub0, MVT::i32);
    SDValue Sub1 = CurDAG->getTargetConstant(AMDGPU::sub1, MVT::i32);

    SDNode *Lo0 = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
                                         DL, MVT::i32, LHS, Sub0);
    SDNode *Hi0 = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
                                         DL, MVT::i32, LHS, Sub1);

    SDNode *Lo1 = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
                                         DL, MVT::i32, RHS, Sub0);
    SDNode *Hi1 = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
                                         DL, MVT::i32, RHS, Sub1);

    SDVTList VTList = CurDAG->getVTList(MVT::i32, MVT::Glue);

    SmallVector<SDValue, 8> AddLoArgs;
    AddLoArgs.push_back(SDValue(Lo0, 0));
    AddLoArgs.push_back(SDValue(Lo1, 0));

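    // Add the low halves first; the extra MVT::Glue result carries the
    // carry-out into the high-half add below. The scalar (SALU) opcodes are
    // used only at control-flow depth 0, where the block is expected to
    // execute uniformly; otherwise the vector (VALU) forms are used.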
    SDNode *AddLo = CurDAG->getMachineNode(
        isCFDepth0() ? AMDGPU::S_ADD_I32 : AMDGPU::V_ADD_I32_e32,
        DL, VTList, AddLoArgs);
    SDValue Carry = SDValue(AddLo, 1);
    SDNode *AddHi = CurDAG->getMachineNode(
        isCFDepth0() ? AMDGPU::S_ADDC_U32 : AMDGPU::V_ADDC_U32_e32,
        DL, MVT::i32, SDValue(Hi0, 0), SDValue(Hi1, 0), Carry);

    SDValue Args[5] = {
      CurDAG->getTargetConstant(AMDGPU::SReg_64RegClassID, MVT::i32),
      SDValue(AddLo, 0),
      Sub0,
      SDValue(AddHi, 0),
      Sub1,
    };
    return CurDAG->SelectNodeTo(N, AMDGPU::REG_SEQUENCE, MVT::i64, Args);
  }
  case ISD::SCALAR_TO_VECTOR:
  case ISD::BUILD_VECTOR: {
    unsigned RegClassID;
    const AMDGPURegisterInfo *TRI =
        static_cast<const AMDGPURegisterInfo*>(TM.getRegisterInfo());
    const SIRegisterInfo *SIRI =
        static_cast<const SIRegisterInfo*>(TM.getRegisterInfo());
    EVT VT = N->getValueType(0);
    unsigned NumVectorElts = VT.getVectorNumElements();
    EVT EltVT = VT.getVectorElementType();
    assert(EltVT.bitsEq(MVT::i32));
    if (ST.getGeneration() >= AMDGPUSubtarget::SOUTHERN_ISLANDS) {
      bool UseVReg = true;
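      // Build the vector in an SGPR class if any machine-node user expects an
      // SGPR operand; otherwise default to VGPRs.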
      for (SDNode::use_iterator U = N->use_begin(), E = SDNode::use_end();
                                                    U != E; ++U) {
        if (!U->isMachineOpcode()) {
          continue;
        }
        const TargetRegisterClass *RC = getOperandRegClass(*U, U.getOperandNo());
        if (!RC) {
          continue;
        }
        if (SIRI->isSGPRClass(RC)) {
          UseVReg = false;
        }
      }
      switch(NumVectorElts) {
      case 1: RegClassID = UseVReg ? AMDGPU::VReg_32RegClassID :
                                     AMDGPU::SReg_32RegClassID;
        break;
      case 2: RegClassID = UseVReg ? AMDGPU::VReg_64RegClassID :
                                     AMDGPU::SReg_64RegClassID;
        break;
      case 4: RegClassID = UseVReg ? AMDGPU::VReg_128RegClassID :
                                     AMDGPU::SReg_128RegClassID;
        break;
      case 8: RegClassID = UseVReg ? AMDGPU::VReg_256RegClassID :
                                     AMDGPU::SReg_256RegClassID;
        break;
      case 16: RegClassID = UseVReg ? AMDGPU::VReg_512RegClassID :
                                      AMDGPU::SReg_512RegClassID;
        break;
      default: llvm_unreachable("Do not know how to lower this BUILD_VECTOR");
      }
    } else {
      // BUILD_VECTOR was lowered into an IMPLICIT_DEF + 4 INSERT_SUBREG
      // that adds a 128-bit register copy when going through the
      // TwoAddressInstructions pass. We want to avoid 128-bit copies as much
      // as possible because they can't be bundled by our scheduler.
      switch(NumVectorElts) {
      case 2: RegClassID = AMDGPU::R600_Reg64RegClassID; break;
      case 4: RegClassID = AMDGPU::R600_Reg128RegClassID; break;
      default: llvm_unreachable("Do not know how to lower this BUILD_VECTOR");
      }
    }

    SDValue RegClass = CurDAG->getTargetConstant(RegClassID, MVT::i32);

    if (NumVectorElts == 1) {
      return CurDAG->SelectNodeTo(N, AMDGPU::COPY_TO_REGCLASS, EltVT,
                                  N->getOperand(0), RegClass);
    }

    assert(NumVectorElts <= 16 && "Vectors with more than 16 elements not "
                                  "supported yet");
    // 16 = Max Num Vector Elements
    // 2 = 2 REG_SEQUENCE operands per element (value, subreg index)
    // 1 = Vector Register Class
    SmallVector<SDValue, 16 * 2 + 1> RegSeqArgs(NumVectorElts * 2 + 1);

    RegSeqArgs[0] = CurDAG->getTargetConstant(RegClassID, MVT::i32);
    bool IsRegSeq = true;
    unsigned NOps = N->getNumOperands();
    for (unsigned i = 0; i < NOps; i++) {
      // XXX: Why is this here?
      if (dyn_cast<RegisterSDNode>(N->getOperand(i))) {
        IsRegSeq = false;
        break;
      }
      RegSeqArgs[1 + (2 * i)] = N->getOperand(i);
      RegSeqArgs[1 + (2 * i) + 1] =
          CurDAG->getTargetConstant(TRI->getSubRegFromChannel(i), MVT::i32);
    }

    if (NOps != NumVectorElts) {
      // Fill in the missing undef elements if this was a scalar_to_vector.
      assert(Opc == ISD::SCALAR_TO_VECTOR && NOps < NumVectorElts);

      MachineSDNode *ImpDef = CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF,
                                                     SDLoc(N), EltVT);
      for (unsigned i = NOps; i < NumVectorElts; ++i) {
        RegSeqArgs[1 + (2 * i)] = SDValue(ImpDef, 0);
        RegSeqArgs[1 + (2 * i) + 1] =
            CurDAG->getTargetConstant(TRI->getSubRegFromChannel(i), MVT::i32);
      }
    }

    if (!IsRegSeq)
      break;
    return CurDAG->SelectNodeTo(N, AMDGPU::REG_SEQUENCE, N->getVTList(),
                                RegSeqArgs);
  }
  case ISD::BUILD_PAIR: {
    SDValue RC, SubReg0, SubReg1;
    if (ST.getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS) {
      break;
    }
    if (N->getValueType(0) == MVT::i128) {
      RC = CurDAG->getTargetConstant(AMDGPU::SReg_128RegClassID, MVT::i32);
      SubReg0 = CurDAG->getTargetConstant(AMDGPU::sub0_sub1, MVT::i32);
      SubReg1 = CurDAG->getTargetConstant(AMDGPU::sub2_sub3, MVT::i32);
    } else if (N->getValueType(0) == MVT::i64) {
      RC = CurDAG->getTargetConstant(AMDGPU::SReg_64RegClassID, MVT::i32);
      SubReg0 = CurDAG->getTargetConstant(AMDGPU::sub0, MVT::i32);
      SubReg1 = CurDAG->getTargetConstant(AMDGPU::sub1, MVT::i32);
    } else {
      llvm_unreachable("Unhandled value type for BUILD_PAIR");
    }
    const SDValue Ops[] = { RC, N->getOperand(0), SubReg0,
                            N->getOperand(1), SubReg1 };
    return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE,
                                  SDLoc(N), N->getValueType(0), Ops);
  }

  case ISD::Constant:
  case ISD::ConstantFP: {
    const AMDGPUSubtarget &ST = TM.getSubtarget<AMDGPUSubtarget>();
    if (ST.getGeneration() < AMDGPUSubtarget::SOUTHERN_ISLANDS ||
        N->getValueType(0).getSizeInBits() != 64 || isInlineImmediate(N))
      break;

    uint64_t Imm;
    if (ConstantFPSDNode *FP = dyn_cast<ConstantFPSDNode>(N))
      Imm = FP->getValueAPF().bitcastToAPInt().getZExtValue();
    else {
      ConstantSDNode *C = cast<ConstantSDNode>(N);
      Imm = C->getZExtValue();
    }

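    // Materialize the 64-bit immediate as two 32-bit S_MOV_B32s and combine
    // the halves back together with a REG_SEQUENCE.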
    SDNode *Lo = CurDAG->getMachineNode(AMDGPU::S_MOV_B32, SDLoc(N), MVT::i32,
                             CurDAG->getConstant(Imm & 0xFFFFFFFF, MVT::i32));
    SDNode *Hi = CurDAG->getMachineNode(AMDGPU::S_MOV_B32, SDLoc(N), MVT::i32,
                             CurDAG->getConstant(Imm >> 32, MVT::i32));
    const SDValue Ops[] = {
      CurDAG->getTargetConstant(AMDGPU::SReg_64RegClassID, MVT::i32),
      SDValue(Lo, 0), CurDAG->getTargetConstant(AMDGPU::sub0, MVT::i32),
      SDValue(Hi, 0), CurDAG->getTargetConstant(AMDGPU::sub1, MVT::i32)
    };

    return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, SDLoc(N),
                                  N->getValueType(0), Ops);
  }

  case AMDGPUISD::REGISTER_LOAD: {
    if (ST.getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS)
      break;
    SDValue Addr, Offset;

    SelectADDRIndirect(N->getOperand(1), Addr, Offset);
    const SDValue Ops[] = {
      Addr,
      Offset,
      CurDAG->getTargetConstant(0, MVT::i32),
      N->getOperand(0),
    };
    return CurDAG->getMachineNode(AMDGPU::SI_RegisterLoad, SDLoc(N),
                                  CurDAG->getVTList(MVT::i32, MVT::i64, MVT::Other),
                                  Ops);
  }
  case AMDGPUISD::REGISTER_STORE: {
    if (ST.getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS)
      break;
    SDValue Addr, Offset;
    SelectADDRIndirect(N->getOperand(2), Addr, Offset);
    const SDValue Ops[] = {
      N->getOperand(1),
      Addr,
      Offset,
      CurDAG->getTargetConstant(0, MVT::i32),
      N->getOperand(0),
    };
    return CurDAG->getMachineNode(AMDGPU::SI_RegisterStorePseudo, SDLoc(N),
                                  CurDAG->getVTList(MVT::Other),
                                  Ops);
  }

  case AMDGPUISD::BFE_I32:
  case AMDGPUISD::BFE_U32: {
    if (ST.getGeneration() < AMDGPUSubtarget::SOUTHERN_ISLANDS)
      break;

    // There is a scalar version available, but unlike the vector version which
    // has a separate operand for the offset and width, the scalar version packs
    // the width and offset into a single operand. Try to move to the scalar
    // version if the offsets are constant, so that we can try to keep extended
    // loads of kernel arguments in SGPRs.

    // TODO: Technically we could try to pattern match scalar bitshifts of
    // dynamic values, but it's probably not useful.
    ConstantSDNode *Offset = dyn_cast<ConstantSDNode>(N->getOperand(1));
    if (!Offset)
      break;

    ConstantSDNode *Width = dyn_cast<ConstantSDNode>(N->getOperand(2));
    if (!Width)
      break;

    bool Signed = Opc == AMDGPUISD::BFE_I32;

    // Pack the offset and width of the BFE into the format expected by
    // S_BFE_I32 / S_BFE_U32: in the second source operand, bits [5:0] contain
    // the offset and bits [22:16] the width.

    uint32_t OffsetVal = Offset->getZExtValue();
    uint32_t WidthVal = Width->getZExtValue();

    uint32_t PackedVal = OffsetVal | WidthVal << 16;

    SDValue PackedOffsetWidth = CurDAG->getTargetConstant(PackedVal, MVT::i32);
    return CurDAG->getMachineNode(Signed ? AMDGPU::S_BFE_I32 : AMDGPU::S_BFE_U32,
                                  SDLoc(N),
                                  MVT::i32,
                                  N->getOperand(0),
                                  PackedOffsetWidth);

  }
  }
  return SelectCode(N);
}


bool AMDGPUDAGToDAGISel::checkType(const Value *Ptr, unsigned AS) {
  assert(AS != 0 && "Use checkPrivateAddress instead.");
  if (!Ptr)
    return false;

  return Ptr->getType()->getPointerAddressSpace() == AS;
}

bool AMDGPUDAGToDAGISel::checkPrivateAddress(const MachineMemOperand *Op) {
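  // Operands backed by a pseudo source value (stack slots, constant pool
  // entries, etc.) have no IR value to inspect and are treated as private.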
  if (Op->getPseudoValue())
    return true;

  if (PointerType *PT = dyn_cast<PointerType>(Op->getValue()->getType()))
    return PT->getAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS;

  return false;
}

bool AMDGPUDAGToDAGISel::isGlobalStore(const StoreSDNode *N) {
  return checkType(N->getMemOperand()->getValue(), AMDGPUAS::GLOBAL_ADDRESS);
}

bool AMDGPUDAGToDAGISel::isPrivateStore(const StoreSDNode *N) {
  const Value *MemVal = N->getMemOperand()->getValue();
  return (!checkType(MemVal, AMDGPUAS::LOCAL_ADDRESS) &&
          !checkType(MemVal, AMDGPUAS::GLOBAL_ADDRESS) &&
          !checkType(MemVal, AMDGPUAS::REGION_ADDRESS));
}

bool AMDGPUDAGToDAGISel::isLocalStore(const StoreSDNode *N) {
  return checkType(N->getMemOperand()->getValue(), AMDGPUAS::LOCAL_ADDRESS);
}

bool AMDGPUDAGToDAGISel::isRegionStore(const StoreSDNode *N) {
  return checkType(N->getMemOperand()->getValue(), AMDGPUAS::REGION_ADDRESS);
}

bool AMDGPUDAGToDAGISel::isConstantLoad(const LoadSDNode *N, int CbId) const {
  const Value *MemVal = N->getMemOperand()->getValue();
  if (CbId == -1)
    return checkType(MemVal, AMDGPUAS::CONSTANT_ADDRESS);

  return checkType(MemVal, AMDGPUAS::CONSTANT_BUFFER_0 + CbId);
}

bool AMDGPUDAGToDAGISel::isGlobalLoad(const LoadSDNode *N) const {
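  // Constant-address loads are also treated as global loads on pre-SI targets
  // and for types smaller than 32 bits.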
  if (N->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS) {
    const AMDGPUSubtarget &ST = TM.getSubtarget<AMDGPUSubtarget>();
    if (ST.getGeneration() < AMDGPUSubtarget::SOUTHERN_ISLANDS ||
        N->getMemoryVT().bitsLT(MVT::i32)) {
      return true;
    }
  }
  return checkType(N->getMemOperand()->getValue(), AMDGPUAS::GLOBAL_ADDRESS);
}

bool AMDGPUDAGToDAGISel::isParamLoad(const LoadSDNode *N) const {
  return checkType(N->getMemOperand()->getValue(), AMDGPUAS::PARAM_I_ADDRESS);
}

bool AMDGPUDAGToDAGISel::isLocalLoad(const LoadSDNode *N) const {
  return checkType(N->getMemOperand()->getValue(), AMDGPUAS::LOCAL_ADDRESS);
}

bool AMDGPUDAGToDAGISel::isRegionLoad(const LoadSDNode *N) const {
  return checkType(N->getMemOperand()->getValue(), AMDGPUAS::REGION_ADDRESS);
}

bool AMDGPUDAGToDAGISel::isCPLoad(const LoadSDNode *N) const {
  MachineMemOperand *MMO = N->getMemOperand();
  if (checkPrivateAddress(N->getMemOperand())) {
    if (MMO) {
      const PseudoSourceValue *PSV = MMO->getPseudoValue();
      if (PSV && PSV == PseudoSourceValue::getConstantPool()) {
        return true;
      }
    }
  }
  return false;
}

bool AMDGPUDAGToDAGISel::isPrivateLoad(const LoadSDNode *N) const {
  if (checkPrivateAddress(N->getMemOperand())) {
    // Check to make sure we are not a constant pool load or a constant load
    // that is marked as a private load.
    if (isCPLoad(N) || isConstantLoad(N, -1)) {
      return false;
    }
  }

  const Value *MemVal = N->getMemOperand()->getValue();
  if (!checkType(MemVal, AMDGPUAS::LOCAL_ADDRESS) &&
      !checkType(MemVal, AMDGPUAS::GLOBAL_ADDRESS) &&
      !checkType(MemVal, AMDGPUAS::REGION_ADDRESS) &&
      !checkType(MemVal, AMDGPUAS::CONSTANT_ADDRESS) &&
      !checkType(MemVal, AMDGPUAS::PARAM_D_ADDRESS) &&
      !checkType(MemVal, AMDGPUAS::PARAM_I_ADDRESS)) {
    return true;
  }
  return false;
}

bool AMDGPUDAGToDAGISel::isCFDepth0() const {
  // FIXME: Figure out a way to use DominatorTree analysis here.
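  // Heuristic: only the function's entry block and its last block are treated
  // as being at control-flow depth 0.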
  const BasicBlock *CurBlock = FuncInfo->MBB->getBasicBlock();
  const Function *Fn = FuncInfo->Fn;
  return &Fn->front() == CurBlock || &Fn->back() == CurBlock;
}


const char *AMDGPUDAGToDAGISel::getPassName() const {
  return "AMDGPU DAG->DAG Pattern Instruction Selection";
}

#ifdef DEBUGTMP
#undef INT64_C
#endif
#undef DEBUGTMP

//===----------------------------------------------------------------------===//
// Complex Patterns
//===----------------------------------------------------------------------===//

bool AMDGPUDAGToDAGISel::SelectGlobalValueConstantOffset(SDValue Addr,
                                                         SDValue& IntPtr) {
  if (ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(Addr)) {
    IntPtr = CurDAG->getIntPtrConstant(Cst->getZExtValue() / 4, true);
    return true;
  }
  return false;
}

bool AMDGPUDAGToDAGISel::SelectGlobalValueVariableOffset(SDValue Addr,
    SDValue& BaseReg, SDValue &Offset) {
  if (!isa<ConstantSDNode>(Addr)) {
    BaseReg = Addr;
    Offset = CurDAG->getIntPtrConstant(0, true);
    return true;
  }
  return false;
}

bool AMDGPUDAGToDAGISel::SelectADDRVTX_READ(SDValue Addr, SDValue &Base,
                                            SDValue &Offset) {
  ConstantSDNode *IMMOffset;

  if (Addr.getOpcode() == ISD::ADD
      && (IMMOffset = dyn_cast<ConstantSDNode>(Addr.getOperand(1)))
      && isInt<16>(IMMOffset->getZExtValue())) {

    Base = Addr.getOperand(0);
    Offset = CurDAG->getTargetConstant(IMMOffset->getZExtValue(), MVT::i32);
    return true;
  // If the pointer address is constant, we can move it to the offset field.
  } else if ((IMMOffset = dyn_cast<ConstantSDNode>(Addr))
             && isInt<16>(IMMOffset->getZExtValue())) {
    Base = CurDAG->getCopyFromReg(CurDAG->getEntryNode(),
                                  SDLoc(CurDAG->getEntryNode()),
                                  AMDGPU::ZERO, MVT::i32);
    Offset = CurDAG->getTargetConstant(IMMOffset->getZExtValue(), MVT::i32);
    return true;
  }

  // Default case, no offset
  Base = Addr;
  Offset = CurDAG->getTargetConstant(0, MVT::i32);
  return true;
}

bool AMDGPUDAGToDAGISel::SelectADDRIndirect(SDValue Addr, SDValue &Base,
                                            SDValue &Offset) {
  ConstantSDNode *C;

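  // A bare constant is an offset from the indirect base register; for
  // (add|or base, constant) the constant becomes the offset; anything else
  // gets a zero offset.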
  if ((C = dyn_cast<ConstantSDNode>(Addr))) {
    Base = CurDAG->getRegister(AMDGPU::INDIRECT_BASE_ADDR, MVT::i32);
    Offset = CurDAG->getTargetConstant(C->getZExtValue(), MVT::i32);
  } else if ((Addr.getOpcode() == ISD::ADD || Addr.getOpcode() == ISD::OR) &&
             (C = dyn_cast<ConstantSDNode>(Addr.getOperand(1)))) {
    Base = Addr.getOperand(0);
    Offset = CurDAG->getTargetConstant(C->getZExtValue(), MVT::i32);
  } else {
    Base = Addr;
    Offset = CurDAG->getTargetConstant(0, MVT::i32);
  }

  return true;
}

void AMDGPUDAGToDAGISel::PostprocessISelDAG() {
  const AMDGPUTargetLowering& Lowering =
      *static_cast<const AMDGPUTargetLowering*>(getTargetLowering());
  bool IsModified = false;
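  // Iterate to a fixed point: keep folding until a full pass over the DAG
  // makes no further changes.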
  do {
    IsModified = false;
    // Go over all selected nodes and try to fold them a bit more.
    for (SelectionDAG::allnodes_iterator I = CurDAG->allnodes_begin(),
         E = CurDAG->allnodes_end(); I != E; ++I) {

      SDNode *Node = I;

      MachineSDNode *MachineNode = dyn_cast<MachineSDNode>(I);
      if (!MachineNode)
        continue;

      SDNode *ResNode = Lowering.PostISelFolding(MachineNode, *CurDAG);
      if (ResNode != Node) {
        ReplaceUses(Node, ResNode);
        IsModified = true;
      }
    }
    CurDAG->RemoveDeadNodes();
  } while (IsModified);
}