//===-- AMDILISelDAGToDAG.cpp - A dag to dag inst selector for AMDIL ------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//==-----------------------------------------------------------------------===//
//
/// \file
/// \brief Defines an instruction selector for the AMDGPU target.
//
//===----------------------------------------------------------------------===//
14#include "AMDGPUInstrInfo.h"
15#include "AMDGPUISelLowering.h" // For AMDGPUISD
16#include "AMDGPURegisterInfo.h"
Tom Stellard75aadc22012-12-11 21:25:42 +000017#include "R600InstrInfo.h"
Christian Konigf82901a2013-02-26 17:52:23 +000018#include "SIISelLowering.h"
Matt Arsenault2aabb062013-06-18 23:37:58 +000019#include "llvm/Analysis/ValueTracking.h"
Tom Stellard2183b702013-06-03 17:39:46 +000020#include "llvm/CodeGen/MachineRegisterInfo.h"
Tom Stellard75aadc22012-12-11 21:25:42 +000021#include "llvm/CodeGen/PseudoSourceValue.h"
Benjamin Kramerd78bb462013-05-23 17:10:37 +000022#include "llvm/CodeGen/SelectionDAG.h"
Tom Stellard75aadc22012-12-11 21:25:42 +000023#include "llvm/CodeGen/SelectionDAGISel.h"
Chandler Carrutha4ea2692014-03-04 11:26:31 +000024#include "llvm/IR/ValueMap.h"
Tom Stellard75aadc22012-12-11 21:25:42 +000025#include "llvm/Support/Compiler.h"
26#include <list>
27#include <queue>

using namespace llvm;

//===----------------------------------------------------------------------===//
// Instruction Selector Implementation
//===----------------------------------------------------------------------===//

namespace {
/// AMDGPU specific code to select AMDGPU machine instructions for
/// SelectionDAG operations.
class AMDGPUDAGToDAGISel : public SelectionDAGISel {
  // Subtarget - Keep a reference to the AMDGPU Subtarget around so that we can
  // make the right decision when generating code for different targets.
  const AMDGPUSubtarget &Subtarget;
public:
  AMDGPUDAGToDAGISel(TargetMachine &TM);
  virtual ~AMDGPUDAGToDAGISel();

  SDNode *Select(SDNode *N);
  virtual const char *getPassName() const;
  virtual void PostprocessISelDAG();

private:
  bool isInlineImmediate(SDNode *N) const;
  inline SDValue getSmallIPtrImm(unsigned Imm);
  bool FoldOperand(SDValue &Src, SDValue &Sel, SDValue &Neg, SDValue &Abs,
                   const R600InstrInfo *TII);
  bool FoldOperands(unsigned, const R600InstrInfo *, std::vector<SDValue> &);
  bool FoldDotOperands(unsigned, const R600InstrInfo *, std::vector<SDValue> &);

  // Complex pattern selectors
  bool SelectADDRParam(SDValue Addr, SDValue &R1, SDValue &R2);
  bool SelectADDR(SDValue N, SDValue &R1, SDValue &R2);
  bool SelectADDR64(SDValue N, SDValue &R1, SDValue &R2);
  SDValue SimplifyI24(SDValue &Op);
  bool SelectI24(SDValue Addr, SDValue &Op);
  bool SelectU24(SDValue Addr, SDValue &Op);

  static bool checkType(const Value *ptr, unsigned int addrspace);

  static bool isGlobalStore(const StoreSDNode *N);
  static bool isPrivateStore(const StoreSDNode *N);
  static bool isLocalStore(const StoreSDNode *N);
  static bool isRegionStore(const StoreSDNode *N);

  bool isCPLoad(const LoadSDNode *N) const;
  bool isConstantLoad(const LoadSDNode *N, int cbID) const;
  bool isGlobalLoad(const LoadSDNode *N) const;
  bool isParamLoad(const LoadSDNode *N) const;
  bool isPrivateLoad(const LoadSDNode *N) const;
  bool isLocalLoad(const LoadSDNode *N) const;
  bool isRegionLoad(const LoadSDNode *N) const;

  const TargetRegisterClass *getOperandRegClass(SDNode *N, unsigned OpNo) const;
  bool SelectGlobalValueConstantOffset(SDValue Addr, SDValue &IntPtr);
  bool SelectGlobalValueVariableOffset(SDValue Addr,
                                       SDValue &BaseReg, SDValue &Offset);
  bool SelectADDRVTX_READ(SDValue Addr, SDValue &Base, SDValue &Offset);
  bool SelectADDRIndirect(SDValue Addr, SDValue &Base, SDValue &Offset);

  // Include the pieces autogenerated from the target description.
#include "AMDGPUGenDAGISel.inc"
};
} // end anonymous namespace

/// \brief This pass converts a legalized DAG into an AMDGPU-specific
/// DAG, ready for instruction scheduling.
FunctionPass *llvm::createAMDGPUISelDag(TargetMachine &TM) {
  return new AMDGPUDAGToDAGISel(TM);
}

AMDGPUDAGToDAGISel::AMDGPUDAGToDAGISel(TargetMachine &TM)
    : SelectionDAGISel(TM), Subtarget(TM.getSubtarget<AMDGPUSubtarget>()) {
}

AMDGPUDAGToDAGISel::~AMDGPUDAGToDAGISel() {
}

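// Returns true when the SI target can encode this node as an inline immediate
// operand (SITargetLowering::analyzeImmediate reports 0 for it), so no
// separate 64-bit literal needs to be materialized.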
bool AMDGPUDAGToDAGISel::isInlineImmediate(SDNode *N) const {
  const SITargetLowering *TL
      = static_cast<const SITargetLowering *>(getTargetLowering());
  return TL->analyzeImmediate(N) == 0;
}

/// \brief Determine the register class for \p OpNo
/// \returns The register class of the virtual register that will be used for
/// the given operand number \p OpNo or NULL if the register class cannot be
/// determined.
const TargetRegisterClass *AMDGPUDAGToDAGISel::getOperandRegClass(SDNode *N,
                                                          unsigned OpNo) const {
  if (!N->isMachineOpcode()) {
    return NULL;
  }
  switch (N->getMachineOpcode()) {
  default: {
    const MCInstrDesc &Desc = TM.getInstrInfo()->get(N->getMachineOpcode());
    unsigned OpIdx = Desc.getNumDefs() + OpNo;
    if (OpIdx >= Desc.getNumOperands())
      return NULL;
    int RegClass = Desc.OpInfo[OpIdx].RegClass;
    if (RegClass == -1) {
      return NULL;
    }
    return TM.getRegisterInfo()->getRegClass(RegClass);
  }
  case AMDGPU::REG_SEQUENCE: {
    const TargetRegisterClass *SuperRC = TM.getRegisterInfo()->getRegClass(
                      cast<ConstantSDNode>(N->getOperand(0))->getZExtValue());
    unsigned SubRegIdx =
        cast<ConstantSDNode>(N->getOperand(OpNo + 1))->getZExtValue();
    return TM.getRegisterInfo()->getSubClassWithSubReg(SuperRC, SubRegIdx);
  }
  }
}

SDValue AMDGPUDAGToDAGISel::getSmallIPtrImm(unsigned int Imm) {
  return CurDAG->getTargetConstant(Imm, MVT::i32);
}

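// Split a parameter-space address into a base (R1) and offset (R2) pair: a
// frame index becomes a TargetFrameIndex plus zero, an ADD is split into its
// two operands, and anything else is used as the base with a zero offset.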
bool AMDGPUDAGToDAGISel::SelectADDRParam(
    SDValue Addr, SDValue &R1, SDValue &R2) {

  if (Addr.getOpcode() == ISD::FrameIndex) {
    if (FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
      R1 = CurDAG->getTargetFrameIndex(FIN->getIndex(), MVT::i32);
      R2 = CurDAG->getTargetConstant(0, MVT::i32);
    } else {
      R1 = Addr;
      R2 = CurDAG->getTargetConstant(0, MVT::i32);
    }
  } else if (Addr.getOpcode() == ISD::ADD) {
    R1 = Addr.getOperand(0);
    R2 = Addr.getOperand(1);
  } else {
    R1 = Addr;
    R2 = CurDAG->getTargetConstant(0, MVT::i32);
  }
  return true;
}

bool AMDGPUDAGToDAGISel::SelectADDR(SDValue Addr, SDValue &R1, SDValue &R2) {
  if (Addr.getOpcode() == ISD::TargetExternalSymbol ||
      Addr.getOpcode() == ISD::TargetGlobalAddress) {
    return false;
  }
  return SelectADDRParam(Addr, R1, R2);
}

bool AMDGPUDAGToDAGISel::SelectADDR64(SDValue Addr, SDValue &R1, SDValue &R2) {
  if (Addr.getOpcode() == ISD::TargetExternalSymbol ||
      Addr.getOpcode() == ISD::TargetGlobalAddress) {
    return false;
  }

  if (Addr.getOpcode() == ISD::FrameIndex) {
    if (FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
      R1 = CurDAG->getTargetFrameIndex(FIN->getIndex(), MVT::i64);
      R2 = CurDAG->getTargetConstant(0, MVT::i64);
    } else {
      R1 = Addr;
      R2 = CurDAG->getTargetConstant(0, MVT::i64);
    }
  } else if (Addr.getOpcode() == ISD::ADD) {
    R1 = Addr.getOperand(0);
    R2 = Addr.getOperand(1);
  } else {
    R1 = Addr;
    R2 = CurDAG->getTargetConstant(0, MVT::i64);
  }
  return true;
}

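// Main selection entry point. A handful of AMDGPU-specific opcodes are handled
// by hand below; everything else falls through to SelectCode(), the matcher
// generated from the TableGen patterns.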
SDNode *AMDGPUDAGToDAGISel::Select(SDNode *N) {
  unsigned int Opc = N->getOpcode();
  if (N->isMachineOpcode()) {
    N->setNodeId(-1);
    return NULL;   // Already selected.
  }
  switch (Opc) {
  default: break;
  // We are selecting i64 ADD here instead of custom lowering it during
  // DAG legalization, so we can fold some i64 ADDs used for address
  // calculation into the LOAD and STORE instructions.
  case ISD::ADD: {
    const AMDGPUSubtarget &ST = TM.getSubtarget<AMDGPUSubtarget>();
    if (N->getValueType(0) != MVT::i64 ||
        ST.getGeneration() < AMDGPUSubtarget::SOUTHERN_ISLANDS)
      break;

    SDLoc DL(N);
    SDValue LHS = N->getOperand(0);
    SDValue RHS = N->getOperand(1);

    SDValue Sub0 = CurDAG->getTargetConstant(AMDGPU::sub0, MVT::i32);
    SDValue Sub1 = CurDAG->getTargetConstant(AMDGPU::sub1, MVT::i32);

    SDNode *Lo0 = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
                                         DL, MVT::i32, LHS, Sub0);
    SDNode *Hi0 = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
                                         DL, MVT::i32, LHS, Sub1);

    SDNode *Lo1 = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
                                         DL, MVT::i32, RHS, Sub0);
    SDNode *Hi1 = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
                                         DL, MVT::i32, RHS, Sub1);

    SDVTList VTList = CurDAG->getVTList(MVT::i32, MVT::Glue);

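    // Add the low 32-bit halves first; the second (glue) result of S_ADD_I32
    // stands in for the carry-out, which the S_ADDC_U32 that adds the high
    // halves consumes.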
    SmallVector<SDValue, 8> AddLoArgs;
    AddLoArgs.push_back(SDValue(Lo0, 0));
    AddLoArgs.push_back(SDValue(Lo1, 0));

    SDNode *AddLo = CurDAG->getMachineNode(AMDGPU::S_ADD_I32, DL,
                                           VTList, AddLoArgs);
    SDValue Carry = SDValue(AddLo, 1);
    SDNode *AddHi = CurDAG->getMachineNode(AMDGPU::S_ADDC_U32, DL,
                                           MVT::i32, SDValue(Hi0, 0),
                                           SDValue(Hi1, 0), Carry);

    SDValue Args[5] = {
      CurDAG->getTargetConstant(AMDGPU::SReg_64RegClassID, MVT::i32),
      SDValue(AddLo, 0),
      Sub0,
      SDValue(AddHi, 0),
      Sub1,
    };
    return CurDAG->SelectNodeTo(N, AMDGPU::REG_SEQUENCE, MVT::i64, Args, 5);
  }
  case ISD::BUILD_VECTOR: {
    unsigned RegClassID;
    const AMDGPUSubtarget &ST = TM.getSubtarget<AMDGPUSubtarget>();
    const AMDGPURegisterInfo *TRI =
        static_cast<const AMDGPURegisterInfo *>(TM.getRegisterInfo());
    const SIRegisterInfo *SIRI =
        static_cast<const SIRegisterInfo *>(TM.getRegisterInfo());
    EVT VT = N->getValueType(0);
    unsigned NumVectorElts = VT.getVectorNumElements();
    assert(VT.getVectorElementType().bitsEq(MVT::i32));
    if (ST.getGeneration() >= AMDGPUSubtarget::SOUTHERN_ISLANDS) {
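      // On SI, build the vector in SGPRs only when some already-selected user
      // needs it in an SGPR register class; otherwise default to VGPRs.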
      bool UseVReg = true;
      for (SDNode::use_iterator U = N->use_begin(), E = SDNode::use_end();
                                                    U != E; ++U) {
        if (!U->isMachineOpcode()) {
          continue;
        }
        const TargetRegisterClass *RC = getOperandRegClass(*U, U.getOperandNo());
        if (!RC) {
          continue;
        }
        if (SIRI->isSGPRClass(RC)) {
          UseVReg = false;
        }
      }
      switch(NumVectorElts) {
      case 1: RegClassID = UseVReg ? AMDGPU::VReg_32RegClassID :
                                     AMDGPU::SReg_32RegClassID;
        break;
      case 2: RegClassID = UseVReg ? AMDGPU::VReg_64RegClassID :
                                     AMDGPU::SReg_64RegClassID;
        break;
      case 4: RegClassID = UseVReg ? AMDGPU::VReg_128RegClassID :
                                     AMDGPU::SReg_128RegClassID;
        break;
      case 8: RegClassID = UseVReg ? AMDGPU::VReg_256RegClassID :
                                     AMDGPU::SReg_256RegClassID;
        break;
      case 16: RegClassID = UseVReg ? AMDGPU::VReg_512RegClassID :
                                      AMDGPU::SReg_512RegClassID;
        break;
      default: llvm_unreachable("Do not know how to lower this BUILD_VECTOR");
      }
    } else {
      // BUILD_VECTOR was lowered into an IMPLICIT_DEF + 4 INSERT_SUBREG
      // sequence that adds a 128-bit register copy when going through the
      // TwoAddressInstructions pass. We want to avoid 128-bit copies as much
      // as possible because they can't be bundled by our scheduler.
      switch(NumVectorElts) {
      case 2: RegClassID = AMDGPU::R600_Reg64RegClassID; break;
      case 4: RegClassID = AMDGPU::R600_Reg128RegClassID; break;
      default: llvm_unreachable("Do not know how to lower this BUILD_VECTOR");
      }
    }

    SDValue RegClass = CurDAG->getTargetConstant(RegClassID, MVT::i32);

    if (NumVectorElts == 1) {
      return CurDAG->SelectNodeTo(N, AMDGPU::COPY_TO_REGCLASS,
                                  VT.getVectorElementType(),
                                  N->getOperand(0), RegClass);
    }

    assert(NumVectorElts <= 16 && "Vectors with more than 16 elements not "
                                  "supported yet");
    // 16 = Max Num Vector Elements
    // 2 = 2 REG_SEQUENCE operands per element (value, subreg index)
    // 1 = Vector Register Class
    SDValue RegSeqArgs[16 * 2 + 1];

    RegSeqArgs[0] = CurDAG->getTargetConstant(RegClassID, MVT::i32);
    bool IsRegSeq = true;
    for (unsigned i = 0; i < N->getNumOperands(); i++) {
      // XXX: Why is this here?
      if (isa<RegisterSDNode>(N->getOperand(i))) {
        IsRegSeq = false;
        break;
      }
      RegSeqArgs[1 + (2 * i)] = N->getOperand(i);
      RegSeqArgs[1 + (2 * i) + 1] =
          CurDAG->getTargetConstant(TRI->getSubRegFromChannel(i), MVT::i32);
    }
    if (!IsRegSeq)
      break;
    return CurDAG->SelectNodeTo(N, AMDGPU::REG_SEQUENCE, N->getVTList(),
                                RegSeqArgs, 2 * N->getNumOperands() + 1);
  }
  case ISD::BUILD_PAIR: {
    SDValue RC, SubReg0, SubReg1;
    const AMDGPUSubtarget &ST = TM.getSubtarget<AMDGPUSubtarget>();
    if (ST.getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS) {
      break;
    }
    if (N->getValueType(0) == MVT::i128) {
      RC = CurDAG->getTargetConstant(AMDGPU::SReg_128RegClassID, MVT::i32);
      SubReg0 = CurDAG->getTargetConstant(AMDGPU::sub0_sub1, MVT::i32);
      SubReg1 = CurDAG->getTargetConstant(AMDGPU::sub2_sub3, MVT::i32);
    } else if (N->getValueType(0) == MVT::i64) {
      RC = CurDAG->getTargetConstant(AMDGPU::VSrc_64RegClassID, MVT::i32);
      SubReg0 = CurDAG->getTargetConstant(AMDGPU::sub0, MVT::i32);
      SubReg1 = CurDAG->getTargetConstant(AMDGPU::sub1, MVT::i32);
    } else {
      llvm_unreachable("Unhandled value type for BUILD_PAIR");
    }
    const SDValue Ops[] = { RC, N->getOperand(0), SubReg0,
                            N->getOperand(1), SubReg1 };
    return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE,
                                  SDLoc(N), N->getValueType(0), Ops);
  }

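  // 64-bit constants that cannot be encoded as inline immediates are
  // materialized as two S_MOV_B32s of the low and high halves, combined into
  // an SReg_64 with a REG_SEQUENCE.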
  case ISD::Constant:
  case ISD::ConstantFP: {
    const AMDGPUSubtarget &ST = TM.getSubtarget<AMDGPUSubtarget>();
    if (ST.getGeneration() < AMDGPUSubtarget::SOUTHERN_ISLANDS ||
        N->getValueType(0).getSizeInBits() != 64 || isInlineImmediate(N))
      break;

    uint64_t Imm;
    if (ConstantFPSDNode *FP = dyn_cast<ConstantFPSDNode>(N))
      Imm = FP->getValueAPF().bitcastToAPInt().getZExtValue();
    else {
      ConstantSDNode *C = cast<ConstantSDNode>(N);
      Imm = C->getZExtValue();
    }

    SDNode *Lo = CurDAG->getMachineNode(AMDGPU::S_MOV_B32, SDLoc(N), MVT::i32,
                                CurDAG->getConstant(Imm & 0xFFFFFFFF, MVT::i32));
    SDNode *Hi = CurDAG->getMachineNode(AMDGPU::S_MOV_B32, SDLoc(N), MVT::i32,
                                CurDAG->getConstant(Imm >> 32, MVT::i32));
    const SDValue Ops[] = {
      CurDAG->getTargetConstant(AMDGPU::SReg_64RegClassID, MVT::i32),
      SDValue(Lo, 0), CurDAG->getTargetConstant(AMDGPU::sub0, MVT::i32),
      SDValue(Hi, 0), CurDAG->getTargetConstant(AMDGPU::sub1, MVT::i32)
    };

    return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, SDLoc(N),
                                  N->getValueType(0), Ops);
  }

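  // REGISTER_LOAD/REGISTER_STORE are only selected here for SI; they become
  // the SI_RegisterLoad / SI_RegisterStorePseudo pseudo instructions with an
  // indirect base register plus immediate offset operand pair.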
  case AMDGPUISD::REGISTER_LOAD: {
    const AMDGPUSubtarget &ST = TM.getSubtarget<AMDGPUSubtarget>();
    if (ST.getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS)
      break;
    SDValue Addr, Offset;

    SelectADDRIndirect(N->getOperand(1), Addr, Offset);
    const SDValue Ops[] = {
      Addr,
      Offset,
      CurDAG->getTargetConstant(0, MVT::i32),
      N->getOperand(0),
    };
    return CurDAG->getMachineNode(AMDGPU::SI_RegisterLoad, SDLoc(N),
                                  CurDAG->getVTList(MVT::i32, MVT::i64,
                                                    MVT::Other),
                                  Ops);
  }
  case AMDGPUISD::REGISTER_STORE: {
    const AMDGPUSubtarget &ST = TM.getSubtarget<AMDGPUSubtarget>();
    if (ST.getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS)
      break;
    SDValue Addr, Offset;
    SelectADDRIndirect(N->getOperand(2), Addr, Offset);
    const SDValue Ops[] = {
      N->getOperand(1),
      Addr,
      Offset,
      CurDAG->getTargetConstant(0, MVT::i32),
      N->getOperand(0),
    };
    return CurDAG->getMachineNode(AMDGPU::SI_RegisterStorePseudo, SDLoc(N),
                                  CurDAG->getVTList(MVT::Other),
                                  Ops);
  }
  }
  return SelectCode(N);
}

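// Returns true if \p ptr is non-null and its pointer type is in address space
// \p addrspace. This backs the address-space predicates below.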
bool AMDGPUDAGToDAGISel::checkType(const Value *ptr, unsigned int addrspace) {
  if (!ptr) {
    return false;
  }
  Type *ptrType = ptr->getType();
  return cast<PointerType>(ptrType)->getAddressSpace() == addrspace;
}

bool AMDGPUDAGToDAGISel::isGlobalStore(const StoreSDNode *N) {
  return checkType(N->getSrcValue(), AMDGPUAS::GLOBAL_ADDRESS);
}

bool AMDGPUDAGToDAGISel::isPrivateStore(const StoreSDNode *N) {
  return (!checkType(N->getSrcValue(), AMDGPUAS::LOCAL_ADDRESS)
          && !checkType(N->getSrcValue(), AMDGPUAS::GLOBAL_ADDRESS)
          && !checkType(N->getSrcValue(), AMDGPUAS::REGION_ADDRESS));
}

bool AMDGPUDAGToDAGISel::isLocalStore(const StoreSDNode *N) {
  return checkType(N->getSrcValue(), AMDGPUAS::LOCAL_ADDRESS);
}

bool AMDGPUDAGToDAGISel::isRegionStore(const StoreSDNode *N) {
  return checkType(N->getSrcValue(), AMDGPUAS::REGION_ADDRESS);
}

bool AMDGPUDAGToDAGISel::isConstantLoad(const LoadSDNode *N, int CbId) const {
  if (CbId == -1) {
    return checkType(N->getSrcValue(), AMDGPUAS::CONSTANT_ADDRESS);
  }
  return checkType(N->getSrcValue(), AMDGPUAS::CONSTANT_BUFFER_0 + CbId);
}

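// A constant-address load is handled as a global load on pre-SI targets, or
// when the access is narrower than 32 bits; otherwise it is left to the
// constant-load path.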
bool AMDGPUDAGToDAGISel::isGlobalLoad(const LoadSDNode *N) const {
  if (N->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS) {
    const AMDGPUSubtarget &ST = TM.getSubtarget<AMDGPUSubtarget>();
    if (ST.getGeneration() < AMDGPUSubtarget::SOUTHERN_ISLANDS ||
        N->getMemoryVT().bitsLT(MVT::i32)) {
      return true;
    }
  }
  return checkType(N->getSrcValue(), AMDGPUAS::GLOBAL_ADDRESS);
}

bool AMDGPUDAGToDAGISel::isParamLoad(const LoadSDNode *N) const {
  return checkType(N->getSrcValue(), AMDGPUAS::PARAM_I_ADDRESS);
}

bool AMDGPUDAGToDAGISel::isLocalLoad(const LoadSDNode *N) const {
  return checkType(N->getSrcValue(), AMDGPUAS::LOCAL_ADDRESS);
}

bool AMDGPUDAGToDAGISel::isRegionLoad(const LoadSDNode *N) const {
  return checkType(N->getSrcValue(), AMDGPUAS::REGION_ADDRESS);
}

bool AMDGPUDAGToDAGISel::isCPLoad(const LoadSDNode *N) const {
  MachineMemOperand *MMO = N->getMemOperand();
  if (checkType(N->getSrcValue(), AMDGPUAS::PRIVATE_ADDRESS)) {
    if (MMO) {
      const Value *V = MMO->getValue();
      const PseudoSourceValue *PSV = dyn_cast<PseudoSourceValue>(V);
      if (PSV && PSV == PseudoSourceValue::getConstantPool()) {
        return true;
      }
    }
  }
  return false;
}

bool AMDGPUDAGToDAGISel::isPrivateLoad(const LoadSDNode *N) const {
  if (checkType(N->getSrcValue(), AMDGPUAS::PRIVATE_ADDRESS)) {
    // Check to make sure we are not a constant pool load or a constant load
    // that is marked as a private load.
    if (isCPLoad(N) || isConstantLoad(N, -1)) {
      return false;
    }
  }
  if (!checkType(N->getSrcValue(), AMDGPUAS::LOCAL_ADDRESS)
      && !checkType(N->getSrcValue(), AMDGPUAS::GLOBAL_ADDRESS)
      && !checkType(N->getSrcValue(), AMDGPUAS::REGION_ADDRESS)
      && !checkType(N->getSrcValue(), AMDGPUAS::CONSTANT_ADDRESS)
      && !checkType(N->getSrcValue(), AMDGPUAS::PARAM_D_ADDRESS)
      && !checkType(N->getSrcValue(), AMDGPUAS::PARAM_I_ADDRESS)) {
    return true;
  }
  return false;
}

const char *AMDGPUDAGToDAGISel::getPassName() const {
  return "AMDGPU DAG->DAG Pattern Instruction Selection";
}

#ifdef DEBUGTMP
#undef INT64_C
#endif
#undef DEBUGTMP

//===----------------------------------------------------------------------===//
// Complex Patterns
//===----------------------------------------------------------------------===//

bool AMDGPUDAGToDAGISel::SelectGlobalValueConstantOffset(SDValue Addr,
                                                         SDValue &IntPtr) {
  if (ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(Addr)) {
    IntPtr = CurDAG->getIntPtrConstant(Cst->getZExtValue() / 4, true);
    return true;
  }
  return false;
}

bool AMDGPUDAGToDAGISel::SelectGlobalValueVariableOffset(SDValue Addr,
                                                         SDValue &BaseReg,
                                                         SDValue &Offset) {
  if (!isa<ConstantSDNode>(Addr)) {
    BaseReg = Addr;
    Offset = CurDAG->getIntPtrConstant(0, true);
    return true;
  }
  return false;
}

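// Match a base register plus a 16-bit signed immediate offset, the addressing
// form used by the VTX_READ patterns; a fully constant address is folded into
// the offset field with the zero register as the base.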
bool AMDGPUDAGToDAGISel::SelectADDRVTX_READ(SDValue Addr, SDValue &Base,
                                            SDValue &Offset) {
  ConstantSDNode *IMMOffset;

  if (Addr.getOpcode() == ISD::ADD
      && (IMMOffset = dyn_cast<ConstantSDNode>(Addr.getOperand(1)))
      && isInt<16>(IMMOffset->getZExtValue())) {

    Base = Addr.getOperand(0);
    Offset = CurDAG->getTargetConstant(IMMOffset->getZExtValue(), MVT::i32);
    return true;
  // If the pointer address is constant, we can move it to the offset field.
  } else if ((IMMOffset = dyn_cast<ConstantSDNode>(Addr))
             && isInt<16>(IMMOffset->getZExtValue())) {
    Base = CurDAG->getCopyFromReg(CurDAG->getEntryNode(),
                                  SDLoc(CurDAG->getEntryNode()),
                                  AMDGPU::ZERO, MVT::i32);
    Offset = CurDAG->getTargetConstant(IMMOffset->getZExtValue(), MVT::i32);
    return true;
  }

  // Default case, no offset.
  Base = Addr;
  Offset = CurDAG->getTargetConstant(0, MVT::i32);
  return true;
}

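// Match an indirect-addressing operand: a bare constant is addressed relative
// to INDIRECT_BASE_ADDR, an ADD/OR with a constant RHS splits into base plus
// offset, and anything else becomes the base with a zero offset.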
bool AMDGPUDAGToDAGISel::SelectADDRIndirect(SDValue Addr, SDValue &Base,
                                            SDValue &Offset) {
  ConstantSDNode *C;

  if ((C = dyn_cast<ConstantSDNode>(Addr))) {
    Base = CurDAG->getRegister(AMDGPU::INDIRECT_BASE_ADDR, MVT::i32);
    Offset = CurDAG->getTargetConstant(C->getZExtValue(), MVT::i32);
  } else if ((Addr.getOpcode() == ISD::ADD || Addr.getOpcode() == ISD::OR) &&
             (C = dyn_cast<ConstantSDNode>(Addr.getOperand(1)))) {
    Base = Addr.getOperand(0);
    Offset = CurDAG->getTargetConstant(C->getZExtValue(), MVT::i32);
  } else {
    Base = Addr;
    Offset = CurDAG->getTargetConstant(0, MVT::i32);
  }

  return true;
}

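// Used by the 24-bit operand selectors below: since only the low 24 bits of
// the operand are demanded, SimplifyDemandedBits can often strip masks and
// extensions feeding it. Repeats until the value stops changing.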
SDValue AMDGPUDAGToDAGISel::SimplifyI24(SDValue &Op) {
  APInt Demanded = APInt(32, 0x00FFFFFF);
  APInt KnownZero, KnownOne;
  TargetLowering::TargetLoweringOpt TLO(*CurDAG, true, true);
  const TargetLowering *TLI = getTargetLowering();
  if (TLI->SimplifyDemandedBits(Op, Demanded, KnownZero, KnownOne, TLO)) {
    CurDAG->ReplaceAllUsesWith(Op, TLO.New);
    CurDAG->RepositionNode(Op.getNode(), TLO.New.getNode());
    return SimplifyI24(TLO.New);
  } else {
    return Op;
  }
}

bool AMDGPUDAGToDAGISel::SelectI24(SDValue Op, SDValue &I24) {

  assert(Op.getValueType() == MVT::i32);

  if (CurDAG->ComputeNumSignBits(Op) == 9) {
    I24 = SimplifyI24(Op);
    return true;
  }
  return false;
}

bool AMDGPUDAGToDAGISel::SelectU24(SDValue Op, SDValue &U24) {
  APInt KnownZero;
  APInt KnownOne;
  CurDAG->ComputeMaskedBits(Op, KnownZero, KnownOne);

  assert(Op.getValueType() == MVT::i32);

  // ANY_EXTEND and EXTLOAD operations can only be done on types smaller than
  // i32. These smaller types are legal to use with the i24 instructions.
  if ((KnownZero & APInt(KnownZero.getBitWidth(), 0xFF000000)) == 0xFF000000 ||
      Op.getOpcode() == ISD::ANY_EXTEND ||
      ISD::isEXTLoad(Op.getNode())) {
    U24 = SimplifyI24(Op);
    return true;
  }
  return false;
}

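// After selection, repeatedly let the target fold machine nodes via
// PostISelFolding until the DAG reaches a fixed point, dropping dead nodes
// after each round.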
void AMDGPUDAGToDAGISel::PostprocessISelDAG() {
  const AMDGPUTargetLowering &Lowering =
      *static_cast<const AMDGPUTargetLowering *>(getTargetLowering());
  bool IsModified = false;
  do {
    IsModified = false;
    // Go over all selected nodes and try to fold them a bit more.
    for (SelectionDAG::allnodes_iterator I = CurDAG->allnodes_begin(),
         E = CurDAG->allnodes_end(); I != E; ++I) {

      SDNode *Node = I;

      MachineSDNode *MachineNode = dyn_cast<MachineSDNode>(I);
      if (!MachineNode)
        continue;

      SDNode *ResNode = Lowering.PostISelFolding(MachineNode, *CurDAG);
      if (ResNode != Node) {
        ReplaceUses(Node, ResNode);
        IsModified = true;
      }
    }
    CurDAG->RemoveDeadNodes();
  } while (IsModified);
}