//===-- AMDILISelDAGToDAG.cpp - A dag to dag inst selector for AMDIL ------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//==-----------------------------------------------------------------------===//
//
/// \file
/// \brief Defines an instruction selector for the AMDGPU target.
//
//===----------------------------------------------------------------------===//
#include "AMDGPUInstrInfo.h"
#include "AMDGPUISelLowering.h" // For AMDGPUISD
#include "AMDGPURegisterInfo.h"
#include "R600InstrInfo.h"
#include "SIISelLowering.h"
#include "llvm/ADT/ValueMap.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/Support/Compiler.h"
#include <list>
#include <queue>

using namespace llvm;

//===----------------------------------------------------------------------===//
// Instruction Selector Implementation
//===----------------------------------------------------------------------===//

namespace {
/// AMDGPU specific code to select AMDGPU machine instructions for
/// SelectionDAG operations.
class AMDGPUDAGToDAGISel : public SelectionDAGISel {
  // Subtarget - Keep a pointer to the AMDGPU Subtarget around so that we can
  // make the right decision when generating code for different targets.
  const AMDGPUSubtarget &Subtarget;
public:
  AMDGPUDAGToDAGISel(TargetMachine &TM);
  virtual ~AMDGPUDAGToDAGISel();

  SDNode *Select(SDNode *N);
  virtual const char *getPassName() const;
  virtual void PostprocessISelDAG();

private:
  inline SDValue getSmallIPtrImm(unsigned Imm);
  bool FoldOperand(SDValue &Src, SDValue &Sel, SDValue &Neg, SDValue &Abs,
                   const R600InstrInfo *TII);
  bool FoldOperands(unsigned, const R600InstrInfo *, std::vector<SDValue> &);
  bool FoldDotOperands(unsigned, const R600InstrInfo *, std::vector<SDValue> &);

  // Complex pattern selectors
  bool SelectADDRParam(SDValue Addr, SDValue& R1, SDValue& R2);
  bool SelectADDR(SDValue N, SDValue &R1, SDValue &R2);
  bool SelectADDR64(SDValue N, SDValue &R1, SDValue &R2);
  SDValue SimplifyI24(SDValue &Op);
  bool SelectI24(SDValue Addr, SDValue &Op);
  bool SelectU24(SDValue Addr, SDValue &Op);

  static bool checkType(const Value *ptr, unsigned int addrspace);

  static bool isGlobalStore(const StoreSDNode *N);
  static bool isPrivateStore(const StoreSDNode *N);
  static bool isLocalStore(const StoreSDNode *N);
  static bool isRegionStore(const StoreSDNode *N);

  bool isCPLoad(const LoadSDNode *N) const;
  bool isConstantLoad(const LoadSDNode *N, int cbID) const;
  bool isGlobalLoad(const LoadSDNode *N) const;
  bool isParamLoad(const LoadSDNode *N) const;
  bool isPrivateLoad(const LoadSDNode *N) const;
  bool isLocalLoad(const LoadSDNode *N) const;
  bool isRegionLoad(const LoadSDNode *N) const;

  const TargetRegisterClass *getOperandRegClass(SDNode *N, unsigned OpNo) const;
  bool SelectGlobalValueConstantOffset(SDValue Addr, SDValue& IntPtr);
  bool SelectGlobalValueVariableOffset(SDValue Addr,
      SDValue &BaseReg, SDValue& Offset);
  bool SelectADDRVTX_READ(SDValue Addr, SDValue &Base, SDValue &Offset);
  bool SelectADDRIndirect(SDValue Addr, SDValue &Base, SDValue &Offset);

  // Include the pieces autogenerated from the target description.
#include "AMDGPUGenDAGISel.inc"
};
} // end anonymous namespace

/// \brief This pass converts a legalized DAG into an AMDGPU-specific
/// DAG, ready for instruction scheduling.
FunctionPass *llvm::createAMDGPUISelDag(TargetMachine &TM) {
  return new AMDGPUDAGToDAGISel(TM);
}

AMDGPUDAGToDAGISel::AMDGPUDAGToDAGISel(TargetMachine &TM)
  : SelectionDAGISel(TM), Subtarget(TM.getSubtarget<AMDGPUSubtarget>()) {
}

AMDGPUDAGToDAGISel::~AMDGPUDAGToDAGISel() {
}

/// \brief Determine the register class for \p OpNo
/// \returns The register class of the virtual register that will be used for
/// the given operand number \p OpNo or NULL if the register class cannot be
/// determined.
const TargetRegisterClass *AMDGPUDAGToDAGISel::getOperandRegClass(SDNode *N,
                                                          unsigned OpNo) const {
  if (!N->isMachineOpcode()) {
    return NULL;
  }
  switch (N->getMachineOpcode()) {
  default: {
    const MCInstrDesc &Desc = TM.getInstrInfo()->get(N->getMachineOpcode());
    int RegClass = Desc.OpInfo[Desc.getNumDefs() + OpNo].RegClass;
    if (RegClass == -1) {
      return NULL;
    }
    return TM.getRegisterInfo()->getRegClass(RegClass);
  }
  case AMDGPU::REG_SEQUENCE: {
    const TargetRegisterClass *SuperRC = TM.getRegisterInfo()->getRegClass(
                      cast<ConstantSDNode>(N->getOperand(0))->getZExtValue());
    unsigned SubRegIdx =
            dyn_cast<ConstantSDNode>(N->getOperand(OpNo + 1))->getZExtValue();
    return TM.getRegisterInfo()->getSubClassWithSubReg(SuperRC, SubRegIdx);
  }
  }
}

SDValue AMDGPUDAGToDAGISel::getSmallIPtrImm(unsigned int Imm) {
  return CurDAG->getTargetConstant(Imm, MVT::i32);
}

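// Split a parameter address into a (base, offset) pair: frame indices become
// (TargetFrameIndex, 0), an ISD::ADD is split into its two operands, and
// anything else is returned as (Addr, 0).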
bool AMDGPUDAGToDAGISel::SelectADDRParam(
    SDValue Addr, SDValue& R1, SDValue& R2) {

  if (Addr.getOpcode() == ISD::FrameIndex) {
    if (FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
      R1 = CurDAG->getTargetFrameIndex(FIN->getIndex(), MVT::i32);
      R2 = CurDAG->getTargetConstant(0, MVT::i32);
    } else {
      R1 = Addr;
      R2 = CurDAG->getTargetConstant(0, MVT::i32);
    }
  } else if (Addr.getOpcode() == ISD::ADD) {
    R1 = Addr.getOperand(0);
    R2 = Addr.getOperand(1);
  } else {
    R1 = Addr;
    R2 = CurDAG->getTargetConstant(0, MVT::i32);
  }
  return true;
}

bool AMDGPUDAGToDAGISel::SelectADDR(SDValue Addr, SDValue& R1, SDValue& R2) {
  if (Addr.getOpcode() == ISD::TargetExternalSymbol ||
      Addr.getOpcode() == ISD::TargetGlobalAddress) {
    return false;
  }
  return SelectADDRParam(Addr, R1, R2);
}

bool AMDGPUDAGToDAGISel::SelectADDR64(SDValue Addr, SDValue& R1, SDValue& R2) {
  if (Addr.getOpcode() == ISD::TargetExternalSymbol ||
      Addr.getOpcode() == ISD::TargetGlobalAddress) {
    return false;
  }

  if (Addr.getOpcode() == ISD::FrameIndex) {
    if (FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
      R1 = CurDAG->getTargetFrameIndex(FIN->getIndex(), MVT::i64);
      R2 = CurDAG->getTargetConstant(0, MVT::i64);
    } else {
      R1 = Addr;
      R2 = CurDAG->getTargetConstant(0, MVT::i64);
    }
  } else if (Addr.getOpcode() == ISD::ADD) {
    R1 = Addr.getOperand(0);
    R2 = Addr.getOperand(1);
  } else {
    R1 = Addr;
    R2 = CurDAG->getTargetConstant(0, MVT::i64);
  }
  return true;
}

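// Custom selection entry point.  Nodes that are not handled by one of the
// cases below fall through to the TableGen-generated SelectCode() at the end
// of this function.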
SDNode *AMDGPUDAGToDAGISel::Select(SDNode *N) {
  const R600InstrInfo *TII =
                      static_cast<const R600InstrInfo*>(TM.getInstrInfo());
  unsigned int Opc = N->getOpcode();
  if (N->isMachineOpcode()) {
    return NULL; // Already selected.
  }
  switch (Opc) {
  default: break;
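  // Fold a CONST_ADDRESS node directly into its users: the use's source
  // register is rewritten to ALU_CONST and the matching *_sel operand
  // receives the constant-buffer offset, provided the instruction still fits
  // the hardware's constant-read limitations.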
  case AMDGPUISD::CONST_ADDRESS: {
    for (SDNode::use_iterator I = N->use_begin(), Next = llvm::next(I);
                              I != SDNode::use_end(); I = Next) {
      Next = llvm::next(I);
      if (!I->isMachineOpcode()) {
        continue;
      }
      unsigned Opcode = I->getMachineOpcode();
      bool HasDst = TII->getOperandIdx(Opcode, AMDGPU::OpName::dst) > -1;
      int SrcIdx = I.getOperandNo();
      int SelIdx;
      // Unlike MachineInstrs, SDNodes do not have results in their operand
      // list, so we need to increment the SrcIdx, since
      // R600InstrInfo::getOperandIdx is based on the MachineInstr indices.
      if (HasDst) {
        SrcIdx++;
      }

      SelIdx = TII->getSelIdx(I->getMachineOpcode(), SrcIdx);
      if (SelIdx < 0) {
        continue;
      }

      SDValue CstOffset;
      if (N->getValueType(0).isVector() ||
          !SelectGlobalValueConstantOffset(N->getOperand(0), CstOffset))
        continue;

      // Gather constant values
      int SrcIndices[] = {
        TII->getOperandIdx(Opcode, AMDGPU::OpName::src0),
        TII->getOperandIdx(Opcode, AMDGPU::OpName::src1),
        TII->getOperandIdx(Opcode, AMDGPU::OpName::src2),
        TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_X),
        TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_Y),
        TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_Z),
        TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_W),
        TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_X),
        TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_Y),
        TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_Z),
        TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_W)
      };
      std::vector<unsigned> Consts;
      for (unsigned i = 0; i < sizeof(SrcIndices) / sizeof(int); i++) {
        int OtherSrcIdx = SrcIndices[i];
        int OtherSelIdx = TII->getSelIdx(Opcode, OtherSrcIdx);
        if (OtherSrcIdx < 0 || OtherSelIdx < 0) {
          continue;
        }
        if (HasDst) {
          OtherSrcIdx--;
          OtherSelIdx--;
        }
        if (RegisterSDNode *Reg =
            dyn_cast<RegisterSDNode>(I->getOperand(OtherSrcIdx))) {
          if (Reg->getReg() == AMDGPU::ALU_CONST) {
            ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(I->getOperand(OtherSelIdx));
            Consts.push_back(Cst->getZExtValue());
          }
        }
      }

      ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(CstOffset);
      Consts.push_back(Cst->getZExtValue());
      if (!TII->fitsConstReadLimitations(Consts))
        continue;

      // Convert back to SDNode indices
      if (HasDst) {
        SrcIdx--;
        SelIdx--;
      }
      std::vector<SDValue> Ops;
      for (int i = 0, e = I->getNumOperands(); i != e; ++i) {
        if (i == SrcIdx) {
          Ops.push_back(CurDAG->getRegister(AMDGPU::ALU_CONST, MVT::f32));
        } else if (i == SelIdx) {
          Ops.push_back(CstOffset);
        } else {
          Ops.push_back(I->getOperand(i));
        }
      }
      CurDAG->UpdateNodeOperands(*I, Ops.data(), Ops.size());
    }
    break;
  }
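  // BUILD_VECTOR is selected to a REG_SEQUENCE over the appropriate register
  // class: a VGPR or SGPR class on SI depending on how the users consume the
  // value, R600_Reg64/R600_Reg128 on earlier generations.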
  case ISD::BUILD_VECTOR: {
    unsigned RegClassID;
    const AMDGPUSubtarget &ST = TM.getSubtarget<AMDGPUSubtarget>();
    const AMDGPURegisterInfo *TRI =
                   static_cast<const AMDGPURegisterInfo*>(TM.getRegisterInfo());
    const SIRegisterInfo *SIRI =
                   static_cast<const SIRegisterInfo*>(TM.getRegisterInfo());
    EVT VT = N->getValueType(0);
    unsigned NumVectorElts = VT.getVectorNumElements();
    assert(VT.getVectorElementType().bitsEq(MVT::i32));
    if (ST.getGeneration() >= AMDGPUSubtarget::SOUTHERN_ISLANDS) {
      bool UseVReg = true;
      for (SDNode::use_iterator U = N->use_begin(), E = SDNode::use_end();
                                                    U != E; ++U) {
        if (!U->isMachineOpcode()) {
          continue;
        }
        const TargetRegisterClass *RC = getOperandRegClass(*U, U.getOperandNo());
        if (!RC) {
          continue;
        }
        if (SIRI->isSGPRClass(RC)) {
          UseVReg = false;
        }
      }
      switch(NumVectorElts) {
      case 1: RegClassID = UseVReg ? AMDGPU::VReg_32RegClassID :
                                     AMDGPU::SReg_32RegClassID;
        break;
      case 2: RegClassID = UseVReg ? AMDGPU::VReg_64RegClassID :
                                     AMDGPU::SReg_64RegClassID;
        break;
      case 4: RegClassID = UseVReg ? AMDGPU::VReg_128RegClassID :
                                     AMDGPU::SReg_128RegClassID;
        break;
      case 8: RegClassID = UseVReg ? AMDGPU::VReg_256RegClassID :
                                     AMDGPU::SReg_256RegClassID;
        break;
      case 16: RegClassID = UseVReg ? AMDGPU::VReg_512RegClassID :
                                      AMDGPU::SReg_512RegClassID;
        break;
      }
    } else {
      // BUILD_VECTOR was lowered into an IMPLICIT_DEF + 4 INSERT_SUBREG
      // that adds a 128-bit register copy when going through the
      // TwoAddressInstructions pass. We want to avoid 128-bit copies as much
      // as possible because they can't be bundled by our scheduler.
      switch(NumVectorElts) {
      case 2: RegClassID = AMDGPU::R600_Reg64RegClassID; break;
      case 4: RegClassID = AMDGPU::R600_Reg128RegClassID; break;
      default: llvm_unreachable("Do not know how to lower this BUILD_VECTOR");
      }
    }

    SDValue RegClass = CurDAG->getTargetConstant(RegClassID, MVT::i32);

    if (NumVectorElts == 1) {
      return CurDAG->SelectNodeTo(N, AMDGPU::COPY_TO_REGCLASS,
                                  VT.getVectorElementType(),
                                  N->getOperand(0), RegClass);
    }

    assert(NumVectorElts <= 16 && "Vectors with more than 16 elements not "
                                  "supported yet");
    // 16 = Max Num Vector Elements
    //  2 = 2 REG_SEQUENCE operands per element (value, subreg index)
    //  1 = Vector Register Class
    SDValue RegSeqArgs[16 * 2 + 1];

    RegSeqArgs[0] = CurDAG->getTargetConstant(RegClassID, MVT::i32);
    bool IsRegSeq = true;
    for (unsigned i = 0; i < N->getNumOperands(); i++) {
      // XXX: Why is this here?
      if (dyn_cast<RegisterSDNode>(N->getOperand(i))) {
        IsRegSeq = false;
        break;
      }
      RegSeqArgs[1 + (2 * i)] = N->getOperand(i);
      RegSeqArgs[1 + (2 * i) + 1] =
              CurDAG->getTargetConstant(TRI->getSubRegFromChannel(i), MVT::i32);
    }
    if (!IsRegSeq)
      break;
    return CurDAG->SelectNodeTo(N, AMDGPU::REG_SEQUENCE, N->getVTList(),
                                RegSeqArgs, 2 * N->getNumOperands() + 1);
  }
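  // On SI, a BUILD_PAIR of two 32-bit (or two 64-bit) values is selected to a
  // REG_SEQUENCE; pre-SI targets keep the generic expansion.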
  case ISD::BUILD_PAIR: {
    SDValue RC, SubReg0, SubReg1;
    const AMDGPUSubtarget &ST = TM.getSubtarget<AMDGPUSubtarget>();
    if (ST.getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS) {
      break;
    }
    if (N->getValueType(0) == MVT::i128) {
      RC = CurDAG->getTargetConstant(AMDGPU::SReg_128RegClassID, MVT::i32);
      SubReg0 = CurDAG->getTargetConstant(AMDGPU::sub0_sub1, MVT::i32);
      SubReg1 = CurDAG->getTargetConstant(AMDGPU::sub2_sub3, MVT::i32);
    } else if (N->getValueType(0) == MVT::i64) {
      RC = CurDAG->getTargetConstant(AMDGPU::VSrc_64RegClassID, MVT::i32);
      SubReg0 = CurDAG->getTargetConstant(AMDGPU::sub0, MVT::i32);
      SubReg1 = CurDAG->getTargetConstant(AMDGPU::sub1, MVT::i32);
    } else {
      llvm_unreachable("Unhandled value type for BUILD_PAIR");
    }
    const SDValue Ops[] = { RC, N->getOperand(0), SubReg0,
                            N->getOperand(1), SubReg1 };
    return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE,
                                  SDLoc(N), N->getValueType(0), Ops);
  }

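  // On pre-SI targets, fold scalar constants into their users: 0.0/0.5/1.0
  // and 0/1 map to the dedicated ZERO/HALF/ONE/ONE_INT registers, everything
  // else goes through ALU_LITERAL_X with the value placed in the user's
  // 'literal' operand.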
  case ISD::ConstantFP:
  case ISD::Constant: {
    const AMDGPUSubtarget &ST = TM.getSubtarget<AMDGPUSubtarget>();
    // XXX: Custom immediate lowering not implemented yet. Instead we use
    // pseudo instructions defined in SIInstructions.td
    if (ST.getGeneration() > AMDGPUSubtarget::NORTHERN_ISLANDS) {
      break;
    }

    uint64_t ImmValue = 0;
    unsigned ImmReg = AMDGPU::ALU_LITERAL_X;

    if (N->getOpcode() == ISD::ConstantFP) {
      // XXX: 64-bit Immediates not supported yet
      assert(N->getValueType(0) != MVT::f64);

      ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N);
      APFloat Value = C->getValueAPF();
      float FloatValue = Value.convertToFloat();
      if (FloatValue == 0.0) {
        ImmReg = AMDGPU::ZERO;
      } else if (FloatValue == 0.5) {
        ImmReg = AMDGPU::HALF;
      } else if (FloatValue == 1.0) {
        ImmReg = AMDGPU::ONE;
      } else {
        ImmValue = Value.bitcastToAPInt().getZExtValue();
      }
    } else {
      // XXX: 64-bit Immediates not supported yet
      assert(N->getValueType(0) != MVT::i64);

      ConstantSDNode *C = dyn_cast<ConstantSDNode>(N);
      if (C->getZExtValue() == 0) {
        ImmReg = AMDGPU::ZERO;
      } else if (C->getZExtValue() == 1) {
        ImmReg = AMDGPU::ONE_INT;
      } else {
        ImmValue = C->getZExtValue();
      }
    }

    for (SDNode::use_iterator Use = N->use_begin(), Next = llvm::next(Use);
                              Use != SDNode::use_end(); Use = Next) {
      Next = llvm::next(Use);
      std::vector<SDValue> Ops;
      for (unsigned i = 0; i < Use->getNumOperands(); ++i) {
        Ops.push_back(Use->getOperand(i));
      }

      if (!Use->isMachineOpcode()) {
        if (ImmReg == AMDGPU::ALU_LITERAL_X) {
          // We can only use literal constants (e.g. AMDGPU::ZERO,
          // AMDGPU::ONE, etc) in machine opcodes.
          continue;
        }
      } else {
        if (!TII->isALUInstr(Use->getMachineOpcode()) ||
            (TII->get(Use->getMachineOpcode()).TSFlags &
             R600_InstFlag::VECTOR)) {
          continue;
        }

        int ImmIdx = TII->getOperandIdx(Use->getMachineOpcode(),
                                        AMDGPU::OpName::literal);
        if (ImmIdx == -1) {
          continue;
        }

        if (TII->getOperandIdx(Use->getMachineOpcode(),
                               AMDGPU::OpName::dst) != -1) {
          // subtract one from ImmIdx, because the DST operand is usually index
          // 0 for MachineInstrs, but we have no DST in the Ops vector.
          ImmIdx--;
        }

        // Check that we aren't already using an immediate.
        // XXX: It's possible for an instruction to have more than one
        // immediate operand, but this is not supported yet.
        if (ImmReg == AMDGPU::ALU_LITERAL_X) {
          ConstantSDNode *C = dyn_cast<ConstantSDNode>(Use->getOperand(ImmIdx));
          assert(C);

          if (C->getZExtValue() != 0) {
            // This instruction is already using an immediate.
            continue;
          }

          // Set the immediate value
          Ops[ImmIdx] = CurDAG->getTargetConstant(ImmValue, MVT::i32);
        }
      }
      // Set the immediate register
      Ops[Use.getOperandNo()] = CurDAG->getRegister(ImmReg, MVT::i32);

      CurDAG->UpdateNodeOperands(*Use, Ops.data(), Use->getNumOperands());
    }
    break;
  }
  }
  SDNode *Result = SelectCode(N);

  // Fold operands of selected node

  const AMDGPUSubtarget &ST = TM.getSubtarget<AMDGPUSubtarget>();
  if (ST.getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS) {
    const R600InstrInfo *TII =
        static_cast<const R600InstrInfo*>(TM.getInstrInfo());
    if (Result && Result->isMachineOpcode() &&
        Result->getMachineOpcode() == AMDGPU::DOT_4) {
      bool IsModified = false;
      do {
        std::vector<SDValue> Ops;
        for (SDNode::op_iterator I = Result->op_begin(), E = Result->op_end();
             I != E; ++I)
          Ops.push_back(*I);
        IsModified = FoldDotOperands(Result->getMachineOpcode(), TII, Ops);
        if (IsModified) {
          Result = CurDAG->UpdateNodeOperands(Result, Ops.data(), Ops.size());
        }
      } while (IsModified);
    }
    if (Result && Result->isMachineOpcode() &&
        !(TII->get(Result->getMachineOpcode()).TSFlags & R600_InstFlag::VECTOR)
        && TII->hasInstrModifiers(Result->getMachineOpcode())) {
      // Fold FNEG/FABS
      // TODO: Isel can generate multiple MachineInsts; we need to recursively
      // parse Result.
      bool IsModified = false;
      do {
        std::vector<SDValue> Ops;
        for (SDNode::op_iterator I = Result->op_begin(), E = Result->op_end();
             I != E; ++I)
          Ops.push_back(*I);
        IsModified = FoldOperands(Result->getMachineOpcode(), TII, Ops);
        if (IsModified) {
          Result = CurDAG->UpdateNodeOperands(Result, Ops.data(), Ops.size());
        }
      } while (IsModified);

      // If the node has a single use which is CLAMP_R600, fold it.
      if (Result->hasOneUse() && Result->isMachineOpcode()) {
        SDNode *PotentialClamp = *Result->use_begin();
        if (PotentialClamp->isMachineOpcode() &&
            PotentialClamp->getMachineOpcode() == AMDGPU::CLAMP_R600) {
          unsigned ClampIdx =
            TII->getOperandIdx(Result->getMachineOpcode(), AMDGPU::OpName::clamp);
          std::vector<SDValue> Ops;
          unsigned NumOp = Result->getNumOperands();
          for (unsigned i = 0; i < NumOp; ++i) {
            Ops.push_back(Result->getOperand(i));
          }
          Ops[ClampIdx - 1] = CurDAG->getTargetConstant(1, MVT::i32);
          Result = CurDAG->SelectNodeTo(PotentialClamp,
              Result->getMachineOpcode(), PotentialClamp->getVTList(),
              Ops.data(), NumOp);
        }
      }
    }
  }

  return Result;
}

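// Fold an FNEG or FABS feeding a source operand into the instruction's own
// neg/abs source modifiers (a BITCAST is simply looked through): the source
// is replaced by its operand and the matching modifier flag is set to 1.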
bool AMDGPUDAGToDAGISel::FoldOperand(SDValue &Src, SDValue &Sel, SDValue &Neg,
                                     SDValue &Abs, const R600InstrInfo *TII) {
  switch (Src.getOpcode()) {
  case ISD::FNEG:
    Src = Src.getOperand(0);
    Neg = CurDAG->getTargetConstant(1, MVT::i32);
    return true;
  case ISD::FABS:
    if (!Abs.getNode())
      return false;
    Src = Src.getOperand(0);
    Abs = CurDAG->getTargetConstant(1, MVT::i32);
    return true;
  case ISD::BITCAST:
    Src = Src.getOperand(0);
    return true;
  default:
    return false;
  }
}

bool AMDGPUDAGToDAGISel::FoldOperands(unsigned Opcode,
    const R600InstrInfo *TII, std::vector<SDValue> &Ops) {
  int OperandIdx[] = {
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src2)
  };
  int SelIdx[] = {
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_sel),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_sel),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src2_sel)
  };
  int NegIdx[] = {
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_neg),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_neg),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src2_neg)
  };
  int AbsIdx[] = {
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_abs),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_abs),
    -1
  };

  for (unsigned i = 0; i < 3; i++) {
    if (OperandIdx[i] < 0)
      return false;
    SDValue &Src = Ops[OperandIdx[i] - 1];
    SDValue &Sel = Ops[SelIdx[i] - 1];
    SDValue &Neg = Ops[NegIdx[i] - 1];
    SDValue FakeAbs;
    SDValue &Abs = (AbsIdx[i] > -1) ? Ops[AbsIdx[i] - 1] : FakeAbs;
    if (FoldOperand(Src, Sel, Neg, Abs, TII))
      return true;
  }
  return false;
}

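// Same folding as FoldOperands, but over the eight per-channel sources
// (src0_X..src1_W) of the DOT_4 instruction.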
bool AMDGPUDAGToDAGISel::FoldDotOperands(unsigned Opcode,
    const R600InstrInfo *TII, std::vector<SDValue> &Ops) {
  int OperandIdx[] = {
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_X),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_Y),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_Z),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_W),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_X),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_Y),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_Z),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_W)
  };
  int SelIdx[] = {
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_sel_X),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_sel_Y),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_sel_Z),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_sel_W),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_sel_X),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_sel_Y),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_sel_Z),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_sel_W)
  };
  int NegIdx[] = {
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_neg_X),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_neg_Y),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_neg_Z),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_neg_W),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_neg_X),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_neg_Y),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_neg_Z),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_neg_W)
  };
  int AbsIdx[] = {
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_abs_X),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_abs_Y),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_abs_Z),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_abs_W),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_abs_X),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_abs_Y),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_abs_Z),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_abs_W)
  };

  for (unsigned i = 0; i < 8; i++) {
    if (OperandIdx[i] < 0)
      return false;
    SDValue &Src = Ops[OperandIdx[i] - 1];
    SDValue &Sel = Ops[SelIdx[i] - 1];
    SDValue &Neg = Ops[NegIdx[i] - 1];
    SDValue &Abs = Ops[AbsIdx[i] - 1];
    if (FoldOperand(Src, Sel, Neg, Abs, TII))
      return true;
  }
  return false;
}

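// Return true if the pointer value lives in the given address space.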
bool AMDGPUDAGToDAGISel::checkType(const Value *ptr, unsigned int addrspace) {
  if (!ptr) {
    return false;
  }
  Type *ptrType = ptr->getType();
  return dyn_cast<PointerType>(ptrType)->getAddressSpace() == addrspace;
}

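// Address-space classification helpers for load and store nodes.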
bool AMDGPUDAGToDAGISel::isGlobalStore(const StoreSDNode *N) {
  return checkType(N->getSrcValue(), AMDGPUAS::GLOBAL_ADDRESS);
}

bool AMDGPUDAGToDAGISel::isPrivateStore(const StoreSDNode *N) {
  return (!checkType(N->getSrcValue(), AMDGPUAS::LOCAL_ADDRESS)
          && !checkType(N->getSrcValue(), AMDGPUAS::GLOBAL_ADDRESS)
          && !checkType(N->getSrcValue(), AMDGPUAS::REGION_ADDRESS));
}

bool AMDGPUDAGToDAGISel::isLocalStore(const StoreSDNode *N) {
  return checkType(N->getSrcValue(), AMDGPUAS::LOCAL_ADDRESS);
}

bool AMDGPUDAGToDAGISel::isRegionStore(const StoreSDNode *N) {
  return checkType(N->getSrcValue(), AMDGPUAS::REGION_ADDRESS);
}

bool AMDGPUDAGToDAGISel::isConstantLoad(const LoadSDNode *N, int CbId) const {
  if (CbId == -1) {
    return checkType(N->getSrcValue(), AMDGPUAS::CONSTANT_ADDRESS);
  }
  return checkType(N->getSrcValue(), AMDGPUAS::CONSTANT_BUFFER_0 + CbId);
}

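// Constant-address loads are treated as global loads on pre-SI targets, and
// also on SI when the loaded type is narrower than 32 bits.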
bool AMDGPUDAGToDAGISel::isGlobalLoad(const LoadSDNode *N) const {
  if (N->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS) {
    const AMDGPUSubtarget &ST = TM.getSubtarget<AMDGPUSubtarget>();
    if (ST.getGeneration() < AMDGPUSubtarget::SOUTHERN_ISLANDS ||
        N->getMemoryVT().bitsLT(MVT::i32)) {
      return true;
    }
  }
  return checkType(N->getSrcValue(), AMDGPUAS::GLOBAL_ADDRESS);
}

bool AMDGPUDAGToDAGISel::isParamLoad(const LoadSDNode *N) const {
  return checkType(N->getSrcValue(), AMDGPUAS::PARAM_I_ADDRESS);
}

bool AMDGPUDAGToDAGISel::isLocalLoad(const LoadSDNode *N) const {
  return checkType(N->getSrcValue(), AMDGPUAS::LOCAL_ADDRESS);
}

bool AMDGPUDAGToDAGISel::isRegionLoad(const LoadSDNode *N) const {
  return checkType(N->getSrcValue(), AMDGPUAS::REGION_ADDRESS);
}

bool AMDGPUDAGToDAGISel::isCPLoad(const LoadSDNode *N) const {
  MachineMemOperand *MMO = N->getMemOperand();
  if (checkType(N->getSrcValue(), AMDGPUAS::PRIVATE_ADDRESS)) {
    if (MMO) {
      const Value *V = MMO->getValue();
      const PseudoSourceValue *PSV = dyn_cast<PseudoSourceValue>(V);
      if (PSV && PSV == PseudoSourceValue::getConstantPool()) {
        return true;
      }
    }
  }
  return false;
}

bool AMDGPUDAGToDAGISel::isPrivateLoad(const LoadSDNode *N) const {
  if (checkType(N->getSrcValue(), AMDGPUAS::PRIVATE_ADDRESS)) {
    // Check to make sure we are not a constant pool load or a constant load
    // that is marked as a private load.
    if (isCPLoad(N) || isConstantLoad(N, -1)) {
      return false;
    }
  }
  if (!checkType(N->getSrcValue(), AMDGPUAS::LOCAL_ADDRESS)
      && !checkType(N->getSrcValue(), AMDGPUAS::GLOBAL_ADDRESS)
      && !checkType(N->getSrcValue(), AMDGPUAS::REGION_ADDRESS)
      && !checkType(N->getSrcValue(), AMDGPUAS::CONSTANT_ADDRESS)
      && !checkType(N->getSrcValue(), AMDGPUAS::PARAM_D_ADDRESS)
      && !checkType(N->getSrcValue(), AMDGPUAS::PARAM_I_ADDRESS)) {
    return true;
  }
  return false;
}

const char *AMDGPUDAGToDAGISel::getPassName() const {
  return "AMDGPU DAG->DAG Pattern Instruction Selection";
}

#ifdef DEBUGTMP
#undef INT64_C
#endif
#undef DEBUGTMP

//===----------------------------------------------------------------------===//
// Complex Patterns
//===----------------------------------------------------------------------===//

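// Match a compile-time-constant address; the byte offset is emitted as an
// immediate dword index (byte offset / 4).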
bool AMDGPUDAGToDAGISel::SelectGlobalValueConstantOffset(SDValue Addr,
    SDValue& IntPtr) {
  if (ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(Addr)) {
    IntPtr = CurDAG->getIntPtrConstant(Cst->getZExtValue() / 4, true);
    return true;
  }
  return false;
}

bool AMDGPUDAGToDAGISel::SelectGlobalValueVariableOffset(SDValue Addr,
    SDValue& BaseReg, SDValue &Offset) {
  if (!dyn_cast<ConstantSDNode>(Addr)) {
    BaseReg = Addr;
    Offset = CurDAG->getIntPtrConstant(0, true);
    return true;
  }
  return false;
}

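// Address pattern for VTX_READ: use a (base register, 16-bit immediate
// offset) pair when the offset fits, otherwise fall back to a zero offset.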
bool AMDGPUDAGToDAGISel::SelectADDRVTX_READ(SDValue Addr, SDValue &Base,
                                            SDValue &Offset) {
  ConstantSDNode *IMMOffset;

  if (Addr.getOpcode() == ISD::ADD
      && (IMMOffset = dyn_cast<ConstantSDNode>(Addr.getOperand(1)))
      && isInt<16>(IMMOffset->getZExtValue())) {

    Base = Addr.getOperand(0);
    Offset = CurDAG->getTargetConstant(IMMOffset->getZExtValue(), MVT::i32);
    return true;
  // If the pointer address is constant, we can move it to the offset field.
  } else if ((IMMOffset = dyn_cast<ConstantSDNode>(Addr))
             && isInt<16>(IMMOffset->getZExtValue())) {
    Base = CurDAG->getCopyFromReg(CurDAG->getEntryNode(),
                                  SDLoc(CurDAG->getEntryNode()),
                                  AMDGPU::ZERO, MVT::i32);
    Offset = CurDAG->getTargetConstant(IMMOffset->getZExtValue(), MVT::i32);
    return true;
  }

  // Default case, no offset
  Base = Addr;
  Offset = CurDAG->getTargetConstant(0, MVT::i32);
  return true;
}

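// Address pattern for indirect addressing: a bare constant becomes
// INDIRECT_BASE_ADDR plus that constant offset, an ADD/OR of a base and a
// constant is split into (base, offset), and anything else gets a zero
// offset.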
bool AMDGPUDAGToDAGISel::SelectADDRIndirect(SDValue Addr, SDValue &Base,
                                            SDValue &Offset) {
  ConstantSDNode *C;

  if ((C = dyn_cast<ConstantSDNode>(Addr))) {
    Base = CurDAG->getRegister(AMDGPU::INDIRECT_BASE_ADDR, MVT::i32);
    Offset = CurDAG->getTargetConstant(C->getZExtValue(), MVT::i32);
  } else if ((Addr.getOpcode() == ISD::ADD || Addr.getOpcode() == ISD::OR) &&
             (C = dyn_cast<ConstantSDNode>(Addr.getOperand(1)))) {
    Base = Addr.getOperand(0);
    Offset = CurDAG->getTargetConstant(C->getZExtValue(), MVT::i32);
  } else {
    Base = Addr;
    Offset = CurDAG->getTargetConstant(0, MVT::i32);
  }

  return true;
}

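// Operand selection for the 24-bit arithmetic instructions.  SimplifyI24
// narrows an operand to its low 24 bits via SimplifyDemandedBits, SelectI24
// matches an i32 for which ComputeNumSignBits returns 9, and SelectU24
// matches an i32 whose top eight bits are known zero (or an
// ANY_EXTEND/EXTLOAD from a narrower type).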
SDValue AMDGPUDAGToDAGISel::SimplifyI24(SDValue &Op) {
  APInt Demanded = APInt(32, 0x00FFFFFF);
  APInt KnownZero, KnownOne;
  TargetLowering::TargetLoweringOpt TLO(*CurDAG, true, true);
  const TargetLowering *TLI = getTargetLowering();
  if (TLI->SimplifyDemandedBits(Op, Demanded, KnownZero, KnownOne, TLO)) {
    CurDAG->ReplaceAllUsesWith(Op, TLO.New);
    CurDAG->RepositionNode(Op.getNode(), TLO.New.getNode());
    return SimplifyI24(TLO.New);
  } else {
    return Op;
  }
}

bool AMDGPUDAGToDAGISel::SelectI24(SDValue Op, SDValue &I24) {

  assert(Op.getValueType() == MVT::i32);

  if (CurDAG->ComputeNumSignBits(Op) == 9) {
    I24 = SimplifyI24(Op);
    return true;
  }
  return false;
}

bool AMDGPUDAGToDAGISel::SelectU24(SDValue Op, SDValue &U24) {
  APInt KnownZero;
  APInt KnownOne;
  CurDAG->ComputeMaskedBits(Op, KnownZero, KnownOne);

  assert(Op.getValueType() == MVT::i32);

  // ANY_EXTEND and EXTLOAD operations can only be done on types smaller than
  // i32.  These smaller types are legal to use with the i24 instructions.
  if ((KnownZero & APInt(KnownZero.getBitWidth(), 0xFF000000)) == 0xFF000000 ||
      Op.getOpcode() == ISD::ANY_EXTEND ||
      ISD::isEXTLoad(Op.getNode())) {
    U24 = SimplifyI24(Op);
    return true;
  }
  return false;
}

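// After selection on SI, give the target lowering a chance to fold each
// selected machine node further via AMDGPUTargetLowering::PostISelFolding.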
void AMDGPUDAGToDAGISel::PostprocessISelDAG() {

  if (Subtarget.getGeneration() < AMDGPUSubtarget::SOUTHERN_ISLANDS) {
    return;
  }

  // Go over all selected nodes and try to fold them a bit more
  const AMDGPUTargetLowering& Lowering =
      (*(const AMDGPUTargetLowering*)getTargetLowering());
  for (SelectionDAG::allnodes_iterator I = CurDAG->allnodes_begin(),
       E = CurDAG->allnodes_end(); I != E; ++I) {

    SDNode *Node = I;

    MachineSDNode *MachineNode = dyn_cast<MachineSDNode>(I);
    if (!MachineNode)
      continue;

    SDNode *ResNode = Lowering.PostISelFolding(MachineNode, *CurDAG);
    if (ResNode != Node) {
      ReplaceUses(Node, ResNode);
    }
  }
}