//===-- AMDILISelDAGToDAG.cpp - A dag to dag inst selector for AMDIL ------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//==-----------------------------------------------------------------------===//
//
/// \file
/// \brief Defines an instruction selector for the AMDGPU target.
//
//===----------------------------------------------------------------------===//
#include "AMDGPUInstrInfo.h"
#include "AMDGPUISelLowering.h" // For AMDGPUISD
#include "AMDGPURegisterInfo.h"
#include "AMDILDevices.h"
#include "R600InstrInfo.h"
#include "SIISelLowering.h"
#include "llvm/ADT/ValueMap.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/Support/Compiler.h"
#include <list>
#include <queue>

using namespace llvm;

//===----------------------------------------------------------------------===//
// Instruction Selector Implementation
//===----------------------------------------------------------------------===//

namespace {
/// AMDGPU specific code to select AMDGPU machine instructions for
/// SelectionDAG operations.
class AMDGPUDAGToDAGISel : public SelectionDAGISel {
  // Subtarget - Keep a pointer to the AMDGPU Subtarget around so that we can
  // make the right decision when generating code for different targets.
  const AMDGPUSubtarget &Subtarget;
public:
  AMDGPUDAGToDAGISel(TargetMachine &TM);
  virtual ~AMDGPUDAGToDAGISel();

  SDNode *Select(SDNode *N);
  virtual const char *getPassName() const;
  virtual void PostprocessISelDAG();

private:
  inline SDValue getSmallIPtrImm(unsigned Imm);
  bool FoldOperands(unsigned, const R600InstrInfo *, std::vector<SDValue> &);

  // Complex pattern selectors
  bool SelectADDRParam(SDValue Addr, SDValue &R1, SDValue &R2);
  bool SelectADDR(SDValue N, SDValue &R1, SDValue &R2);
  bool SelectADDR64(SDValue N, SDValue &R1, SDValue &R2);

  static bool checkType(const Value *ptr, unsigned int addrspace);
  static const Value *getBasePointerValue(const Value *V);

  static bool isGlobalStore(const StoreSDNode *N);
  static bool isPrivateStore(const StoreSDNode *N);
  static bool isLocalStore(const StoreSDNode *N);
  static bool isRegionStore(const StoreSDNode *N);

  static bool isCPLoad(const LoadSDNode *N);
  static bool isConstantLoad(const LoadSDNode *N, int cbID);
  static bool isGlobalLoad(const LoadSDNode *N);
  static bool isParamLoad(const LoadSDNode *N);
  static bool isPrivateLoad(const LoadSDNode *N);
  static bool isLocalLoad(const LoadSDNode *N);
  static bool isRegionLoad(const LoadSDNode *N);

  bool SelectGlobalValueConstantOffset(SDValue Addr, SDValue &IntPtr);
  bool SelectGlobalValueVariableOffset(SDValue Addr,
                                       SDValue &BaseReg, SDValue &Offset);
  bool SelectADDRVTX_READ(SDValue Addr, SDValue &Base, SDValue &Offset);
  bool SelectADDRIndirect(SDValue Addr, SDValue &Base, SDValue &Offset);

  // Include the pieces autogenerated from the target description.
#include "AMDGPUGenDAGISel.inc"
};
} // end anonymous namespace
/// \brief This pass converts a legalized DAG into an AMDGPU-specific
/// DAG, ready for instruction scheduling.
FunctionPass *llvm::createAMDGPUISelDag(TargetMachine &TM) {
  return new AMDGPUDAGToDAGISel(TM);
}

AMDGPUDAGToDAGISel::AMDGPUDAGToDAGISel(TargetMachine &TM)
  : SelectionDAGISel(TM), Subtarget(TM.getSubtarget<AMDGPUSubtarget>()) {
}

AMDGPUDAGToDAGISel::~AMDGPUDAGToDAGISel() {
}

SDValue AMDGPUDAGToDAGISel::getSmallIPtrImm(unsigned int Imm) {
  return CurDAG->getTargetConstant(Imm, MVT::i32);
}

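// SelectADDRParam - Split a parameter address into a base and an offset.
// FrameIndex and ADD addresses are decomposed; anything else is used as the
// base with a zero offset.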
bool AMDGPUDAGToDAGISel::SelectADDRParam(
    SDValue Addr, SDValue &R1, SDValue &R2) {

  if (Addr.getOpcode() == ISD::FrameIndex) {
    if (FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
      R1 = CurDAG->getTargetFrameIndex(FIN->getIndex(), MVT::i32);
      R2 = CurDAG->getTargetConstant(0, MVT::i32);
    } else {
      R1 = Addr;
      R2 = CurDAG->getTargetConstant(0, MVT::i32);
    }
  } else if (Addr.getOpcode() == ISD::ADD) {
    R1 = Addr.getOperand(0);
    R2 = Addr.getOperand(1);
  } else {
    R1 = Addr;
    R2 = CurDAG->getTargetConstant(0, MVT::i32);
  }
  return true;
}

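// SelectADDR / SelectADDR64 - Like SelectADDRParam, but reject target global
// addresses and external symbols, which cannot be used as a base here.
// SelectADDR64 is the 64-bit variant.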
bool AMDGPUDAGToDAGISel::SelectADDR(SDValue Addr, SDValue &R1, SDValue &R2) {
  if (Addr.getOpcode() == ISD::TargetExternalSymbol ||
      Addr.getOpcode() == ISD::TargetGlobalAddress) {
    return false;
  }
  return SelectADDRParam(Addr, R1, R2);
}


bool AMDGPUDAGToDAGISel::SelectADDR64(SDValue Addr, SDValue &R1, SDValue &R2) {
  if (Addr.getOpcode() == ISD::TargetExternalSymbol ||
      Addr.getOpcode() == ISD::TargetGlobalAddress) {
    return false;
  }

  if (Addr.getOpcode() == ISD::FrameIndex) {
    if (FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
      R1 = CurDAG->getTargetFrameIndex(FIN->getIndex(), MVT::i64);
      R2 = CurDAG->getTargetConstant(0, MVT::i64);
    } else {
      R1 = Addr;
      R2 = CurDAG->getTargetConstant(0, MVT::i64);
    }
  } else if (Addr.getOpcode() == ISD::ADD) {
    R1 = Addr.getOperand(0);
    R2 = Addr.getOperand(1);
  } else {
    R1 = Addr;
    R2 = CurDAG->getTargetConstant(0, MVT::i64);
  }
  return true;
}

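// Select - Main entry point for instruction selection. A few node types get
// custom handling here (BUILD_VECTOR and immediate constants on R600-family
// hardware, BUILD_PAIR on Southern Islands and later); everything else goes
// through the TableGen-generated matcher, after which R600 ALU operands are
// folded.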
SDNode *AMDGPUDAGToDAGISel::Select(SDNode *N) {
  unsigned int Opc = N->getOpcode();
  if (N->isMachineOpcode()) {
    return NULL;   // Already selected.
  }
  switch (Opc) {
  default: break;
  case ISD::BUILD_VECTOR: {
    const AMDGPUSubtarget &ST = TM.getSubtarget<AMDGPUSubtarget>();
    if (ST.device()->getGeneration() > AMDGPUDeviceInfo::HD6XXX) {
      break;
    }
    // BUILD_VECTOR is usually lowered into an IMPLICIT_DEF + 4 INSERT_SUBREG,
    // which introduces a 128-bit register copy when it goes through the
    // TwoAddressInstructions pass. We want to avoid 128-bit copies as much as
    // possible because they can't be bundled by our scheduler.
    SDValue RegSeqArgs[9] = {
      CurDAG->getTargetConstant(AMDGPU::R600_Reg128RegClassID, MVT::i32),
      SDValue(), CurDAG->getTargetConstant(AMDGPU::sub0, MVT::i32),
      SDValue(), CurDAG->getTargetConstant(AMDGPU::sub1, MVT::i32),
      SDValue(), CurDAG->getTargetConstant(AMDGPU::sub2, MVT::i32),
      SDValue(), CurDAG->getTargetConstant(AMDGPU::sub3, MVT::i32)
    };
    bool IsRegSeq = true;
    for (unsigned i = 0; i < N->getNumOperands(); i++) {
      if (dyn_cast<RegisterSDNode>(N->getOperand(i))) {
        IsRegSeq = false;
        break;
      }
      RegSeqArgs[2 * i + 1] = N->getOperand(i);
    }
    if (!IsRegSeq)
      break;
    return CurDAG->SelectNodeTo(N, AMDGPU::REG_SEQUENCE, N->getVTList(),
                                RegSeqArgs, 2 * N->getNumOperands() + 1);
  }
  case ISD::BUILD_PAIR: {
    SDValue RC, SubReg0, SubReg1;
    const AMDGPUSubtarget &ST = TM.getSubtarget<AMDGPUSubtarget>();
    if (ST.device()->getGeneration() <= AMDGPUDeviceInfo::HD6XXX) {
      break;
    }
    if (N->getValueType(0) == MVT::i128) {
      RC = CurDAG->getTargetConstant(AMDGPU::SReg_128RegClassID, MVT::i32);
      SubReg0 = CurDAG->getTargetConstant(AMDGPU::sub0_sub1, MVT::i32);
      SubReg1 = CurDAG->getTargetConstant(AMDGPU::sub2_sub3, MVT::i32);
    } else if (N->getValueType(0) == MVT::i64) {
      RC = CurDAG->getTargetConstant(AMDGPU::SReg_64RegClassID, MVT::i32);
      SubReg0 = CurDAG->getTargetConstant(AMDGPU::sub0, MVT::i32);
      SubReg1 = CurDAG->getTargetConstant(AMDGPU::sub1, MVT::i32);
    } else {
      llvm_unreachable("Unhandled value type for BUILD_PAIR");
    }
    const SDValue Ops[] = { RC, N->getOperand(0), SubReg0,
                            N->getOperand(1), SubReg1 };
    return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE,
                                  SDLoc(N), N->getValueType(0), Ops);
  }

  case ISD::ConstantFP:
  case ISD::Constant: {
    const AMDGPUSubtarget &ST = TM.getSubtarget<AMDGPUSubtarget>();
    // XXX: Custom immediate lowering not implemented yet. Instead we use
    // pseudo instructions defined in SIInstructions.td.
    if (ST.device()->getGeneration() > AMDGPUDeviceInfo::HD6XXX) {
      break;
    }
    const R600InstrInfo *TII =
        static_cast<const R600InstrInfo*>(TM.getInstrInfo());

    uint64_t ImmValue = 0;
    unsigned ImmReg = AMDGPU::ALU_LITERAL_X;

    if (N->getOpcode() == ISD::ConstantFP) {
      // XXX: 64-bit immediates not supported yet.
      assert(N->getValueType(0) != MVT::f64);

      ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N);
      APFloat Value = C->getValueAPF();
      float FloatValue = Value.convertToFloat();
      if (FloatValue == 0.0) {
        ImmReg = AMDGPU::ZERO;
      } else if (FloatValue == 0.5) {
        ImmReg = AMDGPU::HALF;
      } else if (FloatValue == 1.0) {
        ImmReg = AMDGPU::ONE;
      } else {
        ImmValue = Value.bitcastToAPInt().getZExtValue();
      }
    } else {
      // XXX: 64-bit immediates not supported yet.
      assert(N->getValueType(0) != MVT::i64);

      ConstantSDNode *C = dyn_cast<ConstantSDNode>(N);
      if (C->getZExtValue() == 0) {
        ImmReg = AMDGPU::ZERO;
      } else if (C->getZExtValue() == 1) {
        ImmReg = AMDGPU::ONE_INT;
      } else {
        ImmValue = C->getZExtValue();
      }
    }

    for (SDNode::use_iterator Use = N->use_begin(), Next = llvm::next(Use);
         Use != SDNode::use_end(); Use = Next) {
      Next = llvm::next(Use);
      std::vector<SDValue> Ops;
      for (unsigned i = 0; i < Use->getNumOperands(); ++i) {
        Ops.push_back(Use->getOperand(i));
      }

      if (!Use->isMachineOpcode()) {
        if (ImmReg == AMDGPU::ALU_LITERAL_X) {
          // We can only use literal constants (e.g. AMDGPU::ZERO,
          // AMDGPU::ONE, etc.) in machine opcodes.
          continue;
        }
      } else {
        if (!TII->isALUInstr(Use->getMachineOpcode()) ||
            (TII->get(Use->getMachineOpcode()).TSFlags &
             R600_InstFlag::VECTOR)) {
          continue;
        }

        int ImmIdx = TII->getOperandIdx(Use->getMachineOpcode(),
                                        R600Operands::IMM);
        assert(ImmIdx != -1);

        // Subtract one from ImmIdx, because the DST operand is usually index
        // 0 for MachineInstrs, but we have no DST in the Ops vector.
        ImmIdx--;

        // Check that we aren't already using an immediate.
        // XXX: It's possible for an instruction to have more than one
        // immediate operand, but this is not supported yet.
        if (ImmReg == AMDGPU::ALU_LITERAL_X) {
          ConstantSDNode *C = dyn_cast<ConstantSDNode>(Use->getOperand(ImmIdx));
          assert(C);

          if (C->getZExtValue() != 0) {
            // This instruction is already using an immediate.
            continue;
          }

          // Set the immediate value.
          Ops[ImmIdx] = CurDAG->getTargetConstant(ImmValue, MVT::i32);
        }
      }
      // Set the immediate register.
      Ops[Use.getOperandNo()] = CurDAG->getRegister(ImmReg, MVT::i32);

      CurDAG->UpdateNodeOperands(*Use, Ops.data(), Use->getNumOperands());
    }
    break;
  }
  }
  SDNode *Result = SelectCode(N);

  // Fold the operands of the selected node.

  const AMDGPUSubtarget &ST = TM.getSubtarget<AMDGPUSubtarget>();
  if (ST.device()->getGeneration() <= AMDGPUDeviceInfo::HD6XXX) {
    const R600InstrInfo *TII =
        static_cast<const R600InstrInfo*>(TM.getInstrInfo());
    if (Result && Result->isMachineOpcode() &&
        !(TII->get(Result->getMachineOpcode()).TSFlags & R600_InstFlag::VECTOR)
        && TII->isALUInstr(Result->getMachineOpcode())) {
      // Fold FNEG/FABS/CONST_ADDRESS.
      // TODO: Isel can generate multiple MachineInstrs, so we need to
      // recursively parse Result.
      bool IsModified = false;
      do {
        std::vector<SDValue> Ops;
        for (SDNode::op_iterator I = Result->op_begin(), E = Result->op_end();
             I != E; ++I)
          Ops.push_back(*I);
        IsModified = FoldOperands(Result->getMachineOpcode(), TII, Ops);
        if (IsModified) {
          Result = CurDAG->UpdateNodeOperands(Result, Ops.data(), Ops.size());
        }
      } while (IsModified);

      // If the node has a single use which is CLAMP_R600, fold it.
      if (Result->hasOneUse() && Result->isMachineOpcode()) {
        SDNode *PotentialClamp = *Result->use_begin();
        if (PotentialClamp->isMachineOpcode() &&
            PotentialClamp->getMachineOpcode() == AMDGPU::CLAMP_R600) {
          unsigned ClampIdx =
              TII->getOperandIdx(Result->getMachineOpcode(),
                                 R600Operands::CLAMP);
          std::vector<SDValue> Ops;
          unsigned NumOp = Result->getNumOperands();
          for (unsigned i = 0; i < NumOp; ++i) {
            Ops.push_back(Result->getOperand(i));
          }
          Ops[ClampIdx - 1] = CurDAG->getTargetConstant(1, MVT::i32);
          Result = CurDAG->SelectNodeTo(PotentialClamp,
              Result->getMachineOpcode(), PotentialClamp->getVTList(),
              Ops.data(), NumOp);
        }
      }
    }
  }

  return Result;
}

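// FoldOperands - Try to fold the node feeding one of the source operands of
// an R600 ALU instruction directly into the operand list: a CONST_ADDRESS
// with a constant offset becomes an ALU_CONST read (if it passes
// fitsConstReadLimitations), FNEG/FABS set the per-source neg/abs bits, and
// BITCAST is looked through. Returns true if Ops was modified.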
bool AMDGPUDAGToDAGISel::FoldOperands(unsigned Opcode,
    const R600InstrInfo *TII, std::vector<SDValue> &Ops) {
  int OperandIdx[] = {
    TII->getOperandIdx(Opcode, R600Operands::SRC0),
    TII->getOperandIdx(Opcode, R600Operands::SRC1),
    TII->getOperandIdx(Opcode, R600Operands::SRC2)
  };
  int SelIdx[] = {
    TII->getOperandIdx(Opcode, R600Operands::SRC0_SEL),
    TII->getOperandIdx(Opcode, R600Operands::SRC1_SEL),
    TII->getOperandIdx(Opcode, R600Operands::SRC2_SEL)
  };
  int NegIdx[] = {
    TII->getOperandIdx(Opcode, R600Operands::SRC0_NEG),
    TII->getOperandIdx(Opcode, R600Operands::SRC1_NEG),
    TII->getOperandIdx(Opcode, R600Operands::SRC2_NEG)
  };
  int AbsIdx[] = {
    TII->getOperandIdx(Opcode, R600Operands::SRC0_ABS),
    TII->getOperandIdx(Opcode, R600Operands::SRC1_ABS),
    -1
  };

  for (unsigned i = 0; i < 3; i++) {
    if (OperandIdx[i] < 0)
      return false;
    SDValue Operand = Ops[OperandIdx[i] - 1];
    switch (Operand.getOpcode()) {
    case AMDGPUISD::CONST_ADDRESS: {
      SDValue CstOffset;
      if (Operand.getValueType().isVector() ||
          !SelectGlobalValueConstantOffset(Operand.getOperand(0), CstOffset))
        break;

      // Gather the other constant values already read by this instruction.
      std::vector<unsigned> Consts;
      for (unsigned j = 0; j < 3; j++) {
        int SrcIdx = OperandIdx[j];
        if (SrcIdx < 0)
          break;
        if (RegisterSDNode *Reg = dyn_cast<RegisterSDNode>(Ops[SrcIdx - 1])) {
          if (Reg->getReg() == AMDGPU::ALU_CONST) {
            ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(Ops[SelIdx[j] - 1]);
            Consts.push_back(Cst->getZExtValue());
          }
        }
      }

      ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(CstOffset);
      Consts.push_back(Cst->getZExtValue());
      if (!TII->fitsConstReadLimitations(Consts))
        break;

      Ops[OperandIdx[i] - 1] = CurDAG->getRegister(AMDGPU::ALU_CONST, MVT::f32);
      Ops[SelIdx[i] - 1] = CstOffset;
      return true;
    }
    case ISD::FNEG:
      if (NegIdx[i] < 0)
        break;
      Ops[OperandIdx[i] - 1] = Operand.getOperand(0);
      Ops[NegIdx[i] - 1] = CurDAG->getTargetConstant(1, MVT::i32);
      return true;
    case ISD::FABS:
      if (AbsIdx[i] < 0)
        break;
      Ops[OperandIdx[i] - 1] = Operand.getOperand(0);
      Ops[AbsIdx[i] - 1] = CurDAG->getTargetConstant(1, MVT::i32);
      return true;
    case ISD::BITCAST:
      Ops[OperandIdx[i] - 1] = Operand.getOperand(0);
      return true;
    default:
      break;
    }
  }
  return false;
}

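// checkType - Return true if the pointer value is non-null and its pointer
// type lives in the given address space.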
bool AMDGPUDAGToDAGISel::checkType(const Value *ptr, unsigned int addrspace) {
  if (!ptr) {
    return false;
  }
  Type *ptrType = ptr->getType();
  return dyn_cast<PointerType>(ptrType)->getAddressSpace() == addrspace;
}

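// getBasePointerValue - Walk backwards through the operands of V (breadth
// first, visiting each value at most once) until an underlying base pointer
// is found: a pointer argument, a global variable, or an alloca.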
const Value * AMDGPUDAGToDAGISel::getBasePointerValue(const Value *V) {
  if (!V) {
    return NULL;
  }
  const Value *ret = NULL;
  ValueMap<const Value *, bool> ValueBitMap;
  std::queue<const Value *, std::list<const Value *> > ValueQueue;
  ValueQueue.push(V);
  while (!ValueQueue.empty()) {
    V = ValueQueue.front();
    if (ValueBitMap.find(V) == ValueBitMap.end()) {
      ValueBitMap[V] = true;
      if (dyn_cast<Argument>(V) && dyn_cast<PointerType>(V->getType())) {
        ret = V;
        break;
      } else if (dyn_cast<GlobalVariable>(V)) {
        ret = V;
        break;
      } else if (dyn_cast<Constant>(V)) {
        const ConstantExpr *CE = dyn_cast<ConstantExpr>(V);
        if (CE) {
          ValueQueue.push(CE->getOperand(0));
        }
      } else if (const AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
        ret = AI;
        break;
      } else if (const Instruction *I = dyn_cast<Instruction>(V)) {
        uint32_t numOps = I->getNumOperands();
        for (uint32_t x = 0; x < numOps; ++x) {
          ValueQueue.push(I->getOperand(x));
        }
      } else {
        assert(!"Found a Value that we didn't know how to handle!");
      }
    }
    ValueQueue.pop();
  }
  return ret;
}

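// The is*Store and is*Load predicates below classify memory accesses by the
// address space of their pointer operand.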
bool AMDGPUDAGToDAGISel::isGlobalStore(const StoreSDNode *N) {
  return checkType(N->getSrcValue(), AMDGPUAS::GLOBAL_ADDRESS);
}

bool AMDGPUDAGToDAGISel::isPrivateStore(const StoreSDNode *N) {
  return (!checkType(N->getSrcValue(), AMDGPUAS::LOCAL_ADDRESS)
          && !checkType(N->getSrcValue(), AMDGPUAS::GLOBAL_ADDRESS)
          && !checkType(N->getSrcValue(), AMDGPUAS::REGION_ADDRESS));
}

bool AMDGPUDAGToDAGISel::isLocalStore(const StoreSDNode *N) {
  return checkType(N->getSrcValue(), AMDGPUAS::LOCAL_ADDRESS);
}

bool AMDGPUDAGToDAGISel::isRegionStore(const StoreSDNode *N) {
  return checkType(N->getSrcValue(), AMDGPUAS::REGION_ADDRESS);
}

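// isConstantLoad - A load is treated as constant if it is in the constant
// address space, or if it is a private-space load whose underlying base
// pointer is a global value.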
bool AMDGPUDAGToDAGISel::isConstantLoad(const LoadSDNode *N, int cbID) {
  if (checkType(N->getSrcValue(), AMDGPUAS::CONSTANT_ADDRESS)) {
    return true;
  }
  MachineMemOperand *MMO = N->getMemOperand();
  const Value *V = MMO->getValue();
  const Value *BV = getBasePointerValue(V);
  if (MMO
      && MMO->getValue()
      && ((V && dyn_cast<GlobalValue>(V))
          || (BV && dyn_cast<GlobalValue>(
                        getBasePointerValue(MMO->getValue()))))) {
    return checkType(N->getSrcValue(), AMDGPUAS::PRIVATE_ADDRESS);
  } else {
    return false;
  }
}

bool AMDGPUDAGToDAGISel::isGlobalLoad(const LoadSDNode *N) {
  return checkType(N->getSrcValue(), AMDGPUAS::GLOBAL_ADDRESS);
}

bool AMDGPUDAGToDAGISel::isParamLoad(const LoadSDNode *N) {
  return checkType(N->getSrcValue(), AMDGPUAS::PARAM_I_ADDRESS);
}

bool AMDGPUDAGToDAGISel::isLocalLoad(const LoadSDNode *N) {
  return checkType(N->getSrcValue(), AMDGPUAS::LOCAL_ADDRESS);
}

bool AMDGPUDAGToDAGISel::isRegionLoad(const LoadSDNode *N) {
  return checkType(N->getSrcValue(), AMDGPUAS::REGION_ADDRESS);
}

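// isCPLoad - Return true for private-space loads that read from the constant
// pool, identified through the pseudo source value on the memory operand.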
bool AMDGPUDAGToDAGISel::isCPLoad(const LoadSDNode *N) {
  MachineMemOperand *MMO = N->getMemOperand();
  if (checkType(N->getSrcValue(), AMDGPUAS::PRIVATE_ADDRESS)) {
    if (MMO) {
      const Value *V = MMO->getValue();
      const PseudoSourceValue *PSV = dyn_cast<PseudoSourceValue>(V);
      if (PSV && PSV == PseudoSourceValue::getConstantPool()) {
        return true;
      }
    }
  }
  return false;
}

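// isPrivateLoad - Private loads are everything not covered by one of the more
// specific categories above (local, global, region, constant, parameter, and
// constant-pool loads).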
bool AMDGPUDAGToDAGISel::isPrivateLoad(const LoadSDNode *N) {
  if (checkType(N->getSrcValue(), AMDGPUAS::PRIVATE_ADDRESS)) {
    // Check to make sure we are not a constant pool load or a constant load
    // that is marked as a private load.
    if (isCPLoad(N) || isConstantLoad(N, -1)) {
      return false;
    }
  }
  if (!checkType(N->getSrcValue(), AMDGPUAS::LOCAL_ADDRESS)
      && !checkType(N->getSrcValue(), AMDGPUAS::GLOBAL_ADDRESS)
      && !checkType(N->getSrcValue(), AMDGPUAS::REGION_ADDRESS)
      && !checkType(N->getSrcValue(), AMDGPUAS::CONSTANT_ADDRESS)
      && !checkType(N->getSrcValue(), AMDGPUAS::PARAM_D_ADDRESS)
      && !checkType(N->getSrcValue(), AMDGPUAS::PARAM_I_ADDRESS)) {
    return true;
  }
  return false;
}

const char *AMDGPUDAGToDAGISel::getPassName() const {
  return "AMDGPU DAG->DAG Pattern Instruction Selection";
}

#ifdef DEBUGTMP
#undef INT64_C
#endif
#undef DEBUGTMP

///==== AMDGPU Functions ====///

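// SelectGlobalValueConstantOffset - Match a constant address and return it as
// a pointer-sized offset in dwords (the byte value divided by 4).
// SelectGlobalValueVariableOffset below handles the non-constant case by
// using the address as the base register with a zero offset.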
bool AMDGPUDAGToDAGISel::SelectGlobalValueConstantOffset(SDValue Addr,
                                                         SDValue &IntPtr) {
  if (ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(Addr)) {
    IntPtr = CurDAG->getIntPtrConstant(Cst->getZExtValue() / 4, true);
    return true;
  }
  return false;
}

bool AMDGPUDAGToDAGISel::SelectGlobalValueVariableOffset(SDValue Addr,
                                                         SDValue &BaseReg,
                                                         SDValue &Offset) {
  if (!dyn_cast<ConstantSDNode>(Addr)) {
    BaseReg = Addr;
    Offset = CurDAG->getIntPtrConstant(0, true);
    return true;
  }
  return false;
}

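// SelectADDRVTX_READ - Select a base register and a 16-bit immediate offset
// for a VTX_READ address. Always succeeds; addresses that do not split fall
// back to a zero offset.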
bool AMDGPUDAGToDAGISel::SelectADDRVTX_READ(SDValue Addr, SDValue &Base,
                                            SDValue &Offset) {
  ConstantSDNode *IMMOffset;

  if (Addr.getOpcode() == ISD::ADD
      && (IMMOffset = dyn_cast<ConstantSDNode>(Addr.getOperand(1)))
      && isInt<16>(IMMOffset->getZExtValue())) {

    Base = Addr.getOperand(0);
    Offset = CurDAG->getTargetConstant(IMMOffset->getZExtValue(), MVT::i32);
    return true;
  // If the pointer address is constant, we can move it to the offset field.
  } else if ((IMMOffset = dyn_cast<ConstantSDNode>(Addr))
             && isInt<16>(IMMOffset->getZExtValue())) {
    Base = CurDAG->getCopyFromReg(CurDAG->getEntryNode(),
                                  SDLoc(CurDAG->getEntryNode()),
                                  AMDGPU::ZERO, MVT::i32);
    Offset = CurDAG->getTargetConstant(IMMOffset->getZExtValue(), MVT::i32);
    return true;
  }

  // Default case, no offset.
  Base = Addr;
  Offset = CurDAG->getTargetConstant(0, MVT::i32);
  return true;
}

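// SelectADDRIndirect - Select a base register and constant offset for an
// indirect address. A bare constant is anchored on INDIRECT_BASE_ADDR;
// otherwise an ADD/OR with a constant right-hand side is split into
// base + offset.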
bool AMDGPUDAGToDAGISel::SelectADDRIndirect(SDValue Addr, SDValue &Base,
                                            SDValue &Offset) {
  ConstantSDNode *C;

  if ((C = dyn_cast<ConstantSDNode>(Addr))) {
    Base = CurDAG->getRegister(AMDGPU::INDIRECT_BASE_ADDR, MVT::i32);
    Offset = CurDAG->getTargetConstant(C->getZExtValue(), MVT::i32);
  } else if ((Addr.getOpcode() == ISD::ADD || Addr.getOpcode() == ISD::OR) &&
             (C = dyn_cast<ConstantSDNode>(Addr.getOperand(1)))) {
    Base = Addr.getOperand(0);
    Offset = CurDAG->getTargetConstant(C->getZExtValue(), MVT::i32);
  } else {
    Base = Addr;
    Offset = CurDAG->getTargetConstant(0, MVT::i32);
  }

  return true;
}

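// PostprocessISelDAG - After selection, give the target lowering a chance to
// perform additional folding on each selected machine node via
// AMDGPUTargetLowering::PostISelFolding.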
void AMDGPUDAGToDAGISel::PostprocessISelDAG() {

  // Go over all selected nodes and try to fold them a bit more.
  const AMDGPUTargetLowering &Lowering = ((const AMDGPUTargetLowering &)TLI);
  for (SelectionDAG::allnodes_iterator I = CurDAG->allnodes_begin(),
       E = CurDAG->allnodes_end(); I != E; ++I) {

    MachineSDNode *Node = dyn_cast<MachineSDNode>(I);
    if (!Node)
      continue;

    SDNode *ResNode = Lowering.PostISelFolding(Node, *CurDAG);
    if (ResNode != Node)
      ReplaceUses(Node, ResNode);
  }
}