//===-- R600InstrInfo.cpp - R600 Instruction Information ------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief R600 Implementation of TargetInstrInfo.
//
//===----------------------------------------------------------------------===//

#include "R600InstrInfo.h"
#include "AMDGPUSubtarget.h"
#include "AMDGPUTargetMachine.h"
#include "R600Defines.h"
#include "R600MachineFunctionInfo.h"
#include "R600RegisterInfo.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"

#define GET_INSTRINFO_CTOR
#include "AMDGPUGenDFAPacketizer.inc"

using namespace llvm;

R600InstrInfo::R600InstrInfo(AMDGPUTargetMachine &tm)
    : AMDGPUInstrInfo(tm),
      RI(tm, *this),
      ST(tm.getSubtarget<AMDGPUSubtarget>())
{ }

const R600RegisterInfo &R600InstrInfo::getRegisterInfo() const {
  return RI;
}

bool R600InstrInfo::isTrig(const MachineInstr &MI) const {
  return get(MI.getOpcode()).TSFlags & R600_InstFlag::TRIG;
}

bool R600InstrInfo::isVector(const MachineInstr &MI) const {
  return get(MI.getOpcode()).TSFlags & R600_InstFlag::VECTOR;
}

void
R600InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator MI, DebugLoc DL,
                           unsigned DestReg, unsigned SrcReg,
                           bool KillSrc) const {
  if (AMDGPU::R600_Reg128RegClass.contains(DestReg)
      && AMDGPU::R600_Reg128RegClass.contains(SrcReg)) {
    for (unsigned I = 0; I < 4; I++) {
      unsigned SubRegIndex = RI.getSubRegFromChannel(I);
      buildDefaultInstruction(MBB, MI, AMDGPU::MOV,
                              RI.getSubReg(DestReg, SubRegIndex),
                              RI.getSubReg(SrcReg, SubRegIndex))
          .addReg(DestReg,
                  RegState::Define | RegState::Implicit);
    }
  } else {

    // At this point neither register may be a vec4; copies between a vec4
    // register and a scalar register are not supported.
    assert(!AMDGPU::R600_Reg128RegClass.contains(DestReg)
           && !AMDGPU::R600_Reg128RegClass.contains(SrcReg));

    MachineInstr *NewMI = buildDefaultInstruction(MBB, MI, AMDGPU::MOV,
                                                  DestReg, SrcReg);
    NewMI->getOperand(getOperandIdx(*NewMI, R600Operands::SRC0))
        .setIsKill(KillSrc);
  }
}
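
// Sketch of what copyPhysReg produces for a 128-bit copy (register names
// illustrative): copying T0_XYZW into T1_XYZW becomes four channel moves,
//   T1_X = MOV T0_X;  T1_Y = MOV T0_Y;  T1_Z = MOV T0_Z;  T1_W = MOV T0_W
// where each MOV also implicitly defines T1_XYZW so the super-register is
// seen as fully written.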

MachineInstr *R600InstrInfo::getMovImmInstr(MachineFunction *MF,
                                            unsigned DstReg, int64_t Imm) const {
  MachineInstr *MI = MF->CreateMachineInstr(get(AMDGPU::MOV), DebugLoc());
  MachineInstrBuilder MIB(*MF, MI);
  MIB.addReg(DstReg, RegState::Define);
  MIB.addReg(AMDGPU::ALU_LITERAL_X);
  MIB.addImm(Imm);
  MIB.addReg(0); // PREDICATE_BIT

  return MI;
}

unsigned R600InstrInfo::getIEQOpcode() const {
  return AMDGPU::SETE_INT;
}

bool R600InstrInfo::isMov(unsigned Opcode) const {
  switch(Opcode) {
  default: return false;
  case AMDGPU::MOV:
  case AMDGPU::MOV_IMM_F32:
  case AMDGPU::MOV_IMM_I32:
    return true;
  }
}

// Some instructions act as placeholders to emulate operations that the GPU
// hardware does automatically. This function can be used to check if
// an opcode falls into this category.
bool R600InstrInfo::isPlaceHolderOpcode(unsigned Opcode) const {
  switch (Opcode) {
  default: return false;
  case AMDGPU::RETURN:
    return true;
  }
}

bool R600InstrInfo::isReductionOp(unsigned Opcode) const {
  switch(Opcode) {
  default: return false;
  case AMDGPU::DOT4_r600_pseudo:
  case AMDGPU::DOT4_eg_pseudo:
    return true;
  }
}

bool R600InstrInfo::isCubeOp(unsigned Opcode) const {
  switch(Opcode) {
  default: return false;
  case AMDGPU::CUBE_r600_pseudo:
  case AMDGPU::CUBE_r600_real:
  case AMDGPU::CUBE_eg_pseudo:
  case AMDGPU::CUBE_eg_real:
    return true;
  }
}

bool R600InstrInfo::isALUInstr(unsigned Opcode) const {
  unsigned TargetFlags = get(Opcode).TSFlags;

  return ((TargetFlags & R600_InstFlag::OP1) |
          (TargetFlags & R600_InstFlag::OP2) |
          (TargetFlags & R600_InstFlag::OP3));
}

bool R600InstrInfo::isTransOnly(unsigned Opcode) const {
  return (get(Opcode).TSFlags & R600_InstFlag::TRANS_ONLY);
}

bool R600InstrInfo::isTransOnly(const MachineInstr *MI) const {
  return isTransOnly(MI->getOpcode());
}

bool R600InstrInfo::usesVertexCache(unsigned Opcode) const {
  return ST.hasVertexCache() &&
         (get(Opcode).TSFlags & R600_InstFlag::VTX_INST);
}

bool R600InstrInfo::usesVertexCache(const MachineInstr *MI) const {
  return usesVertexCache(MI->getOpcode());
}

bool R600InstrInfo::usesTextureCache(unsigned Opcode) const {
  return (!ST.hasVertexCache() &&
          (get(Opcode).TSFlags & R600_InstFlag::VTX_INST)) ||
         (get(Opcode).TSFlags & R600_InstFlag::TEX_INST);
}

bool R600InstrInfo::usesTextureCache(const MachineInstr *MI) const {
  return usesTextureCache(MI->getOpcode());
}

bool
R600InstrInfo::fitsConstReadLimitations(const std::vector<unsigned> &Consts)
    const {
  assert(Consts.size() <= 12 && "Too many operands in instruction group");
  unsigned Pair1 = 0, Pair2 = 0;
  for (unsigned i = 0, n = Consts.size(); i < n; ++i) {
    unsigned ReadConstHalf = Consts[i] & 2;
    unsigned ReadConstIndex = Consts[i] & (~3);
    unsigned ReadHalfConst = ReadConstIndex | ReadConstHalf;
    if (!Pair1) {
      Pair1 = ReadHalfConst;
      continue;
    }
    if (Pair1 == ReadHalfConst)
      continue;
    if (!Pair2) {
      Pair2 = ReadHalfConst;
      continue;
    }
    if (Pair2 != ReadHalfConst)
      return false;
  }
  return true;
}
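
// In other words, an ALU instruction group may read from at most two
// distinct constant pairs. Collapsing each ALU_CONST selector to its
// constant pair (clearing bit 0) means a group whose selectors map to
// pairs {A, A, B} fits, while {A, B, C} does not (labels illustrative).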

bool
R600InstrInfo::canBundle(const std::vector<MachineInstr *> &MIs) const {
  std::vector<unsigned> Consts;
  for (unsigned i = 0, n = MIs.size(); i < n; i++) {
    const MachineInstr *MI = MIs[i];

    const R600Operands::Ops OpTable[3][2] = {
      {R600Operands::SRC0, R600Operands::SRC0_SEL},
      {R600Operands::SRC1, R600Operands::SRC1_SEL},
      {R600Operands::SRC2, R600Operands::SRC2_SEL},
    };

    if (!isALUInstr(MI->getOpcode()))
      continue;

    for (unsigned j = 0; j < 3; j++) {
      int SrcIdx = getOperandIdx(MI->getOpcode(), OpTable[j][0]);
      if (SrcIdx < 0)
        break;
      if (MI->getOperand(SrcIdx).getReg() == AMDGPU::ALU_CONST) {
        unsigned Const = MI->getOperand(
            getOperandIdx(MI->getOpcode(), OpTable[j][1])).getImm();
        Consts.push_back(Const);
      }
    }
  }
  return fitsConstReadLimitations(Consts);
}

DFAPacketizer *R600InstrInfo::CreateTargetScheduleState(const TargetMachine *TM,
                                                        const ScheduleDAG *DAG) const {
  const InstrItineraryData *II = TM->getInstrItineraryData();
  return TM->getSubtarget<AMDGPUSubtarget>().createDFAPacketizer(II);
}

static bool
isPredicateSetter(unsigned Opcode) {
  switch (Opcode) {
  case AMDGPU::PRED_X:
    return true;
  default:
    return false;
  }
}

static MachineInstr *
findFirstPredicateSetterFrom(MachineBasicBlock &MBB,
                             MachineBasicBlock::iterator I) {
  while (I != MBB.begin()) {
    --I;
    MachineInstr *MI = I;
    if (isPredicateSetter(MI->getOpcode()))
      return MI;
  }

  return NULL;
}

static
bool isJump(unsigned Opcode) {
  return Opcode == AMDGPU::JUMP || Opcode == AMDGPU::JUMP_COND;
}

bool
R600InstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
                             MachineBasicBlock *&TBB,
                             MachineBasicBlock *&FBB,
                             SmallVectorImpl<MachineOperand> &Cond,
                             bool AllowModify) const {
  // Most of the following comes from the ARM implementation of AnalyzeBranch.

  // If the block has no terminators, it just falls into the block after it.
  MachineBasicBlock::iterator I = MBB.end();
  if (I == MBB.begin())
    return false;
  --I;
  while (I->isDebugValue()) {
    if (I == MBB.begin())
      return false;
    --I;
  }
  if (!isJump(static_cast<MachineInstr *>(I)->getOpcode())) {
    return false;
  }

  // Get the last instruction in the block.
  MachineInstr *LastInst = I;

  // If there is only one terminator instruction, process it.
  unsigned LastOpc = LastInst->getOpcode();
  if (I == MBB.begin() ||
      !isJump(static_cast<MachineInstr *>(--I)->getOpcode())) {
    if (LastOpc == AMDGPU::JUMP) {
      TBB = LastInst->getOperand(0).getMBB();
      return false;
    } else if (LastOpc == AMDGPU::JUMP_COND) {
      MachineInstr *predSet = I;
      while (!isPredicateSetter(predSet->getOpcode())) {
        predSet = --I;
      }
      TBB = LastInst->getOperand(0).getMBB();
      Cond.push_back(predSet->getOperand(1));
      Cond.push_back(predSet->getOperand(2));
      Cond.push_back(MachineOperand::CreateReg(AMDGPU::PRED_SEL_ONE, false));
      return false;
    }
    return true; // Can't handle indirect branch.
  }

  // Get the instruction before it if it is a terminator.
  MachineInstr *SecondLastInst = I;
  unsigned SecondLastOpc = SecondLastInst->getOpcode();

  // If the block ends with a JUMP_COND followed by an unconditional JUMP,
  // handle it.
  if (SecondLastOpc == AMDGPU::JUMP_COND && LastOpc == AMDGPU::JUMP) {
    MachineInstr *predSet = --I;
    while (!isPredicateSetter(predSet->getOpcode())) {
      predSet = --I;
    }
    TBB = SecondLastInst->getOperand(0).getMBB();
    FBB = LastInst->getOperand(0).getMBB();
    Cond.push_back(predSet->getOperand(1));
    Cond.push_back(predSet->getOperand(2));
    Cond.push_back(MachineOperand::CreateReg(AMDGPU::PRED_SEL_ONE, false));
    return false;
  }

  // Otherwise, can't handle this.
  return true;
}
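
// The two terminator shapes AnalyzeBranch recognizes are, schematically
// (block names illustrative):
//
//   PRED_X ...                      PRED_X ...
//   JUMP_COND %bb.then              JUMP_COND %bb.then
//                                   JUMP %bb.else
//
// The left form yields a conditional branch with a fallthrough false edge;
// the right form fills in both TBB and FBB.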

int R600InstrInfo::getBranchInstr(const MachineOperand &op) const {
  const MachineInstr *MI = op.getParent();

  switch (MI->getDesc().OpInfo->RegClass) {
  default: // FIXME: fallthrough??
  case AMDGPU::GPRI32RegClassID: return AMDGPU::BRANCH_COND_i32;
  case AMDGPU::GPRF32RegClassID: return AMDGPU::BRANCH_COND_f32;
  };
}

unsigned
R600InstrInfo::InsertBranch(MachineBasicBlock &MBB,
                            MachineBasicBlock *TBB,
                            MachineBasicBlock *FBB,
                            const SmallVectorImpl<MachineOperand> &Cond,
                            DebugLoc DL) const {
  assert(TBB && "InsertBranch must not be told to insert a fallthrough");

  if (FBB == 0) {
    if (Cond.empty()) {
      BuildMI(&MBB, DL, get(AMDGPU::JUMP)).addMBB(TBB);
      return 1;
    } else {
      MachineInstr *PredSet = findFirstPredicateSetterFrom(MBB, MBB.end());
      assert(PredSet && "No previous predicate!");
      addFlag(PredSet, 0, MO_FLAG_PUSH);
      PredSet->getOperand(2).setImm(Cond[1].getImm());

      BuildMI(&MBB, DL, get(AMDGPU::JUMP_COND))
          .addMBB(TBB)
          .addReg(AMDGPU::PREDICATE_BIT, RegState::Kill);
      return 1;
    }
  } else {
    MachineInstr *PredSet = findFirstPredicateSetterFrom(MBB, MBB.end());
    assert(PredSet && "No previous predicate!");
    addFlag(PredSet, 0, MO_FLAG_PUSH);
    PredSet->getOperand(2).setImm(Cond[1].getImm());
    BuildMI(&MBB, DL, get(AMDGPU::JUMP_COND))
        .addMBB(TBB)
        .addReg(AMDGPU::PREDICATE_BIT, RegState::Kill);
    BuildMI(&MBB, DL, get(AMDGPU::JUMP)).addMBB(FBB);
    return 2;
  }
}
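
// InsertBranch returns the number of branch instructions it emitted, which
// is what RemoveBranch below must later delete. A sketch of the two-way
// case (names illustrative):
//   unsigned N = TII->InsertBranch(MBB, ThenBB, ElseBB, Cond, DL);
//   // N == 2: a JUMP_COND to ThenBB followed by a JUMP to ElseBB.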

unsigned
R600InstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {

  // Note: we leave PRED* instructions there.
  // They may be needed when predicating instructions.

  MachineBasicBlock::iterator I = MBB.end();

  if (I == MBB.begin()) {
    return 0;
  }
  --I;
  switch (I->getOpcode()) {
  default:
    return 0;
  case AMDGPU::JUMP_COND: {
    MachineInstr *predSet = findFirstPredicateSetterFrom(MBB, I);
    clearFlag(predSet, 0, MO_FLAG_PUSH);
    I->eraseFromParent();
    break;
  }
  case AMDGPU::JUMP:
    I->eraseFromParent();
    break;
  }
  I = MBB.end();

  if (I == MBB.begin()) {
    return 1;
  }
  --I;
  switch (I->getOpcode()) {
    // FIXME: only one case??
  default:
    return 1;
  case AMDGPU::JUMP_COND: {
    MachineInstr *predSet = findFirstPredicateSetterFrom(MBB, I);
    clearFlag(predSet, 0, MO_FLAG_PUSH);
    I->eraseFromParent();
    break;
  }
  case AMDGPU::JUMP:
    I->eraseFromParent();
    break;
  }
  return 2;
}

bool
R600InstrInfo::isPredicated(const MachineInstr *MI) const {
  int idx = MI->findFirstPredOperandIdx();
  if (idx < 0)
    return false;

  unsigned Reg = MI->getOperand(idx).getReg();
  switch (Reg) {
  default: return false;
  case AMDGPU::PRED_SEL_ONE:
  case AMDGPU::PRED_SEL_ZERO:
  case AMDGPU::PREDICATE_BIT:
    return true;
  }
}

bool
R600InstrInfo::isPredicable(MachineInstr *MI) const {
  // XXX: KILL* instructions can be predicated, but they must be the last
  // instruction in a clause, so this means any instructions after them cannot
  // be predicated. Until we have proper support for instruction clauses in the
  // backend, we will mark KILL* instructions as unpredicable.

  if (MI->getOpcode() == AMDGPU::KILLGT) {
    return false;
  } else if (isVector(*MI)) {
    return false;
  } else {
    return AMDGPUInstrInfo::isPredicable(MI);
  }
}


bool
R600InstrInfo::isProfitableToIfCvt(MachineBasicBlock &MBB,
                                   unsigned NumCycles,
                                   unsigned ExtraPredCycles,
                                   const BranchProbability &Probability) const {
  return true;
}

bool
R600InstrInfo::isProfitableToIfCvt(MachineBasicBlock &TMBB,
                                   unsigned NumTCycles,
                                   unsigned ExtraTCycles,
                                   MachineBasicBlock &FMBB,
                                   unsigned NumFCycles,
                                   unsigned ExtraFCycles,
                                   const BranchProbability &Probability) const {
  return true;
}

bool
R600InstrInfo::isProfitableToDupForIfCvt(MachineBasicBlock &MBB,
                                         unsigned NumCycles,
                                         const BranchProbability &Probability)
                                         const {
  return true;
}

bool
R600InstrInfo::isProfitableToUnpredicate(MachineBasicBlock &TMBB,
                                         MachineBasicBlock &FMBB) const {
  return false;
}


bool
R600InstrInfo::ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
  MachineOperand &MO = Cond[1];
  switch (MO.getImm()) {
  case OPCODE_IS_ZERO_INT:
    MO.setImm(OPCODE_IS_NOT_ZERO_INT);
    break;
  case OPCODE_IS_NOT_ZERO_INT:
    MO.setImm(OPCODE_IS_ZERO_INT);
    break;
  case OPCODE_IS_ZERO:
    MO.setImm(OPCODE_IS_NOT_ZERO);
    break;
  case OPCODE_IS_NOT_ZERO:
    MO.setImm(OPCODE_IS_ZERO);
    break;
  default:
    return true;
  }

  MachineOperand &MO2 = Cond[2];
  switch (MO2.getReg()) {
  case AMDGPU::PRED_SEL_ZERO:
    MO2.setReg(AMDGPU::PRED_SEL_ONE);
    break;
  case AMDGPU::PRED_SEL_ONE:
    MO2.setReg(AMDGPU::PRED_SEL_ZERO);
    break;
  default:
    return true;
  }
  return false;
}
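
// Reversing a condition produced by AnalyzeBranch flips both the compare
// opcode and the predicate-select register, e.g. a Cond of
// (pred, OPCODE_IS_ZERO_INT, PRED_SEL_ONE) becomes
// (pred, OPCODE_IS_NOT_ZERO_INT, PRED_SEL_ZERO). A return value of true
// means the condition could not be reversed.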

bool
R600InstrInfo::DefinesPredicate(MachineInstr *MI,
                                std::vector<MachineOperand> &Pred) const {
  return isPredicateSetter(MI->getOpcode());
}


bool
R600InstrInfo::SubsumesPredicate(const SmallVectorImpl<MachineOperand> &Pred1,
                                 const SmallVectorImpl<MachineOperand> &Pred2) const {
  return false;
}


bool
R600InstrInfo::PredicateInstruction(MachineInstr *MI,
                                    const SmallVectorImpl<MachineOperand> &Pred) const {
  int PIdx = MI->findFirstPredOperandIdx();

  if (PIdx != -1) {
    MachineOperand &PMO = MI->getOperand(PIdx);
    PMO.setReg(Pred[2].getReg());
    MachineInstrBuilder MIB(*MI->getParent()->getParent(), MI);
    MIB.addReg(AMDGPU::PREDICATE_BIT, RegState::Implicit);
    return true;
  }

  return false;
}

unsigned int R600InstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
                                            const MachineInstr *MI,
                                            unsigned *PredCost) const {
  if (PredCost)
    *PredCost = 2;
  return 2;
}

int R600InstrInfo::getIndirectIndexBegin(const MachineFunction &MF) const {
  const MachineRegisterInfo &MRI = MF.getRegInfo();
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  int Offset = 0;

  if (MFI->getNumObjects() == 0) {
    return -1;
  }

  if (MRI.livein_empty()) {
    return 0;
  }

  for (MachineRegisterInfo::livein_iterator LI = MRI.livein_begin(),
                                            LE = MRI.livein_end();
       LI != LE; ++LI) {
    Offset = std::max(Offset,
                      GET_REG_INDEX(RI.getEncodingValue(LI->first)));
  }

  return Offset + 1;
}

int R600InstrInfo::getIndirectIndexEnd(const MachineFunction &MF) const {
  int Offset = 0;
  const MachineFrameInfo *MFI = MF.getFrameInfo();

  // Variable-sized objects are not supported.
  assert(!MFI->hasVarSizedObjects());

  if (MFI->getNumObjects() == 0) {
    return -1;
  }

  Offset = TM.getFrameLowering()->getFrameIndexOffset(MF, -1);

  return getIndirectIndexBegin(MF) + Offset;
}
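
// Together these two functions bound the range of register indices reserved
// for indirect addressing: it starts just past the highest live-in register
// index (so indirect accesses never clobber incoming values) and extends by
// the frame size. Both return -1 when the function has no stack objects.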

std::vector<unsigned> R600InstrInfo::getIndirectReservedRegs(
    const MachineFunction &MF) const {
  const AMDGPUFrameLowering *TFL =
      static_cast<const AMDGPUFrameLowering*>(TM.getFrameLowering());
  std::vector<unsigned> Regs;

  unsigned StackWidth = TFL->getStackWidth(MF);
  int End = getIndirectIndexEnd(MF);

  if (End == -1) {
    return Regs;
  }

  for (int Index = getIndirectIndexBegin(MF); Index <= End; ++Index) {
    unsigned SuperReg = AMDGPU::R600_Reg128RegClass.getRegister(Index);
    Regs.push_back(SuperReg);
    for (unsigned Chan = 0; Chan < StackWidth; ++Chan) {
      unsigned Reg = AMDGPU::R600_TReg32RegClass.getRegister((4 * Index) + Chan);
      Regs.push_back(Reg);
    }
  }
  return Regs;
}

unsigned R600InstrInfo::calculateIndirectAddress(unsigned RegIndex,
                                                 unsigned Channel) const {
  // XXX: Remove when we support a stack width > 2.
  assert(Channel == 0);
  return RegIndex;
}

const TargetRegisterClass *R600InstrInfo::getIndirectAddrStoreRegClass(
    unsigned SourceReg) const {
  return &AMDGPU::R600_TReg32RegClass;
}

const TargetRegisterClass *R600InstrInfo::getIndirectAddrLoadRegClass() const {
  return &AMDGPU::TRegMemRegClass;
}

MachineInstrBuilder R600InstrInfo::buildIndirectWrite(MachineBasicBlock *MBB,
                                       MachineBasicBlock::iterator I,
                                       unsigned ValueReg, unsigned Address,
                                       unsigned OffsetReg) const {
  unsigned AddrReg = AMDGPU::R600_AddrRegClass.getRegister(Address);
  MachineInstr *MOVA = buildDefaultInstruction(*MBB, I, AMDGPU::MOVA_INT_eg,
                                               AMDGPU::AR_X, OffsetReg);
  setImmOperand(MOVA, R600Operands::WRITE, 0);

  MachineInstrBuilder Mov = buildDefaultInstruction(*MBB, I, AMDGPU::MOV,
                                                    AddrReg, ValueReg)
                                .addReg(AMDGPU::AR_X, RegState::Implicit);
  setImmOperand(Mov, R600Operands::DST_REL, 1);
  return Mov;
}

MachineInstrBuilder R600InstrInfo::buildIndirectRead(MachineBasicBlock *MBB,
                                       MachineBasicBlock::iterator I,
                                       unsigned ValueReg, unsigned Address,
                                       unsigned OffsetReg) const {
  unsigned AddrReg = AMDGPU::R600_AddrRegClass.getRegister(Address);
  MachineInstr *MOVA = buildDefaultInstruction(*MBB, I, AMDGPU::MOVA_INT_eg,
                                               AMDGPU::AR_X,
                                               OffsetReg);
  setImmOperand(MOVA, R600Operands::WRITE, 0);
  MachineInstrBuilder Mov = buildDefaultInstruction(*MBB, I, AMDGPU::MOV,
                                                    ValueReg,
                                                    AddrReg)
                                .addReg(AMDGPU::AR_X, RegState::Implicit);
  setImmOperand(Mov, R600Operands::SRC0_REL, 1);

  return Mov;
}
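
// Both helpers above share one two-instruction pattern: MOVA_INT_eg loads
// the offset into the AR_X address register (WRITE is cleared, presumably
// so only the side effect on AR_X remains), then the MOV is marked DST_REL
// or SRC0_REL so the hardware applies AR_X-relative addressing to its
// destination or source, respectively.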

const TargetRegisterClass *R600InstrInfo::getSuperIndirectRegClass() const {
  return &AMDGPU::IndirectRegRegClass;
}

unsigned R600InstrInfo::getMaxAlusPerClause() const {
  return 115;
}

MachineInstrBuilder R600InstrInfo::buildDefaultInstruction(MachineBasicBlock &MBB,
                                                  MachineBasicBlock::iterator I,
                                                  unsigned Opcode,
                                                  unsigned DstReg,
                                                  unsigned Src0Reg,
                                                  unsigned Src1Reg) const {
  MachineInstrBuilder MIB = BuildMI(MBB, I, MBB.findDebugLoc(I), get(Opcode),
                                    DstReg);        // $dst

  if (Src1Reg) {
    MIB.addImm(0)     // $update_exec_mask
       .addImm(0);    // $update_predicate
  }
  MIB.addImm(1)        // $write
     .addImm(0)        // $omod
     .addImm(0)        // $dst_rel
     .addImm(0)        // $dst_clamp
     .addReg(Src0Reg)  // $src0
     .addImm(0)        // $src0_neg
     .addImm(0)        // $src0_rel
     .addImm(0)        // $src0_abs
     .addImm(-1);      // $src0_sel

  if (Src1Reg) {
    MIB.addReg(Src1Reg) // $src1
       .addImm(0)       // $src1_neg
       .addImm(0)       // $src1_rel
       .addImm(0)       // $src1_abs
       .addImm(-1);     // $src1_sel
  }

  // XXX: The r600g finalizer expects this to be 1; once we've moved the
  // scheduling to the backend, we can change the default to 0.
  MIB.addImm(1)                              // $last
     .addReg(AMDGPU::PRED_SEL_OFF)           // $pred_sel
     .addImm(0)                              // $literal
     .addImm(0);                             // $bank_swizzle

  return MIB;
}
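
// buildDefaultInstruction fills in every flag operand of a native-encoded
// ALU instruction with neutral defaults so callers only have to name the
// registers. A sketch (opcode and register names illustrative):
//   MachineInstr *Add = buildDefaultInstruction(MBB, I, AMDGPU::ADD_INT,
//                                               DstReg, LHSReg, RHSReg);
// Individual flags can then be adjusted with setImmOperand() or addFlag().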

MachineInstr *R600InstrInfo::buildMovImm(MachineBasicBlock &BB,
                                         MachineBasicBlock::iterator I,
                                         unsigned DstReg,
                                         uint64_t Imm) const {
  MachineInstr *MovImm = buildDefaultInstruction(BB, I, AMDGPU::MOV, DstReg,
                                                 AMDGPU::ALU_LITERAL_X);
  setImmOperand(MovImm, R600Operands::IMM, Imm);
  return MovImm;
}

int R600InstrInfo::getOperandIdx(const MachineInstr &MI,
                                 R600Operands::Ops Op) const {
  return getOperandIdx(MI.getOpcode(), Op);
}

int R600InstrInfo::getOperandIdx(unsigned Opcode,
                                 R600Operands::Ops Op) const {
  unsigned TargetFlags = get(Opcode).TSFlags;
  unsigned OpTableIdx;

  if (!HAS_NATIVE_OPERANDS(TargetFlags)) {
    switch (Op) {
    case R600Operands::DST: return 0;
    case R600Operands::SRC0: return 1;
    case R600Operands::SRC1: return 2;
    case R600Operands::SRC2: return 3;
    default:
      assert(!"Unknown operand type for instruction");
      return -1;
    }
  }

  if (TargetFlags & R600_InstFlag::OP1) {
    OpTableIdx = 0;
  } else if (TargetFlags & R600_InstFlag::OP2) {
    OpTableIdx = 1;
  } else {
    assert((TargetFlags & R600_InstFlag::OP3) && "OP1, OP2, or OP3 not defined "
           "for this instruction");
    OpTableIdx = 2;
  }

  return R600Operands::ALUOpTable[OpTableIdx][Op];
}

void R600InstrInfo::setImmOperand(MachineInstr *MI, R600Operands::Ops Op,
                                  int64_t Imm) const {
  int Idx = getOperandIdx(*MI, Op);
  assert(Idx != -1 && "Operand not supported for this instruction.");
  assert(MI->getOperand(Idx).isImm());
  MI->getOperand(Idx).setImm(Imm);
}

//===----------------------------------------------------------------------===//
// Instruction flag getters/setters
//===----------------------------------------------------------------------===//

bool R600InstrInfo::hasFlagOperand(const MachineInstr &MI) const {
  return GET_FLAG_OPERAND_IDX(get(MI.getOpcode()).TSFlags) != 0;
}

MachineOperand &R600InstrInfo::getFlagOp(MachineInstr *MI, unsigned SrcIdx,
                                         unsigned Flag) const {
  unsigned TargetFlags = get(MI->getOpcode()).TSFlags;
  int FlagIndex = 0;
  if (Flag != 0) {
    // If we pass something other than the default value of Flag to this
    // function, it means we want to set a flag on an instruction
    // that uses native encoding.
    assert(HAS_NATIVE_OPERANDS(TargetFlags));
    bool IsOP3 = (TargetFlags & R600_InstFlag::OP3) == R600_InstFlag::OP3;
    switch (Flag) {
    case MO_FLAG_CLAMP:
      FlagIndex = getOperandIdx(*MI, R600Operands::CLAMP);
      break;
    case MO_FLAG_MASK:
      FlagIndex = getOperandIdx(*MI, R600Operands::WRITE);
      break;
    case MO_FLAG_NOT_LAST:
    case MO_FLAG_LAST:
      FlagIndex = getOperandIdx(*MI, R600Operands::LAST);
      break;
    case MO_FLAG_NEG:
      switch (SrcIdx) {
      case 0: FlagIndex = getOperandIdx(*MI, R600Operands::SRC0_NEG); break;
      case 1: FlagIndex = getOperandIdx(*MI, R600Operands::SRC1_NEG); break;
      case 2: FlagIndex = getOperandIdx(*MI, R600Operands::SRC2_NEG); break;
      }
      break;

    case MO_FLAG_ABS:
      assert(!IsOP3 && "Cannot set absolute value modifier for OP3 "
             "instructions.");
      (void)IsOP3;
      switch (SrcIdx) {
      case 0: FlagIndex = getOperandIdx(*MI, R600Operands::SRC0_ABS); break;
      case 1: FlagIndex = getOperandIdx(*MI, R600Operands::SRC1_ABS); break;
      }
      break;

    default:
      FlagIndex = -1;
      break;
    }
    assert(FlagIndex != -1 && "Flag not supported for this instruction");
  } else {
    FlagIndex = GET_FLAG_OPERAND_IDX(TargetFlags);
    assert(FlagIndex != 0 &&
           "Instruction flags not supported for this instruction");
  }

  MachineOperand &FlagOp = MI->getOperand(FlagIndex);
  assert(FlagOp.isImm());
  return FlagOp;
}

void R600InstrInfo::addFlag(MachineInstr *MI, unsigned Operand,
                            unsigned Flag) const {
  unsigned TargetFlags = get(MI->getOpcode()).TSFlags;
  if (Flag == 0) {
    return;
  }
  if (HAS_NATIVE_OPERANDS(TargetFlags)) {
    MachineOperand &FlagOp = getFlagOp(MI, Operand, Flag);
    if (Flag == MO_FLAG_NOT_LAST) {
      clearFlag(MI, Operand, MO_FLAG_LAST);
    } else if (Flag == MO_FLAG_MASK) {
      clearFlag(MI, Operand, Flag);
    } else {
      FlagOp.setImm(1);
    }
  } else {
    MachineOperand &FlagOp = getFlagOp(MI, Operand);
    FlagOp.setImm(FlagOp.getImm() | (Flag << (NUM_MO_FLAGS * Operand)));
  }
}

void R600InstrInfo::clearFlag(MachineInstr *MI, unsigned Operand,
                              unsigned Flag) const {
  unsigned TargetFlags = get(MI->getOpcode()).TSFlags;
  if (HAS_NATIVE_OPERANDS(TargetFlags)) {
    MachineOperand &FlagOp = getFlagOp(MI, Operand, Flag);
    FlagOp.setImm(0);
  } else {
    MachineOperand &FlagOp = getFlagOp(MI);
    unsigned InstFlags = FlagOp.getImm();
    InstFlags &= ~(Flag << (NUM_MO_FLAGS * Operand));
    FlagOp.setImm(InstFlags);
  }
}
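
// For instructions without native flag operands, all per-operand flags are
// packed into a single immediate, NUM_MO_FLAGS bits per source operand.
// For example (sketch):
//   addFlag(MI, 1, MO_FLAG_NEG);
// ORs MO_FLAG_NEG into the second operand's bit-field, and
//   clearFlag(MI, 1, MO_FLAG_NEG);
// masks those same bits back out.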