//===-- R600MachineScheduler.cpp - R600 Scheduler Interface -*- C++ -*-----===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief R600 Machine Scheduler interface
// TODO: Scheduling is optimised for VLIW4 arch, modify it to support TRANS slot
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "misched"

#include "R600MachineScheduler.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/Pass.h"
#include "llvm/PassManager.h"
#include "llvm/Support/raw_ostream.h"
#include <set>

using namespace llvm;

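// Reset the per-clause bookkeeping at the start of a scheduling region and
// query the target for the maximum number of instructions allowed in an ALU
// clause and in a fetch (TEX/VTX) clause.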
void R600SchedStrategy::initialize(ScheduleDAGMI *dag) {

  DAG = dag;
  TII = static_cast<const R600InstrInfo*>(DAG->TII);
  TRI = static_cast<const R600RegisterInfo*>(DAG->TRI);
  MRI = &DAG->MRI;
  Available[IDAlu]->clear();
  Available[IDFetch]->clear();
  Available[IDOther]->clear();
  CurInstKind = IDOther;
  CurEmitted = 0;
  OccupedSlotsMask = 15;
  InstKindLimit[IDAlu] = TII->getMaxAlusPerClause();

  const AMDGPUSubtarget &ST = DAG->TM.getSubtarget<AMDGPUSubtarget>();
  InstKindLimit[IDFetch] = ST.getTexVTXClauseSize();
}

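// Move every SUnit from QSrc to QDst, clearing the source queue's id bit on
// each node so it is no longer considered part of QSrc.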
void R600SchedStrategy::MoveUnits(ReadyQueue *QSrc, ReadyQueue *QDst) {
  if (QSrc->empty())
    return;
  for (ReadyQueue::iterator I = QSrc->begin(),
       E = QSrc->end(); I != E; ++I) {
    (*I)->NodeQueueId &= ~QSrc->getID();
    QDst->push(*I);
  }
  QSrc->clear();
}

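// Pick the next instruction to schedule. The strategy keeps emitting
// instructions of the current clause kind (ALU, fetch or other) until the
// per-clause limit is reached or the matching queue runs dry, then switches
// clause type; ALU is tried first, then fetch, then everything else.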
SUnit* R600SchedStrategy::pickNode(bool &IsTopNode) {
  SUnit *SU = 0;
  IsTopNode = true;
  NextInstKind = IDOther;

  // check if we might want to switch current clause type
  bool AllowSwitchToAlu = (CurInstKind == IDOther) ||
      (CurEmitted >= InstKindLimit[CurInstKind]) ||
      (Available[CurInstKind]->empty());
  bool AllowSwitchFromAlu = (CurEmitted >= InstKindLimit[CurInstKind]) &&
      (!Available[IDFetch]->empty() || !Available[IDOther]->empty());

  if ((AllowSwitchToAlu && CurInstKind != IDAlu) ||
      (!AllowSwitchFromAlu && CurInstKind == IDAlu)) {
    // try to pick ALU
    SU = pickAlu();
    if (SU) {
      if (CurEmitted >= InstKindLimit[IDAlu])
        CurEmitted = 0;
      NextInstKind = IDAlu;
    }
  }

  if (!SU) {
    // try to pick FETCH
    SU = pickOther(IDFetch);
    if (SU)
      NextInstKind = IDFetch;
  }

  // try to pick other
  if (!SU) {
    SU = pickOther(IDOther);
    if (SU)
      NextInstKind = IDOther;
  }

  DEBUG(
      if (SU) {
        dbgs() << "picked node: ";
        SU->dump(DAG);
      } else {
        dbgs() << "NO NODE ";
        for (int i = 0; i < IDLast; ++i) {
          Available[i]->dump();
          Pending[i]->dump();
        }
        for (unsigned i = 0; i < DAG->SUnits.size(); i++) {
          const SUnit &S = DAG->SUnits[i];
          if (!S.isScheduled)
            S.dump(DAG);
        }
      }
  );

  return SU;
}

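// Update the clause bookkeeping after SU has been scheduled: reset the slot
// mask and counter on a clause-type switch, then account for the words the
// node occupies (a whole group for AluT_XYZW, one word plus one per ALU
// literal operand otherwise).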
void R600SchedStrategy::schedNode(SUnit *SU, bool IsTopNode) {

  DEBUG(dbgs() << "scheduled: ");
  DEBUG(SU->dump(DAG));

  if (NextInstKind != CurInstKind) {
    DEBUG(dbgs() << "Instruction Type Switch\n");
    if (NextInstKind != IDAlu)
      OccupedSlotsMask = 15;
    CurEmitted = 0;
    CurInstKind = NextInstKind;
  }

  if (CurInstKind == IDAlu) {
    switch (getAluKind(SU)) {
    case AluT_XYZW:
      CurEmitted += 4;
      break;
    case AluDiscarded:
      break;
    default: {
      ++CurEmitted;
      for (MachineInstr::mop_iterator It = SU->getInstr()->operands_begin(),
           E = SU->getInstr()->operands_end(); It != E; ++It) {
        MachineOperand &MO = *It;
        if (MO.isReg() && MO.getReg() == AMDGPU::ALU_LITERAL_X)
          ++CurEmitted;
      }
    }
    }
  } else {
    ++CurEmitted;
  }

  DEBUG(dbgs() << CurEmitted << " Instructions Emitted in this clause\n");

  if (CurInstKind != IDFetch) {
    MoveUnits(Pending[IDFetch], Available[IDFetch]);
  }
  MoveUnits(Pending[IDOther], Available[IDOther]);
}

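// Classify a newly released node by instruction kind and park it in the
// matching Pending queue; it is moved to an Available queue later.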
void R600SchedStrategy::releaseTopNode(SUnit *SU) {
  int IK = getInstKind(SU);

  DEBUG(dbgs() << IK << " <= ");
  DEBUG(SU->dump(DAG));

  Pending[IK]->push(SU);
}

void R600SchedStrategy::releaseBottomNode(SUnit *SU) {
}

bool R600SchedStrategy::regBelongsToClass(unsigned Reg,
                                          const TargetRegisterClass *RC) const {
  if (!TargetRegisterInfo::isVirtualRegister(Reg)) {
    return RC->contains(Reg);
  } else {
    return MRI->getRegClass(Reg) == RC;
  }
}

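// Classify an ALU instruction by the VLIW slot it can occupy: a specific
// X/Y/Z/W channel, any channel (AluAny), a whole instruction group
// (AluT_XYZW), or AluDiscarded for copies that will not survive register
// allocation.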
R600SchedStrategy::AluKind R600SchedStrategy::getAluKind(SUnit *SU) const {
  MachineInstr *MI = SU->getInstr();

  switch (MI->getOpcode()) {
  case AMDGPU::INTERP_PAIR_XY:
  case AMDGPU::INTERP_PAIR_ZW:
  case AMDGPU::INTERP_VEC_LOAD:
    return AluT_XYZW;
  case AMDGPU::COPY:
    if (TargetRegisterInfo::isPhysicalRegister(MI->getOperand(1).getReg())) {
      // %vregX = COPY Tn_X is likely to be discarded in favor of an
      // assignment of Tn_X to %vregX; don't consider it in scheduling.
      return AluDiscarded;
    } else if (MI->getOperand(1).isUndef()) {
      // MI will become a KILL; don't consider it in scheduling.
      return AluDiscarded;
    }
  default:
    break;
  }

  // Does the instruction take a whole instruction group (IG)?
  if (TII->isVector(*MI) ||
      TII->isCubeOp(MI->getOpcode()) ||
      TII->isReductionOp(MI->getOpcode()))
    return AluT_XYZW;

  // Is the result already assigned to a channel?
  unsigned DestSubReg = MI->getOperand(0).getSubReg();
  switch (DestSubReg) {
  case AMDGPU::sub0:
    return AluT_X;
  case AMDGPU::sub1:
    return AluT_Y;
  case AMDGPU::sub2:
    return AluT_Z;
  case AMDGPU::sub3:
    return AluT_W;
  default:
    break;
  }

  // Is the result already a member of an X/Y/Z/W register class?
  unsigned DestReg = MI->getOperand(0).getReg();
  if (regBelongsToClass(DestReg, &AMDGPU::R600_TReg32_XRegClass) ||
      regBelongsToClass(DestReg, &AMDGPU::R600_AddrRegClass))
    return AluT_X;
  if (regBelongsToClass(DestReg, &AMDGPU::R600_TReg32_YRegClass))
    return AluT_Y;
  if (regBelongsToClass(DestReg, &AMDGPU::R600_TReg32_ZRegClass))
    return AluT_Z;
  if (regBelongsToClass(DestReg, &AMDGPU::R600_TReg32_WRegClass))
    return AluT_W;
  if (regBelongsToClass(DestReg, &AMDGPU::R600_Reg128RegClass))
    return AluT_XYZW;

  return AluAny;
}

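// Map an instruction to the clause kind it belongs to: ALU, fetch (TEX/VTX)
// or other.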
int R600SchedStrategy::getInstKind(SUnit* SU) {
  int Opcode = SU->getInstr()->getOpcode();

  if (TII->isALUInstr(Opcode)) {
    return IDAlu;
  }

  switch (Opcode) {
  case AMDGPU::COPY:
  case AMDGPU::CONST_COPY:
  case AMDGPU::INTERP_PAIR_XY:
  case AMDGPU::INTERP_PAIR_ZW:
  case AMDGPU::INTERP_VEC_LOAD:
  case AMDGPU::DOT4_eg_pseudo:
  case AMDGPU::DOT4_r600_pseudo:
    return IDAlu;
  case AMDGPU::TEX_VTX_CONSTBUF:
  case AMDGPU::TEX_VTX_TEXBUF:
  case AMDGPU::TEX_LD:
  case AMDGPU::TEX_GET_TEXTURE_RESINFO:
  case AMDGPU::TEX_GET_GRADIENTS_H:
  case AMDGPU::TEX_GET_GRADIENTS_V:
  case AMDGPU::TEX_SET_GRADIENTS_H:
  case AMDGPU::TEX_SET_GRADIENTS_V:
  case AMDGPU::TEX_SAMPLE:
  case AMDGPU::TEX_SAMPLE_C:
  case AMDGPU::TEX_SAMPLE_L:
  case AMDGPU::TEX_SAMPLE_C_L:
  case AMDGPU::TEX_SAMPLE_LB:
  case AMDGPU::TEX_SAMPLE_C_LB:
  case AMDGPU::TEX_SAMPLE_G:
  case AMDGPU::TEX_SAMPLE_C_G:
  case AMDGPU::TXD:
  case AMDGPU::TXD_SHADOW:
    return IDFetch;
  default:
    DEBUG(
        dbgs() << "other inst: ";
        SU->dump(DAG);
    );
    return IDOther;
  }
}

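// Pop the first node from Q (ordered by CompareSUnit) that can still be
// bundled with the instructions already picked for the current group;
// return NULL if none fits.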
SUnit *R600SchedStrategy::PopInst(std::multiset<SUnit *, CompareSUnit> &Q) {
  if (Q.empty())
    return NULL;
  for (std::set<SUnit *, CompareSUnit>::iterator It = Q.begin(), E = Q.end();
       It != E; ++It) {
    SUnit *SU = *It;
    InstructionsGroupCandidate.push_back(SU->getInstr());
    if (TII->canBundle(InstructionsGroupCandidate)) {
      InstructionsGroupCandidate.pop_back();
      Q.erase(It);
      return SU;
    } else {
      InstructionsGroupCandidate.pop_back();
    }
  }
  return NULL;
}

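// Drain Pending[IDAlu] and sort each node into the AvailableAlus bucket that
// matches its slot kind.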
void R600SchedStrategy::LoadAlu() {
  ReadyQueue *QSrc = Pending[IDAlu];
  for (ReadyQueue::iterator I = QSrc->begin(),
       E = QSrc->end(); I != E; ++I) {
    (*I)->NodeQueueId &= ~QSrc->getID();
    AluKind AK = getAluKind(*I);
    AvailableAlus[AK].insert(*I);
  }
  QSrc->clear();
}

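// Start a new instruction group: clear the slot mask and the bundle
// candidate list, then refill the per-slot ALU buckets.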
void R600SchedStrategy::PrepareNextSlot() {
  DEBUG(dbgs() << "New Slot\n");
  assert(OccupedSlotsMask && "Slot wasn't filled");
  OccupedSlotsMask = 0;
  InstructionsGroupCandidate.clear();
  LoadAlu();
}

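// Constrain the destination register of MI to the register class of the
// given slot (X/Y/Z/W), unless the destination is also read by MI.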
void R600SchedStrategy::AssignSlot(MachineInstr* MI, unsigned Slot) {
  unsigned DestReg = MI->getOperand(0).getReg();
  // The register pressure tracker crashes if an operand is both defined and
  // used in the same instruction and we try to constrain its regclass.
  for (MachineInstr::mop_iterator It = MI->operands_begin(),
       E = MI->operands_end(); It != E; ++It) {
    MachineOperand &MO = *It;
    if (MO.isReg() && !MO.isDef() &&
        MO.getReg() == MI->getOperand(0).getReg())
      return;
  }
  // Constrain the regclass of DestReg so the result is assigned to Slot.
  switch (Slot) {
  case 0:
    MRI->constrainRegClass(DestReg, &AMDGPU::R600_TReg32_XRegClass);
    break;
  case 1:
    MRI->constrainRegClass(DestReg, &AMDGPU::R600_TReg32_YRegClass);
    break;
  case 2:
    MRI->constrainRegClass(DestReg, &AMDGPU::R600_TReg32_ZRegClass);
    break;
  case 3:
    MRI->constrainRegClass(DestReg, &AMDGPU::R600_TReg32_WRegClass);
    break;
  }
}

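// Try to fill the given slot from the channel-specific bucket and the AluAny
// bucket; if candidates exist in both, the one that compares lower wins and
// the loser is put back in its queue.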
SUnit *R600SchedStrategy::AttemptFillSlot(unsigned Slot) {
  static const AluKind IndexToID[] = {AluT_X, AluT_Y, AluT_Z, AluT_W};
  SUnit *SlotedSU = PopInst(AvailableAlus[IndexToID[Slot]]);
  SUnit *UnslotedSU = PopInst(AvailableAlus[AluAny]);
  if (!UnslotedSU) {
    return SlotedSU;
  } else if (!SlotedSU) {
    AssignSlot(UnslotedSU->getInstr(), Slot);
    return UnslotedSU;
  } else {
    // Both are available: pick the one that compares lower under
    // CompareSUnit and put the other back.
    if (CompareSUnit()(SlotedSU, UnslotedSU)) {
      AvailableAlus[AluAny].insert(UnslotedSU);
      return SlotedSU;
    } else {
      AvailableAlus[IndexToID[Slot]].insert(SlotedSU);
      AssignSlot(UnslotedSU->getInstr(), Slot);
      return UnslotedSU;
    }
  }
}

bool R600SchedStrategy::isAvailablesAluEmpty() const {
  return Pending[IDAlu]->empty() && AvailableAlus[AluAny].empty() &&
      AvailableAlus[AluT_XYZW].empty() && AvailableAlus[AluT_X].empty() &&
      AvailableAlus[AluT_Y].empty() && AvailableAlus[AluT_Z].empty() &&
      AvailableAlus[AluT_W].empty() && AvailableAlus[AluDiscarded].empty();
}

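// Pick the next ALU instruction, filling the X/Y/Z/W slots of the current
// instruction group one at a time; discarded copies and whole-group (XYZW)
// instructions are only emitted on group boundaries.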
SUnit* R600SchedStrategy::pickAlu() {
  while (!isAvailablesAluEmpty()) {
    if (!OccupedSlotsMask) {
      // Flush physical reg copies (RA will discard them)
      if (!AvailableAlus[AluDiscarded].empty()) {
        OccupedSlotsMask = 15;
        return PopInst(AvailableAlus[AluDiscarded]);
      }
      // If there is a T_XYZW alu available, use it
      if (!AvailableAlus[AluT_XYZW].empty()) {
        OccupedSlotsMask = 15;
        return PopInst(AvailableAlus[AluT_XYZW]);
      }
    }
    for (unsigned Chan = 0; Chan < 4; ++Chan) {
      bool isOccupied = OccupedSlotsMask & (1 << Chan);
      if (!isOccupied) {
        SUnit *SU = AttemptFillSlot(Chan);
        if (SU) {
          OccupedSlotsMask |= (1 << Chan);
          InstructionsGroupCandidate.push_back(SU->getInstr());
          return SU;
        }
      }
    }
    PrepareNextSlot();
  }
  return NULL;
}

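// Pick the next node from the given non-ALU queue, refilling it from the
// matching Pending queue first if it is empty.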
SUnit* R600SchedStrategy::pickOther(int QID) {
  SUnit *SU = 0;
  ReadyQueue *AQ = Available[QID];

  if (AQ->empty()) {
    MoveUnits(Pending[QID], AQ);
  }
  if (!AQ->empty()) {
    SU = *AQ->begin();
    AQ->remove(AQ->begin());
  }
  return SU;
}