//===-- SIPrepareScratchRegs.cpp - Prepare scratch registers for spilling -===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
///
/// This pass loads the scratch pointer and scratch offset into a register or
/// a frame index which can be used anywhere in the program. These values will
/// be used for spilling VGPRs.
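/// The chosen register (or a reload from the chosen frame index) is then
/// attached as explicit operands to every SI_SPILL_V* pseudo instruction in
/// the function.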
///
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "SIDefines.h"
#include "SIInstrInfo.h"
#include "SIMachineFunctionInfo.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/LLVMContext.h"

using namespace llvm;

namespace {

class SIPrepareScratchRegs : public MachineFunctionPass {

private:
  static char ID;

public:
  SIPrepareScratchRegs() : MachineFunctionPass(ID) { }

  bool runOnMachineFunction(MachineFunction &MF) override;

  const char *getPassName() const override {
    return "SI prepare scratch registers";
  }

};

} // End anonymous namespace

char SIPrepareScratchRegs::ID = 0;

FunctionPass *llvm::createSIPrepareScratchRegs() {
  return new SIPrepareScratchRegs();
}

bool SIPrepareScratchRegs::runOnMachineFunction(MachineFunction &MF) {
  SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  const SIInstrInfo *TII =
      static_cast<const SIInstrInfo *>(MF.getSubtarget().getInstrInfo());
  const SIRegisterInfo *TRI = &TII->getRegisterInfo();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  MachineFrameInfo *FrameInfo = MF.getFrameInfo();
  MachineBasicBlock *Entry = MF.begin();
  MachineBasicBlock::iterator I = Entry->begin();
  DebugLoc DL = I->getDebugLoc();

  // FIXME: If we don't have enough VGPRs for SGPR spilling we will need to
  // run this pass.
  if (!MFI->hasSpilledVGPRs())
    return false;

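  // Ask the target which SGPRs the scratch pointer and scratch wave offset
  // are preloaded into, and make sure those registers are live into the
  // entry block.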
  unsigned ScratchPtrPreloadReg =
      TRI->getPreloadedValue(MF, SIRegisterInfo::SCRATCH_PTR);
  unsigned ScratchOffsetPreloadReg =
      TRI->getPreloadedValue(MF, SIRegisterInfo::SCRATCH_WAVE_OFFSET);

  if (!Entry->isLiveIn(ScratchPtrPreloadReg))
    Entry->addLiveIn(ScratchPtrPreloadReg);

  if (!Entry->isLiveIn(ScratchOffsetPreloadReg))
    Entry->addLiveIn(ScratchOffsetPreloadReg);

  // Load the scratch pointer.
  unsigned ScratchPtrReg =
      TRI->findUnusedRegister(MRI, &AMDGPU::SGPR_64RegClass);
  int ScratchPtrFI = -1;

  if (ScratchPtrReg != AMDGPU::NoRegister) {
    // Found an SGPR to use.
    MRI.setPhysRegUsed(ScratchPtrReg);
    BuildMI(*Entry, I, DL, TII->get(AMDGPU::S_MOV_B64), ScratchPtrReg)
        .addReg(ScratchPtrPreloadReg);
  } else {
    // No SGPR is available, we must spill.
    ScratchPtrFI = FrameInfo->CreateSpillStackObject(8, 4);
    BuildMI(*Entry, I, DL, TII->get(AMDGPU::SI_SPILL_S64_SAVE))
        .addReg(ScratchPtrPreloadReg)
        .addFrameIndex(ScratchPtrFI);
  }

  // Load the scratch offset.
  unsigned ScratchOffsetReg =
      TRI->findUnusedRegister(MRI, &AMDGPU::SGPR_32RegClass);
  int ScratchOffsetFI = -1;

  if (ScratchOffsetReg != AMDGPU::NoRegister) {
    // Found an SGPR to use.
    MRI.setPhysRegUsed(ScratchOffsetReg);
    BuildMI(*Entry, I, DL, TII->get(AMDGPU::S_MOV_B32), ScratchOffsetReg)
        .addReg(ScratchOffsetPreloadReg);
  } else {
    // No SGPR is available, we must spill.
    ScratchOffsetFI = FrameInfo->CreateSpillStackObject(4, 4);
    BuildMI(*Entry, I, DL, TII->get(AMDGPU::SI_SPILL_S32_SAVE))
        .addReg(ScratchOffsetPreloadReg)
        .addFrameIndex(ScratchOffsetFI);
  }

  // Now that we have the scratch pointer and offset values, we need to
  // add them to all the SI_SPILL_V* instructions.

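  // If either value had to be spilled to a stack slot above, a register
  // scavenger is needed to find a free SGPR to reload it into before it can
  // be attached to the SI_SPILL_V* instructions.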
  RegScavenger RS;
  bool UseRegScavenger =
      (ScratchPtrReg == AMDGPU::NoRegister ||
       ScratchOffsetReg == AMDGPU::NoRegister);
  for (MachineFunction::iterator BI = MF.begin(), BE = MF.end();
       BI != BE; ++BI) {

    MachineBasicBlock &MBB = *BI;
    if (UseRegScavenger)
      RS.enterBasicBlock(&MBB);

    for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end();
         I != E; ++I) {
      MachineInstr &MI = *I;
      DebugLoc DL = MI.getDebugLoc();
      switch (MI.getOpcode()) {
      default: break;
      case AMDGPU::SI_SPILL_V512_SAVE:
      case AMDGPU::SI_SPILL_V256_SAVE:
      case AMDGPU::SI_SPILL_V128_SAVE:
      case AMDGPU::SI_SPILL_V96_SAVE:
      case AMDGPU::SI_SPILL_V64_SAVE:
      case AMDGPU::SI_SPILL_V32_SAVE:
      case AMDGPU::SI_SPILL_V32_RESTORE:
      case AMDGPU::SI_SPILL_V64_RESTORE:
      case AMDGPU::SI_SPILL_V128_RESTORE:
      case AMDGPU::SI_SPILL_V256_RESTORE:
      case AMDGPU::SI_SPILL_V512_RESTORE:

        // Scratch pointer
        if (ScratchPtrReg == AMDGPU::NoRegister) {
          ScratchPtrReg = RS.scavengeRegister(&AMDGPU::SGPR_64RegClass, 0);
          BuildMI(MBB, I, DL, TII->get(AMDGPU::SI_SPILL_S64_RESTORE),
                  ScratchPtrReg)
              .addFrameIndex(ScratchPtrFI)
              .addReg(AMDGPU::NoRegister)
              .addReg(AMDGPU::NoRegister);
        } else if (!MBB.isLiveIn(ScratchPtrReg)) {
          MBB.addLiveIn(ScratchPtrReg);
        }

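        // Scratch offset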
        if (ScratchOffsetReg == AMDGPU::NoRegister) {
          ScratchOffsetReg = RS.scavengeRegister(&AMDGPU::SGPR_32RegClass, 0);
          BuildMI(MBB, I, DL, TII->get(AMDGPU::SI_SPILL_S32_RESTORE),
                  ScratchOffsetReg)
              .addFrameIndex(ScratchOffsetFI)
              .addReg(AMDGPU::NoRegister)
              .addReg(AMDGPU::NoRegister);
        } else if (!MBB.isLiveIn(ScratchOffsetReg)) {
          MBB.addLiveIn(ScratchOffsetReg);
        }

        if (ScratchPtrReg == AMDGPU::NoRegister ||
            ScratchOffsetReg == AMDGPU::NoRegister) {
          LLVMContext &Ctx = MF.getFunction()->getContext();
          Ctx.emitError("ran out of SGPRs for spilling VGPRs");
          ScratchPtrReg = AMDGPU::SGPR0;
          ScratchOffsetReg = AMDGPU::SGPR0;
        }
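        // Attach the scratch pointer and offset as the pseudo's third and
        // fourth operands (operand indices 2 and 3), where they are picked up
        // when the spill pseudo is later lowered to real memory instructions.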
        MI.getOperand(2).setReg(ScratchPtrReg);
        MI.getOperand(3).setReg(ScratchOffsetReg);

        break;
      }
      if (UseRegScavenger)
        RS.forward();
    }
  }
  return true;
}