//===----------------------- SIFrameLowering.cpp --------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "SIFrameLowering.h"
#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "SIMachineFunctionInfo.h"
#include "SIRegisterInfo.h"

#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/RegisterScavenging.h"

using namespace llvm;

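// Each SGPR_128 tuple covers four consecutive 32-bit SGPRs, so dividing the
// subtarget's maximum SGPR count by 4 yields the number of usable 4-register
// tuples.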
static ArrayRef<MCPhysReg> getAllSGPR128(const SISubtarget &ST,
                                         const MachineFunction &MF) {
  return makeArrayRef(AMDGPU::SGPR_128RegClass.begin(),
                      ST.getMaxNumSGPRs(MF) / 4);
}

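// All individual 32-bit SGPRs addressable on this subtarget, in ascending
// order.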
static ArrayRef<MCPhysReg> getAllSGPRs(const SISubtarget &ST,
                                       const MachineFunction &MF) {
  return makeArrayRef(AMDGPU::SGPR_32RegClass.begin(),
                      ST.getMaxNumSGPRs(MF));
}

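// Set up the flat_scratch register pair from the preloaded FLAT_SCRATCH_INIT
// user SGPRs and the per-wave scratch offset, so flat instructions can access
// scratch memory.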
void SIFrameLowering::emitFlatScratchInit(const SISubtarget &ST,
                                          MachineFunction &MF,
                                          MachineBasicBlock &MBB) const {
  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo *TRI = &TII->getRegisterInfo();

  // We don't need this if we only have spills since there is no user-facing
  // scratch.

  // TODO: If we know we don't have flat instructions earlier, we can omit
  // this from the input registers.
  //
  // TODO: We only need to know if we access scratch space through a flat
  // pointer. Because we only detect if flat instructions are used at all,
  // this will be used more often than necessary on VI.

  // Debug location must be unknown since the first debug location is used to
  // determine the end of the prologue.
  DebugLoc DL;
  MachineBasicBlock::iterator I = MBB.begin();

  unsigned FlatScratchInitReg
    = TRI->getPreloadedValue(MF, SIRegisterInfo::FLAT_SCRATCH_INIT);

  MachineRegisterInfo &MRI = MF.getRegInfo();
  MRI.addLiveIn(FlatScratchInitReg);
  MBB.addLiveIn(FlatScratchInitReg);

  unsigned FlatScrInitLo = TRI->getSubReg(FlatScratchInitReg, AMDGPU::sub0);
  unsigned FlatScrInitHi = TRI->getSubReg(FlatScratchInitReg, AMDGPU::sub1);

  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  unsigned ScratchWaveOffsetReg = MFI->getScratchWaveOffsetReg();

  // Do a 64-bit pointer add.
  if (ST.flatScratchIsPointer()) {
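    // Roughly the sequence emitted here, with the preloaded inputs in angle
    // brackets:
    //   s_add_u32  flat_scratch_lo, <flat_scratch_init.lo>, <wave_offset>
    //   s_addc_u32 flat_scratch_hi, <flat_scratch_init.hi>, 0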
    BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ADD_U32), AMDGPU::FLAT_SCR_LO)
      .addReg(FlatScrInitLo)
      .addReg(ScratchWaveOffsetReg);
    BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ADDC_U32), AMDGPU::FLAT_SCR_HI)
      .addReg(FlatScrInitHi)
      .addImm(0);

    return;
  }

  // Copy the size in bytes.
  BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), AMDGPU::FLAT_SCR_LO)
    .addReg(FlatScrInitHi, RegState::Kill);

  // Add wave offset in bytes to private base offset.
  // See comment in AMDKernelCodeT.h for enable_sgpr_flat_scratch_init.
  BuildMI(MBB, I, DL, TII->get(AMDGPU::S_ADD_U32), FlatScrInitLo)
    .addReg(FlatScrInitLo)
    .addReg(ScratchWaveOffsetReg);

  // Convert offset to 256-byte units.
  BuildMI(MBB, I, DL, TII->get(AMDGPU::S_LSHR_B32), AMDGPU::FLAT_SCR_HI)
    .addReg(FlatScrInitLo, RegState::Kill)
    .addImm(8);
}

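// Pick the physical SGPR tuple that will hold the scratch resource
// descriptor, shifting the default reservation down next to the SGPRs that
// are actually in use when it is safe to do so.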
unsigned SIFrameLowering::getReservedPrivateSegmentBufferReg(
  const SISubtarget &ST,
  const SIInstrInfo *TII,
  const SIRegisterInfo *TRI,
  SIMachineFunctionInfo *MFI,
  MachineFunction &MF) const {

  // We need to insert initialization of the scratch resource descriptor.
  unsigned ScratchRsrcReg = MFI->getScratchRSrcReg();
  if (ScratchRsrcReg == AMDGPU::NoRegister)
    return AMDGPU::NoRegister;

  if (ST.hasSGPRInitBug() ||
      ScratchRsrcReg != TRI->reservedPrivateSegmentBufferReg(MF))
    return ScratchRsrcReg;

  // We reserved the last registers for this. Shift it down to the end of those
  // which were actually used.
  //
  // FIXME: It might be safer to use a pseudoregister before replacement.

  // FIXME: We should be able to eliminate unused input registers; only the
  // resources required for scratch access cannot be eliminated. For now we
  // skip over user SGPRs and may leave unused holes.

  // We find the resource first because it has an alignment requirement.

  MachineRegisterInfo &MRI = MF.getRegInfo();

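  // Round the preloaded SGPR count up to 128-bit tuple granularity; any tuple
  // that overlaps a preloaded register is unusable for the descriptor.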
  unsigned NumPreloaded = (MFI->getNumPreloadedSGPRs() + 3) / 4;
  ArrayRef<MCPhysReg> AllSGPR128s = getAllSGPR128(ST, MF);
  AllSGPR128s = AllSGPR128s.slice(
    std::min(static_cast<unsigned>(AllSGPR128s.size()), NumPreloaded));

  // Skip the last N reserved elements because they should have already been
  // reserved for VCC etc.
  for (MCPhysReg Reg : AllSGPR128s) {
    // Pick the first unallocated one. Make sure we don't clobber the other
    // reserved input we needed.
    if (!MRI.isPhysRegUsed(Reg) && MRI.isAllocatable(Reg)) {
      MRI.replaceRegWith(ScratchRsrcReg, Reg);
      MFI->setScratchRSrcReg(Reg);
      return Reg;
    }
  }

  return ScratchRsrcReg;
}

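// Same idea for the single SGPR holding the per-wave scratch byte offset:
// shift it down from the default reserved slot to the first free SGPR.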
unsigned SIFrameLowering::getReservedPrivateSegmentWaveByteOffsetReg(
  const SISubtarget &ST,
  const SIInstrInfo *TII,
  const SIRegisterInfo *TRI,
  SIMachineFunctionInfo *MFI,
  MachineFunction &MF) const {
  unsigned ScratchWaveOffsetReg = MFI->getScratchWaveOffsetReg();
  if (ST.hasSGPRInitBug() ||
      ScratchWaveOffsetReg != TRI->reservedPrivateSegmentWaveByteOffsetReg(MF))
    return ScratchWaveOffsetReg;

  unsigned ScratchRsrcReg = MFI->getScratchRSrcReg();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  unsigned NumPreloaded = MFI->getNumPreloadedSGPRs();

  ArrayRef<MCPhysReg> AllSGPRs = getAllSGPRs(ST, MF);
  if (NumPreloaded > AllSGPRs.size())
    return ScratchWaveOffsetReg;

  AllSGPRs = AllSGPRs.slice(NumPreloaded);

  // We need to drop registers from the end of the list that we cannot use
  // for the scratch wave offset.
  // + 2 because s102 and s103 do not exist on VI.
  // + 2 for vcc
  // + 2 for xnack_mask
  // + 2 for flat_scratch
  // + 4 for registers reserved for scratch resource register
  // + 1 for register reserved for scratch wave offset. (By excluding this
  //     register from the list to consider, it means that when this
  //     register is being used for the scratch wave offset and there
  //     are no other free SGPRs, then the value will stay in this register.)
  // ----
  //  13
  if (AllSGPRs.size() < 13)
    return ScratchWaveOffsetReg;

  for (MCPhysReg Reg : AllSGPRs.drop_back(13)) {
    // Pick the first unallocated SGPR. Be careful not to pick an alias of the
    // scratch descriptor, since we haven't added its uses yet.
    if (!MRI.isPhysRegUsed(Reg)) {
      if (!MRI.isAllocatable(Reg) ||
          TRI->isSubRegisterEq(ScratchRsrcReg, Reg))
        continue;

      MRI.replaceRegWith(ScratchWaveOffsetReg, Reg);
      MFI->setScratchWaveOffsetReg(Reg);
      return Reg;
    }
  }

  return ScratchWaveOffsetReg;
}

void SIFrameLowering::emitPrologue(MachineFunction &MF,
                                   MachineBasicBlock &MBB) const {
  // Emit debugger prologue if "amdgpu-debugger-emit-prologue" attribute was
  // specified.
  const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
  if (ST.debuggerEmitPrologue())
    emitDebuggerPrologue(MF, MBB);

  assert(&MF.front() == &MBB && "Shrink-wrapping not yet supported");

  SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

  // If we only have SGPR spills, we won't actually be using scratch memory
  // since these spill to VGPRs.
  //
  // FIXME: We should be cleaning up these unused SGPR spill frame indices
  // somewhere.

  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo *TRI = &TII->getRegisterInfo();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  unsigned ScratchRsrcReg
    = getReservedPrivateSegmentBufferReg(ST, TII, TRI, MFI, MF);
  unsigned ScratchWaveOffsetReg
    = getReservedPrivateSegmentWaveByteOffsetReg(ST, TII, TRI, MFI, MF);

  if (ScratchRsrcReg == AMDGPU::NoRegister) {
    assert(ScratchWaveOffsetReg == AMDGPU::NoRegister);
    return;
  }

  assert(!TRI->isSubRegister(ScratchRsrcReg, ScratchWaveOffsetReg));

  // We need to do the replacement of the private segment buffer and wave
  // offset register even if there are no stack objects. There could be stores
  // to undef or a constant without an associated object.

  // FIXME: We still have implicit uses on SGPR spill instructions in case they
  // need to spill to vector memory. It's likely that will not happen, but at
  // this point it appears we need the setup. This part of the prolog should be
  // emitted after frame indices are eliminated.

  if (MF.getFrameInfo().hasStackObjects() && MFI->hasFlatScratchInit())
    emitFlatScratchInit(ST, MF, MBB);

  // We need to insert initialization of the scratch resource descriptor.
  unsigned PreloadedScratchWaveOffsetReg = TRI->getPreloadedValue(
    MF, SIRegisterInfo::PRIVATE_SEGMENT_WAVE_BYTE_OFFSET);

  unsigned PreloadedPrivateBufferReg = AMDGPU::NoRegister;
  if (ST.isAmdCodeObjectV2(MF) || ST.isMesaGfxShader(MF)) {
    PreloadedPrivateBufferReg = TRI->getPreloadedValue(
      MF, SIRegisterInfo::PRIVATE_SEGMENT_BUFFER);
  }

  bool OffsetRegUsed = !MRI.use_empty(ScratchWaveOffsetReg);
  bool ResourceRegUsed = !MRI.use_empty(ScratchRsrcReg);

  // We added live-ins during argument lowering, but since they were not used
  // they were deleted. We're adding the uses now, so add them back.
  if (OffsetRegUsed) {
    assert(PreloadedScratchWaveOffsetReg != AMDGPU::NoRegister &&
           "scratch wave offset input is required");
    MRI.addLiveIn(PreloadedScratchWaveOffsetReg);
    MBB.addLiveIn(PreloadedScratchWaveOffsetReg);
  }

  if (ResourceRegUsed && PreloadedPrivateBufferReg != AMDGPU::NoRegister) {
    assert(ST.isAmdCodeObjectV2(MF) || ST.isMesaGfxShader(MF));
    MRI.addLiveIn(PreloadedPrivateBufferReg);
    MBB.addLiveIn(PreloadedPrivateBufferReg);
  }

  // Make the selected registers live throughout the function.
  for (MachineBasicBlock &OtherBB : MF) {
    if (&OtherBB == &MBB)
      continue;

    if (OffsetRegUsed)
      OtherBB.addLiveIn(ScratchWaveOffsetReg);

    if (ResourceRegUsed)
      OtherBB.addLiveIn(ScratchRsrcReg);
  }

  DebugLoc DL;
  MachineBasicBlock::iterator I = MBB.begin();

  // If we reserved the original input registers, we don't need to copy to the
  // reserved registers.

  bool CopyBuffer = ResourceRegUsed &&
    PreloadedPrivateBufferReg != AMDGPU::NoRegister &&
    ST.isAmdCodeObjectV2(MF) &&
    ScratchRsrcReg != PreloadedPrivateBufferReg;

  // This needs to be careful of the copying order to avoid overwriting one of
  // the input registers before it's been copied to its final
  // destination. Usually the offset should be copied first.
  bool CopyBufferFirst = TRI->isSubRegisterEq(PreloadedPrivateBufferReg,
                                              ScratchWaveOffsetReg);
  if (CopyBuffer && CopyBufferFirst) {
    BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), ScratchRsrcReg)
      .addReg(PreloadedPrivateBufferReg, RegState::Kill);
  }

  if (OffsetRegUsed &&
      PreloadedScratchWaveOffsetReg != ScratchWaveOffsetReg) {
    BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), ScratchWaveOffsetReg)
      .addReg(PreloadedScratchWaveOffsetReg, RegState::Kill);
  }

  if (CopyBuffer && !CopyBufferFirst) {
    BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), ScratchRsrcReg)
      .addReg(PreloadedPrivateBufferReg, RegState::Kill);
  }

  if (ResourceRegUsed && (ST.isMesaGfxShader(MF) ||
                          (PreloadedPrivateBufferReg == AMDGPU::NoRegister))) {
    assert(!ST.isAmdCodeObjectV2(MF));
    const MCInstrDesc &SMovB32 = TII->get(AMDGPU::S_MOV_B32);

    unsigned Rsrc2 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub2);
    unsigned Rsrc3 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub3);

    // Use relocations to get the pointer, and setup the other bits manually.
    uint64_t Rsrc23 = TII->getScratchRsrcWords23();
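    // Rsrc23 packs dwords 2 and 3 of the buffer resource descriptor; it is
    // split into the two 32-bit S_MOV_B32s at the end of this block.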

    if (MFI->hasPrivateMemoryInputPtr()) {
      unsigned Rsrc01 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub0_sub1);

      if (AMDGPU::isCompute(MF.getFunction()->getCallingConv())) {
        const MCInstrDesc &Mov64 = TII->get(AMDGPU::S_MOV_B64);

        BuildMI(MBB, I, DL, Mov64, Rsrc01)
          .addReg(PreloadedPrivateBufferReg)
          .addReg(ScratchRsrcReg, RegState::ImplicitDefine);
      } else {
        const MCInstrDesc &LoadDwordX2 = TII->get(AMDGPU::S_LOAD_DWORDX2_IMM);

        PointerType *PtrTy =
          PointerType::get(Type::getInt64Ty(MF.getFunction()->getContext()),
                           AMDGPUAS::CONSTANT_ADDRESS);
        MachinePointerInfo PtrInfo(UndefValue::get(PtrTy));
        auto MMO = MF.getMachineMemOperand(PtrInfo,
                                           MachineMemOperand::MOLoad |
                                           MachineMemOperand::MOInvariant |
                                           MachineMemOperand::MODereferenceable,
                                           0, 0);
        BuildMI(MBB, I, DL, LoadDwordX2, Rsrc01)
          .addReg(PreloadedPrivateBufferReg)
          .addImm(0) // offset
          .addImm(0) // glc
          .addMemOperand(MMO)
          .addReg(ScratchRsrcReg, RegState::ImplicitDefine);
      }
    } else {
      unsigned Rsrc0 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub0);
      unsigned Rsrc1 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub1);

      BuildMI(MBB, I, DL, SMovB32, Rsrc0)
        .addExternalSymbol("SCRATCH_RSRC_DWORD0")
        .addReg(ScratchRsrcReg, RegState::ImplicitDefine);

      BuildMI(MBB, I, DL, SMovB32, Rsrc1)
        .addExternalSymbol("SCRATCH_RSRC_DWORD1")
        .addReg(ScratchRsrcReg, RegState::ImplicitDefine);
    }

    BuildMI(MBB, I, DL, SMovB32, Rsrc2)
      .addImm(Rsrc23 & 0xffffffff)
      .addReg(ScratchRsrcReg, RegState::ImplicitDefine);

    BuildMI(MBB, I, DL, SMovB32, Rsrc3)
      .addImm(Rsrc23 >> 32)
      .addReg(ScratchRsrcReg, RegState::ImplicitDefine);
  }
}

void SIFrameLowering::emitEpilogue(MachineFunction &MF,
                                   MachineBasicBlock &MBB) const {
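  // Intentionally empty: no epilogue code is needed for kernels at this point.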
}

static bool allStackObjectsAreDead(const MachineFrameInfo &MFI) {
  for (int I = MFI.getObjectIndexBegin(), E = MFI.getObjectIndexEnd();
       I != E; ++I) {
    if (!MFI.isDeadObjectIndex(I))
      return false;
  }

  return true;
}

void SIFrameLowering::processFunctionBeforeFrameFinalized(
  MachineFunction &MF,
  RegScavenger *RS) const {
  MachineFrameInfo &MFI = MF.getFrameInfo();

  if (!MFI.hasStackObjects())
    return;

  const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo &TRI = TII->getRegisterInfo();
  SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();
  bool AllSGPRSpilledToVGPRs = false;

  if (TRI.spillSGPRToVGPR() && FuncInfo->hasSpilledSGPRs()) {
    AllSGPRSpilledToVGPRs = true;

    // Process all SGPR spills before frame offsets are finalized. Ideally
    // SGPRs are spilled to VGPRs, in which case we can eliminate the stack
    // usage.
    //
    // XXX - This operates under the assumption that only other SGPR spills
    // are users of the frame index. I'm not 100% sure this is correct. The
    // StackColoring pass has a comment saying a future improvement would be
    // merging allocas with spill slots, but for now, according to
    // MachineFrameInfo, isSpillSlot can't alias any other object.
    for (MachineBasicBlock &MBB : MF) {
      MachineBasicBlock::iterator Next;
      for (auto I = MBB.begin(), E = MBB.end(); I != E; I = Next) {
        MachineInstr &MI = *I;
        Next = std::next(I);

        if (TII->isSGPRSpill(MI)) {
          int FI = TII->getNamedOperand(MI, AMDGPU::OpName::addr)->getIndex();
          if (FuncInfo->allocateSGPRSpillToVGPR(MF, FI)) {
            bool Spilled = TRI.eliminateSGPRToVGPRSpillFrameIndex(MI, FI, RS);
            (void)Spilled;
            assert(Spilled && "failed to spill SGPR to VGPR when allocated");
          } else
            AllSGPRSpilledToVGPRs = false;
        }
      }
    }

    FuncInfo->removeSGPRToVGPRFrameIndices(MFI);
  }

  // FIXME: The other checks should be redundant with allStackObjectsAreDead,
  // but currently hasNonSpillStackObjects is set only from source
  // allocas. Stack temps produced from legalization are not counted currently.
  if (FuncInfo->hasNonSpillStackObjects() || FuncInfo->hasSpilledVGPRs() ||
      !AllSGPRSpilledToVGPRs || !allStackObjectsAreDead(MFI)) {
    assert(RS && "RegScavenger required if spilling");

    // We force this to be at offset 0 so no user object ever has 0 as an
    // address, so we may use 0 as an invalid pointer value. This is because
    // LLVM assumes 0 is an invalid pointer in address space 0. Because alloca
    // is required to be address space 0, we are forced to accept this for
    // now. Ideally we could have the stack in another address space with 0 as
    // a valid pointer, and -1 as the null value.
    //
    // This will also waste additional space when user stack objects require
    // more than 4 byte alignment.
    //
    // The main cost here is losing the offset for addressing modes. However
    // this also ensures we shouldn't need a register for the offset when
    // emergency scavenging.
    int ScavengeFI = MFI.CreateFixedObject(
      AMDGPU::SGPR_32RegClass.getSize(), 0, false);
    RS->addScavengingFrameIndex(ScavengeFI);
  }
}

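// Spill the work group IDs and work item IDs for each dimension to fixed
// stack objects so an attached debugger can locate them.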
void SIFrameLowering::emitDebuggerPrologue(MachineFunction &MF,
                                           MachineBasicBlock &MBB) const {
  const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo *TRI = &TII->getRegisterInfo();
  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

  MachineBasicBlock::iterator I = MBB.begin();
  DebugLoc DL;

  // For each dimension:
  for (unsigned i = 0; i < 3; ++i) {
    // Get work group ID SGPR, and make it live-in again.
    unsigned WorkGroupIDSGPR = MFI->getWorkGroupIDSGPR(i);
    MF.getRegInfo().addLiveIn(WorkGroupIDSGPR);
    MBB.addLiveIn(WorkGroupIDSGPR);

    // Since SGPRs are spilled into VGPRs, copy work group ID SGPR to VGPR in
    // order to spill it to scratch.
    unsigned WorkGroupIDVGPR =
      MF.getRegInfo().createVirtualRegister(&AMDGPU::VGPR_32RegClass);
    BuildMI(MBB, I, DL, TII->get(AMDGPU::V_MOV_B32_e32), WorkGroupIDVGPR)
      .addReg(WorkGroupIDSGPR);

    // Spill work group ID.
    int WorkGroupIDObjectIdx = MFI->getDebuggerWorkGroupIDStackObjectIndex(i);
    TII->storeRegToStackSlot(MBB, I, WorkGroupIDVGPR, false,
                             WorkGroupIDObjectIdx, &AMDGPU::VGPR_32RegClass,
                             TRI);

    // Get work item ID VGPR, and make it live-in again.
    unsigned WorkItemIDVGPR = MFI->getWorkItemIDVGPR(i);
    MF.getRegInfo().addLiveIn(WorkItemIDVGPR);
    MBB.addLiveIn(WorkItemIDVGPR);

    // Spill work item ID.
    int WorkItemIDObjectIdx = MFI->getDebuggerWorkItemIDStackObjectIndex(i);
    TII->storeRegToStackSlot(MBB, I, WorkItemIDVGPR, false,
                             WorkItemIDObjectIdx, &AMDGPU::VGPR_32RegClass,
                             TRI);
  }
}