//===- SIFixSGPRCopies.cpp - Remove potential VGPR => SGPR copies ---------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// Copies from VGPR to SGPR registers are illegal and the register coalescer
/// will sometimes generate these illegal copies in situations like this:
///
///  Register Class <vsrc> is the union of <vgpr> and <sgpr>
///
/// BB0:
///   %0 <sgpr> = SCALAR_INST
///   %1 <vsrc> = COPY %0 <sgpr>
///    ...
///    BRANCH %cond BB1, BB2
/// BB1:
///   %2 <vgpr> = VECTOR_INST
///   %3 <vsrc> = COPY %2 <vgpr>
/// BB2:
///   %4 <vsrc> = PHI %1 <vsrc>, <%bb.0>, %3 <vsrc>, <%bb.1>
///   %5 <vgpr> = VECTOR_INST %4 <vsrc>
///
/// The coalescer will begin at BB0 and eliminate its copy, then the resulting
/// code will look like this:
///
/// BB0:
///   %0 <sgpr> = SCALAR_INST
///    ...
///    BRANCH %cond BB1, BB2
/// BB1:
///   %2 <vgpr> = VECTOR_INST
///   %3 <vsrc> = COPY %2 <vgpr>
/// BB2:
///   %4 <sgpr> = PHI %0 <sgpr>, <%bb.0>, %3 <vsrc>, <%bb.1>
///   %5 <vgpr> = VECTOR_INST %4 <sgpr>
///
/// Now that the result of the PHI instruction is an SGPR, the register
/// allocator is forced to constrain the register class of %3 to
/// <sgpr>, so we end up with final code like this:
///
/// BB0:
///   %0 <sgpr> = SCALAR_INST
///    ...
///    BRANCH %cond BB1, BB2
/// BB1:
///   %2 <vgpr> = VECTOR_INST
///   %3 <sgpr> = COPY %2 <vgpr>
/// BB2:
///   %4 <sgpr> = PHI %0 <sgpr>, <%bb.0>, %3 <sgpr>, <%bb.1>
///   %5 <vgpr> = VECTOR_INST %4 <sgpr>
///
/// Now this code contains an illegal copy from a VGPR to an SGPR.
///
/// In order to avoid this problem, this pass searches for PHI instructions
/// which define a <vsrc> register and constrains the PHI's definition class
/// to <vgpr> if a user of the PHI's result is a vector instruction.
/// If the PHI's definition class is constrained to <vgpr> then the coalescer
/// will be unable to perform the COPY removal from the above example which
/// ultimately led to the creation of an illegal COPY.
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "SIInstrInfo.h"
#include "SIRegisterInfo.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include <cassert>
#include <cstdint>
#include <iterator>
#include <list>
#include <map>
#include <tuple>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "si-fix-sgpr-copies"

static cl::opt<bool> EnableM0Merge(
  "amdgpu-enable-merge-m0",
  cl::desc("Merge and hoist M0 initializations"),
  cl::init(true));

namespace {

class SIFixSGPRCopies : public MachineFunctionPass {
  MachineDominatorTree *MDT;

public:
  static char ID;

  MachineRegisterInfo *MRI;
  const SIRegisterInfo *TRI;
  const SIInstrInfo *TII;

  SIFixSGPRCopies() : MachineFunctionPass(ID) {}

  bool runOnMachineFunction(MachineFunction &MF) override;

  void processPHINode(MachineInstr &MI);

  StringRef getPassName() const override { return "SI Fix SGPR copies"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<MachineDominatorTree>();
    AU.addPreserved<MachineDominatorTree>();
    AU.setPreservesCFG();
    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

} // end anonymous namespace

INITIALIZE_PASS_BEGIN(SIFixSGPRCopies, DEBUG_TYPE,
                      "SI Fix SGPR copies", false, false)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
INITIALIZE_PASS_END(SIFixSGPRCopies, DEBUG_TYPE,
                    "SI Fix SGPR copies", false, false)

char SIFixSGPRCopies::ID = 0;

char &llvm::SIFixSGPRCopiesID = SIFixSGPRCopies::ID;

FunctionPass *llvm::createSIFixSGPRCopiesPass() {
  return new SIFixSGPRCopies();
}

static bool hasVectorOperands(const MachineInstr &MI,
                              const SIRegisterInfo *TRI) {
  const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    if (!MI.getOperand(i).isReg() ||
        !Register::isVirtualRegister(MI.getOperand(i).getReg()))
      continue;

    if (TRI->hasVectorRegisters(MRI.getRegClass(MI.getOperand(i).getReg())))
      return true;
  }
  return false;
}

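// Return the register classes of a COPY's source and destination operands,
// handling both virtual registers (looked up in MRI) and physical registers
// (looked up in TRI).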
static std::pair<const TargetRegisterClass *, const TargetRegisterClass *>
getCopyRegClasses(const MachineInstr &Copy,
                  const SIRegisterInfo &TRI,
                  const MachineRegisterInfo &MRI) {
  Register DstReg = Copy.getOperand(0).getReg();
  Register SrcReg = Copy.getOperand(1).getReg();

  const TargetRegisterClass *SrcRC = Register::isVirtualRegister(SrcReg)
                                         ? MRI.getRegClass(SrcReg)
                                         : TRI.getPhysRegClass(SrcReg);

  // We don't really care about the subregister here.
  // SrcRC = TRI.getSubRegClass(SrcRC, Copy.getOperand(1).getSubReg());

  const TargetRegisterClass *DstRC = Register::isVirtualRegister(DstReg)
                                         ? MRI.getRegClass(DstReg)
                                         : TRI.getPhysRegClass(DstReg);

  return std::make_pair(SrcRC, DstRC);
}

static bool isVGPRToSGPRCopy(const TargetRegisterClass *SrcRC,
                             const TargetRegisterClass *DstRC,
                             const SIRegisterInfo &TRI) {
  return SrcRC != &AMDGPU::VReg_1RegClass && TRI.isSGPRClass(DstRC) &&
         TRI.hasVectorRegisters(SrcRC);
}

static bool isSGPRToVGPRCopy(const TargetRegisterClass *SrcRC,
                             const TargetRegisterClass *DstRC,
                             const SIRegisterInfo &TRI) {
  return DstRC != &AMDGPU::VReg_1RegClass && TRI.isSGPRClass(SrcRC) &&
         TRI.hasVectorRegisters(DstRC);
}

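// Try to shrink an SGPR-to-VGPR copy into an SGPR-to-SGPR copy by switching
// the destination to an equivalent SGPR register class. This is only done
// when both registers are virtual and every non-debug user of the
// destination is in the same block and accepts an SGPR operand in that slot.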
static bool tryChangeVGPRtoSGPRinCopy(MachineInstr &MI,
                                      const SIRegisterInfo *TRI,
                                      const SIInstrInfo *TII) {
  MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
  auto &Src = MI.getOperand(1);
  Register DstReg = MI.getOperand(0).getReg();
  Register SrcReg = Src.getReg();
  if (!Register::isVirtualRegister(SrcReg) ||
      !Register::isVirtualRegister(DstReg))
    return false;

  for (const auto &MO : MRI.reg_nodbg_operands(DstReg)) {
    const auto *UseMI = MO.getParent();
    if (UseMI == &MI)
      continue;
    if (MO.isDef() || UseMI->getParent() != MI.getParent() ||
        UseMI->getOpcode() <= TargetOpcode::GENERIC_OP_END ||
        !TII->isOperandLegal(*UseMI, UseMI->getOperandNo(&MO), &Src))
      return false;
  }
  // Change VGPR to SGPR destination.
  MRI.setRegClass(DstReg, TRI->getEquivalentSGPRClass(MRI.getRegClass(DstReg)));
  return true;
}

// Distribute an SGPR->VGPR copy of a REG_SEQUENCE into a VGPR REG_SEQUENCE.
//
//  SGPRx = ...
//  SGPRy = REG_SEQUENCE SGPRx, sub0 ...
//  VGPRz = COPY SGPRy
//
// ==>
//
//  VGPRx = COPY SGPRx
//  VGPRz = REG_SEQUENCE VGPRx, sub0
//
// This exposes immediate folding opportunities when materializing 64-bit
// immediates.
static bool foldVGPRCopyIntoRegSequence(MachineInstr &MI,
                                        const SIRegisterInfo *TRI,
                                        const SIInstrInfo *TII,
                                        MachineRegisterInfo &MRI) {
  assert(MI.isRegSequence());

  Register DstReg = MI.getOperand(0).getReg();
  if (!TRI->isSGPRClass(MRI.getRegClass(DstReg)))
    return false;

  if (!MRI.hasOneUse(DstReg))
    return false;

  MachineInstr &CopyUse = *MRI.use_instr_begin(DstReg);
  if (!CopyUse.isCopy())
    return false;

  // It is illegal to have vreg inputs to a physreg defining reg_sequence.
  if (Register::isPhysicalRegister(CopyUse.getOperand(0).getReg()))
    return false;

  const TargetRegisterClass *SrcRC, *DstRC;
  std::tie(SrcRC, DstRC) = getCopyRegClasses(CopyUse, *TRI, MRI);

  if (!isSGPRToVGPRCopy(SrcRC, DstRC, *TRI))
    return false;

  if (tryChangeVGPRtoSGPRinCopy(CopyUse, TRI, TII))
    return true;

  // TODO: Could have multiple extracts?
  unsigned SubReg = CopyUse.getOperand(1).getSubReg();
  if (SubReg != AMDGPU::NoSubRegister)
    return false;

  MRI.setRegClass(DstReg, DstRC);

  // SGPRx = ...
  // SGPRy = REG_SEQUENCE SGPRx, sub0 ...
  // VGPRz = COPY SGPRy

  // =>
  // VGPRx = COPY SGPRx
  // VGPRz = REG_SEQUENCE VGPRx, sub0

  MI.getOperand(0).setReg(CopyUse.getOperand(0).getReg());
  bool IsAGPR = TRI->hasAGPRs(DstRC);

  for (unsigned I = 1, N = MI.getNumOperands(); I != N; I += 2) {
    Register SrcReg = MI.getOperand(I).getReg();
    unsigned SrcSubReg = MI.getOperand(I).getSubReg();

    const TargetRegisterClass *SrcRC = MRI.getRegClass(SrcReg);
    assert(TRI->isSGPRClass(SrcRC) &&
           "Expected SGPR REG_SEQUENCE to only have SGPR inputs");

    SrcRC = TRI->getSubRegClass(SrcRC, SrcSubReg);
    const TargetRegisterClass *NewSrcRC = TRI->getEquivalentVGPRClass(SrcRC);

    Register TmpReg = MRI.createVirtualRegister(NewSrcRC);

    BuildMI(*MI.getParent(), &MI, MI.getDebugLoc(), TII->get(AMDGPU::COPY),
            TmpReg)
        .add(MI.getOperand(I));

    if (IsAGPR) {
      const TargetRegisterClass *NewSrcRC = TRI->getEquivalentAGPRClass(SrcRC);
      Register TmpAReg = MRI.createVirtualRegister(NewSrcRC);
      unsigned Opc = NewSrcRC == &AMDGPU::AGPR_32RegClass ?
        AMDGPU::V_ACCVGPR_WRITE_B32 : AMDGPU::COPY;
      BuildMI(*MI.getParent(), &MI, MI.getDebugLoc(), TII->get(Opc),
              TmpAReg)
          .addReg(TmpReg, RegState::Kill);
      TmpReg = TmpAReg;
    }

    MI.getOperand(I).setReg(TmpReg);
  }

  CopyUse.eraseFromParent();
  return true;
}

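// Check whether a COPY whose source is defined by a move-immediate can
// instead be rewritten as a scalar move of that immediate. On success,
// return the scalar move opcode in SMovOp and the immediate value in Imm.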
static bool isSafeToFoldImmIntoCopy(const MachineInstr *Copy,
                                    const MachineInstr *MoveImm,
                                    const SIInstrInfo *TII,
                                    unsigned &SMovOp,
                                    int64_t &Imm) {
  if (Copy->getOpcode() != AMDGPU::COPY)
    return false;

  if (!MoveImm->isMoveImmediate())
    return false;

  const MachineOperand *ImmOp =
      TII->getNamedOperand(*MoveImm, AMDGPU::OpName::src0);
  if (!ImmOp->isImm())
    return false;

  // FIXME: Handle copies with sub-regs.
  if (Copy->getOperand(0).getSubReg())
    return false;

  switch (MoveImm->getOpcode()) {
  default:
    return false;
  case AMDGPU::V_MOV_B32_e32:
    SMovOp = AMDGPU::S_MOV_B32;
    break;
  case AMDGPU::V_MOV_B64_PSEUDO:
    SMovOp = AMDGPU::S_MOV_B64;
    break;
  }
  Imm = ImmOp->getImm();
  return true;
}

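// Walk the predecessor graph of MBB, stopping at CutOff, and return true if
// Predicate holds for any block visited along the way.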
template <class UnaryPredicate>
bool searchPredecessors(const MachineBasicBlock *MBB,
                        const MachineBasicBlock *CutOff,
                        UnaryPredicate Predicate) {
  if (MBB == CutOff)
    return false;

  DenseSet<const MachineBasicBlock *> Visited;
  SmallVector<MachineBasicBlock *, 4> Worklist(MBB->pred_begin(),
                                               MBB->pred_end());

  while (!Worklist.empty()) {
    MachineBasicBlock *MBB = Worklist.pop_back_val();

    if (!Visited.insert(MBB).second)
      continue;
    if (MBB == CutOff)
      continue;
    if (Predicate(MBB))
      return true;

    Worklist.append(MBB->pred_begin(), MBB->pred_end());
  }

  return false;
}

// Checks if there is a potential path from instruction From to instruction
// To. If CutOff is specified and sits on that path, we ignore the portion of
// the path above CutOff and report the destination as not reachable.
static bool isReachable(const MachineInstr *From,
                        const MachineInstr *To,
                        const MachineBasicBlock *CutOff,
                        MachineDominatorTree &MDT) {
  // From reaches To if either From's block dominates To's block, or the
  // instructions are in the same block and From comes first.
  if (MDT.dominates(From, To))
    return true;

  const MachineBasicBlock *MBBFrom = From->getParent();
  const MachineBasicBlock *MBBTo = To->getParent();
  if (MBBFrom == MBBTo)
    return false;

  // Instructions are in different blocks, do predecessor search.
  // We should almost never get here since we do not usually produce M0 stores
  // other than -1.
  return searchPredecessors(MBBTo, CutOff, [MBBFrom](
                            const MachineBasicBlock *MBB) {
    return MBB == MBBFrom;
  });
}

// Return the first non-prologue instruction in the block.
static MachineBasicBlock::iterator
getFirstNonPrologue(MachineBasicBlock *MBB, const TargetInstrInfo *TII) {
  MachineBasicBlock::iterator I = MBB->getFirstNonPHI();
  while (I != MBB->end() && TII->isBasicBlockPrologue(*I))
    ++I;

  return I;
}

// Hoist and merge identical SGPR initializations into a common predecessor.
// This is intended to combine M0 initializations, but can work with any
// SGPR. A VGPR cannot be processed since we cannot guarantee vector
// execution.
static bool hoistAndMergeSGPRInits(unsigned Reg,
                                   const MachineRegisterInfo &MRI,
                                   const TargetRegisterInfo *TRI,
                                   MachineDominatorTree &MDT,
                                   const TargetInstrInfo *TII) {
  // List of inits by immediate value.
  using InitListMap = std::map<unsigned, std::list<MachineInstr *>>;
  InitListMap Inits;
  // List of clobbering instructions.
  SmallVector<MachineInstr*, 8> Clobbers;
  // List of instructions marked for deletion.
  SmallSet<MachineInstr*, 8> MergedInstrs;

  bool Changed = false;

  for (auto &MI : MRI.def_instructions(Reg)) {
    MachineOperand *Imm = nullptr;
    for (auto &MO : MI.operands()) {
      if ((MO.isReg() && ((MO.isDef() && MO.getReg() != Reg) || !MO.isDef())) ||
          (!MO.isImm() && !MO.isReg()) || (MO.isImm() && Imm)) {
        Imm = nullptr;
        break;
      } else if (MO.isImm())
        Imm = &MO;
    }
    if (Imm)
      Inits[Imm->getImm()].push_front(&MI);
    else
      Clobbers.push_back(&MI);
  }

  for (auto &Init : Inits) {
    auto &Defs = Init.second;

    for (auto I1 = Defs.begin(), E = Defs.end(); I1 != E; ) {
      MachineInstr *MI1 = *I1;

      for (auto I2 = std::next(I1); I2 != E; ) {
        MachineInstr *MI2 = *I2;

        // Check any possible interference.
        auto interferes = [&](MachineBasicBlock::iterator From,
                              MachineBasicBlock::iterator To) -> bool {
          assert(MDT.dominates(&*To, &*From));

          auto interferes = [&MDT, From, To](MachineInstr* &Clobber) -> bool {
            const MachineBasicBlock *MBBFrom = From->getParent();
            const MachineBasicBlock *MBBTo = To->getParent();
            bool MayClobberFrom = isReachable(Clobber, &*From, MBBTo, MDT);
            bool MayClobberTo = isReachable(Clobber, &*To, MBBTo, MDT);
            if (!MayClobberFrom && !MayClobberTo)
              return false;
            if ((MayClobberFrom && !MayClobberTo) ||
                (!MayClobberFrom && MayClobberTo))
              return true;
            // Both can clobber, so this is not an interference only if both
            // are dominated by Clobber and belong to the same block, or if
            // Clobber properly dominates To; given that To >> From, Clobber
            // then dominates both and is located in a common dominator.
            return !((MBBFrom == MBBTo &&
                      MDT.dominates(Clobber, &*From) &&
                      MDT.dominates(Clobber, &*To)) ||
                     MDT.properlyDominates(Clobber->getParent(), MBBTo));
          };

          return (llvm::any_of(Clobbers, interferes)) ||
                 (llvm::any_of(Inits, [&](InitListMap::value_type &C) {
                    return C.first != Init.first &&
                           llvm::any_of(C.second, interferes);
                  }));
        };

        if (MDT.dominates(MI1, MI2)) {
          if (!interferes(MI2, MI1)) {
            LLVM_DEBUG(dbgs()
                       << "Erasing from "
                       << printMBBReference(*MI2->getParent()) << " " << *MI2);
            MergedInstrs.insert(MI2);
            Changed = true;
            ++I2;
            continue;
          }
        } else if (MDT.dominates(MI2, MI1)) {
          if (!interferes(MI1, MI2)) {
            LLVM_DEBUG(dbgs()
                       << "Erasing from "
                       << printMBBReference(*MI1->getParent()) << " " << *MI1);
            MergedInstrs.insert(MI1);
            Changed = true;
            ++I1;
            break;
          }
        } else {
          auto *MBB = MDT.findNearestCommonDominator(MI1->getParent(),
                                                     MI2->getParent());
          if (!MBB) {
            ++I2;
            continue;
          }

          MachineBasicBlock::iterator I = getFirstNonPrologue(MBB, TII);
          if (!interferes(MI1, I) && !interferes(MI2, I)) {
            LLVM_DEBUG(dbgs()
                       << "Erasing from "
                       << printMBBReference(*MI1->getParent()) << " " << *MI1
                       << "and moving from "
                       << printMBBReference(*MI2->getParent()) << " to "
                       << printMBBReference(*I->getParent()) << " " << *MI2);
            I->getParent()->splice(I, MI2->getParent(), MI2);
            MergedInstrs.insert(MI1);
            Changed = true;
            ++I1;
            break;
          }
        }
        ++I2;
      }
      ++I1;
    }
  }

  // Remove initializations that were merged into another.
  for (auto &Init : Inits) {
    auto &Defs = Init.second;
    auto I = Defs.begin();
    while (I != Defs.end()) {
      if (MergedInstrs.count(*I)) {
        (*I)->eraseFromParent();
        I = Defs.erase(I);
      } else
        ++I;
    }
  }

  // Try to schedule SGPR initializations as early as possible in the MBB.
  for (auto &Init : Inits) {
    auto &Defs = Init.second;
    for (auto MI : Defs) {
      auto MBB = MI->getParent();
      MachineInstr &BoundaryMI = *getFirstNonPrologue(MBB, TII);
      MachineBasicBlock::reverse_iterator B(BoundaryMI);
      // Check if B should actually be a boundary. If not, set the previous
      // instruction as the boundary instead.
      if (!TII->isBasicBlockPrologue(*B))
        B++;

      auto R = std::next(MI->getReverseIterator());
      const unsigned Threshold = 50;
      // Search until B or Threshold for a place to insert the initialization.
      for (unsigned I = 0; R != B && I < Threshold; ++R, ++I)
        if (R->readsRegister(Reg, TRI) || R->definesRegister(Reg, TRI) ||
            TII->isSchedulingBoundary(*R, MBB, *MBB->getParent()))
          break;

      // Move to directly after R.
      if (&*--R != MI)
        MBB->splice(*R, MBB, MI);
    }
  }

  if (Changed)
    MRI.clearKillFlags(Reg);

  return Changed;
}

bool SIFixSGPRCopies::runOnMachineFunction(MachineFunction &MF) {
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  MRI = &MF.getRegInfo();
  TRI = ST.getRegisterInfo();
  TII = ST.getInstrInfo();
  MDT = &getAnalysis<MachineDominatorTree>();

  SmallVector<MachineInstr *, 16> Worklist;

  for (MachineFunction::iterator BI = MF.begin(), BE = MF.end();
       BI != BE; ++BI) {
    MachineBasicBlock &MBB = *BI;
    for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end();
         I != E; ++I) {
      MachineInstr &MI = *I;

      switch (MI.getOpcode()) {
      default:
        continue;
      case AMDGPU::COPY:
      case AMDGPU::WQM:
      case AMDGPU::SOFT_WQM:
      case AMDGPU::WWM: {
        Register DstReg = MI.getOperand(0).getReg();

        const TargetRegisterClass *SrcRC, *DstRC;
        std::tie(SrcRC, DstRC) = getCopyRegClasses(MI, *TRI, *MRI);

        if (!Register::isVirtualRegister(DstReg)) {
          // If the destination register is a physical register there isn't
          // really much we can do to fix this.
          // Some special instructions use M0 as an input. Some even only use
          // the first lane. Insert a readfirstlane and hope for the best.
          if (DstReg == AMDGPU::M0 && TRI->hasVectorRegisters(SrcRC)) {
            Register TmpReg
              = MRI->createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);

            BuildMI(MBB, MI, MI.getDebugLoc(),
                    TII->get(AMDGPU::V_READFIRSTLANE_B32), TmpReg)
                .add(MI.getOperand(1));
            MI.getOperand(1).setReg(TmpReg);
          }

          continue;
        }

        if (isVGPRToSGPRCopy(SrcRC, DstRC, *TRI)) {
          Register SrcReg = MI.getOperand(1).getReg();
          if (!Register::isVirtualRegister(SrcReg)) {
            TII->moveToVALU(MI, MDT);
            break;
          }

          MachineInstr *DefMI = MRI->getVRegDef(SrcReg);
          unsigned SMovOp;
          int64_t Imm;
          // If we are just copying an immediate, we can replace the copy with
          // s_mov_b32.
          if (isSafeToFoldImmIntoCopy(&MI, DefMI, TII, SMovOp, Imm)) {
            MI.getOperand(1).ChangeToImmediate(Imm);
            MI.addImplicitDefUseOperands(MF);
            MI.setDesc(TII->get(SMovOp));
            break;
          }
          TII->moveToVALU(MI, MDT);
        } else if (isSGPRToVGPRCopy(SrcRC, DstRC, *TRI)) {
          tryChangeVGPRtoSGPRinCopy(MI, TRI, TII);
        }

        break;
      }
      case AMDGPU::PHI: {
        processPHINode(MI);
        break;
      }
      case AMDGPU::REG_SEQUENCE:
        if (TRI->hasVectorRegisters(TII->getOpRegClass(MI, 0)) ||
            !hasVectorOperands(MI, TRI)) {
          foldVGPRCopyIntoRegSequence(MI, TRI, TII, *MRI);
          continue;
        }

        LLVM_DEBUG(dbgs() << "Fixing REG_SEQUENCE: " << MI);

        TII->moveToVALU(MI, MDT);
        break;
      case AMDGPU::INSERT_SUBREG: {
        const TargetRegisterClass *DstRC, *Src0RC, *Src1RC;
        DstRC = MRI->getRegClass(MI.getOperand(0).getReg());
        Src0RC = MRI->getRegClass(MI.getOperand(1).getReg());
        Src1RC = MRI->getRegClass(MI.getOperand(2).getReg());
        if (TRI->isSGPRClass(DstRC) &&
            (TRI->hasVectorRegisters(Src0RC) ||
             TRI->hasVectorRegisters(Src1RC))) {
          LLVM_DEBUG(dbgs() << " Fixing INSERT_SUBREG: " << MI);
          TII->moveToVALU(MI, MDT);
        }
        break;
      }
      case AMDGPU::V_WRITELANE_B32: {
        // Some architectures allow more than one constant bus access without
        // an SGPR restriction.
        if (ST.getConstantBusLimit(MI.getOpcode()) != 1)
          break;

        // Writelane is special in that it can use SGPR and M0 (which would
        // normally count as using the constant bus twice - but in this case
        // it is allowed since the lane selector doesn't count as a use of the
        // constant bus). However, it is still required to abide by the 1 SGPR
        // rule. Apply a fix here as we might have multiple SGPRs after
        // legalizing VGPRs to SGPRs.
        int Src0Idx =
            AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::src0);
        int Src1Idx =
            AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::src1);
        MachineOperand &Src0 = MI.getOperand(Src0Idx);
        MachineOperand &Src1 = MI.getOperand(Src1Idx);

        // Check to see if the instruction violates the 1 SGPR rule.
        if ((Src0.isReg() && TRI->isSGPRReg(*MRI, Src0.getReg()) &&
             Src0.getReg() != AMDGPU::M0) &&
            (Src1.isReg() && TRI->isSGPRReg(*MRI, Src1.getReg()) &&
             Src1.getReg() != AMDGPU::M0)) {

          // Check for trivially easy constant prop into one of the operands.
          // If this is the case then perform the operation now to resolve the
          // SGPR issue. If we don't do that here we will always insert a mov
          // to m0 that can't be resolved in a later operand folding pass.
          bool Resolved = false;
          for (MachineOperand *MO : {&Src0, &Src1}) {
            if (Register::isVirtualRegister(MO->getReg())) {
              MachineInstr *DefMI = MRI->getVRegDef(MO->getReg());
              if (DefMI && TII->isFoldableCopy(*DefMI)) {
                const MachineOperand &Def = DefMI->getOperand(0);
                if (Def.isReg() &&
                    MO->getReg() == Def.getReg() &&
                    MO->getSubReg() == Def.getSubReg()) {
                  const MachineOperand &Copied = DefMI->getOperand(1);
                  if (Copied.isImm() &&
                      TII->isInlineConstant(APInt(64, Copied.getImm(), true))) {
                    MO->ChangeToImmediate(Copied.getImm());
                    Resolved = true;
                    break;
                  }
                }
              }
            }
          }

          if (!Resolved) {
            // Haven't managed to resolve by replacing an SGPR with an
            // immediate. Move src1 to be in M0.
            BuildMI(*MI.getParent(), MI, MI.getDebugLoc(),
                    TII->get(AMDGPU::COPY), AMDGPU::M0)
                .add(Src1);
            Src1.ChangeToRegister(AMDGPU::M0, false);
          }
        }
        break;
      }
      }
    }
  }

  if (MF.getTarget().getOptLevel() > CodeGenOpt::None && EnableM0Merge)
    hoistAndMergeSGPRInits(AMDGPU::M0, *MRI, TRI, *MDT, TII);

  return true;
}

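// Decide what to do with a PHI: scan its transitive COPY/REG_SEQUENCE users
// for VGPR uses and its inputs for VGPR definitions, then either move the
// PHI's result to an AGPR class, move the PHI to the VALU, or legalize its
// operands in place.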
void SIFixSGPRCopies::processPHINode(MachineInstr &MI) {
  unsigned numVGPRUses = 0;
  bool AllAGPRUses = true;
  SetVector<const MachineInstr *> worklist;
  SmallSet<const MachineInstr *, 4> Visited;
  worklist.insert(&MI);
  Visited.insert(&MI);
  while (!worklist.empty()) {
    const MachineInstr *Instr = worklist.pop_back_val();
    unsigned Reg = Instr->getOperand(0).getReg();
    for (const auto &Use : MRI->use_operands(Reg)) {
      const MachineInstr *UseMI = Use.getParent();
      AllAGPRUses &= (UseMI->isCopy() &&
                      TRI->isAGPR(*MRI, UseMI->getOperand(0).getReg())) ||
                     TRI->isAGPR(*MRI, Use.getReg());
      if (UseMI->isCopy() || UseMI->isRegSequence()) {
        if (UseMI->isCopy() &&
            UseMI->getOperand(0).getReg().isPhysical() &&
            !TRI->isSGPRReg(*MRI, UseMI->getOperand(0).getReg())) {
          numVGPRUses++;
        }
        if (Visited.insert(UseMI).second)
          worklist.insert(UseMI);

        continue;
      }

      if (UseMI->isPHI()) {
        const TargetRegisterClass *UseRC = MRI->getRegClass(Use.getReg());
        if (!TRI->isSGPRReg(*MRI, Use.getReg()) &&
            UseRC != &AMDGPU::VReg_1RegClass)
          numVGPRUses++;
        continue;
      }

      const TargetRegisterClass *OpRC =
          TII->getOpRegClass(*UseMI, UseMI->getOperandNo(&Use));
      if (!TRI->isSGPRClass(OpRC) && OpRC != &AMDGPU::VS_32RegClass &&
          OpRC != &AMDGPU::VS_64RegClass) {
        numVGPRUses++;
      }
    }
  }

  Register PHIRes = MI.getOperand(0).getReg();
  const TargetRegisterClass *RC0 = MRI->getRegClass(PHIRes);
  if (AllAGPRUses && numVGPRUses && !TRI->hasAGPRs(RC0)) {
    LLVM_DEBUG(dbgs() << "Moving PHI to AGPR: " << MI);
    MRI->setRegClass(PHIRes, TRI->getEquivalentAGPRClass(RC0));
  }

  bool hasVGPRInput = false;
  for (unsigned i = 1; i < MI.getNumOperands(); i += 2) {
    unsigned InputReg = MI.getOperand(i).getReg();
    MachineInstr *Def = MRI->getVRegDef(InputReg);
    if (TRI->isVectorRegister(*MRI, InputReg)) {
      if (Def->isCopy()) {
        unsigned SrcReg = Def->getOperand(1).getReg();
        const TargetRegisterClass *RC =
            TRI->getRegClassForReg(*MRI, SrcReg);
        if (TRI->isSGPRClass(RC))
          continue;
      }
      hasVGPRInput = true;
      break;
    } else if (Def->isCopy() &&
               TRI->isVectorRegister(*MRI, Def->getOperand(1).getReg())) {
      hasVGPRInput = true;
      break;
    }
  }

  if ((!TRI->isVectorRegister(*MRI, PHIRes) &&
       RC0 != &AMDGPU::VReg_1RegClass) &&
      (hasVGPRInput || numVGPRUses > 1)) {
    LLVM_DEBUG(dbgs() << "Fixing PHI: " << MI);
    TII->moveToVALU(MI);
  } else {
    LLVM_DEBUG(dbgs() << "Legalizing PHI: " << MI);
    TII->legalizeOperands(MI, MDT);
  }
}