//===- SILoadStoreOptimizer.cpp -------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass tries to fuse DS instructions with nearby immediate offsets.
// This will fuse operations such as
//  ds_read_b32 v0, v2 offset:16
//  ds_read_b32 v1, v2 offset:32
// ==>
//  ds_read2_b32 v[0:1], v2, offset0:4 offset1:8
//
// Future improvements:
//
// - This currently relies on the scheduler to place loads and stores next to
//   each other, and then only merges adjacent pairs of instructions. It would
//   be good to be more flexible with interleaved instructions, and possibly
//   run before scheduling. It currently misses stores of constants because
//   loading the constant into the data register is placed between the stores,
//   although this is arguably a scheduling problem.
//
// - Live interval recomputing seems inefficient. This currently only matches
//   one pair, then recomputes live intervals and moves on to the next pair.
//   It would be better to compute a list of all merges that need to occur.
//
// - With a list of instructions to process, we can also merge more. If a
//   cluster of loads have offsets that are too large to fit in the 8-bit
//   offset fields but are close enough together, we can add to the base
//   pointer and use the new, reduced offsets.
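//
//   For example (illustrative): two b32 loads at byte offsets 2048 and 2056
//   have dword offsets 512 and 514, which do not fit in 8 bits, but after
//   adding 2048 to the base pointer the rebased dword offsets 0 and 2 do.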
//
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "SIRegisterInfo.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/Pass.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <cstdlib>
#include <iterator>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "si-load-store-opt"

namespace {

class SILoadStoreOptimizer : public MachineFunctionPass {
  using CombineInfo = struct {
    MachineBasicBlock::iterator I;
    MachineBasicBlock::iterator Paired;
    unsigned EltSize;
    unsigned Offset0;
    unsigned Offset1;
    unsigned BaseOff;
    bool UseST64;
    SmallVector<MachineInstr *, 8> InstsToMove;
  };

private:
  const SIInstrInfo *TII = nullptr;
  const SIRegisterInfo *TRI = nullptr;
  MachineRegisterInfo *MRI = nullptr;
  AliasAnalysis *AA = nullptr;

  static bool offsetsCanBeCombined(CombineInfo &CI);

  bool findMatchingDSInst(CombineInfo &CI);

  MachineBasicBlock::iterator mergeRead2Pair(CombineInfo &CI);

  MachineBasicBlock::iterator mergeWrite2Pair(CombineInfo &CI);

public:
  static char ID;

  SILoadStoreOptimizer() : MachineFunctionPass(ID) {
    initializeSILoadStoreOptimizerPass(*PassRegistry::getPassRegistry());
  }

  bool optimizeBlock(MachineBasicBlock &MBB);

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override { return "SI Load / Store Optimizer"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    AU.addRequired<AAResultsWrapperPass>();

    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

} // end anonymous namespace.

INITIALIZE_PASS_BEGIN(SILoadStoreOptimizer, DEBUG_TYPE,
                      "SI Load / Store Optimizer", false, false)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_END(SILoadStoreOptimizer, DEBUG_TYPE,
                    "SI Load / Store Optimizer", false, false)

char SILoadStoreOptimizer::ID = 0;

char &llvm::SILoadStoreOptimizerID = SILoadStoreOptimizer::ID;

FunctionPass *llvm::createSILoadStoreOptimizerPass() {
  return new SILoadStoreOptimizer();
}

static void moveInstsAfter(MachineBasicBlock::iterator I,
                           ArrayRef<MachineInstr *> InstsToMove) {
  MachineBasicBlock *MBB = I->getParent();
  ++I;
  for (MachineInstr *MI : InstsToMove) {
    MI->removeFromParent();
    MBB->insert(I, MI);
  }
}

static void addDefsToList(const MachineInstr &MI,
                          SmallVectorImpl<const MachineOperand *> &Defs) {
  for (const MachineOperand &Def : MI.defs()) {
    Defs.push_back(&Def);
  }
}

static bool memAccessesCanBeReordered(MachineBasicBlock::iterator A,
                                      MachineBasicBlock::iterator B,
                                      const SIInstrInfo *TII,
                                      AliasAnalysis *AA) {
  return (TII->areMemAccessesTriviallyDisjoint(*A, *B, AA) ||
          // RAW or WAR - cannot reorder
          // WAW - cannot reorder
          // RAR - safe to reorder
          !(A->mayStore() || B->mayStore()));
}

// Add MI and its defs to the lists if MI reads one of the defs that are
// already in the list. Returns true in that case.
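//
// For example (illustrative): if Defs already holds a register %w defined by
// an instruction queued to move, and MI is "%x = V_ADD_I32_e32 %w, ...",
// then MI reads %w and must be queued to move below the merge point as well.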
static bool
addToListsIfDependent(MachineInstr &MI,
                      SmallVectorImpl<const MachineOperand *> &Defs,
                      SmallVectorImpl<MachineInstr *> &Insts) {
  for (const MachineOperand *Def : Defs) {
    bool ReadDef = MI.readsVirtualRegister(Def->getReg());
    // If ReadDef is true, then there is a use of Def between I
    // and the instruction that I will potentially be merged with. We
    // will need to move this instruction after the merged instructions.
    if (ReadDef) {
      Insts.push_back(&MI);
      addDefsToList(MI, Defs);
      return true;
    }
  }

  return false;
}

static bool
canMoveInstsAcrossMemOp(MachineInstr &MemOp,
                        ArrayRef<MachineInstr *> InstsToMove,
                        const SIInstrInfo *TII,
                        AliasAnalysis *AA) {
  assert(MemOp.mayLoadOrStore());

  for (MachineInstr *InstToMove : InstsToMove) {
    if (!InstToMove->mayLoadOrStore())
      continue;
    if (!memAccessesCanBeReordered(MemOp, *InstToMove, TII, AA))
      return false;
  }
  return true;
}

bool SILoadStoreOptimizer::offsetsCanBeCombined(CombineInfo &CI) {
  // XXX - Would the same offset be OK? Is there any reason this would happen
  // or be useful?
  if (CI.Offset0 == CI.Offset1)
    return false;

  // This won't be valid if the offset isn't aligned.
  if ((CI.Offset0 % CI.EltSize != 0) || (CI.Offset1 % CI.EltSize != 0))
    return false;

  unsigned EltOffset0 = CI.Offset0 / CI.EltSize;
  unsigned EltOffset1 = CI.Offset1 / CI.EltSize;
  CI.UseST64 = false;
  CI.BaseOff = 0;

  // If the offset in elements doesn't fit in 8 bits, we might be able to use
  // the stride 64 versions.
  if ((EltOffset0 % 64 == 0) && (EltOffset1 % 64 == 0) &&
      isUInt<8>(EltOffset0 / 64) && isUInt<8>(EltOffset1 / 64)) {
    CI.Offset0 = EltOffset0 / 64;
    CI.Offset1 = EltOffset1 / 64;
    CI.UseST64 = true;
    return true;
  }
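  // Example of the ST64 case above (illustrative): b32 accesses at byte
  // offsets 0 and 16384 have element offsets 0 and 4096, both multiples of
  // 64, so they encode as ST64 offsets 0 and 64.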

  // Check if the new offsets fit in the reduced 8-bit range.
  if (isUInt<8>(EltOffset0) && isUInt<8>(EltOffset1)) {
    CI.Offset0 = EltOffset0;
    CI.Offset1 = EltOffset1;
    return true;
  }

  // Try to shift base address to decrease offsets.
  unsigned OffsetDiff = std::abs((int)EltOffset1 - (int)EltOffset0);
  CI.BaseOff = std::min(CI.Offset0, CI.Offset1);

  if ((OffsetDiff % 64 == 0) && isUInt<8>(OffsetDiff / 64)) {
    CI.Offset0 = (EltOffset0 - CI.BaseOff / CI.EltSize) / 64;
    CI.Offset1 = (EltOffset1 - CI.BaseOff / CI.EltSize) / 64;
    CI.UseST64 = true;
    return true;
  }

  if (isUInt<8>(OffsetDiff)) {
    CI.Offset0 = EltOffset0 - CI.BaseOff / CI.EltSize;
    CI.Offset1 = EltOffset1 - CI.BaseOff / CI.EltSize;
    return true;
  }
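  // Example of the rebasing case above (illustrative): b32 accesses at byte
  // offsets 1200 and 1232 have element offsets 300 and 308, which don't fit
  // in 8 bits; with BaseOff = 1200 the rebased offsets become 0 and 8.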

  return false;
}

bool SILoadStoreOptimizer::findMatchingDSInst(CombineInfo &CI) {
  MachineBasicBlock::iterator E = CI.I->getParent()->end();
  MachineBasicBlock::iterator MBBI = CI.I;
  ++MBBI;

  SmallVector<const MachineOperand *, 8> DefsToMove;
  addDefsToList(*CI.I, DefsToMove);

  for ( ; MBBI != E; ++MBBI) {
    if (MBBI->getOpcode() != CI.I->getOpcode()) {
      // This is not a matching DS instruction, but we can keep looking as
      // long as one of these conditions is met:
      // 1. It is safe to move I down past MBBI.
      // 2. It is safe to move MBBI down past the instruction that I will
      //    be merged into.

      if (MBBI->hasUnmodeledSideEffects())
        // We can't re-order this instruction with respect to other memory
        // operations, so we fail both conditions mentioned above.
        return false;

      if (MBBI->mayLoadOrStore() &&
          !memAccessesCanBeReordered(*CI.I, *MBBI, TII, AA)) {
        // We fail condition #1, but we may still be able to satisfy condition
        // #2. Add this instruction to the move list and then we will check
        // if condition #2 holds once we have selected the matching
        // instruction.
        CI.InstsToMove.push_back(&*MBBI);
        addDefsToList(*MBBI, DefsToMove);
        continue;
      }

      // When we match I with another DS instruction we will be moving I down
      // to the location of the matched instruction, so any uses of I will
      // need to be moved down as well.
      addToListsIfDependent(*MBBI, DefsToMove, CI.InstsToMove);
      continue;
    }

    // Don't merge volatiles.
    if (MBBI->hasOrderedMemoryRef())
      return false;

    // Handle a case like
    //   DS_WRITE_B32 addr, v, idx0
    //   w = DS_READ_B32 addr, idx0
    //   DS_WRITE_B32 addr, f(w), idx1
    // where the DS_READ_B32 ends up in InstsToMove and therefore prevents
    // merging of the two writes.
    if (addToListsIfDependent(*MBBI, DefsToMove, CI.InstsToMove))
      continue;

    int AddrIdx = AMDGPU::getNamedOperandIdx(CI.I->getOpcode(),
                                             AMDGPU::OpName::addr);
    const MachineOperand &AddrReg0 = CI.I->getOperand(AddrIdx);
    const MachineOperand &AddrReg1 = MBBI->getOperand(AddrIdx);

    // Check for the same base pointer. Be careful of subregisters, which can
    // occur with vectors of pointers.
    if (AddrReg0.getReg() == AddrReg1.getReg() &&
        AddrReg0.getSubReg() == AddrReg1.getSubReg()) {
      int OffsetIdx = AMDGPU::getNamedOperandIdx(CI.I->getOpcode(),
                                                 AMDGPU::OpName::offset);
      CI.Offset0 = CI.I->getOperand(OffsetIdx).getImm() & 0xffff;
      CI.Offset1 = MBBI->getOperand(OffsetIdx).getImm() & 0xffff;
      CI.Paired = MBBI;

      // Check that both offsets fit in the reduced range.
      // We also need to go through the list of instructions that we plan to
      // move and make sure they are all safe to move down past the merged
      // instruction.
      if (offsetsCanBeCombined(CI))
        if (canMoveInstsAcrossMemOp(*MBBI, CI.InstsToMove, TII, AA))
          return true;
    }

    // We've found a load/store that we couldn't merge for some reason.
    // We could potentially keep looking, but we'd need to make sure that
    // it was safe to move I and also all the instructions in InstsToMove
    // down past this instruction. Check if we can move I across MBBI and
    // if we can move all of I's users as well.
    if (!memAccessesCanBeReordered(*CI.I, *MBBI, TII, AA) ||
        !canMoveInstsAcrossMemOp(*MBBI, CI.InstsToMove, TII, AA))
      break;
  }
  return false;
}

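// Merge two DS reads into a single ds_read2 / ds_read2st64 and copy each half
// of the wide result back to the original destination registers, e.g.
// (illustrative, EltSize == 4):
//   v0 = ds_read_b32 v2 offset:16
//   v1 = ds_read_b32 v2 offset:32
// ==>
//   v[0:1] = ds_read2_b32 v2 offset0:4 offset1:8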
MachineBasicBlock::iterator SILoadStoreOptimizer::mergeRead2Pair(
  CombineInfo &CI) {
  MachineBasicBlock *MBB = CI.I->getParent();

  // Be careful, since the addresses could be subregisters themselves in weird
  // cases, like vectors of pointers.
  const auto *AddrReg = TII->getNamedOperand(*CI.I, AMDGPU::OpName::addr);

  const auto *Dest0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::vdst);
  const auto *Dest1 = TII->getNamedOperand(*CI.Paired, AMDGPU::OpName::vdst);

  unsigned NewOffset0 = CI.Offset0;
  unsigned NewOffset1 = CI.Offset1;
  unsigned Opc = (CI.EltSize == 4) ? AMDGPU::DS_READ2_B32
                                   : AMDGPU::DS_READ2_B64;

  if (CI.UseST64)
    Opc = (CI.EltSize == 4) ? AMDGPU::DS_READ2ST64_B32
                            : AMDGPU::DS_READ2ST64_B64;

  unsigned SubRegIdx0 = (CI.EltSize == 4) ? AMDGPU::sub0 : AMDGPU::sub0_sub1;
  unsigned SubRegIdx1 = (CI.EltSize == 4) ? AMDGPU::sub1 : AMDGPU::sub2_sub3;

  if (NewOffset0 > NewOffset1) {
    // Canonicalize the merged instruction so the smaller offset comes first.
    std::swap(NewOffset0, NewOffset1);
    std::swap(SubRegIdx0, SubRegIdx1);
  }

  assert((isUInt<8>(NewOffset0) && isUInt<8>(NewOffset1)) &&
         (NewOffset0 != NewOffset1) &&
         "Computed offset doesn't fit");

  const MCInstrDesc &Read2Desc = TII->get(Opc);

  const TargetRegisterClass *SuperRC
    = (CI.EltSize == 4) ? &AMDGPU::VReg_64RegClass : &AMDGPU::VReg_128RegClass;
  unsigned DestReg = MRI->createVirtualRegister(SuperRC);

  DebugLoc DL = CI.I->getDebugLoc();

  unsigned BaseReg = AddrReg->getReg();
  unsigned BaseRegFlags = 0;
  if (CI.BaseOff) {
    BaseReg = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
    BaseRegFlags = RegState::Kill;
    BuildMI(*MBB, CI.Paired, DL, TII->get(AMDGPU::V_ADD_I32_e32), BaseReg)
      .addImm(CI.BaseOff)
      .addReg(AddrReg->getReg());
  }

  MachineInstrBuilder Read2 =
    BuildMI(*MBB, CI.Paired, DL, Read2Desc, DestReg)
      .addReg(BaseReg, BaseRegFlags) // addr
      .addImm(NewOffset0)            // offset0
      .addImm(NewOffset1)            // offset1
      .addImm(0)                     // gds
      .setMemRefs(CI.I->mergeMemRefsWith(*CI.Paired));

  (void)Read2;

  const MCInstrDesc &CopyDesc = TII->get(TargetOpcode::COPY);

  // Copy to the old destination registers.
  BuildMI(*MBB, CI.Paired, DL, CopyDesc)
    .add(*Dest0) // Copy to same destination including flags and sub reg.
    .addReg(DestReg, 0, SubRegIdx0);
  MachineInstr *Copy1 = BuildMI(*MBB, CI.Paired, DL, CopyDesc)
    .add(*Dest1)
    .addReg(DestReg, RegState::Kill, SubRegIdx1);

  moveInstsAfter(Copy1, CI.InstsToMove);

  MachineBasicBlock::iterator Next = std::next(CI.I);
  CI.I->eraseFromParent();
  CI.Paired->eraseFromParent();

  DEBUG(dbgs() << "Inserted read2: " << *Read2 << '\n');
  return Next;
}

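// Merge two DS writes into a single ds_write2 / ds_write2st64, e.g.
// (illustrative, EltSize == 4):
//   ds_write_b32 v2, v0 offset:16
//   ds_write_b32 v2, v1 offset:32
// ==>
//   ds_write2_b32 v2, v0, v1 offset0:4 offset1:8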
MachineBasicBlock::iterator SILoadStoreOptimizer::mergeWrite2Pair(
  CombineInfo &CI) {
  MachineBasicBlock *MBB = CI.I->getParent();

  // Be sure to use .add(), and not .addReg(), with these. We want to be sure
  // we preserve the subregister index and any register flags set on them.
  const MachineOperand *Addr = TII->getNamedOperand(*CI.I, AMDGPU::OpName::addr);
  const MachineOperand *Data0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::data0);
  const MachineOperand *Data1
    = TII->getNamedOperand(*CI.Paired, AMDGPU::OpName::data0);

  unsigned NewOffset0 = CI.Offset0;
  unsigned NewOffset1 = CI.Offset1;
  unsigned Opc = (CI.EltSize == 4) ? AMDGPU::DS_WRITE2_B32
                                   : AMDGPU::DS_WRITE2_B64;

  if (CI.UseST64)
    Opc = (CI.EltSize == 4) ? AMDGPU::DS_WRITE2ST64_B32
                            : AMDGPU::DS_WRITE2ST64_B64;

  if (NewOffset0 > NewOffset1) {
    // Canonicalize the merged instruction so the smaller offset comes first.
    std::swap(NewOffset0, NewOffset1);
    std::swap(Data0, Data1);
  }

  assert((isUInt<8>(NewOffset0) && isUInt<8>(NewOffset1)) &&
         (NewOffset0 != NewOffset1) &&
         "Computed offset doesn't fit");

  const MCInstrDesc &Write2Desc = TII->get(Opc);
  DebugLoc DL = CI.I->getDebugLoc();

  unsigned BaseReg = Addr->getReg();
  unsigned BaseRegFlags = 0;
  if (CI.BaseOff) {
    BaseReg = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
    BaseRegFlags = RegState::Kill;
    BuildMI(*MBB, CI.Paired, DL, TII->get(AMDGPU::V_ADD_I32_e32), BaseReg)
      .addImm(CI.BaseOff)
      .addReg(Addr->getReg());
  }

  MachineInstrBuilder Write2 =
    BuildMI(*MBB, CI.Paired, DL, Write2Desc)
      .addReg(BaseReg, BaseRegFlags) // addr
      .add(*Data0)                   // data0
      .add(*Data1)                   // data1
      .addImm(NewOffset0)            // offset0
      .addImm(NewOffset1)            // offset1
      .addImm(0)                     // gds
      .setMemRefs(CI.I->mergeMemRefsWith(*CI.Paired));

  moveInstsAfter(Write2, CI.InstsToMove);

  MachineBasicBlock::iterator Next = std::next(CI.I);
  CI.I->eraseFromParent();
  CI.Paired->eraseFromParent();

  DEBUG(dbgs() << "Inserted write2 inst: " << *Write2 << '\n');
  return Next;
}

// Scan through looking for adjacent LDS operations with constant offsets from
// the same base register. We rely on the scheduler to do the hard work of
// clustering nearby loads, and assume these are all adjacent.
bool SILoadStoreOptimizer::optimizeBlock(MachineBasicBlock &MBB) {
  bool Modified = false;

  for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end(); I != E;) {
    MachineInstr &MI = *I;

    // Don't combine if volatile.
    if (MI.hasOrderedMemoryRef()) {
      ++I;
      continue;
    }

    CombineInfo CI;
    CI.I = I;
    unsigned Opc = MI.getOpcode();
    if (Opc == AMDGPU::DS_READ_B32 || Opc == AMDGPU::DS_READ_B64) {
      CI.EltSize = (Opc == AMDGPU::DS_READ_B64) ? 8 : 4;
      if (findMatchingDSInst(CI)) {
        Modified = true;
        I = mergeRead2Pair(CI);
      } else {
        ++I;
      }

      continue;
    } else if (Opc == AMDGPU::DS_WRITE_B32 || Opc == AMDGPU::DS_WRITE_B64) {
      CI.EltSize = (Opc == AMDGPU::DS_WRITE_B64) ? 8 : 4;
      if (findMatchingDSInst(CI)) {
        Modified = true;
        I = mergeWrite2Pair(CI);
      } else {
        ++I;
      }

      continue;
    }

    ++I;
  }

  return Modified;
}

bool SILoadStoreOptimizer::runOnMachineFunction(MachineFunction &MF) {
  if (skipFunction(*MF.getFunction()))
    return false;

  const SISubtarget &STM = MF.getSubtarget<SISubtarget>();
  if (!STM.loadStoreOptEnabled())
    return false;

  TII = STM.getInstrInfo();
  TRI = &TII->getRegisterInfo();

  MRI = &MF.getRegInfo();
  AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();

  DEBUG(dbgs() << "Running SILoadStoreOptimizer\n");

  bool Modified = false;

  for (MachineBasicBlock &MBB : MF)
    Modified |= optimizeBlock(MBB);

  return Modified;
}