//===- SILoadStoreOptimizer.cpp -------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass tries to fuse DS instructions with nearby immediate offsets.
// This will fuse operations such as
//  ds_read_b32 v0, v2 offset:16
//  ds_read_b32 v1, v2 offset:32
// ==>
//  ds_read2_b32 v[0:1], v2, offset0:4 offset1:8
//
// The same is done for certain SMEM and VMEM opcodes, e.g.:
//  s_buffer_load_dword s4, s[0:3], 4
//  s_buffer_load_dword s5, s[0:3], 8
// ==>
//  s_buffer_load_dwordx2 s[4:5], s[0:3], 4
//
// This pass also tries to promote a constant offset to the immediate by
// adjusting the base. It tries to use a base from the nearby instructions that
// allows it to have a 13-bit constant offset and then promotes the 13-bit
// offset to the immediate.
// E.g.
//  s_movk_i32 s0, 0x1800
//  v_add_co_u32_e32 v0, vcc, s0, v2
//  v_addc_co_u32_e32 v1, vcc, 0, v6, vcc
//
//  s_movk_i32 s0, 0x1000
//  v_add_co_u32_e32 v5, vcc, s0, v2
//  v_addc_co_u32_e32 v6, vcc, 0, v6, vcc
//  global_load_dwordx2 v[5:6], v[5:6], off
//  global_load_dwordx2 v[0:1], v[0:1], off
// =>
//  s_movk_i32 s0, 0x1000
//  v_add_co_u32_e32 v5, vcc, s0, v2
//  v_addc_co_u32_e32 v6, vcc, 0, v6, vcc
//  global_load_dwordx2 v[5:6], v[5:6], off
//  global_load_dwordx2 v[0:1], v[5:6], off offset:2048
//
// Future improvements:
//
// - This currently relies on the scheduler to place loads and stores next to
//   each other, and then only merges adjacent pairs of instructions. It would
//   be good to be more flexible with interleaved instructions, and possibly
//   run before scheduling. It currently misses stores of constants because
//   loading the constant into the data register is placed between the stores,
//   although this is arguably a scheduling problem.
//
// - Live interval recomputing seems inefficient. This currently only matches
//   one pair, recomputes live intervals, and moves on to the next pair. It
//   would be better to compute a list of all merges that need to occur.
//
// - With a list of instructions to process, we can also merge more. If a
//   cluster of loads has offsets that are too large to fit in the 8-bit
//   offset fields but are close enough together, we can add to the base
//   pointer and use the new, reduced offsets.
//
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "SIInstrInfo.h"
#include "SIRegisterInfo.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/Pass.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <cstdlib>
#include <iterator>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "si-load-store-opt"

namespace {
enum InstClassEnum {
  UNKNOWN,
  DS_READ,
  DS_WRITE,
  S_BUFFER_LOAD_IMM,
  BUFFER_LOAD_OFFEN = AMDGPU::BUFFER_LOAD_DWORD_OFFEN,
  BUFFER_LOAD_OFFSET = AMDGPU::BUFFER_LOAD_DWORD_OFFSET,
  BUFFER_STORE_OFFEN = AMDGPU::BUFFER_STORE_DWORD_OFFEN,
  BUFFER_STORE_OFFSET = AMDGPU::BUFFER_STORE_DWORD_OFFSET,
  BUFFER_LOAD_OFFEN_exact = AMDGPU::BUFFER_LOAD_DWORD_OFFEN_exact,
  BUFFER_LOAD_OFFSET_exact = AMDGPU::BUFFER_LOAD_DWORD_OFFSET_exact,
  BUFFER_STORE_OFFEN_exact = AMDGPU::BUFFER_STORE_DWORD_OFFEN_exact,
  BUFFER_STORE_OFFSET_exact = AMDGPU::BUFFER_STORE_DWORD_OFFSET_exact,
};

enum RegisterEnum {
  SBASE = 0x1,
  SRSRC = 0x2,
  SOFFSET = 0x4,
  VADDR = 0x8,
  ADDR = 0x10,
};

class SILoadStoreOptimizer : public MachineFunctionPass {
  struct CombineInfo {
    MachineBasicBlock::iterator I;
    MachineBasicBlock::iterator Paired;
    unsigned EltSize;
    unsigned Offset0;
    unsigned Offset1;
    unsigned Width0;
    unsigned Width1;
    unsigned BaseOff;
    InstClassEnum InstClass;
    bool GLC0;
    bool GLC1;
    bool SLC0;
    bool SLC1;
    bool DLC0;
    bool DLC1;
    bool UseST64;
    SmallVector<MachineInstr *, 8> InstsToMove;
  };

  struct BaseRegisters {
    unsigned LoReg = 0;
    unsigned HiReg = 0;

    unsigned LoSubReg = 0;
    unsigned HiSubReg = 0;
  };

  struct MemAddress {
    BaseRegisters Base;
    int64_t Offset = 0;
  };

  using MemInfoMap = DenseMap<MachineInstr *, MemAddress>;

private:
  const GCNSubtarget *STM = nullptr;
  const SIInstrInfo *TII = nullptr;
  const SIRegisterInfo *TRI = nullptr;
  MachineRegisterInfo *MRI = nullptr;
  AliasAnalysis *AA = nullptr;
  bool OptimizeAgain;

  static bool offsetsCanBeCombined(CombineInfo &CI);
  static bool widthsFit(const GCNSubtarget &STM, const CombineInfo &CI);
  static unsigned getNewOpcode(const CombineInfo &CI);
  static std::pair<unsigned, unsigned> getSubRegIdxs(const CombineInfo &CI);
  const TargetRegisterClass *getTargetRegisterClass(const CombineInfo &CI);
  unsigned getOpcodeWidth(const MachineInstr &MI);
  InstClassEnum getInstClass(unsigned Opc);
  unsigned getRegs(unsigned Opc);

  bool findMatchingInst(CombineInfo &CI);

  unsigned read2Opcode(unsigned EltSize) const;
  unsigned read2ST64Opcode(unsigned EltSize) const;
  MachineBasicBlock::iterator mergeRead2Pair(CombineInfo &CI);

  unsigned write2Opcode(unsigned EltSize) const;
  unsigned write2ST64Opcode(unsigned EltSize) const;
  MachineBasicBlock::iterator mergeWrite2Pair(CombineInfo &CI);
  MachineBasicBlock::iterator mergeSBufferLoadImmPair(CombineInfo &CI);
  MachineBasicBlock::iterator mergeBufferLoadPair(CombineInfo &CI);
  MachineBasicBlock::iterator mergeBufferStorePair(CombineInfo &CI);

  void updateBaseAndOffset(MachineInstr &I, unsigned NewBase,
                           int32_t NewOffset);
  unsigned computeBase(MachineInstr &MI, const MemAddress &Addr);
  MachineOperand createRegOrImm(int32_t Val, MachineInstr &MI);
  Optional<int32_t> extractConstOffset(const MachineOperand &Op);
  void processBaseWithConstOffset(const MachineOperand &Base, MemAddress &Addr);
  /// Promotes a constant offset to the immediate by adjusting the base. It
  /// tries to use a base from the nearby instructions that allows it to have
  /// a 13-bit constant offset, which gets promoted to the immediate.
  bool promoteConstantOffsetToImm(MachineInstr &CI,
                                  MemInfoMap &Visited,
                                  SmallPtrSet<MachineInstr *, 4> &Promoted);

public:
  static char ID;

  SILoadStoreOptimizer() : MachineFunctionPass(ID) {
    initializeSILoadStoreOptimizerPass(*PassRegistry::getPassRegistry());
  }

  bool optimizeBlock(MachineBasicBlock &MBB);

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override { return "SI Load Store Optimizer"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    AU.addRequired<AAResultsWrapperPass>();

    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

} // end anonymous namespace.

INITIALIZE_PASS_BEGIN(SILoadStoreOptimizer, DEBUG_TYPE,
                      "SI Load Store Optimizer", false, false)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_END(SILoadStoreOptimizer, DEBUG_TYPE, "SI Load Store Optimizer",
                    false, false)

char SILoadStoreOptimizer::ID = 0;

char &llvm::SILoadStoreOptimizerID = SILoadStoreOptimizer::ID;

FunctionPass *llvm::createSILoadStoreOptimizerPass() {
  return new SILoadStoreOptimizer();
}

static void moveInstsAfter(MachineBasicBlock::iterator I,
                           ArrayRef<MachineInstr *> InstsToMove) {
  MachineBasicBlock *MBB = I->getParent();
  ++I;
  for (MachineInstr *MI : InstsToMove) {
    MI->removeFromParent();
    MBB->insert(I, MI);
  }
}

static void addDefsUsesToList(const MachineInstr &MI,
                              DenseSet<unsigned> &RegDefs,
                              DenseSet<unsigned> &PhysRegUses) {
  for (const MachineOperand &Op : MI.operands()) {
    if (Op.isReg()) {
      if (Op.isDef())
        RegDefs.insert(Op.getReg());
      else if (Op.readsReg() &&
               TargetRegisterInfo::isPhysicalRegister(Op.getReg()))
        PhysRegUses.insert(Op.getReg());
    }
  }
}

static bool memAccessesCanBeReordered(MachineBasicBlock::iterator A,
                                      MachineBasicBlock::iterator B,
                                      AliasAnalysis *AA) {
  // RAW or WAR - cannot reorder
  // WAW - cannot reorder
  // RAR - safe to reorder
  return !(A->mayStore() || B->mayStore()) || !A->mayAlias(AA, *B, true);
}

// Add MI and its defs to the lists if MI reads one of the defs that are
// already in the list. Returns true in that case.
static bool addToListsIfDependent(MachineInstr &MI, DenseSet<unsigned> &RegDefs,
                                  DenseSet<unsigned> &PhysRegUses,
                                  SmallVectorImpl<MachineInstr *> &Insts) {
  for (MachineOperand &Use : MI.operands()) {
    // If one of the defs is read, then there is a use of Def between I and the
    // instruction that I will potentially be merged with. We will need to move
    // this instruction after the merged instructions.
    //
    // Similarly, if there is a def which is read by an instruction that is to
    // be moved for merging, then we need to move the def-instruction as well.
    // This can only happen for physical registers such as M0; virtual
    // registers are in SSA form.
    if (Use.isReg() &&
        ((Use.readsReg() && RegDefs.count(Use.getReg())) ||
         (Use.isDef() && RegDefs.count(Use.getReg())) ||
         (Use.isDef() && TargetRegisterInfo::isPhysicalRegister(Use.getReg()) &&
          PhysRegUses.count(Use.getReg())))) {
      Insts.push_back(&MI);
      addDefsUsesToList(MI, RegDefs, PhysRegUses);
      return true;
    }
  }

  return false;
}

static bool canMoveInstsAcrossMemOp(MachineInstr &MemOp,
                                    ArrayRef<MachineInstr *> InstsToMove,
                                    AliasAnalysis *AA) {
  assert(MemOp.mayLoadOrStore());

  for (MachineInstr *InstToMove : InstsToMove) {
    if (!InstToMove->mayLoadOrStore())
      continue;
    if (!memAccessesCanBeReordered(MemOp, *InstToMove, AA))
      return false;
  }
  return true;
}

// This function assumes that \p A and \p B are identical except for
// size and offset, and that they reference adjacent memory.
static MachineMemOperand *combineKnownAdjacentMMOs(MachineFunction &MF,
                                                   const MachineMemOperand *A,
                                                   const MachineMemOperand *B) {
  unsigned MinOffset = std::min(A->getOffset(), B->getOffset());
  unsigned Size = A->getSize() + B->getSize();
  return MF.getMachineMemOperand(A, MinOffset, Size);
}

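// Decide whether the offsets of CI.I and CI.Paired can be expressed in a
// single merged instruction, rewriting CI.Offset0/CI.Offset1 to the merged
// encoding (and setting CI.UseST64/CI.BaseOff for DS instructions). For
// example, DS offsets 16 and 32 with EltSize 4 become offset0:4 offset1:8.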
bool SILoadStoreOptimizer::offsetsCanBeCombined(CombineInfo &CI) {
  // XXX - Would the same offset be OK? Is there any reason this would happen or
  // be useful?
  if (CI.Offset0 == CI.Offset1)
    return false;

  // This won't be valid if the offset isn't aligned.
  if ((CI.Offset0 % CI.EltSize != 0) || (CI.Offset1 % CI.EltSize != 0))
    return false;

  unsigned EltOffset0 = CI.Offset0 / CI.EltSize;
  unsigned EltOffset1 = CI.Offset1 / CI.EltSize;
  CI.UseST64 = false;
  CI.BaseOff = 0;

  // Handle SMEM and VMEM instructions.
  if ((CI.InstClass != DS_READ) && (CI.InstClass != DS_WRITE)) {
    return (EltOffset0 + CI.Width0 == EltOffset1 ||
            EltOffset1 + CI.Width1 == EltOffset0) &&
           CI.GLC0 == CI.GLC1 && CI.DLC0 == CI.DLC1 &&
           (CI.InstClass == S_BUFFER_LOAD_IMM || CI.SLC0 == CI.SLC1);
  }

  // If the offset in elements doesn't fit in 8 bits, we might be able to use
  // the stride-64 versions.
  if ((EltOffset0 % 64 == 0) && (EltOffset1 % 64) == 0 &&
      isUInt<8>(EltOffset0 / 64) && isUInt<8>(EltOffset1 / 64)) {
    CI.Offset0 = EltOffset0 / 64;
    CI.Offset1 = EltOffset1 / 64;
    CI.UseST64 = true;
    return true;
  }

  // Check if the new offsets fit in the reduced 8-bit range.
  if (isUInt<8>(EltOffset0) && isUInt<8>(EltOffset1)) {
    CI.Offset0 = EltOffset0;
    CI.Offset1 = EltOffset1;
    return true;
  }

  // Try to shift base address to decrease offsets.
  unsigned OffsetDiff = std::abs((int)EltOffset1 - (int)EltOffset0);
  CI.BaseOff = std::min(CI.Offset0, CI.Offset1);

  if ((OffsetDiff % 64 == 0) && isUInt<8>(OffsetDiff / 64)) {
    CI.Offset0 = (EltOffset0 - CI.BaseOff / CI.EltSize) / 64;
    CI.Offset1 = (EltOffset1 - CI.BaseOff / CI.EltSize) / 64;
    CI.UseST64 = true;
    return true;
  }

  if (isUInt<8>(OffsetDiff)) {
    CI.Offset0 = EltOffset0 - CI.BaseOff / CI.EltSize;
    CI.Offset1 = EltOffset1 - CI.BaseOff / CI.EltSize;
    return true;
  }

  return false;
}

bool SILoadStoreOptimizer::widthsFit(const GCNSubtarget &STM,
                                     const CombineInfo &CI) {
  const unsigned Width = (CI.Width0 + CI.Width1);
  switch (CI.InstClass) {
  default:
    return (Width <= 4) && (STM.hasDwordx3LoadStores() || (Width != 3));
  case S_BUFFER_LOAD_IMM:
    switch (Width) {
    default:
      return false;
    case 2:
    case 4:
      return true;
    }
  }
}

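// Return the width of MI's memory access in dwords (e.g. 2 for an x2 load),
// or 0 if the opcode is not one this pass knows about.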
unsigned SILoadStoreOptimizer::getOpcodeWidth(const MachineInstr &MI) {
  const unsigned Opc = MI.getOpcode();

  if (TII->isMUBUF(MI)) {
    return AMDGPU::getMUBUFDwords(Opc);
  }

  switch (Opc) {
  default:
    return 0;
  case AMDGPU::S_BUFFER_LOAD_DWORD_IMM:
    return 1;
  case AMDGPU::S_BUFFER_LOAD_DWORDX2_IMM:
    return 2;
  case AMDGPU::S_BUFFER_LOAD_DWORDX4_IMM:
    return 4;
  }
}

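// Map an opcode to the InstClassEnum bucket used for matching; MUBUF opcodes
// are classified via their base opcode. Returns UNKNOWN for opcodes this pass
// cannot merge.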
InstClassEnum SILoadStoreOptimizer::getInstClass(unsigned Opc) {
  if (TII->isMUBUF(Opc)) {
    const int baseOpcode = AMDGPU::getMUBUFBaseOpcode(Opc);

    // If we couldn't identify the opcode, bail out.
    if (baseOpcode == -1) {
      return UNKNOWN;
    }

    switch (baseOpcode) {
    default:
      return UNKNOWN;
    case AMDGPU::BUFFER_LOAD_DWORD_OFFEN:
      return BUFFER_LOAD_OFFEN;
    case AMDGPU::BUFFER_LOAD_DWORD_OFFSET:
      return BUFFER_LOAD_OFFSET;
    case AMDGPU::BUFFER_STORE_DWORD_OFFEN:
      return BUFFER_STORE_OFFEN;
    case AMDGPU::BUFFER_STORE_DWORD_OFFSET:
      return BUFFER_STORE_OFFSET;
    case AMDGPU::BUFFER_LOAD_DWORD_OFFEN_exact:
      return BUFFER_LOAD_OFFEN_exact;
    case AMDGPU::BUFFER_LOAD_DWORD_OFFSET_exact:
      return BUFFER_LOAD_OFFSET_exact;
    case AMDGPU::BUFFER_STORE_DWORD_OFFEN_exact:
      return BUFFER_STORE_OFFEN_exact;
    case AMDGPU::BUFFER_STORE_DWORD_OFFSET_exact:
      return BUFFER_STORE_OFFSET_exact;
    }
  }

  switch (Opc) {
  default:
    return UNKNOWN;
  case AMDGPU::S_BUFFER_LOAD_DWORD_IMM:
  case AMDGPU::S_BUFFER_LOAD_DWORDX2_IMM:
  case AMDGPU::S_BUFFER_LOAD_DWORDX4_IMM:
    return S_BUFFER_LOAD_IMM;
  case AMDGPU::DS_READ_B32:
  case AMDGPU::DS_READ_B64:
  case AMDGPU::DS_READ_B32_gfx9:
  case AMDGPU::DS_READ_B64_gfx9:
    return DS_READ;
  case AMDGPU::DS_WRITE_B32:
  case AMDGPU::DS_WRITE_B64:
  case AMDGPU::DS_WRITE_B32_gfx9:
  case AMDGPU::DS_WRITE_B64_gfx9:
    return DS_WRITE;
  }
}

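// Return a RegisterEnum bitmask describing which address operands (addr,
// sbase, srsrc, soffset, vaddr) instructions with opcode Opc carry.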
unsigned SILoadStoreOptimizer::getRegs(unsigned Opc) {
  if (TII->isMUBUF(Opc)) {
    unsigned result = 0;

    if (AMDGPU::getMUBUFHasVAddr(Opc)) {
      result |= VADDR;
    }

    if (AMDGPU::getMUBUFHasSrsrc(Opc)) {
      result |= SRSRC;
    }

    if (AMDGPU::getMUBUFHasSoffset(Opc)) {
      result |= SOFFSET;
    }

    return result;
  }

  switch (Opc) {
  default:
    return 0;
  case AMDGPU::S_BUFFER_LOAD_DWORD_IMM:
  case AMDGPU::S_BUFFER_LOAD_DWORDX2_IMM:
  case AMDGPU::S_BUFFER_LOAD_DWORDX4_IMM:
    return SBASE;
  case AMDGPU::DS_READ_B32:
  case AMDGPU::DS_READ_B64:
  case AMDGPU::DS_READ_B32_gfx9:
  case AMDGPU::DS_READ_B64_gfx9:
  case AMDGPU::DS_WRITE_B32:
  case AMDGPU::DS_WRITE_B64:
  case AMDGPU::DS_WRITE_B32_gfx9:
  case AMDGPU::DS_WRITE_B64_gfx9:
    return ADDR;
  }
}

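// Scan forward from CI.I for a second instruction that can be merged with it.
// On success, record the paired instruction, offsets, widths, and cache bits
// in CI, along with any intervening instructions that must be moved below the
// merge point, and return true.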
bool SILoadStoreOptimizer::findMatchingInst(CombineInfo &CI) {
  MachineBasicBlock *MBB = CI.I->getParent();
  MachineBasicBlock::iterator E = MBB->end();
  MachineBasicBlock::iterator MBBI = CI.I;

  const unsigned Opc = CI.I->getOpcode();
  const InstClassEnum InstClass = getInstClass(Opc);

  if (InstClass == UNKNOWN) {
    return false;
  }

  const unsigned Regs = getRegs(Opc);

  unsigned AddrOpName[5] = {0};
  int AddrIdx[5];
  const MachineOperand *AddrReg[5];
  unsigned NumAddresses = 0;

  if (Regs & ADDR) {
    AddrOpName[NumAddresses++] = AMDGPU::OpName::addr;
  }

  if (Regs & SBASE) {
    AddrOpName[NumAddresses++] = AMDGPU::OpName::sbase;
  }

  if (Regs & SRSRC) {
    AddrOpName[NumAddresses++] = AMDGPU::OpName::srsrc;
  }

  if (Regs & SOFFSET) {
    AddrOpName[NumAddresses++] = AMDGPU::OpName::soffset;
  }

  if (Regs & VADDR) {
    AddrOpName[NumAddresses++] = AMDGPU::OpName::vaddr;
  }

  for (unsigned i = 0; i < NumAddresses; i++) {
    AddrIdx[i] = AMDGPU::getNamedOperandIdx(CI.I->getOpcode(), AddrOpName[i]);
    AddrReg[i] = &CI.I->getOperand(AddrIdx[i]);

    // We only ever merge operations with the same base address register, so
    // don't bother scanning forward if there are no other uses.
    if (AddrReg[i]->isReg() &&
        (TargetRegisterInfo::isPhysicalRegister(AddrReg[i]->getReg()) ||
         MRI->hasOneNonDBGUse(AddrReg[i]->getReg())))
      return false;
  }

  ++MBBI;

  DenseSet<unsigned> RegDefsToMove;
  DenseSet<unsigned> PhysRegUsesToMove;
  addDefsUsesToList(*CI.I, RegDefsToMove, PhysRegUsesToMove);

  for (; MBBI != E; ++MBBI) {
    const bool IsDS = (InstClass == DS_READ) || (InstClass == DS_WRITE);

    if ((getInstClass(MBBI->getOpcode()) != InstClass) ||
        (IsDS && (MBBI->getOpcode() != Opc))) {
      // This is not a matching instruction, but we can keep looking as
      // long as one of these conditions is met:
      // 1. It is safe to move I down past MBBI.
      // 2. It is safe to move MBBI down past the instruction that I will
      //    be merged into.

      if (MBBI->hasUnmodeledSideEffects()) {
        // We can't re-order this instruction with respect to other memory
        // operations, so we fail both conditions mentioned above.
        return false;
      }

      if (MBBI->mayLoadOrStore() &&
          (!memAccessesCanBeReordered(*CI.I, *MBBI, AA) ||
           !canMoveInstsAcrossMemOp(*MBBI, CI.InstsToMove, AA))) {
        // We fail condition #1, but we may still be able to satisfy condition
        // #2. Add this instruction to the move list and then we will check
        // if condition #2 holds once we have selected the matching instruction.
        CI.InstsToMove.push_back(&*MBBI);
        addDefsUsesToList(*MBBI, RegDefsToMove, PhysRegUsesToMove);
        continue;
      }

      // When we match I with another load or store instruction, we will be
      // moving I down to the location of the matched instruction, and any
      // uses of I will need to be moved down as well.
      addToListsIfDependent(*MBBI, RegDefsToMove, PhysRegUsesToMove,
                            CI.InstsToMove);
      continue;
    }

    // Don't merge volatiles.
    if (MBBI->hasOrderedMemoryRef())
      return false;

    // Handle a case like
    //   DS_WRITE_B32 addr, v, idx0
    //   w = DS_READ_B32 addr, idx0
    //   DS_WRITE_B32 addr, f(w), idx1
    // where the DS_READ_B32 ends up in InstsToMove and therefore prevents
    // merging of the two writes.
    if (addToListsIfDependent(*MBBI, RegDefsToMove, PhysRegUsesToMove,
                              CI.InstsToMove))
      continue;

    bool Match = true;
    for (unsigned i = 0; i < NumAddresses; i++) {
      const MachineOperand &AddrRegNext = MBBI->getOperand(AddrIdx[i]);

      if (AddrReg[i]->isImm() || AddrRegNext.isImm()) {
        if (AddrReg[i]->isImm() != AddrRegNext.isImm() ||
            AddrReg[i]->getImm() != AddrRegNext.getImm()) {
          Match = false;
          break;
        }
        continue;
      }

      // Check same base pointer. Be careful of subregisters, which can occur
      // with vectors of pointers.
      if (AddrReg[i]->getReg() != AddrRegNext.getReg() ||
          AddrReg[i]->getSubReg() != AddrRegNext.getSubReg()) {
        Match = false;
        break;
      }
    }

    if (Match) {
      int OffsetIdx =
          AMDGPU::getNamedOperandIdx(CI.I->getOpcode(), AMDGPU::OpName::offset);
      CI.Offset0 = CI.I->getOperand(OffsetIdx).getImm();
      CI.Width0 = getOpcodeWidth(*CI.I);
      CI.Offset1 = MBBI->getOperand(OffsetIdx).getImm();
      CI.Width1 = getOpcodeWidth(*MBBI);
      CI.Paired = MBBI;

      if ((CI.InstClass == DS_READ) || (CI.InstClass == DS_WRITE)) {
        CI.Offset0 &= 0xffff;
        CI.Offset1 &= 0xffff;
      } else {
        CI.GLC0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::glc)->getImm();
        CI.GLC1 = TII->getNamedOperand(*MBBI, AMDGPU::OpName::glc)->getImm();
        if (CI.InstClass != S_BUFFER_LOAD_IMM) {
          CI.SLC0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::slc)->getImm();
          CI.SLC1 = TII->getNamedOperand(*MBBI, AMDGPU::OpName::slc)->getImm();
        }
        CI.DLC0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::dlc)->getImm();
        CI.DLC1 = TII->getNamedOperand(*MBBI, AMDGPU::OpName::dlc)->getImm();
      }

      // Check both offsets fit in the reduced range.
      // We also need to go through the list of instructions that we plan to
      // move and make sure they are all safe to move down past the merged
      // instruction.
      if (widthsFit(*STM, CI) && offsetsCanBeCombined(CI))
        if (canMoveInstsAcrossMemOp(*MBBI, CI.InstsToMove, AA))
          return true;
    }

    // We've found a load/store that we couldn't merge for some reason.
    // We could potentially keep looking, but we'd need to make sure that
    // it was safe to move I and also all the instructions in InstsToMove
    // down past this instruction.
    // Check if we can move I across MBBI and if we can move all of I's users.
    if (!memAccessesCanBeReordered(*CI.I, *MBBI, AA) ||
        !canMoveInstsAcrossMemOp(*MBBI, CI.InstsToMove, AA))
      break;
  }
  return false;
}

unsigned SILoadStoreOptimizer::read2Opcode(unsigned EltSize) const {
  if (STM->ldsRequiresM0Init())
    return (EltSize == 4) ? AMDGPU::DS_READ2_B32 : AMDGPU::DS_READ2_B64;
  return (EltSize == 4) ? AMDGPU::DS_READ2_B32_gfx9 : AMDGPU::DS_READ2_B64_gfx9;
}

unsigned SILoadStoreOptimizer::read2ST64Opcode(unsigned EltSize) const {
  if (STM->ldsRequiresM0Init())
    return (EltSize == 4) ? AMDGPU::DS_READ2ST64_B32 : AMDGPU::DS_READ2ST64_B64;

  return (EltSize == 4) ? AMDGPU::DS_READ2ST64_B32_gfx9
                        : AMDGPU::DS_READ2ST64_B64_gfx9;
}

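// Replace the two DS reads in CI with a single ds_read2/ds_read2st64 into a
// wider register, adjusting the base register first if CI.BaseOff is set,
// then copy each half back to the original destination registers.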
MachineBasicBlock::iterator
SILoadStoreOptimizer::mergeRead2Pair(CombineInfo &CI) {
  MachineBasicBlock *MBB = CI.I->getParent();

  // Be careful, since the addresses could be subregisters themselves in weird
  // cases, like vectors of pointers.
  const auto *AddrReg = TII->getNamedOperand(*CI.I, AMDGPU::OpName::addr);

  const auto *Dest0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::vdst);
  const auto *Dest1 = TII->getNamedOperand(*CI.Paired, AMDGPU::OpName::vdst);

  unsigned NewOffset0 = CI.Offset0;
  unsigned NewOffset1 = CI.Offset1;
  unsigned Opc =
      CI.UseST64 ? read2ST64Opcode(CI.EltSize) : read2Opcode(CI.EltSize);

  unsigned SubRegIdx0 = (CI.EltSize == 4) ? AMDGPU::sub0 : AMDGPU::sub0_sub1;
  unsigned SubRegIdx1 = (CI.EltSize == 4) ? AMDGPU::sub1 : AMDGPU::sub2_sub3;

  if (NewOffset0 > NewOffset1) {
    // Canonicalize the merged instruction so the smaller offset comes first.
    std::swap(NewOffset0, NewOffset1);
    std::swap(SubRegIdx0, SubRegIdx1);
  }

  assert((isUInt<8>(NewOffset0) && isUInt<8>(NewOffset1)) &&
         (NewOffset0 != NewOffset1) && "Computed offset doesn't fit");

  const MCInstrDesc &Read2Desc = TII->get(Opc);

  const TargetRegisterClass *SuperRC =
      (CI.EltSize == 4) ? &AMDGPU::VReg_64RegClass : &AMDGPU::VReg_128RegClass;
  unsigned DestReg = MRI->createVirtualRegister(SuperRC);

  DebugLoc DL = CI.I->getDebugLoc();

  unsigned BaseReg = AddrReg->getReg();
  unsigned BaseSubReg = AddrReg->getSubReg();
  unsigned BaseRegFlags = 0;
  if (CI.BaseOff) {
    unsigned ImmReg = MRI->createVirtualRegister(&AMDGPU::SGPR_32RegClass);
    BuildMI(*MBB, CI.Paired, DL, TII->get(AMDGPU::S_MOV_B32), ImmReg)
        .addImm(CI.BaseOff);

    BaseReg = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
    BaseRegFlags = RegState::Kill;

    TII->getAddNoCarry(*MBB, CI.Paired, DL, BaseReg)
        .addReg(ImmReg)
        .addReg(AddrReg->getReg(), 0, BaseSubReg)
        .addImm(0); // clamp bit
    BaseSubReg = 0;
  }

  MachineInstrBuilder Read2 =
      BuildMI(*MBB, CI.Paired, DL, Read2Desc, DestReg)
          .addReg(BaseReg, BaseRegFlags, BaseSubReg) // addr
          .addImm(NewOffset0)                        // offset0
          .addImm(NewOffset1)                        // offset1
          .addImm(0)                                 // gds
          .cloneMergedMemRefs({&*CI.I, &*CI.Paired});

  (void)Read2;

  const MCInstrDesc &CopyDesc = TII->get(TargetOpcode::COPY);

  // Copy to the old destination registers.
  BuildMI(*MBB, CI.Paired, DL, CopyDesc)
      .add(*Dest0) // Copy to same destination including flags and sub reg.
      .addReg(DestReg, 0, SubRegIdx0);
  MachineInstr *Copy1 = BuildMI(*MBB, CI.Paired, DL, CopyDesc)
                            .add(*Dest1)
                            .addReg(DestReg, RegState::Kill, SubRegIdx1);

  moveInstsAfter(Copy1, CI.InstsToMove);

  MachineBasicBlock::iterator Next = std::next(CI.I);
  CI.I->eraseFromParent();
  CI.Paired->eraseFromParent();

  LLVM_DEBUG(dbgs() << "Inserted read2: " << *Read2 << '\n');
  return Next;
}

unsigned SILoadStoreOptimizer::write2Opcode(unsigned EltSize) const {
  if (STM->ldsRequiresM0Init())
    return (EltSize == 4) ? AMDGPU::DS_WRITE2_B32 : AMDGPU::DS_WRITE2_B64;
  return (EltSize == 4) ? AMDGPU::DS_WRITE2_B32_gfx9
                        : AMDGPU::DS_WRITE2_B64_gfx9;
}

unsigned SILoadStoreOptimizer::write2ST64Opcode(unsigned EltSize) const {
  if (STM->ldsRequiresM0Init())
    return (EltSize == 4) ? AMDGPU::DS_WRITE2ST64_B32
                          : AMDGPU::DS_WRITE2ST64_B64;

  return (EltSize == 4) ? AMDGPU::DS_WRITE2ST64_B32_gfx9
                        : AMDGPU::DS_WRITE2ST64_B64_gfx9;
}

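// Replace the two DS writes in CI with a single ds_write2/ds_write2st64 that
// takes both data operands, adjusting the base register first if CI.BaseOff
// is set.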
MachineBasicBlock::iterator
SILoadStoreOptimizer::mergeWrite2Pair(CombineInfo &CI) {
  MachineBasicBlock *MBB = CI.I->getParent();

  // Be sure to use .add() and not .addReg() with these. We want to be sure we
  // preserve the subregister index and any register flags set on them.
  const MachineOperand *AddrReg =
      TII->getNamedOperand(*CI.I, AMDGPU::OpName::addr);
  const MachineOperand *Data0 =
      TII->getNamedOperand(*CI.I, AMDGPU::OpName::data0);
  const MachineOperand *Data1 =
      TII->getNamedOperand(*CI.Paired, AMDGPU::OpName::data0);

  unsigned NewOffset0 = CI.Offset0;
  unsigned NewOffset1 = CI.Offset1;
  unsigned Opc =
      CI.UseST64 ? write2ST64Opcode(CI.EltSize) : write2Opcode(CI.EltSize);

  if (NewOffset0 > NewOffset1) {
    // Canonicalize the merged instruction so the smaller offset comes first.
    std::swap(NewOffset0, NewOffset1);
    std::swap(Data0, Data1);
  }

  assert((isUInt<8>(NewOffset0) && isUInt<8>(NewOffset1)) &&
         (NewOffset0 != NewOffset1) && "Computed offset doesn't fit");

  const MCInstrDesc &Write2Desc = TII->get(Opc);
  DebugLoc DL = CI.I->getDebugLoc();

  unsigned BaseReg = AddrReg->getReg();
  unsigned BaseSubReg = AddrReg->getSubReg();
  unsigned BaseRegFlags = 0;
  if (CI.BaseOff) {
    unsigned ImmReg = MRI->createVirtualRegister(&AMDGPU::SGPR_32RegClass);
    BuildMI(*MBB, CI.Paired, DL, TII->get(AMDGPU::S_MOV_B32), ImmReg)
        .addImm(CI.BaseOff);

    BaseReg = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
    BaseRegFlags = RegState::Kill;

    TII->getAddNoCarry(*MBB, CI.Paired, DL, BaseReg)
        .addReg(ImmReg)
        .addReg(AddrReg->getReg(), 0, BaseSubReg)
        .addImm(0); // clamp bit
    BaseSubReg = 0;
  }

  MachineInstrBuilder Write2 =
      BuildMI(*MBB, CI.Paired, DL, Write2Desc)
          .addReg(BaseReg, BaseRegFlags, BaseSubReg) // addr
          .add(*Data0)                               // data0
          .add(*Data1)                               // data1
          .addImm(NewOffset0)                        // offset0
          .addImm(NewOffset1)                        // offset1
          .addImm(0)                                 // gds
          .cloneMergedMemRefs({&*CI.I, &*CI.Paired});

  moveInstsAfter(Write2, CI.InstsToMove);

  MachineBasicBlock::iterator Next = std::next(CI.I);
  CI.I->eraseFromParent();
  CI.Paired->eraseFromParent();

  LLVM_DEBUG(dbgs() << "Inserted write2 inst: " << *Write2 << '\n');
  return Next;
}

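// Replace the two s_buffer_load_dword* instructions in CI with one wider
// s_buffer_load_dwordx{2,4} and copy the subregisters back to the original
// destinations.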
MachineBasicBlock::iterator
SILoadStoreOptimizer::mergeSBufferLoadImmPair(CombineInfo &CI) {
  MachineBasicBlock *MBB = CI.I->getParent();
  DebugLoc DL = CI.I->getDebugLoc();
  const unsigned Opcode = getNewOpcode(CI);

  const TargetRegisterClass *SuperRC = getTargetRegisterClass(CI);

  unsigned DestReg = MRI->createVirtualRegister(SuperRC);
  unsigned MergedOffset = std::min(CI.Offset0, CI.Offset1);

  // It shouldn't be possible to get this far if the two instructions
  // don't have a single memoperand, because MachineInstr::mayAlias()
  // will return true if this is the case.
  assert(CI.I->hasOneMemOperand() && CI.Paired->hasOneMemOperand());

  const MachineMemOperand *MMOa = *CI.I->memoperands_begin();
  const MachineMemOperand *MMOb = *CI.Paired->memoperands_begin();

  BuildMI(*MBB, CI.Paired, DL, TII->get(Opcode), DestReg)
      .add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::sbase))
      .addImm(MergedOffset) // offset
      .addImm(CI.GLC0)      // glc
      .addImm(CI.DLC0)      // dlc
      .addMemOperand(combineKnownAdjacentMMOs(*MBB->getParent(), MMOa, MMOb));

  std::pair<unsigned, unsigned> SubRegIdx = getSubRegIdxs(CI);
  const unsigned SubRegIdx0 = std::get<0>(SubRegIdx);
  const unsigned SubRegIdx1 = std::get<1>(SubRegIdx);

  // Copy to the old destination registers.
  const MCInstrDesc &CopyDesc = TII->get(TargetOpcode::COPY);
  const auto *Dest0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::sdst);
  const auto *Dest1 = TII->getNamedOperand(*CI.Paired, AMDGPU::OpName::sdst);

  BuildMI(*MBB, CI.Paired, DL, CopyDesc)
      .add(*Dest0) // Copy to same destination including flags and sub reg.
      .addReg(DestReg, 0, SubRegIdx0);
  MachineInstr *Copy1 = BuildMI(*MBB, CI.Paired, DL, CopyDesc)
                            .add(*Dest1)
                            .addReg(DestReg, RegState::Kill, SubRegIdx1);

  moveInstsAfter(Copy1, CI.InstsToMove);

  MachineBasicBlock::iterator Next = std::next(CI.I);
  CI.I->eraseFromParent();
  CI.Paired->eraseFromParent();
  return Next;
}

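// Replace the two MUBUF loads in CI with one wider buffer load and copy the
// subregisters back to the original destinations.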
MachineBasicBlock::iterator
SILoadStoreOptimizer::mergeBufferLoadPair(CombineInfo &CI) {
  MachineBasicBlock *MBB = CI.I->getParent();
  DebugLoc DL = CI.I->getDebugLoc();

  const unsigned Opcode = getNewOpcode(CI);

  const TargetRegisterClass *SuperRC = getTargetRegisterClass(CI);

  // Copy to the new source register.
  unsigned DestReg = MRI->createVirtualRegister(SuperRC);
  unsigned MergedOffset = std::min(CI.Offset0, CI.Offset1);

  auto MIB = BuildMI(*MBB, CI.Paired, DL, TII->get(Opcode), DestReg);

  const unsigned Regs = getRegs(Opcode);

  if (Regs & VADDR)
    MIB.add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::vaddr));

  // It shouldn't be possible to get this far if the two instructions
  // don't have a single memoperand, because MachineInstr::mayAlias()
  // will return true if this is the case.
  assert(CI.I->hasOneMemOperand() && CI.Paired->hasOneMemOperand());

  const MachineMemOperand *MMOa = *CI.I->memoperands_begin();
  const MachineMemOperand *MMOb = *CI.Paired->memoperands_begin();

  MIB.add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::srsrc))
      .add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::soffset))
      .addImm(MergedOffset) // offset
      .addImm(CI.GLC0)      // glc
      .addImm(CI.SLC0)      // slc
      .addImm(0)            // tfe
      .addImm(CI.DLC0)      // dlc
      .addMemOperand(combineKnownAdjacentMMOs(*MBB->getParent(), MMOa, MMOb));

  std::pair<unsigned, unsigned> SubRegIdx = getSubRegIdxs(CI);
  const unsigned SubRegIdx0 = std::get<0>(SubRegIdx);
  const unsigned SubRegIdx1 = std::get<1>(SubRegIdx);

  // Copy to the old destination registers.
  const MCInstrDesc &CopyDesc = TII->get(TargetOpcode::COPY);
  const auto *Dest0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::vdata);
  const auto *Dest1 = TII->getNamedOperand(*CI.Paired, AMDGPU::OpName::vdata);

  BuildMI(*MBB, CI.Paired, DL, CopyDesc)
      .add(*Dest0) // Copy to same destination including flags and sub reg.
      .addReg(DestReg, 0, SubRegIdx0);
  MachineInstr *Copy1 = BuildMI(*MBB, CI.Paired, DL, CopyDesc)
                            .add(*Dest1)
                            .addReg(DestReg, RegState::Kill, SubRegIdx1);

  moveInstsAfter(Copy1, CI.InstsToMove);

  MachineBasicBlock::iterator Next = std::next(CI.I);
  CI.I->eraseFromParent();
  CI.Paired->eraseFromParent();
  return Next;
}

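// Return the opcode of the wider instruction that replaces the matched pair,
// chosen by instruction class and combined width in dwords.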
unsigned SILoadStoreOptimizer::getNewOpcode(const CombineInfo &CI) {
  const unsigned Width = CI.Width0 + CI.Width1;

  switch (CI.InstClass) {
  default:
    return AMDGPU::getMUBUFOpcode(CI.InstClass, Width);
  case UNKNOWN:
    llvm_unreachable("Unknown instruction class");
  case S_BUFFER_LOAD_IMM:
    switch (Width) {
    default:
      return 0;
    case 2:
      return AMDGPU::S_BUFFER_LOAD_DWORDX2_IMM;
    case 4:
      return AMDGPU::S_BUFFER_LOAD_DWORDX4_IMM;
    }
  }
}

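// Return the subregister indices the two original values occupy in the merged
// register, with the lower-offset instruction placed in the low subregisters.
// E.g. Width0 = 1 and Width1 = 2 with Offset0 < Offset1 yields
// (sub0, sub1_sub2).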
std::pair<unsigned, unsigned>
SILoadStoreOptimizer::getSubRegIdxs(const CombineInfo &CI) {
  if (CI.Offset0 > CI.Offset1) {
    switch (CI.Width0) {
    default:
      return std::make_pair(0, 0);
    case 1:
      switch (CI.Width1) {
      default:
        return std::make_pair(0, 0);
      case 1:
        return std::make_pair(AMDGPU::sub1, AMDGPU::sub0);
      case 2:
        return std::make_pair(AMDGPU::sub2, AMDGPU::sub0_sub1);
      case 3:
        return std::make_pair(AMDGPU::sub3, AMDGPU::sub0_sub1_sub2);
      }
    case 2:
      switch (CI.Width1) {
      default:
        return std::make_pair(0, 0);
      case 1:
        return std::make_pair(AMDGPU::sub1_sub2, AMDGPU::sub0);
      case 2:
        return std::make_pair(AMDGPU::sub2_sub3, AMDGPU::sub0_sub1);
      }
    case 3:
      switch (CI.Width1) {
      default:
        return std::make_pair(0, 0);
      case 1:
        return std::make_pair(AMDGPU::sub1_sub2_sub3, AMDGPU::sub0);
      }
    }
  } else {
    switch (CI.Width0) {
    default:
      return std::make_pair(0, 0);
    case 1:
      switch (CI.Width1) {
      default:
        return std::make_pair(0, 0);
      case 1:
        return std::make_pair(AMDGPU::sub0, AMDGPU::sub1);
      case 2:
        return std::make_pair(AMDGPU::sub0, AMDGPU::sub1_sub2);
      case 3:
        return std::make_pair(AMDGPU::sub0, AMDGPU::sub1_sub2_sub3);
      }
    case 2:
      switch (CI.Width1) {
      default:
        return std::make_pair(0, 0);
      case 1:
        return std::make_pair(AMDGPU::sub0_sub1, AMDGPU::sub2);
      case 2:
        return std::make_pair(AMDGPU::sub0_sub1, AMDGPU::sub2_sub3);
      }
    case 3:
      switch (CI.Width1) {
      default:
        return std::make_pair(0, 0);
      case 1:
        return std::make_pair(AMDGPU::sub0_sub1_sub2, AMDGPU::sub3);
      }
    }
  }
}

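// Pick a register class wide enough for the merged result: scalar classes
// for S_BUFFER_LOAD_IMM, vector classes otherwise.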
const TargetRegisterClass *
SILoadStoreOptimizer::getTargetRegisterClass(const CombineInfo &CI) {
  if (CI.InstClass == S_BUFFER_LOAD_IMM) {
    switch (CI.Width0 + CI.Width1) {
    default:
      return nullptr;
    case 2:
      return &AMDGPU::SReg_64_XEXECRegClass;
    case 4:
      return &AMDGPU::SReg_128RegClass;
    case 8:
      return &AMDGPU::SReg_256RegClass;
    case 16:
      return &AMDGPU::SReg_512RegClass;
    }
  } else {
    switch (CI.Width0 + CI.Width1) {
    default:
      return nullptr;
    case 2:
      return &AMDGPU::VReg_64RegClass;
    case 3:
      return &AMDGPU::VReg_96RegClass;
    case 4:
      return &AMDGPU::VReg_128RegClass;
    }
  }
}

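// Replace the two MUBUF stores in CI with one wider store, packing both data
// operands into a single register with REG_SEQUENCE.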
MachineBasicBlock::iterator
SILoadStoreOptimizer::mergeBufferStorePair(CombineInfo &CI) {
  MachineBasicBlock *MBB = CI.I->getParent();
  DebugLoc DL = CI.I->getDebugLoc();

  const unsigned Opcode = getNewOpcode(CI);

  std::pair<unsigned, unsigned> SubRegIdx = getSubRegIdxs(CI);
  const unsigned SubRegIdx0 = std::get<0>(SubRegIdx);
  const unsigned SubRegIdx1 = std::get<1>(SubRegIdx);

  // Copy to the new source register.
  const TargetRegisterClass *SuperRC = getTargetRegisterClass(CI);
  unsigned SrcReg = MRI->createVirtualRegister(SuperRC);

  const auto *Src0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::vdata);
  const auto *Src1 = TII->getNamedOperand(*CI.Paired, AMDGPU::OpName::vdata);

  BuildMI(*MBB, CI.Paired, DL, TII->get(AMDGPU::REG_SEQUENCE), SrcReg)
      .add(*Src0)
      .addImm(SubRegIdx0)
      .add(*Src1)
      .addImm(SubRegIdx1);

  auto MIB = BuildMI(*MBB, CI.Paired, DL, TII->get(Opcode))
                 .addReg(SrcReg, RegState::Kill);

  const unsigned Regs = getRegs(Opcode);

  if (Regs & VADDR)
    MIB.add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::vaddr));

  // It shouldn't be possible to get this far if the two instructions
  // don't have a single memoperand, because MachineInstr::mayAlias()
  // will return true if this is the case.
  assert(CI.I->hasOneMemOperand() && CI.Paired->hasOneMemOperand());

  const MachineMemOperand *MMOa = *CI.I->memoperands_begin();
  const MachineMemOperand *MMOb = *CI.Paired->memoperands_begin();

  MIB.add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::srsrc))
      .add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::soffset))
      .addImm(std::min(CI.Offset0, CI.Offset1)) // offset
      .addImm(CI.GLC0)                          // glc
      .addImm(CI.SLC0)                          // slc
      .addImm(0)                                // tfe
      .addImm(CI.DLC0)                          // dlc
      .addMemOperand(combineKnownAdjacentMMOs(*MBB->getParent(), MMOa, MMOb));

  moveInstsAfter(MIB, CI.InstsToMove);

  MachineBasicBlock::iterator Next = std::next(CI.I);
  CI.I->eraseFromParent();
  CI.Paired->eraseFromParent();
  return Next;
}

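// Materialize Val as a machine operand: an immediate if it is an inline
// constant, otherwise an S_MOV_B32 into a fresh SGPR.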
Farhana Aleence095c52018-12-14 21:13:14 +00001147MachineOperand
1148SILoadStoreOptimizer::createRegOrImm(int32_t Val, MachineInstr &MI) {
1149 APInt V(32, Val, true);
1150 if (TII->isInlineConstant(V))
1151 return MachineOperand::CreateImm(Val);
1152
1153 unsigned Reg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
1154 MachineInstr *Mov =
1155 BuildMI(*MI.getParent(), MI.getIterator(), MI.getDebugLoc(),
1156 TII->get(AMDGPU::S_MOV_B32), Reg)
1157 .addImm(Val);
Simon Pilgrim9831d402018-12-15 12:25:22 +00001158 (void)Mov;
Farhana Aleence095c52018-12-14 21:13:14 +00001159 LLVM_DEBUG(dbgs() << " "; Mov->dump());
1160 return MachineOperand::CreateReg(Reg, false);
1161}
1162
1163// Compute base address using Addr and return the final register.
1164unsigned SILoadStoreOptimizer::computeBase(MachineInstr &MI,
1165 const MemAddress &Addr) {
1166 MachineBasicBlock *MBB = MI.getParent();
1167 MachineBasicBlock::iterator MBBI = MI.getIterator();
1168 DebugLoc DL = MI.getDebugLoc();
1169
1170 assert((TRI->getRegSizeInBits(Addr.Base.LoReg, *MRI) == 32 ||
1171 Addr.Base.LoSubReg) &&
1172 "Expected 32-bit Base-Register-Low!!");
1173
1174 assert((TRI->getRegSizeInBits(Addr.Base.HiReg, *MRI) == 32 ||
1175 Addr.Base.HiSubReg) &&
1176 "Expected 32-bit Base-Register-Hi!!");
1177
1178 LLVM_DEBUG(dbgs() << " Re-Computed Anchor-Base:\n");
1179 MachineOperand OffsetLo = createRegOrImm(static_cast<int32_t>(Addr.Offset), MI);
1180 MachineOperand OffsetHi =
1181 createRegOrImm(static_cast<int32_t>(Addr.Offset >> 32), MI);
Stanislav Mekhanoshin52500212019-06-16 17:13:09 +00001182
1183 const auto *CarryRC = TRI->getRegClass(AMDGPU::SReg_1_XEXECRegClassID);
1184 unsigned CarryReg = MRI->createVirtualRegister(CarryRC);
1185 unsigned DeadCarryReg = MRI->createVirtualRegister(CarryRC);
Farhana Aleence095c52018-12-14 21:13:14 +00001186
1187 unsigned DestSub0 = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
1188 unsigned DestSub1 = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
1189 MachineInstr *LoHalf =
1190 BuildMI(*MBB, MBBI, DL, TII->get(AMDGPU::V_ADD_I32_e64), DestSub0)
1191 .addReg(CarryReg, RegState::Define)
1192 .addReg(Addr.Base.LoReg, 0, Addr.Base.LoSubReg)
Tim Renoufcfdfba92019-03-18 19:35:44 +00001193 .add(OffsetLo)
1194 .addImm(0); // clamp bit
Simon Pilgrim9831d402018-12-15 12:25:22 +00001195 (void)LoHalf;
Farhana Aleence095c52018-12-14 21:13:14 +00001196 LLVM_DEBUG(dbgs() << " "; LoHalf->dump(););
1197
1198 MachineInstr *HiHalf =
1199 BuildMI(*MBB, MBBI, DL, TII->get(AMDGPU::V_ADDC_U32_e64), DestSub1)
1200 .addReg(DeadCarryReg, RegState::Define | RegState::Dead)
1201 .addReg(Addr.Base.HiReg, 0, Addr.Base.HiSubReg)
1202 .add(OffsetHi)
Tim Renoufcfdfba92019-03-18 19:35:44 +00001203 .addReg(CarryReg, RegState::Kill)
1204 .addImm(0); // clamp bit
Simon Pilgrim9831d402018-12-15 12:25:22 +00001205 (void)HiHalf;
Farhana Aleence095c52018-12-14 21:13:14 +00001206 LLVM_DEBUG(dbgs() << " "; HiHalf->dump(););
1207
1208 unsigned FullDestReg = MRI->createVirtualRegister(&AMDGPU::VReg_64RegClass);
1209 MachineInstr *FullBase =
1210 BuildMI(*MBB, MBBI, DL, TII->get(TargetOpcode::REG_SEQUENCE), FullDestReg)
1211 .addReg(DestSub0)
1212 .addImm(AMDGPU::sub0)
1213 .addReg(DestSub1)
1214 .addImm(AMDGPU::sub1);
Simon Pilgrim9831d402018-12-15 12:25:22 +00001215 (void)FullBase;
Farhana Aleence095c52018-12-14 21:13:14 +00001216 LLVM_DEBUG(dbgs() << " "; FullBase->dump(); dbgs() << "\n";);
1217
1218 return FullDestReg;
1219}
1220
1221 // Update MI's base register and immediate offset to NewBase and NewOffset.
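// E.g. rebasing an access at &a + 6144 onto an anchor at &a + 8192 sets its
// vaddr to the anchor's base and its offset to -2048 (see the Step3 example
// in promoteConstantOffsetToImm below).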
1222void SILoadStoreOptimizer::updateBaseAndOffset(MachineInstr &MI,
1223 unsigned NewBase,
1224 int32_t NewOffset) {
1225 TII->getNamedOperand(MI, AMDGPU::OpName::vaddr)->setReg(NewBase);
1226 TII->getNamedOperand(MI, AMDGPU::OpName::offset)->setImm(NewOffset);
1227}
1228
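// Return the constant behind Op: the immediate itself, or, for a register
// operand, the immediate of its unique S_MOV_B32 def. E.g. (illustrative)
// given "%7:sgpr_32 = S_MOV_B32 8000", a use of %7 yields 8000; anything
// else yields None.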
1229Optional<int32_t>
1230SILoadStoreOptimizer::extractConstOffset(const MachineOperand &Op) {
1231 if (Op.isImm())
1232 return Op.getImm();
1233
1234 if (!Op.isReg())
1235 return None;
1236
1237 MachineInstr *Def = MRI->getUniqueVRegDef(Op.getReg());
1238 if (!Def || Def->getOpcode() != AMDGPU::S_MOV_B32 ||
1239 !Def->getOperand(1).isImm())
1240 return None;
1241
1242 return Def->getOperand(1).getImm();
1243}
1244
1245 // Analyzes Base and extracts:
1246 // - 32-bit base registers and subregisters
1247 // - a 64-bit constant offset
1248 // Expecting the base computation as:
1249 // %OFFSET0:sgpr_32 = S_MOV_B32 8000
1250 // %LO:vgpr_32, %c:sreg_64_xexec =
1251 // V_ADD_I32_e64 %BASE_LO:vgpr_32, %OFFSET0:sgpr_32,
1252 // %HI:vgpr_32 = V_ADDC_U32_e64 %BASE_HI:vgpr_32, 0, killed %c:sreg_64_xexec
1253// %Base:vreg_64 =
1254// REG_SEQUENCE %LO:vgpr_32, %subreg.sub0, %HI:vgpr_32, %subreg.sub1
1255void SILoadStoreOptimizer::processBaseWithConstOffset(const MachineOperand &Base,
1256 MemAddress &Addr) {
1257 if (!Base.isReg())
1258 return;
1259
1260 MachineInstr *Def = MRI->getUniqueVRegDef(Base.getReg());
1261 if (!Def || Def->getOpcode() != AMDGPU::REG_SEQUENCE
1262 || Def->getNumOperands() != 5)
1263 return;
1264
1265 MachineOperand BaseLo = Def->getOperand(1);
1266 MachineOperand BaseHi = Def->getOperand(3);
1267 if (!BaseLo.isReg() || !BaseHi.isReg())
1268 return;
1269
1270 MachineInstr *BaseLoDef = MRI->getUniqueVRegDef(BaseLo.getReg());
1271 MachineInstr *BaseHiDef = MRI->getUniqueVRegDef(BaseHi.getReg());
1272
1273 if (!BaseLoDef || BaseLoDef->getOpcode() != AMDGPU::V_ADD_I32_e64 ||
1274 !BaseHiDef || BaseHiDef->getOpcode() != AMDGPU::V_ADDC_U32_e64)
1275 return;
1276
1277 const auto *Src0 = TII->getNamedOperand(*BaseLoDef, AMDGPU::OpName::src0);
1278 const auto *Src1 = TII->getNamedOperand(*BaseLoDef, AMDGPU::OpName::src1);
1279
1280 auto Offset0P = extractConstOffset(*Src0);
1281 if (Offset0P)
1282 BaseLo = *Src1;
1283 else {
1284 if (!(Offset0P = extractConstOffset(*Src1)))
1285 return;
1286 BaseLo = *Src0;
1287 }
1288
1289 Src0 = TII->getNamedOperand(*BaseHiDef, AMDGPU::OpName::src0);
1290 Src1 = TII->getNamedOperand(*BaseHiDef, AMDGPU::OpName::src1);
1291
1292 if (Src0->isImm())
1293 std::swap(Src0, Src1);
1294
1295 if (!Src1->isImm())
1296 return;
1297
Farhana Aleence095c52018-12-14 21:13:14 +00001298 uint64_t Offset1 = Src1->getImm();
1299 BaseHi = *Src0;
1300
1301 Addr.Base.LoReg = BaseLo.getReg();
1302 Addr.Base.HiReg = BaseHi.getReg();
1303 Addr.Base.LoSubReg = BaseLo.getSubReg();
1304 Addr.Base.HiSubReg = BaseHi.getSubReg();
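  // E.g. a low-half constant of 0x1800 and a high-half constant of 1
  // (illustrative values) combine to Addr.Offset == 0x100001800 below.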
1305 Addr.Offset = (*Offset0P & 0x00000000ffffffff) | (Offset1 << 32);
1306}
1307
1308bool SILoadStoreOptimizer::promoteConstantOffsetToImm(
1309 MachineInstr &MI,
1310 MemInfoMap &Visited,
1311 SmallPtrSet<MachineInstr *, 4> &AnchorList) {
1312
1313 // TODO: Support flat and scratch.
1314 if (AMDGPU::getGlobalSaddrOp(MI.getOpcode()) < 0 ||
1315 TII->getNamedOperand(MI, AMDGPU::OpName::vdata) != nullptr)
1316 return false;
1317
1318 // TODO: Support Store.
1319 if (!MI.mayLoad())
1320 return false;
1321
1322 if (AnchorList.count(&MI))
1323 return false;
1324
1325 LLVM_DEBUG(dbgs() << "\nTryToPromoteConstantOffsetToImmFor "; MI.dump());
1326
1327 if (TII->getNamedOperand(MI, AMDGPU::OpName::offset)->getImm()) {
1328 LLVM_DEBUG(dbgs() << " Const-offset is already promoted.\n";);
1329 return false;
1330 }
1331
1332 // Step1: Find the base registers and the 64-bit constant offset.
1333 MachineOperand &Base = *TII->getNamedOperand(MI, AMDGPU::OpName::vaddr);
1334 MemAddress MAddr;
1335 if (Visited.find(&MI) == Visited.end()) {
1336 processBaseWithConstOffset(Base, MAddr);
1337 Visited[&MI] = MAddr;
1338 } else
1339 MAddr = Visited[&MI];
1340
1341 if (MAddr.Offset == 0) {
1342 LLVM_DEBUG(dbgs() << " Failed to extract constant-offset or there are no"
1343 " constant offsets that can be promoted.\n";);
1344 return false;
1345 }
1346
1347 LLVM_DEBUG(dbgs() << " BASE: {" << MAddr.Base.HiReg << ", "
1348 << MAddr.Base.LoReg << "} Offset: " << MAddr.Offset << "\n\n";);
1349
1350 // Step2: Traverse through MI's basic block and find an anchor (one with the
1351 // same base registers) at the highest 13-bit distance from MI's offset.
1352 // E.g. (64bit loads)
1353 // bb:
1354 // addr1 = &a + 4096; load1 = load(addr1, 0)
1355 // addr2 = &a + 6144; load2 = load(addr2, 0)
1356 // addr3 = &a + 8192; load3 = load(addr3, 0)
1357 // addr4 = &a + 10240; load4 = load(addr4, 0)
1358 // addr5 = &a + 12288; load5 = load(addr5, 0)
1359 //
1360 // Starting from the first load, the optimization will try to find a new base
1361 // from which (&a + 4096) has a 13-bit distance. Both &a + 6144 and &a + 8192
1362 // have a 13-bit distance from &a + 4096. The heuristic picks &a + 8192 as
1363 // the new base (anchor) because the maximum distance can presumably
1364 // accommodate more intermediate addresses.
1365 //
1366 // Step3: move (&a + 8192) above load1, then compute and promote offsets
1367 // from (&a + 8192) for load1, load2, and load4 (load3 becomes offset 0).
1368 // addr = &a + 8192
1369 // load1 = load(addr, -4096)
1370 // load2 = load(addr, -2048)
1371 // load3 = load(addr, 0)
1372 // load4 = load(addr, 2048)
1373 // addr5 = &a + 12288; load5 = load(addr5, 0)
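// (load5 is not rebased: +4096 from the anchor does not fit the signed
// 13-bit offset range [-4096, 4095].)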
1374 //
1375 MachineInstr *AnchorInst = nullptr;
1376 MemAddress AnchorAddr;
1377 uint32_t MaxDist = std::numeric_limits<uint32_t>::min();
1378 SmallVector<std::pair<MachineInstr *, int64_t>, 4> InstsWCommonBase;
1379
1380 MachineBasicBlock *MBB = MI.getParent();
1381 MachineBasicBlock::iterator E = MBB->end();
1382 MachineBasicBlock::iterator MBBI = MI.getIterator();
1383 ++MBBI;
1384 const SITargetLowering *TLI =
1385 static_cast<const SITargetLowering *>(STM->getTargetLowering());
1386
1387 for ( ; MBBI != E; ++MBBI) {
1388 MachineInstr &MINext = *MBBI;
1389 // TODO: Support finding an anchor (with the same base) from store addresses
1390 // or any other load addresses where the opcodes are different.
1391 if (MINext.getOpcode() != MI.getOpcode() ||
1392 TII->getNamedOperand(MINext, AMDGPU::OpName::offset)->getImm())
1393 continue;
1394
1395 const MachineOperand &BaseNext =
1396 *TII->getNamedOperand(MINext, AMDGPU::OpName::vaddr);
1397 MemAddress MAddrNext;
1398 if (Visited.find(&MINext) == Visited.end()) {
1399 processBaseWithConstOffset(BaseNext, MAddrNext);
1400 Visited[&MINext] = MAddrNext;
1401 } else
1402 MAddrNext = Visited[&MINext];
1403
1404 if (MAddrNext.Base.LoReg != MAddr.Base.LoReg ||
1405 MAddrNext.Base.HiReg != MAddr.Base.HiReg ||
1406 MAddrNext.Base.LoSubReg != MAddr.Base.LoSubReg ||
1407 MAddrNext.Base.HiSubReg != MAddr.Base.HiSubReg)
1408 continue;
1409
1410 InstsWCommonBase.push_back(std::make_pair(&MINext, MAddrNext.Offset));
1411
1412 int64_t Dist = MAddr.Offset - MAddrNext.Offset;
1413 TargetLoweringBase::AddrMode AM;
1414 AM.HasBaseReg = true;
1415 AM.BaseOffs = Dist;
1416 if (TLI->isLegalGlobalAddressingMode(AM) &&
Florian Hahnabe32c92018-12-15 01:32:58 +00001417 (uint32_t)std::abs(Dist) > MaxDist) {
1418 MaxDist = std::abs(Dist);
Farhana Aleence095c52018-12-14 21:13:14 +00001419
1420 AnchorAddr = MAddrNext;
1421 AnchorInst = &MINext;
1422 }
1423 }
1424
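  // AnchorInst, if set, is the same-base access whose distance from MI's
  // offset is the largest one that still fits a legal immediate.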
1425 if (AnchorInst) {
1426 LLVM_DEBUG(dbgs() << " Anchor-Inst(with max-distance from Offset): ";
1427 AnchorInst->dump());
1428 LLVM_DEBUG(dbgs() << " Anchor-Offset from BASE: "
1429 << AnchorAddr.Offset << "\n\n");
1430
1431 // Instead of moving the anchor up, just recompute its base address at MI.
1432 unsigned Base = computeBase(MI, AnchorAddr);
1433
1434 updateBaseAndOffset(MI, Base, MAddr.Offset - AnchorAddr.Offset);
1435 LLVM_DEBUG(dbgs() << " After promotion: "; MI.dump(););
1436
1437 for (auto P : InstsWCommonBase) {
1438 TargetLoweringBase::AddrMode AM;
1439 AM.HasBaseReg = true;
1440 AM.BaseOffs = P.second - AnchorAddr.Offset;
1441
1442 if (TLI->isLegalGlobalAddressingMode(AM)) {
1443 LLVM_DEBUG(dbgs() << " Promote Offset(" << P.second;
1444 dbgs() << ")"; P.first->dump());
1445 updateBaseAndOffset(*P.first, Base, P.second - AnchorAddr.Offset);
1446 LLVM_DEBUG(dbgs() << " After promotion: "; P.first->dump());
1447 }
1448 }
1449 AnchorList.insert(AnchorInst);
1450 return true;
1451 }
1452
1453 return false;
1454}
1455
Matt Arsenault41033282014-10-10 22:01:59 +00001456// Scan through looking for adjacent memory operations with constant offsets from
1457// the same base register. We rely on the scheduler to do the hard work of
1458// clustering nearby loads, and assume these are all adjacent.
1459bool SILoadStoreOptimizer::optimizeBlock(MachineBasicBlock &MBB) {
Matt Arsenault41033282014-10-10 22:01:59 +00001460 bool Modified = false;
1461
Farhana Aleence095c52018-12-14 21:13:14 +00001462 // Caches the base address and constant offset computed for each instruction.
1463 MemInfoMap Visited;
1464 // Contains the list of instructions for which constant offsets are being
1465 // promoted to the immediate.
1466 SmallPtrSet<MachineInstr *, 4> AnchorList;
1467
Matt Arsenault41033282014-10-10 22:01:59 +00001468 for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end(); I != E;) {
1469 MachineInstr &MI = *I;
1470
Farhana Aleence095c52018-12-14 21:13:14 +00001471 if (promoteConstantOffsetToImm(MI, Visited, AnchorList))
1472 Modified = true;
1473
Matt Arsenault41033282014-10-10 22:01:59 +00001474 // Don't combine if volatile or ordered (atomic).
1475 if (MI.hasOrderedMemoryRef()) {
1476 ++I;
1477 continue;
1478 }
1479
Neil Henning76504a42018-12-12 16:15:21 +00001480 const unsigned Opc = MI.getOpcode();
1481
Stanislav Mekhanoshind026f792017-04-13 17:53:07 +00001482 CombineInfo CI;
1483 CI.I = I;
Neil Henning76504a42018-12-12 16:15:21 +00001484 CI.InstClass = getInstClass(Opc);
Matt Arsenault3f71c0e2017-11-29 00:55:57 +00001485
Neil Henning76504a42018-12-12 16:15:21 +00001486 switch (CI.InstClass) {
1487 default:
1488 break;
1489 case DS_READ:
Matt Arsenault3f71c0e2017-11-29 00:55:57 +00001490 CI.EltSize =
Neil Henning76504a42018-12-12 16:15:21 +00001491 (Opc == AMDGPU::DS_READ_B64 || Opc == AMDGPU::DS_READ_B64_gfx9) ? 8
1492 : 4;
Marek Olsakb953cc32017-11-09 01:52:23 +00001493 if (findMatchingInst(CI)) {
Matt Arsenault41033282014-10-10 22:01:59 +00001494 Modified = true;
Stanislav Mekhanoshind026f792017-04-13 17:53:07 +00001495 I = mergeRead2Pair(CI);
Matt Arsenault41033282014-10-10 22:01:59 +00001496 } else {
1497 ++I;
1498 }
Matt Arsenault41033282014-10-10 22:01:59 +00001499 continue;
Neil Henning76504a42018-12-12 16:15:21 +00001500 case DS_WRITE:
1501 CI.EltSize =
1502 (Opc == AMDGPU::DS_WRITE_B64 || Opc == AMDGPU::DS_WRITE_B64_gfx9) ? 8
1503 : 4;
Marek Olsakb953cc32017-11-09 01:52:23 +00001504 if (findMatchingInst(CI)) {
Matt Arsenault41033282014-10-10 22:01:59 +00001505 Modified = true;
Stanislav Mekhanoshind026f792017-04-13 17:53:07 +00001506 I = mergeWrite2Pair(CI);
Matt Arsenault41033282014-10-10 22:01:59 +00001507 } else {
1508 ++I;
1509 }
Matt Arsenault41033282014-10-10 22:01:59 +00001510 continue;
Neil Henning76504a42018-12-12 16:15:21 +00001511 case S_BUFFER_LOAD_IMM:
Marek Olsakb953cc32017-11-09 01:52:23 +00001512 CI.EltSize = AMDGPU::getSMRDEncodedOffset(*STM, 4);
Marek Olsakb953cc32017-11-09 01:52:23 +00001513 if (findMatchingInst(CI)) {
1514 Modified = true;
1515 I = mergeSBufferLoadImmPair(CI);
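      // Widths are counted in dwords; a merged result still narrower than
      // S_BUFFER_LOAD_DWORDX16 may merge again on a later pass over the block.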
Neil Henning76504a42018-12-12 16:15:21 +00001516 OptimizeAgain |= (CI.Width0 + CI.Width1) < 16;
Marek Olsakb953cc32017-11-09 01:52:23 +00001517 } else {
1518 ++I;
1519 }
1520 continue;
Neil Henning76504a42018-12-12 16:15:21 +00001521 case BUFFER_LOAD_OFFEN:
1522 case BUFFER_LOAD_OFFSET:
1523 case BUFFER_LOAD_OFFEN_exact:
1524 case BUFFER_LOAD_OFFSET_exact:
Marek Olsak6a0548a2017-11-09 01:52:30 +00001525 CI.EltSize = 4;
Marek Olsak6a0548a2017-11-09 01:52:30 +00001526 if (findMatchingInst(CI)) {
1527 Modified = true;
Marek Olsak4c421a2d2017-11-09 01:52:36 +00001528 I = mergeBufferLoadPair(CI);
Neil Henning76504a42018-12-12 16:15:21 +00001529 OptimizeAgain |= (CI.Width0 + CI.Width1) < 4;
Marek Olsak6a0548a2017-11-09 01:52:30 +00001530 } else {
1531 ++I;
1532 }
1533 continue;
Neil Henning76504a42018-12-12 16:15:21 +00001534 case BUFFER_STORE_OFFEN:
1535 case BUFFER_STORE_OFFSET:
1536 case BUFFER_STORE_OFFEN_exact:
1537 case BUFFER_STORE_OFFSET_exact:
Marek Olsak58410f32017-11-09 01:52:55 +00001538 CI.EltSize = 4;
Marek Olsak58410f32017-11-09 01:52:55 +00001539 if (findMatchingInst(CI)) {
1540 Modified = true;
1541 I = mergeBufferStorePair(CI);
Neil Henning76504a42018-12-12 16:15:21 +00001542 OptimizeAgain |= (CI.Width0 + CI.Width1) < 4;
Marek Olsak58410f32017-11-09 01:52:55 +00001543 } else {
1544 ++I;
1545 }
1546 continue;
1547 }
1548
Matt Arsenault41033282014-10-10 22:01:59 +00001549 ++I;
1550 }
1551
1552 return Modified;
1553}
1554
1555bool SILoadStoreOptimizer::runOnMachineFunction(MachineFunction &MF) {
Matthias Braunf1caa282017-12-15 22:22:58 +00001556 if (skipFunction(MF.getFunction()))
Andrew Kaylor7de74af2016-04-25 22:23:44 +00001557 return false;
1558
Tom Stellard5bfbae52018-07-11 20:59:01 +00001559 STM = &MF.getSubtarget<GCNSubtarget>();
Marek Olsakb953cc32017-11-09 01:52:23 +00001560 if (!STM->loadStoreOptEnabled())
Matt Arsenault03d85842016-06-27 20:32:13 +00001561 return false;
1562
Marek Olsakb953cc32017-11-09 01:52:23 +00001563 TII = STM->getInstrInfo();
Matt Arsenault43e92fe2016-06-24 06:30:11 +00001564 TRI = &TII->getRegisterInfo();
1565
Matt Arsenault41033282014-10-10 22:01:59 +00001566 MRI = &MF.getRegInfo();
Tom Stellardc2ff0eb2016-08-29 19:15:22 +00001567 AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
Matt Arsenault41033282014-10-10 22:01:59 +00001568
Matt Arsenault67e72de2017-08-31 01:53:09 +00001569 assert(MRI->isSSA() && "Must be run on SSA");
1570
Nicola Zaghend34e60c2018-05-14 12:53:11 +00001571 LLVM_DEBUG(dbgs() << "Running SILoadStoreOptimizer\n");
Matt Arsenault41033282014-10-10 22:01:59 +00001572
Matt Arsenault41033282014-10-10 22:01:59 +00001573 bool Modified = false;
1574
Nicolai Haehnleb4f28de2017-11-28 08:42:46 +00001575 for (MachineBasicBlock &MBB : MF) {
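    // Merging can itself create new merge candidates (e.g. two DWORDX2 loads
    // fusing into a DWORDX4), so repeat until the block reaches a fixed point.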
Neil Henning76504a42018-12-12 16:15:21 +00001576 do {
1577 OptimizeAgain = false;
Marek Olsakb953cc32017-11-09 01:52:23 +00001578 Modified |= optimizeBlock(MBB);
Neil Henning76504a42018-12-12 16:15:21 +00001579 } while (OptimizeAgain);
Marek Olsakb953cc32017-11-09 01:52:23 +00001580 }
1581
Matt Arsenault41033282014-10-10 22:01:59 +00001582 return Modified;
1583}